b=17167 libcfs: ensure all libcfs exported symbols to have cfs_ prefix
author Lisa Week <lisa.week@sun.com>
Thu, 14 Jan 2010 05:44:06 +0000 (22:44 -0700)
committer Robert Read <rread@sun.com>
Thu, 14 Jan 2010 16:33:51 +0000 (08:33 -0800)
i=Andreas Dilger

403 files changed:
build/nn-check.py [new file with mode: 0755]
build/nn-final-symbol-list.txt [new file with mode: 0644]
libcfs/include/libcfs/bitmap.h
libcfs/include/libcfs/libcfs.h
libcfs/include/libcfs/libcfs_debug.h
libcfs/include/libcfs/libcfs_hash.h
libcfs/include/libcfs/libcfs_ioctl.h
libcfs/include/libcfs/libcfs_prim.h
libcfs/include/libcfs/libcfs_private.h
libcfs/include/libcfs/libcfs_time.h
libcfs/include/libcfs/linux/Makefile.am
libcfs/include/libcfs/linux/kp30.h
libcfs/include/libcfs/linux/libcfs.h
libcfs/include/libcfs/linux/linux-bitops.h [new file with mode: 0644]
libcfs/include/libcfs/linux/linux-fs.h
libcfs/include/libcfs/linux/linux-lock.h
libcfs/include/libcfs/linux/linux-mem.h
libcfs/include/libcfs/linux/linux-prim.h
libcfs/include/libcfs/linux/linux-time.h
libcfs/include/libcfs/linux/linux-types.h [new file with mode: 0644]
libcfs/include/libcfs/linux/portals_compat25.h
libcfs/include/libcfs/list.h
libcfs/include/libcfs/posix/libcfs.h
libcfs/include/libcfs/posix/posix-types.h
libcfs/include/libcfs/user-bitops.h
libcfs/include/libcfs/user-lock.h
libcfs/include/libcfs/user-mem.h
libcfs/include/libcfs/user-prim.h
libcfs/include/libcfs/user-time.h
libcfs/include/libcfs/util/platform.h
libcfs/include/libcfs/winnt/kp30.h
libcfs/include/libcfs/winnt/libcfs.h
libcfs/include/libcfs/winnt/portals_utils.h
libcfs/include/libcfs/winnt/winnt-fs.h
libcfs/include/libcfs/winnt/winnt-lock.h
libcfs/include/libcfs/winnt/winnt-mem.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/include/libcfs/winnt/winnt-tcpip.h
libcfs/include/libcfs/winnt/winnt-time.h
libcfs/include/libcfs/winnt/winnt-types.h
libcfs/libcfs/darwin/darwin-tracefile.c
libcfs/libcfs/debug.c
libcfs/libcfs/hash.c
libcfs/libcfs/linux/linux-debug.c
libcfs/libcfs/linux/linux-fs.c
libcfs/libcfs/linux/linux-module.c
libcfs/libcfs/linux/linux-prim.c
libcfs/libcfs/linux/linux-proc.c
libcfs/libcfs/linux/linux-tcpip.c
libcfs/libcfs/linux/linux-tracefile.c
libcfs/libcfs/linux/linux-tracefile.h
libcfs/libcfs/lwt.c
libcfs/libcfs/module.c
libcfs/libcfs/nidstrings.c
libcfs/libcfs/posix/posix-debug.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/tracefile.h
libcfs/libcfs/user-bitops.c
libcfs/libcfs/user-lock.c
libcfs/libcfs/user-prim.c
libcfs/libcfs/watchdog.c
libcfs/libcfs/winnt/winnt-curproc.c
libcfs/libcfs/winnt/winnt-fs.c
libcfs/libcfs/winnt/winnt-lock.c
libcfs/libcfs/winnt/winnt-mem.c
libcfs/libcfs/winnt/winnt-module.c
libcfs/libcfs/winnt/winnt-native.c
libcfs/libcfs/winnt/winnt-prim.c
libcfs/libcfs/winnt/winnt-proc.c
libcfs/libcfs/winnt/winnt-sync.c
libcfs/libcfs/winnt/winnt-tcpip.c
libcfs/libcfs/winnt/winnt-tracefile.c
libcfs/libcfs/winnt/winnt-tracefile.h
lnet/include/lnet/lib-lnet.h
lnet/include/lnet/lib-types.h
lnet/include/lnet/lnetst.h
lnet/klnds/gmlnd/gmlnd.h
lnet/klnds/gmlnd/gmlnd_api.c
lnet/klnds/gmlnd/gmlnd_cb.c
lnet/klnds/gmlnd/gmlnd_comm.c
lnet/klnds/gmlnd/gmlnd_utils.c
lnet/klnds/mxlnd/mxlnd.c
lnet/klnds/mxlnd/mxlnd.h
lnet/klnds/mxlnd/mxlnd_cb.c
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd.h
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/ptllnd/ptllnd.c
lnet/klnds/ptllnd/ptllnd.h
lnet/klnds/ptllnd/ptllnd_cb.c
lnet/klnds/ptllnd/ptllnd_peer.c
lnet/klnds/ptllnd/ptllnd_ptltrace.c
lnet/klnds/ptllnd/ptllnd_rx_buf.c
lnet/klnds/ptllnd/ptllnd_tx.c
lnet/klnds/ptllnd/wirecheck.c
lnet/klnds/qswlnd/qswlnd.c
lnet/klnds/qswlnd/qswlnd.h
lnet/klnds/qswlnd/qswlnd_cb.c
lnet/klnds/ralnd/ralnd.c
lnet/klnds/ralnd/ralnd.h
lnet/klnds/ralnd/ralnd_cb.c
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c
lnet/klnds/socklnd/socklnd_lib-linux.c
lnet/klnds/socklnd/socklnd_lib-winnt.c
lnet/klnds/socklnd/socklnd_lib-winnt.h
lnet/klnds/socklnd/socklnd_proto.c
lnet/lnet/acceptor.c
lnet/lnet/api-ni.c
lnet/lnet/config.c
lnet/lnet/lib-eq.c
lnet/lnet/lib-md.c
lnet/lnet/lib-me.c
lnet/lnet/lib-move.c
lnet/lnet/lib-msg.c
lnet/lnet/module.c
lnet/lnet/peer.c
lnet/lnet/router.c
lnet/lnet/router_proc.c
lnet/selftest/brw_test.c
lnet/selftest/conctl.c
lnet/selftest/conrpc.c
lnet/selftest/conrpc.h
lnet/selftest/console.c
lnet/selftest/console.h
lnet/selftest/framework.c
lnet/selftest/ping_test.c
lnet/selftest/rpc.c
lnet/selftest/selftest.h
lnet/selftest/timer.c
lnet/selftest/timer.h
lnet/selftest/winnt/selftest-winnt.c
lnet/selftest/workitem.c
lnet/ulnds/ptllnd/ptllnd.c
lnet/ulnds/ptllnd/ptllnd.h
lnet/ulnds/ptllnd/ptllnd_cb.c
lnet/ulnds/socklnd/conn.c
lnet/ulnds/socklnd/handlers.c
lnet/ulnds/socklnd/poll.c
lnet/ulnds/socklnd/usocklnd.c
lnet/ulnds/socklnd/usocklnd.h
lnet/ulnds/socklnd/usocklnd_cb.c
lnet/utils/debug.c
lnet/utils/lst.c
lnet/utils/portals.c
lustre/cmm/cmm_device.c
lustre/cmm/cmm_internal.h
lustre/cmm/cmm_object.c
lustre/cmm/cmm_split.c
lustre/cmm/mdc_device.c
lustre/cmm/mdc_internal.h
lustre/fid/fid_handler.c
lustre/fid/fid_request.c
lustre/fid/lproc_fid.c
lustre/fld/fld_cache.c
lustre/fld/fld_handler.c
lustre/fld/fld_internal.h
lustre/fld/fld_request.c
lustre/fld/lproc_fld.c
lustre/include/cl_object.h
lustre/include/dt_object.h
lustre/include/lclient.h
lustre/include/liblustre.h
lustre/include/linux/lprocfs_status.h
lustre/include/linux/lustre_compat25.h
lustre/include/linux/lustre_fsfilt.h
lustre/include/linux/lustre_handles.h
lustre/include/linux/lustre_lite.h
lustre/include/linux/lustre_patchless_compat.h
lustre/include/linux/lvfs.h
lustre/include/linux/lvfs_linux.h
lustre/include/linux/obd.h
lustre/include/lprocfs_status.h
lustre/include/lu_object.h
lustre/include/lu_ref.h
lustre/include/lu_target.h
lustre/include/lustre/lustre_user.h
lustre/include/lustre_capa.h
lustre/include/lustre_cfg.h
lustre/include/lustre_disk.h
lustre/include/lustre_dlm.h
lustre/include/lustre_export.h
lustre/include/lustre_fid.h
lustre/include/lustre_fld.h
lustre/include/lustre_handles.h
lustre/include/lustre_idmap.h
lustre/include/lustre_import.h
lustre/include/lustre_lib.h
lustre/include/lustre_lite.h
lustre/include/lustre_log.h
lustre/include/lustre_mdc.h
lustre/include/lustre_net.h
lustre/include/lustre_quota.h
lustre/include/lustre_sec.h
lustre/include/lustre_ucache.h
lustre/include/md_object.h
lustre/include/obd.h
lustre/include/obd_class.h
lustre/include/obd_ost.h
lustre/include/obd_support.h
lustre/lclient/lcommon_cl.c
lustre/lclient/lcommon_misc.c
lustre/ldlm/l_lock.c
lustre/ldlm/ldlm_extent.c
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_inodebits.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_plain.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/liblustre/dir.c
lustre/liblustre/file.c
lustre/liblustre/llite_lib.c
lustre/liblustre/llite_lib.h
lustre/liblustre/lutil.c
lustre/liblustre/namei.c
lustre/liblustre/rw.c
lustre/liblustre/super.c
lustre/llite/dcache.c
lustre/llite/dir.c
lustre/llite/file.c
lustre/llite/llite_capa.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/llite_mmap.c
lustre/llite/llite_rmtacl.c
lustre/llite/lloop.c
lustre/llite/lproc_llite.c
lustre/llite/namei.c
lustre/llite/remote_perm.c
lustre/llite/rw.c
lustre/llite/statahead.c
lustre/llite/super25.c
lustre/llite/vvp_dev.c
lustre/llite/vvp_io.c
lustre/llite/vvp_lock.c
lustre/llite/vvp_object.c
lustre/llite/xattr.c
lustre/lmv/lmv_internal.h
lustre/lmv/lmv_obd.c
lustre/lmv/lmv_object.c
lustre/lmv/lproc_lmv.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_dev.c
lustre/lov/lov_internal.h
lustre/lov/lov_io.c
lustre/lov/lov_lock.c
lustre/lov/lov_obd.c
lustre/lov/lov_object.c
lustre/lov/lov_pack.c
lustre/lov/lov_pool.c
lustre/lov/lov_qos.c
lustre/lov/lov_request.c
lustre/lov/lovsub_lock.c
lustre/lvfs/fsfilt.c
lustre/lvfs/fsfilt_ext3.c
lustre/lvfs/fsfilt_reiserfs.c
lustre/lvfs/lustre_quota_fmt.c
lustre/lvfs/lustre_quota_fmt.h
lustre/lvfs/lvfs_lib.c
lustre/lvfs/lvfs_linux.c
lustre/lvfs/prng.c
lustre/lvfs/quotafmt_test.c
lustre/lvfs/upcall_cache.c
lustre/mdc/lproc_mdc.c
lustre/mdc/mdc_internal.h
lustre/mdc/mdc_lib.c
lustre/mdc/mdc_locks.c
lustre/mdc/mdc_reint.c
lustre/mdc/mdc_request.c
lustre/mdd/mdd_device.c
lustre/mdd/mdd_internal.h
lustre/mdd/mdd_lock.c
lustre/mdd/mdd_lov.c
lustre/mdd/mdd_lproc.c
lustre/mdd/mdd_object.c
lustre/mds/handler.c
lustre/mds/lproc_mds.c
lustre/mds/mds_lov.c
lustre/mdt/mdt_capa.c
lustre/mdt/mdt_handler.c
lustre/mdt/mdt_identity.c
lustre/mdt/mdt_idmap.c
lustre/mdt/mdt_internal.h
lustre/mdt/mdt_lib.c
lustre/mdt/mdt_lproc.c
lustre/mdt/mdt_open.c
lustre/mdt/mdt_recovery.c
lustre/mdt/mdt_reint.c
lustre/mgc/mgc_request.c
lustre/mgs/lproc_mgs.c
lustre/mgs/mgs_fs.c
lustre/mgs/mgs_handler.c
lustre/mgs/mgs_internal.h
lustre/mgs/mgs_llog.c
lustre/obdclass/capa.c
lustre/obdclass/cl_io.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_object.c
lustre/obdclass/cl_page.c
lustre/obdclass/class_obd.c
lustre/obdclass/dt_object.c
lustre/obdclass/genops.c
lustre/obdclass/idmap.c
lustre/obdclass/linux/linux-module.c
lustre/obdclass/linux/linux-obdo.c
lustre/obdclass/linux/linux-sysctl.c
lustre/obdclass/llog.c
lustre/obdclass/llog_cat.c
lustre/obdclass/llog_internal.h
lustre/obdclass/llog_ioctl.c
lustre/obdclass/llog_obd.c
lustre/obdclass/lprocfs_status.c
lustre/obdclass/lu_object.c
lustre/obdclass/lu_ref.c
lustre/obdclass/lu_time.c
lustre/obdclass/lustre_handles.c
lustre/obdclass/lustre_peer.c
lustre/obdclass/md_local_object.c
lustre/obdclass/obd_config.c
lustre/obdclass/obd_mount.c
lustre/obdclass/obdo.c
lustre/obdclass/statfs_pack.c
lustre/obdecho/echo.c
lustre/obdecho/echo_client.c
lustre/obdfilter/filter.c
lustre/obdfilter/filter_capa.c
lustre/obdfilter/filter_internal.h
lustre/obdfilter/filter_io.c
lustre/obdfilter/filter_io_26.c
lustre/obdfilter/filter_log.c
lustre/obdfilter/filter_lvb.c
lustre/obdfilter/lproc_obdfilter.c
lustre/osc/lproc_osc.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_create.c
lustre/osc/osc_dev.c
lustre/osc/osc_internal.h
lustre/osc/osc_io.c
lustre/osc/osc_lock.c
lustre/osc/osc_object.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c
lustre/osd/osd_handler.c
lustre/osd/osd_iam.c
lustre/osd/osd_iam.h
lustre/osd/osd_iam_lfix.c
lustre/osd/osd_iam_lvar.c
lustre/osd/osd_internal.h
lustre/osd/osd_oi.c
lustre/ost/ost_handler.c
lustre/ptlrpc/client.c
lustre/ptlrpc/connection.c
lustre/ptlrpc/events.c
lustre/ptlrpc/gss/gss_api.h
lustre/ptlrpc/gss/gss_cli_upcall.c
lustre/ptlrpc/gss/gss_internal.h
lustre/ptlrpc/gss/gss_keyring.c
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/gss_mech_switch.c
lustre/ptlrpc/gss/gss_pipefs.c
lustre/ptlrpc/gss/gss_rawobj.c
lustre/ptlrpc/gss/gss_svc_upcall.c
lustre/ptlrpc/gss/lproc_gss.c
lustre/ptlrpc/gss/sec_gss.c
lustre/ptlrpc/import.c
lustre/ptlrpc/layout.c
lustre/ptlrpc/llog_client.c
lustre/ptlrpc/llog_net.c
lustre/ptlrpc/llog_server.c
lustre/ptlrpc/lproc_ptlrpc.c
lustre/ptlrpc/niobuf.c
lustre/ptlrpc/pack_generic.c
lustre/ptlrpc/pinger.c
lustre/ptlrpc/ptlrpc_module.c
lustre/ptlrpc/ptlrpcd.c
lustre/ptlrpc/recov_thread.c
lustre/ptlrpc/recover.c
lustre/ptlrpc/sec.c
lustre/ptlrpc/sec_bulk.c
lustre/ptlrpc/sec_config.c
lustre/ptlrpc/sec_gc.c
lustre/ptlrpc/sec_lproc.c
lustre/ptlrpc/sec_null.c
lustre/ptlrpc/sec_plain.c
lustre/ptlrpc/service.c
lustre/ptlrpc/target.c
lustre/quota/lproc_quota.c
lustre/quota/quota_adjust_qunit.c
lustre/quota/quota_check.c
lustre/quota/quota_context.c
lustre/quota/quota_ctl.c
lustre/quota/quota_interface.c
lustre/quota/quota_internal.h
lustre/quota/quota_master.c
lustre/tests/it_test.c
lustre/utils/obd.c

diff --git a/build/nn-check.py b/build/nn-check.py
new file mode 100755 (executable)
index 0000000..12d28aa
--- /dev/null
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+
+# This script is for checking that patches don't introduce non-portable symbols
+# into the Lustre/LNET/libcfs code.
+#
+# Input:
+# 1. (Required) Filename (including path) of the diff file to be checked
+# 2. (Optional) path to the nn-final-symbol-list.txt file (By default, this
+#    script looks for nn-final-symbol-list.txt in the current working
+#    directory.)
+#
+# Output:
+# The output of this script is either PASS or FAIL (with WARNINGS).
+# FAIL means that there may have been symbols found that are not supposed
+# to be used.  This requires the person running the script to look into the
+# WARNINGS that are in the output to determine if there is a problem.
+
+# Author: lisa.week@sun.com
+
+import string
+import re
+import sys
+import optparse
+import os.path
+import fileinput
+
+# Setup command line options for nn-check.py
+from optparse import OptionParser
+usage = "%prog DIFF-FILE [options]"
+parser = OptionParser(usage)
+parser.add_option("-s", "--symb", action="store", dest="symb_pathname",
+                 help="(Optional) PATH to nn-final-symbol-list.txt file",
+                 metavar="PATH")
+
+(options, args) = parser.parse_args()
+
+# Check if we have the minimum number of arguments supplied. 
+if len(args) < 1:
+       parser.error("Incorrect number of arguments, see nn-check -h for help.")
+
+# Check if we were passed a path to the nn-final-symbol-list.txt file
+if options.symb_pathname:
+       symb_file = os.path.join(options.symb_pathname,
+                                 'nn-final-symbol-list.txt')
+else:
+       symb_file = 'nn-final-symbol-list.txt'
+
+# Global Variables
+bad_symbol_cnt = 0
+symbol_dict = dict() 
+
+# Function Definitions
+def search_symbol(line, linenum):
+       global bad_symbol_cnt
+
+       for key, val in symbol_dict.items():
+               regex_match = val.search(line)
+
+               if regex_match:
+                       print_symbol = regex_match.group(0)
+                       print 'WARNING: Found %s at line %d:' \
+                               % (print_symbol, linenum)
+                       print '%s' % line.rstrip()
+                       bad_symbol_cnt += 1
+
+# The main portion of the script
+print '================='
+print 'Starting nn-check'
+print '================='
+
+# Open the nn-final-symbol-list.txt file and pull in the symbols to check into
+# a dictionary object.
+try:
+       f = fileinput.input(symb_file)
+except IOError:
+       print 'nn-check.py: error: %s not found.' % symb_file
+       print 'Is nn-final-symbol-list.txt in your current working directory,'
+       print 'or have you passed nn-check.py a valid path to the file?'
+       sys.exit(1)
+
+
+for line in f:
+       stripped_symbol = line.rstrip()
+       symbol_dict[stripped_symbol] = re.compile(stripped_symbol)
+
+# Close nn-final-symbol-list.txt
+f.close()
+
+# Open the diff file passed to the script and parse it for the symbols from
+# nn-final-symbol-list.txt
+try:
+       f = fileinput.input(args[0])
+except IOError:
+       print 'nn-check.py: error: %s not found.' % args[0]
+       print 'Check the path provided for the diff file.'
+       sys.exit(1)
+
+index = re.compile(r'^\+\+\+ b/(.*)')
+plus = re.compile(r'^\+')
+for line in f:
+       # Check for the "+++ b/" diff header in order to grab the file name.
+       index_match = index.match(line)
+
+       if index_match:
+               # Store the file name
+               filename=index_match.group(1)
+               print '--> Checking File: %s' % filename
+       else:
+               # Check if the line starts with a "+" character.
+               plus_match = plus.match(line)
+               if plus_match:
+                       # The line starts with a "+" character.  Look for
+                       # non-portable symbols
+                       search_symbol(line, f.lineno())
+               else:
+                       continue
+
+# Close the diff file
+f.close()
+
+# Finish up and print the results of the script (i.e. total number of
+# bad symbols found)
+if bad_symbol_cnt != 0:
+       print '=============================='
+       print 'Finished nn-check status: FAIL'
+       print '=============================='
+       print 'Found %d potential problem(s).  See "WARNINGS" from script output and refer to https://wikis.lustre.org/intra/index.php/Lustre_Name_Normalization for the complete set of rules to make sure you have not used a non-portable symbol.' % bad_symbol_cnt
+else:
+       print '=============================='
+       print 'Finished nn-check status: PASS'
+       print '=============================='
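The script takes the diff to vet as its first positional argument, so a typical invocation from the top of the tree is `python build/nn-check.py my-fix.diff -s build`. Note that a FAIL result is reported only through the printed status text; the script exits non-zero only when the symbol list or the diff file cannot be opened.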
diff --git a/build/nn-final-symbol-list.txt b/build/nn-final-symbol-list.txt
new file mode 100644 (file)
index 0000000..e2aa87e
--- /dev/null
@@ -0,0 +1,334 @@
+\blist_head\b
+\bspin_unlock\b
+\bspin_lock\b
+\blist_empty\b
+\batomic_read\b
+\blist_entry\b
+\blist_add_tail\b
+\blist_del_init\b
+\batomic_inc\b
+\blist_for_each\b
+\bspinlock_t\b
+\blist_add\b
+\bspin_unlock_irqrestore\b
+\batomic_t\b
+\bup\b
+\bspin_lock_irqsave\b
+\bspin_lock_init\b
+\blist_for_each_entry\b
+\bwrite_unlock_irqrestore\b
+\batomic_set\b
+\bdown\b
+\bhlist_node\b
+\bHZ\b
+\batomic_dec\b
+\bcompletion\b
+\blist_for_each_safe\b
+\bin_interrupt\b
+\blist_for_each_entry_safe\b
+\bmutex_up\b
+\bwrite_lock_irqsave\b
+\bcopy_from_user\b
+\bcopy_to_user\b
+\batomic_dec_and_test\b
+\bmutex_down\b
+\bspin_unlock_bh\b
+\bsemaphore\b
+\bread_unlock\b
+\btest_bit\b
+\bup_write\b
+\bsize_round\b
+\bread_lock\b
+\bread_unlock_irqrestore\b
+\bset_current_state\b
+\bhlist_head\b
+\bspin_lock_bh\b
+\bdo_gettimeofday\b
+\bgroup_info\b
+\bset_bit\b
+\bdown_write\b
+\bup_read\b
+\bread_lock_irqsave\b
+\bwrite_unlock\b
+\bwrite_lock\b
+\brwlock_t\b
+\bhlist_entry\b
+\bmutex_unlock\b
+\bdown_read\b
+\bnum_physpages\b
+\bmutex\b
+\bsema_init\b
+\bmutex_lock\b
+\bclear_bit\b
+\bmb\b
+\bATOMIC_INIT\b
+\btime_after_eq\b
+\blist_splice_init\b
+\bcomplete\b
+\bkstatfs\b
+\bwait_for_completion\b
+\bnum_online_cpus\b
+\bhlist_unhashed\b
+\bLIST_HEAD\b
+\blist_for_each_entry_reverse\b
+\bSPIN_LOCK_UNLOCKED\b
+\binit_completion\b
+\bmight_sleep\b
+\brwlock_init\b
+\bkernel_thread\b
+\bhlist_add_head\b
+\blist_move\b
+\bunlock_kernel\b
+\bschedule_timeout\b
+\brw_semaphore\b
+\bmodule\b
+\bhlist_del_init\b
+\batomic_inc_return\b
+\btime_after\b
+\bmodule_put\b
+\binit_mutex\b
+\bget_random_bytes\b
+\bin_group_p\b
+\btime_before\b
+\bumode_t\b
+\binit_rwsem\b
+\bhlist_for_each_entry_safe\b
+\bmutex_init\b
+\block_kernel\b
+\btry_module_get\b
+\bCURRENT_TIME\b
+\brequest_module\b
+\block_class_key\b
+\bhlist_empty\b
+\bhlist_for_each_entry\b
+\bnum_possible_cpus\b
+\blist_splice\b
+\bour_cond_resched\b
+\bshrinker\b
+\bspin_unlock_irq\b
+\btest_and_set_bit\b
+\bDECLARE_MUTEX\b
+\bINIT_HLIST_NODE\b
+\bdown_write_nested\b
+\bspin_lock_irq\b
+\bsize_round4\b
+\bwait_event\b
+\bINIT_HLIST_HEAD\b
+\bMAX_SCHEDULE_TIMEOUT\b
+\bSLAB_HWCACHE_ALIGN\b
+\bcdebug_show\b
+\bcycles_t\b
+\bgroups_free\b
+\bDEFAULT_SEEKS\b
+\bGET_TIMEOUT\b
+\bremove_shrinker\b
+\bset_shrinker\b
+\batomic_sub\b
+\bgroups_alloc\b
+\bhlist_for_each\b
+\bhlist_for_each_safe\b
+\bNR_IRQS\b
+\bhlist_del\b
+\batomic_add_return\b
+\binit_MUTEX_LOCKED\b
+\binit_mutex_locked\b
+\blist_for_each_prev\b
+\bcpu_online\b
+\binit_MUTEX\b
+\bFREE_BITMAP\b
+\bL1_CACHE_ALIGN\b
+\batomic_dec_and_lock\b
+\bfind_first_zero_bit\b
+\bmutex_trylock\b
+\bHLIST_HEAD\b
+\batomic_dec_return\b
+\bcond_resched\b
+\bhash_long\b
+\bmutex_is_locked\b
+\bdown_read_nested\b
+\bmutex_lock_nested\b
+\bwait_event_interruptible_exclusive\b
+\bwait_event_interruptible\b
+\batomic_add\b
+\bCHECK_STACK\b
+\bfor_each_possible_cpu\b
+\bALLOCATE_BITMAP\b
+\bDEFINE_MUTEX\b
+\blist_empty_careful\b
+\bwrite_lock_bh\b
+\bwrite_unlock_bh\b
+\bTHREAD_SIZE\b
+\blist_for_each_entry_safe_from\b
+\bshrinker_t\b
+\bwait_for_completion_interruptible\b
+\bmutex_destroy\b
+\bdown_read_trylock\b
+\bdown_write_trylock\b
+\bfind_next_zero_bit\b
+\bspin_lock_nested\b
+\bspin_trylock\b
+\bbitmap_t\b
+\bsmp_processor_id\b
+\btracefile\b
+\btracefile_sem\b
+\bKERN_ERR\b
+\bDECLARE_COMPLETION\b
+\bhlist_add_after\b
+\bhlist_add_before\b
+\bhlist_for_each_entry_continue\b
+\bhlist_for_each_entry_from\b
+\bINIT_COMPLETION\b
+\bround_strlen\b
+\bRW_LOCK_UNLOCKED\b
+\bsize_round0\b
+\bsize_round16\b
+\bsize_round32\b
+\blist_t\b
+\bmutex_t\b
+\bCOMPLETION_INITIALIZER\b
+\bHLIST_HEAD_INIT\b
+\btime_before_eq\b
+\bspin_is_locked\b
+\btrace_daemon_command_usrstr\b
+\btrace_debug_print\b
+\btrace_dump_debug_buffer_usrstr\b
+\btrace_refill_stock\b
+\btrace_set_debug_mb_usrstr\b
+\bfind_first_bit\b
+\b__list_splice\b
+\btrace_assertion_failed\b
+\btracefile_exit\b
+\btracefile_init\b
+\btrace_flush_pages\b
+\btrace_start_thread\b
+\b__list_add\b
+\btcd_owns_tage\b
+\bKERN_ALERT\b
+\bmutex_down_trylock\b
+\bspin_lock_bh_init\b
+\btrace_get_debug_mb\b
+\btage_allocated\b
+\btrace_daemon_command\b
+\btrace_set_debug_mb\b
+\bfind_next_bit\b
+\btrace_stop_thread\b
+\btracefile_init_arch\b
+\btrace_get_tcd\b
+\btrace_lock_tcd\b
+\btrace_unlock_tcd\b
+\btrace_copyout_string\b
+\btracefile_dump_all_pages\b
+\b__list_del\b
+\bKERN_EMERG\b
+\btracefile_fini_arch\b
+\btrace_get_console_buffer\b
+\btrace_max_debug_mb\b
+\btrace_put_console_buffer\b
+\btrace_copyin_string\b
+\btracefile_read_lock\b
+\btracefile_read_unlock\b
+\btrace_allocate_string_buffer\b
+\btrace_free_string_buffer\b
+\bdebug_file_path_arr\b
+\btrace_thread_sem\b
+\btracefile_write_lock\b
+\btracefile_write_unlock\b
+\btracefile_size\b
+\bprint_to_console\b
+\btrace_put_tcd\b
+\bdebug_file_path\b
+\bfini_rwsem\b
+\bKERN_WARNING\b
+\bcpumask_t\b
+\bcpus_empty\b
+\bfor_each_cpu_mask\b
+\bcpu_set\b
+\bcpus_weight\b
+\bset_cpus_allowed\b
+\bnodemask_t\b
+\blist_del\b
+\blist_move_tail\b
+\b__hlist_del\b
+\brwlock_fini\b
+\batomic_sub_return\b
+\batomic_inc_and_test\b
+\batomic_sub_and_test\b
+\bcall_wait_handler\b
+\binit_completion_module\b
+\bSLAB_DESTROY_BY_RCU\b
+\bSLAB_KERNEL\b
+\bSLAB_NOFS\b
+\bTASK_INTERRUPTIBLE\b
+\bTASK_RUNNING\b
+\bTRACEFILE_SIZE\b
+\btrace_cleanup\b
+\btrace_page\b
+\btrace_get_tage\b
+\btrace_cpu_data\b
+\btrace_get_tage_try\b
+\btrace_data\b
+\btage_from_list\b
+\btage_alloc\b
+\btage_free\b
+\btage_to_tail\b
+\btcd_shrink\b
+\btcd_for_each\b
+\btcd_for_each_type_lock\b
+\bschedule_timeout_interruptible\b
+\bINIT_LIST_HEAD\b
+\b__fls\b
+\bfls\b
+\b__flz\b
+\bflz\b
+\b__ffs\b
+\bffs\b
+\b__ffz\b
+\bffz\b
+\bDEBUG_FILE_PATH_DEFAULT\b
+\btrace_data_union\b
+\bKERN_CRIT\b
+\bKERN_NOTICE\b
+\bKERN_INFO\b
+\bKERN_DEBUG\b
+\bput_group_info\b
+\bget_group_info\b
+\bcleanup_group_info\b
+\bset_current_groups\b
+\btest_and_clear_bit\b
+\btrace_console_buffers\b
+\bset_ptldebug_header\b
+\btrace_buf_idx_get\b
+\btrace_buf_type_t\b
+\bTCD_TYPE_PROC\b
+\bTCD_TYPE_SOFTIRQ\b
+\bTCD_TYPE_IRQ\b
+\bTCD_TYPE_MAX\b
+\bTCD_TYPE_PASSIVE\b
+\bTCD_TYPE_DISPATCH\b
+\brcu_head\b
+\blockdep_on\b
+\blockdep_off\b
+\blockdep_set_class\b
+\b__module_get\b
+\bmodule_refcount\b
+\bNR_CPUS\b
+\bTRACE_CONSOLE_BUFFER_SIZE\b
+\bcomplete_and_wait\b
+\batomic_add_unless\b
+\batomic_inc_not_zero\b
+\bschedule\b
+\bcomplete_and_exit\b
+\binit_waitqueue_head\b
+\binit_waitqueue_entry\b
+\badd_wait_queue\b
+\badd_wait_queue_exclusive\b
+\bremove_wait_queue\b
+\bwaitqueue_active\b
+\bwake_up\b
+\bwake_up_nr\b
+\bwake_up_all\b
+\bwait_queue_head_t\b
+\bwait_queue_t\b
+\bDECLARE_RWSEM\b
+\bCFS_DECL_RWSEM\b
+\blist_for_each_entry_continue\b
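Each entry above is a word-boundary-anchored regex, which is what keeps the check precise after the rename: the underscore in the cfs_ prefix is itself a word character, so a pattern such as \bspin_lock\b matches a bare spin_lock but cannot match inside cfs_spin_lock. An illustrative (hypothetical) pair of added lines, with foo_lock invented for the example:

spin_lock(&foo_lock);      /* flagged: matches \bspin_lock\b          */
cfs_spin_lock(&foo_lock);  /* passes: no word boundary after "cfs_"   */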
diff --git a/libcfs/include/libcfs/bitmap.h b/libcfs/include/libcfs/bitmap.h
index a593701..9f54a73 100644 (file)
 typedef struct {
         int             size;
         unsigned long   data[0];
-} bitmap_t;
+} cfs_bitmap_t;
 
 #define CFS_BITMAP_SIZE(nbits) \
-     (((nbits/BITS_PER_LONG)+1)*sizeof(long)+sizeof(bitmap_t))
+     (((nbits/BITS_PER_LONG)+1)*sizeof(long)+sizeof(cfs_bitmap_t))
 
 static inline
-bitmap_t *ALLOCATE_BITMAP(int size)
+cfs_bitmap_t *CFS_ALLOCATE_BITMAP(int size)
 {
-        bitmap_t *ptr;
+        cfs_bitmap_t *ptr;
 
         OBD_ALLOC(ptr, CFS_BITMAP_SIZE(size));
         if (ptr == NULL)
@@ -59,42 +59,42 @@ bitmap_t *ALLOCATE_BITMAP(int size)
         RETURN (ptr);
 }
 
-#define FREE_BITMAP(ptr)        OBD_FREE(ptr, CFS_BITMAP_SIZE(ptr->size))
+#define CFS_FREE_BITMAP(ptr)        OBD_FREE(ptr, CFS_BITMAP_SIZE(ptr->size))
 
 static inline
-void cfs_bitmap_set(bitmap_t *bitmap, int nbit)
+void cfs_bitmap_set(cfs_bitmap_t *bitmap, int nbit)
 {
-        set_bit(nbit, bitmap->data);
+        cfs_set_bit(nbit, bitmap->data);
 }
 
 static inline
-void cfs_bitmap_clear(bitmap_t *bitmap, int nbit)
+void cfs_bitmap_clear(cfs_bitmap_t *bitmap, int nbit)
 {
-        test_and_clear_bit(nbit, bitmap->data);
+        cfs_test_and_clear_bit(nbit, bitmap->data);
 }
 
 static inline
-int cfs_bitmap_check(bitmap_t *bitmap, int nbit)
+int cfs_bitmap_check(cfs_bitmap_t *bitmap, int nbit)
 {
-        return test_bit(nbit, bitmap->data);
+        return cfs_test_bit(nbit, bitmap->data);
 }
 
 static inline
-int cfs_bitmap_test_and_clear(bitmap_t *bitmap, int nbit)
+int cfs_bitmap_test_and_clear(cfs_bitmap_t *bitmap, int nbit)
 {
-        return test_and_clear_bit(nbit, bitmap->data);
+        return cfs_test_and_clear_bit(nbit, bitmap->data);
 }
 
 /* returns 0 if the bitmap has no bits set */
 static inline
-int cfs_bitmap_check_empty(bitmap_t *bitmap)
+int cfs_bitmap_check_empty(cfs_bitmap_t *bitmap)
 {
-        return find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
+        return cfs_find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
 }
 
 #define cfs_foreach_bit(bitmap, pos) \
-       for((pos)=find_first_bit((bitmap)->data, bitmap->size);   \
+       for((pos)=cfs_find_first_bit((bitmap)->data, bitmap->size);   \
             (pos) < (bitmap)->size;                               \
-            (pos) = find_next_bit((bitmap)->data, (bitmap)->size, (pos)))
+            (pos) = cfs_find_next_bit((bitmap)->data, (bitmap)->size, (pos)))
 
 #endif
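A minimal usage sketch of the renamed bitmap API (hypothetical caller; the <libcfs/bitmap.h> include path and kernel-side ENOMEM are assumed from the in-tree build environment):

/* Hypothetical example: set two bits and count them back. */
static int example_count_bits(int size)
{
        cfs_bitmap_t *bm = CFS_ALLOCATE_BITMAP(size);
        int pos, count = 0;

        if (bm == NULL)
                return -ENOMEM;

        cfs_bitmap_set(bm, 0);
        cfs_bitmap_set(bm, 3);

        cfs_foreach_bit(bm, pos)   /* iterates set bits via cfs_find_*_bit */
                count++;

        CFS_FREE_BITMAP(bm);
        return count;              /* 2 in this example */
}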
diff --git a/libcfs/include/libcfs/libcfs.h b/libcfs/include/libcfs/libcfs.h
index 6ba53d3..bedc82f 100644 (file)
@@ -103,14 +103,8 @@ static inline int __is_po2(unsigned long long val)
 
 #include <libcfs/list.h>
 
-/* for_each_possible_cpu is defined newly, the former is
- * for_each_cpu(eg. sles9 and sles10) b=15878 */
-#ifndef for_each_possible_cpu
-# ifdef for_each_cpu
-#  define for_each_possible_cpu(cpu) for_each_cpu(cpu)
-# else
-#  error for_each_possible_cpu is not supported by kernel!
-# endif
+#ifndef cfs_for_each_possible_cpu
+#  error cfs_for_each_possible_cpu is not supported by kernel!
 #endif
 
 /* libcfs tcpip */
@@ -141,7 +135,7 @@ struct lc_watchdog *lc_watchdog_add(int time,
 
 /* Enables a watchdog and resets its timer. */
 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout);
-#define GET_TIMEOUT(svc) (max_t(int, obd_timeout,                       \
+#define CFS_GET_TIMEOUT(svc) (max_t(int, obd_timeout,                   \
                           AT_OFF ? 0 : at_get(&svc->srv_at_estimate)) * \
                           svc->srv_watchdog_factor)
 
diff --git a/libcfs/include/libcfs/libcfs_debug.h b/libcfs/include/libcfs/libcfs_debug.h
index 18cd208..1cc8844 100644 (file)
@@ -55,10 +55,7 @@ extern cfs_duration_t libcfs_console_max_delay;
 extern cfs_duration_t libcfs_console_min_delay;
 extern unsigned int libcfs_console_backoff;
 extern unsigned int libcfs_debug_binary;
-extern char debug_file_path_arr[1024];
-#ifdef __KERNEL__
-extern char *debug_file_path;
-#endif
+extern char libcfs_debug_file_path_arr[1024];
 
 int libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys);
 int libcfs_debug_str2mask(int *mask, const char *str, int is_subsys);
@@ -177,7 +174,7 @@ typedef struct {
 /**
  * Filters out logging messages based on mask and subsystem.
  */
-static inline int cdebug_show(unsigned int mask, unsigned int subsystem)
+static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
 {
         return mask & D_CANTMASK ||
                 ((libcfs_debug & mask) && (libcfs_subsystem_debug & subsystem));
@@ -185,9 +182,9 @@ static inline int cdebug_show(unsigned int mask, unsigned int subsystem)
 
 #define __CDEBUG(cdls, mask, format, ...)                               \
 do {                                                                    \
-        CHECK_STACK();                                                  \
+        CFS_CHECK_STACK();                                              \
                                                                         \
-        if (cdebug_show(mask, DEBUG_SUBSYSTEM))                         \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM))                     \
                 libcfs_debug_msg(cdls, DEBUG_SUBSYSTEM, mask,           \
                                  __FILE__, __FUNCTION__, __LINE__,      \
                                  format, ## __VA_ARGS__);               \
@@ -203,7 +200,7 @@ do {                                            \
 } while (0)
 
 #else /* !CDEBUG_ENABLED */
-static inline int cdebug_show(unsigned int mask, unsigned int subsystem)
+static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
 {
         return 0;
 }
@@ -335,17 +332,17 @@ extern int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls,
     libcfs_debug_vmsg2(cdls, subsys, mask, file, fn,line,NULL,NULL,format, ## __VA_ARGS__)
 
 #define cdebug_va(cdls, mask, file, func, line, fmt, args)      do {          \
-        CHECK_STACK();                                                        \
+        CFS_CHECK_STACK();                                                    \
                                                                               \
-        if (cdebug_show(mask, DEBUG_SUBSYSTEM))                               \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM))                           \
                 libcfs_debug_vmsg(cdls, DEBUG_SUBSYSTEM, (mask),              \
                                   (file), (func), (line), fmt, args);         \
 } while(0)
 
 #define cdebug(cdls, mask, file, func, line, fmt, ...) do {                   \
-        CHECK_STACK();                                                        \
+        CFS_CHECK_STACK();                                                    \
                                                                               \
-        if (cdebug_show(mask, DEBUG_SUBSYSTEM))                               \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM))                           \
                 libcfs_debug_msg(cdls, DEBUG_SUBSYSTEM, (mask),               \
                                  (file), (func), (line), fmt, ## __VA_ARGS__);\
 } while(0)
@@ -354,18 +351,18 @@ extern void libcfs_assertion_failed(const char *expr, const char *file,
                                     const char *fn, const int line);
 
 /* one more external symbol that tracefile provides: */
-extern int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
-                                const char *knl_buffer, char *append);
+extern int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+                                    const char *knl_buffer, char *append);
 
 
 #if defined(HAVE_BGL_SUPPORT)
-#define DEBUG_FILE_PATH_DEFAULT "/bgl/ion/tmp/lustre-log"
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/bgl/ion/tmp/lustre-log"
 #elif defined(__arch_um__)
-#define DEBUG_FILE_PATH_DEFAULT "/r/tmp/lustre-log"
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/r/tmp/lustre-log"
 #elif defined(__WINNT__)
-#define DEBUG_FILE_PATH_DEFAULT "\\SystemRoot\\temp\\lustre-log"
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "\\SystemRoot\\temp\\lustre-log"
 #else
-#define DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
 #endif
 
 #endif /* __LIBCFS_DEBUG_H__ */
diff --git a/libcfs/include/libcfs/libcfs_hash.h b/libcfs/include/libcfs/libcfs_hash.h
index bab504a..769854f 100644 (file)
@@ -65,6 +65,8 @@
 
 #if (defined __linux__ && defined __KERNEL__)
 #include <linux/hash.h>
+
+#define cfs_hash_long(val, bits)    hash_long(val, bits)
 #else
 /* Fast hashing routine for a long.
    (C) 2002 William Lee Irwin III, IBM */
@@ -79,7 +81,7 @@
 #error Define CFS_GOLDEN_RATIO_PRIME for your wordsize.
 #endif
 
-static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+static inline unsigned long cfs_hash_long(unsigned long val, unsigned int bits)
 {
        unsigned long hash = val;
 
@@ -109,7 +111,7 @@ static inline unsigned long hash_long(unsigned long val, unsigned int bits)
 #if 0
 static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
 {
-       return hash_long((unsigned long)ptr, bits);
+       return cfs_hash_long((unsigned long)ptr, bits);
 }
 #endif
 
@@ -119,9 +121,9 @@ static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
 struct cfs_hash_ops;
 
 typedef struct cfs_hash_bucket {
-        struct hlist_head           hsb_head;       /* entries list */
-        atomic_t                    hsb_count;      /* current entries */
-        rwlock_t                    hsb_rwlock;     /* cfs_hash_bucket */
+        cfs_hlist_head_t            hsb_head;       /* entries list */
+        cfs_atomic_t                hsb_count;      /* current entries */
+        cfs_rwlock_t                hsb_rwlock;     /* cfs_hash_bucket */
 } cfs_hash_bucket_t;
 
 #define CFS_MAX_HASH_NAME 16
@@ -134,21 +136,21 @@ typedef struct cfs_hash {
         int                         hs_min_theta;   /* resize min threshold */
         int                         hs_max_theta;   /* resize max threshold */
         int                         hs_flags;       /* hash flags */
-        atomic_t                    hs_count;       /* current entries */
-        atomic_t                    hs_rehash_count;/* resize count */
+        cfs_atomic_t                hs_count;       /* current entries */
+        cfs_atomic_t                hs_rehash_count;/* resize count */
         struct cfs_hash_bucket    **hs_buckets;     /* hash buckets */
         struct cfs_hash_ops        *hs_ops;         /* hash operations */
-        rwlock_t                    hs_rwlock;      /* cfs_hash */
+        cfs_rwlock_t                hs_rwlock;      /* cfs_hash */
         char                        hs_name[CFS_MAX_HASH_NAME];
 } cfs_hash_t;
 
 typedef struct cfs_hash_ops {
         unsigned (*hs_hash)(cfs_hash_t *hs, void *key, unsigned mask);
-        void *   (*hs_key)(struct hlist_node *hnode);
-        int      (*hs_compare)(void *key, struct hlist_node *hnode);
-        void *   (*hs_get)(struct hlist_node *hnode);
-        void *   (*hs_put)(struct hlist_node *hnode);
-        void     (*hs_exit)(struct hlist_node *hnode);
+        void *   (*hs_key)(cfs_hlist_node_t *hnode);
+        int      (*hs_compare)(void *key, cfs_hlist_node_t *hnode);
+        void *   (*hs_get)(cfs_hlist_node_t *hnode);
+        void *   (*hs_put)(cfs_hlist_node_t *hnode);
+        void     (*hs_exit)(cfs_hlist_node_t *hnode);
 } cfs_hash_ops_t;
 
 #define CFS_HASH_DEBUG          0x0001  /* Enable expensive debug checks */
@@ -168,7 +170,7 @@ cfs_hash_id(cfs_hash_t *hs, void *key, unsigned mask)
 }
 
 static inline void *
-cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_key(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         LASSERT(hs);
         LASSERT(hnode);
@@ -190,7 +192,7 @@ cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
  *      ends up not being the case this would be a nice feature.
  */
 static inline int
-cfs_hash_compare(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+cfs_hash_compare(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
 {
         LASSERT(hs);
         LASSERT(hnode);
@@ -203,7 +205,7 @@ cfs_hash_compare(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
 }
 
 static inline void *
-cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         LASSERT(hs);
         LASSERT(hnode);
@@ -216,7 +218,7 @@ cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static inline void *
-cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         LASSERT(hs);
         LASSERT(hnode);
@@ -229,7 +231,7 @@ cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 
 static inline void
-cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         LASSERT(hs);
         LASSERT(hnode);
@@ -242,7 +244,7 @@ cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
 /* Validate hnode references the correct key */
 static inline void
 __cfs_hash_key_validate(cfs_hash_t *hs, void *key,
-                        struct hlist_node *hnode)
+                        cfs_hlist_node_t *hnode)
 {
         if (unlikely(hs->hs_flags & CFS_HASH_DEBUG))
                 LASSERT(cfs_hash_compare(hs, key, hnode) > 0);
@@ -251,7 +253,7 @@ __cfs_hash_key_validate(cfs_hash_t *hs, void *key,
 /* Validate hnode is in the correct bucket */
 static inline void
 __cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bucket_t *hsb,
-                           struct hlist_node *hnode)
+                           cfs_hlist_node_t *hnode)
 {
         unsigned i;
 
@@ -261,13 +263,13 @@ __cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bucket_t *hsb,
         }
 }
 
-static inline struct hlist_node *
+static inline cfs_hlist_node_t *
 __cfs_hash_bucket_lookup(cfs_hash_t *hs,
                          cfs_hash_bucket_t *hsb, void *key)
 {
-        struct hlist_node *hnode;
+        cfs_hlist_node_t *hnode;
 
-        hlist_for_each(hnode, &hsb->hsb_head)
+        cfs_hlist_for_each(hnode, &hsb->hsb_head)
                 if (cfs_hash_compare(hs, key, hnode) > 0)
                         return hnode;
 
@@ -277,11 +279,11 @@ __cfs_hash_bucket_lookup(cfs_hash_t *hs,
 static inline void *
 __cfs_hash_bucket_add(cfs_hash_t *hs,
                       cfs_hash_bucket_t *hsb,
-                      struct hlist_node *hnode)
+                      cfs_hlist_node_t *hnode)
 {
-        hlist_add_head(hnode, &(hsb->hsb_head));
-        atomic_inc(&hsb->hsb_count);
-        atomic_inc(&hs->hs_count);
+        cfs_hlist_add_head(hnode, &(hsb->hsb_head));
+        cfs_atomic_inc(&hsb->hsb_count);
+        cfs_atomic_inc(&hs->hs_count);
 
         return cfs_hash_get(hs, hnode);
 }
@@ -289,13 +291,13 @@ __cfs_hash_bucket_add(cfs_hash_t *hs,
 static inline void *
 __cfs_hash_bucket_del(cfs_hash_t *hs,
                       cfs_hash_bucket_t *hsb,
-                      struct hlist_node *hnode)
+                      cfs_hlist_node_t *hnode)
 {
-        hlist_del_init(hnode);
-        LASSERT(atomic_read(&hsb->hsb_count) > 0);
-        atomic_dec(&hsb->hsb_count);
-        LASSERT(atomic_read(&hs->hs_count) > 0);
-        atomic_dec(&hs->hs_count);
+        cfs_hlist_del_init(hnode);
+        LASSERT(cfs_atomic_read(&hsb->hsb_count) > 0);
+        cfs_atomic_dec(&hsb->hsb_count);
+        LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
+        cfs_atomic_dec(&hs->hs_count);
 
         return cfs_hash_put(hs, hnode);
 }
@@ -308,14 +310,14 @@ void cfs_hash_destroy(cfs_hash_t *hs);
 
 /* Hash addition functions */
 void cfs_hash_add(cfs_hash_t *hs, void *key,
-                  struct hlist_node *hnode);
+                  cfs_hlist_node_t *hnode);
 int cfs_hash_add_unique(cfs_hash_t *hs, void *key,
-                        struct hlist_node *hnode);
+                        cfs_hlist_node_t *hnode);
 void *cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
-                              struct hlist_node *hnode);
+                              cfs_hlist_node_t *hnode);
 
 /* Hash deletion functions */
-void *cfs_hash_del(cfs_hash_t *hs, void *key, struct hlist_node *hnode);
+void *cfs_hash_del(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode);
 void *cfs_hash_del_key(cfs_hash_t *hs, void *key);
 
 /* Hash lookup/for_each functions */
@@ -333,7 +335,7 @@ void cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
  */
 int cfs_hash_rehash(cfs_hash_t *hs, int bits);
 void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key,
-                         void *new_key, struct hlist_node *hnode);
+                         void *new_key, cfs_hlist_node_t *hnode);
 
 
 #define CFS_HASH_THETA_BITS  10
@@ -353,7 +355,7 @@ static inline int __cfs_hash_theta_frac(int theta)
 
 static inline int __cfs_hash_theta(cfs_hash_t *hs)
 {
-        return (atomic_read(&hs->hs_count) <<
+        return (cfs_atomic_read(&hs->hs_count) <<
                 CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
 }
 
diff --git a/libcfs/include/libcfs/libcfs_ioctl.h b/libcfs/include/libcfs/libcfs_ioctl.h
index ce14af7..f08d2ef 100644 (file)
@@ -94,7 +94,7 @@ do {                                                    \
 #ifdef __KERNEL__
 
 struct libcfs_ioctl_handler {
-        struct list_head item;
+        cfs_list_t item;
         int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
 };
 
@@ -156,8 +156,8 @@ struct libcfs_ioctl_handler {
 static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
 {
         int len = sizeof(*data);
-        len += size_round(data->ioc_inllen1);
-        len += size_round(data->ioc_inllen2);
+        len += cfs_size_round(data->ioc_inllen1);
+        len += cfs_size_round(data->ioc_inllen2);
         return len;
 }
 
@@ -209,7 +209,7 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
                 return 1;
         }
         if (data->ioc_inllen2 &&
-            data->ioc_bulk[size_round(data->ioc_inllen1) +
+            data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
                            data->ioc_inllen2 - 1] != '\0') {
                 CERROR ("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
                 return 1;
diff --git a/libcfs/include/libcfs/libcfs_prim.h b/libcfs/include/libcfs/libcfs_prim.h
index 3d38920..83493e8 100644 (file)
@@ -49,7 +49,9 @@
 /*
  * Schedule
  */
-void cfs_schedule_timeout(cfs_task_state_t state, int64_t timeout);
+void cfs_schedule_timeout_and_set_state(cfs_task_state_t state,
+                                        int64_t timeout);
+void cfs_schedule_timeout(int64_t timeout);
 void cfs_schedule(void);
 void cfs_pause(cfs_duration_t ticks);
 int  cfs_need_resched(void);
diff --git a/libcfs/include/libcfs/libcfs_private.h b/libcfs/include/libcfs/libcfs_private.h
index d0fd499..db86bf4 100644 (file)
@@ -138,7 +138,7 @@ void lbug_with_loc(const char *file, const char *func, const int line)
 
 #define LBUG() lbug_with_loc(__FILE__, __FUNCTION__, __LINE__)
 
-extern atomic_t libcfs_kmemory;
+extern cfs_atomic_t libcfs_kmemory;
 /*
  * Memory
  */
@@ -146,11 +146,11 @@ extern atomic_t libcfs_kmemory;
 
 # define libcfs_kmem_inc(ptr, size)             \
 do {                                            \
-        atomic_add(size, &libcfs_kmemory);      \
+        cfs_atomic_add(size, &libcfs_kmemory);  \
 } while (0)
 
 # define libcfs_kmem_dec(ptr, size) do {        \
-        atomic_sub(size, &libcfs_kmemory);      \
+        cfs_atomic_sub(size, &libcfs_kmemory);  \
 } while (0)
 
 #else
@@ -162,7 +162,7 @@ do {                                            \
 
 #define LIBCFS_ALLOC_GFP(ptr, size, mask)                                 \
 do {                                                                      \
-        LASSERT(!in_interrupt() ||                                        \
+        LASSERT(!cfs_in_interrupt() ||                                    \
                (size <= LIBCFS_VMALLOC_SIZE && mask == CFS_ALLOC_ATOMIC));\
         if (unlikely((size) > LIBCFS_VMALLOC_SIZE))                       \
                 (ptr) = cfs_alloc_large(size);                            \
@@ -172,14 +172,14 @@ do {                                                                      \
                 CERROR("LNET: out of memory at %s:%d (tried to alloc '"   \
                        #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size));\
                 CERROR("LNET: %d total bytes allocated by lnet\n",        \
-                       atomic_read(&libcfs_kmemory));                     \
+                       cfs_atomic_read(&libcfs_kmemory));                 \
                 break;                                                    \
         }                                                                 \
         libcfs_kmem_inc((ptr), (size));                                   \
         if (!((mask) & CFS_ALLOC_ZERO))                                   \
                 memset((ptr), 0, (size));                                 \
         CDEBUG(D_MALLOC, "kmalloced '" #ptr "': %d at %p (tot %d).\n",    \
-               (int)(size), (ptr), atomic_read (&libcfs_kmemory));        \
+               (int)(size), (ptr), cfs_atomic_read (&libcfs_kmemory));    \
 } while (0)
 
 #define LIBCFS_ALLOC(ptr, size) \
@@ -198,7 +198,7 @@ do {                                                                    \
         }                                                               \
         libcfs_kmem_dec((ptr), s);                                      \
         CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n",     \
-               s, (ptr), atomic_read(&libcfs_kmemory));                 \
+               s, (ptr), cfs_atomic_read(&libcfs_kmemory));             \
         if (unlikely(s > LIBCFS_VMALLOC_SIZE))                          \
                 cfs_free_large(ptr);                                    \
         else                                                            \
@@ -318,9 +318,9 @@ lnet_nid_t  libcfs_str2nid(const char *str);
 int         libcfs_str2anynid(lnet_nid_t *nid, const char *str);
 char       *libcfs_id2str(lnet_process_id_t id);
 int         cfs_iswhite(char c);
-void        cfs_free_nidlist(struct list_head *list);
-int         cfs_parse_nidlist(char *str, int len, struct list_head *list);
-int         cfs_match_nid(lnet_nid_t nid, struct list_head *list);
+void        cfs_free_nidlist(cfs_list_t *list);
+int         cfs_parse_nidlist(char *str, int len, cfs_list_t *list);
+int         cfs_match_nid(lnet_nid_t nid, cfs_list_t *list);
 
 /* how an LNET NID encodes net:address */
 #define LNET_NIDADDR(nid)      ((__u32)((nid) & 0xffffffff))
@@ -339,8 +339,8 @@ int         cfs_match_nid(lnet_nid_t nid, struct list_head *list);
 /* logical equivalence */
 #define equi(a, b) (!!(a) == !!(b))
 
-#ifndef CURRENT_TIME
-# define CURRENT_TIME time(0)
+#ifndef CFS_CURRENT_TIME
+# define CFS_CURRENT_TIME time(0)
 #endif
 
 /* --------------------------------------------------------------------
@@ -365,50 +365,50 @@ struct libcfs_device_userstate
 
 #define MKSTR(ptr) ((ptr))? (ptr) : ""
 
-static inline int size_round4 (int val)
+static inline int cfs_size_round4 (int val)
 {
         return (val + 3) & (~0x3);
 }
 
-static inline int size_round (int val)
+static inline int cfs_size_round (int val)
 {
         return (val + 7) & (~0x7);
 }
 
-static inline int size_round16(int val)
+static inline int cfs_size_round16(int val)
 {
         return (val + 0xf) & (~0xf);
 }
 
-static inline int size_round32(int val)
+static inline int cfs_size_round32(int val)
 {
         return (val + 0x1f) & (~0x1f);
 }
 
-static inline int size_round0(int val)
+static inline int cfs_size_round0(int val)
 {
         if (!val)
                 return 0;
         return (val + 1 + 7) & (~0x7);
 }
 
-static inline size_t round_strlen(char *fset)
+static inline size_t cfs_round_strlen(char *fset)
 {
-        return (size_t)size_round((int)strlen(fset) + 1);
+        return (size_t)cfs_size_round((int)strlen(fset) + 1);
 }
 
 #define LOGL(var,len,ptr)                                       \
 do {                                                            \
         if (var)                                                \
                 memcpy((char *)ptr, (const char *)var, len);    \
-        ptr += size_round(len);                                 \
+        ptr += cfs_size_round(len);                             \
 } while (0)
 
 #define LOGU(var,len,ptr)                                       \
 do {                                                            \
         if (var)                                                \
                 memcpy((char *)var, (const char *)ptr, len);    \
-        ptr += size_round(len);                                 \
+        ptr += cfs_size_round(len);                             \
 } while (0)
 
 #define LOGL0(var,len,ptr)                              \
@@ -417,7 +417,7 @@ do {                                                    \
                 break;                                  \
         memcpy((char *)ptr, (const char *)var, len);    \
         *((char *)(ptr) + len) = 0;                     \
-        ptr += size_round(len + 1);                     \
+        ptr += cfs_size_round(len + 1);                 \
 } while (0)
 
 /**
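The renamed rounding helpers pad variable-length fields to fixed boundaries for packing (see libcfs_ioctl_packlen() above). A standalone restatement with a few concrete values, runnable outside the tree:

#include <assert.h>

/* Copies of the renamed helpers, restated here for illustration only. */
static int cfs_size_round4(int val) { return (val + 3) & (~0x3); }
static int cfs_size_round(int val)  { return (val + 7) & (~0x7); }
static int cfs_size_round0(int val)
{
        if (!val)
                return 0;
        return (val + 1 + 7) & (~0x7);
}

int main(void)
{
        assert(cfs_size_round(1)  == 8);   /* pad up to 8 bytes       */
        assert(cfs_size_round(8)  == 8);   /* already aligned         */
        assert(cfs_size_round4(5) == 8);   /* 4-byte boundary: 5 -> 8 */
        assert(cfs_size_round0(0) == 0);   /* zero stays zero         */
        assert(cfs_size_round0(7) == 8);   /* leaves room for a NUL   */
        return 0;
}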
diff --git a/libcfs/include/libcfs/libcfs_time.h b/libcfs/include/libcfs/libcfs_time.h
index 946c2be..96a1d75 100644 (file)
@@ -55,16 +55,6 @@ static inline cfs_duration_t cfs_time_sub(cfs_time_t t1, cfs_time_t t2)
         return (cfs_time_t)(t1 - t2);
 }
 
-static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
-{
-        return time_before(t1, t2);
-}
-
-static inline int cfs_time_beforeq(cfs_time_t t1, cfs_time_t t2)
-{
-        return time_before_eq(t1, t2);
-}
-
 static inline int cfs_time_after(cfs_time_t t1, cfs_time_t t2)
 {
         return cfs_time_before(t2, t1);
diff --git a/libcfs/include/libcfs/linux/Makefile.am b/libcfs/include/libcfs/linux/Makefile.am
index a1f46cc..874efed 100644 (file)
@@ -1,3 +1,3 @@
 EXTRA_DIST := kp30.h libcfs.h linux-fs.h linux-lock.h linux-mem.h      \
        linux-prim.h linux-time.h linux-tcpip.h lltrace.h               \
-       portals_compat25.h
\ No newline at end of file
+       portals_compat25.h linux-bitops.h linux-types.h
diff --git a/libcfs/include/libcfs/linux/kp30.h b/libcfs/include/libcfs/linux/kp30.h
index ae23e8c..ba566f7 100644 (file)
@@ -90,12 +90,7 @@ do {                                                                          \
 
 #define PageUptodate Page_Uptodate
 #define our_recalc_sigpending(current) recalc_sigpending(current)
-#define num_online_cpus() smp_num_cpus
-static inline void our_cond_resched(void)
-{
-        if (current->need_resched)
-               schedule ();
-}
+#define cfs_num_online_cpus() smp_num_cpus
 #define work_struct_t                   struct tq_struct
 #define cfs_get_work_data(type,field,data)   (data)
 #else
@@ -120,13 +115,10 @@ do {                                                                          \
 
 #endif
 
+#define cfs_num_online_cpus() num_online_cpus()
 #define wait_on_page wait_on_page_locked
 #define our_recalc_sigpending(current) recalc_sigpending()
 #define strtok(a,b) strpbrk(a, b)
-static inline void our_cond_resched(void)
-{
-        cond_resched();
-}
 #define work_struct_t      struct work_struct
 
 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) */
@@ -210,7 +202,9 @@ static inline void our_cond_resched(void)
 # define time(a) CURRENT_TIME
 
 #ifndef num_possible_cpus
-#define num_possible_cpus() NR_CPUS
+#define cfs_num_possible_cpus() NR_CPUS
+#else
+#define cfs_num_possible_cpus() num_possible_cpus()
 #endif
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
 #define i_size_read(a) ((a)->i_size)
@@ -255,9 +249,9 @@ typedef struct {
 #  if !KLWT_SUPPORT
 
 typedef struct _lwt_page {
-        struct list_head     lwtp_list;
-        struct page         *lwtp_page;
-        lwt_event_t         *lwtp_events;
+        cfs_list_t               lwtp_list;
+        struct page             *lwtp_page;
+        lwt_event_t             *lwtp_events;
 } lwt_page_t;
 
 typedef struct {
@@ -292,8 +286,8 @@ do {                                                                    \
                                                                         \
                 if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) {   \
                         cpu->lwtc_current_page =                        \
-                                list_entry (p->lwtp_list.next,          \
-                                            lwt_page_t, lwtp_list);     \
+                                cfs_list_entry (p->lwtp_list.next,      \
+                                                lwt_page_t, lwtp_list); \
                         cpu->lwtc_current_index = 0;                    \
                 }                                                       \
                                                                         \
@@ -316,7 +310,7 @@ extern void lwt_fini (void);
 extern int  lwt_lookup_string (int *size, char *knlptr,
                                char *usrptr, int usrsize);
 extern int  lwt_control (int enable, int clear);
-extern int  lwt_snapshot (cycles_t *now, int *ncpu, int *total_size,
+extern int  lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
                           void *user_ptr, int user_size);
 # else  /* __KERNEL__ */
 #  define LWT_EVENT(p1,p2,p3,p4)     /* no userland implementation yet */
@@ -405,6 +399,8 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 }
 #endif
 
+#define cfs_smp_processor_id()  smp_processor_id()
+
 #ifndef get_cpu
 # ifdef CONFIG_PREEMPT
 #  define cfs_get_cpu()  ({ preempt_disable(); smp_processor_id(); })
diff --git a/libcfs/include/libcfs/linux/libcfs.h b/libcfs/include/libcfs/linux/libcfs.h
index 9e75d75..eb27038 100644 (file)
@@ -53,6 +53,8 @@
 #include <libcfs/linux/linux-lock.h>
 #include <libcfs/linux/linux-fs.h>
 #include <libcfs/linux/linux-tcpip.h>
+#include <libcfs/linux/linux-bitops.h>
+#include <libcfs/linux/linux-types.h>
 #include <libcfs/linux/kp30.h>
 
 #ifdef HAVE_ASM_TYPES_H
@@ -62,6 +64,7 @@
 #include <asm/timex.h>
 #include <linux/sched.h> /* THREAD_SIZE */
 
+#define CFS_THREAD_SIZE   THREAD_SIZE
 #define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
 
 #if !defined(__x86_64__)
@@ -77,7 +80,7 @@
 
 #define __CHECK_STACK(file, func, line)                                 \
 do {                                                                    \
-        unsigned long _stack = CDEBUG_STACK();                           \
+        unsigned long _stack = CDEBUG_STACK();                          \
                                                                         \
         if (_stack > 3*THREAD_SIZE/4 && _stack > libcfs_stack) {        \
                 libcfs_stack = _stack;                                  \
@@ -88,9 +91,9 @@ do {                                                                    \
               /*panic("LBUG");*/                                        \
         }                                                               \
 } while (0)
-#define CHECK_STACK()     __CHECK_STACK(__FILE__, __func__, __LINE__)
+#define CFS_CHECK_STACK()     __CHECK_STACK(__FILE__, __func__, __LINE__)
 #else /* __x86_64__ */
-#define CHECK_STACK() do { } while(0)
+#define CFS_CHECK_STACK() do { } while(0)
 #define CDEBUG_STACK() (0L)
 #endif /* __x86_64__ */
 
diff --git a/libcfs/include/libcfs/linux/linux-bitops.h b/libcfs/include/libcfs/linux/linux-bitops.h
new file mode 100644 (file)
index 0000000..75e37a6
--- /dev/null
@@ -0,0 +1,53 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-bitops.h
+ */
+#include <linux/bitops.h>
+
+#define cfs_test_bit(nr, addr)              test_bit(nr, addr)
+#define cfs_set_bit(nr, addr)               set_bit(nr, addr)
+#define cfs_clear_bit(nr, addr)             clear_bit(nr, addr)
+#define cfs_test_and_set_bit(nr, addr)      test_and_set_bit(nr, addr)
+#define cfs_test_and_clear_bit(nr, addr)    test_and_clear_bit(nr, addr)
+#define cfs_find_first_bit(addr, size)      find_first_bit(addr, size)
+#define cfs_find_first_zero_bit(addr, size) find_first_zero_bit(addr, size)
+#define cfs_find_next_bit(addr, size, off)  find_next_bit(addr, size, off)
+#define cfs_find_next_zero_bit(addr, size, off) \
+        find_next_zero_bit(addr, size, off)
+
+#define cfs_ffz(x)                          ffz(x)
+#define cfs_ffs(x)                          ffs(x)
+#define cfs_fls(x)                          fls(x)
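A kernel-side usage sketch of these wrappers (hypothetical flag word; the underlying Linux bitops act atomically on unsigned long arrays):

static unsigned long example_flags;     /* one word of flag bits */

static int example_claim_free_flag(void)
{
        int nr = cfs_find_first_zero_bit(&example_flags, BITS_PER_LONG);

        if (nr >= BITS_PER_LONG)
                return -1;                              /* all bits in use */
        if (cfs_test_and_set_bit(nr, &example_flags))
                return -1;                              /* lost a race     */
        return nr;
}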
diff --git a/libcfs/include/libcfs/linux/linux-fs.h b/libcfs/include/libcfs/linux/linux-fs.h
index dc9ab89..64d8936 100644 (file)
@@ -56,6 +56,7 @@
 typedef struct file cfs_file_t;
 typedef struct dentry cfs_dentry_t;
 typedef struct dirent64 cfs_dirent_t;
+typedef struct kstatfs cfs_kstatfs_t;
 
 #define cfs_filp_size(f)               (i_size_read((f)->f_dentry->d_inode))
 #define cfs_filp_poff(f)                (&(f)->f_pos)
index 67a65cb..789718e 100644 (file)
  * declared by CFS_DECL_* should be initialized explicitly.
  */
 
-
 /*
- * spin_lock (use Linux kernel's primitives)
+ * spin_lock "implementation" (use Linux kernel's primitives)
  *
  * - spin_lock_init(x)
  * - spin_lock(x)
+ * - spin_lock_bh(x)
+ * - spin_lock_bh_init(x)
  * - spin_unlock(x)
+ * - spin_unlock_bh(x)
  * - spin_trylock(x)
+ * - spin_is_locked(x)
  *
+ * - spin_lock_irq(x)
  * - spin_lock_irqsave(x, f)
  * - spin_unlock_irqrestore(x, f)
+ * - read_lock_irqsave(lock, f)
+ * - write_lock_irqsave(lock, f)
+ * - write_unlock_irqrestore(lock, f)
+ *
+ * - SPIN_LOCK_UNLOCKED
  */
 
 /*
- * rw_semaphore (use Linux kernel's primitives)
+ * spinlock "implementation"
+ */
+
+typedef spinlock_t cfs_spinlock_t;
+
+#define cfs_spin_lock_init(lock)             spin_lock_init(lock)
+#define cfs_spin_lock(lock)                  spin_lock(lock)
+#define cfs_spin_lock_bh(lock)               spin_lock_bh(lock)
+#define cfs_spin_lock_bh_init(lock)          spin_lock_bh_init(lock)
+#define cfs_spin_unlock(lock)                spin_unlock(lock)
+#define cfs_spin_unlock_bh(lock)             spin_unlock_bh(lock)
+#define cfs_spin_trylock(lock)               spin_trylock(lock)
+#define cfs_spin_is_locked(lock)             spin_is_locked(lock)
+
+#define cfs_spin_lock_irq(lock)              spin_lock_irq(lock)
+#define cfs_spin_unlock_irq(lock)            spin_unlock_irq(lock)
+#define cfs_read_lock_irqsave(lock, f)       read_lock_irqsave(lock, f)
+#define cfs_write_lock_irqsave(lock, f)      write_lock_irqsave(lock, f)
+#define cfs_write_unlock_irqrestore(lock, f) write_unlock_irqrestore(lock, f)
+#define cfs_spin_lock_irqsave(lock, f)       spin_lock_irqsave(lock, f)
+#define cfs_spin_unlock_irqrestore(lock, f)  spin_unlock_irqrestore(lock, f)
+
+#define CFS_SPIN_LOCK_UNLOCKED               SPIN_LOCK_UNLOCKED
+
+/*
+ * rw_semaphore "implementation" (use Linux kernel's primitives)
  *
+ * - sema_init(x)
  * - init_rwsem(x)
  * - down_read(x)
  * - up_read(x)
  * - down_write(x)
  * - up_write(x)
  */
-#define fini_rwsem(s) do {} while(0)
+typedef struct rw_semaphore cfs_rw_semaphore_t;
+
+#define cfs_sema_init(s, val)     sema_init(s, val)
+#define cfs_init_rwsem(s)         init_rwsem(s)
+#define cfs_down_read(s)          down_read(s)
+#define cfs_down_read_trylock(s)  down_read_trylock(s)
+#define cfs_up_read(s)            up_read(s)
+#define cfs_down_write(s)         down_write(s)
+#define cfs_down_write_trylock(s) down_write_trylock(s)
+#define cfs_up_write(s)           up_write(s)
+
+#define cfs_fini_rwsem(s)         do {} while(0)
+
+#define CFS_DECLARE_RWSEM(name)   DECLARE_RWSEM(name)
 
 /*
- * rwlock_t (use Linux kernel's primitives)
+ * semaphore "implementation" (use Linux kernel's primitives)
+ */
+typedef struct semaphore      cfs_semaphore_t;
+
+/*
+ * rwlock_t "implementation" (use Linux kernel's primitives)
  *
  * - rwlock_init(x)
  * - read_lock(x)
  * - read_unlock(x)
  * - write_lock(x)
  * - write_unlock(x)
+ * - write_lock_bh(x)
+ * - write_unlock_bh(x)
+ *
+ * - RW_LOCK_UNLOCKED
  */
+typedef rwlock_t cfs_rwlock_t;
+
+#define cfs_rwlock_init(lock)                  rwlock_init(lock)
+#define cfs_read_lock(lock)                    read_lock(lock)
+#define cfs_read_unlock(lock)                  read_unlock(lock)
+#define cfs_read_unlock_irqrestore(lock,flags) \
+        read_unlock_irqrestore(lock, flags)
+#define cfs_write_lock(lock)                   write_lock(lock)
+#define cfs_write_unlock(lock)                 write_unlock(lock)
+#define cfs_write_lock_bh(lock)                write_lock_bh(lock)
+#define cfs_write_unlock_bh(lock)              write_unlock_bh(lock)
+
+#define CFS_RW_LOCK_UNLOCKED                   RW_LOCK_UNLOCKED
 
 /*
- * mutex:
+ * completion "implementation" (use Linux kernel's primitives)
  *
- * - init_mutex(x)
- * - init_mutex_locked(x)
- * - mutex_up(x)
- * - mutex_down(x)
+ * - DECLARE_COMPLETION(work)
+ * - INIT_COMPLETION(c)
+ * - COMPLETION_INITIALIZER(work)
+ * - init_completion(c)
+ * - complete(c)
+ * - wait_for_completion(c)
+ * - wait_for_completion_interruptible(c)
+ * - fini_completion(c)
  */
-#define init_mutex(x)                   init_MUTEX(x)
-#define init_mutex_locked(x)            init_MUTEX_LOCKED(x)
-#define mutex_up(x)                     up(x)
-#define mutex_down(x)                   down(x)
-#define mutex_down_trylock(x)           down_trylock(x)
+typedef struct completion cfs_completion_t;
+
+#define CFS_DECLARE_COMPLETION(work)             DECLARE_COMPLETION(work)
+#define CFS_INIT_COMPLETION(c)                   INIT_COMPLETION(c)
+#define CFS_COMPLETION_INITIALIZER(work)         COMPLETION_INITIALIZER(work)
+#define cfs_init_completion(c)                   init_completion(c)
+#define cfs_complete(c)                          complete(c)
+#define cfs_wait_for_completion(c)               wait_for_completion(c)
+#define cfs_wait_for_completion_interruptible(c) \
+        wait_for_completion_interruptible(c)
+#define cfs_complete_and_exit(c, code)           complete_and_exit(c, code)
+#define cfs_fini_completion(c)                   do { } while (0)
 
 /*
- * completion (use Linux kernel's primitives)
+ * mutex "implementation" (use Linux kernel's primitives)
  *
- * - init_complition(c)
- * - complete(c)
- * - wait_for_completion(c)
+ * - DECLARE_MUTEX(name)
+ * - mutex_init(x)
+ * - init_mutex(x)
+ * - init_mutex_locked(x)
+ * - init_MUTEX_LOCKED(x)
+ * - mutex_up(x)
+ * - mutex_down(x)
+ * - up(x)
+ * - down(x)
+ * - mutex_down_trylock(x)
+ * - mutex_lock(x)
+ * - mutex_unlock(x)
  */
+typedef struct mutex cfs_mutex_t;
+
+#define CFS_DEFINE_MUTEX(name)             DEFINE_MUTEX(name)
+#define CFS_DECLARE_MUTEX(name)            DECLARE_MUTEX(name)
+
+#define cfs_mutex_init(x)                   mutex_init(x)
+#define cfs_init_mutex(x)                   init_MUTEX(x)
+#define cfs_init_mutex_locked(x)            init_MUTEX_LOCKED(x)
+#define cfs_mutex_up(x)                     up(x)
+#define cfs_mutex_down(x)                   down(x)
+#define cfs_up(x)                           up(x)
+#define cfs_down(x)                         down(x)
+#define cfs_mutex_down_trylock(x)           down_trylock(x)
+#define cfs_mutex_lock(x)                   mutex_lock(x)
+#define cfs_mutex_unlock(x)                 mutex_unlock(x)
+#define cfs_mutex_trylock(x)                mutex_trylock(x)
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
 
  *
  **************************************************************************/
 
-struct mutex;
-
-static inline void mutex_destroy(struct mutex *lock)
+static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
 {
 }
 
@@ -140,12 +244,24 @@ static inline void mutex_destroy(struct mutex *lock)
  *
  * \retval 0 mutex is not locked. This should never happen.
  */
-static inline int mutex_is_locked(struct mutex *lock)
+static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
 {
         return 1;
 }
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) */
+#define cfs_mutex_destroy(x)    mutex_destroy(x)
+#define cfs_mutex_is_locked(x)  mutex_is_locked(x)
 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) */
 
+/*
+ * Kernel locking primitives
+ *
+ * - lock_kernel
+ * - unlock_kernel
+ */
+#define cfs_lock_kernel()      lock_kernel()
+#define cfs_unlock_kernel()    unlock_kernel()
+
 #ifndef lockdep_set_class
 
 /**************************************************************************
@@ -154,67 +270,57 @@ static inline int mutex_is_locked(struct mutex *lock)
  *
  **************************************************************************/
 
-struct lock_class_key {
+typedef struct cfs_lock_class_key {
         ;
-};
+} cfs_lock_class_key_t;
 
-# define lockdep_set_class(lock, key) \
+#define cfs_lockdep_set_class(lock, key) \
         do { (void)sizeof (lock);(void)sizeof (key); } while (0)
 /* This has to be a macro, so that `subclass' can be undefined in kernels that
  * do not support lockdep. */
 
 
-static inline void lockdep_off(void)
+static inline void cfs_lockdep_off(void)
 {
 }
 
-static inline void lockdep_on(void)
+static inline void cfs_lockdep_on(void)
 {
 }
+#else
+typedef struct lock_class_key cfs_lock_class_key_t;
 
+#define cfs_lockdep_set_class(lock, key) lockdep_set_class(lock, key)
+#define cfs_lockdep_off()                lockdep_off()
+#define cfs_lockdep_on()                 lockdep_on()
 #endif /* lockdep_set_class */
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 #ifndef mutex_lock_nested
-# define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#define cfs_mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#else
+#define cfs_mutex_lock_nested(mutex, subclass) \
+        mutex_lock_nested(mutex, subclass)
 #endif
 
 #ifndef spin_lock_nested
-# define spin_lock_nested(lock, subclass) spin_lock(lock)
+#define cfs_spin_lock_nested(lock, subclass) spin_lock(lock)
+#else
+#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
 #endif
 
 #ifndef down_read_nested
-# define down_read_nested(lock, subclass) down_read(lock)
+#define cfs_down_read_nested(lock, subclass) down_read(lock)
+#else
+#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
 #endif
 
 #ifndef down_write_nested
-# define down_write_nested(lock, subclass) down_write(lock)
+#define cfs_down_write_nested(lock, subclass) down_write(lock)
+#else
+#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
 #endif
 #endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 
-/*
- * spinlock "implementation"
- */
-
-typedef spinlock_t cfs_spinlock_t;
-
-#define cfs_spin_lock_init(lock) spin_lock_init(lock)
-#define cfs_spin_lock(lock)      spin_lock(lock)
-#define cfs_spin_lock_bh(lock)   spin_lock_bh(lock)
-#define cfs_spin_unlock(lock)    spin_unlock(lock)
-#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
-
-/*
- * rwlock "implementation"
- */
-
-typedef rwlock_t cfs_rwlock_t;
-
-#define cfs_rwlock_init(lock)      rwlock_init(lock)
-#define cfs_read_lock(lock)        read_lock(lock)
-#define cfs_read_unlock(lock)      read_unlock(lock)
-#define cfs_write_lock_bh(lock)    write_lock_bh(lock)
-#define cfs_write_unlock_bh(lock)  write_unlock_bh(lock)
-
 #endif /* __LIBCFS_LINUX_CFS_LOCK_H__ */
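
Because every lock wrapper above expands to the matching kernel primitive,
existing locking idioms port by renaming alone. A minimal sketch of a
spinlock-protected counter under the new names (the counter itself is
invented for illustration):

    /* Illustrative: a counter guarded by the renamed spinlock API. */
    static cfs_spinlock_t count_lock;
    static int count;

    static void count_setup(void)
    {
            cfs_spin_lock_init(&count_lock);
    }

    static int count_inc(void)
    {
            int newval;

            cfs_spin_lock(&count_lock);
            newval = ++count;
            cfs_spin_unlock(&count_lock);
            return newval;
    }
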
index 8f5a925..1769927 100644 (file)
@@ -62,6 +62,11 @@ typedef struct page                     cfs_page_t;
 #define CFS_PAGE_SHIFT                  PAGE_CACHE_SHIFT
 #define CFS_PAGE_MASK                   (~((__u64)CFS_PAGE_SIZE-1))
 
+#define cfs_num_physpages               num_physpages
+
+#define cfs_copy_from_user(to, from, n) copy_from_user(to, from, n)
+#define cfs_copy_to_user(to, from, n)   copy_to_user(to, from, n)
+
 static inline void *cfs_page_address(cfs_page_t *page)
 {
         /*
@@ -116,9 +121,10 @@ extern void __cfs_free_pages(cfs_page_t *page, unsigned int order);
 
 #if BITS_PER_LONG == 32
 /* limit to lowmem on 32-bit systems */
-#define CFS_NUM_CACHEPAGES min(num_physpages, 1UL << (30-CFS_PAGE_SHIFT) *3/4)
+#define CFS_NUM_CACHEPAGES \
+        min(cfs_num_physpages, 1UL << (30 - CFS_PAGE_SHIFT) * 3 / 4)
 #else
-#define CFS_NUM_CACHEPAGES num_physpages
+#define CFS_NUM_CACHEPAGES cfs_num_physpages
 #endif
 
 /*
@@ -142,10 +148,28 @@ extern void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
 extern void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
 extern int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
 
-/*
- */
 #define CFS_DECL_MMSPACE                mm_segment_t __oldfs
-#define CFS_MMSPACE_OPEN                do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
+#define CFS_MMSPACE_OPEN \
+        do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
 #define CFS_MMSPACE_CLOSE               set_fs(__oldfs)
 
+#define CFS_SLAB_HWCACHE_ALIGN          SLAB_HWCACHE_ALIGN
+#define CFS_SLAB_KERNEL                 SLAB_KERNEL
+#define CFS_SLAB_NOFS                   SLAB_NOFS
+
+/*
+ * Shrinker
+ */
+
+#ifndef HAVE_REGISTER_SHRINKER
+/* Shrinker callback */
+typedef shrinker_t cfs_shrinker_t;
+#define cfs_set_shrinker(seeks, shrinker) set_shrinker(seeks, shrinker)
+#define cfs_remove_shrinker(shrinker)     remove_shrinker(shrinker)
+#endif /* !HAVE_REGISTER_SHRINKER */
+
+/* struct shrinker */
+#define cfs_shrinker shrinker
+
+#define CFS_DEFAULT_SEEKS                 DEFAULT_SEEKS
 #endif /* __LINUX_CFS_MEM_H__ */
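
CFS_DECL_MMSPACE/CFS_MMSPACE_OPEN/CFS_MMSPACE_CLOSE bundle the classic
get_fs()/set_fs() bracket for passing kernel buffers to user-pointer APIs. A
hedged sketch of the intended usage, with a hypothetical read helper (the
f_op->read call assumes the 2.6-era file_operations signature):

    /* Hypothetical helper: read into a kernel buffer through an
     * interface that normally checks user pointers. */
    static ssize_t kernel_read_buf(cfs_file_t *filp, char *buf, size_t len)
    {
            CFS_DECL_MMSPACE;               /* mm_segment_t __oldfs */
            ssize_t rc;

            CFS_MMSPACE_OPEN;               /* set_fs(get_ds()) */
            rc = filp->f_op->read(filp, buf, len, cfs_filp_poff(filp));
            CFS_MMSPACE_CLOSE;              /* set_fs(__oldfs) */

            return rc;
    }
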
index f0bf4e6..1dca9ac 100644 (file)
 
 #include <libcfs/linux/linux-time.h>
 
+#define CFS_KERN_EMERG   KERN_EMERG
+#define CFS_KERN_ALERT   KERN_ALERT
+#define CFS_KERN_CRIT    KERN_CRIT
+#define CFS_KERN_ERR     KERN_ERR
+#define CFS_KERN_WARNING KERN_WARNING
+#define CFS_KERN_NOTICE  KERN_NOTICE
+#define CFS_KERN_INFO    KERN_INFO
+#define CFS_KERN_DEBUG   KERN_DEBUG
+
+/*
+ * CPU
+ */
+#ifdef for_each_possible_cpu
+#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#elif defined(for_each_cpu)
+#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
+#endif
+
+#ifdef NR_CPUS
+#define CFS_NR_CPUS     NR_CPUS
+#else
+#define CFS_NR_CPUS     1
+#endif
+
+#define cfs_set_cpus_allowed(t, mask)  set_cpus_allowed(t, mask)
+/*
+ * cache
+ */
+#define CFS_L1_CACHE_ALIGN(x)           L1_CACHE_ALIGN(x)
+
+/*
+ * IRQs
+ */
+#define CFS_NR_IRQS                     NR_IRQS
+
 #define CFS_EXPORT_SYMBOL(s)            EXPORT_SYMBOL(s)
 
 /*
@@ -111,16 +146,20 @@ LL_PROC_PROTO(name)                                     \
 #define cfs_symbol_get(s)               inter_module_get(s)
 #define cfs_symbol_put(s)               inter_module_put(s)
 #define cfs_module_get()                MOD_INC_USE_COUNT
-#define cfs_module_put()                MOD_DEC_USE_COUNT
 #else
 #define cfs_symbol_register(s, p)       do {} while(0)
 #define cfs_symbol_unregister(s)        do {} while(0)
 #define cfs_symbol_get(s)               symbol_get(s)
 #define cfs_symbol_put(s)               symbol_put(s)
 #define cfs_module_get()                try_module_get(THIS_MODULE)
-#define cfs_module_put()                module_put(THIS_MODULE)
+#define cfs_try_module_get(m)           try_module_get(m)
+#define __cfs_module_get(m)             __module_get(m)
+#define cfs_module_put(m)               module_put(m)
+#define cfs_module_refcount(m)          module_refcount(m)
 #endif
 
+typedef struct module cfs_module_t;
+
 /*
  * Proc file system APIs
  */
@@ -138,7 +177,8 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
 #define CFS_TASK_UNINT                  TASK_UNINTERRUPTIBLE
 #define CFS_TASK_RUNNING                TASK_RUNNING
 
-#define cfs_set_current_state(state) set_current_state(state)
+#define cfs_set_current_state(state)    set_current_state(state)
+#define cfs_wait_event(wq, cond)        wait_event(wq, cond)
 
 typedef wait_queue_t                   cfs_waitlink_t;
 typedef wait_queue_head_t              cfs_waitq_t;
@@ -180,8 +220,9 @@ typedef struct task_struct              cfs_task_t;
 
 /* Module interfaces */
 #define cfs_module(name, version, init, fini) \
-module_init(init);                            \
-module_exit(fini)
+        module_init(init);                    \
+        module_exit(fini)
+#define cfs_request_module              request_module
 
 /*
  * Signal
@@ -236,7 +277,7 @@ do {                                                                 \
 #endif
 
 #ifndef wait_event_interruptible_timeout /* Only for RHEL3 2.4.21 kernel */
-#define __wait_event_interruptible_timeout(wq, condition, timeout, ret)   \
+#define __wait_event_interruptible_timeout(wq, condition, timeout, ret)  \
 do {                                                           \
        int __ret = 0;                                         \
        if (!(condition)) {                                    \
@@ -276,7 +317,7 @@ do {                                                              \
        ret = 0;                                                  \
        if (!(condition))                                         \
                __wait_event_interruptible_timeout(wq, condition, \
-                                               timeout, ret);     \
+                                                   timeout, ret); \
 } while (0)
 #else
 #define cfs_waitq_wait_event_interruptible_timeout(wq, c, timeout, ret) \
@@ -289,13 +330,22 @@ do {                                                              \
 
 typedef atomic_t cfs_atomic_t;
 
-#define cfs_atomic_read(atom)         atomic_read(atom)
-#define cfs_atomic_inc(atom)          atomic_inc(atom)
-#define cfs_atomic_dec(atom)          atomic_dec(atom)
-#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
-#define cfs_atomic_set(atom, value)   atomic_set(atom, value)
-#define cfs_atomic_add(value, atom)   atomic_add(value, atom)
-#define cfs_atomic_sub(value, atom)   atomic_sub(value, atom)
+#define cfs_atomic_read(atom)                atomic_read(atom)
+#define cfs_atomic_inc(atom)                 atomic_inc(atom)
+#define cfs_atomic_inc_and_test(atom)        atomic_inc_and_test(atom)
+#define cfs_atomic_inc_return(atom)          atomic_inc_return(atom)
+#define cfs_atomic_inc_not_zero(atom)        atomic_inc_not_zero(atom)
+#define cfs_atomic_dec(atom)                 atomic_dec(atom)
+#define cfs_atomic_dec_and_test(atom)        atomic_dec_and_test(atom)
+#define cfs_atomic_dec_and_lock(atom, lock)  atomic_dec_and_lock(atom, lock)
+#define cfs_atomic_dec_return(atom)          atomic_dec_return(atom)
+#define cfs_atomic_set(atom, value)          atomic_set(atom, value)
+#define cfs_atomic_add(value, atom)          atomic_add(value, atom)
+#define cfs_atomic_add_return(value, atom)   atomic_add_return(value, atom)
+#define cfs_atomic_sub(value, atom)          atomic_sub(value, atom)
+#define cfs_atomic_sub_and_test(value, atom) atomic_sub_and_test(value, atom)
+#define cfs_atomic_sub_return(value, atom)   atomic_sub_return(value, atom)
+#define CFS_ATOMIC_INIT(i)                   ATOMIC_INIT(i)
 
 /*
  * membar
@@ -309,4 +359,24 @@ typedef atomic_t cfs_atomic_t;
 
 #define cfs_in_interrupt() in_interrupt()
 
+/*
+ * might_sleep
+ */
+#define cfs_might_sleep() might_sleep()
+
+/*
+ * group_info
+ */
+typedef struct group_info cfs_group_info_t;
+
+#define cfs_get_group_info(group_info)     get_group_info(group_info)
+#define cfs_put_group_info(group_info)     put_group_info(group_info)
+#define cfs_set_current_groups(group_info) set_current_groups(group_info)
+#define cfs_groups_free(group_info)        groups_free(group_info)
+#define cfs_groups_alloc(gidsetsize)       groups_alloc(gidsetsize)
+
+/*
+ * Random bytes
+ */
+#define cfs_get_random_bytes(buf, nbytes)  get_random_bytes(buf, nbytes)
 #endif
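
The enlarged atomic set covers the standard reference-counting vocabulary. A
small sketch of the usual get/put pattern under the new names (struct obj and
obj_free are invented for illustration):

    /* Illustrative refcounting with the renamed atomic wrappers. */
    struct obj {
            cfs_atomic_t ref;
    };

    static void obj_free(struct obj *o);    /* hypothetical destructor */

    static void obj_get(struct obj *o)
    {
            cfs_atomic_inc(&o->ref);
    }

    static void obj_put(struct obj *o)
    {
            /* the thread dropping the last reference frees the object */
            if (cfs_atomic_dec_and_test(&o->ref))
                    obj_free(o);
    }
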
index 7d63338..c473c62 100644 (file)
@@ -69,8 +69,8 @@
  *  cfs_time_t     cfs_time_current(void);
  *  cfs_time_t     cfs_time_add    (cfs_time_t, cfs_duration_t);
  *  cfs_duration_t cfs_time_sub    (cfs_time_t, cfs_time_t);
- *  int            cfs_time_before (cfs_time_t, cfs_time_t);
- *  int            cfs_time_beforeq(cfs_time_t, cfs_time_t);
+ *  int            cfs_impl_time_before (cfs_time_t, cfs_time_t);
+ *  int            cfs_impl_time_before_eq(cfs_time_t, cfs_time_t);
  *
  *  cfs_duration_t cfs_duration_build(int64_t);
  *
@@ -92,6 +92,7 @@
 
 #define ONE_BILLION ((u_int64_t)1000000000)
 #define ONE_MILLION 1000000
+#define CFS_HZ      HZ
 
 #ifndef __KERNEL__
 #error This include is only for kernel use.
@@ -177,7 +178,17 @@ static inline unsigned long long __cfs_fs_time_flat(cfs_fs_time_t *t)
 
 typedef unsigned long cfs_time_t;      /* jiffies */
 typedef long cfs_duration_t;
+typedef cycles_t cfs_cycles_t;
 
+static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
+{
+        return time_before(t1, t2);
+}
+
+static inline int cfs_time_beforeq(cfs_time_t t1, cfs_time_t t2)
+{
+        return time_before_eq(t1, t2);
+}
 
 static inline cfs_time_t cfs_time_current(void)
 {
@@ -246,7 +257,8 @@ static inline void cfs_duration_usec(cfs_duration_t d, struct timeval *s)
         s->tv_usec = t;
 #else
         s->tv_sec = d / HZ;
-        s->tv_usec = ((d - (cfs_duration_t)s->tv_sec * HZ) * ONE_MILLION) / HZ;
+        s->tv_usec = ((d - (cfs_duration_t)s->tv_sec * HZ) *
+                      ONE_MILLION) / HZ;
 #endif
 }
 
@@ -306,7 +318,7 @@ static inline int cfs_time_beforeq_64(__u64 t1, __u64 t2)
 #define CFS_TIME_T              "%lu"
 #define CFS_DURATION_T          "%ld"
 
-#define cfs_do_gettimeofday(tv) do_gettimeofday(tv)
+#define cfs_gettimeofday(tv) do_gettimeofday(tv)
 
 #endif /* __LIBCFS_LINUX_LINUX_TIME_H__ */
 /*
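
cfs_time_before()/cfs_time_beforeq() inherit the jiffies-wrapping safety of
time_before()/time_before_eq(), so deadline arithmetic stays correct across
counter overflow. A sketch of a deadline loop built from the helpers
documented above (poll_done() is a hypothetical completion check; a real
caller would sleep rather than spin):

    /* Illustrative deadline loop over the renamed time helpers. */
    static int poll_done(void);             /* hypothetical condition */

    static int wait_until_done(cfs_duration_t timeout)
    {
            cfs_time_t deadline = cfs_time_add(cfs_time_current(), timeout);

            while (cfs_time_before(cfs_time_current(), deadline)) {
                    if (poll_done())
                            return 0;
            }
            return -ETIMEDOUT;
    }
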
diff --git a/libcfs/include/libcfs/linux/linux-types.h b/libcfs/include/libcfs/linux/linux-types.h
new file mode 100644 (file)
index 0000000..672b812
--- /dev/null
@@ -0,0 +1,40 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright © 2009 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-types.h
+ */
+#include <linux/types.h>
+
+typedef umode_t cfs_umode_t;
index 8d66df5..e816908 100644 (file)
 #endif
 
 #ifndef HAVE_CPU_ONLINE
-#define cpu_online(cpu) ((1<<cpu) & (cpu_online_map))
+#define cfs_cpu_online(cpu) ((1<<cpu) & (cpu_online_map))
+#else
+#define cfs_cpu_online(cpu) cpu_online(cpu)
 #endif
 #ifndef HAVE_CPUMASK_T
-typedef unsigned long cpumask_t;
-#define cpu_set(cpu, map) set_bit(cpu, &(map))
-#define cpus_clear(map) memset(&(map), 0, sizeof(cpumask_t))
+typedef unsigned long cfs_cpumask_t;
+#define cfs_cpu_set(cpu, map) set_bit(cpu, &(map))
+#define cpus_clear(map) memset(&(map), 0, sizeof(cfs_cpumask_t))
 #endif
 
 #ifndef __user
@@ -141,7 +143,9 @@ typedef unsigned long cpumask_t;
 #endif
 
 #ifndef __fls
-#define __fls fls
+#define __cfs_fls fls
+#else
+#define __cfs_fls __fls
 #endif
 
 #define ll_proc_dointvec(table, write, filp, buffer, lenp, ppos)        \
index bbdbff5..7b9f9cc 100644 (file)
@@ -5,16 +5,81 @@
 
 #include <linux/list.h>
 
-#define CFS_LIST_HEAD_INIT(n)          LIST_HEAD_INIT(n)
-#define CFS_LIST_HEAD(n)               LIST_HEAD(n)
-#define CFS_INIT_LIST_HEAD(p)          INIT_LIST_HEAD(p)
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define CFS_HLIST_HEAD_INIT            HLIST_HEAD_INIT
-#define CFS_HLIST_HEAD(n)              HLIST_HEAD(n)
-#define CFS_INIT_HLIST_HEAD(p)         INIT_HLIST_HEAD(p)
-#define CFS_INIT_HLIST_NODE(p)         INIT_HLIST_NODE(p)
-#endif
+typedef struct list_head cfs_list_t;
+
+#define __cfs_list_add(new, prev, next)      __list_add(new, prev, next)
+#define cfs_list_add(new, head)              list_add(new, head)
+
+#define cfs_list_add_tail(new, head)         list_add_tail(new, head)
+
+#define __cfs_list_del(prev, next)           __list_del(prev, next)
+#define cfs_list_del(entry)                  list_del(entry)
+#define cfs_list_del_init(entry)             list_del_init(entry)
+
+#define cfs_list_move(list, head)            list_move(list, head)
+#define cfs_list_move_tail(list, head)       list_move_tail(list, head)
+
+#define cfs_list_empty(head)                 list_empty(head)
+#define cfs_list_empty_careful(head)         list_empty_careful(head)
+
+#define __cfs_list_splice(list, head)        __list_splice(list, head)
+#define cfs_list_splice(list, head)          list_splice(list, head)
+
+#define cfs_list_splice_init(list, head)     list_splice_init(list, head)
+
+#define cfs_list_entry(ptr, type, member)    list_entry(ptr, type, member)
+#define cfs_list_for_each(pos, head)         list_for_each(pos, head)
+#define cfs_list_for_each_safe(pos, n, head) list_for_each_safe(pos, n, head)
+#define cfs_list_for_each_prev(pos, head)    list_for_each_prev(pos, head)
+#define cfs_list_for_each_entry(pos, head, member) \
+        list_for_each_entry(pos, head, member)
+#define cfs_list_for_each_entry_reverse(pos, head, member) \
+        list_for_each_entry_reverse(pos, head, member)
+#define cfs_list_for_each_entry_safe(pos, n, head, member) \
+        list_for_each_entry_safe(pos, n, head, member)
+#ifdef list_for_each_entry_safe_from
+#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \
+        list_for_each_entry_safe_from(pos, n, head, member)
+#endif /* list_for_each_entry_safe_from */
+#define cfs_list_for_each_entry_continue(pos, head, member) \
+        list_for_each_entry_continue(pos, head, member)
+
+#define CFS_LIST_HEAD_INIT(n)               LIST_HEAD_INIT(n)
+#define CFS_LIST_HEAD(n)                    LIST_HEAD(n)
+#define CFS_INIT_LIST_HEAD(p)               INIT_LIST_HEAD(p)
+
+typedef struct hlist_head cfs_hlist_head_t;
+typedef struct hlist_node cfs_hlist_node_t;
+
+#define cfs_hlist_unhashed(h)              hlist_unhashed(h)
+
+#define cfs_hlist_empty(h)                 hlist_empty(h)
+
+#define __cfs_hlist_del(n)                 __hlist_del(n)
+#define cfs_hlist_del(n)                   hlist_del(n)
+#define cfs_hlist_del_init(n)              hlist_del_init(n)
+
+#define cfs_hlist_add_head(n, next)        hlist_add_head(n, next)
+#define cfs_hlist_add_before(n, next)      hlist_add_before(n, next)
+#define cfs_hlist_add_after(n, next)       hlist_add_after(n, next)
+
+#define cfs_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
+#define cfs_hlist_for_each(pos, head)      hlist_for_each(pos, head)
+#define cfs_hlist_for_each_safe(pos, n, head) \
+        hlist_for_each_safe(pos, n, head)
+#define cfs_hlist_for_each_entry(tpos, pos, head, member) \
+        hlist_for_each_entry(tpos, pos, head, member)
+#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \
+        hlist_for_each_entry_continue(tpos, pos, member)
+#define cfs_hlist_for_each_entry_from(tpos, pos, member) \
+        hlist_for_each_entry_from(tpos, pos, member)
+#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+        hlist_for_each_entry_safe(tpos, pos, n, head, member)
+
+#define CFS_HLIST_HEAD_INIT               HLIST_HEAD_INIT
+#define CFS_HLIST_HEAD(n)                 HLIST_HEAD(n)
+#define CFS_INIT_HLIST_HEAD(p)            INIT_HLIST_HEAD(p)
+#define CFS_INIT_HLIST_NODE(p)            INIT_HLIST_NODE(p)
 
 #else /* !defined (__linux__) || !defined(__KERNEL__) */
 
 
 #define prefetch(a) ((void)a)
 
-struct list_head {
-       struct list_head *next, *prev;
+struct cfs_list_head {
+       struct cfs_list_head *next, *prev;
 };
 
-typedef struct list_head list_t;
+typedef struct cfs_list_head cfs_list_t;
 
 #define CFS_LIST_HEAD_INIT(name) { &(name), &(name) }
 
 #define CFS_LIST_HEAD(name) \
-       struct list_head name = CFS_LIST_HEAD_INIT(name)
+       cfs_list_t name = CFS_LIST_HEAD_INIT(name)
 
 #define CFS_INIT_LIST_HEAD(ptr) do { \
        (ptr)->next = (ptr); (ptr)->prev = (ptr); \
@@ -51,9 +116,9 @@ typedef struct list_head list_t;
  * This is only for internal list manipulation where we know
  * the prev/next entries already!
  */
-static inline void __list_add(struct list_head * new,
-                             struct list_head * prev,
-                             struct list_head * next)
+static inline void __cfs_list_add(cfs_list_t * new,
+                                  cfs_list_t * prev,
+                                  cfs_list_t * next)
 {
        next->prev = new;
        new->next = next;
@@ -69,9 +134,10 @@ static inline void __list_add(struct list_head * new,
  * Insert a new entry after the specified head.
  * This is good for implementing stacks.
  */
-static inline void list_add(struct list_head *new, struct list_head *head)
+static inline void cfs_list_add(cfs_list_t *new,
+                                cfs_list_t *head)
 {
-       __list_add(new, head, head->next);
+       __cfs_list_add(new, head, head->next);
 }
 
 /**
@@ -82,9 +148,10 @@ static inline void list_add(struct list_head *new, struct list_head *head)
  * Insert a new entry before the specified head.
  * This is useful for implementing queues.
  */
-static inline void list_add_tail(struct list_head *new, struct list_head *head)
+static inline void cfs_list_add_tail(cfs_list_t *new,
+                                     cfs_list_t *head)
 {
-       __list_add(new, head->prev, head);
+       __cfs_list_add(new, head->prev, head);
 }
 
 /*
@@ -94,7 +161,8 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
  * This is only for internal list manipulation where we know
  * the prev/next entries already!
  */
-static inline void __list_del(struct list_head * prev, struct list_head * next)
+static inline void __cfs_list_del(cfs_list_t *prev,
+                                  cfs_list_t *next)
 {
        next->prev = prev;
        prev->next = next;
@@ -103,51 +171,55 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
 /**
  * Remove an entry from the list it is currently in.
  * \param entry the entry to remove
- * Note: list_empty(entry) does not return true after this, the entry is in an undefined state.
+ * Note: list_empty(entry) does not return true after this, the entry is in an
+ * undefined state.
  */
-static inline void list_del(struct list_head *entry)
+static inline void cfs_list_del(cfs_list_t *entry)
 {
-       __list_del(entry->prev, entry->next);
+       __cfs_list_del(entry->prev, entry->next);
 }
 
 /**
  * Remove an entry from the list it is currently in and reinitialize it.
  * \param entry the entry to remove.
  */
-static inline void list_del_init(struct list_head *entry)
+static inline void cfs_list_del_init(cfs_list_t *entry)
 {
-       __list_del(entry->prev, entry->next);
+       __cfs_list_del(entry->prev, entry->next);
        CFS_INIT_LIST_HEAD(entry);
 }
 
 /**
- * Remove an entry from the list it is currently in and insert it at the start of another list.
+ * Remove an entry from the list it is currently in and insert it at the start
+ * of another list.
  * \param list the entry to move
  * \param head the list to move it to
  */
-static inline void list_move(struct list_head *list, struct list_head *head)
+static inline void cfs_list_move(cfs_list_t *list,
+                                 cfs_list_t *head)
 {
-       __list_del(list->prev, list->next);
-       list_add(list, head);
+       __cfs_list_del(list->prev, list->next);
+       cfs_list_add(list, head);
 }
 
 /**
- * Remove an entry from the list it is currently in and insert it at the end of another list.
+ * Remove an entry from the list it is currently in and insert it at the end of
+ * another list.
  * \param list the entry to move
  * \param head the list to move it to
  */
-static inline void list_move_tail(struct list_head *list,
-                                 struct list_head *head)
+static inline void cfs_list_move_tail(cfs_list_t *list,
+                                      cfs_list_t *head)
 {
-       __list_del(list->prev, list->next);
-       list_add_tail(list, head);
+       __cfs_list_del(list->prev, list->next);
+       cfs_list_add_tail(list, head);
 }
 
 /**
  * Test whether a list is empty
  * \param head the list to test.
  */
-static inline int list_empty(struct list_head *head)
+static inline int cfs_list_empty(cfs_list_t *head)
 {
        return head->next == head;
 }
@@ -159,23 +231,23 @@ static inline int list_empty(struct list_head *head)
  * Tests whether a list is empty _and_ checks that no other CPU might be
  * in the process of modifying either member (next or prev)
  *
- * NOTE: using list_empty_careful() without synchronization
+ * NOTE: using cfs_list_empty_careful() without synchronization
  * can only be safe if the only activity that can happen
- * to the list entry is list_del_init(). Eg. it cannot be used
+ * to the list entry is cfs_list_del_init(). E.g. it cannot be used
  * if another CPU could re-list_add() it.
  */
-static inline int list_empty_careful(const struct list_head *head)
+static inline int cfs_list_empty_careful(const cfs_list_t *head)
 {
-        struct list_head *next = head->next;
+        cfs_list_t *next = head->next;
         return (next == head) && (next == head->prev);
 }
 
-static inline void __list_splice(struct list_head *list,
-                                struct list_head *head)
+static inline void __cfs_list_splice(cfs_list_t *list,
+                                     cfs_list_t *head)
 {
-       struct list_head *first = list->next;
-       struct list_head *last = list->prev;
-       struct list_head *at = head->next;
+       cfs_list_t *first = list->next;
+       cfs_list_t *last = list->prev;
+       cfs_list_t *at = head->next;
 
        first->prev = head;
        head->next = first;
@@ -192,10 +264,11 @@ static inline void __list_splice(struct list_head *list,
  * The contents of \a list are added at the start of \a head.  \a list is in an
  * undefined state on return.
  */
-static inline void list_splice(struct list_head *list, struct list_head *head)
+static inline void cfs_list_splice(cfs_list_t *list,
+                                   cfs_list_t *head)
 {
-       if (!list_empty(list))
-               __list_splice(list, head);
+       if (!cfs_list_empty(list))
+               __cfs_list_splice(list, head);
 }
 
 /**
@@ -206,38 +279,38 @@ static inline void list_splice(struct list_head *list, struct list_head *head)
  * The contents of \a list are added at the start of \a head.  \a list is empty
  * on return.
  */
-static inline void list_splice_init(struct list_head *list,
-                                   struct list_head *head)
+static inline void cfs_list_splice_init(cfs_list_t *list,
+                                        cfs_list_t *head)
 {
-       if (!list_empty(list)) {
-               __list_splice(list, head);
+       if (!cfs_list_empty(list)) {
+               __cfs_list_splice(list, head);
                CFS_INIT_LIST_HEAD(list);
        }
 }
 
 /**
- * Get the container of a list 
+ * Get the container of a list
  * \param ptr   the embedded list.
  * \param type  the type of the struct this is embedded in.
  * \param member the member name of the list within the struct.
  */
-#define list_entry(ptr, type, member) \
+#define cfs_list_entry(ptr, type, member) \
        ((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
 
 /**
  * Iterate over a list
  * \param pos  the iterator
  * \param head the list to iterate over
- * 
+ *
  * Behaviour is undefined if \a pos is removed from the list in the body of the
  * loop.
  */
-#define list_for_each(pos, head) \
+#define cfs_list_for_each(pos, head) \
        for (pos = (head)->next, prefetch(pos->next); pos != (head); \
                pos = pos->next, prefetch(pos->next))
 
 /**
- * iterate over a list safely
+ * Iterate over a list safely
  * \param pos  the iterator
  * \param n     temporary storage
  * \param head the list to iterate over
@@ -245,11 +318,22 @@ static inline void list_splice_init(struct list_head *list,
  * This is safe to use if \a pos could be removed from the list in the body of
  * the loop.
  */
-#define list_for_each_safe(pos, n, head) \
+#define cfs_list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)
 
 /**
+ * Iterate over a list continuing after existing point
+ * \param pos    the type * to use as a loop counter
+ * \param head   the list head
+ * \param member the name of the list_struct within the struct
+ */
+#define cfs_list_for_each_entry_continue(pos, head, member)                 \
+        for (pos = cfs_list_entry(pos->member.next, typeof(*pos), member);  \
+             prefetch(pos->member.next), &pos->member != (head);            \
+             pos = cfs_list_entry(pos->member.next, typeof(*pos), member))
+
+/**
  * \defgroup hlist Hash List
  * Double linked lists with a single pointer list head.
  * Mostly useful for hash tables where the two pointer list head is too
@@ -257,13 +341,13 @@ static inline void list_splice_init(struct list_head *list,
  * @{
  */
 
-struct hlist_head {
-       struct hlist_node *first;
-};
+typedef struct cfs_hlist_node {
+       struct cfs_hlist_node *next, **pprev;
+} cfs_hlist_node_t;
 
-struct hlist_node {
-       struct hlist_node *next, **pprev;
-};
+typedef struct cfs_hlist_head {
+       cfs_hlist_node_t *first;
+} cfs_hlist_head_t;
 
 /* @} */
 
@@ -282,50 +366,46 @@ struct hlist_node {
  */
 
 #define CFS_HLIST_HEAD_INIT { NULL_P }
-#define CFS_HLIST_HEAD(name) struct hlist_head name = { NULL_P }
+#define CFS_HLIST_HEAD(name) cfs_hlist_head_t name = { NULL_P }
 #define CFS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL_P)
 #define CFS_INIT_HLIST_NODE(ptr) ((ptr)->next = NULL_P, (ptr)->pprev = NULL_P)
 
-#define HLIST_HEAD_INIT                CFS_HLIST_HEAD_INIT
-#define HLIST_HEAD(n)          CFS_HLIST_HEAD(n)
-#define INIT_HLIST_HEAD(p)     CFS_INIT_HLIST_HEAD(p)
-#define INIT_HLIST_NODE(p)     CFS_INIT_HLIST_NODE(p)
-
-static inline int hlist_unhashed(const struct hlist_node *h)
+static inline int cfs_hlist_unhashed(const cfs_hlist_node_t *h)
 {
        return !h->pprev;
 }
 
-static inline int hlist_empty(const struct hlist_head *h)
+static inline int cfs_hlist_empty(const cfs_hlist_head_t *h)
 {
        return !h->first;
 }
 
-static inline void __hlist_del(struct hlist_node *n)
+static inline void __cfs_hlist_del(cfs_hlist_node_t *n)
 {
-       struct hlist_node *next = n->next;
-       struct hlist_node **pprev = n->pprev;
+       cfs_hlist_node_t *next = n->next;
+       cfs_hlist_node_t **pprev = n->pprev;
        *pprev = next;
        if (next)
                next->pprev = pprev;
 }
 
-static inline void hlist_del(struct hlist_node *n)
+static inline void cfs_hlist_del(cfs_hlist_node_t *n)
 {
-       __hlist_del(n);
+       __cfs_hlist_del(n);
 }
 
-static inline void hlist_del_init(struct hlist_node *n)
+static inline void cfs_hlist_del_init(cfs_hlist_node_t *n)
 {
        if (n->pprev)  {
-               __hlist_del(n);
-               INIT_HLIST_NODE(n);
+               __cfs_hlist_del(n);
+               CFS_INIT_HLIST_NODE(n);
        }
 }
 
-static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+static inline void cfs_hlist_add_head(cfs_hlist_node_t *n,
+                                      cfs_hlist_head_t *h)
 {
-       struct hlist_node *first = h->first;
+       cfs_hlist_node_t *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
@@ -334,8 +414,8 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 }
 
 /* next must be != NULL */
-static inline void hlist_add_before(struct hlist_node *n,
-                                       struct hlist_node *next)
+static inline void cfs_hlist_add_before(cfs_hlist_node_t *n,
+                                       cfs_hlist_node_t *next)
 {
        n->pprev = next->pprev;
        n->next = next;
@@ -343,8 +423,8 @@ static inline void hlist_add_before(struct hlist_node *n,
        *(n->pprev) = n;
 }
 
-static inline void hlist_add_after(struct hlist_node *n,
-                                       struct hlist_node *next)
+static inline void cfs_hlist_add_after(cfs_hlist_node_t *n,
+                                       cfs_hlist_node_t *next)
 {
        next->next = n->next;
        n->next = next;
@@ -354,13 +434,13 @@ static inline void hlist_add_after(struct hlist_node *n,
                next->next->pprev  = &next->next;
 }
 
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+#define cfs_hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
-#define hlist_for_each(pos, head) \
+#define cfs_hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && (prefetch(pos->next), 1); \
             pos = pos->next)
 
-#define hlist_for_each_safe(pos, n, head) \
+#define cfs_hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && (n = pos->next, 1); \
             pos = n)
 
@@ -371,10 +451,10 @@ static inline void hlist_add_after(struct hlist_node *n,
  * \param head  the head for your list.
  * \param member the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry(tpos, pos, head, member)                   \
-       for (pos = (head)->first;                                        \
-            pos && ({ prefetch(pos->next); 1;}) &&                      \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+#define cfs_hlist_for_each_entry(tpos, pos, head, member)                    \
+       for (pos = (head)->first;                                            \
+            pos && ({ prefetch(pos->next); 1;}) &&                          \
+               ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)
 
 /**
@@ -383,10 +463,10 @@ static inline void hlist_add_after(struct hlist_node *n,
  * \param pos   the &struct hlist_node to use as a loop counter.
  * \param member the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_continue(tpos, pos, member)                \
-       for (pos = (pos)->next;                                          \
-            pos && ({ prefetch(pos->next); 1;}) &&                      \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+#define cfs_hlist_for_each_entry_continue(tpos, pos, member)                 \
+       for (pos = (pos)->next;                                              \
+            pos && ({ prefetch(pos->next); 1;}) &&                          \
+               ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)
 
 /**
@@ -395,9 +475,9 @@ static inline void hlist_add_after(struct hlist_node *n,
  * \param pos   the &struct hlist_node to use as a loop counter.
  * \param member the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_from(tpos, pos, member)                    \
-       for (; pos && ({ prefetch(pos->next); 1;}) &&                    \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+#define cfs_hlist_for_each_entry_from(tpos, pos, member)                        \
+       for (; pos && ({ prefetch(pos->next); 1;}) &&                        \
+               ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)
 
 /**
@@ -408,62 +488,57 @@ static inline void hlist_add_after(struct hlist_node *n,
  * \param head  the head for your list.
  * \param member the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member)           \
-       for (pos = (head)->first;                                        \
-            pos && ({ n = pos->next; 1; }) &&                           \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member)            \
+       for (pos = (head)->first;                                            \
+            pos && ({ n = pos->next; 1; }) &&                               \
+               ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = n)
 
 /* @} */
 
 #endif /* __linux__ && __KERNEL__ */
 
-#ifndef list_for_each_prev
+#ifndef cfs_list_for_each_prev
 /**
  * Iterate over a list in reverse order
  * \param pos  the &struct list_head to use as a loop counter.
  * \param head the head for your list.
  */
-#define list_for_each_prev(pos, head) \
+#define cfs_list_for_each_prev(pos, head) \
        for (pos = (head)->prev, prefetch(pos->prev); pos != (head);     \
                pos = pos->prev, prefetch(pos->prev))
 
-#endif /* list_for_each_prev */
+#endif /* cfs_list_for_each_prev */
 
-#ifndef list_for_each_entry
+#ifndef cfs_list_for_each_entry
 /**
  * Iterate over a list of given type
  * \param pos        the type * to use as a loop counter.
  * \param head       the head for your list.
  * \param member     the name of the list_struct within the struct.
  */
-#define list_for_each_entry(pos, head, member)                         \
-        for (pos = list_entry((head)->next, typeof(*pos), member),     \
-                    prefetch(pos->member.next);                        \
-            &pos->member != (head);                                    \
-            pos = list_entry(pos->member.next, typeof(*pos), member),  \
+#define cfs_list_for_each_entry(pos, head, member)                          \
+        for (pos = cfs_list_entry((head)->next, typeof(*pos), member),      \
+                    prefetch(pos->member.next);                            \
+            &pos->member != (head);                                        \
+            pos = cfs_list_entry(pos->member.next, typeof(*pos), member),  \
             prefetch(pos->member.next))
-#endif /* list_for_each_entry */
+#endif /* cfs_list_for_each_entry */
 
-#ifndef list_for_each_entry_rcu
-#define list_for_each_entry_rcu(pos, head, member) \
-       list_for_each_entry(pos, head, member)
-#endif
-
-#ifndef list_for_each_entry_reverse
+#ifndef cfs_list_for_each_entry_reverse
 /**
  * Iterate backwards over a list of given type.
  * \param pos        the type * to use as a loop counter.
  * \param head       the head for your list.
  * \param member     the name of the list_struct within the struct.
  */
-#define list_for_each_entry_reverse(pos, head, member)                  \
-       for (pos = list_entry((head)->prev, typeof(*pos), member);      \
-            prefetch(pos->member.prev), &pos->member != (head);        \
-            pos = list_entry(pos->member.prev, typeof(*pos), member))
-#endif /* list_for_each_entry_reverse */
+#define cfs_list_for_each_entry_reverse(pos, head, member)                  \
+       for (pos = cfs_list_entry((head)->prev, typeof(*pos), member);      \
+            prefetch(pos->member.prev), &pos->member != (head);            \
+            pos = cfs_list_entry(pos->member.prev, typeof(*pos), member))
+#endif /* cfs_list_for_each_entry_reverse */
 
-#ifndef list_for_each_entry_safe
+#ifndef cfs_list_for_each_entry_safe
 /**
  * Iterate over a list of given type safe against removal of list entry
  * \param pos        the type * to use as a loop counter.
@@ -471,15 +546,15 @@ static inline void hlist_add_after(struct hlist_node *n,
  * \param head       the head for your list.
  * \param member     the name of the list_struct within the struct.
  */
-#define list_for_each_entry_safe(pos, n, head, member)                 \
-        for (pos = list_entry((head)->next, typeof(*pos), member),     \
-               n = list_entry(pos->member.next, typeof(*pos), member); \
-            &pos->member != (head);                                    \
-            pos = n, n = list_entry(n->member.next, typeof(*n), member))
+#define cfs_list_for_each_entry_safe(pos, n, head, member)                   \
+        for (pos = cfs_list_entry((head)->next, typeof(*pos), member),       \
+               n = cfs_list_entry(pos->member.next, typeof(*pos), member);  \
+            &pos->member != (head);                                         \
+            pos = n, n = cfs_list_entry(n->member.next, typeof(*n), member))
 
-#endif /* list_for_each_entry_safe */
+#endif /* cfs_list_for_each_entry_safe */
 
-#ifndef list_for_each_entry_safe_from
+#ifndef cfs_list_for_each_entry_safe_from
 /**
  * Iterate over a list continuing from an existing point
  * \param pos        the type * to use as a loop cursor.
@@ -490,44 +565,45 @@ static inline void hlist_add_after(struct hlist_node *n,
  * Iterate over list of given type from current point, safe against
  * removal of list entry.
  */
-#define list_for_each_entry_safe_from(pos, n, head, member)             \
-        for (n = list_entry(pos->member.next, typeof(*pos), member);    \
-             &pos->member != (head);                                    \
-             pos = n, n = list_entry(n->member.next, typeof(*n), member))
-#endif /* list_for_each_entry_safe_from */
+#define cfs_list_for_each_entry_safe_from(pos, n, head, member)             \
+        for (n = cfs_list_entry(pos->member.next, typeof(*pos), member);    \
+             &pos->member != (head);                                        \
+             pos = n, n = cfs_list_entry(n->member.next, typeof(*n), member))
+#endif /* cfs_list_for_each_entry_safe_from */
 
 #define cfs_list_for_each_entry_typed(pos, head, type, member)         \
-        for (pos = list_entry((head)->next, type, member),             \
-                    prefetch(pos->member.next);                        \
-            &pos->member != (head);                                    \
-            pos = list_entry(pos->member.next, type, member),          \
+        for (pos = cfs_list_entry((head)->next, type, member),         \
+                    prefetch(pos->member.next);                        \
+            &pos->member != (head);                                    \
+            pos = cfs_list_entry(pos->member.next, type, member),      \
             prefetch(pos->member.next))
 
 #define cfs_list_for_each_entry_reverse_typed(pos, head, type, member) \
-       for (pos = list_entry((head)->prev, type, member);              \
+       for (pos = cfs_list_entry((head)->prev, type, member);          \
             prefetch(pos->member.prev), &pos->member != (head);        \
-            pos = list_entry(pos->member.prev, type, member))
+            pos = cfs_list_entry(pos->member.prev, type, member))
 
 #define cfs_list_for_each_entry_safe_typed(pos, n, head, type, member) \
-    for (pos = list_entry((head)->next, type, member),                 \
-               n = list_entry(pos->member.next, type, member);         \
-            &pos->member != (head);                                    \
-            pos = n, n = list_entry(n->member.next, type, member))
-
-#define cfs_list_for_each_entry_safe_from_typed(pos, n, head, type, member)   \
-        for (n = list_entry(pos->member.next, type, member);            \
-             &pos->member != (head);                                    \
-             pos = n, n = list_entry(n->member.next, type, member))
+    for (pos = cfs_list_entry((head)->next, type, member),             \
+               n = cfs_list_entry(pos->member.next, type, member);     \
+            &pos->member != (head);                                    \
+            pos = n, n = cfs_list_entry(n->member.next, type, member))
+
+#define cfs_list_for_each_entry_safe_from_typed(pos, n, head, type, member)  \
+        for (n = cfs_list_entry(pos->member.next, type, member);             \
+             &pos->member != (head);                                         \
+             pos = n, n = cfs_list_entry(n->member.next, type, member))
+
 #define cfs_hlist_for_each_entry_typed(tpos, pos, head, type, member)   \
        for (pos = (head)->first;                                       \
             pos && (prefetch(pos->next), 1) &&                         \
-               (tpos = hlist_entry(pos, type, member), 1);             \
+               (tpos = cfs_hlist_entry(pos, type, member), 1);         \
             pos = pos->next)
 
-#define cfs_hlist_for_each_entry_safe_typed(tpos, pos, n, head, type, member)\
-       for (pos = (head)->first;                                       \
-            pos && (n = pos->next, 1) &&                               \
-               (tpos = hlist_entry(pos, type, member), 1);             \
+#define cfs_hlist_for_each_entry_safe_typed(tpos, pos, n, head, type, member) \
+       for (pos = (head)->first;                                             \
+            pos && (n = pos->next, 1) &&                                     \
+               (tpos = cfs_hlist_entry(pos, type, member), 1);               \
             pos = n)
 
 #endif /* __LIBCFS_LUSTRE_LIST_H__ */
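
With cfs_list_t aliased to struct list_head on Linux and implemented directly
elsewhere, one spelling now works in both worlds; the _typed iterators take an
explicit type so they also avoid typeof(). A short sketch (struct item is
invented for illustration):

    /* Illustrative use of the portable list API. */
    struct item {
            int        value;
            cfs_list_t link;
    };

    static CFS_LIST_HEAD(item_list);

    static void item_add(struct item *it)
    {
            cfs_list_add_tail(&it->link, &item_list);
    }

    static struct item *item_find(int value)
    {
            struct item *it;

            cfs_list_for_each_entry_typed(it, &item_list, struct item, link) {
                    if (it->value == value)
                            return it;
            }
            return NULL;
    }
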
index 2173ab9..c16d8d9 100644 (file)
 #include <libcfs/user-bitops.h>
 #include <libcfs/posix/posix-kernelcomm.h>
 
-# define do_gettimeofday(tv) gettimeofday(tv, NULL);
-typedef unsigned long long cycles_t;
+# define cfs_gettimeofday(tv) gettimeofday(tv, NULL);
+typedef unsigned long long cfs_cycles_t;
 
 #define IS_ERR(a) ((unsigned long)(a) > (unsigned long)-1000L)
 #define PTR_ERR(a) ((long)(a))
@@ -197,12 +197,14 @@ typedef struct dirent64 cfs_dirent_t;
 
 
 # ifndef THREAD_SIZE /* x86_64 linux has THREAD_SIZE in userspace */
-#  define THREAD_SIZE 8192
+#  define CFS_THREAD_SIZE 8192
+# else
+#  define CFS_THREAD_SIZE THREAD_SIZE
 # endif
 
-#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
+#define LUSTRE_TRACE_SIZE (CFS_THREAD_SIZE >> 5)
 
-#define CHECK_STACK() do { } while(0)
+#define CFS_CHECK_STACK() do { } while(0)
 #define CDEBUG_STACK() (0L)
 
 /* initial pid  */
@@ -225,10 +227,10 @@ typedef __u32 cfs_kernel_cap_t;
 /**
  * Module support (probably shouldn't be used in generic code?)
  */
-struct module {
+typedef struct cfs_module {
         int count;
         char *name;
-};
+} cfs_module_t;
 
 static inline void MODULE_AUTHOR(char *name)
 {
@@ -241,26 +243,26 @@ static inline void MODULE_AUTHOR(char *name)
 #define __init
 #define __exit
 
-static inline int request_module(const char *name, ...)
+static inline int cfs_request_module(const char *name, ...)
 {
         return (-EINVAL);
 }
 
-static inline void __module_get(struct module *module)
+static inline void __cfs_module_get(cfs_module_t *module)
 {
 }
 
-static inline int try_module_get(struct module *module)
+static inline int cfs_try_module_get(cfs_module_t *module)
 {
         return 1;
 }
 
-static inline void module_put(struct module *module)
+static inline void cfs_module_put(cfs_module_t *module)
 {
 }
 
 
-static inline int module_refcount(struct module *m)
+static inline int cfs_module_refcount(cfs_module_t *m)
 {
         return 1;
 }
@@ -271,20 +273,21 @@ static inline int module_refcount(struct module *m)
  *
  ***************************************************************************/
 
-struct shrinker {
+struct cfs_shrinker {
         ;
 };
 
-#define DEFAULT_SEEKS (0)
+#define CFS_DEFAULT_SEEKS (0)
 
-typedef int (*shrinker_t)(int, unsigned int);
+typedef int (*cfs_shrinker_t)(int, unsigned int);
 
-static inline struct shrinker *set_shrinker(int seeks, shrinker_t shrinkert)
+static inline
+struct cfs_shrinker *cfs_set_shrinker(int seeks, cfs_shrinker_t shrink)
 {
-        return (struct shrinker *)0xdeadbea1; // Cannot return NULL here
+        return (struct cfs_shrinker *)0xdeadbea1; // Cannot return NULL here
 }
 
-static inline void remove_shrinker(struct shrinker *shrinker)
+static inline void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
 {
 }
 
@@ -299,12 +302,12 @@ static inline void remove_shrinker(struct shrinker *shrinker)
  ***************************************************************************/
 
 struct radix_tree_root {
-        struct list_head list;
+        cfs_list_t list;
         void *rnode;
 };
 
 struct radix_tree_node {
-        struct list_head _node;
+        cfs_list_t _node;
         unsigned long index;
         void *item;
 };
@@ -334,7 +337,7 @@ static inline int radix_tree_insert(struct radix_tree_root *root,
         CFS_INIT_LIST_HEAD(&node->_node);
         node->index = idx;
         node->item = item;
-        list_add_tail(&node->_node, &root->list);
+        cfs_list_add_tail(&node->_node, &root->list);
         root->rnode = (void *)1001;
         return 0;
 }
@@ -344,7 +347,7 @@ static inline struct radix_tree_node *radix_tree_lookup0(struct radix_tree_root
 {
         struct radix_tree_node *node;
 
-        if (list_empty(&root->list))
+        if (cfs_list_empty(&root->list))
                 return NULL;
 
         cfs_list_for_each_entry_typed(node, &root->list,
@@ -374,10 +377,10 @@ static inline void *radix_tree_delete(struct radix_tree_root *root,
         if (p == NULL)
                 return NULL;
 
-        list_del_init(&p->_node);
+        cfs_list_del_init(&p->_node);
         item = p->item;
         free(p);
-        if (list_empty(&root->list))
+        if (cfs_list_empty(&root->list))
                 root->rnode = NULL;
 
         return item;
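
The userspace radix tree above is emulated as a linear cfs_list_t scan, so insert, lookup, and delete are all O(n); that is acceptable for liblustre's small trees. A hedged usage sketch (assuming radix_tree_lookup() wraps the radix_tree_lookup0() helper shown above, and initializing the root by hand since no INIT macro appears in this hunk):

    static void radix_demo(void)
    {
            struct radix_tree_root root;
            int payload = 7;                        /* hypothetical item */

            CFS_INIT_LIST_HEAD(&root.list);
            root.rnode = NULL;

            if (radix_tree_insert(&root, 42, &payload) == 0) {
                    void *found = radix_tree_lookup(&root, 42);
                    /* found == &payload */
                    radix_tree_delete(&root, 42);   /* frees the list node */
            }
    }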
index 1736a02..b5c66aa 100644 (file)
@@ -44,7 +44,9 @@
 
 #include <asm/types.h>
 #ifndef HAVE_UMODE_T
-typedef unsigned short umode_t;
+typedef unsigned short cfs_umode_t;
+#else
+typedef umode_t cfs_umode_t;
 #endif
 
 /*
index cd0d220..1b36407 100644 (file)
@@ -42,7 +42,7 @@
 #define __LIBCFS_USER_BITOPS_H__
 
 /* test if bit nr is set in bitmap addr; returns previous value of bit nr */
-static __inline__ int test_and_set_bit(int nr, unsigned long *addr)
+static __inline__ int cfs_test_and_set_bit(int nr, unsigned long *addr)
 {
         unsigned long mask;
 
@@ -53,10 +53,10 @@ static __inline__ int test_and_set_bit(int nr, unsigned long *addr)
         return nr;
 }
 
-#define set_bit(n, a) test_and_set_bit(n, a)
+#define cfs_set_bit(n, a) cfs_test_and_set_bit(n, a)
 
 /* clear bit nr in bitmap addr; returns previous value of bit nr*/
-static __inline__ int test_and_clear_bit(int nr, unsigned long *addr)
+static __inline__ int cfs_test_and_clear_bit(int nr, unsigned long *addr)
 {
         unsigned long mask;
 
@@ -67,16 +67,16 @@ static __inline__ int test_and_clear_bit(int nr, unsigned long *addr)
         return nr;
 }
 
-#define clear_bit(n, a) test_and_clear_bit(n, a)
+#define cfs_clear_bit(n, a) cfs_test_and_clear_bit(n, a)
 
-static __inline__ int test_bit(int nr, const unsigned long *addr)
+static __inline__ int cfs_test_bit(int nr, const unsigned long *addr)
 {
         return ((1UL << (nr & (BITS_PER_LONG - 1))) &
                 ((addr)[nr / BITS_PER_LONG])) != 0;
 }
 
 /* using binary search */
-static __inline__ unsigned long __fls(long data)
+static __inline__ unsigned long __cfs_fls(long data)
 {
        int pos = 32;
 
@@ -115,7 +115,7 @@ static __inline__ unsigned long __fls(long data)
        return pos;
 }
 
-static __inline__ unsigned long __ffs(long data)
+static __inline__ unsigned long __cfs_ffs(long data)
 {
         int pos = 0;
 
@@ -147,16 +147,17 @@ static __inline__ unsigned long __ffs(long data)
         return pos;
 }
 
-#define __ffz(x)       __ffs(~(x))
-#define __flz(x)       __fls(~(x))
+#define __cfs_ffz(x)   __cfs_ffs(~(x))
+#define __cfs_flz(x)   __cfs_fls(~(x))
 
-unsigned long find_next_bit(unsigned long *addr,
-                            unsigned long size, unsigned long offset);
+unsigned long cfs_find_next_bit(unsigned long *addr,
+                                unsigned long size, unsigned long offset);
 
-unsigned long find_next_zero_bit(unsigned long *addr,
-                                 unsigned long size, unsigned long offset);
+unsigned long cfs_find_next_zero_bit(unsigned long *addr,
+                                     unsigned long size, unsigned long offset);
 
-#define find_first_bit(addr,size)       (find_next_bit((addr),(size),0))
-#define find_first_zero_bit(addr,size)  (find_next_zero_bit((addr),(size),0))
+#define cfs_find_first_bit(addr,size)     (cfs_find_next_bit((addr),(size),0))
+#define cfs_find_first_zero_bit(addr,size)  \
+        (cfs_find_next_zero_bit((addr),(size),0))
 
 #endif
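
These userspace bit operations are plain loads and stores with no atomicity; cfs_set_bit() and cfs_clear_bit() are just the test-and-modify forms with the return value ignored. A short sketch of the renamed find/test semantics (illustrative, not from the patch):

    static void bitops_demo(void)
    {
            unsigned long map[2] = { 0, 0 };
            unsigned long first, next;
            int was_set;

            cfs_set_bit(3, map);
            cfs_set_bit(40, map);

            /* cfs_find_first_bit() is cfs_find_next_bit() from offset 0 */
            first = cfs_find_first_bit(map, 64);    /* -> 3 */
            next  = cfs_find_next_bit(map, 64, 4);  /* -> 40 */

            was_set = cfs_test_and_clear_bit(3, map);   /* -> nonzero */
            (void)first; (void)next; (void)was_set;
    }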
index 958519e..7c7a80c 100644 (file)
  */
 
 /*
- * spin_lock
+ * cfs_spin_lock
  *
- * - spin_lock_init(x)
- * - spin_lock(x)
- * - spin_unlock(x)
- * - spin_trylock(x)
+ * - cfs_spin_lock_init(x)
+ * - cfs_spin_lock(x)
+ * - cfs_spin_unlock(x)
+ * - cfs_spin_trylock(x)
+ * - cfs_spin_lock_bh_init(x)
+ * - cfs_spin_lock_bh(x)
+ * - cfs_spin_unlock_bh(x)
  *
- * - spin_lock_irqsave(x, f)
- * - spin_unlock_irqrestore(x, f)
+ * - cfs_spin_is_locked(x)
+ * - cfs_spin_lock_irqsave(x, f)
+ * - cfs_spin_unlock_irqrestore(x, f)
  *
  * No-op implementation.
  */
-struct spin_lock {int foo;};
+struct cfs_spin_lock {int foo;};
 
-typedef struct spin_lock spinlock_t;
+typedef struct cfs_spin_lock cfs_spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#define CFS_SPIN_LOCK_UNLOCKED (cfs_spinlock_t) { }
 #define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
 #define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
 #define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
 
-void spin_lock_init(spinlock_t *lock);
-void spin_lock(spinlock_t *lock);
-void spin_unlock(spinlock_t *lock);
-int spin_trylock(spinlock_t *lock);
-void spin_lock_bh_init(spinlock_t *lock);
-void spin_lock_bh(spinlock_t *lock);
-void spin_unlock_bh(spinlock_t *lock);
+void cfs_spin_lock_init(cfs_spinlock_t *lock);
+void cfs_spin_lock(cfs_spinlock_t *lock);
+void cfs_spin_unlock(cfs_spinlock_t *lock);
+int cfs_spin_trylock(cfs_spinlock_t *lock);
+void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
+void cfs_spin_lock_bh(cfs_spinlock_t *lock);
+void cfs_spin_unlock_bh(cfs_spinlock_t *lock);
 
-static inline int spin_is_locked(spinlock_t *l) {return 1;}
-static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f){}
-static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f){}
+static inline int cfs_spin_is_locked(cfs_spinlock_t *l) {return 1;}
+static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f){}
+static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
+                                              unsigned long f){}
 
 /*
  * Semaphore
  *
- * - sema_init(x, v)
+ * - cfs_sema_init(x, v)
  * - __down(x)
  * - __up(x)
  */
-typedef struct semaphore {
+typedef struct cfs_semaphore {
     int foo;
-} mutex_t;
+} cfs_semaphore_t;
 
-void sema_init(struct semaphore *s, int val);
-void __down(struct semaphore *s);
-void __up(struct semaphore *s);
-
-/*
- * Mutex:
- *
- * - init_mutex(x)
- * - init_mutex_locked(x)
- * - mutex_up(x)
- * - mutex_down(x)
- */
-#define DECLARE_MUTEX(name)     \
-        struct semaphore name = { 1 }
-
-#define mutex_up(s)                     __up(s)
-#define up(s)                           mutex_up(s)
-#define mutex_down(s)                   __down(s)
-#define down(s)                         mutex_down(s)
-
-#define init_MUTEX(x)                   sema_init(x, 1)
-#define init_MUTEX_LOCKED(x)            sema_init(x, 0)
-#define init_mutex(s)                   init_MUTEX(s)
+void cfs_sema_init(cfs_semaphore_t *s, int val);
+void __down(cfs_semaphore_t *s);
+void __up(cfs_semaphore_t *s);
 
 /*
  * Completion:
  *
- * - init_completion(c)
- * - complete(c)
- * - wait_for_completion(c)
+ * - cfs_init_completion_module(c)
+ * - cfs_call_wait_handler(t)
+ * - cfs_init_completion(c)
+ * - cfs_complete(c)
+ * - cfs_wait_for_completion(c)
+ * - cfs_wait_for_completion_interruptible(c)
  */
-struct completion {
+typedef struct {
         unsigned int done;
         cfs_waitq_t wait;
-};
+} cfs_completion_t;
+
 typedef int (*cfs_wait_handler_t) (int timeout);
-void init_completion_module(cfs_wait_handler_t handler);
-int  call_wait_handler(int timeout);
-void init_completion(struct completion *c);
-void complete(struct completion *c);
-void wait_for_completion(struct completion *c);
-int wait_for_completion_interruptible(struct completion *c);
-
-#define COMPLETION_INITIALIZER(work) \
+void cfs_init_completion_module(cfs_wait_handler_t handler);
+int  cfs_call_wait_handler(int timeout);
+void cfs_init_completion(cfs_completion_t *c);
+void cfs_complete(cfs_completion_t *c);
+void cfs_wait_for_completion(cfs_completion_t *c);
+int cfs_wait_for_completion_interruptible(cfs_completion_t *c);
+
+#define CFS_COMPLETION_INITIALIZER(work) \
         { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
 
-#define DECLARE_COMPLETION(work) \
-        struct completion work = COMPLETION_INITIALIZER(work)
+#define CFS_DECLARE_COMPLETION(work) \
+        cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)
 
-#define INIT_COMPLETION(x)      ((x).done = 0)
+#define CFS_INIT_COMPLETION(x)      ((x).done = 0)
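
These single-threaded completions make progress only through the handler registered via cfs_init_completion_module(); cfs_wait_for_completion() is expected to pump cfs_call_wait_handler() until done is set. A hedged usage sketch (my_wait_handler is hypothetical):

    static int my_wait_handler(int timeout)
    {
            /* e.g. drive liblustre network progress until timeout */
            return 0;
    }

    static void completion_demo(void)
    {
            cfs_completion_t c;

            cfs_init_completion_module(my_wait_handler);
            cfs_init_completion(&c);
            cfs_complete(&c);               /* sets c.done */
            cfs_wait_for_completion(&c);    /* returns at once: done != 0 */
    }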
 
 
 /*
- * rw_semaphore:
+ * cfs_rw_semaphore:
  *
- * - init_rwsem(x)
- * - down_read(x)
- * - up_read(x)
- * - down_write(x)
- * - up_write(x)
+ * - cfs_init_rwsem(x)
+ * - cfs_down_read(x)
+ * - cfs_down_read_trylock(x)
+ * - cfs_down_write(struct cfs_rw_semaphore *s);
+ * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
+ * - cfs_up_read(x)
+ * - cfs_up_write(x)
+ * - cfs_fini_rwsem(x)
  */
-struct rw_semaphore {
+typedef struct cfs_rw_semaphore {
         int foo;
-};
+} cfs_rw_semaphore_t;
 
-void init_rwsem(struct rw_semaphore *s);
-void down_read(struct rw_semaphore *s);
-int down_read_trylock(struct rw_semaphore *s);
-void down_write(struct rw_semaphore *s);
-int down_write_trylock(struct rw_semaphore *s);
-void up_read(struct rw_semaphore *s);
-void up_write(struct rw_semaphore *s);
-void fini_rwsem(struct rw_semaphore *s);
+void cfs_init_rwsem(cfs_rw_semaphore_t *s);
+void cfs_down_read(cfs_rw_semaphore_t *s);
+int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
+void cfs_down_write(cfs_rw_semaphore_t *s);
+int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
+void cfs_up_read(cfs_rw_semaphore_t *s);
+void cfs_up_write(cfs_rw_semaphore_t *s);
+void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
 
 /*
  * read-write lock : Need to be investigated more!!
  * XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
  *
- * - DECLARE_RWLOCK(l)
- * - rwlock_init(x)
- * - read_lock(x)
- * - read_unlock(x)
- * - write_lock(x)
- * - write_unlock(x)
+ * - cfs_rwlock_init(x)
+ * - cfs_read_lock(x)
+ * - cfs_read_unlock(x)
+ * - cfs_write_lock(x)
+ * - cfs_write_unlock(x)
+ * - cfs_write_lock_irqsave(x)
+ * - cfs_write_unlock_irqrestore(x)
+ * - cfs_read_lock_irqsave(x)
+ * - cfs_read_unlock_irqrestore(x)
  */
-typedef struct rw_semaphore rwlock_t;
-#define RW_LOCK_UNLOCKED        (rwlock_t) { }
+typedef cfs_rw_semaphore_t cfs_rwlock_t;
+#define CFS_RW_LOCK_UNLOCKED        (cfs_rwlock_t) { }
 
-#define rwlock_init(pl)         init_rwsem(pl)
+#define cfs_rwlock_init(pl)         cfs_init_rwsem(pl)
 
-#define read_lock(l)            down_read(l)
-#define read_unlock(l)          up_read(l)
-#define write_lock(l)           down_write(l)
-#define write_unlock(l)         up_write(l)
+#define cfs_read_lock(l)            cfs_down_read(l)
+#define cfs_read_unlock(l)          cfs_up_read(l)
+#define cfs_write_lock(l)           cfs_down_write(l)
+#define cfs_write_unlock(l)         cfs_up_write(l)
 
 static inline void
-write_lock_irqsave(rwlock_t *l, unsigned long f) { write_lock(l); }
+cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
 static inline void
-write_unlock_irqrestore(rwlock_t *l, unsigned long f) { write_unlock(l); }
+cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }
 
 static inline void
-read_lock_irqsave(rwlock_t *l, unsigned long f) { read_lock(l); }
+cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
 static inline void
-read_unlock_irqrestore(rwlock_t *l, unsigned long f) { read_unlock(l); }
+cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }
 
 /*
- * Atomic for user-space
- * Copied from liblustre
+ * Atomic for single-threaded user-space
  */
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(a) ((a)->counter)
-#define atomic_set(a,b) do {(a)->counter = b; } while (0)
-#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
-#define atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
-#define atomic_inc(a)  (((a)->counter)++)
-#define atomic_dec(a)  do { (a)->counter--; } while (0)
-#define atomic_add(b,a)  do {(a)->counter += b;} while (0)
-#define atomic_add_return(n,a) ((a)->counter += n)
-#define atomic_inc_return(a) atomic_add_return(1,a)
-#define atomic_sub(b,a)  do {(a)->counter -= b;} while (0)
-#define atomic_sub_return(n,a) ((a)->counter -= n)
-#define atomic_dec_return(a)  atomic_sub_return(1,a)
-#define atomic_add_unless(v, a, u) ((v)->counter != u ? (v)->counter += a : 0)
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+typedef struct { volatile int counter; } cfs_atomic_t;
 
+#define CFS_ATOMIC_INIT(i) { (i) }
+
+#define cfs_atomic_read(a) ((a)->counter)
+#define cfs_atomic_set(a,b) do {(a)->counter = b; } while (0)
+#define cfs_atomic_dec_and_test(a) ((--((a)->counter)) == 0)
+#define cfs_atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
+#define cfs_atomic_inc(a)  (((a)->counter)++)
+#define cfs_atomic_dec(a)  do { (a)->counter--; } while (0)
+#define cfs_atomic_add(b,a)  do {(a)->counter += b;} while (0)
+#define cfs_atomic_add_return(n,a) ((a)->counter += n)
+#define cfs_atomic_inc_return(a) cfs_atomic_add_return(1,a)
+#define cfs_atomic_sub(b,a)  do {(a)->counter -= b;} while (0)
+#define cfs_atomic_sub_return(n,a) ((a)->counter -= n)
+#define cfs_atomic_dec_return(a)  cfs_atomic_sub_return(1,a)
+#define cfs_atomic_add_unless(v, a, u) \
+        ((v)->counter != u ? (v)->counter += a : 0)
+#define cfs_atomic_inc_not_zero(v) cfs_atomic_add_unless((v), 1, 0)
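
These cfs_atomic_* macros compile to bare integer arithmetic with no barriers or locked instructions, which is only correct because this build is single-threaded; note that cfs_atomic_add_unless() yields 0 when the counter already equals u. Illustrative:

    static void atomic_demo(void)
    {
            cfs_atomic_t a = CFS_ATOMIC_INIT(2);
            int zero;

            zero = cfs_atomic_dec_and_test(&a);   /* 0: counter is now 1 */
            zero = cfs_atomic_dec_and_test(&a);   /* 1: counter is now 0 */

            /* refuses to resurrect a zero count */
            zero = cfs_atomic_inc_not_zero(&a);   /* 0: counter stays 0 */
            (void)zero;
    }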
 
 #ifdef HAVE_LIBPTHREAD
 #include <pthread.h>
 
 /*
- * Completion
+ * Multi-threaded user space completion APIs
  */
 
-struct cfs_completion {
+typedef struct {
         int c_done;
         pthread_cond_t c_cond;
         pthread_mutex_t c_mut;
-};
+} cfs_mt_completion_t;
 
-void cfs_init_completion(struct cfs_completion *c);
-void cfs_fini_completion(struct cfs_completion *c);
-void cfs_complete(struct cfs_completion *c);
-void cfs_wait_for_completion(struct cfs_completion *c);
+void cfs_mt_init_completion(cfs_mt_completion_t *c);
+void cfs_mt_fini_completion(cfs_mt_completion_t *c);
+void cfs_mt_complete(cfs_mt_completion_t *c);
+void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);
 
 /*
- * atomic.h
+ * Multi-threaded user space atomic APIs
  */
 
-typedef struct { volatile int counter; } cfs_atomic_t;
+typedef struct { volatile int counter; } cfs_mt_atomic_t;
 
-int cfs_atomic_read(cfs_atomic_t *a);
-void cfs_atomic_set(cfs_atomic_t *a, int b);
-int cfs_atomic_dec_and_test(cfs_atomic_t *a);
-void cfs_atomic_inc(cfs_atomic_t *a);
-void cfs_atomic_dec(cfs_atomic_t *a);
-void cfs_atomic_add(int b, cfs_atomic_t *a);
-void cfs_atomic_sub(int b, cfs_atomic_t *a);
+int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
+void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
+int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
+void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
+void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
+void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
+void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
 
 #endif /* HAVE_LIBPTHREAD */
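
The cfs_mt_* declarations above get real locking from pthreads; the implementations live elsewhere in libcfs. A plausible sketch of the completion pair, shown only to make the contract concrete (this is not the patch's code, hence the _sketch suffixes):

    #include <pthread.h>

    void cfs_mt_complete_sketch(cfs_mt_completion_t *c)
    {
            pthread_mutex_lock(&c->c_mut);
            c->c_done++;
            pthread_cond_signal(&c->c_cond);
            pthread_mutex_unlock(&c->c_mut);
    }

    void cfs_mt_wait_for_completion_sketch(cfs_mt_completion_t *c)
    {
            pthread_mutex_lock(&c->c_mut);
            while (c->c_done == 0)
                    pthread_cond_wait(&c->c_cond, &c->c_mut);
            c->c_done--;
            pthread_mutex_unlock(&c->c_mut);
    }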
 
@@ -280,26 +274,36 @@ void cfs_atomic_sub(int b, cfs_atomic_t *a);
  * Mutex interface.
  *
  **************************************************************************/
+#define CFS_DECLARE_MUTEX(name)     \
+        cfs_semaphore_t name = { 1 }
+
+#define cfs_mutex_up(s)                     __up(s)
+#define cfs_up(s)                           cfs_mutex_up(s)
+#define cfs_mutex_down(s)                   __down(s)
+#define cfs_down(s)                         cfs_mutex_down(s)
+
+#define cfs_init_mutex(x)                   cfs_sema_init(x, 1)
+#define cfs_init_mutex_locked(x)            cfs_sema_init(x, 0)
 
-struct mutex {
-        struct semaphore m_sem;
-};
+typedef struct cfs_mutex {
+        cfs_semaphore_t m_sem;
+} cfs_mutex_t;
 
-#define DEFINE_MUTEX(m) struct mutex m
+#define CFS_DEFINE_MUTEX(m) cfs_mutex_t m
 
-static inline void mutex_init(struct mutex *mutex)
+static inline void cfs_mutex_init(cfs_mutex_t *mutex)
 {
-        init_mutex(&mutex->m_sem);
+        cfs_init_mutex(&mutex->m_sem);
 }
 
-static inline void mutex_lock(struct mutex *mutex)
+static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
 {
-        mutex_down(&mutex->m_sem);
+        cfs_mutex_down(&mutex->m_sem);
 }
 
-static inline void mutex_unlock(struct mutex *mutex)
+static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
 {
-        mutex_up(&mutex->m_sem);
+        cfs_mutex_up(&mutex->m_sem);
 }
 
 /**
@@ -309,7 +313,7 @@ static inline void mutex_unlock(struct mutex *mutex)
  * \retval 0 try-lock succeeded (lock acquired).
  * \retval errno indicates lock contention.
  */
-static inline int mutex_down_trylock(struct mutex *mutex)
+static inline int cfs_mutex_down_trylock(cfs_mutex_t *mutex)
 {
         return 0;
 }
@@ -323,12 +327,12 @@ static inline int mutex_down_trylock(struct mutex *mutex)
  * \retval 1 try-lock succeeded (lock acquired).
  * \retval 0 indicates lock contention.
  */
-static inline int mutex_trylock(struct mutex *mutex)
+static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
 {
-        return !mutex_down_trylock(mutex);
+        return !cfs_mutex_down_trylock(mutex);
 }
 
-static inline void mutex_destroy(struct mutex *lock)
+static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
 {
 }
 
@@ -340,7 +344,7 @@ static inline void mutex_destroy(struct mutex *lock)
  *
  * \retval 0 mutex is not locked. This should never happen.
  */
-static inline int mutex_is_locked(struct mutex *lock)
+static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
 {
         return 1;
 }
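
Note the two try-lock conventions side by side: cfs_mutex_down_trylock() keeps the semaphore style (0 means acquired), while cfs_mutex_trylock() keeps the kernel-mutex style (1 means acquired); the negation above converts one into the other. Illustrative:

    static void trylock_demo(cfs_mutex_t *m)
    {
            if (cfs_mutex_trylock(m))               /* nonzero: acquired */
                    cfs_mutex_unlock(m);

            if (cfs_mutex_down_trylock(m) == 0)     /* zero: acquired */
                    cfs_mutex_unlock(m);
    }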
@@ -352,28 +356,27 @@ static inline int mutex_is_locked(struct mutex *lock)
  *
  **************************************************************************/
 
-struct lock_class_key {
+typedef struct cfs_lock_class_key {
         int foo;
-};
+} cfs_lock_class_key_t;
 
-static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
+static inline void cfs_lockdep_set_class(void *lock,
+                                         cfs_lock_class_key_t *key)
 {
 }
 
-static inline void lockdep_off(void)
+static inline void cfs_lockdep_off(void)
 {
 }
 
-static inline void lockdep_on(void)
+static inline void cfs_lockdep_on(void)
 {
 }
 
-/* This has to be a macro, so that can be undefined in kernels that do not
- * support lockdep. */
-#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
-#define spin_lock_nested(lock, subclass) spin_lock(lock)
-#define down_read_nested(lock, subclass) down_read(lock)
-#define down_write_nested(lock, subclass) down_write(lock)
+#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
+#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
+#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
+#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)
 
 
 /* !__KERNEL__ */
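
With lockdep stubbed out, the *_nested variants simply drop their subclass argument; they stay macros (the removed comment explained why: kernels without lockdep must be able to undefine them). A hedged example of how a caller expresses a legal lock-ordering exception:

    static void nested_demo(cfs_mutex_t *parent, cfs_mutex_t *child)
    {
            cfs_mutex_lock(parent);
            /* subclass 1 documents the ordering; ignored in this build */
            cfs_mutex_lock_nested(child, 1);
            cfs_mutex_unlock(child);
            cfs_mutex_unlock(parent);
    }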
index ab501b8..3e72e20 100644 (file)
@@ -18,7 +18,7 @@
 typedef struct page {
         void   *addr;
         unsigned long index;
-        struct list_head list;
+        cfs_list_t list;
         unsigned long private;
 
         /* internally used by liblustre file i/o */
@@ -27,7 +27,7 @@ typedef struct page {
 #ifdef LIBLUSTRE_HANDLE_UNALIGNED_PAGE
         int     _managed;
 #endif
-        struct list_head _node;
+        cfs_list_t _node;
 } cfs_page_t;
 
 
@@ -77,10 +77,10 @@ typedef struct {
          int size;
 } cfs_mem_cache_t;
 
-#define SLAB_HWCACHE_ALIGN 0
-#define SLAB_DESTROY_BY_RCU 0
-#define SLAB_KERNEL 0
-#define SLAB_NOFS 0
+#define CFS_SLAB_HWCACHE_ALIGN 0
+#define CFS_SLAB_DESTROY_BY_RCU 0
+#define CFS_SLAB_KERNEL 0
+#define CFS_SLAB_NOFS 0
 
 cfs_mem_cache_t *
 cfs_mem_cache_create(const char *, size_t, size_t, unsigned long);
@@ -92,13 +92,13 @@ int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
 /*
  * Copy to/from user
  */
-static inline int copy_from_user(void *a,void *b, int c)
+static inline int cfs_copy_from_user(void *a,void *b, int c)
 {
         memcpy(a,b,c);
         return 0;
 }
 
-static inline int copy_to_user(void *a,void *b, int c)
+static inline int cfs_copy_to_user(void *a,void *b, int c)
 {
         memcpy(a,b,c);
         return 0;
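
In userspace there is no kernel/user address-space split, so both copy helpers degenerate to memcpy() and always report success; callers keep the kernel-style error check unchanged. Sketch (error constants assume <errno.h>):

    static int fill_from_user(void *ubuf, int len)
    {
            char kbuf[64];

            if (len > (int)sizeof(kbuf))
                    return -EINVAL;
            if (cfs_copy_from_user(kbuf, ubuf, len))
                    return -EFAULT;         /* never taken in this build */
            return 0;
    }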
index a5c4213..a39a4d7 100644 (file)
@@ -59,31 +59,34 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
  * Just present a single processor until will add thread support.
  */
 #ifndef smp_processor_id
-#define smp_processor_id() 0
+#define cfs_smp_processor_id() 0
+#else
+#define cfs_smp_processor_id() smp_processor_id()
 #endif
 #ifndef num_online_cpus
-#define num_online_cpus() 1
+#define cfs_num_online_cpus() 1
+#else
+#define cfs_num_online_cpus() num_online_cpus()
 #endif
 #ifndef num_possible_cpus
-#define num_possible_cpus() 1
+#define cfs_num_possible_cpus() 1
+#else
+#define cfs_num_possible_cpus() num_possible_cpus()
 #endif
 
 /*
- * Wait Queue. 
+ * Wait Queue.
  */
 
 typedef struct cfs_waitlink {
-        struct list_head sleeping;
+        cfs_list_t sleeping;
         void *process;
 } cfs_waitlink_t;
 
 typedef struct cfs_waitq {
-        struct list_head sleepers;
+        cfs_list_t sleepers;
 } cfs_waitq_t;
 
-/* XXX: need to replace wake_up with cfs_waitq_signal() */
-#define wake_up(q) cfs_waitq_signal(q)
-
 /*
  * Task states
  */
@@ -114,14 +117,14 @@ typedef sigset_t                        cfs_sigset_t;
  */
 
 typedef struct {
-        struct list_head tl_list;
+        cfs_list_t tl_list;
         void (*function)(ulong_ptr_t unused);
         ulong_ptr_t data;
         long expires;
 } cfs_timer_t;
 
 
-#define in_interrupt()    (0)
+#define cfs_in_interrupt()    (0)
 
 typedef void cfs_psdev_t;
 
@@ -136,7 +139,7 @@ static inline int cfs_psdev_deregister(cfs_psdev_t *foo)
 }
 
 #define cfs_lock_kernel()               do {} while (0)
-#define cfs_sigfillset(l) do {}         while (0)
+#define cfs_sigfillset(l)               do {} while (0)
 #define cfs_recalc_sigpending(l)        do {} while (0)
 #define cfs_kernel_thread(l,m,n)        LBUG()
 #define cfs_kthread_run(fn,d,fmt,...)   LBUG()
@@ -178,6 +181,13 @@ struct cfs_stack_trace {
         })
 #endif
 
+/*
+ * Groups
+ */
+typedef struct cfs_group_info {
+
+} cfs_group_info_t;
+
 #ifndef min
 # define min(x,y) ((x)<(y) ? (x) : (y))
 #endif
index 5a5cc3e..4f2bc8b 100644 (file)
@@ -99,11 +99,8 @@ typedef time_t cfs_fs_time_t;
 typedef time_t cfs_time_t;
 typedef time_t cfs_duration_t;
 
-/* looks like linux */
-#define time_after(a, b) ((long)(b) - (long)(a) < 0)
-#define time_before(a, b) time_after(b,a)
-#define time_after_eq(a,b)      ((long)(a) - (long)(b) >= 0)
-#define time_before_eq(a,b) time_after_eq(b,a)
+#define cfs_time_before(a, b) ((long)(a) - (long)(b) < 0)
+#define cfs_time_beforeq(a, b) ((long)(b) - (long)(a) >= 0)
 
 static inline cfs_time_t cfs_time_current(void)
 {
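
The replacement comparators keep the jiffies-style wraparound-safe signed subtraction: cfs_time_before(a, b) matches the removed time_before(a, b), and cfs_time_beforeq(a, b) matches time_before_eq(a, b). An illustrative use:

    static int deadline_passed(cfs_time_t deadline)
    {
            cfs_time_t now = cfs_time_current();

            /* signed difference stays correct across cfs_time_t wrap */
            return !cfs_time_beforeq(now, deadline);
    }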
index 137dabe..8a17a83 100644 (file)
@@ -192,7 +192,7 @@ static inline void l_mutex_done(l_mutex_t *mutex)
 static inline void l_mutex_lock(l_mutex_t *mutex)
 {
 #if L_LOCK_DEBUG
-       printf("lock mutex  :%s\n", mutex->s_name);
+       printf("lock cfs_mutex  :%s\n", mutex->s_name);
 #endif
        sem_wait(mutex->s_sem);
 }
@@ -200,7 +200,7 @@ static inline void l_mutex_lock(l_mutex_t *mutex)
 static inline void l_mutex_unlock(l_mutex_t *mutex)
 {
 #if L_LOCK_DEBUG
-       printf("unlock mutex: %s\n", mutex->s_name);
+       printf("unlock cfs_mutex: %s\n", mutex->s_name);
 #endif
        sem_post(mutex->s_sem);
 }
index f2293ab..707f748 100644 (file)
@@ -44,9 +44,6 @@
 
 #define CFS_SYSFS_MODULE_PARM    0 /* no sysfs access to module parameters */
 
-#define cond_resched our_cond_resched
-void our_cond_resched();
-
 #define LASSERT_SPIN_LOCKED(lock) do {} while(0)
 #define LASSERT_SEM_LOCKED(sem) LASSERT(down_trylock(sem) != 0)
 
@@ -93,7 +90,7 @@ void libcfs_unregister_panic_notifier();
 #define cfs_assert     _ASSERT
 
 #ifndef get_cpu
-#define cfs_get_cpu() smp_processor_id()
+#define cfs_get_cpu() cfs_smp_processor_id()
 #define cfs_put_cpu() do { } while (0)
 #else
 #define cfs_get_cpu() get_cpu()
index 0423fe9..19e72f9 100644 (file)
@@ -92,17 +92,17 @@ static inline __u32 query_stack_size()
 
 #endif /* __KERNEL__*/
 
-#ifndef THREAD_SIZE
-# define THREAD_SIZE query_stack_size()
+#ifndef CFS_THREAD_SIZE
+# define CFS_THREAD_SIZE query_stack_size()
 #endif
 
-#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
+#define LUSTRE_TRACE_SIZE (CFS_THREAD_SIZE >> 5)
 
 #ifdef __KERNEL__
-#define CDEBUG_STACK() (THREAD_SIZE - (__u32)IoGetRemainingStackSize())
-#define CHECK_STACK() do {} while(0)
+#define CDEBUG_STACK() (CFS_THREAD_SIZE - (__u32)IoGetRemainingStackSize())
+#define CFS_CHECK_STACK() do {} while(0)
 #else /* !__KERNEL__ */
-#define CHECK_STACK() do { } while(0)
+#define CFS_CHECK_STACK() do { } while(0)
 #define CDEBUG_STACK() (0L)
 #endif /* __KERNEL__ */
 
index cb60fe2..87ac31c 100644 (file)
@@ -77,24 +77,24 @@ char * ul2dstr(ulong_ptr_t address, char *buf, int len);
 
 unsigned long simple_strtoul(const char *cp,char **endp, unsigned int base);
 
-static inline int set_bit(int nr, void * addr)
+static inline int cfs_set_bit(int nr, void * addr)
 {
     (((volatile ULONG *) addr)[nr >> 5]) |= (1UL << (nr & 31));
     return *((int *) addr);
 }
 
-static inline int test_bit(int nr, void * addr)
+static inline int cfs_test_bit(int nr, void * addr)
 {
     return (int)(((1UL << (nr & 31)) & (((volatile ULONG *) addr)[nr >> 5])) != 0);
 }
 
-static inline int clear_bit(int nr, void * addr)
+static inline int cfs_clear_bit(int nr, void * addr)
 {
     (((volatile ULONG *) addr)[nr >> 5]) &= (~(1UL << (nr & 31)));
     return *((int *) addr);
 }
 
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int cfs_test_and_set_bit(int nr, volatile void *addr)
 {
     int rc;
     unsigned char  mask;
@@ -108,11 +108,11 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
     return rc;
 }
 
-#define ext2_set_bit(nr,addr)   (set_bit(nr, addr), 0)
-#define ext2_clear_bit(nr,addr)        (clear_bit(nr, addr), 0)
-#define ext2_test_bit(nr,addr)  test_bit(nr, addr)
+#define ext2_set_bit(nr,addr)   (cfs_set_bit(nr, addr), 0)
+#define ext2_clear_bit(nr,addr)        (cfs_clear_bit(nr, addr), 0)
+#define ext2_test_bit(nr,addr)  cfs_test_bit(nr, addr)
 
-static inline int ffs(int x)
+static inline int cfs_ffs(int x)
 {
         int r = 1;
 
@@ -141,7 +141,7 @@ static inline int ffs(int x)
         return r;
 }
 
-static inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __cfs_ffs(unsigned long word)
 {
         int num = 0;
 
@@ -180,7 +180,7 @@ static inline unsigned long __ffs(unsigned long word)
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
 static inline
-int fls(int x)
+int cfs_fls(int x)
 {
         int r = 32;
 
@@ -209,14 +209,15 @@ int fls(int x)
         return r;
 }
 
-static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
+static inline unsigned cfs_find_first_bit(const unsigned long *addr,
+                                          unsigned size)
 {
         unsigned x = 0;
 
         while (x < size) {
                 unsigned long val = *addr++;
                 if (val)
-                        return __ffs(val) + x;
+                        return __cfs_ffs(val) + x;
                 x += (sizeof(*addr)<<3);
         }
         return x;
@@ -239,7 +240,7 @@ static inline void read_random(char *buf, int len)
     }
 }
 
-#define get_random_bytes(buf, len)  read_random(buf, len)
+#define cfs_get_random_bytes(buf, len)  read_random(buf, len)
 
 /* do NOT use function or expression as parameters ... */
 
@@ -264,13 +265,13 @@ static inline void read_random(char *buf, int len)
        ((unsigned char *)&addr)[1],    \
        ((unsigned char *)&addr)[0]
 
-static int copy_from_user(void *to, void *from, int c) 
+static int cfs_copy_from_user(void *to, void *from, int c) 
 {
     memcpy(to, from, c);
     return 0;
 }
 
-static int copy_to_user(void *to, const void *from, int c) 
+static int cfs_copy_to_user(void *to, const void *from, int c) 
 {
     memcpy(to, from, c);
     return 0;
@@ -296,8 +297,8 @@ clear_user(void __user *to, unsigned long n)
     0                           \
 )
 
-#define num_physpages                  (64 * 1024)
-#define CFS_NUM_CACHEPAGES             num_physpages
+#define cfs_num_physpages               (64 * 1024)
+#define CFS_NUM_CACHEPAGES              cfs_num_physpages
 
 #else
 
index e3d52c0..5f6db90 100644 (file)
@@ -91,7 +91,7 @@
 
 struct file_operations
 {
-    struct module *owner;
+    cfs_module_t *owner;
     loff_t (*llseek)(struct file * file, loff_t offset, int origin);
     ssize_t (*read) (struct file * file, char * buf, size_t nbytes, loff_t *ppos);
     ssize_t (*write)(struct file * file, const char * buffer,
@@ -240,14 +240,14 @@ struct inode {
         int             i_uid;
         int             i_gid;
         __u32           i_flags;
-        mutex_t         i_sem;
+        cfs_mutex_t     i_sem;
         void *          i_priv;
 };
 
 #define I_FREEING       0x0001
 
 struct dentry {
-        atomic_t        d_count;
+        cfs_atomic_t    d_count;
         struct {
             int         len;
             char *      name;
@@ -276,7 +276,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
     inode->i_size = i_size;
 }
 
-struct kstatfs {
+typedef struct cfs_kstatfs {
         u64     f_type;
         long    f_bsize;
         u64     f_blocks;
@@ -288,7 +288,7 @@ struct kstatfs {
         long    f_namelen;
         long    f_frsize;
         long    f_spare[5];
-};
+} cfs_kstatfs_t;
 
 struct super_block {
         void *  s_fs_info;
@@ -370,14 +370,15 @@ VOID RadixInitTable(IN PRTL_GENERIC_TABLE Table);
 /* all radix tree routines should be protected by external locks */
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
-                       unsigned long first_index, unsigned int max_items);
+                       unsigned long first_index, unsigned int max_items);
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index);
-int radix_tree_insert(struct radix_tree_root *root,unsigned long index, void *item);
+int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
+                      void *item);
 void *radix_tree_delete(struct radix_tree_root *root, unsigned long index);
 
-struct rcu_head {
+typedef struct cfs_rcu_head {
     int     foo;
-};
+} cfs_rcu_head_t;
 
 #else  /* !__KERNEL__ */
 
index cbff12d..4c7bd5e 100644 (file)
  *  spinlock & event definitions
  */
 
-typedef struct spin_lock spinlock_t;
+typedef struct cfs_spin_lock cfs_spinlock_t;
 
 /* atomic */
 
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile int counter; } cfs_atomic_t;
 
-#define ATOMIC_INIT(i) { i }
+#define CFS_ATOMIC_INIT(i)     { i }
 
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v,i)                (((v)->counter) = (i))
+#define cfs_atomic_read(v)     ((v)->counter)
+#define cfs_atomic_set(v,i)    (((v)->counter) = (i))
 
-void FASTCALL atomic_add(int i, atomic_t *v);
-void FASTCALL atomic_sub(int i, atomic_t *v);
+void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v);
+void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v);
 
-int FASTCALL atomic_sub_and_test(int i, atomic_t *v);
+int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v);
 
-void FASTCALL atomic_inc(atomic_t *v);
-void FASTCALL atomic_dec(atomic_t *v);
+void FASTCALL cfs_atomic_inc(cfs_atomic_t *v);
+void FASTCALL cfs_atomic_dec(cfs_atomic_t *v);
 
-int FASTCALL atomic_dec_and_test(atomic_t *v);
-int FASTCALL atomic_inc_and_test(atomic_t *v);
+int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v);
+int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v);
 
-int FASTCALL atomic_add_return(int i, atomic_t *v);
-int FASTCALL atomic_sub_return(int i, atomic_t *v);
+int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v);
+int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);
 
-#define atomic_inc_return(v)  atomic_add_return(1, v)
-#define atomic_dec_return(v)  atomic_sub_return(1, v)
+#define cfs_atomic_inc_return(v)  cfs_atomic_add_return(1, v)
+#define cfs_atomic_dec_return(v)  cfs_atomic_sub_return(1, v)
 
-int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock);
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock);
 
 /* event */
 
@@ -113,7 +113,7 @@ typedef KEVENT          event_t;
  *   N/A
  */
 static inline void
-    cfs_init_event(event_t *event, int type, int status)
+cfs_init_event(event_t *event, int type, int status)
 {
     KeInitializeEvent(
             event,
@@ -144,7 +144,7 @@ cfs_wait_event_internal(event_t * event, int64_t timeout)
     NTSTATUS        Status;
     LARGE_INTEGER   TimeOut;
 
-    TimeOut.QuadPart = -1 * (10000000/HZ) * timeout;
+    TimeOut.QuadPart = -1 * (10000000/CFS_HZ) * timeout;
 
     Status = KeWaitForSingleObject(
                 event,
@@ -215,40 +215,43 @@ cfs_clear_event(event_t * event)
  *
  */
 
-struct spin_lock {
+struct cfs_spin_lock {
     KSPIN_LOCK lock;
     KIRQL      irql;
 };
 
-#define CFS_DECL_SPIN(name)  spinlock_t name;
-#define CFS_DECL_SPIN_EXTERN(name)  extern spinlock_t name;
+#define CFS_DECL_SPIN(name)  cfs_spinlock_t name;
+#define CFS_DECL_SPIN_EXTERN(name)  extern cfs_spinlock_t name;
 
-#define SPIN_LOCK_UNLOCKED {0}
+#define CFS_SPIN_LOCK_UNLOCKED {0}
 
-static inline void spin_lock_init(spinlock_t *lock)
+static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
 {
     KeInitializeSpinLock(&(lock->lock));
 }
 
-static inline void spin_lock(spinlock_t *lock)
+static inline void cfs_spin_lock(cfs_spinlock_t *lock)
 {
     KeAcquireSpinLock(&(lock->lock), &(lock->irql));
 }
 
-static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
+static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass)
 {
     KeAcquireSpinLock(&(lock->lock), &(lock->irql));
 }
 
-static inline void spin_unlock(spinlock_t *lock)
+static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
 {
     KIRQL       irql = lock->irql;
     KeReleaseSpinLock(&(lock->lock), irql);
 }
 
 
-#define spin_lock_irqsave(lock, flags)         do {(flags) = 0; spin_lock(lock);} while(0)
-#define spin_unlock_irqrestore(lock, flags)    do {spin_unlock(lock);} while(0)
+#define cfs_spin_lock_irqsave(lock, flags)  \
+do {(flags) = 0; cfs_spin_lock(lock);} while(0)
+
+#define cfs_spin_unlock_irqrestore(lock, flags) \
+do {cfs_spin_unlock(lock);} while(0)
 
 
 /* There's no  corresponding routine in windows kernel.
@@ -258,7 +261,7 @@ static inline void spin_unlock(spinlock_t *lock)
 
 extern int libcfs_mp_system;
 
-static int spin_trylock(spinlock_t *lock)
+static int cfs_spin_trylock(cfs_spinlock_t *lock)
 {
     KIRQL   Irql;
     int     rc = 0;
@@ -295,7 +298,7 @@ static int spin_trylock(spinlock_t *lock)
     return rc;
 }
 
-static int spin_is_locked(spinlock_t *lock)
+static int cfs_spin_is_locked(cfs_spinlock_t *lock)
 {
 #if _WIN32_WINNT >= 0x502
     /* KeTestSpinLock only available on 2k3 server or later */
@@ -307,30 +310,29 @@ static int spin_is_locked(spinlock_t *lock)
 
 /* synchronization between cpus: it will disable all DPCs
    kernel task scheduler on the CPU */
-#define spin_lock_bh(x)                    spin_lock(x)
-#define spin_unlock_bh(x)          spin_unlock(x)
-#define spin_lock_bh_init(x)   spin_lock_init(x)
+#define cfs_spin_lock_bh(x)                cfs_spin_lock(x)
+#define cfs_spin_unlock_bh(x)      cfs_spin_unlock(x)
+#define cfs_spin_lock_bh_init(x)       cfs_spin_lock_init(x)
 
 /*
- * rw_semaphore (using ERESOURCE)
+ * cfs_rw_semaphore (using ERESOURCE)
  */
 
 
-typedef struct rw_semaphore {
+typedef struct cfs_rw_semaphore {
     ERESOURCE   rwsem;
-} rw_semaphore_t;
+} cfs_rw_semaphore_t;
 
 
-#define CFS_DECL_RWSEM(name) rw_semaphore_t name
-#define CFS_DECL_RWSEM_EXTERN(name) extern rw_semaphore_t name
-#define DECLARE_RWSEM CFS_DECL_RWSEM
+#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name
+#define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name
 
 /*
- * init_rwsem
- *   To initialize the the rw_semaphore_t structure
+ * cfs_init_rwsem
+ *   To initialize the cfs_rw_semaphore_t structure
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
@@ -339,18 +341,18 @@ typedef struct rw_semaphore {
  *   N/A
  */
 
-static inline void init_rwsem(rw_semaphore_t *s)
+static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
 {
        ExInitializeResourceLite(&s->rwsem);
 }
-#define rwsem_init init_rwsem
+#define rwsem_init cfs_init_rwsem
 
 /*
- * fini_rwsem
- *   To finilize/destroy the the rw_semaphore_t structure
+ * cfs_fini_rwsem
+ *   To finalize/destroy the cfs_rw_semaphore_t structure
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
@@ -360,18 +362,17 @@ static inline void init_rwsem(rw_semaphore_t *s)
  *   Just define it NULL for other systems.
  */
 
-static inline void fini_rwsem(rw_semaphore_t *s)
+static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
 {
     ExDeleteResourceLite(&s->rwsem);
 }
-#define rwsem_fini fini_rwsem
 
 /*
- * down_read
- *   To acquire read-lock of the rw_semahore
+ * cfs_down_read
+ *   To acquire read-lock of the cfs_rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
@@ -380,19 +381,19 @@ static inline void fini_rwsem(rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void down_read(struct rw_semaphore *s)
+static inline void cfs_down_read(cfs_rw_semaphore_t *s)
 {
        ExAcquireResourceSharedLite(&s->rwsem, TRUE);
 }
-#define down_read_nested down_read
+#define cfs_down_read_nested cfs_down_read
 
 
 /*
- * down_read_trylock
- *   To acquire read-lock of the rw_semahore without blocking
+ * cfs_down_read_trylock
+ *   To acquire read-lock of the cfs_rw_semaphore without blocking
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   Zero: failed to acquire the read lock
@@ -402,18 +403,18 @@ static inline void down_read(struct rw_semaphore *s)
  *   This routine will return immediately without waiting.
  */
 
-static inline int down_read_trylock(struct rw_semaphore *s)
+static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
 {
        return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
 }
 
 
 /*
- * down_write
- *   To acquire write-lock of the rw_semahore
+ * cfs_down_write
+ *   To acquire write-lock of the cfs_rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
@@ -422,18 +423,18 @@ static inline int down_read_trylock(struct rw_semaphore *s)
  *   N/A
  */
 
-static inline void down_write(struct rw_semaphore *s)
+static inline void cfs_down_write(cfs_rw_semaphore_t *s)
 {
        ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
 }
-#define down_write_nested down_write
+#define cfs_down_write_nested cfs_down_write
 
 /*
  * down_write_trylock
- *   To acquire write-lock of the rw_semahore without blocking
+ *   To acquire write-lock of the cfs_rw_semaphore without blocking
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   Zero: failed to acquire the write lock
@@ -443,18 +444,18 @@ static inline void down_write(struct rw_semaphore *s)
  *   This routine will return immediately without waiting.
  */
 
-static inline int down_write_trylock(struct rw_semaphore *s)
+static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
 {
     return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
 }
 
 
 /*
- * up_read
- *   To release read-lock of the rw_semahore
+ * cfs_up_read
+ *   To release read-lock of the cfs_rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
@@ -463,7 +464,7 @@ static inline int down_write_trylock(struct rw_semaphore *s)
  *   N/A
  */
 
-static inline void up_read(struct rw_semaphore *s)
+static inline void cfs_up_read(cfs_rw_semaphore_t *s)
 {
     ExReleaseResourceForThreadLite(
             &(s->rwsem),
@@ -472,11 +473,11 @@ static inline void up_read(struct rw_semaphore *s)
 
 
 /*
- * up_write
- *   To release write-lock of the rw_semahore
+ * cfs_up_write
+ *   To release write-lock of the cfs_rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
@@ -485,7 +486,7 @@ static inline void up_read(struct rw_semaphore *s)
  *   N/A
  */
 
-static inline void up_write(struct rw_semaphore *s)
+static inline void cfs_up_write(cfs_rw_semaphore_t *s)
 {
     ExReleaseResourceForThreadLite(
                 &(s->rwsem),
@@ -503,34 +504,37 @@ static inline void up_write(struct rw_semaphore *s)
  */
 
 typedef struct {
-    spinlock_t guard;
-    int        count;
-} rwlock_t;
+    cfs_spinlock_t guard;
+    int            count;
+} cfs_rwlock_t;
+
+void cfs_rwlock_init(cfs_rwlock_t * rwlock);
+void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
 
-void rwlock_init(rwlock_t * rwlock);
-void rwlock_fini(rwlock_t * rwlock);
+void cfs_read_lock(cfs_rwlock_t * rwlock);
+void cfs_read_unlock(cfs_rwlock_t * rwlock);
+void cfs_write_lock(cfs_rwlock_t * rwlock);
+void cfs_write_unlock(cfs_rwlock_t * rwlock);
 
-void read_lock(rwlock_t * rwlock);
-void read_unlock(rwlock_t * rwlock);
-void write_lock(rwlock_t * rwlock);
-void write_unlock(rwlock_t * rwlock);
+#define cfs_write_lock_irqsave(l, f)     do {f = 0; cfs_write_lock(l);} while(0)
+#define cfs_write_unlock_irqrestore(l, f)   do {cfs_write_unlock(l);} while(0)
+#define cfs_read_lock_irqsave(l, f)         do {f=0; cfs_read_lock(l);} while(0)
+#define cfs_read_unlock_irqrestore(l, f)    do {cfs_read_unlock(l);} while(0)
 
-#define write_lock_irqsave(l, f)        do {f = 0; write_lock(l);} while(0)
-#define write_unlock_irqrestore(l, f)   do {write_unlock(l);} while(0)
-#define read_lock_irqsave(l, f)                do {f=0; read_lock(l);} while(0)
-#define read_unlock_irqrestore(l, f)    do {read_unlock(l);} while(0)
+#define cfs_write_lock_bh   cfs_write_lock
+#define cfs_write_unlock_bh cfs_write_unlock
 
-#define write_lock_bh   write_lock
-#define write_unlock_bh write_unlock
+typedef struct cfs_lock_class_key {
+        int foo;
+} cfs_lock_class_key_t;
 
-struct lock_class_key {int foo;};
-#define lockdep_set_class(lock, class) do {} while(0)
+#define cfs_lockdep_set_class(lock, class) do {} while(0)
 
-static inline void lockdep_off(void)
+static inline void cfs_lockdep_off(void)
 {
 }
 
-static inline void lockdep_on(void)
+static inline void cfs_lockdep_on(void)
 {
 }
 
@@ -542,27 +546,27 @@ static inline void lockdep_on(void)
  * - __up(x)
  */
 
-struct semaphore{
+typedef struct cfs_semaphore {
        KSEMAPHORE sem;
-};
+} cfs_semaphore_t;
 
-static inline void sema_init(struct semaphore *s, int val)
+static inline void cfs_sema_init(cfs_semaphore_t *s, int val)
 {
        KeInitializeSemaphore(&s->sem, val, val);
 }
 
-static inline void __down(struct semaphore *s)
+static inline void __down(cfs_semaphore_t *s)
 {
    KeWaitForSingleObject( &(s->sem), Executive,
                           KernelMode, FALSE, NULL );
 
 }
-static inline void __up(struct semaphore *s)
+static inline void __up(cfs_semaphore_t *s)
 {
        KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
 }
 
-static inline int down_trylock(struct semaphore * s)
+static inline int down_trylock(cfs_semaphore_t *s)
 {
     LARGE_INTEGER  timeout = {0};
     NTSTATUS status =
@@ -585,10 +589,9 @@ static inline int down_trylock(struct semaphore * s)
  * - mutex_down(x)
  */
 
-#define mutex semaphore
-typedef struct semaphore mutex_t;
+typedef struct cfs_semaphore cfs_mutex_t;
 
-#define DECLARE_MUTEX(x) mutex_t x
+#define CFS_DECLARE_MUTEX(x) cfs_mutex_t x
 
 /*
  * init_mutex
@@ -603,13 +606,12 @@ typedef struct semaphore mutex_t;
  * Notes:
  *   N/A
  */
-#define mutex_init init_mutex
-static inline void init_mutex(mutex_t *mutex)
+#define cfs_mutex_init cfs_init_mutex
+static inline void cfs_init_mutex(cfs_mutex_t *mutex)
 {
-    sema_init(mutex, 1);
+    cfs_sema_init(mutex, 1);
 }
 
-#define init_MUTEX init_mutex
 /*
  * mutex_down
  *   To acquire the mutex lock
@@ -624,15 +626,15 @@ static inline void init_mutex(mutex_t *mutex)
  *   N/A
  */
 
-static inline void mutex_down(mutex_t *mutex)
+static inline void cfs_mutex_down(cfs_mutex_t *mutex)
 {
     __down(mutex);
 }
 
-#define mutex_lock(m) mutex_down(m)
-#define mutex_trylock(s) down_trylock(s)
-#define mutex_lock_nested(m) mutex_down(m)
-#define down(m)       mutex_down(m)
+#define cfs_mutex_lock(m) cfs_mutex_down(m)
+#define cfs_mutex_trylock(s) down_trylock(s)
+#define cfs_mutex_lock_nested(m) cfs_mutex_down(m)
+#define cfs_down(m)       cfs_mutex_down(m)
 
 /*
  * mutex_up
@@ -648,13 +650,13 @@ static inline void mutex_down(mutex_t *mutex)
  *   N/A
  */
 
-static inline void mutex_up(mutex_t *mutex)
+static inline void cfs_mutex_up(cfs_mutex_t *mutex)
 {
     __up(mutex);
 }
 
-#define mutex_unlock(m) mutex_up(m)
-#define up(m)           mutex_up(m)
+#define cfs_mutex_unlock(m) cfs_mutex_up(m)
+#define cfs_up(m)           cfs_mutex_up(m)
 
 /*
  * init_mutex_locked
@@ -670,15 +672,13 @@ static inline void mutex_up(mutex_t *mutex)
  *   N/A
  */
 
-static inline void init_mutex_locked(mutex_t *mutex)
+static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex)
 {
-    init_mutex(mutex);
-    mutex_down(mutex);
+    cfs_init_mutex(mutex);
+    cfs_mutex_down(mutex);
 }
 
-#define init_MUTEX_LOCKED init_mutex_locked
-
-static inline void mutex_destroy(mutex_t *mutex)
+static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
 {
 }
 
@@ -690,9 +690,9 @@ static inline void mutex_destroy(mutex_t *mutex)
  * - wait_for_completion(c)
  */
 
-struct completion {
+typedef struct {
        event_t  event;
-};
+} cfs_completion_t;
 
 
 /*
@@ -709,7 +709,7 @@ struct completion {
  *   N/A
  */
 
-static inline void init_completion(struct completion *c)
+static inline void cfs_init_completion(cfs_completion_t *c)
 {
        cfs_init_event(&(c->event), 1, FALSE);
 }
@@ -729,7 +729,7 @@ static inline void init_completion(struct completion *c)
  *   N/A
  */
 
-static inline void complete(struct completion *c)
+static inline void cfs_complete(cfs_completion_t *c)
 {
        cfs_wake_event(&(c->event));
 }
@@ -749,55 +749,17 @@ static inline void complete(struct completion *c)
  *   N/A
  */
 
-static inline void wait_for_completion(struct completion *c)
+static inline void cfs_wait_for_completion(cfs_completion_t *c)
 {
     cfs_wait_event_internal(&(c->event), 0);
 }
 
-static inline int wait_for_completion_interruptible(struct completion *c)
+static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
 {
     cfs_wait_event_internal(&(c->event), 0);
     return 0;
 }
 
-/*
- * spinlock "implementation"
- */
-
-typedef spinlock_t cfs_spinlock_t;
-
-#define cfs_spin_lock_init(lock) spin_lock_init(lock)
-#define cfs_spin_lock(lock)      spin_lock(lock)
-#define cfs_spin_lock_bh(lock)   spin_lock_bh(lock)
-#define cfs_spin_unlock(lock)    spin_unlock(lock)
-#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
-
-/*
- * rwlock "implementation"
- */
-
-typedef rwlock_t cfs_rwlock_t;
-
-#define cfs_rwlock_init(lock)      rwlock_init(lock)
-#define cfs_read_lock(lock)        read_lock(lock)
-#define cfs_read_unlock(lock)      read_unlock(lock)
-#define cfs_write_lock_bh(lock)    write_lock_bh(lock)
-#define cfs_write_unlock_bh(lock)  write_unlock_bh(lock)
-
-/*
- * atomic
- */
-
-typedef atomic_t cfs_atomic_t;
-
-#define cfs_atomic_read(atom)         atomic_read(atom)
-#define cfs_atomic_inc(atom)          atomic_inc(atom)
-#define cfs_atomic_dec(atom)          atomic_dec(atom)
-#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
-#define cfs_atomic_set(atom, value)   atomic_set(atom, value)
-#define cfs_atomic_add(value, atom)   atomic_add(value, atom)
-#define cfs_atomic_sub(value, atom)   atomic_sub(value, atom)
-
 #else  /* !__KERNEL__ */
 #endif /* !__KERNEL__ */
 #endif
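
The block deleted above was a thin alias layer mapping cfs_* names onto the bare Windows spinlock, rwlock, and atomic primitives; now that the primary definitions carry the cfs_ prefix themselves, the aliases would be self-referential and are dropped. Call sites convert mechanically, as this hypothetical before/after pair shows:

    /* before this patch */
    static void locked_op_old(void)
    {
            spinlock_t lock;

            spin_lock_init(&lock);
            spin_lock(&lock);
            /* ... critical section ... */
            spin_unlock(&lock);
    }

    /* after this patch */
    static void locked_op_new(void)
    {
            cfs_spinlock_t lock;

            cfs_spin_lock_init(&lock);
            cfs_spin_lock(&lock);
            /* ... critical section ... */
            cfs_spin_unlock(&lock);
    }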
index 7f3d7ae..407244f 100644 (file)
@@ -60,12 +60,12 @@ typedef struct cfs_mem_cache cfs_mem_cache_t;
 #define CFS_PAGE_MASK                   (~(PAGE_SIZE - 1))
 
 typedef struct cfs_page {
-    void *      addr;
-    atomic_t    count;
-    void *      private;
-    void *      mapping;
-    __u32       index;
-    __u32       flags;
+    void *          addr;
+    cfs_atomic_t    count;
+    void *          private;
+    void *          mapping;
+    __u32           index;
+    __u32           flags;
 } cfs_page_t;
 
 #define page cfs_page
@@ -108,42 +108,42 @@ typedef struct cfs_page {
 
 /* Make it prettier to test the above... */
 #define UnlockPage(page)        unlock_page(page)
-#define Page_Uptodate(page)     test_bit(PG_uptodate, &(page)->flags)
+#define Page_Uptodate(page)     cfs_test_bit(PG_uptodate, &(page)->flags)
 #define SetPageUptodate(page) \
        do {                                                            \
                arch_set_page_uptodate(page);                           \
-               set_bit(PG_uptodate, &(page)->flags);                   \
+               cfs_set_bit(PG_uptodate, &(page)->flags);               \
        } while (0)
-#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
-#define PageDirty(page)         test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page)      set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page)    clear_bit(PG_dirty, &(page)->flags)
-#define PageLocked(page)        test_bit(PG_locked, &(page)->flags)
-#define LockPage(page)          set_bit(PG_locked, &(page)->flags)
-#define TryLockPage(page)       test_and_set_bit(PG_locked, &(page)->flags)
-#define PageChecked(page)       test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page)    set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page)  clear_bit(PG_checked, &(page)->flags)
-#define PageLaunder(page)       test_bit(PG_launder, &(page)->flags)
-#define SetPageLaunder(page)    set_bit(PG_launder, &(page)->flags)
-#define ClearPageLaunder(page)  clear_bit(PG_launder, &(page)->flags)
-#define ClearPageArch1(page)    clear_bit(PG_arch_1, &(page)->flags)
-
-#define PageError(page)                test_bit(PG_error, &(page)->flags)
-#define SetPageError(page)     set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
-#define PageReferenced(page)    test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page)       clear_bit(PG_referenced, &(page)->flags)
-
-#define PageActive(page)        test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page)     set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page)   clear_bit(PG_active, &(page)->flags)
-
-#define PageWriteback(page)    test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback,      \
+#define ClearPageUptodate(page) cfs_clear_bit(PG_uptodate, &(page)->flags)
+#define PageDirty(page)         cfs_test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page)      cfs_set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page)    cfs_clear_bit(PG_dirty, &(page)->flags)
+#define PageLocked(page)        cfs_test_bit(PG_locked, &(page)->flags)
+#define LockPage(page)          cfs_set_bit(PG_locked, &(page)->flags)
+#define TryLockPage(page)       cfs_test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page)       cfs_test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page)    cfs_set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page)  cfs_clear_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page)       cfs_test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page)    cfs_set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page)  cfs_clear_bit(PG_launder, &(page)->flags)
+#define ClearPageArch1(page)    cfs_clear_bit(PG_arch_1, &(page)->flags)
+
+#define PageError(page)                cfs_test_bit(PG_error, &(page)->flags)
+#define SetPageError(page)     cfs_set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page)   cfs_clear_bit(PG_error, &(page)->flags)
+#define PageReferenced(page)    cfs_test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) cfs_set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) cfs_clear_bit(PG_referenced, &(page)->flags)
+
+#define PageActive(page)        cfs_test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page)     cfs_set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page)   cfs_clear_bit(PG_active, &(page)->flags)
+
+#define PageWriteback(page)    cfs_test_bit(PG_writeback, &(page)->flags)
+#define TestSetPageWriteback(page) cfs_test_and_set_bit(PG_writeback,  \
                                                        &(page)->flags)
-#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback,  \
+#define TestClearPageWriteback(page) cfs_test_and_clear_bit(PG_writeback, \
                                                        &(page)->flags)
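
The Page* wrappers keep their kernel-style names; only the underlying bit operations gain the cfs_ prefix. TryLockPage() follows test-and-set semantics, so 0 means the lock was newly acquired. Sketch (assuming unlock_page() is supplied elsewhere in this port):

    static int touch_page_demo(cfs_page_t *pg)
    {
            if (TryLockPage(pg))
                    return -1;      /* PG_locked was already set */
            SetPageDirty(pg);
            UnlockPage(pg);
            return 0;
    }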
 
 #define __GFP_FS    (1)
@@ -178,17 +178,17 @@ static inline void cfs_kunmap(cfs_page_t *page)
 
 static inline void cfs_get_page(cfs_page_t *page)
 {
-    atomic_inc(&page->count);
+    cfs_atomic_inc(&page->count);
 }
 
 static inline void cfs_put_page(cfs_page_t *page)
 {
-    atomic_dec(&page->count);
+    cfs_atomic_dec(&page->count);
 }
 
 static inline int cfs_page_count(cfs_page_t *page)
 {
-    return atomic_read(&page->count);
+    return cfs_atomic_read(&page->count);
 }
 
 #define cfs_page_index(p)       ((p)->index)
@@ -210,7 +210,7 @@ extern void  cfs_free_large(void *addr);
  * SLAB allocator
  */
 
-#define SLAB_HWCACHE_ALIGN             0
+#define CFS_SLAB_HWCACHE_ALIGN         0
 
 /* The cache name is limited to 20 chars */
 
@@ -221,26 +221,27 @@ struct cfs_mem_cache {
 };
 
 
-extern cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long);
-extern int cfs_mem_cache_destroy ( cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
+extern cfs_mem_cache_t *cfs_mem_cache_create (const char *, size_t, size_t,
+                                              unsigned long);
+extern int cfs_mem_cache_destroy (cfs_mem_cache_t * );
+extern void *cfs_mem_cache_alloc (cfs_mem_cache_t *, int);
+extern void cfs_mem_cache_free (cfs_mem_cache_t *, void *);
 
 /*
  * shrinker 
  */
 typedef int (*shrink_callback)(int nr_to_scan, gfp_t gfp_mask);
-struct shrinker {
-    shrink_callback cb;
+struct cfs_shrinker {
+        shrink_callback cb;
        int seeks;      /* seeks to recreate an obj */
 
        /* These are for internal use */
-       struct list_head list;
+       cfs_list_t list;
        long nr;        /* objs pending delete */
 };
 
-struct shrinker * set_shrinker(int seeks, shrink_callback cb);
-void remove_shrinker(struct shrinker *s);
+struct cfs_shrinker *cfs_set_shrinker(int seeks, shrink_callback cb);
+void cfs_remove_shrinker(struct cfs_shrinker *s);
 
 int start_shrinker_timer();
 void stop_shrinker_timer();
@@ -258,16 +259,15 @@ extern cfs_mem_cache_t *cfs_page_p_slab;
 #define CFS_MMSPACE_CLOSE   do {} while(0)
 
 
-#define mb()     do {} while(0)
-#define rmb()    mb()
-#define wmb()    mb()
-#define cfs_mb() mb()
+#define cfs_mb()     do {} while(0)
+#define rmb()        cfs_mb()
+#define wmb()        cfs_mb()
 
 /*
  * MM defintions from (linux/mm.h)
  */
 
-#define DEFAULT_SEEKS 2 /* shrink seek */
+#define CFS_DEFAULT_SEEKS 2 /* shrink seek */
 
 #else  /* !__KERNEL__ */
 
index 3414023..a7432fb 100644 (file)
@@ -96,10 +96,10 @@ void cfs_enter_debugger(void);
 #define CFS_SYMBOL_LEN     64
 
 struct  cfs_symbol {
-       char    name[CFS_SYMBOL_LEN];
-       void    *value;
-       int     ref;
-       struct  list_head sym_list;
+       char       name[CFS_SYMBOL_LEN];
+       void      *value;
+       int        ref;
+       cfs_list_t sym_list;
 };
 
 extern int      cfs_symbol_register(const char *, const void *);
@@ -260,7 +260,7 @@ struct ctl_table
 struct ctl_table_header
 {
        cfs_sysctl_table_t *    ctl_table;
-       struct list_head        ctl_entry;
+       cfs_list_t              ctl_entry;
 };
 
 /* proc root entries, support routines */
@@ -321,7 +321,7 @@ struct seq_file {
        size_t count;
        loff_t index;
        u32    version;
-       mutex_t lock;
+       cfs_mutex_t lock;
        const struct seq_operations *op;
        void *private;
 };
@@ -358,12 +358,9 @@ int seq_release_private(struct inode *, struct file *);
  * Helpers for iteration over list_head-s in seq_files
  */
 
-extern struct list_head *seq_list_start(struct list_head *head,
-               loff_t pos);
-extern struct list_head *seq_list_start_head(struct list_head *head,
-               loff_t pos);
-extern struct list_head *seq_list_next(void *v, struct list_head *head,
-               loff_t *ppos);
+extern cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos);
+extern cfs_list_t *seq_list_start_head(cfs_list_t *head, loff_t pos);
+extern cfs_list_t *seq_list_next(void *v, cfs_list_t *head, loff_t *ppos);
 
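The seq_list helpers now walk cfs_list_t heads; a hedged sketch of the usual ->start/->next wiring (my_list and the handler names are hypothetical):

static cfs_list_t my_list;                   /* hypothetical list head */

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
        return seq_list_start(&my_list, *pos);
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &my_list, pos);
}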
 /*
  *  declaration of proc kernel process routines
@@ -405,9 +402,9 @@ lustre_write_file( cfs_file_t *    fh,
 
 typedef int cfs_task_state_t;
 
-#define CFS_TASK_INTERRUPTIBLE 0x00000001
-#define CFS_TASK_UNINT         0x00000002
-#define CFS_TASK_RUNNING        0x00000003
+#define CFS_TASK_INTERRUPTIBLE  0x00000001
+#define CFS_TASK_UNINT          0x00000002
+#define CFS_TASK_RUNNING        0x00000003
 #define CFS_TASK_UNINTERRUPTIBLE CFS_TASK_UNINT
 
 #define CFS_WAITQ_MAGIC     'CWQM'
@@ -415,11 +412,11 @@ typedef int cfs_task_state_t;
 
 typedef struct cfs_waitq {
 
-    unsigned int        magic;
-    unsigned int        flags;
-    
-    spinlock_t          guard;
-    struct list_head    waiters;
+    unsigned int            magic;
+    unsigned int            flags;
+
+    cfs_spinlock_t          guard;
+    cfs_list_t              waiters;
 
 } cfs_waitq_t;
 
@@ -434,7 +431,7 @@ typedef struct cfs_waitlink cfs_waitlink_t;
 
 
 typedef struct cfs_waitlink_channel {
-    struct list_head        link;
+    cfs_list_t              link;
     cfs_waitq_t *           waitq;
     cfs_waitlink_t *        waitl;
 } cfs_waitlink_channel_t;
@@ -444,7 +441,7 @@ struct cfs_waitlink {
     unsigned int            magic;
     int                     flags;
     event_t  *              event;
-    atomic_t *              hits;
+    cfs_atomic_t *          hits;
 
     cfs_waitlink_channel_t  waitq[CFS_WAITQ_CHANNELS];
 };
@@ -465,7 +462,6 @@ typedef struct _cfs_thread_context {
 } cfs_thread_context_t;
 
 int cfs_kernel_thread(int (*func)(void *), void *arg, int flag);
-#define kernel_thread cfs_kernel_thread
 
 /*
  * thread creation flags from Linux, not used in winnt
@@ -490,44 +486,52 @@ int cfs_kernel_thread(int (*func)(void *), void *arg, int flag);
  */
 #define NGROUPS_SMALL           32
 #define NGROUPS_PER_BLOCK       ((int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
+typedef struct cfs_group_info {
         int ngroups;
-        atomic_t usage;
+        cfs_atomic_t usage;
         gid_t small_block[NGROUPS_SMALL];
         int nblocks;
         gid_t *blocks[0];
-};
+} cfs_group_info_t;
 
-#define get_group_info(group_info) do { \
-        atomic_inc(&(group_info)->usage); \
+#define cfs_get_group_info(group_info) do { \
+        cfs_atomic_inc(&(group_info)->usage); \
 } while (0)
 
-#define put_group_info(group_info) do { \
-        if (atomic_dec_and_test(&(group_info)->usage)) \
-                groups_free(group_info); \
+#define cfs_put_group_info(group_info) do { \
+        if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
+                cfs_groups_free(group_info); \
 } while (0)
 
-static __inline struct group_info *groups_alloc(int gidsetsize)
+static __inline cfs_group_info_t *cfs_groups_alloc(int gidsetsize)
 {
-    struct group_info * groupinfo;
+    cfs_group_info_t * groupinfo;
     KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
-    groupinfo = (struct group_info *)cfs_alloc(sizeof(struct group_info), 0);
+    groupinfo =
+        (cfs_group_info_t *)cfs_alloc(sizeof(cfs_group_info_t), 0);
+
     if (groupinfo) {
-        memset(groupinfo, 0, sizeof(struct group_info));
+        memset(groupinfo, 0, sizeof(cfs_group_info_t));
     }
     return groupinfo;
 }
-static __inline void groups_free(struct group_info *group_info)
+static __inline void cfs_groups_free(cfs_group_info_t *group_info)
 {
-    KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
+    KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+             __FUNCTION__));
     cfs_free(group_info);
 }
-static __inline int set_current_groups(struct group_info *group_info) {
-    KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
+static __inline int
+cfs_set_current_groups(cfs_group_info_t *group_info)
+{
+    KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+             __FUNCTION__));
     return 0;
 }
-static __inline int groups_search(struct group_info *group_info, gid_t grp) {
-    KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
+static __inline int groups_search(cfs_group_info_t *group_info,
+                                  gid_t grp) {
+    KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+             __FUNCTION__));
     return 0;
 }
 
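Taken together, the stubs above give winnt a refcounted cfs_group_info_t. A hedged round-trip; note that cfs_groups_alloc() zeroes the whole block, so a caller has to seed usage before the get/put macros mean anything:

static int groups_demo(void)
{
        cfs_group_info_t *gi = cfs_groups_alloc(NGROUPS_SMALL);

        if (gi == NULL)
                return -ENOMEM;
        cfs_atomic_set(&gi->usage, 1);   /* memset left usage at 0 */
        cfs_get_group_info(gi);          /* usage: 1 -> 2 */
        cfs_put_group_info(gi);          /* usage: 2 -> 1 */
        cfs_put_group_info(gi);          /* 1 -> 0: cfs_groups_free() runs */
        return 0;
}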
@@ -564,34 +568,34 @@ typedef __u32 cfs_kernel_cap_t;
  * Task struct
  */
 
-#define CFS_MAX_SCHEDULE_TIMEOUT ((long_ptr_t)(~0UL>>12))
-#define schedule_timeout(t)      cfs_schedule_timeout(0, t)
+#define CFS_MAX_SCHEDULE_TIMEOUT     ((long_ptr_t)(~0UL>>12))
+#define cfs_schedule_timeout(t)      cfs_schedule_timeout_and_set_state(0, t)
 
 struct vfsmount;
 
 #define NGROUPS 1
 #define CFS_CURPROC_COMM_MAX (16)
 typedef struct task_sruct{
-    mode_t           umask;
-    sigset_t         blocked;
-
-    pid_t            pid;
-    pid_t            pgrp;
-
-    uid_t            uid,euid,suid,fsuid;
-    gid_t            gid,egid,sgid,fsgid;
-
-    int              ngroups;
-    int              cgroups;
-    gid_t            groups[NGROUPS];
-    struct group_info *group_info;
-    cfs_kernel_cap_t cap_effective,
-                     cap_inheritable,
-                     cap_permitted;
-
-    char             comm[CFS_CURPROC_COMM_MAX];
-    void            *journal_info;
-    struct vfsmount *fs;
+    mode_t                umask;
+    sigset_t              blocked;
+
+    pid_t                 pid;
+    pid_t                 pgrp;
+
+    uid_t                 uid,euid,suid,fsuid;
+    gid_t                 gid,egid,sgid,fsgid;
+
+    int                   ngroups;
+    int                   cgroups;
+    gid_t                 groups[NGROUPS];
+    cfs_group_info_t     *group_info;
+    cfs_kernel_cap_t      cap_effective,
+                          cap_inheritable,
+                          cap_permitted;
+
+    char                  comm[CFS_CURPROC_COMM_MAX];
+    void                 *journal_info;
+    struct vfsmount      *fs;
 }  cfs_task_t;
 
 static inline void task_lock(cfs_task_t *t)
@@ -611,46 +615,46 @@ static inline void task_unlock(cfs_task_t *t)
 
 typedef struct _TASK_MAN {
 
-    ULONG       Magic;      /* Magic and Flags */
-    ULONG       Flags;
+    ULONG           Magic;      /* Magic and Flags */
+    ULONG           Flags;
 
-    spinlock_t  Lock;       /* Protection lock */
+    cfs_spinlock_t  Lock;       /* Protection lock */
 
-    cfs_mem_cache_t * slab; /* Memory slab for task slot */
+    cfs_mem_cache_t *slab; /* Memory slab for task slot */
 
-    ULONG       NumOfTasks; /* Total tasks (threads) */
-    LIST_ENTRY  TaskList;   /* List of task slots */
+    ULONG           NumOfTasks; /* Total tasks (threads) */
+    LIST_ENTRY      TaskList;   /* List of task slots */
 
 } TASK_MAN, *PTASK_MAN;
 
 typedef struct _TASK_SLOT {
 
-    ULONG       Magic;      /* Magic and Flags */
-    ULONG       Flags;
+    ULONG           Magic;      /* Magic and Flags */
+    ULONG           Flags;
 
-    LIST_ENTRY  Link;       /* To be linked to TaskMan */
+    LIST_ENTRY      Link;       /* To be linked to TaskMan */
 
-    event_t     Event;      /* Schedule event */
+    event_t         Event;      /* Schedule event */
 
-    HANDLE      Pid;        /* Process id */
-    HANDLE      Tid;        /* Thread id */
-    PETHREAD    Tet;        /* Pointer to ethread */
+    HANDLE          Pid;        /* Process id */
+    HANDLE          Tid;        /* Thread id */
+    PETHREAD        Tet;        /* Pointer to ethread */
 
-    atomic_t    count;      /* refer count */
-    atomic_t    hits;       /* times of waken event singaled */
+    cfs_atomic_t    count;      /* refer count */
+    cfs_atomic_t    hits;       /* times the wakeup event was signaled */
 
-    KIRQL       irql;       /* irql for rwlock ... */
+    KIRQL           irql;       /* irql for rwlock ... */
 
-    cfs_task_t  task;       /* linux task part */
+    cfs_task_t      task;       /* linux task part */
 
 } TASK_SLOT, *PTASK_SLOT;
 
 
 #define current                      cfs_current()
-#define set_current_state(s)         do {;} while (0)
-#define cfs_set_current_state(state) set_current_state(state)
+#define cfs_set_current_state(s)     do {;} while (0)
 
-#define wait_event(wq, condition)                               \
+#define cfs_wait_event(wq, condition)                           \
 do {                                                            \
         cfs_waitlink_t __wait;                                  \
                                                                 \
@@ -723,7 +727,7 @@ void    cleanup_task_manager();
 cfs_task_t * cfs_current();
 int     wake_up_process(cfs_task_t * task);
 void sleep_on(cfs_waitq_t *waitq);
-#define might_sleep() do {} while(0)
+#define cfs_might_sleep() do {} while(0)
 #define CFS_DECL_JOURNAL_DATA  
 #define CFS_PUSH_JOURNAL           do {;} while(0)
 #define CFS_POP_JOURNAL                    do {;} while(0)
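
A hedged sketch combining the renamed sleep primitives above (wait_for_flag is hypothetical):

static void wait_for_flag(cfs_waitq_t *wq, volatile int *flag)
{
        cfs_might_sleep();                /* a no-op on winnt, per above */
        cfs_wait_event(*wq, *flag != 0);
}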
@@ -738,14 +742,14 @@ void sleep_on(cfs_waitq_t *waitq);
 #define __init
 #endif
 
-struct module {
+typedef struct cfs_module {
     const char *name;
-};
+} cfs_module_t;
 
-extern struct module libcfs_global_module;
+extern cfs_module_t libcfs_global_module;
 #define THIS_MODULE  &libcfs_global_module
 
-#define request_module(x, y) (0)
+#define cfs_request_module(x, y) (0)
 #define EXPORT_SYMBOL(s)
 #define MODULE_AUTHOR(s)
 #define MODULE_DESCRIPTION(s)
@@ -768,9 +772,9 @@ extern struct module libcfs_global_module;
 
 /* Module interfaces */
 #define cfs_module(name, version, init, fini) \
-module_init(init);                            \
-module_exit(fini)
-#define module_refcount(x) (1)
+        module_init(init);                    \
+        module_exit(fini)
+#define cfs_module_refcount(x) (1)
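
A hedged sketch of a module declared through the re-indented cfs_module() wrapper (all names hypothetical); it expands to the module_init()/module_exit() pair shown above:

static int  my_init(void) { return 0; }
static void my_fini(void) { }

cfs_module(my_mod, "1.0.0", my_init, my_fini);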
 
 /*
  * typecheck
@@ -797,7 +801,7 @@ module_exit(fini)
 #define GOLDEN_RATIO_PRIME_32 0x9e370001UL
 
 #if 0 /* defined in libcfs/libcfs_hash.h */
-static inline u32 hash_long(u32 val, unsigned int bits)
+static inline u32 cfs_hash_long(u32 val, unsigned int bits)
 {
        /* On some cpus multiply is faster, on others gcc will do shifts */
        u32 hash = val * GOLDEN_RATIO_PRIME_32;
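
The hunk truncates the body here; in the classical golden-ratio construction this presumably finishes by keeping the top bits of the product, along these lines:

static inline u32 cfs_hash_long(u32 val, unsigned int bits)
{
        u32 hash = val * GOLDEN_RATIO_PRIME_32;

        /* the high bits of the product are the best mixed */
        return hash >> (32 - bits);
}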
@@ -844,7 +848,7 @@ libcfs_arch_cleanup(void);
  *  cache alignment size
  */
 
-#define L1_CACHE_ALIGN(x) (x)
+#define CFS_L1_CACHE_ALIGN(x) (x)
 
 #define __cacheline_aligned
 
@@ -854,11 +858,11 @@ libcfs_arch_cleanup(void);
 
 
 #define SMP_CACHE_BYTES             128
-#define NR_CPUS                     (32)
+#define CFS_NR_CPUS                 (32)
 #define smp_num_cpus                ((CCHAR)KeNumberProcessors)
-#define num_possible_cpus()         smp_num_cpus
-#define num_online_cpus()           smp_num_cpus
-#define smp_processor_id()                 ((USHORT)KeGetCurrentProcessorNumber())
+#define cfs_num_possible_cpus()     smp_num_cpus
+#define cfs_num_online_cpus()       smp_num_cpus
+#define cfs_smp_processor_id()      ((USHORT)KeGetCurrentProcessorNumber())
 #define smp_call_function(f, a, n, w)          do {} while(0)
 #define smp_rmb()                   do {} while(0)
 
@@ -866,22 +870,21 @@ libcfs_arch_cleanup(void);
  *  Irp related
  */
 
-#define NR_IRQS                                        512
-#define in_interrupt()                     (0)
-#define cfs_in_interrupt()                  in_interrupt()
+#define CFS_NR_IRQS                 512
+#define cfs_in_interrupt()          (0)
 
 /*
  *  printk flags
  */
 
-#define KERN_EMERG      "<0>"   /* system is unusable                   */
-#define KERN_ALERT      "<1>"   /* action must be taken immediately     */
-#define KERN_CRIT       "<2>"   /* critical conditions                  */
-#define KERN_ERR        "<3>"   /* error conditions                     */
-#define KERN_WARNING    "<4>"   /* warning conditions                   */
-#define KERN_NOTICE     "<5>"   /* normal but significant condition     */
-#define KERN_INFO       "<6>"   /* informational                        */
-#define KERN_DEBUG      "<7>"   /* debug-level messages                 */
+#define CFS_KERN_EMERG      "<0>"   /* system is unusable                   */
+#define CFS_KERN_ALERT      "<1>"   /* action must be taken immediately     */
+#define CFS_KERN_CRIT       "<2>"   /* critical conditions                  */
+#define CFS_KERN_ERR        "<3>"   /* error conditions                     */
+#define CFS_KERN_WARNING    "<4>"   /* warning conditions                   */
+#define CFS_KERN_NOTICE     "<5>"   /* normal but significant condition     */
+#define CFS_KERN_INFO       "<6>"   /* informational                        */
+#define CFS_KERN_DEBUG      "<7>"   /* debug-level messages                 */
 
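The level strings themselves are unchanged; only the macro names gain the prefix. A hedged call site, assuming the winnt printk shim keeps the Linux string-concatenation convention (as the debug.c hunks below do):

static void warn_usage(int pct)
{
        /* same "<4>" prefix, now spelled CFS_KERN_WARNING */
        printk(CFS_KERN_WARNING "Lustre: cache %d%% full\n", pct);
}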
 /*
  * Misc
@@ -897,8 +900,8 @@ libcfs_arch_cleanup(void);
 #define unlikely(exp) (exp)
 #endif
 
-#define lock_kernel()               do {} while(0)
-#define unlock_kernel()             do {} while(0)
+#define cfs_lock_kernel()               do {} while(0)
+#define cfs_unlock_kernel()             do {} while(0)
 
 #define local_irq_save(x)
 #define local_irq_restore(x)
@@ -1106,16 +1109,16 @@ void globfree(glob_t *__pglog);
  *  module routines
  */
 
-static inline void __module_get(struct module *module)
+static inline void __cfs_module_get(cfs_module_t *module)
 {
 }
 
-static inline int try_module_get(struct module *module)
+static inline int cfs_try_module_get(cfs_module_t *module)
 {
     return 1;
 }
 
-static inline void module_put(struct module *module)
+static inline void cfs_module_put(cfs_module_t *module)
 {
 }
 
@@ -1323,7 +1326,7 @@ static __inline void   __cdecl set_getenv(const char *ENV, const char *value, in
 
 int setenv(const char *envname, const char *envval, int overwrite);
 
-struct utsname {
+typedef struct utsname {
          char sysname[64];
          char nodename[64];
          char release[128];
index 0fe6230..dee9b8a 100644 (file)
@@ -166,14 +166,14 @@ typedef VOID (*ks_schedule_cb)(struct socket*, int);
 
 typedef struct _KS_TSDU {
 
-    ULONG               Magic;          /* magic */
-    ULONG               Flags;          /* flags */
+    ULONG                 Magic;          /* magic */
+    ULONG                 Flags;          /* flags */
 
-    struct list_head    Link;           /* link list */
+    cfs_list_t            Link;           /* link list */
 
-    ULONG               TotalLength;    /* total size of KS_TSDU */
-    ULONG               StartOffset;    /* offset of the first Tsdu unit */
-    ULONG               LastOffset;     /* end offset of the last Tsdu unit */
+    ULONG                 TotalLength;    /* total size of KS_TSDU */
+    ULONG                 StartOffset;    /* offset of the first Tsdu unit */
+    ULONG                 LastOffset;     /* end offset of the last Tsdu unit */
 
 /*
     union {
@@ -234,35 +234,35 @@ typedef struct _KS_TSDU_MDL {
 } KS_TSDU_MDL, *PKS_TSDU_MDL;
 
 typedef struct ks_engine_mgr {
-    spinlock_t              lock;
+    cfs_spinlock_t          lock;
     int                     stop;
     event_t                 exit;
     event_t                 start;
-    struct list_head        list;
+    cfs_list_t              list;
 } ks_engine_mgr_t;
 
 typedef struct ks_engine_slot {
     ks_tconn_t *            tconn;
     void *                  tsdumgr;
-    struct list_head        link;
+    cfs_list_t              link;
     int                     queued;
     ks_engine_mgr_t *       emgr;
 } ks_engine_slot_t;
 
 typedef struct _KS_TSDUMGR {
-    struct list_head        TsduList;
+    cfs_list_t              TsduList;
     ULONG                   NumOfTsdu;
     ULONG                   TotalBytes;
     KEVENT                  Event;
-    spinlock_t              Lock;
+    cfs_spinlock_t          Lock;
     ks_engine_slot_t        Slot;
     ULONG                   Payload;
     int                     Busy:1;
     int                     OOB:1;
 } KS_TSDUMGR, *PKS_TSDUMGR;
 
-#define ks_lock_tsdumgr(mgr)   spin_lock(&((mgr)->Lock))
-#define ks_unlock_tsdumgr(mgr) spin_unlock(&((mgr)->Lock))
+#define ks_lock_tsdumgr(mgr)   cfs_spin_lock(&((mgr)->Lock))
+#define ks_unlock_tsdumgr(mgr) cfs_spin_unlock(&((mgr)->Lock))
 
 typedef struct _KS_CHAIN {
     KS_TSDUMGR          Normal;      /* normal queue */
@@ -353,19 +353,19 @@ typedef KS_DISCONNECT_WORKITEM      ks_disconnect_t;
 
 typedef struct ks_backlogs {
 
-        struct list_head    list;   /* list to link the backlog connections */
-        int                 num;    /* number of backlogs in the list */
+        cfs_list_t           list;   /* list to link the backlog connections */
+        int                  num;    /* number of backlogs in the list */
 
 } ks_backlogs_t;
 
 
 typedef struct ks_daemon {
 
-    ks_tconn_t *            tconn;         /* the listener connection object */
-    unsigned short          nbacklogs;     /* number of listening backlog conns */
-    unsigned short          port;          /* listening port number */ 
-    int                     shutdown;      /* daemon threads is to exit */
-    struct list_head        list;          /* to be attached into ks_nal_data_t */
+    ks_tconn_t *            tconn;       /* the listener connection object */
+    unsigned short          nbacklogs;   /* number of listening backlog conns */
+    unsigned short          port;        /* listening port number */ 
+    int                     shutdown;    /* daemon threads are to exit */
+    cfs_list_t              list;        /* to be attached into ks_nal_data_t */
 
 } ks_daemon_t;
 
@@ -425,7 +425,7 @@ struct socket {
         ulong                       kstc_magic;      /* Magic & Flags */
         ulong                       kstc_flags;
 
-        spinlock_t                  kstc_lock;       /* serialise lock*/
+        cfs_spinlock_t              kstc_lock;       /* serialise lock */
         void *                      kstc_conn;       /* ks_conn_t */
 
         ks_tconn_type_t             kstc_type;          /* tdi connection Type */
@@ -435,9 +435,9 @@ struct socket {
 
         ks_tdi_addr_t               kstc_addr;       /* local address handlers / Objects */
 
-        atomic_t                    kstc_refcount;   /* reference count of ks_tconn_t */
+        cfs_atomic_t                kstc_refcount;   /* reference count of ks_tconn_t */
 
-        struct list_head            kstc_list;       /* linked to global ksocknal_data */
+        cfs_list_t                  kstc_list;       /* linked to global ksocknal_data */
 
         union {
 
@@ -451,19 +451,19 @@ struct socket {
             } listener; 
 
             struct  {
-                ks_tconn_info_t     kstc_info;      /* Connection Info if Connected */
-                ks_chain_t          kstc_recv;      /* tsdu engine for data receiving */
-                ks_chain_t          kstc_send;      /* tsdu engine for data sending */
+                ks_tconn_info_t       kstc_info;      /* Connection Info if Connected */
+                ks_chain_t            kstc_recv;      /* tsdu engine for data receiving */
+                ks_chain_t            kstc_send;      /* tsdu engine for data sending */
 
-                int                 kstc_queued;    /* Attached to Parent->ChildList ... */
-                int                 kstc_queueno;   /* 0: Attached to Listening list 
+                int                   kstc_queued;    /* Attached to Parent->ChildList ... */
+                int                   kstc_queueno;   /* 0: Attached to Listening list 
                                                        1: Attached to Accepted list */
 
-                int                 kstc_busy;      /* referred by ConnectEventCallback ? */
-                int                 kstc_accepted;  /* the connection is built ready ? */
+                int                   kstc_busy;      /* referred by ConnectEventCallback ? */
+                int                   kstc_accepted;  /* the connection is built ready ? */
 
-                struct list_head    kstc_link;      /* linked to parent tdi connection */
-                ks_tconn_t   *      kstc_parent;    /* pointers to it's listener parent */
+                cfs_list_t            kstc_link;      /* linked to parent tdi connection */
+                ks_tconn_t   *        kstc_parent;    /* pointer to its listener parent */
             } child;
 
             struct {
@@ -621,39 +621,39 @@ typedef struct {
      * Tdi client information
      */
 
-    UNICODE_STRING    ksnd_client_name; /* tdi client module name */
-    HANDLE            ksnd_pnp_handle;  /* the handle for pnp changes */
+    UNICODE_STRING        ksnd_client_name; /* tdi client module name */
+    HANDLE                ksnd_pnp_handle;  /* the handle for pnp changes */
 
-    spinlock_t        ksnd_addrs_lock;  /* serialize ip address list access */
-    LIST_ENTRY        ksnd_addrs_list;  /* list of the ip addresses */
-    int               ksnd_naddrs;      /* number of the ip addresses */
+    cfs_spinlock_t        ksnd_addrs_lock;  /* serialize ip address list access */
+    LIST_ENTRY            ksnd_addrs_list;  /* list of the ip addresses */
+    int                   ksnd_naddrs;      /* number of the ip addresses */
 
     /*
      *  Tdilnd internal definitions
      */
 
-    int               ksnd_init;            /* initialisation state */
+    int                   ksnd_init;            /* initialisation state */
 
-    TDI_PROVIDER_INFO ksnd_provider;        /* tdi tcp/ip provider's information */
+    TDI_PROVIDER_INFO     ksnd_provider;        /* tdi tcp/ip provider's information */
 
-    spinlock_t        ksnd_tconn_lock;      /* tdi connections access serialise */
+    cfs_spinlock_t        ksnd_tconn_lock;      /* tdi connections access serialise */
 
-    int               ksnd_ntconns;         /* number of tconns attached in list */
-    struct list_head  ksnd_tconns;          /* tdi connections list */
-    cfs_mem_cache_t * ksnd_tconn_slab;      /* slabs for ks_tconn_t allocations */
-    event_t           ksnd_tconn_exit;      /* exit event to be signaled by the last tconn */
+    int                   ksnd_ntconns;         /* number of tconns attached in list */
+    cfs_list_t            ksnd_tconns;          /* tdi connections list */
+    cfs_mem_cache_t *     ksnd_tconn_slab;      /* slabs for ks_tconn_t allocations */
+    event_t               ksnd_tconn_exit;      /* exit event to be signaled by the last tconn */
 
-    spinlock_t        ksnd_tsdu_lock;       /* tsdu access serialise */
+    cfs_spinlock_t        ksnd_tsdu_lock;       /* tsdu access serialise */
         
-    int               ksnd_ntsdus;          /* number of tsdu buffers allocated */
-    ulong             ksnd_tsdu_size;       /* the size of a signel tsdu buffer */
-    cfs_mem_cache_t * ksnd_tsdu_slab;     /* slab cache for tsdu buffer allocation */
+    int                   ksnd_ntsdus;          /* number of tsdu buffers allocated */
+    ulong                 ksnd_tsdu_size;       /* the size of a single tsdu buffer */
+    cfs_mem_cache_t       *ksnd_tsdu_slab;       /* slab cache for tsdu buffer allocation */
 
-    int               ksnd_nfreetsdus;      /* number of tsdu buffers in the freed list */
-    struct list_head  ksnd_freetsdus;       /* List of the freed Tsdu buffer. */
+    int                   ksnd_nfreetsdus;      /* number of tsdu buffers in the freed list */
+    cfs_list_t            ksnd_freetsdus;       /* List of the freed Tsdu buffer. */
 
-    int               ksnd_engine_nums;     /* number of tcp sending engine threads */
-    ks_engine_mgr_t ksnd_engine_mgr;      /* tcp sending engine structure */
+    int                   ksnd_engine_nums;     /* number of tcp sending engine threads */
+    ks_engine_mgr_t       *ksnd_engine_mgr;      /* tcp sending engine structure */
 
 } ks_tdi_data_t;
 
index 0a376b7..8ce1927 100644 (file)
@@ -96,7 +96,7 @@ typedef time_t cfs_duration_t;
 
 #include <libcfs/winnt/portals_compat25.h>
 
-#define HZ (100)
+#define CFS_HZ (100)
 
 struct timespec {
     __u32   tv_sec;
@@ -115,7 +115,7 @@ typedef struct timeval cfs_fs_time_t;
 #define jiffies     (ULONG_PTR)JIFFIES()
 #define cfs_jiffies (ULONG_PTR)JIFFIES()
 
-static inline void do_gettimeofday(struct timeval *tv)
+static inline void cfs_gettimeofday(struct timeval *tv)
 {
     LARGE_INTEGER Time;
 
@@ -125,8 +125,6 @@ static inline void do_gettimeofday(struct timeval *tv)
     tv->tv_usec = (suseconds_t) (Time.QuadPart % 10000000) / 10;
 }
 
-#define cfs_do_gettimeofday(tv) do_gettimeofday(tv)
-
 static inline LONGLONG JIFFIES()
 {
     LARGE_INTEGER Tick;
@@ -135,7 +133,7 @@ static inline LONGLONG JIFFIES()
     KeQueryTickCount(&Tick);
 
     Elapse.QuadPart  = Tick.QuadPart * KeQueryTimeIncrement();
-    Elapse.QuadPart /= (10000000 / HZ);
+    Elapse.QuadPart /= (10000000 / CFS_HZ);
 
     return Elapse.QuadPart;
 }
@@ -147,11 +145,11 @@ static inline cfs_time_t cfs_time_current(void)
 
 static inline time_t cfs_time_current_sec(void)
 {
-    return (time_t)(JIFFIES() / HZ);
+    return (time_t)(JIFFIES() / CFS_HZ);
 }
 
-#define time_before(t1, t2) (((signed)(t1) - (signed)(t2)) < 0) 
-#define time_before_eq(t1, t2) (((signed)(t1) - (signed)(t2)) <= 0) 
+#define cfs_time_before(t1, t2) (((signed)(t1) - (signed)(t2)) < 0)
+#define cfs_time_beforeq(t1, t2) (((signed)(t1) - (signed)(t2)) <= 0)
 
 static inline void cfs_fs_time_current(cfs_fs_time_t *t)
 {
@@ -195,26 +193,26 @@ static inline int cfs_fs_time_beforeq(cfs_fs_time_t *t1, cfs_fs_time_t *t2)
 
 static inline cfs_duration_t cfs_time_seconds(cfs_duration_t seconds)
 {
-    return  (cfs_duration_t)(seconds * HZ);
+    return  (cfs_duration_t)(seconds * CFS_HZ);
 }
 
 static inline time_t cfs_duration_sec(cfs_duration_t d)
 {
-    return (time_t)(d / HZ);
+    return (time_t)(d / CFS_HZ);
 }
 
 static inline void cfs_duration_usec(cfs_duration_t d, struct timeval *s)
 {
-    s->tv_sec = (__u32)(d / HZ);
-    s->tv_usec = (__u32)((d - (cfs_duration_t)s->tv_sec * HZ) *
-                              ONE_MILLION / HZ);
+    s->tv_sec = (__u32)(d / CFS_HZ);
+    s->tv_usec = (__u32)((d - (cfs_duration_t)s->tv_sec * CFS_HZ) *
+                              ONE_MILLION / CFS_HZ);
 }
 
 static inline void cfs_duration_nsec(cfs_duration_t d, struct timespec *s)
 {
-    s->tv_sec = (__u32) (d / HZ);
-    s->tv_nsec = (__u32)((d - (cfs_duration_t)s->tv_sec * HZ) *
-                           ONE_BILLION / HZ);
+    s->tv_sec = (__u32) (d / CFS_HZ);
+    s->tv_nsec = (__u32)((d - (cfs_duration_t)s->tv_sec * CFS_HZ) *
+                           ONE_BILLION / CFS_HZ);
 }
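
With CFS_HZ fixed at 100 (a 10 ms tick), the conversions above work out as follows; duration_demo is a hypothetical check:

static void duration_demo(void)
{
        struct timeval tv;

        /* cfs_time_seconds(5)   == 5 * CFS_HZ   == 500 ticks  */
        /* cfs_duration_sec(500) == 500 / CFS_HZ == 5 seconds  */
        cfs_duration_usec(cfs_time_seconds(5) / 2, &tv);
        /* 250 ticks -> tv.tv_sec == 2, tv.tv_usec == 500000   */
}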
 
 static inline void cfs_fs_time_usec(cfs_fs_time_t *t, struct timeval *v)
@@ -278,7 +276,7 @@ struct timespec {
 /* liblustre. time(2) based implementation. */
 int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
 void sleep(int time);
-void do_gettimeofday(struct timeval *tv);
+void cfs_gettimeofday(struct timeval *tv);
 int gettimeofday(struct timeval *tv, void * tz);
 
 #endif /* !__KERNEL__ */
index 95660e1..2161330 100644 (file)
@@ -130,13 +130,13 @@ typedef     __u32       suseconds_t;
 typedef     __u16       uid_t, gid_t;
 
 typedef     __u16       mode_t;
-typedef     __u16       umode_t;
+typedef     __u16       cfs_umode_t;
 
 typedef     __u32       sigset_t;
 
 typedef int64_t         loff_t;
 typedef void *          cfs_handle_t;
-typedef uint64_t        cycles_t;
+typedef uint64_t        cfs_cycles_t;
 
 #ifndef INVALID_HANDLE_VALUE
 #define INVALID_HANDLE_VALUE ((HANDLE)-1)
index 0052094..74eb1e3 100644 (file)
@@ -162,7 +162,7 @@ int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
 
 void
 set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
-                   const int line, unsigned long stack)
+                    const int line, unsigned long stack)
 {
        struct timeval tv;
        
index 8f0dbde..e637150 100644 (file)
@@ -110,16 +110,16 @@ CFS_MODULE_PARM(libcfs_panic_on_lbug, "i", uint, 0644,
                 "Lustre kernel panic on LBUG");
 EXPORT_SYMBOL(libcfs_panic_on_lbug);
 
-atomic_t libcfs_kmemory = ATOMIC_INIT(0);
+cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
 EXPORT_SYMBOL(libcfs_kmemory);
 
 static cfs_waitq_t debug_ctlwq;
 
-char debug_file_path_arr[1024] = DEBUG_FILE_PATH_DEFAULT;
+char libcfs_debug_file_path_arr[1024] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
 
 /* We need to pass a pointer here, but elsewhere this must be a const */
-char *debug_file_path = &debug_file_path_arr[0];
-CFS_MODULE_PARM(debug_file_path, "s", charp, 0644,
+char *libcfs_debug_file_path = &libcfs_debug_file_path_arr[0];
+CFS_MODULE_PARM(libcfs_debug_file_path, "s", charp, 0644,
                 "Path for dumping debug logs, "
                 "set 'NONE' to prevent log dumping");
 
@@ -333,13 +333,13 @@ void libcfs_debug_dumplog_internal(void *arg)
 
         CFS_PUSH_JOURNAL;
 
-        if (strncmp(debug_file_path_arr, "NONE", 4) != 0) {
+        if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
                 snprintf(debug_file_name, sizeof(debug_file_name) - 1,
-                         "%s.%ld." LPLD, debug_file_path_arr,
+                         "%s.%ld." LPLD, libcfs_debug_file_path_arr,
                          cfs_time_current_sec(), (long_ptr_t)arg);
-                printk(KERN_ALERT "LustreError: dumping log to %s\n",
+                printk(CFS_KERN_ALERT "LustreError: dumping log to %s\n",
                        debug_file_name);
-                tracefile_dump_all_pages(debug_file_name);
+                cfs_tracefile_dump_all_pages(debug_file_name);
                 libcfs_run_debug_log_upcall(debug_file_name);
         }
         CFS_POP_JOURNAL;
@@ -362,21 +362,21 @@ void libcfs_debug_dumplog(void)
          * able to set our state to running as it exits before we
          * get to schedule() */
         cfs_waitlink_init(&wait);
-        set_current_state(TASK_INTERRUPTIBLE);
+        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
         cfs_waitq_add(&debug_ctlwq, &wait);
 
         dumper = cfs_kthread_run(libcfs_debug_dumplog_thread,
                                  (void*)(long)cfs_curproc_pid(),
                                  "libcfs_debug_dumper");
         if (IS_ERR(dumper))
-                printk(KERN_ERR "LustreError: cannot start log dump thread: "
-                       "%ld\n", PTR_ERR(dumper));
+                printk(CFS_KERN_ERR "LustreError: cannot start log dump thread:"
+                       " %ld\n", PTR_ERR(dumper));
         else
                 cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
 
-        /* be sure to teardown if kernel_thread() failed */
+        /* be sure to teardown if cfs_kernel_thread() failed */
         cfs_waitq_del(&debug_ctlwq, &wait);
-        set_current_state(TASK_RUNNING);
+        cfs_set_current_state(CFS_TASK_RUNNING);
 }
 
 int libcfs_debug_init(unsigned long bufsize)
@@ -395,13 +395,13 @@ int libcfs_debug_init(unsigned long bufsize)
 
         /* If libcfs_debug_mb is set to an invalid value or uninitialized
          * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
-        if (max > trace_max_debug_mb() || max < num_possible_cpus()) {
+        if (max > cfs_trace_max_debug_mb() || max < cfs_num_possible_cpus()) {
                 max = TCD_MAX_PAGES;
         } else {
-                max = (max / num_possible_cpus());
+                max = (max / cfs_num_possible_cpus());
                 max = (max << (20 - CFS_PAGE_SHIFT));
         }
-        rc = tracefile_init(max);
+        rc = cfs_tracefile_init(max);
 
         if (rc == 0)
                 libcfs_register_panic_notifier();
@@ -412,13 +412,13 @@ int libcfs_debug_init(unsigned long bufsize)
 int libcfs_debug_cleanup(void)
 {
         libcfs_unregister_panic_notifier();
-        tracefile_exit();
+        cfs_tracefile_exit();
         return 0;
 }
 
 int libcfs_debug_clear_buffer(void)
 {
-        trace_flush_pages();
+        cfs_trace_flush_pages();
         return 0;
 }
 
@@ -439,7 +439,7 @@ int libcfs_debug_mark_buffer(const char *text)
 
 void libcfs_debug_set_level(unsigned int debug_level)
 {
-        printk(KERN_WARNING "Lustre: Setting portals debug level to %08x\n",
+        printk(CFS_KERN_WARNING "Lustre: Setting portals debug level to %08x\n",
                debug_level);
         libcfs_debug = debug_level;
 }
index 81e8b6d..6369ada 100644 (file)
@@ -61,28 +61,28 @@ static void
 cfs_hash_rlock(cfs_hash_t *hs)
 {
         if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
-                read_lock(&hs->hs_rwlock);
+                cfs_read_lock(&hs->hs_rwlock);
 }
 
 static void
 cfs_hash_runlock(cfs_hash_t *hs)
 {
         if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
-                read_unlock(&hs->hs_rwlock);
+                cfs_read_unlock(&hs->hs_rwlock);
 }
 
 static void
 cfs_hash_wlock(cfs_hash_t *hs)
 {
         if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
-                write_lock(&hs->hs_rwlock);
+                cfs_write_lock(&hs->hs_rwlock);
 }
 
 static void
 cfs_hash_wunlock(cfs_hash_t *hs)
 {
         if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
-                write_unlock(&hs->hs_rwlock);
+                cfs_write_unlock(&hs->hs_rwlock);
 }
 
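These wrappers take the table-wide rwlock only when CFS_HASH_REHASH is set, so fixed-size tables pay just the per-bucket hsb_rwlock. A hedged sketch of the two-level pattern the renamed callers below keep following:

static void hash_read_pattern(cfs_hash_t *hs, cfs_hash_bucket_t *hsb)
{
        cfs_hash_rlock(hs);               /* table lock, iff rehashable */
        cfs_read_lock(&hsb->hsb_rwlock);  /* per-bucket lock, always    */
        /* ... inspect the bucket chain ... */
        cfs_read_unlock(&hsb->hsb_rwlock);
        cfs_hash_runlock(hs);
}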
 /**
@@ -116,9 +116,9 @@ cfs_hash_create(char *name, unsigned int cur_bits,
 
         strncpy(hs->hs_name, name, sizeof(hs->hs_name));
         hs->hs_name[sizeof(hs->hs_name) - 1] = '\0';
-        atomic_set(&hs->hs_rehash_count, 0);
-        atomic_set(&hs->hs_count, 0);
-        rwlock_init(&hs->hs_rwlock);
+        cfs_atomic_set(&hs->hs_rehash_count, 0);
+        cfs_atomic_set(&hs->hs_count, 0);
+        cfs_rwlock_init(&hs->hs_rwlock);
         hs->hs_cur_bits = cur_bits;
         hs->hs_cur_mask = (1 << cur_bits) - 1;
         hs->hs_min_bits = cur_bits;
@@ -148,8 +148,8 @@ cfs_hash_create(char *name, unsigned int cur_bits,
                 }
 
                 CFS_INIT_HLIST_HEAD(&hs->hs_buckets[i]->hsb_head);
-                rwlock_init(&hs->hs_buckets[i]->hsb_rwlock);
-                atomic_set(&hs->hs_buckets[i]->hsb_count, 0);
+                cfs_rwlock_init(&hs->hs_buckets[i]->hsb_rwlock);
+                cfs_atomic_set(&hs->hs_buckets[i]->hsb_count, 0);
         }
 
         return hs;
@@ -163,8 +163,8 @@ void
 cfs_hash_destroy(cfs_hash_t *hs)
 {
         cfs_hash_bucket_t    *hsb;
-        struct hlist_node    *hnode;
-        struct hlist_node    *pos;
+        cfs_hlist_node_t     *hnode;
+        cfs_hlist_node_t     *pos;
         int                   i;
         ENTRY;
 
@@ -176,20 +176,20 @@ cfs_hash_destroy(cfs_hash_t *hs)
                 if (hsb == NULL)
                         continue;
 
-                write_lock(&hsb->hsb_rwlock);
-                hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
+                cfs_write_lock(&hsb->hsb_rwlock);
+                cfs_hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
                         __cfs_hash_bucket_validate(hs, hsb, hnode);
                         __cfs_hash_bucket_del(hs, hsb, hnode);
                         cfs_hash_exit(hs, hnode);
                 }
 
-                LASSERT(hlist_empty(&(hsb->hsb_head)));
-                LASSERT(atomic_read(&hsb->hsb_count) == 0);
-                write_unlock(&hsb->hsb_rwlock);
+                LASSERT(cfs_hlist_empty(&(hsb->hsb_head)));
+                LASSERT(cfs_atomic_read(&hsb->hsb_count) == 0);
+                cfs_write_unlock(&hsb->hsb_rwlock);
                 CFS_FREE_PTR(hsb);
         }
 
-        LASSERT(atomic_read(&hs->hs_count) == 0);
+        LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
         cfs_hash_wunlock(hs);
 
         LIBCFS_FREE(hs->hs_buckets,
@@ -223,7 +223,7 @@ cfs_hash_rehash_bits(cfs_hash_t *hs)
  * ops->hs_get function will be called when the item is added.
  */
 void
-cfs_hash_add(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+cfs_hash_add(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
 {
         cfs_hash_bucket_t    *hsb;
         int                   bits;
@@ -236,11 +236,11 @@ cfs_hash_add(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
         i = cfs_hash_id(hs, key, hs->hs_cur_mask);
         hsb = hs->hs_buckets[i];
         LASSERT(i <= hs->hs_cur_mask);
-        LASSERT(hlist_unhashed(hnode));
+        LASSERT(cfs_hlist_unhashed(hnode));
 
-        write_lock(&hsb->hsb_rwlock);
+        cfs_write_lock(&hsb->hsb_rwlock);
         __cfs_hash_bucket_add(hs, hsb, hnode);
-        write_unlock(&hsb->hsb_rwlock);
+        cfs_write_unlock(&hsb->hsb_rwlock);
 
         bits = cfs_hash_rehash_bits(hs);
         cfs_hash_runlock(hs);
@@ -251,12 +251,12 @@ cfs_hash_add(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
 }
 CFS_EXPORT_SYMBOL(cfs_hash_add);
 
-static struct hlist_node *
+static cfs_hlist_node_t *
 cfs_hash_findadd_unique_hnode(cfs_hash_t *hs, void *key,
-                              struct hlist_node *hnode)
+                              cfs_hlist_node_t *hnode)
 {
         int                   bits = 0;
-        struct hlist_node    *ehnode;
+        cfs_hlist_node_t     *ehnode;
         cfs_hash_bucket_t    *hsb;
         unsigned              i;
         ENTRY;
@@ -267,9 +267,9 @@ cfs_hash_findadd_unique_hnode(cfs_hash_t *hs, void *key,
         i = cfs_hash_id(hs, key, hs->hs_cur_mask);
         hsb = hs->hs_buckets[i];
         LASSERT(i <= hs->hs_cur_mask);
-        LASSERT(hlist_unhashed(hnode));
+        LASSERT(cfs_hlist_unhashed(hnode));
 
-        write_lock(&hsb->hsb_rwlock);
+        cfs_write_lock(&hsb->hsb_rwlock);
         ehnode = __cfs_hash_bucket_lookup(hs, hsb, key);
         if (ehnode) {
                 cfs_hash_get(hs, ehnode);
@@ -278,7 +278,7 @@ cfs_hash_findadd_unique_hnode(cfs_hash_t *hs, void *key,
                 ehnode = hnode;
                 bits = cfs_hash_rehash_bits(hs);
         }
-        write_unlock(&hsb->hsb_rwlock);
+        cfs_write_unlock(&hsb->hsb_rwlock);
         cfs_hash_runlock(hs);
         if (bits)
                 cfs_hash_rehash(hs, bits);
@@ -292,9 +292,9 @@ cfs_hash_findadd_unique_hnode(cfs_hash_t *hs, void *key,
  * Returns 0 on success or -EALREADY on key collisions.
  */
 int
-cfs_hash_add_unique(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+cfs_hash_add_unique(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
 {
-        struct hlist_node    *ehnode;
+        cfs_hlist_node_t     *ehnode;
         ENTRY;
 
         ehnode = cfs_hash_findadd_unique_hnode(hs, key, hnode);
@@ -314,9 +314,9 @@ CFS_EXPORT_SYMBOL(cfs_hash_add_unique);
  */
 void *
 cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
-                        struct hlist_node *hnode)
+                        cfs_hlist_node_t *hnode)
 {
-        struct hlist_node    *ehnode;
+        cfs_hlist_node_t     *ehnode;
         void                 *obj;
         ENTRY;
 
@@ -335,7 +335,7 @@ CFS_EXPORT_SYMBOL(cfs_hash_findadd_unique);
  * on the removed object.
  */
 void *
-cfs_hash_del(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+cfs_hash_del(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
 {
         cfs_hash_bucket_t    *hsb;
         void                 *obj;
@@ -348,11 +348,11 @@ cfs_hash_del(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
         i = cfs_hash_id(hs, key, hs->hs_cur_mask);
         hsb = hs->hs_buckets[i];
         LASSERT(i <= hs->hs_cur_mask);
-        LASSERT(!hlist_unhashed(hnode));
+        LASSERT(!cfs_hlist_unhashed(hnode));
 
-        write_lock(&hsb->hsb_rwlock);
+        cfs_write_lock(&hsb->hsb_rwlock);
         obj = __cfs_hash_bucket_del(hs, hsb, hnode);
-        write_unlock(&hsb->hsb_rwlock);
+        cfs_write_unlock(&hsb->hsb_rwlock);
         cfs_hash_runlock(hs);
 
         RETURN(obj);
@@ -369,7 +369,7 @@ void *
 cfs_hash_del_key(cfs_hash_t *hs, void *key)
 {
         void                 *obj = NULL;
-        struct hlist_node    *hnode;
+        cfs_hlist_node_t     *hnode;
         cfs_hash_bucket_t    *hsb;
         unsigned              i;
         ENTRY;
@@ -379,12 +379,12 @@ cfs_hash_del_key(cfs_hash_t *hs, void *key)
         hsb = hs->hs_buckets[i];
         LASSERT(i <= hs->hs_cur_mask);
 
-        write_lock(&hsb->hsb_rwlock);
+        cfs_write_lock(&hsb->hsb_rwlock);
         hnode = __cfs_hash_bucket_lookup(hs, hsb, key);
         if (hnode)
                 obj = __cfs_hash_bucket_del(hs, hsb, hnode);
 
-        write_unlock(&hsb->hsb_rwlock);
+        cfs_write_unlock(&hsb->hsb_rwlock);
         cfs_hash_runlock(hs);
 
         RETURN(obj);
@@ -403,7 +403,7 @@ void *
 cfs_hash_lookup(cfs_hash_t *hs, void *key)
 {
         void                 *obj = NULL;
-        struct hlist_node    *hnode;
+        cfs_hlist_node_t     *hnode;
         cfs_hash_bucket_t    *hsb;
         unsigned              i;
         ENTRY;
@@ -413,12 +413,12 @@ cfs_hash_lookup(cfs_hash_t *hs, void *key)
         hsb = hs->hs_buckets[i];
         LASSERT(i <= hs->hs_cur_mask);
 
-        read_lock(&hsb->hsb_rwlock);
+        cfs_read_lock(&hsb->hsb_rwlock);
         hnode = __cfs_hash_bucket_lookup(hs, hsb, key);
         if (hnode)
                 obj = cfs_hash_get(hs, hnode);
 
-        read_unlock(&hsb->hsb_rwlock);
+        cfs_read_unlock(&hsb->hsb_rwlock);
         cfs_hash_runlock(hs);
 
         RETURN(obj);
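
A successful lookup returns the object with a reference taken through ops->hs_get, so callers own a reference they must later drop. A hedged caller sketch (names hypothetical):

static void *peer_find(cfs_hash_t *peers, void *nid)
{
        void *peer = cfs_hash_lookup(peers, nid);

        /* a non-NULL peer carries the ops->hs_get reference;
         * the caller drops it with its matching put routine */
        return peer;
}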
@@ -436,7 +436,7 @@ void
 cfs_hash_for_each(cfs_hash_t *hs,
                   cfs_hash_for_each_cb_t func, void *data)
 {
-        struct hlist_node    *hnode;
+        cfs_hlist_node_t     *hnode;
         cfs_hash_bucket_t    *hsb;
         void                 *obj;
         int                   i;
@@ -444,14 +444,14 @@ cfs_hash_for_each(cfs_hash_t *hs,
 
         cfs_hash_rlock(hs);
         cfs_hash_for_each_bucket(hs, hsb, i) {
-                read_lock(&hsb->hsb_rwlock);
-                hlist_for_each(hnode, &(hsb->hsb_head)) {
+                cfs_read_lock(&hsb->hsb_rwlock);
+                cfs_hlist_for_each(hnode, &(hsb->hsb_head)) {
                         __cfs_hash_bucket_validate(hs, hsb, hnode);
                         obj = cfs_hash_get(hs, hnode);
                         func(obj, data);
                         (void)cfs_hash_put(hs, hnode);
                 }
-                read_unlock(&hsb->hsb_rwlock);
+                cfs_read_unlock(&hsb->hsb_rwlock);
         }
         cfs_hash_runlock(hs);
 
@@ -473,8 +473,8 @@ void
 cfs_hash_for_each_safe(cfs_hash_t *hs,
                        cfs_hash_for_each_cb_t func, void *data)
 {
-        struct hlist_node    *hnode;
-        struct hlist_node    *pos;
+        cfs_hlist_node_t     *hnode;
+        cfs_hlist_node_t     *pos;
         cfs_hash_bucket_t    *hsb;
         void                 *obj;
         int                   i;
@@ -482,16 +482,16 @@ cfs_hash_for_each_safe(cfs_hash_t *hs,
 
         cfs_hash_rlock(hs);
         cfs_hash_for_each_bucket(hs, hsb, i) {
-                read_lock(&hsb->hsb_rwlock);
-                hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
+                cfs_read_lock(&hsb->hsb_rwlock);
+                cfs_hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
                         __cfs_hash_bucket_validate(hs, hsb, hnode);
                         obj = cfs_hash_get(hs, hnode);
-                        read_unlock(&hsb->hsb_rwlock);
+                        cfs_read_unlock(&hsb->hsb_rwlock);
                         func(obj, data);
-                        read_lock(&hsb->hsb_rwlock);
+                        cfs_read_lock(&hsb->hsb_rwlock);
                         (void)cfs_hash_put(hs, hnode);
                 }
-                read_unlock(&hsb->hsb_rwlock);
+                cfs_read_unlock(&hsb->hsb_rwlock);
         }
         cfs_hash_runlock(hs);
         EXIT;
@@ -513,7 +513,7 @@ void
 cfs_hash_for_each_empty(cfs_hash_t *hs,
                         cfs_hash_for_each_cb_t func, void *data)
 {
-        struct hlist_node    *hnode;
+        cfs_hlist_node_t     *hnode;
         cfs_hash_bucket_t    *hsb;
         void                 *obj;
         int                   i;
@@ -522,18 +522,18 @@ cfs_hash_for_each_empty(cfs_hash_t *hs,
 restart:
         cfs_hash_rlock(hs);
         cfs_hash_for_each_bucket(hs, hsb, i) {
-                write_lock(&hsb->hsb_rwlock);
-                while (!hlist_empty(&hsb->hsb_head)) {
+                cfs_write_lock(&hsb->hsb_rwlock);
+                while (!cfs_hlist_empty(&hsb->hsb_head)) {
                         hnode =  hsb->hsb_head.first;
                         __cfs_hash_bucket_validate(hs, hsb, hnode);
                         obj = cfs_hash_get(hs, hnode);
-                        write_unlock(&hsb->hsb_rwlock);
+                        cfs_write_unlock(&hsb->hsb_rwlock);
                         cfs_hash_runlock(hs);
                         func(obj, data);
                         (void)cfs_hash_put(hs, hnode);
                         goto restart;
                 }
-                write_unlock(&hsb->hsb_rwlock);
+                cfs_write_unlock(&hsb->hsb_rwlock);
         }
         cfs_hash_runlock(hs);
         EXIT;
@@ -552,7 +552,7 @@ void
 cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
                       cfs_hash_for_each_cb_t func, void *data)
 {
-        struct hlist_node    *hnode;
+        cfs_hlist_node_t     *hnode;
         cfs_hash_bucket_t    *hsb;
         unsigned              i;
         ENTRY;
@@ -562,8 +562,8 @@ cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
         hsb = hs->hs_buckets[i];
         LASSERT(i <= hs->hs_cur_mask);
 
-        read_lock(&hsb->hsb_rwlock);
-        hlist_for_each(hnode, &(hsb->hsb_head)) {
+        cfs_read_lock(&hsb->hsb_rwlock);
+        cfs_hlist_for_each(hnode, &(hsb->hsb_head)) {
                 __cfs_hash_bucket_validate(hs, hsb, hnode);
 
                 if (!cfs_hash_compare(hs, key, hnode))
@@ -573,7 +573,7 @@ cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
                 (void)cfs_hash_put(hs, hnode);
         }
 
-        read_unlock(&hsb->hsb_rwlock);
+        cfs_read_unlock(&hsb->hsb_rwlock);
         cfs_hash_runlock(hs);
 
         EXIT;
@@ -594,8 +594,8 @@ CFS_EXPORT_SYMBOL(cfs_hash_for_each_key);
 int
 cfs_hash_rehash(cfs_hash_t *hs, int bits)
 {
-        struct hlist_node     *hnode;
-        struct hlist_node     *pos;
+        cfs_hlist_node_t      *hnode;
+        cfs_hlist_node_t      *pos;
         cfs_hash_bucket_t    **old_buckets;
         cfs_hash_bucket_t    **rehash_buckets;
         cfs_hash_bucket_t     *hs_hsb;
@@ -609,7 +609,7 @@ cfs_hash_rehash(cfs_hash_t *hs, int bits)
         void                  *key;
         ENTRY;
 
-        LASSERT(!in_interrupt());
+        LASSERT(!cfs_in_interrupt());
         LASSERT(new_mask > 0);
         LASSERT((hs->hs_flags & CFS_HASH_REHASH) != 0);
 
@@ -623,8 +623,8 @@ cfs_hash_rehash(cfs_hash_t *hs, int bits)
                         GOTO(free, rc = -ENOMEM);
 
                 CFS_INIT_HLIST_HEAD(&rehash_buckets[i]->hsb_head);
-                rwlock_init(&rehash_buckets[i]->hsb_rwlock);
-                atomic_set(&rehash_buckets[i]->hsb_count, 0);
+                cfs_rwlock_init(&rehash_buckets[i]->hsb_rwlock);
+                cfs_atomic_set(&rehash_buckets[i]->hsb_count, 0);
         }
 
         cfs_hash_wlock(hs);
@@ -646,13 +646,13 @@ cfs_hash_rehash(cfs_hash_t *hs, int bits)
         hs->hs_cur_bits = bits;
         hs->hs_cur_mask = (1 << bits) - 1;
         hs->hs_buckets = rehash_buckets;
-        atomic_inc(&hs->hs_rehash_count);
+        cfs_atomic_inc(&hs->hs_rehash_count);
 
         for (i = 0; i <= old_mask; i++) {
                 hs_hsb = old_buckets[i];
 
-                write_lock(&hs_hsb->hsb_rwlock);
-                hlist_for_each_safe(hnode, pos, &(hs_hsb->hsb_head)) {
+                cfs_write_lock(&hs_hsb->hsb_rwlock);
+                cfs_hlist_for_each_safe(hnode, pos, &(hs_hsb->hsb_head)) {
                         key = cfs_hash_key(hs, hnode);
                         LASSERT(key);
 
@@ -665,22 +665,22 @@ cfs_hash_rehash(cfs_hash_t *hs, int bits)
                         /*
                          * Delete from old hash bucket.
                          */
-                        hlist_del(hnode);
-                        LASSERT(atomic_read(&hs_hsb->hsb_count) > 0);
-                        atomic_dec(&hs_hsb->hsb_count);
+                        cfs_hlist_del(hnode);
+                        LASSERT(cfs_atomic_read(&hs_hsb->hsb_count) > 0);
+                        cfs_atomic_dec(&hs_hsb->hsb_count);
 
                         /*
                          * Add to rehash bucket, ops->hs_key must be defined.
                          */
                         rehash_hsb = rehash_buckets[cfs_hash_id(hs, key,
                                                                 new_mask)];
-                        hlist_add_head(hnode, &(rehash_hsb->hsb_head));
-                        atomic_inc(&rehash_hsb->hsb_count);
+                        cfs_hlist_add_head(hnode, &(rehash_hsb->hsb_head));
+                        cfs_atomic_inc(&rehash_hsb->hsb_count);
                 }
 
-                LASSERT(hlist_empty(&(hs_hsb->hsb_head)));
-                LASSERT(atomic_read(&hs_hsb->hsb_count) == 0);
-                write_unlock(&hs_hsb->hsb_rwlock);
+                LASSERT(cfs_hlist_empty(&(hs_hsb->hsb_head)));
+                LASSERT(cfs_atomic_read(&hs_hsb->hsb_count) == 0);
+                cfs_write_unlock(&hs_hsb->hsb_rwlock);
         }
 
         cfs_hash_wunlock(hs);
@@ -706,7 +706,7 @@ CFS_EXPORT_SYMBOL(cfs_hash_rehash);
  * not be called.
  */
 void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key, void *new_key,
-                         struct hlist_node *hnode)
+                         cfs_hlist_node_t *hnode)
 {
         cfs_hash_bucket_t     *old_hsb;
         cfs_hash_bucket_t     *new_hsb;
@@ -715,7 +715,7 @@ void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key, void *new_key,
         ENTRY;
 
         __cfs_hash_key_validate(hs, new_key, hnode);
-        LASSERT(!hlist_unhashed(hnode));
+        LASSERT(!cfs_hlist_unhashed(hnode));
 
         cfs_hash_rlock(hs);
 
@@ -728,13 +728,13 @@ void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key, void *new_key,
         LASSERT(j <= hs->hs_cur_mask);
 
         if (i < j) { /* write_lock ordering */
-                write_lock(&old_hsb->hsb_rwlock);
-                write_lock(&new_hsb->hsb_rwlock);
+                cfs_write_lock(&old_hsb->hsb_rwlock);
+                cfs_write_lock(&new_hsb->hsb_rwlock);
         } else if (i > j) {
-                write_lock(&new_hsb->hsb_rwlock);
-                write_lock(&old_hsb->hsb_rwlock);
+                cfs_write_lock(&new_hsb->hsb_rwlock);
+                cfs_write_lock(&old_hsb->hsb_rwlock);
         } else { /* do nothing */
-                read_unlock(&hs->hs_rwlock);
+                cfs_read_unlock(&hs->hs_rwlock);
                 EXIT;
                 return;
         }
@@ -743,14 +743,14 @@ void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key, void *new_key,
          * Migrate item between hash buckets without calling
          * the cfs_hash_get() and cfs_hash_put() callback functions.
          */
-        hlist_del(hnode);
-        LASSERT(atomic_read(&old_hsb->hsb_count) > 0);
-        atomic_dec(&old_hsb->hsb_count);
-        hlist_add_head(hnode, &(new_hsb->hsb_head));
-        atomic_inc(&new_hsb->hsb_count);
-
-        write_unlock(&new_hsb->hsb_rwlock);
-        write_unlock(&old_hsb->hsb_rwlock);
+        cfs_hlist_del(hnode);
+        LASSERT(cfs_atomic_read(&old_hsb->hsb_count) > 0);
+        cfs_atomic_dec(&old_hsb->hsb_count);
+        cfs_hlist_add_head(hnode, &(new_hsb->hsb_head));
+        cfs_atomic_inc(&new_hsb->hsb_count);
+
+        cfs_write_unlock(&new_hsb->hsb_rwlock);
+        cfs_write_unlock(&old_hsb->hsb_rwlock);
         cfs_hash_runlock(hs);
 
         EXIT;
@@ -796,9 +796,9 @@ int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
                       __cfs_hash_theta_frac(hs->hs_max_theta));
         c += snprintf(str + c, size - c, " 0x%02x ", hs->hs_flags);
         c += snprintf(str + c, size - c, "%6d ",
-                      atomic_read(&hs->hs_rehash_count));
+                      cfs_atomic_read(&hs->hs_rehash_count));
         c += snprintf(str + c, size - c, "%5d ",
-                      atomic_read(&hs->hs_count));
+                      cfs_atomic_read(&hs->hs_count));
 
         /*
          * The distribution is a summary of the chained hash depth in
@@ -814,7 +814,7 @@ int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
          * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
          */
         cfs_hash_for_each_bucket(hs, hsb, i)
-                dist[min(__fls(atomic_read(&hsb->hsb_count)/max(theta,1)),7)]++;
+                dist[min(__cfs_fls(cfs_atomic_read(&hsb->hsb_count)/max(theta,1)),7)]++;
 
         for (i = 0; i < 8; i++)
                 c += snprintf(str + c, size - c, "%d%c",  dist[i],
index f8dab0f..367a3f0 100644 (file)
 #include <linux/kallsyms.h>
 #endif
 
+/* We need to pass a pointer here, but elsewhere this must be a const */
+static char *debug_file_path = &libcfs_debug_file_path_arr[0];
+CFS_MODULE_PARM(debug_file_path, "s", charp, 0644,
+                "Path for dumping debug logs (deprecated)");
+
 char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
 char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
 
@@ -173,7 +178,8 @@ void lbug_with_loc(const char *file, const char *func, const int line)
 {
         libcfs_catastrophe = 1;
         libcfs_debug_msg(NULL, 0, D_EMERG, file, func, line,
-                         "LBUG - trying to dump log to %s\n", debug_file_path);
+                         "LBUG - trying to dump log to %s\n",
+                         libcfs_debug_file_path);
         libcfs_debug_dumplog();
         libcfs_run_lbug_upcall(file, func, line);
         asm("int $3");
@@ -310,7 +316,7 @@ static int panic_notifier(struct notifier_block *self, unsigned long unused1,
          * console on the rare cases it is ever triggered. */
 
         if (in_interrupt()) {
-                trace_debug_print();
+                cfs_trace_debug_print();
         } else {
                 while (current->lock_depth >= 0)
                         unlock_kernel();
index 0cefbf4..d5d50e7 100644 (file)
@@ -56,8 +56,8 @@ cfs_filp_open (const char *name, int flags, int mode, int *err)
                int rc;
 
                rc = PTR_ERR(filp);
-               printk(KERN_ERR "LustreError: can't open %s file: err %d\n",
-                               name, rc);
+                printk(KERN_ERR "LustreError: can't open %s file: err %d\n",
+                       name, rc);
                if (err)
                        *err = rc;
                filp = NULL;
index 88eab3e..f7daa3d 100644 (file)
@@ -84,7 +84,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
 
         if (data->ioc_inllen2)
                 data->ioc_inlbuf2 = &data->ioc_bulk[0] +
-                        size_round(data->ioc_inllen1);
+                        cfs_size_round(data->ioc_inllen1);
 
         RETURN(0);
 }
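
cfs_size_round() is presumably the usual round-up-to-8-bytes helper, which keeps ioc_inlbuf2 aligned after inline buffer 1; a hedged restatement (the real definition lives in the libcfs headers):

static inline int size_round_demo(int len)
{
        return (len + 7) & ~7;    /* e.g. 13 -> 16, 16 -> 16 */
}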
index decc551..3cb1a8c 100644 (file)
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(cfs_waitq_add);
 
 void
 cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
-                             cfs_waitlink_t *link)
+                        cfs_waitlink_t *link)
 {
         add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
 }
@@ -123,18 +123,26 @@ cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
 EXPORT_SYMBOL(cfs_waitq_wait);
 
 int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state, int64_t timeout)
+cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
+                    int64_t timeout)
 {
         return schedule_timeout(timeout);
 }
 EXPORT_SYMBOL(cfs_waitq_timedwait);
 
 void
-cfs_schedule_timeout(cfs_task_state_t state, int64_t timeout)
+cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
 {
         set_current_state(state);
         schedule_timeout(timeout);
 }
+EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
+
+void
+cfs_schedule_timeout(int64_t timeout)
+{
+        schedule_timeout(timeout);
+}
 EXPORT_SYMBOL(cfs_schedule_timeout);
 
 void
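
The old two-argument cfs_schedule_timeout() is split here: callers that
want the task state changed first use the new _and_set_state variant,
while the plain wrapper sleeps in whatever state the task already has.
A hedged usage sketch (CFS_TASK_INTERRUPTIBLE is assumed to be the
libcfs task-state constant):

        static void example_waits(void)
        {
                /* old: cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, 10);
                 * new: the state change is explicit in the name */
                cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE, 10);

                /* new plain variant: current task state is left alone */
                cfs_schedule_timeout(10);
        }
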
index fa4474d..553e8f2 100644 (file)
@@ -159,7 +159,7 @@ static int __proc_dobitmasks(void *data, int write,
         int           is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
         int           is_printk = (mask == &libcfs_printk) ? 1 : 0;
 
-        rc = trace_allocate_string_buffer(&tmpstr, tmpstrlen);
+        rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
         if (rc < 0)
                 return rc;
 
@@ -170,11 +170,11 @@ static int __proc_dobitmasks(void *data, int write,
                 if (pos >= rc) {
                         rc = 0;
                 } else {
-                        rc = trace_copyout_string(buffer, nob,
-                                                  tmpstr + pos, "\n");
+                        rc = cfs_trace_copyout_string(buffer, nob,
+                                                      tmpstr + pos, "\n");
                 }
         } else {
-                rc = trace_copyin_string(tmpstr, tmpstrlen, buffer, nob);
+                rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob);
                 if (rc < 0)
                         return rc;
 
@@ -184,7 +184,7 @@ static int __proc_dobitmasks(void *data, int write,
                         *mask |= D_EMERG;
         }
 
-        trace_free_string_buffer(tmpstr, tmpstrlen);
+        cfs_trace_free_string_buffer(tmpstr, tmpstrlen);
         return rc;
 }
 
@@ -199,7 +199,7 @@ static int __proc_dump_kernel(void *data, int write,
         if (!write)
                 return 0;
 
-        return trace_dump_debug_buffer_usrstr(buffer, nob);
+        return cfs_trace_dump_debug_buffer_usrstr(buffer, nob);
 }
 
 DECLARE_PROC_HANDLER(proc_dump_kernel)
@@ -208,16 +208,16 @@ static int __proc_daemon_file(void *data, int write,
                               loff_t pos, void *buffer, int nob)
 {
         if (!write) {
-                int len = strlen(tracefile);
+                int len = strlen(cfs_tracefile);
 
                 if (pos >= len)
                         return 0;
 
-                return trace_copyout_string(buffer, nob,
-                                            tracefile + pos, "\n");
+                return cfs_trace_copyout_string(buffer, nob,
+                                                cfs_tracefile + pos, "\n");
         }
 
-        return trace_daemon_command_usrstr(buffer, nob);
+        return cfs_trace_daemon_command_usrstr(buffer, nob);
 }
 
 DECLARE_PROC_HANDLER(proc_daemon_file)
@@ -228,15 +228,16 @@ static int __proc_debug_mb(void *data, int write,
         if (!write) {
                 char tmpstr[32];
                 int  len = snprintf(tmpstr, sizeof(tmpstr), "%d",
-                                    trace_get_debug_mb());
+                                    cfs_trace_get_debug_mb());
 
                 if (pos >= len)
                         return 0;
 
-                return trace_copyout_string(buffer, nob, tmpstr + pos, "\n");
+                return cfs_trace_copyout_string(buffer, nob, tmpstr + pos,
+                       "\n");
         }
 
-        return trace_set_debug_mb_usrstr(buffer, nob);
+        return cfs_trace_set_debug_mb_usrstr(buffer, nob);
 }
 
 DECLARE_PROC_HANDLER(proc_debug_mb)
@@ -392,8 +393,8 @@ static cfs_sysctl_table_t lnet_table[] = {
         {
                 .ctl_name = PSDEV_DEBUG_PATH,
                 .procname = "debug_path",
-                .data     = debug_file_path_arr,
-                .maxlen   = sizeof(debug_file_path_arr),
+                .data     = libcfs_debug_file_path_arr,
+                .maxlen   = sizeof(libcfs_debug_file_path_arr),
                 .mode     = 0644,
                 .proc_handler = &proc_dostring,
         },
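
All of the __proc_* handlers above follow one pattern: reads snprintf
into a scratch buffer and copy out from the caller's offset with
cfs_trace_copyout_string(), while writes hand the user buffer to a
cfs_trace_*_usrstr() helper.  A hedged sketch of the read side for a
hypothetical integer value:

        static int example_value = 42;   /* hypothetical value to export */

        static int __proc_example(void *data, int write,
                                  loff_t pos, void *buffer, int nob)
        {
                if (!write) {
                        char tmpstr[32];
                        int  len = snprintf(tmpstr, sizeof(tmpstr), "%d",
                                            example_value);

                        if (pos >= len)          /* EOF past the end */
                                return 0;

                        return cfs_trace_copyout_string(buffer, nob,
                                                        tmpstr + pos, "\n");
                }

                /* a real handler would parse the user buffer here */
                return -EINVAL;
        }
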
index 0ac56c6..4f4d14a 100644 (file)
@@ -591,7 +591,7 @@ int sock_create_lite(int family, int type, int protocol, struct socket **res)
         struct socket *sock;
 
         sock = sock_alloc();
-        if (sock == NULL) 
+        if (sock == NULL)
                 return -ENOMEM;
 
         sock->type = type;
index c7c2a21..fd6e6b9 100644 (file)
 #include "tracefile.h"
 
 /* percentage of the total debug memory given to each type */
-static unsigned int pages_factor[TCD_TYPE_MAX] = {
-       80,  /* 80% pages for TCD_TYPE_PROC */
-       10,  /* 10% pages for TCD_TYPE_SOFTIRQ */
-       10   /* 10% pages for TCD_TYPE_IRQ */
+static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
+       80,  /* 80% pages for CFS_TCD_TYPE_PROC */
+       10,  /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
+       10   /* 10% pages for CFS_TCD_TYPE_IRQ */
 };
 
-char *trace_console_buffers[NR_CPUS][TCD_TYPE_MAX];
+char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
 
-struct rw_semaphore tracefile_sem;
+cfs_rw_semaphore_t cfs_tracefile_sem;
 
-int tracefile_init_arch()
+int cfs_tracefile_init_arch()
 {
        int    i;
        int    j;
-       struct trace_cpu_data *tcd;
+       struct cfs_trace_cpu_data *tcd;
 
-       init_rwsem(&tracefile_sem);
+       cfs_init_rwsem(&cfs_tracefile_sem);
 
        /* initialize trace_data */
-       memset(trace_data, 0, sizeof(trace_data));
-       for (i = 0; i < TCD_TYPE_MAX; i++) {
-               trace_data[i]=kmalloc(sizeof(union trace_data_union)*NR_CPUS,
-                                                         GFP_KERNEL);
-               if (trace_data[i] == NULL)
+       memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
+       for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
+               cfs_trace_data[i] =
+                        kmalloc(sizeof(union cfs_trace_data_union) * NR_CPUS,
+                                GFP_KERNEL);
+               if (cfs_trace_data[i] == NULL)
                        goto out;
 
        }
 
        /* arch related info initialized */
-       tcd_for_each(tcd, i, j) {
-               spin_lock_init(&tcd->tcd_lock);
+       cfs_tcd_for_each(tcd, i, j) {
+               cfs_spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
@@ -79,98 +80,98 @@ int tracefile_init_arch()
 
        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
-                       trace_console_buffers[i][j] =
-                               kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
-                                       GFP_KERNEL);
+                        cfs_trace_console_buffers[i][j] =
+                                kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
+                                        GFP_KERNEL);
 
-                       if (trace_console_buffers[i][j] == NULL)
+                       if (cfs_trace_console_buffers[i][j] == NULL)
                                goto out;
                }
 
        return 0;
 
 out:
-       tracefile_fini_arch();
-       printk(KERN_ERR "lnet: No enough memory\n");
+       cfs_tracefile_fini_arch();
+       printk(KERN_ERR "lnet: Not enough memory\n");
        return -ENOMEM;
 
 }
 
-void tracefile_fini_arch()
+void cfs_tracefile_fini_arch()
 {
        int    i;
        int    j;
 
        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++)
-                       if (trace_console_buffers[i][j] != NULL) {
-                               kfree(trace_console_buffers[i][j]);
-                               trace_console_buffers[i][j] = NULL;
+                       if (cfs_trace_console_buffers[i][j] != NULL) {
+                               kfree(cfs_trace_console_buffers[i][j]);
+                               cfs_trace_console_buffers[i][j] = NULL;
                        }
 
-       for (i = 0; trace_data[i] != NULL; i++) {
-               kfree(trace_data[i]);
-               trace_data[i] = NULL;
+       for (i = 0; cfs_trace_data[i] != NULL; i++) {
+               kfree(cfs_trace_data[i]);
+               cfs_trace_data[i] = NULL;
        }
 
-       fini_rwsem(&tracefile_sem);
+       cfs_fini_rwsem(&cfs_tracefile_sem);
 }
 
-void tracefile_read_lock()
+void cfs_tracefile_read_lock()
 {
-       down_read(&tracefile_sem);
+       cfs_down_read(&cfs_tracefile_sem);
 }
 
-void tracefile_read_unlock()
+void cfs_tracefile_read_unlock()
 {
-       up_read(&tracefile_sem);
+       cfs_up_read(&cfs_tracefile_sem);
 }
 
-void tracefile_write_lock()
+void cfs_tracefile_write_lock()
 {
-       down_write(&tracefile_sem);
+       cfs_down_write(&cfs_tracefile_sem);
 }
 
-void tracefile_write_unlock()
+void cfs_tracefile_write_unlock()
 {
-       up_write(&tracefile_sem);
+       cfs_up_write(&cfs_tracefile_sem);
 }
 
-trace_buf_type_t
-trace_buf_idx_get()
+cfs_trace_buf_type_t cfs_trace_buf_idx_get()
 {
        if (in_irq())
-               return TCD_TYPE_IRQ;
+               return CFS_TCD_TYPE_IRQ;
        else if (in_softirq())
-               return TCD_TYPE_SOFTIRQ;
+               return CFS_TCD_TYPE_SOFTIRQ;
        else
-               return TCD_TYPE_PROC;
+               return CFS_TCD_TYPE_PROC;
 }
 
-int trace_lock_tcd(struct trace_cpu_data *tcd)
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd)
 {
-       __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-        if (tcd->tcd_type == TCD_TYPE_IRQ)
-                spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
-        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-                spin_lock_bh(&tcd->tcd_lock);
+       __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
+        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+                cfs_spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+                cfs_spin_lock_bh(&tcd->tcd_lock);
         else
-                spin_lock(&tcd->tcd_lock);
+                cfs_spin_lock(&tcd->tcd_lock);
        return 1;
 }
 
-void trace_unlock_tcd(struct trace_cpu_data *tcd)
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd)
 {
-       __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-        if (tcd->tcd_type == TCD_TYPE_IRQ)
-                spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
-        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-                spin_unlock_bh(&tcd->tcd_lock);
+       __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
+        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+                cfs_spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+                cfs_spin_unlock_bh(&tcd->tcd_lock);
         else
-                spin_unlock(&tcd->tcd_lock);
+                cfs_spin_unlock(&tcd->tcd_lock);
 }
 
-int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
+int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
+                      struct cfs_trace_page *tage)
 {
        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
@@ -180,8 +181,8 @@ int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
 }
 
 void
-set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
-                   const int line, unsigned long stack)
+cfs_set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
+                        const int line, unsigned long stack)
 {
        struct timeval tv;
 
@@ -205,8 +206,9 @@ set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
        return;
 }
 
-void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
-                            int len, const char *file, const char *fn)
+void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+                          const char *buf, int len, const char *file,
+                          const char *fn)
 {
        char *prefix = "Lustre", *ptype = NULL;
 
@@ -227,15 +229,16 @@ void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
        if ((mask & D_CONSOLE) != 0) {
                printk("%s%s: %.*s", ptype, prefix, len, buf);
        } else {
-               printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, hdr->ph_pid,
-                      hdr->ph_extern_pid, file, hdr->ph_line_num, fn, len, buf);
+               printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
+                       hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
+                       fn, len, buf);
        }
        return;
 }
 
-int trace_max_debug_mb(void)
+int cfs_trace_max_debug_mb(void)
 {
-       int  total_mb = (num_physpages >> (20 - CFS_PAGE_SHIFT));
-       
+       int  total_mb = (cfs_num_physpages >> (20 - PAGE_SHIFT));
+
        return MAX(512, (total_mb * 80)/100);
 }
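
A worked example of the cfs_trace_max_debug_mb() arithmetic above,
assuming 4 KiB pages (PAGE_SHIFT = 12) and 4 GiB of RAM: num_physpages
is 1048576, total_mb comes out to 4096, and the cap is
MAX(512, 4096 * 80 / 100) = 3276 MB.

        #include <stdio.h>

        #define MAX(a, b) ((a) > (b) ? (a) : (b))

        int main(void)
        {
                unsigned long num_physpages = 1048576UL;  /* 4 GiB / 4 KiB */
                int page_shift = 12;
                int total_mb = (int)(num_physpages >> (20 - page_shift));

                printf("max debug mb = %d\n",
                       MAX(512, (total_mb * 80) / 100));  /* 3276 */
                return 0;
        }
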
index bdefc31..b910074 100644 (file)
  * three types of trace_data in linux
  */
 typedef enum {
-       TCD_TYPE_PROC = 0,
-       TCD_TYPE_SOFTIRQ,
-       TCD_TYPE_IRQ,
-       TCD_TYPE_MAX
-} trace_buf_type_t;
+        CFS_TCD_TYPE_PROC = 0,
+        CFS_TCD_TYPE_SOFTIRQ,
+        CFS_TCD_TYPE_IRQ,
+        CFS_TCD_TYPE_MAX
+} cfs_trace_buf_type_t;
 
 #endif
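
This enum, together with cfs_trace_buf_idx_get() from the tracefile
changes above, selects the per-context console buffer for the current
CPU.  A hedged sketch of the selection (cfs_get_cpu() is assumed to be
the libcfs wrapper for the current CPU id):

        static char *pick_console_buffer(void)
        {
                int cpu  = cfs_get_cpu();
                int type = cfs_trace_buf_idx_get(); /* PROC, SOFTIRQ or IRQ */

                return cfs_trace_console_buffers[cpu][type];
        }
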
index c8ca37b..3bc1118 100644 (file)
@@ -50,7 +50,7 @@
 
 #if !KLWT_SUPPORT
 int         lwt_enabled;
-lwt_cpu_t   lwt_cpus[NR_CPUS];
+lwt_cpu_t   lwt_cpus[CFS_NR_CPUS];
 #endif
 
 int         lwt_pages_per_cpu;
@@ -63,7 +63,7 @@ lwt_lookup_string (int *size, char *knl_ptr,
                    char *user_ptr, int user_size)
 {
         int   maxsize = 128;
-        
+
         /* knl_ptr was retrieved from an LWT snapshot and the caller wants to
          * turn it into a string.  NB we can crash with an access violation
          * trying to determine the string length, so we're trusting our
@@ -77,17 +77,17 @@ lwt_lookup_string (int *size, char *knl_ptr,
                 maxsize = user_size;
 
         *size = strnlen (knl_ptr, maxsize - 1) + 1;
-        
+
         if (user_ptr != NULL) {
                 if (user_size < 4)
                         return (-EINVAL);
-                
-                if (copy_to_user (user_ptr, knl_ptr, *size))
+
+                if (cfs_copy_to_user (user_ptr, knl_ptr, *size))
                         return (-EFAULT);
 
                 /* Did I truncate the string?  */
                 if (knl_ptr[*size - 1] != 0)
-                        copy_to_user (user_ptr + *size - 4, "...", 4);
+                        cfs_copy_to_user (user_ptr + *size - 4, "...", 4);
         }
 
         return (0);
@@ -106,12 +106,12 @@ lwt_control (int enable, int clear)
         if (!enable) {
                 LWT_EVENT(0,0,0,0);
                 lwt_enabled = 0;
-                mb();
+                cfs_mb();
                 /* give people some time to stop adding traces */
-                schedule_timeout(10);
+                cfs_schedule_timeout(10);
         }
 
-        for (i = 0; i < num_online_cpus(); i++) {
+        for (i = 0; i < cfs_num_online_cpus(); i++) {
                 p = lwt_cpus[i].lwtc_current_page;
 
                 if (p == NULL)
@@ -123,14 +123,14 @@ lwt_control (int enable, int clear)
                 for (j = 0; j < lwt_pages_per_cpu; j++) {
                         memset (p->lwtp_events, 0, CFS_PAGE_SIZE);
 
-                        p = list_entry (p->lwtp_list.next,
-                                        lwt_page_t, lwtp_list);
+                        p = cfs_list_entry (p->lwtp_list.next,
+                                            lwt_page_t, lwtp_list);
                 }
         }
 
         if (enable) {
                 lwt_enabled = 1;
-                mb();
+                cfs_mb();
                 LWT_EVENT(0,0,0,0);
         }
 
@@ -138,7 +138,7 @@ lwt_control (int enable, int clear)
 }
 
 int
-lwt_snapshot (cycles_t *now, int *ncpu, int *total_size, 
+lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
               void *user_ptr, int user_size)
 {
         const int    events_per_page = CFS_PAGE_SIZE / sizeof(lwt_event_t);
@@ -150,28 +150,28 @@ lwt_snapshot (cycles_t *now, int *ncpu, int *total_size,
         if (!cfs_capable(CFS_CAP_SYS_ADMIN))
                 return (-EPERM);
 
-        *ncpu = num_online_cpus();
-        *total_size = num_online_cpus() * lwt_pages_per_cpu * bytes_per_page;
+        *ncpu = cfs_num_online_cpus();
+        *total_size = cfs_num_online_cpus() * lwt_pages_per_cpu *
+                bytes_per_page;
         *now = get_cycles();
-        
+
         if (user_ptr == NULL)
                 return (0);
 
-        for (i = 0; i < num_online_cpus(); i++) {
+        for (i = 0; i < cfs_num_online_cpus(); i++) {
                 p = lwt_cpus[i].lwtc_current_page;
 
                 if (p == NULL)
                         return (-ENODATA);
-                
+
                 for (j = 0; j < lwt_pages_per_cpu; j++) {
-                        if (copy_to_user(user_ptr, p->lwtp_events,
-                                         bytes_per_page))
+                        if (cfs_copy_to_user(user_ptr, p->lwtp_events,
+                                             bytes_per_page))
                                 return (-EFAULT);
 
                         user_ptr = ((char *)user_ptr) + bytes_per_page;
-                        p = list_entry(p->lwtp_list.next,
-                                       lwt_page_t, lwtp_list);
-                        
+                        p = cfs_list_entry(p->lwtp_list.next,
+                                           lwt_page_t, lwtp_list);
                 }
         }
 
@@ -179,22 +179,23 @@ lwt_snapshot (cycles_t *now, int *ncpu, int *total_size,
 }
 
 int
-lwt_init () 
+lwt_init ()
 {
        int     i;
         int     j;
 
-        for (i = 0; i < num_online_cpus(); i++)
+        for (i = 0; i < cfs_num_online_cpus(); i++)
                 if (lwt_cpus[i].lwtc_current_page != NULL)
                         return (-EALREADY);
-        
+
         LASSERT (!lwt_enabled);
 
        /* NULL pointers, zero scalars */
        memset (lwt_cpus, 0, sizeof (lwt_cpus));
-        lwt_pages_per_cpu = LWT_MEMORY / (num_online_cpus() * CFS_PAGE_SIZE);
+        lwt_pages_per_cpu =
+                LWT_MEMORY / (cfs_num_online_cpus() * CFS_PAGE_SIZE);
 
-       for (i = 0; i < num_online_cpus(); i++)
+       for (i = 0; i < cfs_num_online_cpus(); i++)
                for (j = 0; j < lwt_pages_per_cpu; j++) {
                        struct page *page = alloc_page (GFP_KERNEL);
                        lwt_page_t  *lwtp;
@@ -218,16 +219,16 @@ lwt_init ()
                        memset (lwtp->lwtp_events, 0, CFS_PAGE_SIZE);
 
                        if (j == 0) {
-                               INIT_LIST_HEAD (&lwtp->lwtp_list);
+                               CFS_INIT_LIST_HEAD (&lwtp->lwtp_list);
                                lwt_cpus[i].lwtc_current_page = lwtp;
                        } else {
-                               list_add (&lwtp->lwtp_list,
+                               cfs_list_add (&lwtp->lwtp_list,
                                    &lwt_cpus[i].lwtc_current_page->lwtp_list);
                        }
                 }
 
         lwt_enabled = 1;
-        mb();
+        cfs_mb();
 
         LWT_EVENT(0,0,0,0);
 
@@ -235,24 +236,24 @@ lwt_init ()
 }
 
 void
-lwt_fini () 
+lwt_fini ()
 {
         int    i;
 
         lwt_control(0, 0);
-        
-        for (i = 0; i < num_online_cpus(); i++)
+
+        for (i = 0; i < cfs_num_online_cpus(); i++)
                 while (lwt_cpus[i].lwtc_current_page != NULL) {
                         lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
-                        
-                        if (list_empty (&lwtp->lwtp_list)) {
+
+                        if (cfs_list_empty (&lwtp->lwtp_list)) {
                                 lwt_cpus[i].lwtc_current_page = NULL;
                         } else {
                                 lwt_cpus[i].lwtc_current_page =
-                                        list_entry (lwtp->lwtp_list.next,
-                                                    lwt_page_t, lwtp_list);
+                                        cfs_list_entry (lwtp->lwtp_list.next,
+                                                        lwt_page_t, lwtp_list);
 
-                                list_del (&lwtp->lwtp_list);
+                                cfs_list_del (&lwtp->lwtp_list);
                         }
                         
                         __free_page (lwtp->lwtp_page);
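
The lwt_cpus walks above rely on each CPU's trace pages forming a
circular list: following lwtp_list.next for lwt_pages_per_cpu steps
visits every page exactly once and wraps back to the start.  A hedged
sketch of that traversal:

        static void visit_cpu_pages(lwt_cpu_t *cpu, int pages_per_cpu,
                                    void (*fn)(lwt_page_t *p))
        {
                lwt_page_t *p = cpu->lwtc_current_page;
                int         j;

                for (j = 0; j < pages_per_cpu; j++) {
                        fn(p);
                        p = cfs_list_entry(p->lwtp_list.next,
                                           lwt_page_t, lwtp_list);
                }
        }
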
index a5a8071..f1e7a94 100644 (file)
@@ -187,19 +187,19 @@ static int libcfs_psdev_release(unsigned long flags, void *args)
         RETURN(0);
 }
 
-static struct rw_semaphore ioctl_list_sem;
-static struct list_head ioctl_list;
+static cfs_rw_semaphore_t ioctl_list_sem;
+static cfs_list_t ioctl_list;
 
 int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
 {
         int rc = 0;
 
-        down_write(&ioctl_list_sem);
-        if (!list_empty(&hand->item))
+        cfs_down_write(&ioctl_list_sem);
+        if (!cfs_list_empty(&hand->item))
                 rc = -EBUSY;
         else
-                list_add_tail(&hand->item, &ioctl_list);
-        up_write(&ioctl_list_sem);
+                cfs_list_add_tail(&hand->item, &ioctl_list);
+        cfs_up_write(&ioctl_list_sem);
 
         return rc;
 }
@@ -209,12 +209,12 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
 {
         int rc = 0;
 
-        down_write(&ioctl_list_sem);
-        if (list_empty(&hand->item))
+        cfs_down_write(&ioctl_list_sem);
+        if (cfs_list_empty(&hand->item))
                 rc = -ENOENT;
         else
-                list_del_init(&hand->item);
-        up_write(&ioctl_list_sem);
+                cfs_list_del_init(&hand->item);
+        cfs_up_write(&ioctl_list_sem);
 
         return rc;
 }
@@ -247,9 +247,9 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
                 break;
 
         case IOC_LIBCFS_LWT_SNAPSHOT: {
-                cycles_t   now;
-                int        ncpu;
-                int        total_size;
+                cfs_cycles_t   now;
+                int            ncpu;
+                int            total_size;
 
                 err = lwt_snapshot (&now, &ncpu, &total_size,
                                     data->ioc_pbuf1, data->ioc_plen1);
@@ -309,7 +309,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
         default: {
                 struct libcfs_ioctl_handler *hand;
                 err = -EINVAL;
-                down_read(&ioctl_list_sem);
+                cfs_down_read(&ioctl_list_sem);
                 cfs_list_for_each_entry_typed(hand, &ioctl_list,
                         struct libcfs_ioctl_handler, item) {
                         err = hand->handle_ioctl(cmd, data);
@@ -320,7 +320,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
                                 break;
                         }
                 }
-                up_read(&ioctl_list_sem);
+                cfs_up_read(&ioctl_list_sem);
                 break;
         }
         }
@@ -369,8 +369,8 @@ MODULE_DESCRIPTION("Portals v3.1");
 MODULE_LICENSE("GPL");
 
 extern cfs_psdev_t libcfs_dev;
-extern struct rw_semaphore tracefile_sem;
-extern struct semaphore trace_thread_sem;
+extern cfs_rw_semaphore_t cfs_tracefile_sem;
+extern cfs_semaphore_t cfs_trace_thread_sem;
 
 extern void libcfs_init_nidstrings(void);
 extern int libcfs_arch_init(void);
@@ -382,14 +382,14 @@ static int init_libcfs_module(void)
 
         libcfs_arch_init();
         libcfs_init_nidstrings();
-        init_rwsem(&tracefile_sem);
-        init_mutex(&trace_thread_sem);
-        init_rwsem(&ioctl_list_sem);
+        cfs_init_rwsem(&cfs_tracefile_sem);
+        cfs_init_mutex(&cfs_trace_thread_sem);
+        cfs_init_rwsem(&ioctl_list_sem);
         CFS_INIT_LIST_HEAD(&ioctl_list);
 
         rc = libcfs_debug_init(5 * 1024 * 1024);
         if (rc < 0) {
-                printk(KERN_ERR "LustreError: libcfs_debug_init: %d\n", rc);
+                printk(CFS_KERN_ERR "LustreError: libcfs_debug_init: %d\n", rc);
                 return (rc);
         }
 
@@ -433,7 +433,7 @@ static void exit_libcfs_module(void)
         remove_proc();
 
         CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
-               atomic_read(&libcfs_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
         rc = cfs_psdev_deregister(&libcfs_dev);
         if (rc)
@@ -443,16 +443,17 @@ static void exit_libcfs_module(void)
         lwt_fini();
 #endif
 
-        if (atomic_read(&libcfs_kmemory) != 0)
+        if (cfs_atomic_read(&libcfs_kmemory) != 0)
                 CERROR("Portals memory leaked: %d bytes\n",
-                       atomic_read(&libcfs_kmemory));
+                       cfs_atomic_read(&libcfs_kmemory));
 
         rc = libcfs_debug_cleanup();
         if (rc)
-                printk(KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n", rc);
+                printk(CFS_KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n",
+                       rc);
 
-        fini_rwsem(&ioctl_list_sem);
-        fini_rwsem(&tracefile_sem);
+        cfs_fini_rwsem(&ioctl_list_sem);
+        cfs_fini_rwsem(&cfs_tracefile_sem);
 
         libcfs_arch_cleanup();
 }
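
libcfs_register_ioctl() above keeps handlers on a semaphore-protected
list and refuses any handler whose item is already linked, so clients
must initialize the list head first.  A hedged sketch of a client
(my_handle_ioctl is hypothetical; the field names follow
libcfs_ioctl.h):

        static int my_handle_ioctl(unsigned int cmd,
                                   struct libcfs_ioctl_data *data)
        {
                return -EINVAL;          /* not one of ours */
        }

        static struct libcfs_ioctl_handler my_hand = {
                .item         = CFS_LIST_HEAD_INIT(my_hand.item),
                .handle_ioctl = my_handle_ioctl,
        };

        static int my_init(void)
        {
                return libcfs_register_ioctl(&my_hand);
        }

        static void my_exit(void)
        {
                libcfs_deregister_ioctl(&my_hand);
        }
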
index 3f39930..68c2ebc 100644 (file)
@@ -71,15 +71,15 @@ static char      libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE];
 static int       libcfs_nidstring_idx = 0;
 
 #ifdef __KERNEL__
-static spinlock_t libcfs_nidstring_lock;
+static cfs_spinlock_t libcfs_nidstring_lock;
 
 void libcfs_init_nidstrings (void)
 {
-        spin_lock_init(&libcfs_nidstring_lock);
+        cfs_spin_lock_init(&libcfs_nidstring_lock);
 }
 
-# define NIDSTR_LOCK(f)   spin_lock_irqsave(&libcfs_nidstring_lock, f)
-# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
+# define NIDSTR_LOCK(f)   cfs_spin_lock_irqsave(&libcfs_nidstring_lock, f)
+# define NIDSTR_UNLOCK(f) cfs_spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
 #else
 # define NIDSTR_LOCK(f)   (f=0)                 /* avoid unused var warnings */
 # define NIDSTR_UNLOCK(f) (f=0)
@@ -108,10 +108,10 @@ static int  libcfs_ip_str2addr(const char *str, int nob, __u32 *addr);
 static void libcfs_decnum_addr2str(__u32 addr, char *str);
 static void libcfs_hexnum_addr2str(__u32 addr, char *str);
 static int  libcfs_num_str2addr(const char *str, int nob, __u32 *addr);
-static int  libcfs_ip_parse(char *str, int len, struct list_head *list);
-static int  libcfs_num_parse(char *str, int len, struct list_head *list);
-static int  libcfs_ip_match(__u32 addr, struct list_head *list);
-static int  libcfs_num_match(__u32 addr, struct list_head *list);
+static int  libcfs_ip_parse(char *str, int len, cfs_list_t *list);
+static int  libcfs_num_parse(char *str, int len, cfs_list_t *list);
+static int  libcfs_ip_match(__u32 addr, cfs_list_t *list);
+static int  libcfs_num_match(__u32 addr, cfs_list_t *list);
 
 struct netstrfns {
         int          nf_type;
@@ -120,8 +120,8 @@ struct netstrfns {
         void       (*nf_addr2str)(__u32 addr, char *str);
         int        (*nf_str2addr)(const char *str, int nob, __u32 *addr);
         int        (*nf_parse_addrlist)(char *str, int len,
-                                        struct list_head *list);
-        int        (*nf_match_addr)(__u32 addr, struct list_head *list);
+                                        cfs_list_t *list);
+        int        (*nf_match_addr)(__u32 addr, cfs_list_t *list);
 };
 
 static struct netstrfns  libcfs_netstrfns[] = {
@@ -601,11 +601,11 @@ struct nidrange {
          * Link to list of this structures which is built on nid range
          * list parsing.
          */
-        struct list_head nr_link;
+        cfs_list_t nr_link;
         /**
          * List head for addrrange::ar_link.
          */
-        struct list_head nr_addrranges;
+        cfs_list_t nr_addrranges;
         /**
          * Flag indicating that *@<net> is found.
          */
@@ -627,11 +627,11 @@ struct addrrange {
         /**
          * Link to nidrange::nr_addrranges.
          */
-        struct list_head ar_link;
+        cfs_list_t ar_link;
         /**
          * List head for numaddr_range::nar_link.
          */
-        struct list_head ar_numaddr_ranges;
+        cfs_list_t ar_numaddr_ranges;
 };
 
 /**
@@ -641,11 +641,11 @@ struct numaddr_range {
         /**
          * Link to addrrange::ar_numaddr_ranges.
          */
-        struct list_head nar_link;
+        cfs_list_t nar_link;
         /**
          * List head for range_expr::re_link.
          */
-        struct list_head nar_range_exprs;
+        cfs_list_t nar_range_exprs;
 };
 
 /**
@@ -655,7 +655,7 @@ struct range_expr {
         /**
          * Link to numaddr_range::nar_range_exprs.
          */
-        struct list_head re_link;
+        cfs_list_t re_link;
         __u32 re_lo;
         __u32 re_hi;
         __u32 re_stride;
@@ -830,7 +830,7 @@ failed:
  * \retval 0 otherwise
  */
 static int
-parse_expr_list(struct lstr *str, struct list_head *list,
+parse_expr_list(struct lstr *str, cfs_list_t *list,
                 unsigned min, unsigned max)
 {
         struct lstr res;
@@ -847,7 +847,7 @@ parse_expr_list(struct lstr *str, struct list_head *list,
                 range = parse_range_expr(&res, min, max);
                 if (range == NULL)
                         return 0;
-                list_add_tail(&range->re_link, list);
+                cfs_list_add_tail(&range->re_link, list);
         }
         return 1;
 }
@@ -860,7 +860,7 @@ parse_expr_list(struct lstr *str, struct list_head *list,
  */
 static int
 num_parse(char *str, int len,
-          struct list_head *list, unsigned min, unsigned max)
+          cfs_list_t *list, unsigned min, unsigned max)
 {
         __u32 num;
         struct lstr src;
@@ -872,7 +872,7 @@ num_parse(char *str, int len,
         LIBCFS_ALLOC(numaddr, sizeof(struct numaddr_range));
         if (numaddr == NULL)
                 return 0;
-        list_add_tail(&numaddr->nar_link, list);
+        cfs_list_add_tail(&numaddr->nar_link, list);
         CFS_INIT_LIST_HEAD(&numaddr->nar_range_exprs);
 
         if (libcfs_str2num_check(src.ls_str, src.ls_len, &num, min, max)) {
@@ -885,7 +885,7 @@ num_parse(char *str, int len,
 
                 expr->re_lo = expr->re_hi = num;
                 expr->re_stride = 1;
-                list_add_tail(&expr->re_link, &numaddr->nar_range_exprs);
+                cfs_list_add_tail(&expr->re_link, &numaddr->nar_range_exprs);
                 return 1;
         }
 
@@ -901,7 +901,7 @@ num_parse(char *str, int len,
  * \retval 0 otherwise
  */
 static int
-libcfs_num_parse(char *str, int len, struct list_head *list)
+libcfs_num_parse(char *str, int len, cfs_list_t *list)
 {
         return num_parse(str, len, list, 0, MAX_NUMERIC_VALUE);
 }
@@ -916,7 +916,7 @@ libcfs_num_parse(char *str, int len, struct list_head *list)
  */
 static int
 libcfs_ip_parse(char *str, int len,
-                struct list_head *list)
+                cfs_list_t *list)
 {
         struct lstr src, res;
         int i;
@@ -957,7 +957,7 @@ parse_addrange(const struct lstr *src, struct nidrange *nidrange)
         LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
         if (addrrange == NULL)
                 return 0;
-        list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
+        cfs_list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
         CFS_INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
 
         return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str,
@@ -977,7 +977,7 @@ parse_addrange(const struct lstr *src, struct nidrange *nidrange)
  */
 static struct nidrange *
 add_nidrange(const struct lstr *src,
-             struct list_head *nidlist)
+             cfs_list_t *nidlist)
 {
         struct netstrfns *nf;
         struct nidrange *nr;
@@ -1004,7 +1004,7 @@ add_nidrange(const struct lstr *src,
                         return NULL;
         }
 
-        list_for_each_entry(nr, nidlist, nr_link) {
+        cfs_list_for_each_entry(nr, nidlist, nr_link) {
                 if (nr->nr_netstrfns != nf)
                         continue;
                 if (nr->nr_netnum != netnum)
@@ -1015,7 +1015,7 @@ add_nidrange(const struct lstr *src,
         LIBCFS_ALLOC(nr, sizeof(struct nidrange));
         if (nr == NULL)
                 return NULL;
-        list_add_tail(&nr->nr_link, nidlist);
+        cfs_list_add_tail(&nr->nr_link, nidlist);
         CFS_INIT_LIST_HEAD(&nr->nr_addrranges);
         nr->nr_netstrfns = nf;
         nr->nr_all = 0;
@@ -1031,7 +1031,7 @@ add_nidrange(const struct lstr *src,
  * \retval 0 otherwise
  */
 static int
-parse_nidrange(struct lstr *src, struct list_head *nidlist)
+parse_nidrange(struct lstr *src, cfs_list_t *nidlist)
 {
         struct lstr addrrange, net, tmp;
         struct nidrange *nr;
@@ -1062,13 +1062,13 @@ parse_nidrange(struct lstr *src, struct list_head *nidlist)
  * \retval none
  */
 static void
-free_range_exprs(struct list_head *list)
+free_range_exprs(cfs_list_t *list)
 {
-        struct list_head *pos, *next;
+        cfs_list_t *pos, *next;
 
-        list_for_each_safe(pos, next, list) {
-                list_del(pos);
-                LIBCFS_FREE(list_entry(pos, struct range_expr, re_link),
+        cfs_list_for_each_safe(pos, next, list) {
+                cfs_list_del(pos);
+                LIBCFS_FREE(cfs_list_entry(pos, struct range_expr, re_link),
                             sizeof(struct range_expr));
         }
 }
@@ -1082,15 +1082,15 @@ free_range_exprs(struct list_head *list)
  * \retval none
  */
 static void
-free_numaddr_ranges(struct list_head *list)
+free_numaddr_ranges(cfs_list_t *list)
 {
-        struct list_head *pos, *next;
+        cfs_list_t *pos, *next;
         struct numaddr_range *numaddr;
 
-        list_for_each_safe(pos, next, list) {
-                numaddr = list_entry(pos, struct numaddr_range, nar_link);
+        cfs_list_for_each_safe(pos, next, list) {
+                numaddr = cfs_list_entry(pos, struct numaddr_range, nar_link);
                 free_range_exprs(&numaddr->nar_range_exprs);
-                list_del(pos);
+                cfs_list_del(pos);
                 LIBCFS_FREE(numaddr, sizeof(struct numaddr_range));
         }
 }
@@ -1104,15 +1104,15 @@ free_numaddr_ranges(struct list_head *list)
  * \retval none
  */
 static void
-free_addrranges(struct list_head *list)
+free_addrranges(cfs_list_t *list)
 {
-        struct list_head *pos, *next;
+        cfs_list_t *pos, *next;
         struct addrrange *ar;
 
-        list_for_each_safe(pos, next, list) {
-                ar = list_entry(pos, struct addrrange, ar_link);
+        cfs_list_for_each_safe(pos, next, list) {
+                ar = cfs_list_entry(pos, struct addrrange, ar_link);
                 free_numaddr_ranges(&ar->ar_numaddr_ranges);
-                list_del(pos);
+                cfs_list_del(pos);
                 LIBCFS_FREE(ar, sizeof(struct addrrange));
         }
 }
@@ -1126,15 +1126,15 @@ free_addrranges(struct list_head *list)
  * \retval none
  */
 void
-cfs_free_nidlist(struct list_head *list)
+cfs_free_nidlist(cfs_list_t *list)
 {
-        struct list_head *pos, *next;
+        cfs_list_t *pos, *next;
         struct nidrange *nr;
 
-        list_for_each_safe(pos, next, list) {
-                nr = list_entry(pos, struct nidrange, nr_link);
+        cfs_list_for_each_safe(pos, next, list) {
+                nr = cfs_list_entry(pos, struct nidrange, nr_link);
                 free_addrranges(&nr->nr_addrranges);
-                list_del(pos);
+                cfs_list_del(pos);
                 LIBCFS_FREE(nr, sizeof(struct nidrange));
         }
 }
@@ -1153,7 +1153,7 @@ cfs_free_nidlist(struct list_head *list)
  * \retval 0 otherwise
  */
 int
-cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
+cfs_parse_nidlist(char *str, int len, cfs_list_t *nidlist)
 {
         struct lstr src, res;
         int rc;
@@ -1186,18 +1186,19 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
  * \retval 0 otherwise
  */
 static int
-match_numaddr(__u32 addr, struct list_head *list, int shift, __u32 mask)
+match_numaddr(__u32 addr, cfs_list_t *list, int shift, __u32 mask)
 {
         struct numaddr_range *numaddr;
         struct range_expr *expr;
         int ip, ok;
         ENTRY;
 
-        list_for_each_entry(numaddr, list, nar_link) {
+        cfs_list_for_each_entry(numaddr, list, nar_link) {
                 ip = (addr >> shift) & mask;
                 shift -= 8;
                 ok = 0;
-                list_for_each_entry(expr, &numaddr->nar_range_exprs, re_link) {
+                cfs_list_for_each_entry(expr, &numaddr->nar_range_exprs,
+                                        re_link) {
                         if (ip >= expr->re_lo &&
                             ip <= expr->re_hi &&
                             ((ip - expr->re_lo) % expr->re_stride) == 0) {
@@ -1218,7 +1219,7 @@ match_numaddr(__u32 addr, struct list_head *list, int shift, __u32 mask)
  * \retval 0 otherwise
  */
 static int
-libcfs_num_match(__u32 addr, struct list_head *numaddr)
+libcfs_num_match(__u32 addr, cfs_list_t *numaddr)
 {
         return match_numaddr(addr, numaddr, 0, 0xffffffff);
 }
@@ -1230,7 +1231,7 @@ libcfs_num_match(__u32 addr, struct list_head *numaddr)
  * \retval 0 otherwise
  */
 static int
-libcfs_ip_match(__u32 addr, struct list_head *numaddr)
+libcfs_ip_match(__u32 addr, cfs_list_t *numaddr)
 {
         return match_numaddr(addr, numaddr, 24, 0xff);
 }
@@ -1243,20 +1244,20 @@ libcfs_ip_match(__u32 addr, struct list_head *numaddr)
  * \retval 1 on match
  * \retval 0  otherwise
  */
-int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
+int cfs_match_nid(lnet_nid_t nid, cfs_list_t *nidlist)
 {
         struct nidrange *nr;
         struct addrrange *ar;
         ENTRY;
 
-        list_for_each_entry(nr, nidlist, nr_link) {
+        cfs_list_for_each_entry(nr, nidlist, nr_link) {
                 if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
                         continue;
                 if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
                         continue;
                 if (nr->nr_all)
                         RETURN(1);
-                list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
+                cfs_list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
                         if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
                                                        &ar->ar_numaddr_ranges))
                                 RETURN(1);
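
A hedged usage sketch tying the renamed nidlist API above together:
parse a range string, match a NID against the parsed list, then free it
(the range string is illustrative; per the comments above,
cfs_parse_nidlist returns 1 on success):

        static int nid_allowed(lnet_nid_t nid)
        {
                char buf[] = "192.168.0.[1-10]@tcp";
                CFS_LIST_HEAD(nidlist);
                int  rc;

                if (cfs_parse_nidlist(buf, strlen(buf), &nidlist) == 0)
                        return 0;                   /* parse failure */

                rc = cfs_match_nid(nid, &nidlist);
                cfs_free_nidlist(&nidlist);
                return rc;
        }
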
index 6a8a4f2..6ca0bb2 100644 (file)
@@ -65,8 +65,8 @@ static char source_nid[sizeof(tmp_utsname->nodename)];
 #endif /* HAVE_CATAMOUNT_DATA_H */
 
 static int source_pid;
-int smp_processor_id = 1;
-char debug_file_path[1024];
+int cfs_smp_processor_id = 1;
+char libcfs_debug_file_path[1024];
 FILE *debug_file_fd;
 
 int portals_do_debug_dumplog(void *arg)
@@ -176,15 +176,17 @@ int libcfs_debug_init(unsigned long bufsize)
 
         debug_filename = getenv("LIBLUSTRE_DEBUG_BASE");
         if (debug_filename)
-                strncpy(debug_file_path,debug_filename,sizeof(debug_file_path));
+                strncpy(libcfs_debug_file_path, debug_filename,
+                        sizeof(libcfs_debug_file_path));
 
         debug_filename = getenv("LIBLUSTRE_DEBUG_FILE");
         if (debug_filename)
                 strncpy(debug_file_name,debug_filename,sizeof(debug_file_name));
 
-        if (debug_file_name[0] == '\0' && debug_file_path[0] != '\0')
+        if (debug_file_name[0] == '\0' && libcfs_debug_file_path[0] != '\0')
                 snprintf(debug_file_name, sizeof(debug_file_name) - 1,
-                         "%s-%s-"CFS_TIME_T".log", debug_file_path, source_nid, time(0));
+                         "%s-%s-"CFS_TIME_T".log", libcfs_debug_file_path,
+                         source_nid, time(0));
 
         if (strcmp(debug_file_name, "stdout") == 0 ||
             strcmp(debug_file_name, "-") == 0) {
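
A userspace sketch of the environment-driven log naming above, assuming
the same LIBLUSTRE_DEBUG_BASE / LIBLUSTRE_DEBUG_FILE precedence (the
real code also embeds source_nid in the name, elided here):

        #include <stdio.h>
        #include <stdlib.h>
        #include <time.h>

        int main(void)
        {
                char name[1024] = "";
                const char *base = getenv("LIBLUSTRE_DEBUG_BASE");
                const char *file = getenv("LIBLUSTRE_DEBUG_FILE");

                if (file != NULL)
                        snprintf(name, sizeof(name), "%s", file);
                else if (base != NULL)
                        snprintf(name, sizeof(name), "%s-%ld.log",
                                 base, (long)time(NULL));

                printf("debug log: %s\n", name[0] ? name : "stdout");
                return 0;
        }
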
index 4da6f81..5a29dfa 100644 (file)
 #include <libcfs/libcfs.h>
 
 /* XXX move things up to the top, comment */
-union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
+union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cacheline_aligned;
 
-char tracefile[TRACEFILE_NAME_SIZE];
-long long tracefile_size = TRACEFILE_SIZE;
+char cfs_tracefile[TRACEFILE_NAME_SIZE];
+long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
 static struct tracefiled_ctl trace_tctl;
-struct semaphore trace_thread_sem;
+cfs_semaphore_t cfs_trace_thread_sem;
 static int thread_running = 0;
 
-atomic_t tage_allocated = ATOMIC_INIT(0);
+cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
 
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
-                                         struct trace_cpu_data *tcd);
+                                         struct cfs_trace_cpu_data *tcd);
 
-static inline struct trace_page *tage_from_list(struct list_head *list)
+static inline struct cfs_trace_page *
+cfs_tage_from_list(cfs_list_t *list)
 {
-        return list_entry(list, struct trace_page, linkage);
+        return cfs_list_entry(list, struct cfs_trace_page, linkage);
 }
 
-static struct trace_page *tage_alloc(int gfp)
+static struct cfs_trace_page *cfs_tage_alloc(int gfp)
 {
-        cfs_page_t        *page;
-        struct trace_page *tage;
+        cfs_page_t            *page;
+        struct cfs_trace_page *tage;
 
         /*
          * Don't spam console with allocation failures: they will be reported
@@ -86,30 +87,31 @@ static struct trace_page *tage_alloc(int gfp)
         }
 
         tage->page = page;
-        atomic_inc(&tage_allocated);
+        cfs_atomic_inc(&cfs_tage_allocated);
         return tage;
 }
 
-static void tage_free(struct trace_page *tage)
+static void cfs_tage_free(struct cfs_trace_page *tage)
 {
         __LASSERT(tage != NULL);
         __LASSERT(tage->page != NULL);
 
         cfs_free_page(tage->page);
         cfs_free(tage);
-        atomic_dec(&tage_allocated);
+        cfs_atomic_dec(&cfs_tage_allocated);
 }
 
-static void tage_to_tail(struct trace_page *tage, struct list_head *queue)
+static void cfs_tage_to_tail(struct cfs_trace_page *tage,
+                             cfs_list_t *queue)
 {
         __LASSERT(tage != NULL);
         __LASSERT(queue != NULL);
 
-        list_move_tail(&tage->linkage, queue);
+        cfs_list_move_tail(&tage->linkage, queue);
 }
 
-int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
-                       struct list_head *stock)
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+                           cfs_list_t *stock)
 {
         int i;
 
@@ -119,39 +121,39 @@ int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
          */
 
         for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) {
-                struct trace_page *tage;
+                struct cfs_trace_page *tage;
 
-                tage = tage_alloc(gfp);
+                tage = cfs_tage_alloc(gfp);
                 if (tage == NULL)
                         break;
-                list_add_tail(&tage->linkage, stock);
+                cfs_list_add_tail(&tage->linkage, stock);
         }
         return i;
 }
 
 /* return a page that has 'len' bytes left at the end */
-static struct trace_page *trace_get_tage_try(struct trace_cpu_data *tcd,
-                                             unsigned long len)
+static struct cfs_trace_page *
+cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
 {
-        struct trace_page *tage;
+        struct cfs_trace_page *tage;
 
         if (tcd->tcd_cur_pages > 0) {
-                __LASSERT(!list_empty(&tcd->tcd_pages));
-                tage = tage_from_list(tcd->tcd_pages.prev);
+                __LASSERT(!cfs_list_empty(&tcd->tcd_pages));
+                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                 if (tage->used + len <= CFS_PAGE_SIZE)
                         return tage;
         }
 
         if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                 if (tcd->tcd_cur_stock_pages > 0) {
-                        tage = tage_from_list(tcd->tcd_stock_pages.prev);
+                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                         -- tcd->tcd_cur_stock_pages;
-                        list_del_init(&tage->linkage);
+                        cfs_list_del_init(&tage->linkage);
                 } else {
-                        tage = tage_alloc(CFS_ALLOC_ATOMIC);
+                        tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC);
                         if (tage == NULL) {
                                 if (printk_ratelimit())
-                                        printk(KERN_WARNING
+                                        printk(CFS_KERN_WARNING
                                                "cannot allocate a tage (%ld)\n",
                                        tcd->tcd_cur_pages);
                                 return NULL;
@@ -159,9 +161,9 @@ static struct trace_page *trace_get_tage_try(struct trace_cpu_data *tcd,
                 }
 
                 tage->used = 0;
-                tage->cpu = smp_processor_id();
+                tage->cpu = cfs_smp_processor_id();
                 tage->type = tcd->tcd_type;
-                list_add_tail(&tage->linkage, &tcd->tcd_pages);
+                cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
                 tcd->tcd_cur_pages++;
 
                 if (tcd->tcd_cur_pages > 8 && thread_running) {
@@ -176,12 +178,12 @@ static struct trace_page *trace_get_tage_try(struct trace_cpu_data *tcd,
         return NULL;
 }
 
-static void tcd_shrink(struct trace_cpu_data *tcd)
+static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
 {
         int pgcount = tcd->tcd_cur_pages / 10;
         struct page_collection pc;
-        struct trace_page *tage;
-        struct trace_page *tmp;
+        struct cfs_trace_page *tage;
+        struct cfs_trace_page *tmp;
 
         /*
          * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
@@ -189,29 +191,29 @@ static void tcd_shrink(struct trace_cpu_data *tcd)
          */
 
         if (printk_ratelimit())
-                printk(KERN_WARNING "debug daemon buffer overflowed; "
+                printk(CFS_KERN_WARNING "debug daemon buffer overflowed; "
                        "discarding 10%% of pages (%d of %ld)\n",
                        pgcount + 1, tcd->tcd_cur_pages);
 
         CFS_INIT_LIST_HEAD(&pc.pc_pages);
-        spin_lock_init(&pc.pc_lock);
+        cfs_spin_lock_init(&pc.pc_lock);
 
         cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
-                                           struct trace_page, linkage) {
+                                           struct cfs_trace_page, linkage) {
                 if (pgcount-- == 0)
                         break;
 
-                list_move_tail(&tage->linkage, &pc.pc_pages);
+                cfs_list_move_tail(&tage->linkage, &pc.pc_pages);
                 tcd->tcd_cur_pages--;
         }
         put_pages_on_tcd_daemon_list(&pc, tcd);
 }
 
 /* return a page that has 'len' bytes left at the end */
-static struct trace_page *trace_get_tage(struct trace_cpu_data *tcd,
-                                         unsigned long len)
+static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
+                                                 unsigned long len)
 {
-        struct trace_page *tage;
+        struct cfs_trace_page *tage;
 
         /*
          * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
@@ -219,20 +221,20 @@ static struct trace_page *trace_get_tage(struct trace_cpu_data *tcd,
          */
 
         if (len > CFS_PAGE_SIZE) {
-                printk(KERN_ERR
+                printk(CFS_KERN_ERR
                        "cowardly refusing to write %lu bytes in a page\n", len);
                 return NULL;
         }
 
-        tage = trace_get_tage_try(tcd, len);
+        tage = cfs_trace_get_tage_try(tcd, len);
         if (tage != NULL)
                 return tage;
         if (thread_running)
-                tcd_shrink(tcd);
+                cfs_tcd_shrink(tcd);
         if (tcd->tcd_cur_pages > 0) {
-                tage = tage_from_list(tcd->tcd_pages.next);
+                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                 tage->used = 0;
-                tage_to_tail(tage, &tcd->tcd_pages);
+                cfs_tage_to_tail(tage, &tcd->tcd_pages);
         }
         return tage;
 }
@@ -242,32 +244,32 @@ int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask,
                        const char *format1, va_list args,
                        const char *format2, ...)
 {
-        struct trace_cpu_data   *tcd = NULL;
-        struct ptldebug_header   header;
-        struct trace_page       *tage;
+        struct cfs_trace_cpu_data *tcd = NULL;
+        struct ptldebug_header     header;
+        struct cfs_trace_page     *tage;
         /* string_buf is used only if tcd != NULL, and is always set then */
-        char                    *string_buf = NULL;
-        char                    *debug_buf;
-        int                      known_size;
-        int                      needed = 85; /* average message length */
-        int                      max_nob;
-        va_list                  ap;
-        int                      depth;
-        int                      i;
-        int                      remain;
+        char                      *string_buf = NULL;
+        char                      *debug_buf;
+        int                        known_size;
+        int                        needed = 85; /* average message length */
+        int                        max_nob;
+        va_list                    ap;
+        int                        depth;
+        int                        i;
+        int                        remain;
 
         if (strchr(file, '/'))
                 file = strrchr(file, '/') + 1;
 
 
-        set_ptldebug_header(&header, subsys, mask, line, CDEBUG_STACK());
+        cfs_set_ptldebug_header(&header, subsys, mask, line, CDEBUG_STACK());
 
-        tcd = trace_get_tcd();
+        tcd = cfs_trace_get_tcd();
         if (tcd == NULL)                /* arch may not log in IRQ context */
                 goto console;
 
         if (tcd->tcd_shutting_down) {
-                trace_put_tcd(tcd);
+                cfs_trace_put_tcd(tcd);
                 tcd = NULL;
                 goto console;
         }
@@ -286,12 +288,12 @@ int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask,
          * if needed is too small for this format.
          */
         for (i = 0; i < 2; i++) {
-                tage = trace_get_tage(tcd, needed + known_size + 1);
+                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                 if (tage == NULL) {
                         if (needed + known_size > CFS_PAGE_SIZE)
                                 mask |= D_ERROR;
 
-                        trace_put_tcd(tcd);
+                        cfs_trace_put_tcd(tcd);
                         tcd = NULL;
                         goto console;
                 }
@@ -301,9 +303,10 @@ int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask,
 
                 max_nob = CFS_PAGE_SIZE - tage->used - known_size;
                 if (max_nob <= 0) {
-                        printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
+                        printk(CFS_KERN_EMERG "negative max_nob: %i\n",
+                               max_nob);
                         mask |= D_ERROR;
-                        trace_put_tcd(tcd);
+                        cfs_trace_put_tcd(tcd);
                         tcd = NULL;
                         goto console;
                 }
@@ -331,7 +334,7 @@ int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask,
         }
 
         if (*(string_buf+needed-1) != '\n')
-                printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
+                printk(CFS_KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
                        file, line, fn);
 
         header.ph_len = known_size + needed;
@@ -368,7 +371,7 @@ console:
         if ((mask & libcfs_printk) == 0) {
                 /* no console output requested */
                 if (tcd != NULL)
-                        trace_put_tcd(tcd);
+                        cfs_trace_put_tcd(tcd);
                 return 1;
         }
 
@@ -379,7 +382,7 @@ console:
                         /* skipping a console message */
                         cdls->cdls_count++;
                         if (tcd != NULL)
-                                trace_put_tcd(tcd);
+                                cfs_trace_put_tcd(tcd);
                         return 1;
                 }
 
@@ -402,42 +405,45 @@ console:
         }
 
         if (tcd != NULL) {
-                print_to_console(&header, mask, string_buf, needed, file, fn);
-                trace_put_tcd(tcd);
+                cfs_print_to_console(&header, mask, string_buf, needed, file,
+                                     fn);
+                cfs_trace_put_tcd(tcd);
         } else {
-                string_buf = trace_get_console_buffer();
+                string_buf = cfs_trace_get_console_buffer();
 
                 needed = 0;
                 if (format1 != NULL) {
                         va_copy(ap, args);
-                        needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap);
+                        needed = vsnprintf(string_buf,
+                                           CFS_TRACE_CONSOLE_BUFFER_SIZE,
+                                           format1, ap);
                         va_end(ap);
                 }
                 if (format2 != NULL) {
-                        remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
+                        remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
                         if (remain > 0) {
                                 va_start(ap, format2);
                                 needed += vsnprintf(string_buf+needed, remain, format2, ap);
                                 va_end(ap);
                         }
                 }
-                print_to_console(&header, mask,
-                                 string_buf, needed, file, fn);
+                cfs_print_to_console(&header, mask,
+                                     string_buf, needed, file, fn);
 
-                trace_put_console_buffer(string_buf);
+                cfs_trace_put_console_buffer(string_buf);
         }
 
         if (cdls != NULL && cdls->cdls_count != 0) {
-                string_buf = trace_get_console_buffer();
+                string_buf = cfs_trace_get_console_buffer();
 
-                needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
+                needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                          "Skipped %d previous similar message%s\n",
                          cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");
 
-                print_to_console(&header, mask,
+                cfs_print_to_console(&header, mask,
                                  string_buf, needed, file, fn);
 
-                trace_put_console_buffer(string_buf);
+                cfs_trace_put_console_buffer(string_buf);
                 cdls->cdls_count = 0;
         }
 
@@ -457,19 +463,19 @@ libcfs_assertion_failed(const char *expr, const char *file,
 EXPORT_SYMBOL(libcfs_assertion_failed);
 
 void
-trace_assertion_failed(const char *str,
-                       const char *fn, const char *file, int line)
+cfs_trace_assertion_failed(const char *str,
+                           const char *fn, const char *file, int line)
 {
         struct ptldebug_header hdr;
 
         libcfs_panic_in_progress = 1;
         libcfs_catastrophe = 1;
-        mb();
+        cfs_mb();
 
-        set_ptldebug_header(&hdr, DEBUG_SUBSYSTEM, D_EMERG, line,
-                            CDEBUG_STACK());
+        cfs_set_ptldebug_header(&hdr, DEBUG_SUBSYSTEM, D_EMERG, line,
+                                CDEBUG_STACK());
 
-        print_to_console(&hdr, D_EMERG, str, strlen(str), file, fn);
+        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str), file, fn);
 
         LIBCFS_PANIC("Lustre debug assertion failure\n");
 
@@ -482,18 +488,19 @@ panic_collect_pages(struct page_collection *pc)
         /* Do the collect_pages job on a single CPU: assumes that all other
          * CPUs have been stopped during a panic.  If this isn't true for some
          * arch, this will have to be implemented separately in each arch.  */
-        int                    i;
-        int                    j;
-        struct trace_cpu_data *tcd;
+        int                        i;
+        int                        j;
+        struct cfs_trace_cpu_data *tcd;
 
         CFS_INIT_LIST_HEAD(&pc->pc_pages);
 
-        tcd_for_each(tcd, i, j) {
-                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+        cfs_tcd_for_each(tcd, i, j) {
+                cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                 tcd->tcd_cur_pages = 0;
 
                 if (pc->pc_want_daemon_pages) {
-                        list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
+                        cfs_list_splice_init(&tcd->tcd_daemon_pages,
+                                             &pc->pc_pages);
                         tcd->tcd_cur_daemon_pages = 0;
                 }
         }
@@ -501,22 +508,22 @@ panic_collect_pages(struct page_collection *pc)
 
 static void collect_pages_on_all_cpus(struct page_collection *pc)
 {
-        struct trace_cpu_data *tcd;
+        struct cfs_trace_cpu_data *tcd;
         int i, cpu;
 
-        spin_lock(&pc->pc_lock);
-        for_each_possible_cpu(cpu) {
-                tcd_for_each_type_lock(tcd, i, cpu) {
-                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+        cfs_spin_lock(&pc->pc_lock);
+        cfs_for_each_possible_cpu(cpu) {
+                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
+                        cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                         tcd->tcd_cur_pages = 0;
                         if (pc->pc_want_daemon_pages) {
-                                list_splice_init(&tcd->tcd_daemon_pages,
-                                                 &pc->pc_pages);
+                                cfs_list_splice_init(&tcd->tcd_daemon_pages,
+                                                     &pc->pc_pages);
                                 tcd->tcd_cur_daemon_pages = 0;
                         }
                 }
         }
-        spin_unlock(&pc->pc_lock);
+        cfs_spin_unlock(&pc->pc_lock);
 }
 
 static void collect_pages(struct page_collection *pc)
@@ -531,20 +538,20 @@ static void collect_pages(struct page_collection *pc)
 
 static void put_pages_back_on_all_cpus(struct page_collection *pc)
 {
-        struct trace_cpu_data *tcd;
-        struct list_head *cur_head;
-        struct trace_page *tage;
-        struct trace_page *tmp;
+        struct cfs_trace_cpu_data *tcd;
+        cfs_list_t *cur_head;
+        struct cfs_trace_page *tage;
+        struct cfs_trace_page *tmp;
         int i, cpu;
 
-        spin_lock(&pc->pc_lock);
-        for_each_possible_cpu(cpu) {
-                tcd_for_each_type_lock(tcd, i, cpu) {
+        cfs_spin_lock(&pc->pc_lock);
+        cfs_for_each_possible_cpu(cpu) {
+                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         cur_head = tcd->tcd_pages.next;
 
                         cfs_list_for_each_entry_safe_typed(tage, tmp,
                                                            &pc->pc_pages,
-                                                           struct trace_page,
+                                                           struct cfs_trace_page,
                                                            linkage) {
 
                                 __LASSERT_TAGE_INVARIANT(tage);
@@ -552,12 +559,12 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc)
                                 if (tage->cpu != cpu || tage->type != i)
                                         continue;
 
-                                tage_to_tail(tage, cur_head);
+                                cfs_tage_to_tail(tage, cur_head);
                                 tcd->tcd_cur_pages++;
                         }
                 }
         }
-        spin_unlock(&pc->pc_lock);
+        cfs_spin_unlock(&pc->pc_lock);
 }
 
 static void put_pages_back(struct page_collection *pc)
@@ -571,62 +578,62 @@ static void put_pages_back(struct page_collection *pc)
  * if we have been steadily writing (and otherwise discarding) pages via the
  * debug daemon. */
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
-                                         struct trace_cpu_data *tcd)
+                                         struct cfs_trace_cpu_data *tcd)
 {
-        struct trace_page *tage;
-        struct trace_page *tmp;
+        struct cfs_trace_page *tage;
+        struct cfs_trace_page *tmp;
 
-        spin_lock(&pc->pc_lock);
+        cfs_spin_lock(&pc->pc_lock);
         cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
-                                           struct trace_page, linkage) {
+                                           struct cfs_trace_page, linkage) {
 
                 __LASSERT_TAGE_INVARIANT(tage);
 
                 if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                         continue;
 
-                tage_to_tail(tage, &tcd->tcd_daemon_pages);
+                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                 tcd->tcd_cur_daemon_pages++;
 
                 if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
-                        struct trace_page *victim;
+                        struct cfs_trace_page *victim;
 
-                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
-                        victim = tage_from_list(tcd->tcd_daemon_pages.next);
+                        __LASSERT(!cfs_list_empty(&tcd->tcd_daemon_pages));
+                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
 
                         __LASSERT_TAGE_INVARIANT(victim);
 
-                        list_del(&victim->linkage);
-                        tage_free(victim);
+                        cfs_list_del(&victim->linkage);
+                        cfs_tage_free(victim);
                         tcd->tcd_cur_daemon_pages--;
                 }
         }
-        spin_unlock(&pc->pc_lock);
+        cfs_spin_unlock(&pc->pc_lock);
 }
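
The cap above is a small LRU: pages are appended at the tail of
tcd_daemon_pages, and once tcd_cur_daemon_pages passes tcd_max_pages the
oldest page at the list head is freed. A self-contained replica of that
discipline using BSD TAILQ macros (all names here are illustrative, not
libcfs API):

        #include <stdio.h>
        #include <stdlib.h>
        #include <sys/queue.h>

        struct page_rec {
                int id;
                TAILQ_ENTRY(page_rec) linkage;
        };

        TAILQ_HEAD(page_list, page_rec);

        int main(void)
        {
                struct page_list pages = TAILQ_HEAD_INITIALIZER(pages);
                unsigned long cur = 0, max = 3;
                int i;

                for (i = 0; i < 5; i++) {
                        struct page_rec *rec = malloc(sizeof(*rec));

                        rec->id = i;
                        TAILQ_INSERT_TAIL(&pages, rec, linkage);
                        if (++cur > max) {
                                /* evict the least recently added entry,
                                 * as cfs_tage_free() does above */
                                struct page_rec *victim = TAILQ_FIRST(&pages);

                                TAILQ_REMOVE(&pages, victim, linkage);
                                free(victim);
                                cur--;
                        }
                }

                struct page_rec *rec;
                TAILQ_FOREACH(rec, &pages, linkage)
                        printf("kept page %d\n", rec->id);  /* 2, 3, 4 */
                return 0;
        }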
 
 static void put_pages_on_daemon_list(struct page_collection *pc)
 {
-        struct trace_cpu_data *tcd;
+        struct cfs_trace_cpu_data *tcd;
         int i, cpu;
 
-        for_each_possible_cpu(cpu) {
-                tcd_for_each_type_lock(tcd, i, cpu)
+        cfs_for_each_possible_cpu(cpu) {
+                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                         put_pages_on_tcd_daemon_list(pc, tcd);
         }
 }
 
-void trace_debug_print(void)
+void cfs_trace_debug_print(void)
 {
         struct page_collection pc;
-        struct trace_page *tage;
-        struct trace_page *tmp;
+        struct cfs_trace_page *tage;
+        struct cfs_trace_page *tmp;
 
-        spin_lock_init(&pc.pc_lock);
+        cfs_spin_lock_init(&pc.pc_lock);
 
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
         cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
-                                           struct trace_page, linkage) {
+                                           struct cfs_trace_page, linkage) {
                 char *p, *file, *fn;
                 cfs_page_t *page;
 
@@ -645,41 +652,42 @@ void trace_debug_print(void)
                         p += strlen(fn) + 1;
                         len = hdr->ph_len - (int)(p - (char *)hdr);
 
-                        print_to_console(hdr, D_EMERG, p, len, file, fn);
+                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);
 
                         p += len;
                 }
 
-                list_del(&tage->linkage);
-                tage_free(tage);
+                cfs_list_del(&tage->linkage);
+                cfs_tage_free(tage);
         }
 }
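
The pointer walk above implies this on-page record layout, inferred from the
code rather than a documented format: a ptldebug_header, the NUL-terminated
source file name, the NUL-terminated function name, then the message text,
with ph_len covering the whole record. A hypothetical walk over one record,
given a char pointer p at its start:

        struct ptldebug_header *hdr = (struct ptldebug_header *)p;
        char *file = (char *)(hdr + 1);         /* NUL-terminated file name */
        char *fn   = file + strlen(file) + 1;   /* NUL-terminated function  */
        char *msg  = fn + strlen(fn) + 1;       /* start of message text    */
        int   len  = hdr->ph_len - (int)(msg - (char *)hdr);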
 
-int tracefile_dump_all_pages(char *filename)
+int cfs_tracefile_dump_all_pages(char *filename)
 {
         struct page_collection pc;
         cfs_file_t *filp;
-        struct trace_page *tage;
-        struct trace_page *tmp;
+        struct cfs_trace_page *tage;
+        struct cfs_trace_page *tmp;
         int rc;
 
         CFS_DECL_MMSPACE;
 
-        tracefile_write_lock();
+        cfs_tracefile_write_lock();
 
         filp = cfs_filp_open(filename,
                              O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600, &rc);
         if (!filp) {
                 if (rc != -EEXIST)
-                        printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
+                        printk(CFS_KERN_ERR
+                               "LustreError: can't open %s for dump: rc %d\n",
                                filename, rc);
                 goto out;
         }
 
-        spin_lock_init(&pc.pc_lock);
+        cfs_spin_lock_init(&pc.pc_lock);
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
-        if (list_empty(&pc.pc_pages)) {
+        if (cfs_list_empty(&pc.pc_pages)) {
                 rc = 0;
                 goto close;
         }
@@ -688,62 +696,62 @@ int tracefile_dump_all_pages(char *filename)
          * iobufs with the pages and calling generic_direct_IO */
         CFS_MMSPACE_OPEN;
         cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
-                                           struct trace_page, linkage) {
+                                           struct cfs_trace_page, linkage) {
 
                 __LASSERT_TAGE_INVARIANT(tage);
 
                 rc = cfs_filp_write(filp, cfs_page_address(tage->page),
                                     tage->used, cfs_filp_poff(filp));
                 if (rc != (int)tage->used) {
-                        printk(KERN_WARNING "wanted to write %u but wrote "
+                        printk(CFS_KERN_WARNING "wanted to write %u but wrote "
                                "%d\n", tage->used, rc);
                         put_pages_back(&pc);
-                        __LASSERT(list_empty(&pc.pc_pages));
+                        __LASSERT(cfs_list_empty(&pc.pc_pages));
                         break;
                 }
-                list_del(&tage->linkage);
-                tage_free(tage);
+                cfs_list_del(&tage->linkage);
+                cfs_tage_free(tage);
         }
         CFS_MMSPACE_CLOSE;
         rc = cfs_filp_fsync(filp);
         if (rc)
-                printk(KERN_ERR "sync returns %d\n", rc);
+                printk(CFS_KERN_ERR "sync returns %d\n", rc);
  close:
         cfs_filp_close(filp);
  out:
-        tracefile_write_unlock();
+        cfs_tracefile_write_unlock();
         return rc;
 }
 
-void trace_flush_pages(void)
+void cfs_trace_flush_pages(void)
 {
         struct page_collection pc;
-        struct trace_page *tage;
-        struct trace_page *tmp;
+        struct cfs_trace_page *tage;
+        struct cfs_trace_page *tmp;
 
-        spin_lock_init(&pc.pc_lock);
+        cfs_spin_lock_init(&pc.pc_lock);
 
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
         cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
-                                           struct trace_page, linkage) {
+                                           struct cfs_trace_page, linkage) {
 
                 __LASSERT_TAGE_INVARIANT(tage);
 
-                list_del(&tage->linkage);
-                tage_free(tage);
+                cfs_list_del(&tage->linkage);
+                cfs_tage_free(tage);
         }
 }
 
-int trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
-                        const char *usr_buffer, int usr_buffer_nob)
+int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
+                            const char *usr_buffer, int usr_buffer_nob)
 {
         int    nob;
 
         if (usr_buffer_nob > knl_buffer_nob)
                 return -EOVERFLOW;
 
-        if (copy_from_user((void *)knl_buffer,
+        if (cfs_copy_from_user((void *)knl_buffer,
                            (void *)usr_buffer, usr_buffer_nob))
                 return -EFAULT;
 
@@ -762,8 +770,8 @@ int trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
         return 0;
 }
 
-int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
-                         const char *knl_buffer, char *append)
+int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+                             const char *knl_buffer, char *append)
 {
         /* NB if 'append' != NULL, it's a single character to append to the
          * copied out string - usually "\n", for /proc entries and "" (i.e. a
@@ -773,11 +781,11 @@ int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
         if (nob > usr_buffer_nob)
                 nob = usr_buffer_nob;
 
-        if (copy_to_user(usr_buffer, knl_buffer, nob))
+        if (cfs_copy_to_user(usr_buffer, knl_buffer, nob))
                 return -EFAULT;
 
         if (append != NULL && nob < usr_buffer_nob) {
-                if (copy_to_user(usr_buffer + nob, append, 1))
+                if (cfs_copy_to_user(usr_buffer + nob, append, 1))
                         return -EFAULT;
 
                 nob++;
@@ -785,9 +793,9 @@ int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
 
         return nob;
 }
-EXPORT_SYMBOL(trace_copyout_string);
+EXPORT_SYMBOL(cfs_trace_copyout_string);
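
A hypothetical call site for the routine above, following the contract in its
comment: copy a kernel-side string out to user space and append the trailing
newline a /proc reader expects. example_show_state, usr_buf and usr_nob are
illustrative:

        static int example_show_state(char *usr_buf, int usr_nob)
        {
                const char *state = "running";  /* illustrative payload */

                /* returns the byte count copied out, including the
                 * appended '\n', or -EFAULT on a bad user address */
                return cfs_trace_copyout_string(usr_buf, usr_nob,
                                                state, "\n");
        }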
 
-int trace_allocate_string_buffer(char **str, int nob)
+int cfs_trace_allocate_string_buffer(char **str, int nob)
 {
         if (nob > 2 * CFS_PAGE_SIZE)            /* string must be "sensible" */
                 return -EINVAL;
@@ -799,22 +807,22 @@ int trace_allocate_string_buffer(char **str, int nob)
         return 0;
 }
 
-void trace_free_string_buffer(char *str, int nob)
+void cfs_trace_free_string_buffer(char *str, int nob)
 {
         cfs_free(str);
 }
 
-int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
 {
         char         *str;
         int           rc;
 
-        rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
+        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
         if (rc != 0)
                 return rc;
 
-        rc = trace_copyin_string(str, usr_str_nob + 1,
-                                 usr_str, usr_str_nob);
+        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
+                                     usr_str, usr_str_nob);
         if (rc != 0)
                 goto out;
 
@@ -824,124 +832,125 @@ int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
                 goto out;
         }
 #endif
-        rc = tracefile_dump_all_pages(str);
+        rc = cfs_tracefile_dump_all_pages(str);
 out:
-        trace_free_string_buffer(str, usr_str_nob + 1);
+        cfs_trace_free_string_buffer(str, usr_str_nob + 1);
         return rc;
 }
 
-int trace_daemon_command(char *str)
+int cfs_trace_daemon_command(char *str)
 {
         int       rc = 0;
 
-        tracefile_write_lock();
+        cfs_tracefile_write_lock();
 
         if (strcmp(str, "stop") == 0) {
-                tracefile_write_unlock();
-                trace_stop_thread();
-                tracefile_write_lock();
-                memset(tracefile, 0, sizeof(tracefile));
+                cfs_tracefile_write_unlock();
+                cfs_trace_stop_thread();
+                cfs_tracefile_write_lock();
+                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
 
         } else if (strncmp(str, "size=", 5) == 0) {
-                tracefile_size = simple_strtoul(str + 5, NULL, 0);
-                if (tracefile_size < 10 || tracefile_size > 20480)
-                        tracefile_size = TRACEFILE_SIZE;
+                cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
+                if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
+                        cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                 else
-                        tracefile_size <<= 20;
+                        cfs_tracefile_size <<= 20;
 
-        } else if (strlen(str) >= sizeof(tracefile)) {
+        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                 rc = -ENAMETOOLONG;
 #ifndef __WINNT__
         } else if (str[0] != '/') {
                 rc = -EINVAL;
 #endif
         } else {
-                strcpy(tracefile, str);
+                strcpy(cfs_tracefile, str);
 
-                printk(KERN_INFO "Lustre: debug daemon will attempt to start writing "
-                       "to %s (%lukB max)\n", tracefile,
-                       (long)(tracefile_size >> 10));
+                printk(CFS_KERN_INFO
+                       "Lustre: debug daemon will attempt to start writing "
+                       "to %s (%lukB max)\n", cfs_tracefile,
+                       (long)(cfs_tracefile_size >> 10));
 
-                trace_start_thread();
+                cfs_trace_start_thread();
         }
 
-        tracefile_write_unlock();
+        cfs_tracefile_write_unlock();
         return rc;
 }
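
Worked through in standard C (strtoul standing in for the kernel's
simple_strtoul), the "size=" branch above reads a value in megabytes, falls
back to the default outside the 10..20480 range, and shifts in-range values
into bytes:

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                const char *str = "size=64";
                unsigned long long size = strtoul(str + 5, NULL, 0);

                if (size < 10 || size > 20480)
                        size = 500ULL << 20;    /* CFS_TRACEFILE_SIZE */
                else
                        size <<= 20;            /* MB -> bytes */

                printf("%llu bytes\n", size);   /* prints 67108864 */
                return 0;
        }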
 
-int trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
 {
         char *str;
         int   rc;
 
-        rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
+        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
         if (rc != 0)
                 return rc;
 
-        rc = trace_copyin_string(str, usr_str_nob + 1,
+        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                  usr_str, usr_str_nob);
         if (rc == 0)
-                rc = trace_daemon_command(str);
+                rc = cfs_trace_daemon_command(str);
 
-        trace_free_string_buffer(str, usr_str_nob + 1);
+        cfs_trace_free_string_buffer(str, usr_str_nob + 1);
         return rc;
 }
 
-int trace_set_debug_mb(int mb)
+int cfs_trace_set_debug_mb(int mb)
 {
         int i;
         int j;
         int pages;
-        int limit = trace_max_debug_mb();
-        struct trace_cpu_data *tcd;
+        int limit = cfs_trace_max_debug_mb();
+        struct cfs_trace_cpu_data *tcd;
 
-        if (mb < num_possible_cpus())
+        if (mb < cfs_num_possible_cpus())
                 return -EINVAL;
 
         if (mb > limit) {
-                printk(KERN_ERR "Lustre: Refusing to set debug buffer size to "
-                       "%dMB - limit is %d\n", mb, limit);
+                printk(CFS_KERN_ERR "Lustre: Refusing to set debug buffer size "
+                       "to %dMB - limit is %d\n", mb, limit);
                 return -EINVAL;
         }
 
-        mb /= num_possible_cpus();
+        mb /= cfs_num_possible_cpus();
         pages = mb << (20 - CFS_PAGE_SHIFT);
 
-        tracefile_write_lock();
+        cfs_tracefile_write_lock();
 
-        tcd_for_each(tcd, i, j)
+        cfs_tcd_for_each(tcd, i, j)
                 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
 
-        tracefile_write_unlock();
+        cfs_tracefile_write_unlock();
 
         return 0;
 }
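
The sizing arithmetic above, evaluated once under assumed values: 4 possible
CPUs, 4 KiB pages (so CFS_PAGE_SHIFT is 12), and a tcd_pages_factor of 80.
The real numbers are platform- and configuration-dependent:

        #include <stdio.h>

        int main(void)
        {
                int mb = 400;           /* requested total debug memory  */
                int ncpus = 4;          /* cfs_num_possible_cpus()       */
                int page_shift = 12;    /* CFS_PAGE_SHIFT                */
                int factor = 80;        /* tcd->tcd_pages_factor         */
                int pages, max_pages;

                mb /= ncpus;                      /* 100 MB per CPU      */
                pages = mb << (20 - page_shift);  /* 25600 4 KiB pages   */
                max_pages = pages * factor / 100;

                printf("tcd_max_pages = %d\n", max_pages);  /* 20480 */
                return 0;
        }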
 
-int trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
 {
         char     str[32];
         int      rc;
 
-        rc = trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
+        rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
         if (rc < 0)
                 return rc;
 
-        return trace_set_debug_mb(simple_strtoul(str, NULL, 0));
+        return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
 }
 
-int trace_get_debug_mb(void)
+int cfs_trace_get_debug_mb(void)
 {
         int i;
         int j;
-        struct trace_cpu_data *tcd;
+        struct cfs_trace_cpu_data *tcd;
         int total_pages = 0;
 
-        tracefile_read_lock();
+        cfs_tracefile_read_lock();
 
-        tcd_for_each(tcd, i, j)
+        cfs_tcd_for_each(tcd, i, j)
                 total_pages += tcd->tcd_max_pages;
 
-        tracefile_read_unlock();
+        cfs_tracefile_read_unlock();
 
         return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
 }
@@ -950,8 +959,8 @@ static int tracefiled(void *arg)
 {
         struct page_collection pc;
         struct tracefiled_ctl *tctl = arg;
-        struct trace_page *tage;
-        struct trace_page *tmp;
+        struct cfs_trace_page *tage;
+        struct cfs_trace_page *tmp;
         struct ptldebug_header *hdr;
         cfs_file_t *filp;
         int last_loop = 0;
@@ -963,50 +972,51 @@ static int tracefiled(void *arg)
         /* this is so broken in uml?  what on earth is going on? */
         cfs_daemonize("ktracefiled");
 
-        spin_lock_init(&pc.pc_lock);
-        complete(&tctl->tctl_start);
+        cfs_spin_lock_init(&pc.pc_lock);
+        cfs_complete(&tctl->tctl_start);
 
         while (1) {
                 cfs_waitlink_t __wait;
 
                 pc.pc_want_daemon_pages = 0;
                 collect_pages(&pc);
-                if (list_empty(&pc.pc_pages))
+                if (cfs_list_empty(&pc.pc_pages))
                         goto end_loop;
 
                 filp = NULL;
-                tracefile_read_lock();
-                if (tracefile[0] != 0) {
-                        filp = cfs_filp_open(tracefile,
+                cfs_tracefile_read_lock();
+                if (cfs_tracefile[0] != 0) {
+                        filp = cfs_filp_open(cfs_tracefile,
                                              O_CREAT | O_RDWR | O_LARGEFILE,
                                              0600, &rc);
                         if (!(filp))
-                                printk(KERN_WARNING "couldn't open %s: %d\n",
-                                       tracefile, rc);
+                                printk(CFS_KERN_WARNING "couldn't open %s: "
+                                       "%d\n", cfs_tracefile, rc);
                 }
-                tracefile_read_unlock();
+                cfs_tracefile_read_unlock();
                 if (filp == NULL) {
                         put_pages_on_daemon_list(&pc);
-                        __LASSERT(list_empty(&pc.pc_pages));
+                        __LASSERT(cfs_list_empty(&pc.pc_pages));
                         goto end_loop;
                 }
 
                 CFS_MMSPACE_OPEN;
 
                 /* mark the first header, so we can sort in chunks */
-                tage = tage_from_list(pc.pc_pages.next);
+                tage = cfs_tage_from_list(pc.pc_pages.next);
                 __LASSERT_TAGE_INVARIANT(tage);
 
                 hdr = cfs_page_address(tage->page);
                 hdr->ph_flags |= PH_FLAG_FIRST_RECORD;
 
                 cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
-                                                   struct trace_page, linkage) {
+                                                   struct cfs_trace_page,
+                                                   linkage) {
                         static loff_t f_pos;
 
                         __LASSERT_TAGE_INVARIANT(tage);
 
-                        if (f_pos >= (off_t)tracefile_size)
+                        if (f_pos >= (off_t)cfs_tracefile_size)
                                 f_pos = 0;
                         else if (f_pos > (off_t)cfs_filp_size(filp))
                                 f_pos = cfs_filp_size(filp);
@@ -1014,38 +1024,41 @@ static int tracefiled(void *arg)
                         rc = cfs_filp_write(filp, cfs_page_address(tage->page),
                                             tage->used, &f_pos);
                         if (rc != (int)tage->used) {
-                                printk(KERN_WARNING "wanted to write %u but "
-                                       "wrote %d\n", tage->used, rc);
+                                printk(CFS_KERN_WARNING "wanted to write %u "
+                                       "but wrote %d\n", tage->used, rc);
                                 put_pages_back(&pc);
-                                __LASSERT(list_empty(&pc.pc_pages));
+                                __LASSERT(cfs_list_empty(&pc.pc_pages));
                         }
                 }
                 CFS_MMSPACE_CLOSE;
 
                 cfs_filp_close(filp);
                 put_pages_on_daemon_list(&pc);
-                if (!list_empty(&pc.pc_pages)) {
+                if (!cfs_list_empty(&pc.pc_pages)) {
                         int i;
 
-                        printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
-                        printk(KERN_ERR "total cpus(%d): ", num_possible_cpus());
-                        for (i = 0; i < num_possible_cpus(); i++)
-                                if (cpu_online(i))
-                                        printk(KERN_ERR "%d(on) ", i);
+                        printk(CFS_KERN_ALERT "Lustre: trace pages aren't "
+                               "empty\n");
+                        printk(CFS_KERN_ERR "total cpus(%d): ",
+                               cfs_num_possible_cpus());
+                        for (i = 0; i < cfs_num_possible_cpus(); i++)
+                                if (cfs_cpu_online(i))
+                                        printk(CFS_KERN_ERR "%d(on) ", i);
                                 else
-                                        printk(KERN_ERR "%d(off) ", i);
-                        printk(KERN_ERR "\n");
+                                        printk(CFS_KERN_ERR "%d(off) ", i);
+                        printk(CFS_KERN_ERR "\n");
 
                         i = 0;
-                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
-                                                 linkage)
-                                printk(KERN_ERR "page %d belongs to cpu %d\n",
-                                       ++i, tage->cpu);
-                        printk(KERN_ERR "There are %d pages unwritten\n", i);
+                        cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
+                                                     linkage)
+                                printk(CFS_KERN_ERR "page %d belongs to cpu "
+                                       "%d\n", ++i, tage->cpu);
+                        printk(CFS_KERN_ERR "There are %d pages unwritten\n",
+                               i);
                 }
-                __LASSERT(list_empty(&pc.pc_pages));
+                __LASSERT(cfs_list_empty(&pc.pc_pages));
 end_loop:
-                if (atomic_read(&tctl->tctl_shutdown)) {
+                if (cfs_atomic_read(&tctl->tctl_shutdown)) {
                         if (last_loop == 0) {
                                 last_loop = 1;
                                 continue;
@@ -1055,68 +1068,69 @@ end_loop:
                 }
                 cfs_waitlink_init(&__wait);
                 cfs_waitq_add(&tctl->tctl_waitq, &__wait);
-                set_current_state(TASK_INTERRUPTIBLE);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                 cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1));
                 cfs_waitq_del(&tctl->tctl_waitq, &__wait);
         }
-        complete(&tctl->tctl_stop);
+        cfs_complete(&tctl->tctl_stop);
         return 0;
 }
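
The static f_pos handling in the write loop above makes the trace file a
ring: once the offset reaches cfs_tracefile_size it wraps to zero, so the
newest records overwrite the oldest. The same decision in isolation, with
illustrative names:

        static long long next_pos(long long f_pos, long long file_size,
                                  long long max_size)
        {
                if (f_pos >= max_size)  /* wrap: overwrite oldest data */
                        return 0;
                if (f_pos > file_size)  /* don't seek past end of file */
                        return file_size;
                return f_pos;
        }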
 
-int trace_start_thread(void)
+int cfs_trace_start_thread(void)
 {
         struct tracefiled_ctl *tctl = &trace_tctl;
         int rc = 0;
 
-        mutex_down(&trace_thread_sem);
+        cfs_mutex_down(&cfs_trace_thread_sem);
         if (thread_running)
                 goto out;
 
-        init_completion(&tctl->tctl_start);
-        init_completion(&tctl->tctl_stop);
+        cfs_init_completion(&tctl->tctl_start);
+        cfs_init_completion(&tctl->tctl_stop);
         cfs_waitq_init(&tctl->tctl_waitq);
-        atomic_set(&tctl->tctl_shutdown, 0);
+        cfs_atomic_set(&tctl->tctl_shutdown, 0);
 
         if (cfs_kernel_thread(tracefiled, tctl, 0) < 0) {
                 rc = -ECHILD;
                 goto out;
         }
 
-        wait_for_completion(&tctl->tctl_start);
+        cfs_wait_for_completion(&tctl->tctl_start);
         thread_running = 1;
 out:
-        mutex_up(&trace_thread_sem);
+        cfs_mutex_up(&cfs_trace_thread_sem);
         return rc;
 }
 
-void trace_stop_thread(void)
+void cfs_trace_stop_thread(void)
 {
         struct tracefiled_ctl *tctl = &trace_tctl;
 
-        mutex_down(&trace_thread_sem);
+        cfs_mutex_down(&cfs_trace_thread_sem);
         if (thread_running) {
-                printk(KERN_INFO "Lustre: shutting down debug daemon thread...\n");
-                atomic_set(&tctl->tctl_shutdown, 1);
-                wait_for_completion(&tctl->tctl_stop);
+                printk(CFS_KERN_INFO
+                       "Lustre: shutting down debug daemon thread...\n");
+                cfs_atomic_set(&tctl->tctl_shutdown, 1);
+                cfs_wait_for_completion(&tctl->tctl_stop);
                 thread_running = 0;
         }
-        mutex_up(&trace_thread_sem);
+        cfs_mutex_up(&cfs_trace_thread_sem);
 }
 
-int tracefile_init(int max_pages)
+int cfs_tracefile_init(int max_pages)
 {
-        struct trace_cpu_data *tcd;
+        struct cfs_trace_cpu_data *tcd;
         int                    i;
         int                    j;
         int                    rc;
         int                    factor;
 
-        rc = tracefile_init_arch();
+        rc = cfs_tracefile_init_arch();
         if (rc != 0)
                 return rc;
 
-        tcd_for_each(tcd, i, j) {
+        cfs_tcd_for_each(tcd, i, j) {
                /* tcd_pages_factor is initialized in tracefile_init_arch. */
                 factor = tcd->tcd_pages_factor;
                 CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
@@ -1135,23 +1149,23 @@ int tracefile_init(int max_pages)
 
 static void trace_cleanup_on_all_cpus(void)
 {
-        struct trace_cpu_data *tcd;
-        struct trace_page *tage;
-        struct trace_page *tmp;
+        struct cfs_trace_cpu_data *tcd;
+        struct cfs_trace_page *tage;
+        struct cfs_trace_page *tmp;
         int i, cpu;
 
-        for_each_possible_cpu(cpu) {
-                tcd_for_each_type_lock(tcd, i, cpu) {
+        cfs_for_each_possible_cpu(cpu) {
+                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         tcd->tcd_shutting_down = 1;
 
                         cfs_list_for_each_entry_safe_typed(tage, tmp,
                                                            &tcd->tcd_pages,
-                                                           struct trace_page,
+                                                           struct cfs_trace_page,
                                                            linkage) {
                                 __LASSERT_TAGE_INVARIANT(tage);
 
-                                list_del(&tage->linkage);
-                                tage_free(tage);
+                                cfs_list_del(&tage->linkage);
+                                cfs_tage_free(tage);
                         }
 
                         tcd->tcd_cur_pages = 0;
@@ -1159,20 +1173,20 @@ static void trace_cleanup_on_all_cpus(void)
         }
 }
 
-static void trace_cleanup(void)
+static void cfs_trace_cleanup(void)
 {
         struct page_collection pc;
 
         CFS_INIT_LIST_HEAD(&pc.pc_pages);
-        spin_lock_init(&pc.pc_lock);
+        cfs_spin_lock_init(&pc.pc_lock);
 
         trace_cleanup_on_all_cpus();
 
-        tracefile_fini_arch();
+        cfs_tracefile_fini_arch();
 }
 
-void tracefile_exit(void)
+void cfs_tracefile_exit(void)
 {
-        trace_stop_thread();
-        trace_cleanup();
+        cfs_trace_stop_thread();
+        cfs_trace_cleanup();
 }
index 6bfdaaf..5794d33 100644
 /* trace file lock routines */
 
 #define TRACEFILE_NAME_SIZE 1024
-extern char      tracefile[TRACEFILE_NAME_SIZE];
-extern long long tracefile_size;
+extern char      cfs_tracefile[TRACEFILE_NAME_SIZE];
+extern long long cfs_tracefile_size;
 
 extern void libcfs_run_debug_log_upcall(char *file);
 
-int  tracefile_init_arch(void);
-void tracefile_fini_arch(void);
-
-void tracefile_read_lock(void);
-void tracefile_read_unlock(void);
-void tracefile_write_lock(void);
-void tracefile_write_unlock(void);
-
-int tracefile_dump_all_pages(char *filename);
-void trace_debug_print(void);
-void trace_flush_pages(void);
-int trace_start_thread(void);
-void trace_stop_thread(void);
-int tracefile_init(int max_pages);
-void tracefile_exit(void);
-
-
-
-int trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
-                        const char *usr_buffer, int usr_buffer_nob);
-int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
-                         const char *knl_str, char *append);
-int trace_allocate_string_buffer(char **str, int nob);
-void trace_free_string_buffer(char *str, int nob);
-int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob);
-int trace_daemon_command(char *str);
-int trace_daemon_command_usrstr(void *usr_str, int usr_str_nob);
-int trace_set_debug_mb(int mb);
-int trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob);
-int trace_get_debug_mb(void);
+int  cfs_tracefile_init_arch(void);
+void cfs_tracefile_fini_arch(void);
+
+void cfs_tracefile_read_lock(void);
+void cfs_tracefile_read_unlock(void);
+void cfs_tracefile_write_lock(void);
+void cfs_tracefile_write_unlock(void);
+
+int cfs_tracefile_dump_all_pages(char *filename);
+void cfs_trace_debug_print(void);
+void cfs_trace_flush_pages(void);
+int cfs_trace_start_thread(void);
+void cfs_trace_stop_thread(void);
+int cfs_tracefile_init(int max_pages);
+void cfs_tracefile_exit(void);
+
+
+
+int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
+                            const char *usr_buffer, int usr_buffer_nob);
+int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+                             const char *knl_str, char *append);
+int cfs_trace_allocate_string_buffer(char **str, int nob);
+void cfs_trace_free_string_buffer(char *str, int nob);
+int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_daemon_command(char *str);
+int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_set_debug_mb(int mb);
+int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_get_debug_mb(void);
 
 extern void libcfs_debug_dumplog_internal(void *arg);
 extern void libcfs_register_panic_notifier(void);
 extern void libcfs_unregister_panic_notifier(void);
 extern int  libcfs_panic_in_progress;
-extern int  trace_max_debug_mb(void);
+extern int  cfs_trace_max_debug_mb(void);
 
 #define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
-#define TRACEFILE_SIZE (500 << 20)
+#define CFS_TRACEFILE_SIZE (500 << 20)
 
 #ifdef LUSTRE_TRACEFILE_PRIVATE
 
@@ -104,14 +104,14 @@ extern int  trace_max_debug_mb(void);
 #define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 
-#define TRACEFILE_SIZE (500 << 20)
+#define CFS_TRACEFILE_SIZE (500 << 20)
 
 /* Size of a buffer for sprintf-ing console messages if we can't get a page
  * from the system */
-#define TRACE_CONSOLE_BUFFER_SIZE   1024
+#define CFS_TRACE_CONSOLE_BUFFER_SIZE   1024
 
-union trace_data_union {
-       struct trace_cpu_data {
+union cfs_trace_data_union {
+       struct cfs_trace_cpu_data {
                /*
                 * Even though this structure is meant to be per-CPU, locking
                 * is needed because in some places the data may be accessed
@@ -119,13 +119,13 @@ union trace_data_union {
                 * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
                 * tcd_for_each_type_lock
                 */
-               spinlock_t              tcd_lock;
+               cfs_spinlock_t          tcd_lock;
                unsigned long           tcd_lock_flags;
 
                /*
                 * pages with trace records not yet processed by tracefiled.
                 */
-               struct list_head        tcd_pages;
+               cfs_list_t              tcd_pages;
                /* number of pages on ->tcd_pages */
                unsigned long           tcd_cur_pages;
 
@@ -139,7 +139,7 @@ union trace_data_union {
                 * (put_pages_on_daemon_list()). LRU pages from this list are
                 * discarded when list grows too large.
                 */
-               struct list_head        tcd_daemon_pages;
+               cfs_list_t              tcd_daemon_pages;
                /* number of pages on ->tcd_daemon_pages */
                unsigned long           tcd_cur_daemon_pages;
 
@@ -173,7 +173,7 @@ union trace_data_union {
                 * TCD_STOCK_PAGES pagesful are consumed by trace records all
                 * emitted in non-blocking contexts. Which is quite unlikely.
                 */
-               struct list_head        tcd_stock_pages;
+               cfs_list_t              tcd_stock_pages;
                /* number of pages on ->tcd_stock_pages */
                unsigned long           tcd_cur_stock_pages;
 
@@ -183,26 +183,27 @@ union trace_data_union {
                /* The factors to share debug memory. */
                unsigned short          tcd_pages_factor;
        } tcd;
-       char __pad[L1_CACHE_ALIGN(sizeof(struct trace_cpu_data))];
+       char __pad[CFS_L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
 };
 
 #define TCD_MAX_TYPES      8
-extern union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS];
+extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
 
-#define tcd_for_each(tcd, i, j)                                       \
-    for (i = 0; trace_data[i] != NULL; i++)                           \
-        for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd);               \
-             j < num_possible_cpus(); j++, (tcd) = &(*trace_data[i])[j].tcd)
+#define cfs_tcd_for_each(tcd, i, j)                                       \
+    for (i = 0; cfs_trace_data[i] != NULL; i++)                           \
+        for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);               \
+             j < cfs_num_possible_cpus();                                 \
+             j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
 
-#define tcd_for_each_type_lock(tcd, i, cpu)                           \
-    for (i = 0; trace_data[i] &&                                      \
-         (tcd = &(*trace_data[i])[cpu].tcd) &&                        \
-         trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)
+#define cfs_tcd_for_each_type_lock(tcd, i, cpu)                           \
+    for (i = 0; cfs_trace_data[i] &&                                      \
+         (tcd = &(*cfs_trace_data[i])[cpu].tcd) &&                        \
+         cfs_trace_lock_tcd(tcd); cfs_trace_unlock_tcd(tcd), i++)
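
A hypothetical caller of the renamed iterators, mirroring what
cfs_trace_get_debug_mb() does in tracefile.c: cfs_tcd_for_each visits every
(buffer type, CPU) pair without locking, while cfs_tcd_for_each_type_lock
visits one CPU's buffers with cfs_trace_lock_tcd() held around each body
execution:

        struct cfs_trace_cpu_data *tcd;
        unsigned long total = 0;
        int i, j;

        cfs_tcd_for_each(tcd, i, j)     /* every type, every cpu */
                total += tcd->tcd_max_pages;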
 
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
 struct page_collection {
-       struct list_head        pc_pages;
+       cfs_list_t              pc_pages;
        /*
         * spin-lock protecting ->pc_pages. It is taken by smp_call_function()
         * call-back functions. XXX nikita: Which is horrible: all processors
@@ -210,7 +211,7 @@ struct page_collection {
         * lock. Probably ->pc_pages should be replaced with an array of
         * NR_CPUS elements accessed locklessly.
         */
-       spinlock_t              pc_lock;
+       cfs_spinlock_t          pc_lock;
        /*
         * if this flag is set, collect_pages() will spill both
         * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
@@ -222,11 +223,11 @@ struct page_collection {
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
 struct tracefiled_ctl {
-       struct completion       tctl_start;
-       struct completion       tctl_stop;
-       cfs_waitq_t             tctl_waitq;
-       pid_t                   tctl_pid;
-       atomic_t                tctl_shutdown;
+       cfs_completion_t       tctl_start;
+       cfs_completion_t       tctl_stop;
+       cfs_waitq_t            tctl_waitq;
+       pid_t                  tctl_pid;
+       cfs_atomic_t           tctl_shutdown;
 };
 
 /*
@@ -234,38 +235,39 @@ struct tracefiled_ctl {
  */
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
-struct trace_page {
+struct cfs_trace_page {
        /*
         * page itself
         */
-       cfs_page_t      *page;
+       cfs_page_t          *page;
        /*
         * linkage into one of the lists in trace_data_union or
         * page_collection
         */
-       struct list_head linkage;
+       cfs_list_t           linkage;
        /*
         * number of bytes used within this page
         */
-       unsigned int     used;
+       unsigned int         used;
        /*
         * cpu that owns this page
         */
-       unsigned short   cpu;
+       unsigned short       cpu;
        /*
         * type(context) of this page
         */
-       unsigned short   type;
+       unsigned short       type;
 };
 
-extern void set_ptldebug_header(struct ptldebug_header *header,
-                          int subsys, int mask, const int line,
-                          unsigned long stack);
-extern void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
-                            int len, const char *file, const char *fn);
+extern void cfs_set_ptldebug_header(struct ptldebug_header *header,
+                                    int subsys, int mask, const int line,
+                                    unsigned long stack);
+extern void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+                                 const char *buf, int len, const char *file,
+                                 const char *fn);
 
-extern int trace_lock_tcd(struct trace_cpu_data *tcd);
-extern void trace_unlock_tcd(struct trace_cpu_data *tcd);
+extern int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd);
+extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd);
 
 /**
  * trace_buf_type_t, trace_buf_idx_get() and trace_console_buffers[][]
@@ -274,56 +276,58 @@ extern void trace_unlock_tcd(struct trace_cpu_data *tcd);
  * (see, for example, linux-tracefile.h).
  */
 
-extern char *trace_console_buffers[NR_CPUS][TCD_TYPE_MAX];
-extern trace_buf_type_t trace_buf_idx_get(void);
+extern char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
+extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
 
 static inline char *
-trace_get_console_buffer(void)
+cfs_trace_get_console_buffer(void)
 {
-        return trace_console_buffers[cfs_get_cpu()][trace_buf_idx_get()];
+        unsigned int i = cfs_get_cpu();
+        unsigned int j = cfs_trace_buf_idx_get();
+
+        return cfs_trace_console_buffers[i][j];
 }
 
 static inline void
-trace_put_console_buffer(char *buffer)
+cfs_trace_put_console_buffer(char *buffer)
 {
         cfs_put_cpu();
 }
 
-extern union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS];
-
-static inline struct trace_cpu_data *
-trace_get_tcd(void)
+static inline struct cfs_trace_cpu_data *
+cfs_trace_get_tcd(void)
 {
-       struct trace_cpu_data *tcd =
-                &(*trace_data[trace_buf_idx_get()])[cfs_get_cpu()].tcd;
+       struct cfs_trace_cpu_data *tcd =
+                &(*cfs_trace_data[cfs_trace_buf_idx_get()])[cfs_get_cpu()].tcd;
 
-       trace_lock_tcd(tcd);
+       cfs_trace_lock_tcd(tcd);
 
        return tcd;
 }
 
 static inline void
-trace_put_tcd (struct trace_cpu_data *tcd)
+cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
 {
-       trace_unlock_tcd(tcd);
+       cfs_trace_unlock_tcd(tcd);
 
        cfs_put_cpu();
 }
 
-int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
-                      struct list_head *stock);
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+                           cfs_list_t *stock);
 
 
-int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage);
+int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
+                      struct cfs_trace_page *tage);
 
-extern void trace_assertion_failed(const char *str, const char *fn,
-                                  const char *file, int line);
+extern void cfs_trace_assertion_failed(const char *str, const char *fn,
+                                       const char *file, int line);
 
 /* ASSERTION that is safe to use within the debug system */
 #define __LASSERT(cond)                                                 \
     do {                                                                \
         if (unlikely(!(cond))) {                                        \
-                trace_assertion_failed("ASSERTION("#cond") failed",     \
+                cfs_trace_assertion_failed("ASSERTION("#cond") failed", \
                                  __FUNCTION__, __FILE__, __LINE__);     \
         }                                                               \
     } while (0)
index 7eb265b..4a266d3 100644
@@ -40,8 +40,8 @@
 
 #define OFF_BY_START(start)     ((start)/BITS_PER_LONG)
 
-unsigned long find_next_bit(unsigned long *addr,
-                            unsigned long size, unsigned long offset)
+unsigned long cfs_find_next_bit(unsigned long *addr,
+                                unsigned long size, unsigned long offset)
 {
         unsigned long *word, *last;
         unsigned long first_bit, bit, base;
@@ -55,7 +55,7 @@ unsigned long find_next_bit(unsigned long *addr,
                 return size;
         if (first_bit != 0) {
                 int tmp = (*word++) & (~0UL << first_bit);
-                bit = __ffs(tmp);
+                bit = __cfs_ffs(tmp);
                 if (bit < BITS_PER_LONG)
                         goto found;
                 word++;
@@ -63,7 +63,7 @@ unsigned long find_next_bit(unsigned long *addr,
         }
         while (word <= last) {
                 if (*word != 0UL) {
-                        bit = __ffs(*word);
+                        bit = __cfs_ffs(*word);
                         goto found;
                 }
                 word++;
@@ -74,8 +74,8 @@ found:
         return base + bit;
 }
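
The usual caller idiom for the function above is a scan loop over every set
bit. A sketch, assuming bitmap is an unsigned long array and size its length
in bits:

        unsigned long bit;

        for (bit = cfs_find_next_bit(bitmap, size, 0);
             bit < size;
             bit = cfs_find_next_bit(bitmap, size, bit + 1))
                printf("bit %lu is set\n", bit);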
 
-unsigned long find_next_zero_bit(unsigned long *addr,
-                                 unsigned long size, unsigned long offset)
+unsigned long cfs_find_next_zero_bit(unsigned long *addr,
+                                     unsigned long size, unsigned long offset)
 {
         unsigned long *word, *last;
         unsigned long first_bit, bit, base;
@@ -89,7 +89,7 @@ unsigned long find_next_zero_bit(unsigned long *addr,
                 return size;
         if (first_bit != 0) {
                 int tmp = (*word++) & (~0UL << first_bit);
-                bit = __ffz(tmp);
+                bit = __cfs_ffz(tmp);
                 if (bit < BITS_PER_LONG)
                         goto found;
                 word++;
@@ -97,7 +97,7 @@ unsigned long find_next_zero_bit(unsigned long *addr,
         }
         while (word <= last) {
                 if (*word != ~0UL) {
-                        bit = __ffz(*word);
+                        bit = __cfs_ffz(*word);
                         goto found;
                 }
                 word++;
index 0a3471a..ab5cf6e 100644
  * No-op implementation.
  */
 
-void spin_lock_init(spinlock_t *lock)
+void cfs_spin_lock_init(cfs_spinlock_t *lock)
 {
         LASSERT(lock != NULL);
         (void)lock;
 }
 
-void spin_lock(spinlock_t *lock)
+void cfs_spin_lock(cfs_spinlock_t *lock)
 {
         (void)lock;
 }
 
-void spin_unlock(spinlock_t *lock)
+void cfs_spin_unlock(cfs_spinlock_t *lock)
 {
         (void)lock;
 }
 
-int spin_trylock(spinlock_t *lock)
+int cfs_spin_trylock(cfs_spinlock_t *lock)
 {
         (void)lock;
        return 1;
 }
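
These no-ops exist so that source shared between the kernel and the
single-threaded userspace build can take locks unconditionally; the same call
sites resolve to real spinlocks in the kernel. A hypothetical shared call
site (foo_lock and foo_count are illustrative, with cfs_spin_lock_init()
assumed at setup):

        static cfs_spinlock_t foo_lock;
        static int foo_count;

        void foo_inc(void)
        {
                cfs_spin_lock(&foo_lock);   /* no-op here; a real     */
                foo_count++;                /* spinlock in the kernel */
                cfs_spin_unlock(&foo_lock);
        }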
 
-void spin_lock_bh_init(spinlock_t *lock)
+void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
 {
         LASSERT(lock != NULL);
         (void)lock;
 }
 
-void spin_lock_bh(spinlock_t *lock)
+void cfs_spin_lock_bh(cfs_spinlock_t *lock)
 {
         LASSERT(lock != NULL);
         (void)lock;
 }
 
-void spin_unlock_bh(spinlock_t *lock)
+void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
 {
         LASSERT(lock != NULL);
         (void)lock;
@@ -119,20 +119,20 @@ void spin_unlock_bh(spinlock_t *lock)
  * - __up(x)
  */
 
-void sema_init(struct semaphore *s, int val)
+void cfs_sema_init(cfs_semaphore_t *s, int val)
 {
         LASSERT(s != NULL);
         (void)s;
         (void)val;
 }
 
-void __down(struct semaphore *s)
+void __down(cfs_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-void __up(struct semaphore *s)
+void __up(cfs_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
@@ -149,46 +149,46 @@ void __up(struct semaphore *s)
 
 static cfs_wait_handler_t wait_handler;
 
-void init_completion_module(cfs_wait_handler_t handler)
+void cfs_init_completion_module(cfs_wait_handler_t handler)
 {
         wait_handler = handler;
 }
 
-int call_wait_handler(int timeout)
+int cfs_call_wait_handler(int timeout)
 {
         if (!wait_handler)
                 return -ENOSYS;
         return wait_handler(timeout);
 }
 
-void init_completion(struct completion *c)
+void cfs_init_completion(cfs_completion_t *c)
 {
         LASSERT(c != NULL);
         c->done = 0;
         cfs_waitq_init(&c->wait);
 }
 
-void complete(struct completion *c)
+void cfs_complete(cfs_completion_t *c)
 {
         LASSERT(c != NULL);
         c->done  = 1;
         cfs_waitq_signal(&c->wait);
 }
 
-void wait_for_completion(struct completion *c)
+void cfs_wait_for_completion(cfs_completion_t *c)
 {
         LASSERT(c != NULL);
         do {
-                if (call_wait_handler(1000) < 0)
+                if (cfs_call_wait_handler(1000) < 0)
                         break;
         } while (c->done == 0);
 }
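
This completion has no second thread to wake it, so it polls: the loop above
repeatedly hands control to whatever handler was registered through
cfs_init_completion_module(), in 1000 ms slices, until done is set. A
hypothetical registration; my_event_pump is illustrative:

        /* illustrative single-threaded event pump; returning a negative
         * value aborts the wait loop above */
        static int my_event_pump(int timeout_ms)
        {
                /* ... make network/timer progress, up to timeout_ms ... */
                return 0;
        }

        static void example_setup(void)
        {
                cfs_init_completion_module(my_event_pump);
        }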
 
-int wait_for_completion_interruptible(struct completion *c)
+int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
 {
         LASSERT(c != NULL);
         do {
-                if (call_wait_handler(1000) < 0)
+                if (cfs_call_wait_handler(1000) < 0)
                         break;
         } while (c->done == 0);
         return 0;
@@ -205,51 +205,51 @@ int wait_for_completion_interruptible(struct completion *c)
  * - up_write(x)
  */
 
-void init_rwsem(struct rw_semaphore *s)
+void cfs_init_rwsem(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-void down_read(struct rw_semaphore *s)
+void cfs_down_read(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-int down_read_trylock(struct rw_semaphore *s)
+int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
        return 1;
 }
 
-void down_write(struct rw_semaphore *s)
+void cfs_down_write(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-int down_write_trylock(struct rw_semaphore *s)
+int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
        return 1;
 }
 
-void up_read(struct rw_semaphore *s)
+void cfs_up_read(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-void up_write(struct rw_semaphore *s)
+void cfs_up_write(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-void fini_rwsem(struct rw_semaphore *s)
+void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
@@ -258,10 +258,10 @@ void fini_rwsem(struct rw_semaphore *s)
 #ifdef HAVE_LIBPTHREAD
 
 /*
- * Completion
+ * Multi-threaded user space completion
  */
 
-void cfs_init_completion(struct cfs_completion *c)
+void cfs_mt_init_completion(cfs_mt_completion_t *c)
 {
         LASSERT(c != NULL);
         c->c_done = 0;
@@ -269,14 +269,14 @@ void cfs_init_completion(struct cfs_completion *c)
         pthread_cond_init(&c->c_cond, NULL);
 }
 
-void cfs_fini_completion(struct cfs_completion *c)
+void cfs_mt_fini_completion(cfs_mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_destroy(&c->c_mut);
         pthread_cond_destroy(&c->c_cond);
 }
 
-void cfs_complete(struct cfs_completion *c)
+void cfs_mt_complete(cfs_mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_lock(&c->c_mut);
@@ -285,7 +285,7 @@ void cfs_complete(struct cfs_completion *c)
         pthread_mutex_unlock(&c->c_mut);
 }
 
-void cfs_wait_for_completion(struct cfs_completion *c)
+void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_lock(&c->c_mut);
@@ -296,12 +296,12 @@ void cfs_wait_for_completion(struct cfs_completion *c)
 }
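
The cfs_mt_ variants above are the classic pthread completion: a done flag
guarded by a mutex and announced through a condition variable. A standalone
replica of the pattern in plain POSIX (not the libcfs types); compile with
-lpthread:

        #include <pthread.h>
        #include <stdio.h>

        static struct {
                int             done;
                pthread_mutex_t mut;
                pthread_cond_t  cond;
        } c = { 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };

        static void *worker(void *arg)
        {
                (void)arg;
                pthread_mutex_lock(&c.mut);
                c.done = 1;                       /* cfs_mt_complete()  */
                pthread_cond_broadcast(&c.cond);
                pthread_mutex_unlock(&c.mut);
                return NULL;
        }

        int main(void)
        {
                pthread_t t;

                pthread_create(&t, NULL, worker, NULL);

                pthread_mutex_lock(&c.mut);  /* cfs_mt_wait_for_completion() */
                while (c.done == 0)
                        pthread_cond_wait(&c.cond, &c.mut);
                pthread_mutex_unlock(&c.mut);

                pthread_join(t, NULL);
                puts("completed");
                return 0;
        }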
 
 /*
- * atomic primitives
+ * Multi-threaded user space atomic primitives
  */
 
 static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
 
-int cfs_atomic_read(cfs_atomic_t *a)
+int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
 {
         int r;
 
@@ -311,14 +311,14 @@ int cfs_atomic_read(cfs_atomic_t *a)
         return r;
 }
 
-void cfs_atomic_set(cfs_atomic_t *a, int b)
+void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         a->counter = b;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-int cfs_atomic_dec_and_test(cfs_atomic_t *a)
+int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
 {
         int r;
 
@@ -328,20 +328,20 @@ int cfs_atomic_dec_and_test(cfs_atomic_t *a)
         return (r == 0);
 }
 
-void cfs_atomic_inc(cfs_atomic_t *a)
+void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         ++a->counter;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_atomic_dec(cfs_atomic_t *a)
+void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         --a->counter;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
-void cfs_atomic_add(int b, cfs_atomic_t *a)
+void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
 
 {
         pthread_mutex_lock(&atomic_guard_lock);
@@ -349,7 +349,7 @@ void cfs_atomic_add(int b, cfs_atomic_t *a)
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_atomic_sub(int b, cfs_atomic_t *a)
+void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         a->counter -= b;
index 03e4bea..44aad40 100644
@@ -119,7 +119,7 @@ void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
         (void)link;
 
         /* well, wait for something to happen */
-        call_wait_handler(0);
+        cfs_call_wait_handler(0);
 }
 
 int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
@@ -127,11 +127,11 @@ int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
 {
         LASSERT(link != NULL);
         (void)link;
-        call_wait_handler(timeout);
+        cfs_call_wait_handler(timeout);
         return 0;
 }
 
-void cfs_schedule_timeout(cfs_task_state_t state, int64_t timeout)
+void cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
 {
         cfs_waitlink_t    l;
         /* sleep(timeout) here instead? */
index f6de3a0..e904d0e 100644
 #include "tracefile.h"
 
 struct lc_watchdog {
-        cfs_timer_t       lcw_timer; /* kernel timer */
-        struct list_head  lcw_list;
-        cfs_time_t        lcw_last_touched;
-        cfs_task_t       *lcw_task;
+        cfs_timer_t           lcw_timer; /* kernel timer */
+        cfs_list_t            lcw_list;
+        cfs_time_t            lcw_last_touched;
+        cfs_task_t           *lcw_task;
 
-        void            (*lcw_callback)(pid_t, void *);
-        void             *lcw_data;
+        void                (*lcw_callback)(pid_t, void *);
+        void                 *lcw_data;
 
-        pid_t             lcw_pid;
+        pid_t                 lcw_pid;
 
         enum {
                 LC_WATCHDOG_DISABLED,
@@ -67,8 +67,8 @@ struct lc_watchdog {
  * and lcw_stop_completion when it exits.
  * Wake lcw_event_waitq to signal timer callback dispatches.
  */
-static struct completion lcw_start_completion;
-static struct completion lcw_stop_completion;
+static cfs_completion_t lcw_start_completion;
+static cfs_completion_t lcw_stop_completion;
 static cfs_waitq_t lcw_event_waitq;
 
 /*
@@ -85,27 +85,28 @@ static unsigned long lcw_flags = 0;
  * When it hits 0, we stop the dispatcher.
  */
 static __u32         lcw_refcount = 0;
-static DECLARE_MUTEX(lcw_refcount_sem);
+static CFS_DECLARE_MUTEX(lcw_refcount_sem);
 
 /*
  * List of timers that have fired that need their callbacks run by the
  * dispatcher.
  */
-static spinlock_t lcw_pending_timers_lock = SPIN_LOCK_UNLOCKED; /* BH lock! */
-static struct list_head lcw_pending_timers = \
+/* BH lock! */
+static cfs_spinlock_t lcw_pending_timers_lock = CFS_SPIN_LOCK_UNLOCKED;
+static cfs_list_t lcw_pending_timers =
         CFS_LIST_HEAD_INIT(lcw_pending_timers);
 
 /* Last time a watchdog expired */
 static cfs_time_t lcw_last_watchdog_time;
 static int lcw_recent_watchdog_count;
-static spinlock_t lcw_last_watchdog_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t lcw_last_watchdog_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 static void
 lcw_dump(struct lc_watchdog *lcw)
 {
         ENTRY;
 #if defined(HAVE_TASKLIST_LOCK)
-        read_lock(&tasklist_lock);
+        cfs_read_lock(&tasklist_lock);
 #elif defined(HAVE_TASK_RCU)
         rcu_read_lock();
 #else
@@ -121,7 +122,7 @@ lcw_dump(struct lc_watchdog *lcw)
         }
 
 #if defined(HAVE_TASKLIST_LOCK)
-        read_unlock(&tasklist_lock);
+        cfs_read_unlock(&tasklist_lock);
 #elif defined(HAVE_TASK_RCU)
         rcu_read_unlock();
 #endif
@@ -153,7 +154,7 @@ static void lcw_cb(ulong_ptr_t data)
          * Normally we would not hold the spin lock over the CWARN but in
          * this case we hold it to ensure non ratelimited lcw_dumps are not
          * interleaved on the console making them hard to read. */
-        spin_lock_bh(&lcw_last_watchdog_lock);
+        cfs_spin_lock_bh(&lcw_last_watchdog_lock);
         delta_time = cfs_duration_sec(cfs_time_sub(current_time,
                                                    lcw_last_watchdog_time));
 
@@ -188,15 +189,15 @@ static void lcw_cb(ulong_ptr_t data)
                 lcw_dump(lcw);
        }
 
-        spin_unlock_bh(&lcw_last_watchdog_lock);
-        spin_lock_bh(&lcw_pending_timers_lock);
+        cfs_spin_unlock_bh(&lcw_last_watchdog_lock);
+        cfs_spin_lock_bh(&lcw_pending_timers_lock);
 
-        if (list_empty(&lcw->lcw_list)) {
-                list_add(&lcw->lcw_list, &lcw_pending_timers);
+        if (cfs_list_empty(&lcw->lcw_list)) {
+                cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
                 cfs_waitq_signal(&lcw_event_waitq);
         }
 
-        spin_unlock_bh(&lcw_pending_timers_lock);
+        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
 
         EXIT;
 }
@@ -205,12 +206,12 @@ static int is_watchdog_fired(void)
 {
         int rc;
 
-        if (test_bit(LCW_FLAG_STOP, &lcw_flags))
+        if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags))
                 return 1;
 
-        spin_lock_bh(&lcw_pending_timers_lock);
-        rc = !list_empty(&lcw_pending_timers);
-        spin_unlock_bh(&lcw_pending_timers_lock);
+        cfs_spin_lock_bh(&lcw_pending_timers_lock);
+        rc = !cfs_list_empty(&lcw_pending_timers);
+        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
         return rc;
 }
 
@@ -229,17 +230,18 @@ static int lcw_dispatch_main(void *data)
         RECALC_SIGPENDING;
         SIGNAL_MASK_UNLOCK(current, flags);
 
-        complete(&lcw_start_completion);
+        cfs_complete(&lcw_start_completion);
 
         while (1) {
-                cfs_wait_event_interruptible(lcw_event_waitq, is_watchdog_fired(), rc);
+                cfs_wait_event_interruptible(lcw_event_waitq,
+                                             is_watchdog_fired(), rc);
                 CDEBUG(D_INFO, "Watchdog got woken up...\n");
-                if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
+                if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) {
                         CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n");
 
-                        spin_lock_bh(&lcw_pending_timers_lock);
-                        rc = !list_empty(&lcw_pending_timers);
-                        spin_unlock_bh(&lcw_pending_timers_lock);
+                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
+                        rc = !cfs_list_empty(&lcw_pending_timers);
+                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
                         if (rc) {
                                 CERROR("pending timers list was not empty at "
                                        "time of watchdog dispatch shutdown\n");
@@ -247,26 +249,26 @@ static int lcw_dispatch_main(void *data)
                         break;
                 }
 
-                spin_lock_bh(&lcw_pending_timers_lock);
-                while (!list_empty(&lcw_pending_timers)) {
+                cfs_spin_lock_bh(&lcw_pending_timers_lock);
+                while (!cfs_list_empty(&lcw_pending_timers)) {
 
-                        lcw = list_entry(lcw_pending_timers.next,
+                        lcw = cfs_list_entry(lcw_pending_timers.next,
                                          struct lc_watchdog,
                                          lcw_list);
-                        list_del_init(&lcw->lcw_list);
-                        spin_unlock_bh(&lcw_pending_timers_lock);
+                        cfs_list_del_init(&lcw->lcw_list);
+                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
 
                         CDEBUG(D_INFO, "found lcw for pid " LPPID "\n", lcw->lcw_pid);
 
                         if (lcw->lcw_state != LC_WATCHDOG_DISABLED)
                                 lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
 
-                        spin_lock_bh(&lcw_pending_timers_lock);
+                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
                 }
-                spin_unlock_bh(&lcw_pending_timers_lock);
+                cfs_spin_unlock_bh(&lcw_pending_timers_lock);
         }
 
-        complete(&lcw_stop_completion);
+        cfs_complete(&lcw_stop_completion);
 
         RETURN(rc);
 }
@@ -278,18 +280,18 @@ static void lcw_dispatch_start(void)
         ENTRY;
         LASSERT(lcw_refcount == 1);
 
-        init_completion(&lcw_stop_completion);
-        init_completion(&lcw_start_completion);
+        cfs_init_completion(&lcw_stop_completion);
+        cfs_init_completion(&lcw_start_completion);
         cfs_waitq_init(&lcw_event_waitq);
 
         CDEBUG(D_INFO, "starting dispatch thread\n");
-        rc = kernel_thread(lcw_dispatch_main, NULL, 0);
+        rc = cfs_kernel_thread(lcw_dispatch_main, NULL, 0);
         if (rc < 0) {
                 CERROR("error spawning watchdog dispatch thread: %d\n", rc);
                 EXIT;
                 return;
         }
-        wait_for_completion(&lcw_start_completion);
+        cfs_wait_for_completion(&lcw_start_completion);
         CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
 
         EXIT;
@@ -302,10 +304,10 @@ static void lcw_dispatch_stop(void)
 
         CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
 
-        set_bit(LCW_FLAG_STOP, &lcw_flags);
+        cfs_set_bit(LCW_FLAG_STOP, &lcw_flags);
         cfs_waitq_signal(&lcw_event_waitq);
 
-        wait_for_completion(&lcw_stop_completion);
+        cfs_wait_for_completion(&lcw_stop_completion);
 
         CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
 
@@ -334,10 +336,10 @@ struct lc_watchdog *lc_watchdog_add(int timeout,
         CFS_INIT_LIST_HEAD(&lcw->lcw_list);
         cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
 
-        down(&lcw_refcount_sem);
+        cfs_down(&lcw_refcount_sem);
         if (++lcw_refcount == 1)
                 lcw_dispatch_start();
-        up(&lcw_refcount_sem);
+        cfs_up(&lcw_refcount_sem);
 
         /* Keep this working in case we enable them by default */
         if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
@@ -377,9 +379,9 @@ void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
         ENTRY;
         LASSERT(lcw != NULL);
 
-        spin_lock_bh(&lcw_pending_timers_lock);
-        list_del_init(&lcw->lcw_list);
-        spin_unlock_bh(&lcw_pending_timers_lock);
+        cfs_spin_lock_bh(&lcw_pending_timers_lock);
+        cfs_list_del_init(&lcw->lcw_list);
+        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
 
         lcw_update_time(lcw, "resumed");
         lcw->lcw_state = LC_WATCHDOG_ENABLED;
@@ -396,10 +398,10 @@ void lc_watchdog_disable(struct lc_watchdog *lcw)
         ENTRY;
         LASSERT(lcw != NULL);
 
-        spin_lock_bh(&lcw_pending_timers_lock);
-        if (!list_empty(&lcw->lcw_list))
-                list_del_init(&lcw->lcw_list);
-        spin_unlock_bh(&lcw_pending_timers_lock);
+        cfs_spin_lock_bh(&lcw_pending_timers_lock);
+        if (!cfs_list_empty(&lcw->lcw_list))
+                cfs_list_del_init(&lcw->lcw_list);
+        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
 
         lcw_update_time(lcw, "completed");
         lcw->lcw_state = LC_WATCHDOG_DISABLED;
@@ -417,15 +419,15 @@ void lc_watchdog_delete(struct lc_watchdog *lcw)
 
         lcw_update_time(lcw, "stopped");
 
-        spin_lock_bh(&lcw_pending_timers_lock);
-        if (!list_empty(&lcw->lcw_list))
-                list_del_init(&lcw->lcw_list);
-        spin_unlock_bh(&lcw_pending_timers_lock);
+        cfs_spin_lock_bh(&lcw_pending_timers_lock);
+        if (!cfs_list_empty(&lcw->lcw_list))
+                cfs_list_del_init(&lcw->lcw_list);
+        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
 
-        down(&lcw_refcount_sem);
+        cfs_down(&lcw_refcount_sem);
         if (--lcw_refcount == 0)
                 lcw_dispatch_stop();
-        up(&lcw_refcount_sem);
+        cfs_up(&lcw_refcount_sem);
 
         LIBCFS_FREE(lcw, sizeof(*lcw));
 
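lc_watchdog_add() and lc_watchdog_delete() above share one lifecycle idiom: a semaphore-protected refcount (lcw_refcount under lcw_refcount_sem) starts the dispatcher thread for the first watchdog and stops it with the last one. A stripped-down sketch of that idiom, using pthread primitives and hypothetical dispatch_start()/dispatch_stop() helpers:

    #include <pthread.h>

    /* assumed helpers: spawn and join the dispatcher thread */
    void dispatch_start(void);
    void dispatch_stop(void);

    static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int refcount;

    void watchdog_get(void)
    {
            pthread_mutex_lock(&ref_lock);
            if (++refcount == 1)
                    dispatch_start();    /* first user spawns the thread */
            pthread_mutex_unlock(&ref_lock);
    }

    void watchdog_put(void)
    {
            pthread_mutex_lock(&ref_lock);
            if (--refcount == 0)
                    dispatch_stop();     /* last user shuts it down */
            pthread_mutex_unlock(&ref_lock);
    }
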
index 5b07ffa..e7ae9cb 100644
@@ -206,7 +206,7 @@ task_manager_notify(
     PLIST_ENTRY ListEntry = NULL; 
     PTASK_SLOT  TaskSlot  = NULL;
 
-    spin_lock(&(cfs_win_task_manger.Lock));
+    cfs_spin_lock(&(cfs_win_task_manger.Lock));
 
     ListEntry = cfs_win_task_manger.TaskList.Flink;
     while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
@@ -228,7 +228,7 @@ task_manager_notify(
         ListEntry = ListEntry->Flink;
     }
 
-    spin_unlock(&(cfs_win_task_manger.Lock));
+    cfs_spin_unlock(&(cfs_win_task_manger.Lock));
 }
 
 int
@@ -241,7 +241,7 @@ init_task_manager()
     cfs_win_task_manger.Magic = TASKMAN_MAGIC;
 
     /* initialize the spinlock protection */
-    spin_lock_init(&cfs_win_task_manger.Lock);
+    cfs_spin_lock_init(&cfs_win_task_manger.Lock);
 
     /* create slab memory cache */
     cfs_win_task_manger.slab = cfs_mem_cache_create(
@@ -287,7 +287,7 @@ cleanup_task_manager()
     }
 
     /* cleanup all the taskslots attached to the list */
-    spin_lock(&(cfs_win_task_manger.Lock));
+    cfs_spin_lock(&(cfs_win_task_manger.Lock));
 
     while (!IsListEmpty(&(cfs_win_task_manger.TaskList))) {
 
@@ -298,7 +298,7 @@ cleanup_task_manager()
         cleanup_task_slot(TaskSlot);
     }
 
-    spin_unlock(&cfs_win_task_manger.Lock);
+    cfs_spin_unlock(&cfs_win_task_manger.Lock);
 
     /* destroy the taskslot cache slab */
     cfs_mem_cache_destroy(cfs_win_task_manger.slab);
@@ -321,7 +321,7 @@ cfs_current()
     PLIST_ENTRY ListEntry = NULL; 
     PTASK_SLOT  TaskSlot  = NULL;
 
-    spin_lock(&(cfs_win_task_manger.Lock));
+    cfs_spin_lock(&(cfs_win_task_manger.Lock));
 
     ListEntry = cfs_win_task_manger.TaskList.Flink;
     while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
@@ -417,7 +417,7 @@ cfs_current()
 
 errorout:
 
-    spin_unlock(&(cfs_win_task_manger.Lock));
+    cfs_spin_unlock(&(cfs_win_task_manger.Lock));
 
     if (!TaskSlot) {
         cfs_enter_debugger();
@@ -431,17 +431,11 @@ errorout:
 void
 cfs_pause(cfs_duration_t ticks)
 {
-    cfs_schedule_timeout(CFS_TASK_UNINTERRUPTIBLE, ticks);
+    cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
 }
 
 void
-our_cond_resched()
-{
-    cfs_schedule_timeout(CFS_TASK_UNINTERRUPTIBLE, 1i64);
-}
-
-void
-cfs_schedule_timeout(cfs_task_state_t state, int64_t time)
+cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -464,7 +458,7 @@ cfs_schedule_timeout(cfs_task_state_t state, int64_t time)
 void
 cfs_schedule()
 {
-    cfs_schedule_timeout(CFS_TASK_UNINTERRUPTIBLE, 0);
+    cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
 }
 
 int
index 515eee5..e8df942 100644
@@ -691,17 +691,17 @@ int cfs_file_count(cfs_file_t *fp)
 struct dentry *dget(struct dentry *de)
 {
     if (de) {
-        atomic_inc(&de->d_count);
+        cfs_atomic_inc(&de->d_count);
     }
     return de;
 }
 
 void dput(struct dentry *de)
 {
-    if (!de || atomic_read(&de->d_count) == 0) {
+    if (!de || cfs_atomic_read(&de->d_count) == 0) {
         return;
     }
-    if (atomic_dec_and_test(&de->d_count)) {
+    if (cfs_atomic_dec_and_test(&de->d_count)) {
         cfs_free(de);
     }
 }
index c2ac044..9e4550b 100644
@@ -43,9 +43,9 @@
 #if defined(_X86_)
 
 void __declspec (naked) FASTCALL
-atomic_add(
+cfs_atomic_add(
     int i,
-    atomic_t *v
+    cfs_atomic_t *v
     )
 {
     // ECX = i
@@ -58,9 +58,9 @@ atomic_add(
 }
 
 void __declspec (naked) FASTCALL
-atomic_sub(
+cfs_atomic_sub(
     int i,
-    atomic_t *v
+    cfs_atomic_t *v
    ) 
 {
     // ECX = i
@@ -73,8 +73,8 @@ atomic_sub(
 }
 
 void __declspec (naked) FASTCALL
-atomic_inc(
-    atomic_t *v
+cfs_atomic_inc(
+    cfs_atomic_t *v
     )
 {
     //InterlockedIncrement((PULONG)(&((v)->counter)));
@@ -88,8 +88,8 @@ atomic_inc(
 }
 
 void __declspec (naked) FASTCALL
-atomic_dec(
-    atomic_t *v
+cfs_atomic_dec(
+    cfs_atomic_t *v
     )
 {
     // ECX = v ; [ECX][0] = v->counter
@@ -101,9 +101,9 @@ atomic_dec(
 }
 
 int __declspec (naked) FASTCALL 
-atomic_sub_and_test(
+cfs_atomic_sub_and_test(
     int i,
-    atomic_t *v
+    cfs_atomic_t *v
     )
 {
 
@@ -119,8 +119,8 @@ atomic_sub_and_test(
 }
 
 int __declspec (naked) FASTCALL
-atomic_inc_and_test(
-    atomic_t *v
+cfs_atomic_inc_and_test(
+    cfs_atomic_t *v
     )
 {
     // ECX = v ; [ECX][0] = v->counter
@@ -134,8 +134,8 @@ atomic_inc_and_test(
 }
 
 int __declspec (naked) FASTCALL
-atomic_dec_and_test(
-    atomic_t *v
+cfs_atomic_dec_and_test(
+    cfs_atomic_t *v
     )
 {
     // ECX = v ; [ECX][0] = v->counter
@@ -151,43 +151,43 @@ atomic_dec_and_test(
 #elif defined(_AMD64_)
 
 void FASTCALL
-atomic_add(
+cfs_atomic_add(
     int i,
-    atomic_t *v
+    cfs_atomic_t *v
     )
 {
     InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (i));
 }
 
 void FASTCALL
-atomic_sub(
+cfs_atomic_sub(
     int i,
-    atomic_t *v
+    cfs_atomic_t *v
    ) 
 {
     InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (-1*i));
 }
 
 void FASTCALL
-atomic_inc(
-    atomic_t *v
+cfs_atomic_inc(
+    cfs_atomic_t *v
     )
 {
    InterlockedIncrement((PULONG)(&((v)->counter)));
 }
 
 void FASTCALL
-atomic_dec(
-    atomic_t *v
+cfs_atomic_dec(
+    cfs_atomic_t *v
     )
 {
     InterlockedDecrement((PULONG)(&((v)->counter)));
 }
 
 int FASTCALL 
-atomic_sub_and_test(
+cfs_atomic_sub_and_test(
     int i,
-    atomic_t *v
+    cfs_atomic_t *v
     )
 {
     int counter, result;
@@ -206,8 +206,8 @@ atomic_sub_and_test(
 }
 
 int FASTCALL
-atomic_inc_and_test(
-    atomic_t *v
+cfs_atomic_inc_and_test(
+    cfs_atomic_t *v
     )
 {
     int counter, result;
@@ -226,8 +226,8 @@ atomic_inc_and_test(
 }
 
 int FASTCALL
-atomic_dec_and_test(
-    atomic_t *v
+cfs_atomic_dec_and_test(
+    cfs_atomic_t *v
     )
 {
     int counter, result;
@@ -258,7 +258,7 @@ atomic_dec_and_test(
  *
  * Atomically adds \a i to \a v and returns \a i + \a v
  */
-int FASTCALL atomic_add_return(int i, atomic_t *v)
+int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v)
 {
     int counter, result;
 
@@ -283,21 +283,21 @@ int FASTCALL atomic_add_return(int i, atomic_t *v)
  *
  * Atomically subtracts \a i from \a v and returns \a v - \a i
  */
-int FASTCALL atomic_sub_return(int i, atomic_t *v)
+int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v)
 {
-       return atomic_add_return(-i, v);
+       return cfs_atomic_add_return(-i, v);
 }
 
-int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock)
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock)
 {
-    if (atomic_read(v) != 1) {
+    if (cfs_atomic_read(v) != 1) {
         return 0;
-    } 
+    }
 
-       spin_lock(lock);
-       if (atomic_dec_and_test(v))
+       cfs_spin_lock(lock);
+       if (cfs_atomic_dec_and_test(v))
                return 1;
-       spin_unlock(lock);
+       cfs_spin_unlock(lock);
        return 0;
 }
 
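cfs_atomic_dec_and_lock() above has the classic dec_and_lock shape: an unlocked read that declines early when the counter cannot reach zero, then a decision retested under the lock. A sketch of the core logic (illustrative names; a pthread mutex stands in for the spinlock, and the unlocked fast path is reduced to a comment):

    #include <pthread.h>

    typedef struct { int counter; } mt_atomic_t;

    /* Returns 1 with *lock held iff the counter dropped to zero;
     * otherwise returns 0 with the lock released.  Production code
     * first peeks at the counter without the lock and returns 0
     * early when it cannot possibly reach zero. */
    int dec_and_lock(mt_atomic_t *v, pthread_mutex_t *lock)
    {
            pthread_mutex_lock(lock);
            if (--v->counter == 0)
                    return 1;       /* caller unlocks after teardown */
            pthread_mutex_unlock(lock);
            return 0;
    }
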
@@ -308,19 +308,19 @@ int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock)
 
 
 void
-rwlock_init(rwlock_t * rwlock)
+cfs_rwlock_init(cfs_rwlock_t * rwlock)
 {
-    spin_lock_init(&rwlock->guard);
+    cfs_spin_lock_init(&rwlock->guard);
     rwlock->count = 0;
 }
 
 void
-rwlock_fini(rwlock_t * rwlock)
+cfs_rwlock_fini(cfs_rwlock_t * rwlock)
 {
 }
 
 void
-read_lock(rwlock_t * rwlock)
+cfs_read_lock(cfs_rwlock_t * rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -337,18 +337,18 @@ read_lock(rwlock_t * rwlock)
     slot->irql = KeRaiseIrqlToDpcLevel();
 
     while (TRUE) {
-           spin_lock(&rwlock->guard);
+           cfs_spin_lock(&rwlock->guard);
         if (rwlock->count >= 0)
             break;
-        spin_unlock(&rwlock->guard);
+        cfs_spin_unlock(&rwlock->guard);
     }
 
        rwlock->count++;
-       spin_unlock(&rwlock->guard);
+       cfs_spin_unlock(&rwlock->guard);
 }
 
 void
-read_unlock(rwlock_t * rwlock)
+cfs_read_unlock(cfs_rwlock_t * rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -362,19 +362,19 @@ read_unlock(rwlock_t * rwlock)
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     ASSERT(slot->Magic == TASKSLT_MAGIC);
    
-    spin_lock(&rwlock->guard);
+    cfs_spin_lock(&rwlock->guard);
        ASSERT(rwlock->count > 0);
     rwlock->count--;
     if (rwlock->count < 0) {
         cfs_enter_debugger();
     }
-       spin_unlock(&rwlock->guard);
+       cfs_spin_unlock(&rwlock->guard);
 
     KeLowerIrql(slot->irql);
 }
 
 void
-write_lock(rwlock_t * rwlock)
+cfs_write_lock(cfs_rwlock_t * rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -391,18 +391,18 @@ write_lock(rwlock_t * rwlock)
     slot->irql = KeRaiseIrqlToDpcLevel();
 
     while (TRUE) {
-           spin_lock(&rwlock->guard);
+           cfs_spin_lock(&rwlock->guard);
         if (rwlock->count == 0)
             break;
-        spin_unlock(&rwlock->guard);
+        cfs_spin_unlock(&rwlock->guard);
     }
 
        rwlock->count = -1;
-       spin_unlock(&rwlock->guard);
+       cfs_spin_unlock(&rwlock->guard);
 }
 
 void
-write_unlock(rwlock_t * rwlock)
+cfs_write_unlock(cfs_rwlock_t * rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -416,10 +416,10 @@ write_unlock(rwlock_t * rwlock)
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     ASSERT(slot->Magic == TASKSLT_MAGIC);
    
-    spin_lock(&rwlock->guard);
+    cfs_spin_lock(&rwlock->guard);
        ASSERT(rwlock->count == -1);
     rwlock->count = 0;
-       spin_unlock(&rwlock->guard);
+       cfs_spin_unlock(&rwlock->guard);
 
     KeLowerIrql(slot->irql);
 }
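
The winnt read/write lock above is a guard-spinlock-plus-counter design: count > 0 means that many readers are inside, count == -1 means one writer, and both lock paths spin on the guard until the state they need appears. The same logic with pthread primitives (names illustrative):

    #include <pthread.h>

    /* count > 0: that many readers inside; count == -1: one writer */
    typedef struct {
            pthread_mutex_t guard;
            int             count;
    } rw_t;

    void rw_read_lock(rw_t *rw)
    {
            for (;;) {
                    pthread_mutex_lock(&rw->guard);
                    if (rw->count >= 0)
                            break;                    /* no writer inside */
                    pthread_mutex_unlock(&rw->guard); /* writer active: retry */
            }
            rw->count++;
            pthread_mutex_unlock(&rw->guard);
    }

    void rw_write_lock(rw_t *rw)
    {
            for (;;) {
                    pthread_mutex_lock(&rw->guard);
                    if (rw->count == 0)
                            break;                    /* lock is idle: claim it */
                    pthread_mutex_unlock(&rw->guard);
            }
            rw->count = -1;
            pthread_mutex_unlock(&rw->guard);
    }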
index 6a8689e..357886f 100644
@@ -55,8 +55,8 @@ cfs_page_t * virt_to_page(void * addr)
     memset(pg, 0, sizeof(cfs_page_t));
     pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
     pg->mapping = addr;
-    atomic_set(&pg->count, 1);
-    set_bit(PG_virt, &(pg->flags));
+    cfs_atomic_set(&pg->count, 1);
+    cfs_set_bit(PG_virt, &(pg->flags));
     cfs_enter_debugger();
     return pg;
 }
@@ -76,7 +76,7 @@ cfs_page_t * virt_to_page(void * addr)
  *   N/A
  */
 
-atomic_t libcfs_total_pages;
+cfs_atomic_t libcfs_total_pages;
 
 cfs_page_t * cfs_alloc_page(int flags)
 {
@@ -90,13 +90,13 @@ cfs_page_t * cfs_alloc_page(int flags)
 
     memset(pg, 0, sizeof(cfs_page_t));
     pg->addr = cfs_mem_cache_alloc(cfs_page_p_slab, 0);
-    atomic_set(&pg->count, 1);
+    cfs_atomic_set(&pg->count, 1);
 
     if (pg->addr) {
         if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
             memset(pg->addr, 0, CFS_PAGE_SIZE);
         }
-        atomic_inc(&libcfs_total_pages);
+        cfs_atomic_inc(&libcfs_total_pages);
     } else {
         cfs_enter_debugger();
         cfs_mem_cache_free(cfs_page_t_slab, pg);
@@ -123,11 +123,11 @@ void cfs_free_page(cfs_page_t *pg)
 {
     ASSERT(pg != NULL);
     ASSERT(pg->addr  != NULL);
-    ASSERT(atomic_read(&pg->count) <= 1);
+    ASSERT(cfs_atomic_read(&pg->count) <= 1);
 
-    if (!test_bit(PG_virt, &pg->flags)) {
+    if (!cfs_test_bit(PG_virt, &pg->flags)) {
         cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
-        atomic_dec(&libcfs_total_pages);
+        cfs_atomic_dec(&libcfs_total_pages);
     } else {
         cfs_enter_debugger();
     }
@@ -146,13 +146,13 @@ cfs_page_t *cfs_alloc_pages(unsigned int flags, unsigned int order)
 
     memset(pg, 0, sizeof(cfs_page_t));
     pg->addr = cfs_alloc((CFS_PAGE_SIZE << order),0);
-    atomic_set(&pg->count, 1);
+    cfs_atomic_set(&pg->count, 1);
 
     if (pg->addr) {
         if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
             memset(pg->addr, 0, CFS_PAGE_SIZE << order);
         }
-        atomic_add(1 << order, &libcfs_total_pages);
+        cfs_atomic_add(1 << order, &libcfs_total_pages);
     } else {
         cfs_enter_debugger();
         cfs_mem_cache_free(cfs_page_t_slab, pg);
@@ -166,9 +166,9 @@ void __cfs_free_pages(cfs_page_t *pg, unsigned int order)
 {
     ASSERT(pg != NULL);
     ASSERT(pg->addr  != NULL);
-    ASSERT(atomic_read(&pg->count) <= 1);
+    ASSERT(cfs_atomic_read(&pg->count) <= 1);
 
-    atomic_sub(1 << order, &libcfs_total_pages);
+    cfs_atomic_sub(1 << order, &libcfs_total_pages);
     cfs_free(pg->addr);
     cfs_mem_cache_free(cfs_page_t_slab, pg);
 }
@@ -415,56 +415,56 @@ void cfs_mem_cache_free(cfs_mem_cache_t * kmc, void * buf)
     ExFreeToNPagedLookasideList(&(kmc->npll), buf);
 }
 
-spinlock_t  shrinker_guard = {0};
+cfs_spinlock_t  shrinker_guard = {0};
 CFS_LIST_HEAD(shrinker_hdr);
 cfs_timer_t shrinker_timer = {0};
 
-struct shrinker * set_shrinker(int seeks, shrink_callback cb)
+struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
 {
-    struct shrinker * s = (struct shrinker *)
-        cfs_alloc(sizeof(struct shrinker), CFS_ALLOC_ZERO);
+    struct cfs_shrinker * s = (struct cfs_shrinker *)
+        cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
     if (s) {
         s->cb = cb;
         s->seeks = seeks;
         s->nr = 2;
-        spin_lock(&shrinker_guard);
-        list_add(&s->list, &shrinker_hdr); 
-        spin_unlock(&shrinker_guard);
+        cfs_spin_lock(&shrinker_guard);
+        cfs_list_add(&s->list, &shrinker_hdr);
+        cfs_spin_unlock(&shrinker_guard);
     }
 
     return s;
 }
 
-void remove_shrinker(struct shrinker *s)
+void cfs_remove_shrinker(struct cfs_shrinker *s)
 {
-    struct shrinker *tmp;
-    spin_lock(&shrinker_guard);
+    struct cfs_shrinker *tmp;
+    cfs_spin_lock(&shrinker_guard);
 #if TRUE
     cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
-                            struct shrinker, list) {
+                                  struct cfs_shrinker, list) {
         if (tmp == s) {
-            list_del(&tmp->list);
+            cfs_list_del(&tmp->list);
             break;
         } 
     }
 #else
-    list_del(&s->list);
+    cfs_list_del(&s->list);
 #endif
-    spin_unlock(&shrinker_guard);
+    cfs_spin_unlock(&shrinker_guard);
     cfs_free(s);
 }
 
 /* timer proc used for unit testing: fire every registered shrinker */
 void shrinker_timer_proc(ulong_ptr_t arg)
 {
-    struct shrinker *s;
-    spin_lock(&shrinker_guard);
+    struct cfs_shrinker *s;
+    cfs_spin_lock(&shrinker_guard);
 
     cfs_list_for_each_entry_typed(s, &shrinker_hdr,
-                            struct shrinker, list) {
-        s->cb(s->nr, __GFP_FS);
+                                  struct cfs_shrinker, list) {
+        s->cb(s->nr, __GFP_FS);
     }
-    spin_unlock(&shrinker_guard);
+    cfs_spin_unlock(&shrinker_guard);
     cfs_timer_arm(&shrinker_timer, 300);
 }
 
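cfs_set_shrinker()/cfs_remove_shrinker() above keep a lock-protected list of callbacks that the timer proc walks and invokes. The registry pattern in miniature (illustrative names; a singly linked list replaces cfs_list_t):

    #include <pthread.h>
    #include <stdlib.h>

    typedef int (*shrink_cb)(int nr, unsigned int gfp_mask);

    struct shrinker_reg {
            struct shrinker_reg *next;
            shrink_cb            cb;
            int                  nr;
    };

    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct shrinker_reg *reg_head;

    struct shrinker_reg *shrinker_register(shrink_cb cb, int nr)
    {
            struct shrinker_reg *s = calloc(1, sizeof(*s));

            if (s != NULL) {
                    s->cb = cb;
                    s->nr = nr;
                    pthread_mutex_lock(&reg_lock);
                    s->next = reg_head;              /* push onto registry */
                    reg_head = s;
                    pthread_mutex_unlock(&reg_lock);
            }
            return s;
    }

    void shrinker_run_all(unsigned int gfp_mask)
    {
            struct shrinker_reg *s;

            pthread_mutex_lock(&reg_lock);           /* callbacks run under the
                                                      * lock, as in the code above */
            for (s = reg_head; s != NULL; s = s->next)
                    s->cb(s->nr, gfp_mask);
            pthread_mutex_unlock(&reg_lock);
    }
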
index 3febc7f..530c38a 100644
@@ -51,7 +51,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
         hdr = (struct libcfs_ioctl_hdr *)buf;
         data = (struct libcfs_ioctl_data *)buf;
 
-        err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
+        err = cfs_copy_from_user(buf, (void *)arg, sizeof(*hdr));
         if (err)
                 RETURN(err);
 
@@ -70,7 +70,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
                 RETURN(-EINVAL);
         }
 
-        err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
+        err = cfs_copy_from_user(buf, (void *)arg, hdr->ioc_len);
         if (err)
                 RETURN(err);
 
@@ -84,14 +84,14 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
 
         if (data->ioc_inllen2)
                 data->ioc_inlbuf2 = &data->ioc_bulk[0] +
-                        size_round(data->ioc_inllen1);
+                        cfs_size_round(data->ioc_inllen1);
 
         RETURN(0);
 }
 
 int libcfs_ioctl_popdata(void *arg, void *data, int size)
 {
-       if (copy_to_user((char *)arg, data, size))
+       if (cfs_copy_to_user((char *)arg, data, size))
                return -EFAULT;
        return 0;
 }
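
libcfs_ioctl_getdata() above is the standard two-phase ioctl copy-in: fetch only the fixed header first, validate the embedded length against the kernel buffer, and only then copy the full variable-length payload, so a hostile ioc_len can never overrun the buffer. Schematically (illustrative names; copy_in() stands in for the user-to-kernel copy primitive):

    #include <errno.h>
    #include <stddef.h>

    struct ioc_hdr { unsigned int ioc_len; };

    /* assumed helper: returns nonzero if the user copy faults */
    int copy_in(void *dst, const void *usrc, size_t len);

    int ioctl_getdata(char *buf, char *end, void *uarg)
    {
            struct ioc_hdr *hdr = (struct ioc_hdr *)buf;

            if (copy_in(buf, uarg, sizeof(*hdr)))     /* phase 1: header only */
                    return -EFAULT;

            if (hdr->ioc_len > (size_t)(end - buf))   /* validate the length */
                    return -EINVAL;

            if (copy_in(buf, uarg, hdr->ioc_len))     /* phase 2: full payload */
                    return -EFAULT;
            return 0;
    }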
index 634024a..c0ca9e4 100644
@@ -208,7 +208,7 @@ int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
 }
 
 
-void do_gettimeofday(struct timeval *tv)
+void cfs_gettimeofday(struct timeval *tv)
 {
     LARGE_INTEGER Time;
 
@@ -220,7 +220,7 @@ void do_gettimeofday(struct timeval *tv)
 
 int gettimeofday(struct timeval *tv, void * tz)
 {
-    do_gettimeofday(tv);
+    cfs_gettimeofday(tv);
     return 0;
 }
 
@@ -523,7 +523,7 @@ int cfs_proc_ioctl(int fd, int cmd, void *buffer)
         length = portal->ioc_len;
     } else if (_IOC_TYPE(cmd) == 'f') {
         length = obd->ioc_len;
-        extra = size_round(obd->ioc_plen1) + size_round(obd->ioc_plen2);
+        extra = cfs_size_round(obd->ioc_plen1) + cfs_size_round(obd->ioc_plen2);
     } else if(_IOC_TYPE(cmd) == 'u') {
         length = 4;
         extra  = 0;
@@ -560,7 +560,7 @@ int cfs_proc_ioctl(int fd, int cmd, void *buffer)
             if (obd->ioc_pbuf1 && data->ioc_plen1) {
                 data->ioc_pbuf1 = &procdat[length];
                 memcpy(data->ioc_pbuf1, obd->ioc_pbuf1, obd->ioc_plen1); 
-                length += size_round(obd->ioc_plen1);
+                length += cfs_size_round(obd->ioc_plen1);
             } else {
                 data->ioc_plen1 = 0;
                 data->ioc_pbuf1 = NULL;
@@ -569,7 +569,7 @@ int cfs_proc_ioctl(int fd, int cmd, void *buffer)
             if (obd->ioc_pbuf2 && obd->ioc_plen2) {
                 data->ioc_pbuf2 = &procdat[length];
                 memcpy(data->ioc_pbuf2, obd->ioc_pbuf2, obd->ioc_plen2);
-                length += size_round(obd->ioc_plen2);
+                length += cfs_size_round(obd->ioc_plen2);
             } else {
                 data->ioc_plen2 = 0;
                 data->ioc_pbuf2 = NULL;
@@ -603,13 +603,13 @@ int cfs_proc_ioctl(int fd, int cmd, void *buffer)
                 ASSERT(obd->ioc_plen1 == data->ioc_plen1);
                 data->ioc_pbuf1 = &procdat[length];
                 memcpy(obd->ioc_pbuf1, data->ioc_pbuf1, obd->ioc_plen1);
-                length += size_round(obd->ioc_plen1);
+                length += cfs_size_round(obd->ioc_plen1);
             }
             if (obd->ioc_pbuf2) {
                 ASSERT(obd->ioc_plen2 == data->ioc_plen2);
                 data->ioc_pbuf2 = &procdat[length];
                 memcpy(obd->ioc_pbuf2, data->ioc_pbuf2, obd->ioc_plen2);
-                length += size_round(obd->ioc_plen2);
+                length += cfs_size_round(obd->ioc_plen2);
             }
         }
         data->ioc_inlbuf1 = obd->ioc_inlbuf1;
index 8a2ee11..f96bdd2 100644
@@ -148,7 +148,7 @@ int cfs_kernel_thread(int (*func)(void *), void *arg, int flag)
  */
 
 
-static CFS_DECL_RWSEM(cfs_symbol_lock);
+static CFS_DECLARE_RWSEM(cfs_symbol_lock);
 CFS_LIST_HEAD(cfs_symbol_list);
 
 int libcfs_is_mp_system = FALSE;
@@ -171,18 +171,18 @@ int libcfs_is_mp_system = FALSE;
 void *
 cfs_symbol_get(const char *name)
 {
-    struct list_head    *walker;
-    struct cfs_symbol   *sym = NULL;
+    cfs_list_t              *walker;
+    struct cfs_symbol       *sym = NULL;
 
-    down_read(&cfs_symbol_lock);
-    list_for_each(walker, &cfs_symbol_list) {
-        sym = list_entry (walker, struct cfs_symbol, sym_list);
+    cfs_down_read(&cfs_symbol_lock);
+    cfs_list_for_each(walker, &cfs_symbol_list) {
+        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
             sym->ref ++;
             break;
         }
     }
-    up_read(&cfs_symbol_lock);
+    cfs_up_read(&cfs_symbol_lock);
 
     if (sym != NULL)
         return sym->value;
@@ -207,19 +207,19 @@ cfs_symbol_get(const char *name)
 void
 cfs_symbol_put(const char *name)
 {
-    struct list_head    *walker;
-    struct cfs_symbol   *sym = NULL;
+    cfs_list_t              *walker;
+    struct cfs_symbol       *sym = NULL;
 
-    down_read(&cfs_symbol_lock);
-    list_for_each(walker, &cfs_symbol_list) {
-        sym = list_entry (walker, struct cfs_symbol, sym_list);
+    cfs_down_read(&cfs_symbol_lock);
+    cfs_list_for_each(walker, &cfs_symbol_list) {
+        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
             LASSERT(sym->ref > 0);
             sym->ref--;
             break;
         }
     }
-    up_read(&cfs_symbol_lock);
+    cfs_up_read(&cfs_symbol_lock);
 
     LASSERT(sym != NULL);
 }
@@ -244,9 +244,9 @@ cfs_symbol_put(const char *name)
 int
 cfs_symbol_register(const char *name, const void *value)
 {
-    struct list_head    *walker;
-    struct cfs_symbol   *sym = NULL;
-    struct cfs_symbol   *new = NULL;
+    cfs_list_t              *walker;
+    struct cfs_symbol       *sym = NULL;
+    struct cfs_symbol       *new = NULL;
 
     new = cfs_alloc(sizeof(struct cfs_symbol), CFS_ALLOC_ZERO);
     if (!new) {
@@ -257,17 +257,17 @@ cfs_symbol_register(const char *name, const void *value)
     new->ref = 0;
     CFS_INIT_LIST_HEAD(&new->sym_list);
 
-    down_write(&cfs_symbol_lock);
-    list_for_each(walker, &cfs_symbol_list) {
-        sym = list_entry (walker, struct cfs_symbol, sym_list);
+    cfs_down_write(&cfs_symbol_lock);
+    cfs_list_for_each(walker, &cfs_symbol_list) {
+        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
-            up_write(&cfs_symbol_lock);
+            cfs_up_write(&cfs_symbol_lock);
             cfs_free(new);
             return 0; // already registered
         }
     }
-    list_add_tail(&new->sym_list, &cfs_symbol_list);
-    up_write(&cfs_symbol_lock);
+    cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
+    cfs_up_write(&cfs_symbol_lock);
 
     return 0;
 }
@@ -289,21 +289,21 @@ cfs_symbol_register(const char *name, const void *value)
 void
 cfs_symbol_unregister(const char *name)
 {
-    struct list_head    *walker;
-    struct list_head    *nxt;
-    struct cfs_symbol   *sym = NULL;
+    cfs_list_t              *walker;
+    cfs_list_t              *nxt;
+    struct cfs_symbol       *sym = NULL;
 
-    down_write(&cfs_symbol_lock);
-    list_for_each_safe(walker, nxt, &cfs_symbol_list) {
-        sym = list_entry (walker, struct cfs_symbol, sym_list);
+    cfs_down_write(&cfs_symbol_lock);
+    cfs_list_for_each_safe(walker, nxt, &cfs_symbol_list) {
+        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
             LASSERT(sym->ref == 0);
-            list_del (&sym->sym_list);
+            cfs_list_del (&sym->sym_list);
             cfs_free(sym);
             break;
         }
     }
-    up_write(&cfs_symbol_lock);
+    cfs_up_write(&cfs_symbol_lock);
 }
 
 /*
@@ -323,17 +323,17 @@ cfs_symbol_unregister(const char *name)
 void
 cfs_symbol_clean()
 {
-    struct list_head    *walker;
+    cfs_list_t          *walker;
     struct cfs_symbol   *sym = NULL;
 
-    down_write(&cfs_symbol_lock);
-    list_for_each(walker, &cfs_symbol_list) {
-        sym = list_entry (walker, struct cfs_symbol, sym_list);
+    cfs_down_write(&cfs_symbol_lock);
+    cfs_list_for_each(walker, &cfs_symbol_list) {
+        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         LASSERT(sym->ref == 0);
-        list_del (&sym->sym_list);
+        cfs_list_del (&sym->sym_list);
         cfs_free(sym);
     }
-    up_write(&cfs_symbol_lock);
+    cfs_up_write(&cfs_symbol_lock);
     return;
 }
 
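The cfs_symbol_* routines above are a textbook reader/writer split: get and put walk the list under the shared (read) side of the semaphore, while register, unregister, and clean take it exclusive to mutate the list. The shape with POSIX primitives (illustrative; note the original bumps sym->ref under the read lock as well, which a production version would make atomic):

    #include <pthread.h>
    #include <string.h>

    struct symbol {
            struct symbol *next;
            const char    *name;
            const void    *value;
            int            ref;
    };

    static pthread_rwlock_t sym_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct symbol *sym_head;

    const void *symbol_get(const char *name)
    {
            struct symbol *s;
            const void *v = NULL;

            pthread_rwlock_rdlock(&sym_lock);   /* shared: lookup only */
            for (s = sym_head; s != NULL; s = s->next) {
                    if (strcmp(s->name, name) == 0) {
                            s->ref++;           /* mirrors the original */
                            v = s->value;
                            break;
                    }
            }
            pthread_rwlock_unlock(&sym_lock);
            return v;
    }
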
@@ -444,7 +444,7 @@ void cfs_timer_arm(cfs_timer_t *timer, cfs_time_t deadline)
     KeAcquireSpinLock(&(timer->Lock), &Irql);
     if (!cfs_is_flag_set(timer->Flags, CFS_TIMER_FLAG_TIMERED)){
 
-        timeout.QuadPart = (LONGLONG)-1*1000*1000*10/HZ*deadline;
+        timeout.QuadPart = (LONGLONG)-1*1000*1000*10/CFS_HZ*deadline;
 
         if (KeSetTimer(&timer->Timer, timeout, &timer->Dpc)) {
             cfs_set_flag(timer->Flags, CFS_TIMER_FLAG_TIMERED);
@@ -770,14 +770,14 @@ libcfs_arch_init(void)
 {
     int         rc;
 
-    spinlock_t  lock;
+    cfs_spinlock_t  lock;
     /* Workaround to check whether the system is an MP or UP build */
-    spin_lock_init(&lock);
-    spin_lock(&lock);
+    cfs_spin_lock_init(&lock);
+    cfs_spin_lock(&lock);
     libcfs_is_mp_system = (int)lock.lock;
     /* on an MP build this is a real spin; on a UP build it only
        raises the IRQL to DISPATCH_LEVEL */
-    spin_unlock(&lock);
+    cfs_spin_unlock(&lock);
 
     /* initialize libc routines (confliction between libcnptr.lib
        and kernel ntoskrnl.lib) */
index bf41886..bf14504 100644
@@ -77,19 +77,19 @@ cfs_sysctl_table_header_t       root_table_header;
 /* The global lock to protect all the access */
 
 #if LIBCFS_PROCFS_SPINLOCK
-spinlock_t                      proc_fs_lock;
+cfs_spinlock_t                  proc_fs_lock;
 
-#define INIT_PROCFS_LOCK()      spin_lock_init(&proc_fs_lock)
-#define LOCK_PROCFS()           spin_lock(&proc_fs_lock)
-#define UNLOCK_PROCFS()         spin_unlock(&proc_fs_lock)
+#define INIT_PROCFS_LOCK()      cfs_spin_lock_init(&proc_fs_lock)
+#define LOCK_PROCFS()           cfs_spin_lock(&proc_fs_lock)
+#define UNLOCK_PROCFS()         cfs_spin_unlock(&proc_fs_lock)
 
 #else
 
-mutex_t                         proc_fs_lock;
+cfs_mutex_t                     proc_fs_lock;
 
-#define INIT_PROCFS_LOCK()      init_mutex(&proc_fs_lock)
-#define LOCK_PROCFS()           mutex_down(&proc_fs_lock)
-#define UNLOCK_PROCFS()         mutex_up(&proc_fs_lock)
+#define INIT_PROCFS_LOCK()      cfs_init_mutex(&proc_fs_lock)
+#define LOCK_PROCFS()           cfs_mutex_down(&proc_fs_lock)
+#define UNLOCK_PROCFS()         cfs_mutex_up(&proc_fs_lock)
 
 #endif
 
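The proc_fs_lock block above hides the lock flavor behind INIT/LOCK/UNLOCK macros, so flipping LIBCFS_PROCFS_SPINLOCK swaps a spinlock for a mutex without touching any call site. The same trick in miniature:

    #include <pthread.h>

    /* define USE_SPIN at build time; call sites never change */
    #ifdef USE_SPIN
    static pthread_spinlock_t state_lock;
    #define STATE_LOCK_INIT()  pthread_spin_init(&state_lock, 0)
    #define STATE_LOCK()       pthread_spin_lock(&state_lock)
    #define STATE_UNLOCK()     pthread_spin_unlock(&state_lock)
    #else
    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    #define STATE_LOCK_INIT()  ((void)0)
    #define STATE_LOCK()       pthread_mutex_lock(&state_lock)
    #define STATE_UNLOCK()     pthread_mutex_unlock(&state_lock)
    #endif
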
@@ -137,7 +137,7 @@ proc_file_read(struct file * file, const char * buf, size_t nbytes, loff_t *ppos
             break;
         }
         
-        n -= copy_to_user((void *)buf, start, n);
+        n -= cfs_copy_to_user((void *)buf, start, n);
         if (n == 0) {
             if (retval == 0)
                 retval = -EFAULT;
@@ -927,7 +927,7 @@ void register_proc_table(cfs_sysctl_table_t * table, cfs_proc_entry_t * root)
             continue;
         /* Maybe we can't do anything with it... */
         if (!table->proc_handler && !table->child) {
-            printk(KERN_WARNING "SYSCTL: Can't register %s\n",
+            printk(CFS_KERN_WARNING "SYSCTL: Can't register %s\n",
                 table->procname);
             continue;
         }
@@ -974,7 +974,7 @@ void unregister_proc_table(cfs_sysctl_table_t * table, cfs_proc_entry_t *root)
             continue;
         if (de->mode & S_IFDIR) {
             if (!table->child) {
-                printk (KERN_ALERT "Help - malformed sysctl tree on free\n");
+                printk (CFS_KERN_ALERT
+                        "Help - malformed sysctl tree on free\n");
                 continue;
             }
             unregister_proc_table(table->child, de);
@@ -1011,7 +1011,7 @@ int sysctl_string(cfs_sysctl_table_t *table, int *name, int nlen,
             if (len > l) len = l;
             if (len >= table->maxlen)
                 len = table->maxlen;
-            if(copy_to_user(oldval, table->data, len))
+            if(cfs_copy_to_user(oldval, table->data, len))
                 return -EFAULT;
             if(put_user(0, ((char *) oldval) + len))
                 return -EFAULT;
@@ -1023,7 +1023,7 @@ int sysctl_string(cfs_sysctl_table_t *table, int *name, int nlen,
         len = newlen;
         if (len > table->maxlen)
             len = table->maxlen;
-        if(copy_from_user(table->data, newval, len))
+        if(cfs_copy_from_user(table->data, newval, len))
             return -EFAULT;
         if (len == table->maxlen)
             len--;
@@ -1106,7 +1106,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f
             len = left;
             if (len > TMPBUFLEN-1)
                 len = TMPBUFLEN-1;
-            if(copy_from_user(buf, buffer, len))
+            if(cfs_copy_from_user(buf, buffer, len))
                 return -EFAULT;
             buf[len] = 0;
             p = buf;
@@ -1143,7 +1143,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f
             len = strlen(buf);
             if (len > left)
                 len = left;
-            if(copy_to_user(buffer, buf, len))
+            if(cfs_copy_to_user(buffer, buf, len))
                 return -EFAULT;
             left -= len;
             (char *)buffer += len;
@@ -1235,7 +1235,7 @@ int proc_dostring(cfs_sysctl_table_t *table, int write, struct file *filp,
         }
         if (len >= (size_t)table->maxlen)
             len = (size_t)table->maxlen-1;
-        if(copy_from_user(table->data, buffer, len))
+        if(cfs_copy_from_user(table->data, buffer, len))
             return -EFAULT;
         ((char *) table->data)[len] = 0;
         filp->f_pos += *lenp;
@@ -1246,7 +1246,7 @@ int proc_dostring(cfs_sysctl_table_t *table, int write, struct file *filp,
         if (len > *lenp)
             len = *lenp;
         if (len)
-            if(copy_to_user(buffer, table->data, len))
+            if(cfs_copy_to_user(buffer, table->data, len))
                 return -EFAULT;
         if (len < *lenp) {
             if(put_user('\n', ((char *) buffer) + len))
@@ -1290,7 +1290,7 @@ int do_sysctl_strategy (cfs_sysctl_table_t *table,
             if (len) {
                 if (len > (size_t)table->maxlen)
                     len = (size_t)table->maxlen;
-                if(copy_to_user(oldval, table->data, len))
+                if(cfs_copy_to_user(oldval, table->data, len))
                     return -EFAULT;
                 if(put_user(len, oldlenp))
                     return -EFAULT;
@@ -1300,7 +1300,7 @@ int do_sysctl_strategy (cfs_sysctl_table_t *table,
             len = newlen;
             if (len > (size_t)table->maxlen)
                 len = (size_t)table->maxlen;
-            if(copy_from_user(table->data, newval, len))
+            if(cfs_copy_from_user(table->data, newval, len))
                 return -EFAULT;
         }
     }
@@ -1353,7 +1353,7 @@ repeat:
 int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp,
            void *newval, size_t newlen)
 {
-    struct list_head *tmp;
+    cfs_list_t *tmp;
 
     if (nlen <= 0 || nlen >= CTL_MAXNAME)
         return -ENOTDIR;
@@ -1365,7 +1365,7 @@ int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp,
     tmp = &root_table_header.ctl_entry;
     do {
         struct ctl_table_header *head =
-            list_entry(tmp, struct ctl_table_header, ctl_entry);
+            cfs_list_entry(tmp, struct ctl_table_header, ctl_entry);
         void *context = NULL;
         int error = parse_table(name, nlen, oldval, oldlenp, 
                     newval, newlen, head->ctl_table,
@@ -1459,9 +1459,9 @@ struct ctl_table_header *register_sysctl_table(cfs_sysctl_table_t * table,
 
     CFS_INIT_LIST_HEAD(&tmp->ctl_entry);
     if (insert_at_head)
-        list_add(&tmp->ctl_entry, &root_table_header.ctl_entry);
+        cfs_list_add(&tmp->ctl_entry, &root_table_header.ctl_entry);
     else
-        list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
+        cfs_list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
 #ifdef CONFIG_PROC_FS
     register_proc_table(table, cfs_proc_sys);
 #endif
@@ -1477,7 +1477,7 @@ struct ctl_table_header *register_sysctl_table(cfs_sysctl_table_t * table,
  */
 void unregister_sysctl_table(struct ctl_table_header * header)
 {
-    list_del(&header->ctl_entry);
+    cfs_list_del(&header->ctl_entry);
 #ifdef CONFIG_PROC_FS
     unregister_proc_table(header->ctl_table, cfs_proc_sys);
 #endif
@@ -1569,7 +1569,7 @@ static struct ctl_table top_table[2] = {
 int trace_write_dump_kernel(struct file *file, const char *buffer,
                              unsigned long count, void *data)
 {
-        int rc = trace_dump_debug_buffer_usrstr((void *)buffer, count);
+        int rc = cfs_trace_dump_debug_buffer_usrstr((void *)buffer, count);
         
         return (rc < 0) ? rc : count;
 }
@@ -1577,7 +1577,7 @@ int trace_write_dump_kernel(struct file *file, const char *buffer,
 int trace_write_daemon_file(struct file *file, const char *buffer,
                             unsigned long count, void *data)
 {
-        int rc = trace_daemon_command_usrstr((void *)buffer, count);
+        int rc = cfs_trace_daemon_command_usrstr((void *)buffer, count);
 
         return (rc < 0) ? rc : count;
 }
@@ -1586,9 +1586,9 @@ int trace_read_daemon_file(char *page, char **start, off_t off, int count,
                            int *eof, void *data)
 {
         int rc;
-        tracefile_read_lock();
-        rc = trace_copyout_string(page, count, tracefile, "\n");
-        tracefile_read_unlock();
+        cfs_tracefile_read_lock();
+        rc = cfs_trace_copyout_string(page, count, cfs_tracefile, "\n");
+        cfs_tracefile_read_unlock();
         return rc;
 }
 
@@ -1605,9 +1605,9 @@ int trace_read_debug_mb(char *page, char **start, off_t off, int count,
 {
         char   str[32];
 
-        snprintf(str, sizeof(str), "%d\n", trace_get_debug_mb());
+        snprintf(str, sizeof(str), "%d\n", cfs_trace_get_debug_mb());
 
-        return trace_copyout_string(page, count, str, NULL);
+        return cfs_trace_copyout_string(page, count, str, NULL);
 }
 
 int insert_proc(void)
@@ -1751,14 +1751,14 @@ lustre_ioctl_file(cfs_file_t * fh, PCFS_PROC_IOCTL devctl)
 
             if (obd->ioc_plen1) {
                 obd->ioc_pbuf1 = (char *)(data + off);
-                off += size_round(obd->ioc_plen1);
+                off += cfs_size_round(obd->ioc_plen1);
             } else {
                 obd->ioc_pbuf1 = NULL;
             }
 
             if (obd->ioc_plen2) {
                 obd->ioc_pbuf2 = (char *)(data + off);
-                off += size_round(obd->ioc_plen2);
+                off += cfs_size_round(obd->ioc_plen2);
             } else {
                 obd->ioc_pbuf2 = NULL;
             }
@@ -1843,7 +1843,7 @@ int seq_open(struct file *file, const struct seq_operations *op)
                file->private_data = p;
        }
        memset(p, 0, sizeof(*p));
-       mutex_init(&p->lock);
+       cfs_mutex_init(&p->lock);
        p->op = op;
 
        /*
@@ -1877,7 +1877,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
        void *p;
        int err = 0;
 
-       mutex_lock(&m->lock);
+       cfs_mutex_lock(&m->lock);
        /*
         * seq_file->op->..m_start/m_stop/m_next may do special actions
         * or optimisations based on the file->f_version, so we want to
@@ -1899,7 +1899,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
        /* if not empty - flush it first */
        if (m->count) {
                n = min(m->count, size);
-               err = copy_to_user(buf, m->buf + m->from, n);
+               err = cfs_copy_to_user(buf, m->buf + m->from, n);
                if (err)
                        goto Efault;
                m->count -= n;
@@ -1954,7 +1954,7 @@ Fill:
        }
        m->op->stop(m, p);
        n = min(m->count, size);
-       err = copy_to_user(buf, m->buf, n);
+       err = cfs_copy_to_user(buf, m->buf, n);
        if (err)
                goto Efault;
        copied += n;
@@ -1970,7 +1970,7 @@ Done:
        else
                *ppos += copied;
        file->f_version = m->version;
-       mutex_unlock(&m->lock);
+       cfs_mutex_unlock(&m->lock);
        return copied;
 Enomem:
        err = -ENOMEM;
@@ -2047,7 +2047,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
        struct seq_file *m = (struct seq_file *)file->private_data;
        long long retval = -EINVAL;
 
-       mutex_lock(&m->lock);
+       cfs_mutex_lock(&m->lock);
        m->version = file->f_version;
        switch (origin) {
                case 1:
@@ -2071,7 +2071,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
                        }
        }
        file->f_version = m->version;
-       mutex_unlock(&m->lock);
+       cfs_mutex_unlock(&m->lock);
        return retval;
 }
 EXPORT_SYMBOL(seq_lseek);
@@ -2298,11 +2298,11 @@ int seq_puts(struct seq_file *m, const char *s)
 }
 EXPORT_SYMBOL(seq_puts);
 
-struct list_head *seq_list_start(struct list_head *head, loff_t pos)
+cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos)
 {
-       struct list_head *lh;
+       cfs_list_t *lh;
 
-       list_for_each(lh, head)
+       cfs_list_for_each(lh, head)
                if (pos-- == 0)
                        return lh;
 
@@ -2311,7 +2311,8 @@ struct list_head *seq_list_start(struct list_head *head, loff_t pos)
 
 EXPORT_SYMBOL(seq_list_start);
 
-struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
+cfs_list_t *seq_list_start_head(cfs_list_t *head,
+                                loff_t pos)
 {
        if (!pos)
                return head;
@@ -2321,11 +2322,12 @@ struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
 
 EXPORT_SYMBOL(seq_list_start_head);
 
-struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
+cfs_list_t *seq_list_next(void *v, cfs_list_t *head,
+                          loff_t *ppos)
 {
-       struct list_head *lh;
+       cfs_list_t *lh;
 
-       lh = ((struct list_head *)v)->next;
+       lh = ((cfs_list_t *)v)->next;
        ++*ppos;
        return lh == head ? NULL : lh;
 }
index 81b40df..3a8d6cb 100644
@@ -62,7 +62,7 @@ void cfs_waitq_init(cfs_waitq_t *waitq)
     waitq->magic = CFS_WAITQ_MAGIC;
     waitq->flags = 0;
     CFS_INIT_LIST_HEAD(&(waitq->waiters));
-    spin_lock_init(&(waitq->guard));
+    cfs_spin_lock_init(&(waitq->guard));
 }
 
 /*
@@ -101,7 +101,7 @@ void cfs_waitlink_init(cfs_waitlink_t *link)
     link->event = &(slot->Event);
     link->hits  = &(slot->hits);
 
-    atomic_inc(&slot->count);
+    cfs_atomic_inc(&slot->count);
 
     CFS_INIT_LIST_HEAD(&(link->waitq[0].link));
     CFS_INIT_LIST_HEAD(&(link->waitq[1].link));
@@ -141,7 +141,7 @@ void cfs_waitlink_fini(cfs_waitlink_t *link)
     cfs_assert(link->waitq[0].waitq == NULL);
     cfs_assert(link->waitq[1].waitq == NULL);
 
-    atomic_dec(&slot->count);
+    cfs_atomic_dec(&slot->count);
 }
 
 
@@ -171,15 +171,15 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
     LASSERT(waitqid < CFS_WAITQ_CHANNELS);
 
-    spin_lock(&(waitq->guard));
+    cfs_spin_lock(&(waitq->guard));
     LASSERT(link->waitq[waitqid].waitq == NULL);
     link->waitq[waitqid].waitq = waitq;
     if (link->flags & CFS_WAITQ_EXCLUSIVE) {
-        list_add_tail(&link->waitq[waitqid].link, &waitq->waiters);
+        cfs_list_add_tail(&link->waitq[waitqid].link, &waitq->waiters);
     } else {
-        list_add(&link->waitq[waitqid].link, &waitq->waiters);
+        cfs_list_add(&link->waitq[waitqid].link, &waitq->waiters);
     }
-    spin_unlock(&(waitq->guard));
+    cfs_spin_unlock(&(waitq->guard));
 }
 /*
  * cfs_waitq_add
@@ -256,7 +256,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
     LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
 
-    spin_lock(&(waitq->guard));
+    cfs_spin_lock(&(waitq->guard));
 
     for (i=0; i < CFS_WAITQ_CHANNELS; i++) {
         if (link->waitq[i].waitq == waitq)
@@ -265,12 +265,12 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
 
     if (i < CFS_WAITQ_CHANNELS) {
         link->waitq[i].waitq = NULL;
-        list_del_init(&link->waitq[i].link);
+        cfs_list_del_init(&link->waitq[i].link);
     } else {
         cfs_enter_debugger();
     }
 
-    spin_unlock(&(waitq->guard));
+    cfs_spin_unlock(&(waitq->guard));
 }
 
 /*
@@ -321,7 +321,7 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
     LASSERT(waitq != NULL);
     LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
 
-    spin_lock(&waitq->guard);
+    cfs_spin_lock(&waitq->guard);
     cfs_list_for_each_entry_typed(scan, &waitq->waiters, 
                             cfs_waitlink_channel_t,
                             link) {
@@ -332,14 +332,14 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
         LASSERT( result == FALSE || result == TRUE );
 
         if (result) {
-            atomic_inc(waitl->hits);
+            cfs_atomic_inc(waitl->hits);
         }
 
         if ((waitl->flags & CFS_WAITQ_EXCLUSIVE) && --nr == 0)
             break;
     }
 
-    spin_unlock(&waitq->guard);
+    cfs_spin_unlock(&waitq->guard);
     return;
 }
 
@@ -404,9 +404,9 @@ void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
     LASSERT(link != NULL);
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
 
-    if (atomic_read(link->hits) > 0) {
-        atomic_dec(link->hits);
-        LASSERT((__u32)atomic_read(link->hits) < (__u32)0xFFFFFF00);
+    if (cfs_atomic_read(link->hits) > 0) {
+        cfs_atomic_dec(link->hits);
+        LASSERT((__u32)cfs_atomic_read(link->hits) < (__u32)0xFFFFFF00);
     } else {
         cfs_wait_event_internal(link->event, 0);
     }
@@ -434,9 +434,9 @@ int64_t cfs_waitq_timedwait( cfs_waitlink_t *link,
                              int64_t timeout)
 { 
 
-    if (atomic_read(link->hits) > 0) {
-        atomic_dec(link->hits);
-        LASSERT((__u32)atomic_read(link->hits) < (__u32)0xFFFFFF00);
+    if (cfs_atomic_read(link->hits) > 0) {
+        cfs_atomic_dec(link->hits);
+        LASSERT((__u32)cfs_atomic_read(link->hits) < (__u32)0xFFFFFF00);
         return (int64_t)TRUE;
     }
 
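cfs_waitq_wait() and cfs_waitq_timedwait() above consume a per-waiter hits counter before blocking, so a signal that arrives between queueing the waitlink and sleeping is banked rather than lost. The wake/wait handshake in outline (illustrative, condition-variable flavor):

    #include <pthread.h>

    struct waiter {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            int             hits;   /* wakeups that arrived early */
    };

    void waiter_wait(struct waiter *w)
    {
            pthread_mutex_lock(&w->lock);
            while (w->hits == 0)                  /* nothing banked: block */
                    pthread_cond_wait(&w->cond, &w->lock);
            w->hits--;                            /* consume one wakeup */
            pthread_mutex_unlock(&w->lock);
    }

    void waiter_signal(struct waiter *w)
    {
            pthread_mutex_lock(&w->lock);
            w->hits++;                            /* bank the wakeup */
            pthread_cond_signal(&w->cond);
            pthread_mutex_unlock(&w->lock);
    }
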
index ee3a5f0..5d87541 100644
@@ -48,7 +48,7 @@ KsDumpPrint(PCHAR buffer, ULONG length)
 {
     ULONG i;
     for (i=0; i < length; i++) {
-        if (((i+1) % 31) == 0) 
+        if (((i+1) % 31) == 0)
             printk("\n");
         printk("%2.2x ", (UCHAR)buffer[i]);
     }
@@ -348,14 +348,14 @@ KsAllocateKsTsdu()
 {
     PKS_TSDU    KsTsdu = NULL;
 
-    spin_lock(&(ks_data.ksnd_tsdu_lock));
+    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
 
-    if (!list_empty (&(ks_data.ksnd_freetsdus))) {
+    if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
 
         LASSERT(ks_data.ksnd_nfreetsdus > 0);
 
-        KsTsdu = list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
-        list_del(&(KsTsdu->Link));
+        KsTsdu = cfs_list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
+        cfs_list_del(&(KsTsdu->Link));
         ks_data.ksnd_nfreetsdus--;
 
     } else {
@@ -364,7 +364,7 @@ KsAllocateKsTsdu()
                         ks_data.ksnd_tsdu_slab, 0);
     }
 
-    spin_unlock(&(ks_data.ksnd_tsdu_lock));
+    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
 
     if (NULL != KsTsdu) {
         RtlZeroMemory(KsTsdu, ks_data.ksnd_tsdu_size);
@@ -417,14 +417,14 @@ KsPutKsTsdu(
     PKS_TSDU  KsTsdu
     )
 {
-    spin_lock(&(ks_data.ksnd_tsdu_lock));
+    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
     if (ks_data.ksnd_nfreetsdus > 128) {
         KsFreeKsTsdu(KsTsdu);
     } else {
-        list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+        cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
         ks_data.ksnd_nfreetsdus++;
     }
-    spin_unlock(&(ks_data.ksnd_tsdu_lock));
+    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
 }
 
 /* with tconn lock acquired */
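
KsAllocateKsTsdu()/KsPutKsTsdu() above put a bounded freelist in front of the slab cache: frees park the buffer on ksnd_freetsdus until the cap (128) is reached, and allocations drain that list before touching the slab. The generic shape (illustrative; malloc/free stand in for the slab, and buffers must be at least sizeof(struct node)):

    #include <pthread.h>
    #include <stdlib.h>

    #define FREELIST_MAX 128

    struct node { struct node *next; };

    static pthread_mutex_t fl_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *fl_head;
    static int fl_count;

    void *buf_alloc(size_t size)
    {
            struct node *n = NULL;

            pthread_mutex_lock(&fl_lock);
            if (fl_head != NULL) {                /* reuse a cached buffer */
                    n = fl_head;
                    fl_head = n->next;
                    fl_count--;
            }
            pthread_mutex_unlock(&fl_lock);
            return n != NULL ? (void *)n : malloc(size);
    }

    void buf_free(void *p)
    {
            struct node *n = p;

            pthread_mutex_lock(&fl_lock);
            if (fl_count < FREELIST_MAX) {        /* park it for reuse */
                    n->next = fl_head;
                    fl_head = n;
                    fl_count++;
                    n = NULL;
            }
            pthread_mutex_unlock(&fl_lock);
            free(n);                              /* cap hit: really free */
    }
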
@@ -593,11 +593,11 @@ KsReleaseTsdus(
 
     LASSERT(TsduMgr->TotalBytes >= length);
 
-    while (!list_empty(&TsduMgr->TsduList)) {
+    while (!cfs_list_empty(&TsduMgr->TsduList)) {
 
         ULONG   start = 0;
 
-        KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+        KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
         LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
         start = KsTsdu->StartOffset;
 
@@ -695,7 +695,7 @@ KsReleaseTsdus(
         if (KsTsdu->StartOffset >= KsTsdu->LastOffset) {
 
             /* remove KsTsdu from list */
-            list_del(&KsTsdu->Link);
+            cfs_list_del(&KsTsdu->Link);
             TsduMgr->NumOfTsdu--;
             KsPutKsTsdu(KsTsdu);
         }
@@ -760,7 +760,7 @@ KsGetTsdu(PKS_TSDUMGR TsduMgr, ULONG Length)
     /* retrieve the latest Tsdu buffer from TsduMgr
        list if the list is not empty. */
 
-    if (list_empty(&(TsduMgr->TsduList))) {
+    if (cfs_list_empty(&(TsduMgr->TsduList))) {
 
         LASSERT(TsduMgr->NumOfTsdu == 0);
         KsTsdu = NULL;
@@ -768,7 +768,7 @@ KsGetTsdu(PKS_TSDUMGR TsduMgr, ULONG Length)
     } else {
 
         LASSERT(TsduMgr->NumOfTsdu > 0);
-        KsTsdu = list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
+        KsTsdu = cfs_list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
 
         /* if this Tsdu does not contain enough space, we need
           to allocate a new Tsdu queue. */
@@ -782,7 +782,7 @@ KsGetTsdu(PKS_TSDUMGR TsduMgr, ULONG Length)
     if (NULL == KsTsdu) {
         KsTsdu = KsAllocateKsTsdu();
         if (NULL != KsTsdu) {
-            list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
+            cfs_list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
             TsduMgr->NumOfTsdu++;
         }
     }
@@ -1011,11 +1011,11 @@ NextTsdu:
 
     } else {
 
-        KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+        KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
         LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
 
         /* remove the KsTsdu from TsduMgr list to release the lock */
-        list_del(&(KsTsdu->Link));
+        cfs_list_del(&(KsTsdu->Link));
         TsduMgr->NumOfTsdu--;
 
         while (length > BytesRecved) {
@@ -1164,7 +1164,7 @@ NextTsdu:
                 KsTsdu = NULL;
             } else {
                 TsduMgr->NumOfTsdu++;
-                list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
+                cfs_list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
             }
         }
         
@@ -1284,7 +1284,7 @@ KsInitializeKsTsduMgr(
     TsduMgr->NumOfTsdu  = 0;
     TsduMgr->TotalBytes = 0;
 
-    spin_lock_init(&TsduMgr->Lock);
+    cfs_spin_lock_init(&TsduMgr->Lock);
 }
 
 
@@ -1343,9 +1343,9 @@ KsCleanupTsduMgr(
     KsRemoveTdiEngine(TsduMgr);
     KeSetEvent(&(TsduMgr->Event), 0, FALSE);
 
-    while (!list_empty(&TsduMgr->TsduList)) {
+    while (!cfs_list_empty(&TsduMgr->TsduList)) {
 
-        KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+        KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
         LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
 
         if (KsTsdu->StartOffset == KsTsdu->LastOffset) {
@@ -1354,7 +1354,7 @@ KsCleanupTsduMgr(
            // KsTsdu is empty now, we need to free it ...
             //
 
-            list_del(&(KsTsdu->Link));
+            cfs_list_del(&(KsTsdu->Link));
             TsduMgr->NumOfTsdu--;
 
             KsFreeKsTsdu(KsTsdu);
@@ -2955,7 +2955,7 @@ KsAcceptCompletionRoutine(
 
     LASSERT(child->kstc_type == kstt_child);
 
-    spin_lock(&(child->kstc_lock));
+    cfs_spin_lock(&(child->kstc_lock));
 
     LASSERT(parent->kstc_state == ksts_listening);
     LASSERT(child->kstc_state == ksts_connecting);
@@ -2973,7 +2973,7 @@ KsAcceptCompletionRoutine(
             FALSE
             );
 
-        spin_unlock(&(child->kstc_lock));
+        cfs_spin_unlock(&(child->kstc_lock));
 
        KsPrint((2, "KsAcceptCompletionRoutine: signal parent: %p (child: %p)\n",
                     parent, child));
@@ -2985,7 +2985,7 @@ KsAcceptCompletionRoutine(
         child->child.kstc_busy = FALSE;
         child->kstc_state = ksts_associated;
 
-        spin_unlock(&(child->kstc_lock));
+        cfs_spin_unlock(&(child->kstc_lock));
     }
 
     /* now free the Irp */
@@ -3003,7 +3003,7 @@ KsSearchIpAddress(PUNICODE_STRING  DeviceName)
     ks_addr_slot_t * slot = NULL;
     PLIST_ENTRY      list = NULL;
 
-    spin_lock(&ks_data.ksnd_addrs_lock);
+    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
 
     list = ks_data.ksnd_addrs_list.Flink;
     while (list != &ks_data.ksnd_addrs_list) {
@@ -3018,7 +3018,7 @@ KsSearchIpAddress(PUNICODE_STRING  DeviceName)
         slot = NULL;
     }
 
-    spin_unlock(&ks_data.ksnd_addrs_lock);
+    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
 
     return slot;
 }
@@ -3026,7 +3026,7 @@ KsSearchIpAddress(PUNICODE_STRING  DeviceName)
 void
 KsCleanupIpAddresses()
 {
-    spin_lock(&ks_data.ksnd_addrs_lock);
+    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
 
     while (!IsListEmpty(&ks_data.ksnd_addrs_list)) {
 
@@ -3040,7 +3040,7 @@ KsCleanupIpAddresses()
     }
 
     cfs_assert(ks_data.ksnd_naddrs == 0);
-    spin_unlock(&ks_data.ksnd_addrs_lock);
+    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
 }
 
 VOID
@@ -3083,7 +3083,7 @@ KsAddAddressHandler(
 
             slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
             if (slot != NULL) {
-                spin_lock(&ks_data.ksnd_addrs_lock);
+                cfs_spin_lock(&ks_data.ksnd_addrs_lock);
                 InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
                 sprintf(slot->iface, "eth%d", ks_data.ksnd_naddrs++);
                 slot->ip_addr = ntohl(IpAddress->in_addr);
@@ -3093,7 +3093,7 @@ KsAddAddressHandler(
                 slot->devname.Length = DeviceName->Length;
                 slot->devname.MaximumLength = DeviceName->Length + sizeof(WCHAR);
                 slot->devname.Buffer = slot->buffer;
-                spin_unlock(&ks_data.ksnd_addrs_lock);
+                cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
 
                 KsPrint((0, "KsAddAddressHandler: %s added: ip=%xh(%d.%d.%d.%d)\n",
                             slot->iface, IpAddress->in_addr,
@@ -3144,7 +3144,7 @@ KsRegisterPnpHandlers()
 
     /* initialize the global ks_data members */
     RtlInitUnicodeString(&ks_data.ksnd_client_name, TDILND_MODULE_NAME);
-    spin_lock_init(&ks_data.ksnd_addrs_lock);
+    cfs_spin_lock_init(&ks_data.ksnd_addrs_lock);
     InitializeListHead(&ks_data.ksnd_addrs_list);
 
     /* register the pnp handlers */
@@ -3199,27 +3199,27 @@ KsGetVacancyBacklog(
     LASSERT(parent->kstc_type == kstt_listener);
     LASSERT(parent->kstc_state == ksts_listening);
 
-    if (list_empty(&(parent->listener.kstc_listening.list))) {
+    if (cfs_list_empty(&(parent->listener.kstc_listening.list))) {
 
         child = NULL;
 
     } else {
 
-        struct list_head * tmp;
+        cfs_list_t * tmp;
 
         /* check the listening queue and try to get a free connection */
 
-        list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
-            child = list_entry (tmp, ks_tconn_t, child.kstc_link);
-            spin_lock(&(child->kstc_lock));
+        cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+            child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
+            cfs_spin_lock(&(child->kstc_lock));
 
             if (!child->child.kstc_busy) {
                 LASSERT(child->kstc_state == ksts_associated);
                 child->child.kstc_busy = TRUE;
-                spin_unlock(&(child->kstc_lock));
+                cfs_spin_unlock(&(child->kstc_lock));
                 break;
             } else {
-                spin_unlock(&(child->kstc_lock));
+                cfs_spin_unlock(&(child->kstc_lock));
                 child = NULL;
             }
         }
@@ -3275,7 +3275,7 @@ KsConnectEventHandler(
 
     LASSERT(parent->kstc_type == kstt_listener);
 
-    spin_lock(&(parent->kstc_lock));
+    cfs_spin_lock(&(parent->kstc_lock));
 
     if (parent->kstc_state == ksts_listening) {
 
@@ -3313,11 +3313,11 @@ KsConnectEventHandler(
 
         if (child) {
 
-            spin_lock(&(child->kstc_lock));
+            cfs_spin_lock(&(child->kstc_lock));
             child->child.kstc_info.ConnectionInfo = ConnectionInfo;
             child->child.kstc_info.Remote = ConnectionInfo->RemoteAddress;
             child->kstc_state = ksts_connecting;
-            spin_unlock(&(child->kstc_lock));
+            cfs_spin_unlock(&(child->kstc_lock));
 
         } else {
 
@@ -3356,13 +3356,13 @@ KsConnectEventHandler(
         goto errorout;
     }
 
-    spin_unlock(&(parent->kstc_lock));
+    cfs_spin_unlock(&(parent->kstc_lock));
 
     return Status;
 
 errorout:
 
-    spin_unlock(&(parent->kstc_lock));
+    cfs_spin_unlock(&(parent->kstc_lock));
 
     *AcceptIrp = NULL;
     *ConnectionContext = NULL;
@@ -3438,9 +3438,9 @@ KsDisconnectHelper(PKS_DISCONNECT_WORKITEM WorkItem)
 
     KeSetEvent(&(WorkItem->Event), 0, FALSE);
 
-    spin_lock(&(tconn->kstc_lock));
+    cfs_spin_lock(&(tconn->kstc_lock));
     cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
-    spin_unlock(&(tconn->kstc_lock));
+    cfs_spin_unlock(&(tconn->kstc_lock));
     ks_put_tconn(tconn);
 }
 
@@ -3487,7 +3487,7 @@ KsDisconnectEventHandler(
                  tconn, DisconnectFlags));
 
     ks_get_tconn(tconn);
-    spin_lock(&(tconn->kstc_lock));
+    cfs_spin_lock(&(tconn->kstc_lock));
 
     WorkItem = &(tconn->kstc_disconnect);
 
@@ -3520,7 +3520,7 @@ KsDisconnectEventHandler(
         }
     }
 
-    spin_unlock(&(tconn->kstc_lock));
+    cfs_spin_unlock(&(tconn->kstc_lock));
     ks_put_tconn(tconn);
 
     return  (Status);
@@ -4333,16 +4333,16 @@ ks_create_tconn()
                 tconn
             );
 
-        spin_lock_init(&(tconn->kstc_lock));
+        cfs_spin_lock_init(&(tconn->kstc_lock));
 
         ks_get_tconn(tconn);
-        spin_lock(&(ks_data.ksnd_tconn_lock));
+        cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
 
         /* attach it into global list in ks_data */
 
-        list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
+        cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
         ks_data.ksnd_ntconns++;
-        spin_unlock(&(ks_data.ksnd_tconn_lock));
+        cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
 
         tconn->kstc_rcv_wnd = tconn->kstc_snd_wnd = 0x10000;
     }
@@ -4368,12 +4368,12 @@ ks_create_tconn()
 void
 ks_free_tconn(ks_tconn_t * tconn)
 {
-    LASSERT(atomic_read(&(tconn->kstc_refcount)) == 0);
+    LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
 
-    spin_lock(&(ks_data.ksnd_tconn_lock));
+    cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
 
     /* remove it from the global list */
-    list_del(&tconn->kstc_list);
+    cfs_list_del(&tconn->kstc_list);
     ks_data.ksnd_ntconns--;
 
     /* if this is the last tconn, it would be safe for
@@ -4381,7 +4381,7 @@ ks_free_tconn(ks_tconn_t * tconn)
     if (ks_data.ksnd_ntconns == 0) {
         cfs_wake_event(&ks_data.ksnd_tconn_exit);
     }
-    spin_unlock(&(ks_data.ksnd_tconn_lock));
+    cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
 
     /* free the structure memory */
     cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
@@ -4511,7 +4511,7 @@ ks_get_tconn(
     ks_tconn_t * tconn
     )
 {
-    atomic_inc(&(tconn->kstc_refcount));
+    cfs_atomic_inc(&(tconn->kstc_refcount));
 }
 
 /*
@@ -4534,15 +4534,15 @@ ks_put_tconn(
     ks_tconn_t *tconn
     )
 {
-    if (atomic_dec_and_test(&(tconn->kstc_refcount))) {
+    if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
 
-        spin_lock(&(tconn->kstc_lock));
+        cfs_spin_lock(&(tconn->kstc_lock));
 
         if ( ( tconn->kstc_type == kstt_child ||
                tconn->kstc_type == kstt_sender ) &&
              ( tconn->kstc_state == ksts_connected ) ) {
 
-            spin_unlock(&(tconn->kstc_lock));
+            cfs_spin_unlock(&(tconn->kstc_lock));
 
             ks_abort_tconn(tconn);
 
@@ -4559,7 +4559,7 @@ ks_put_tconn(
                 cfs_set_flag(tconn->kstc_flags, KS_TCONN_DESTROY_BUSY);
             }
 
-            spin_unlock(&(tconn->kstc_lock));
+            cfs_spin_unlock(&(tconn->kstc_lock));
         }
     }
 }
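
ks_get_tconn and ks_put_tconn above implement a conventional reference-count
lifetime scheme: each holder takes a reference, and the thread that drops the
last one starts teardown. A distilled sketch of that discipline, using
hypothetical placeholder names rather than the real ks_tconn_t:

    /* Hypothetical reference-count skeleton (names are illustrative). */
    typedef struct obj {
            cfs_atomic_t    o_refcount;
    } obj_t;

    void obj_destroy(obj_t *o);     /* assumed teardown routine */

    static inline void obj_get(obj_t *o)
    {
            cfs_atomic_inc(&o->o_refcount);
    }

    static inline void obj_put(obj_t *o)
    {
            /* only the dropper of the last reference frees the object */
            if (cfs_atomic_dec_and_test(&o->o_refcount))
                    obj_destroy(o);
    }
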
@@ -4623,8 +4623,8 @@ ks_destroy_tconn(
                 tconn->kstc_addr.FileObject
                 );
 
-        spin_lock(&tconn->child.kstc_parent->kstc_lock);
-        spin_lock(&tconn->kstc_lock);
+        cfs_spin_lock(&tconn->child.kstc_parent->kstc_lock);
+        cfs_spin_lock(&tconn->kstc_lock);
 
         tconn->kstc_state = ksts_inited;
 
@@ -4632,7 +4632,7 @@ ks_destroy_tconn(
 
         if (tconn->child.kstc_queued) {
 
-            list_del(&(tconn->child.kstc_link));
+            cfs_list_del(&(tconn->child.kstc_link));
 
             if (tconn->child.kstc_queueno) {
 
@@ -4648,8 +4648,8 @@ ks_destroy_tconn(
             tconn->child.kstc_queued = FALSE;
         }
 
-        spin_unlock(&tconn->kstc_lock);
-        spin_unlock(&tconn->child.kstc_parent->kstc_lock);
+        cfs_spin_unlock(&tconn->kstc_lock);
+        cfs_spin_unlock(&tconn->child.kstc_parent->kstc_lock);
 
         /* drop the reference of the parent tconn */
         ks_put_tconn(tconn->child.kstc_parent);
@@ -5224,7 +5224,7 @@ ks_build_tconn(
                     NULL
                     );
 
-    spin_lock(&(tconn->kstc_lock));
+    cfs_spin_lock(&(tconn->kstc_lock));
 
     if (NT_SUCCESS(status)) {
 
@@ -5235,7 +5235,7 @@ ks_build_tconn(
         tconn->sender.kstc_info.ConnectionInfo = ConnectionInfo;
         tconn->sender.kstc_info.Remote         = ConnectionInfo->RemoteAddress;
 
-        spin_unlock(&(tconn->kstc_lock));
+        cfs_spin_unlock(&(tconn->kstc_lock));
 
     } else {
 
@@ -5249,7 +5249,7 @@ ks_build_tconn(
         rc = cfs_error_code(status);
 
         tconn->kstc_state = ksts_associated;
-        spin_unlock(&(tconn->kstc_lock));
+        cfs_spin_unlock(&(tconn->kstc_lock));
 
         /* disassociate the connection and the address object;
            after cleanup, it's safe to set the state to abort ... */
@@ -5407,7 +5407,7 @@ ks_disconnect_tconn(
             cfs_enter_debugger();
         }
 
-        spin_lock(&(tconn->kstc_lock));
+        cfs_spin_lock(&(tconn->kstc_lock));
 
         /* cleanup the tsdumgr Lists */
         KsCleanupTsdu (tconn);
@@ -5424,7 +5424,7 @@ ks_disconnect_tconn(
         info->ConnectionInfo = NULL;
         info->Remote = NULL;
 
-        spin_unlock(&(tconn->kstc_lock));
+        cfs_spin_unlock(&(tconn->kstc_lock));
     }
 
     status = STATUS_SUCCESS;
@@ -5462,7 +5462,7 @@ ks_abort_tconn(
     WorkItem = &(tconn->kstc_disconnect);
 
     ks_get_tconn(tconn);
-    spin_lock(&(tconn->kstc_lock));
+    cfs_spin_lock(&(tconn->kstc_lock));
 
     if (tconn->kstc_state != ksts_connected) {
         ks_put_tconn(tconn);
@@ -5482,7 +5482,7 @@ ks_abort_tconn(
         }
     }
 
-    spin_unlock(&(tconn->kstc_lock));
+    cfs_spin_unlock(&(tconn->kstc_lock));
 }
 
 
@@ -5558,16 +5558,16 @@ KsQueueTdiEngine(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr)
     engs = &TsduMgr->Slot;
 
     if (!engs->queued) {
-        spin_lock(&engm->lock);
+        cfs_spin_lock(&engm->lock);
         if (!engs->queued) {
-            list_add_tail(&engs->link, &engm->list);
+            cfs_list_add_tail(&engs->link, &engm->list);
             engs->queued = TRUE;
             engs->tconn = tconn;
             engs->emgr = engm;
             engs->tsdumgr = TsduMgr;
             KeSetEvent(&(engm->start),0, FALSE);
         }
-        spin_unlock(&engm->lock);
+        cfs_spin_unlock(&engm->lock);
         KsPrint((4, "KsQueueTdiEngine: TsduMgr=%p is queued to engine %p\n",
                     TsduMgr, engm));
     }
@@ -5584,15 +5584,15 @@ KsRemoveTdiEngine(PKS_TSDUMGR TsduMgr)
     if (engs->queued) {
         engm = engs->emgr;
         LASSERT(engm != NULL);
-        spin_lock(&engm->lock);
+        cfs_spin_lock(&engm->lock);
         if (engs->queued) {
-            list_del(&engs->link);
+            cfs_list_del(&engs->link);
             engs->queued = FALSE;
             engs->tconn = NULL;
             engs->emgr = NULL;
             engs->tsdumgr = NULL;
         }
-        spin_unlock(&engm->lock);
+        cfs_spin_unlock(&engm->lock);
         KsPrint((4, "KsRemoveTdiEngine: TsduMgr %p is removed from engine %p\n",
                     TsduMgr, engm));
     }
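
Both KsQueueTdiEngine and KsRemoveTdiEngine test engs->queued twice: once
without the lock to keep the common case cheap, and again under engm->lock
before touching the list, since another thread may have raced in between.
Reduced to its essentials, the enqueue side is:

    /* Double-checked enqueue, as used above (details elided). */
    if (!engs->queued) {                    /* cheap unlocked test    */
            cfs_spin_lock(&engm->lock);
            if (!engs->queued) {            /* re-test under the lock */
                    cfs_list_add_tail(&engs->link, &engm->list);
                    engs->queued = TRUE;
            }
            cfs_spin_unlock(&engm->lock);
    }
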
@@ -5742,7 +5742,7 @@ KsDeliveryTsdus(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr)
         tflags = TDI_SEND_NON_BLOCKING;
     }
    
-    if (list_empty(&TsduMgr->TsduList)) {
+    if (cfs_list_empty(&TsduMgr->TsduList)) {
         LASSERT(TsduMgr->TotalBytes == 0);
         ks_unlock_tsdumgr(TsduMgr);
         goto errorout;
@@ -5800,7 +5800,7 @@ KsDeliveryEngineThread(void * context)
 {
     ks_engine_mgr_t *   engm = context;
     ks_engine_slot_t *  engs;
-    struct list_head *  list;
+    cfs_list_t *        list;
     ks_tconn_t *        tconn;
 
     cfs_set_thread_priority(31);
@@ -5809,20 +5809,20 @@ KsDeliveryEngineThread(void * context)
 
         cfs_wait_event_internal(&engm->start, 0);
 
-        spin_lock(&engm->lock);
-        if (list_empty(&engm->list)) {
-            spin_unlock(&engm->lock);
+        cfs_spin_lock(&engm->lock);
+        if (cfs_list_empty(&engm->list)) {
+            cfs_spin_unlock(&engm->lock);
             continue;
         }
 
         list = engm->list.next;
-        list_del(list);
-        engs = list_entry(list, ks_engine_slot_t, link);
+        cfs_list_del(list);
+        engs = cfs_list_entry(list, ks_engine_slot_t, link);
         LASSERT(engs->emgr == engm);
         LASSERT(engs->queued);
         engs->emgr = NULL;
         engs->queued = FALSE;
-        spin_unlock(&engm->lock);
+        cfs_spin_unlock(&engm->lock);
 
         tconn = engs->tconn;
         LASSERT(tconn->kstc_magic == KS_TCONN_MAGIC);
@@ -5861,7 +5861,7 @@ ks_init_tdi_data()
     /* initialize tconn related globals */
     RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
 
-    spin_lock_init(&ks_data.ksnd_tconn_lock);
+    cfs_spin_lock_init(&ks_data.ksnd_tconn_lock);
     CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
     cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
 
@@ -5874,7 +5874,7 @@ ks_init_tdi_data()
     }
 
     /* initialize tsdu related globals */
-    spin_lock_init(&ks_data.ksnd_tsdu_lock);
+    cfs_spin_lock_init(&ks_data.ksnd_tsdu_lock);
     CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
     ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
     ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
@@ -5886,7 +5886,7 @@ ks_init_tdi_data()
     }
 
     /* initialize engine threads list */
-    ks_data.ksnd_engine_nums = num_online_cpus();
+    ks_data.ksnd_engine_nums = cfs_num_online_cpus();
     if (ks_data.ksnd_engine_nums < 4) {
         ks_data.ksnd_engine_nums = 4;
     }
@@ -5897,7 +5897,7 @@ ks_init_tdi_data()
         goto errorout;
     }
     for (i = 0; i < ks_data.ksnd_engine_nums; i++) {
-        spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
+        cfs_spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
         cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
         cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
         CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
@@ -5939,7 +5939,7 @@ void
 ks_fini_tdi_data()
 {
     PKS_TSDU            KsTsdu = NULL;
-    struct list_head *  list   = NULL;
+    cfs_list_t *        list   = NULL;
     int i;
 
     /* clean up the pnp handler and address slots */
@@ -5956,12 +5956,12 @@ ks_fini_tdi_data()
     }
 
     /* we need to wait until all the tconns are freed */
-    spin_lock(&(ks_data.ksnd_tconn_lock));
+    cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
 
-    if (list_empty(&(ks_data.ksnd_tconns))) {
+    if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
         cfs_wake_event(&ks_data.ksnd_tconn_exit);
     }
-    spin_unlock(&(ks_data.ksnd_tconn_lock));
+    cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
 
     /* now wait on the tconn exit event */
     cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
@@ -5971,15 +5971,15 @@ ks_fini_tdi_data()
     ks_data.ksnd_tconn_slab = NULL;
 
     /* clean up all the tsdu buffers in the free list */
-    spin_lock(&(ks_data.ksnd_tsdu_lock));
-    list_for_each (list, &ks_data.ksnd_freetsdus) {
-        KsTsdu = list_entry (list, KS_TSDU, Link);
+    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+    cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
+        KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
 
         cfs_mem_cache_free(
                 ks_data.ksnd_tsdu_slab,
                 KsTsdu );
     }
-    spin_unlock(&(ks_data.ksnd_tsdu_lock));
+    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
 
     /* it's safe to delete the tsdu slab ... */
     cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
@@ -6103,22 +6103,22 @@ ks_replenish_backlogs(
         /* create the backlog child tconn */
         backlog = ks_create_child_tconn(parent);
 
-        spin_lock(&(parent->kstc_lock));
+        cfs_spin_lock(&(parent->kstc_lock));
 
         if (backlog) {
-            spin_lock(&backlog->kstc_lock);
+            cfs_spin_lock(&backlog->kstc_lock);
             /* attach it to the listening list of the daemon */
-            list_add( &backlog->child.kstc_link,
+            cfs_list_add( &backlog->child.kstc_link,
                       &parent->listener.kstc_listening.list );
             parent->listener.kstc_listening.num++;
 
             backlog->child.kstc_queued = TRUE;
-            spin_unlock(&backlog->kstc_lock);
+            cfs_spin_unlock(&backlog->kstc_lock);
         } else {
             cfs_enter_debugger();
         }
 
-        spin_unlock(&(parent->kstc_lock));
+        cfs_spin_unlock(&(parent->kstc_lock));
     }
 }
 
@@ -6153,11 +6153,11 @@ ks_start_listen(ks_tconn_t *tconn, int nbacklog)
         return rc;
     }
 
-    spin_lock(&(tconn->kstc_lock));
+    cfs_spin_lock(&(tconn->kstc_lock));
     tconn->listener.nbacklog = nbacklog;
     tconn->kstc_state = ksts_listening;
     cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
-    spin_unlock(&(tconn->kstc_lock));
+    cfs_spin_unlock(&(tconn->kstc_lock));
 
     return rc;
 }
@@ -6165,25 +6165,25 @@ ks_start_listen(ks_tconn_t *tconn, int nbacklog)
 void
 ks_stop_listen(ks_tconn_t *tconn)
 {
-    struct list_head *      list;
+    cfs_list_t *            list;
     ks_tconn_t *            backlog;
 
     /* reset all tdi event callbacks to NULL */
     KsResetHandlers (tconn);
 
-    spin_lock(&tconn->kstc_lock);
+    cfs_spin_lock(&tconn->kstc_lock);
 
     cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
 
     /* cleanup all the listening backlog child connections */
-    list_for_each (list, &(tconn->listener.kstc_listening.list)) {
-        backlog = list_entry(list, ks_tconn_t, child.kstc_link);
+    cfs_list_for_each (list, &(tconn->listener.kstc_listening.list)) {
+        backlog = cfs_list_entry(list, ks_tconn_t, child.kstc_link);
 
         /* destroy and free it */
         ks_put_tconn(backlog);
     }
 
-    spin_unlock(&tconn->kstc_lock);
+    cfs_spin_unlock(&tconn->kstc_lock);
 
     /* wake it up from waiting on new incoming connections */
     KeSetEvent(&tconn->listener.kstc_accept_event, 0, FALSE);
@@ -6214,15 +6214,15 @@ ks_wait_child_tconn(
     ks_tconn_t **   child
     )
 {
-    struct list_head * tmp;
+    cfs_list_t * tmp;
     ks_tconn_t * backlog = NULL;
 
     ks_replenish_backlogs(parent, parent->listener.nbacklog);
 
-    spin_lock(&(parent->kstc_lock));
+    cfs_spin_lock(&(parent->kstc_lock));
 
     if (parent->listener.kstc_listening.num <= 0) {
-        spin_unlock(&(parent->kstc_lock));
+        cfs_spin_unlock(&(parent->kstc_lock));
         return -1;
     }
 
@@ -6230,33 +6230,33 @@ again:
 
     /* check the listening queue and try to find the accepted connection */
 
-    list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
-        backlog = list_entry (tmp, ks_tconn_t, child.kstc_link);
+    cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+        backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
 
-        spin_lock(&(backlog->kstc_lock));
+        cfs_spin_lock(&(backlog->kstc_lock));
 
         if (backlog->child.kstc_accepted) {
 
             LASSERT(backlog->kstc_state == ksts_connected);
             LASSERT(backlog->child.kstc_busy);
 
-            list_del(&(backlog->child.kstc_link));
-            list_add(&(backlog->child.kstc_link),
-                     &(parent->listener.kstc_accepted.list));
+            cfs_list_del(&(backlog->child.kstc_link));
+            cfs_list_add(&(backlog->child.kstc_link),
+                         &(parent->listener.kstc_accepted.list));
             parent->listener.kstc_accepted.num++;
             parent->listener.kstc_listening.num--;
             backlog->child.kstc_queueno = 1;
 
-            spin_unlock(&(backlog->kstc_lock));
+            cfs_spin_unlock(&(backlog->kstc_lock));
 
             break;
         } else {
-            spin_unlock(&(backlog->kstc_lock));
+            cfs_spin_unlock(&(backlog->kstc_lock));
             backlog = NULL;
         }
     }
 
-    spin_unlock(&(parent->kstc_lock));
+    cfs_spin_unlock(&(parent->kstc_lock));
 
     /* we need to wait until new incoming connections are requested
        or until the listening daemon thread is shut down */
@@ -6272,11 +6272,11 @@ again:
                 NULL
                 );
 
-        spin_lock(&(parent->kstc_lock));
+        cfs_spin_lock(&(parent->kstc_lock));
 
         /* check whether it's expected to exit */
         if (!cfs_is_flag_set(parent->kstc_flags, KS_TCONN_DAEMON_STARTED)) {
-            spin_unlock(&(parent->kstc_lock));
+            cfs_spin_unlock(&(parent->kstc_lock));
         } else {
             goto again;
         }
@@ -6526,7 +6526,7 @@ int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
     ks_addr_slot_t * slot = NULL;
     PLIST_ENTRY      list = NULL;
 
-    spin_lock(&ks_data.ksnd_addrs_lock);
+    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
 
     list = ks_data.ksnd_addrs_list.Flink;
     while (list != &ks_data.ksnd_addrs_list) {
@@ -6541,7 +6541,7 @@ int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
         slot = NULL;
     }
 
-    spin_unlock(&ks_data.ksnd_addrs_lock);
+    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
 
     return (int)(slot == NULL);
 }
@@ -6552,7 +6552,7 @@ int libcfs_ipif_enumerate(char ***names)
     PLIST_ENTRY      list = NULL;
     int              nips = 0;
 
-    spin_lock(&ks_data.ksnd_addrs_lock);
+    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
 
     *names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
     if (*names == NULL) {
@@ -6571,7 +6571,7 @@ int libcfs_ipif_enumerate(char ***names)
 
 errorout:
 
-    spin_unlock(&ks_data.ksnd_addrs_lock);
+    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
     return nips;
 }
 
@@ -6628,7 +6628,7 @@ void libcfs_sock_abort_accept(struct socket *sock)
 {
     LASSERT(sock->kstc_type == kstt_listener);
 
-    spin_lock(&(sock->kstc_lock));
+    cfs_spin_lock(&(sock->kstc_lock));
 
     /* clear the daemon flag */
     cfs_clear_flag(sock->kstc_flags, KS_TCONN_DAEMON_STARTED);
@@ -6636,7 +6636,7 @@ void libcfs_sock_abort_accept(struct socket *sock)
     /* wake it up from waiting on new incoming connections */
     KeSetEvent(&sock->listener.kstc_accept_event, 0, FALSE);
 
-    spin_unlock(&(sock->kstc_lock));
+    cfs_spin_unlock(&(sock->kstc_lock));
 }
 
 /*
@@ -6720,7 +6720,7 @@ int libcfs_sock_getaddr(struct socket *socket, int remote, __u32 *ip, int *port)
 {
     PTRANSPORT_ADDRESS  taddr = NULL;
 
-    spin_lock(&socket->kstc_lock);
+    cfs_spin_lock(&socket->kstc_lock);
     if (remote) {
         if (socket->kstc_type == kstt_sender) {
             taddr = socket->sender.kstc_info.Remote;
@@ -6738,11 +6738,11 @@ int libcfs_sock_getaddr(struct socket *socket, int remote, __u32 *ip, int *port)
         if (port != NULL)
             *port = ntohs (addr->sin_port);
     } else {
-        spin_unlock(&socket->kstc_lock);
+        cfs_spin_unlock(&socket->kstc_lock);
         return -ENOTCONN;
     }
 
-    spin_unlock(&socket->kstc_lock);
+    cfs_spin_unlock(&socket->kstc_lock);
     return 0;
 }
 
index fb29c1d..6e63f49 100644 (file)
 #include "tracefile.h"
 
 /* percents to share the total debug memory for each type */
-static unsigned int pages_factor[TCD_TYPE_MAX] = {
-        90,  /* 90% pages for TCD_TYPE_PASSIVE */
-        10   /* 10% pages for TCD_TYPE_DISPATCH */
+static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
+        90,  /* 90% pages for CFS_TCD_TYPE_PASSIVE */
+        10   /* 10% pages for CFS_TCD_TYPE_DISPATCH */
 };
 
-char *trace_console_buffers[NR_CPUS][TCD_TYPE_MAX];
+char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
 
-struct rw_semaphore tracefile_sem;
+cfs_rw_semaphore_t cfs_tracefile_sem;
 
-int tracefile_init_arch()
+int cfs_tracefile_init_arch()
 {
        int    i;
        int    j;
-       struct trace_cpu_data *tcd;
+       struct cfs_trace_cpu_data *tcd;
 
-       init_rwsem(&tracefile_sem);
+       cfs_init_rwsem(&cfs_tracefile_sem);
 
        /* initialize trace_data */
-       memset(trace_data, 0, sizeof(trace_data));
-       for (i = 0; i < TCD_TYPE_MAX; i++) {
-               trace_data[i]=cfs_alloc(sizeof(union trace_data_union)*NR_CPUS,
-                                                         GFP_KERNEL);
-               if (trace_data[i] == NULL)
+       memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
+       for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
+               cfs_trace_data[i] =
+                        cfs_alloc(sizeof(union cfs_trace_data_union) * \
+                                  CFS_NR_CPUS, GFP_KERNEL);
+               if (cfs_trace_data[i] == NULL)
                        goto out;
        }
 
        /* arch related info initialized */
-       tcd_for_each(tcd, i, j) {
+       cfs_tcd_for_each(tcd, i, j) {
                tcd->tcd_pages_factor = (USHORT) pages_factor[i];
                tcd->tcd_type = (USHORT) i;
                tcd->tcd_cpu = (USHORT)j;
        }
 
-       for (i = 0; i < num_possible_cpus(); i++)
-               for (j = 0; j < TCD_TYPE_MAX; j++) {
-                       trace_console_buffers[i][j] =
-                               cfs_alloc(TRACE_CONSOLE_BUFFER_SIZE,
+       for (i = 0; i < cfs_num_possible_cpus(); i++)
+               for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
+                       cfs_trace_console_buffers[i][j] =
+                               cfs_alloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                           GFP_KERNEL);
 
-                       if (trace_console_buffers[i][j] == NULL)
+                       if (cfs_trace_console_buffers[i][j] == NULL)
                                goto out;
                }
 
        return 0;
 
 out:
-       tracefile_fini_arch();
-       printk(KERN_ERR "lnet: No enough memory\n");
+       cfs_tracefile_fini_arch();
+       printk(CFS_KERN_ERR "lnet: Not enough memory\n");
        return -ENOMEM;
 
 }
 
-void tracefile_fini_arch()
+void cfs_tracefile_fini_arch()
 {
        int    i;
        int    j;
 
-       for (i = 0; i < num_possible_cpus(); i++) {
-               for (j = 0; j < TCD_TYPE_MAX; j++) {
-                       if (trace_console_buffers[i][j] != NULL) {
-                               cfs_free(trace_console_buffers[i][j]);
-                               trace_console_buffers[i][j] = NULL;
+       for (i = 0; i < cfs_num_possible_cpus(); i++) {
+               for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
+                       if (cfs_trace_console_buffers[i][j] != NULL) {
+                               cfs_free(cfs_trace_console_buffers[i][j]);
+                               cfs_trace_console_buffers[i][j] = NULL;
                        }
                }
        }
 
-       for (i = 0; trace_data[i] != NULL; i++) {
-               cfs_free(trace_data[i]);
-               trace_data[i] = NULL;
+       for (i = 0; cfs_trace_data[i] != NULL; i++) {
+               cfs_free(cfs_trace_data[i]);
+               cfs_trace_data[i] = NULL;
        }
 
-       fini_rwsem(&tracefile_sem);
+       cfs_fini_rwsem(&cfs_tracefile_sem);
 }
 
-void tracefile_read_lock()
+void cfs_tracefile_read_lock()
 {
-       down_read(&tracefile_sem);
+       cfs_down_read(&cfs_tracefile_sem);
 }
 
-void tracefile_read_unlock()
+void cfs_tracefile_read_unlock()
 {
-       up_read(&tracefile_sem);
+       cfs_up_read(&cfs_tracefile_sem);
 }
 
-void tracefile_write_lock()
+void cfs_tracefile_write_lock()
 {
-       down_write(&tracefile_sem);
+       cfs_down_write(&cfs_tracefile_sem);
 }
 
-void tracefile_write_unlock()
+void cfs_tracefile_write_unlock()
 {
-       up_write(&tracefile_sem);
+       cfs_up_write(&cfs_tracefile_sem);
 }
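
The four helpers above wrap a single global reader-writer semaphore: any
number of readers may hold cfs_tracefile_sem concurrently, while a writer
excludes everyone else. A caller sketch with hypothetical function names:

    /* Hypothetical callers of the tracefile lock helpers above. */
    static void inspect_trace_state(void)
    {
            cfs_tracefile_read_lock();      /* shared: readers may nest */
            /* ... read-only access to tracefile state ... */
            cfs_tracefile_read_unlock();
    }

    static void change_trace_state(void)
    {
            cfs_tracefile_write_lock();     /* exclusive access */
            /* ... modify tracefile state ... */
            cfs_tracefile_write_unlock();
    }
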
 
-trace_buf_type_t
-trace_buf_idx_get()
+cfs_trace_buf_type_t cfs_trace_buf_idx_get()
 {
         if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
-                return TCD_TYPE_DISPATCH;
+                return CFS_TCD_TYPE_DISPATCH;
         else
-                return TCD_TYPE_PASSIVE;
+                return CFS_TCD_TYPE_PASSIVE;
 }
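
On Windows the trace buffer type is selected by IRQL rather than by interrupt
context: at DISPATCH_LEVEL or above the dispatch buffers are used, otherwise
the passive ones. Tying this to the per-CPU allocation in
cfs_tracefile_init_arch, a plausible (hypothetical) helper for picking the
current console buffer would be:

    /* Hypothetical helper: one console buffer per (cpu, type) pair. */
    static char *current_console_buffer(void)
    {
            return cfs_trace_console_buffers[cfs_smp_processor_id()]
                                            [cfs_trace_buf_idx_get()];
    }
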
 
-int trace_lock_tcd(struct trace_cpu_data *tcd)
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd)
 {
-       __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
+       __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
        return 1;
 }
 
-void trace_unlock_tcd(struct trace_cpu_data *tcd)
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd)
 {
-       __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
+       __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
 }
 
-int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
+int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
+                      struct cfs_trace_page *tage)
 {
        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
@@ -165,16 +166,16 @@ int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
 }
 
 void
-set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
-                   const int line, unsigned long stack)
+cfs_set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
+                        const int line, unsigned long stack)
 {
        struct timeval tv;
 
-       do_gettimeofday(&tv);
+       cfs_gettimeofday(&tv);
 
        header->ph_subsys = subsys;
        header->ph_mask = mask;
-       header->ph_cpu_id = smp_processor_id();
+       header->ph_cpu_id = cfs_smp_processor_id();
        header->ph_sec = (__u32)tv.tv_sec;
        header->ph_usec = tv.tv_usec;
        header->ph_stack = stack;
@@ -184,23 +185,24 @@ set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
        return;
 }
 
-void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
-                            int len, const char *file, const char *fn)
+void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+                          const char *buf, int len, const char *file,
+                          const char *fn)
 {
        char *prefix = "Lustre", *ptype = NULL;
 
        if ((mask & D_EMERG) != 0) {
                prefix = "LustreError";
-               ptype = KERN_EMERG;
+               ptype = CFS_KERN_EMERG;
        } else if ((mask & D_ERROR) != 0) {
                prefix = "LustreError";
-               ptype = KERN_ERR;
+               ptype = CFS_KERN_ERR;
        } else if ((mask & D_WARNING) != 0) {
                prefix = "Lustre";
-               ptype = KERN_WARNING;
+               ptype = CFS_KERN_WARNING;
        } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
                prefix = "Lustre";
-               ptype = KERN_INFO;
+               ptype = CFS_KERN_INFO;
        }
 
        if ((mask & D_CONSOLE) != 0) {
@@ -212,9 +214,9 @@ void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
        return;
 }
 
-int trace_max_debug_mb(void)
+int cfs_trace_max_debug_mb(void)
 {
-       int  total_mb = (num_physpages >> (20 - CFS_PAGE_SHIFT));
+       int  total_mb = (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT));
        
        return MAX(512, (total_mb * 80)/100);
 }
index 63772ef..505a0b7 100644 (file)
@@ -41,9 +41,9 @@
  * only define one trace_data type for Windows
  */
 typedef enum {
-        TCD_TYPE_PASSIVE = 0,
-        TCD_TYPE_DISPATCH,
-        TCD_TYPE_MAX
-} trace_buf_type_t;
+        CFS_TCD_TYPE_PASSIVE = 0,
+        CFS_TCD_TYPE_DISPATCH,
+        CFS_TCD_TYPE_MAX
+} cfs_trace_buf_type_t;
 
 #endif
index 69b4829..20b587b 100644 (file)
@@ -89,10 +89,10 @@ static inline int lnet_md_unlinkable (lnet_libmd_t *md)
 }
 
 #ifdef __KERNEL__
-#define LNET_LOCK()        spin_lock(&the_lnet.ln_lock)
-#define LNET_UNLOCK()      spin_unlock(&the_lnet.ln_lock)
-#define LNET_MUTEX_DOWN(m) mutex_down(m)
-#define LNET_MUTEX_UP(m)   mutex_up(m)
+#define LNET_LOCK()        cfs_spin_lock(&the_lnet.ln_lock)
+#define LNET_UNLOCK()      cfs_spin_unlock(&the_lnet.ln_lock)
+#define LNET_MUTEX_DOWN(m) cfs_mutex_down(m)
+#define LNET_MUTEX_UP(m)   cfs_mutex_up(m)
 #else
 # ifndef HAVE_LIBPTHREAD
 #define LNET_SINGLE_THREADED_LOCK(l)            \
@@ -134,11 +134,11 @@ lnet_freelist_alloc (lnet_freelist_t *fl)
         /* ALWAYS called with liblock held */
         lnet_freeobj_t *o;
 
-        if (list_empty (&fl->fl_list))
+        if (cfs_list_empty (&fl->fl_list))
                 return (NULL);
 
-        o = list_entry (fl->fl_list.next, lnet_freeobj_t, fo_list);
-        list_del (&o->fo_list);
+        o = cfs_list_entry (fl->fl_list.next, lnet_freeobj_t, fo_list);
+        cfs_list_del (&o->fo_list);
         return ((void *)&o->fo_contents);
 }
 
@@ -146,9 +146,9 @@ static inline void
 lnet_freelist_free (lnet_freelist_t *fl, void *obj)
 {
         /* ALWAYS called with liblock held */
-        lnet_freeobj_t *o = list_entry (obj, lnet_freeobj_t, fo_contents);
+        lnet_freeobj_t *o = cfs_list_entry (obj, lnet_freeobj_t, fo_contents);
 
-        list_add (&o->fo_list, &fl->fl_list);
+        cfs_list_add (&o->fo_list, &fl->fl_list);
 }
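
lnet_freelist_alloc and lnet_freelist_free above recycle fixed-size objects
through fl_list instead of allocating on every call; the caller gets the
aligned fo_contents payload, and free recovers the enclosing lnet_freeobj_t
with cfs_list_entry. A usage sketch, assuming LNET_USE_LIB_FREELIST is
defined and the lib lock is held as the comments require:

    /* Illustrative only: both calls below need the lib lock held. */
    lnet_msg_t *msg;

    msg = (lnet_msg_t *)lnet_freelist_alloc(&the_lnet.ln_free_msgs);
    if (msg != NULL) {
            /* ... initialise and use the message ... */
            lnet_freelist_free(&the_lnet.ln_free_msgs, msg);
    }
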
 
 
@@ -474,7 +474,7 @@ lnet_ni_decref_locked(lnet_ni_t *ni)
         LASSERT (ni->ni_refcount > 0);
         ni->ni_refcount--;
         if (ni->ni_refcount == 0)
-                list_add_tail(&ni->ni_list, &the_lnet.ln_zombie_nis);
+                cfs_list_add_tail(&ni->ni_list, &the_lnet.ln_zombie_nis);
 }
 
 static inline void
@@ -485,7 +485,7 @@ lnet_ni_decref(lnet_ni_t *ni)
         LNET_UNLOCK();
 }
 
-static inline struct list_head *
+static inline cfs_list_t *
 lnet_nid2peerhash (lnet_nid_t nid)
 {
         unsigned int idx = LNET_NIDADDR(nid) % LNET_PEER_HASHSIZE;
@@ -686,7 +686,7 @@ int lnet_ping(lnet_process_id_t id, int timeout_ms,
 
 int lnet_parse_ip2nets (char **networksp, char *ip2nets);
 int lnet_parse_routes (char *route_str, int *im_a_router);
-int lnet_parse_networks (struct list_head *nilist, char *networks);
+int lnet_parse_networks (cfs_list_t *nilist, char *networks);
 
 int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid);
 lnet_peer_t *lnet_find_peer_locked (lnet_nid_t nid);
index 3eec677..3dfda46 100644 (file)
@@ -182,87 +182,87 @@ typedef struct {
 struct lnet_libmd;
 
 typedef struct lnet_msg {
-        struct list_head    msg_activelist;
-        struct list_head    msg_list;           /* Q for credits/MD */
-
-        lnet_process_id_t   msg_target;
-        __u32               msg_type;
-
-        unsigned int        msg_target_is_router:1; /* sending to a router */
-        unsigned int        msg_routing:1;      /* being forwarded */
-        unsigned int        msg_ack:1;          /* ack on finalize (PUT) */
-        unsigned int        msg_sending:1;      /* outgoing message */
-        unsigned int        msg_receiving:1;    /* being received */
-        unsigned int        msg_delayed:1;      /* had to Q for buffer or tx credit */
-        unsigned int        msg_txcredit:1;     /* taken an NI send credit */
-        unsigned int        msg_peertxcredit:1; /* taken a peer send credit */
-        unsigned int        msg_rtrcredit:1;    /* taken a globel router credit */
-        unsigned int        msg_peerrtrcredit:1; /* taken a peer router credit */
-        unsigned int        msg_onactivelist:1; /* on the activelist */
-
-        struct lnet_peer   *msg_txpeer;         /* peer I'm sending to */
-        struct lnet_peer   *msg_rxpeer;         /* peer I received from */
-
-        void               *msg_private;
-        struct lnet_libmd  *msg_md;
-
-        unsigned int        msg_len;
-        unsigned int        msg_wanted;
-        unsigned int        msg_offset;
-        unsigned int        msg_niov;
-        struct iovec       *msg_iov;
-        lnet_kiov_t        *msg_kiov;
-
-        lnet_event_t        msg_ev;
-        lnet_hdr_t          msg_hdr;
+        cfs_list_t            msg_activelist;
+        cfs_list_t            msg_list;           /* Q for credits/MD */
+
+        lnet_process_id_t     msg_target;
+        __u32                 msg_type;
+
+        unsigned int          msg_target_is_router:1; /* sending to a router */
+        unsigned int          msg_routing:1;      /* being forwarded */
+        unsigned int          msg_ack:1;          /* ack on finalize (PUT) */
+        unsigned int          msg_sending:1;      /* outgoing message */
+        unsigned int          msg_receiving:1;    /* being received */
+        unsigned int          msg_delayed:1;      /* had to Q for buffer or tx credit */
+        unsigned int          msg_txcredit:1;     /* taken an NI send credit */
+        unsigned int          msg_peertxcredit:1; /* taken a peer send credit */
+        unsigned int          msg_rtrcredit:1;    /* taken a globel router credit */
+        unsigned int          msg_peerrtrcredit:1; /* taken a peer router credit */
+        unsigned int          msg_onactivelist:1; /* on the activelist */
+
+        struct lnet_peer     *msg_txpeer;         /* peer I'm sending to */
+        struct lnet_peer     *msg_rxpeer;         /* peer I received from */
+
+        void                 *msg_private;
+        struct lnet_libmd    *msg_md;
+
+        unsigned int          msg_len;
+        unsigned int          msg_wanted;
+        unsigned int          msg_offset;
+        unsigned int          msg_niov;
+        struct iovec         *msg_iov;
+        lnet_kiov_t          *msg_kiov;
+
+        lnet_event_t          msg_ev;
+        lnet_hdr_t            msg_hdr;
 } lnet_msg_t;
 
 
 typedef struct lnet_libhandle {
-        struct list_head  lh_hash_chain;
-        __u64             lh_cookie;
+        cfs_list_t            lh_hash_chain;
+        __u64                 lh_cookie;
 } lnet_libhandle_t;
 
 #define lh_entry(ptr, type, member) \
         ((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
 
 typedef struct lnet_eq {
-        struct list_head  eq_list;
-        lnet_libhandle_t  eq_lh;
-        lnet_seq_t        eq_enq_seq;
-        lnet_seq_t        eq_deq_seq;
-        unsigned int      eq_size;
-        lnet_event_t     *eq_events;
-        int               eq_refcount;
-        lnet_eq_handler_t eq_callback;
+        cfs_list_t            eq_list;
+        lnet_libhandle_t      eq_lh;
+        lnet_seq_t            eq_enq_seq;
+        lnet_seq_t            eq_deq_seq;
+        unsigned int          eq_size;
+        lnet_event_t         *eq_events;
+        int                   eq_refcount;
+        lnet_eq_handler_t     eq_callback;
 } lnet_eq_t;
 
 typedef struct lnet_me {
-        struct list_head   me_list;
-        lnet_libhandle_t   me_lh;
-        lnet_process_id_t  me_match_id;
-        unsigned int       me_portal;
-        __u64              me_match_bits;
-        __u64              me_ignore_bits;
-        lnet_unlink_t      me_unlink;
-        struct lnet_libmd *me_md;
+        cfs_list_t             me_list;
+        lnet_libhandle_t       me_lh;
+        lnet_process_id_t      me_match_id;
+        unsigned int           me_portal;
+        __u64                  me_match_bits;
+        __u64                  me_ignore_bits;
+        lnet_unlink_t          me_unlink;
+        struct lnet_libmd     *me_md;
 } lnet_me_t;
 
 typedef struct lnet_libmd {
-        struct list_head  md_list;
-        lnet_libhandle_t  md_lh;
-        lnet_me_t        *md_me;
-        char             *md_start;
-        unsigned int      md_offset;
-        unsigned int      md_length;
-        unsigned int      md_max_size;
-        int               md_threshold;
-        int               md_refcount;
-        unsigned int      md_options;
-        unsigned int      md_flags;
-        void             *md_user_ptr;
-        lnet_eq_t        *md_eq;
-        unsigned int      md_niov;                /* # frags */
+        cfs_list_t            md_list;
+        lnet_libhandle_t      md_lh;
+        lnet_me_t            *md_me;
+        char                 *md_start;
+        unsigned int          md_offset;
+        unsigned int          md_length;
+        unsigned int          md_max_size;
+        int                   md_threshold;
+        int                   md_refcount;
+        unsigned int          md_options;
+        unsigned int          md_flags;
+        void                 *md_user_ptr;
+        lnet_eq_t            *md_eq;
+        unsigned int          md_niov;                /* # frags */
         union {
                 struct iovec  iov[LNET_MAX_IOV];
                 lnet_kiov_t   kiov[LNET_MAX_IOV];
@@ -275,24 +275,24 @@ typedef struct lnet_libmd {
 #ifdef LNET_USE_LIB_FREELIST
 typedef struct
 {
-        void              *fl_objs;             /* single contiguous array of objects */
-        int                fl_nobjs;            /* the number of them */
-        int                fl_objsize;          /* the size (including overhead) of each of them */
-        struct list_head   fl_list;             /* where they are enqueued */
+        void                  *fl_objs;          /* single contiguous array of objects */
+        int                    fl_nobjs;         /* the number of them */
+        int                    fl_objsize;       /* the size (including overhead) of each of them */
+        cfs_list_t             fl_list;          /* where they are enqueued */
 } lnet_freelist_t;
 
 typedef struct
 {
-        struct list_head   fo_list;             /* enqueue on fl_list */
-        void              *fo_contents;         /* aligned contents */
+        cfs_list_t             fo_list;             /* enqueue on fl_list */
+        void                  *fo_contents;         /* aligned contents */
 } lnet_freeobj_t;
 #endif
 
 typedef struct {
         /* info about peers we are trying to fail */
-        struct list_head   tp_list;             /* ln_test_peers */
-        lnet_nid_t         tp_nid;              /* matching nid */
-        unsigned int       tp_threshold;        /* # failures to simulate */
+        cfs_list_t             tp_list;             /* ln_test_peers */
+        lnet_nid_t             tp_nid;              /* matching nid */
+        unsigned int           tp_threshold;        /* # failures to simulate */
 } lnet_test_peer_t;
 
 #define LNET_COOKIE_TYPE_MD    1
@@ -307,11 +307,11 @@ struct lnet_ni;                                  /* forward ref */
 typedef struct lnet_lnd
 {
         /* fields managed by portals */
-        struct list_head  lnd_list;             /* stash in the LND table */
-        int               lnd_refcount;         /* # active instances */
+        cfs_list_t            lnd_list;             /* stash in the LND table */
+        int                   lnd_refcount;         /* # active instances */
 
         /* fields initialised by the LND */
-        unsigned int      lnd_type;
+        unsigned int          lnd_type;
 
         int  (*lnd_startup) (struct lnet_ni *ni);
         void (*lnd_shutdown) (struct lnet_ni *ni);
@@ -386,8 +386,8 @@ typedef struct {
 #define LNET_MAX_INTERFACES   16
 
 typedef struct lnet_ni {
-        struct list_head  ni_list;              /* chain on ln_nis */
-        struct list_head  ni_txq;               /* messages waiting for tx credits */
+        cfs_list_t        ni_list;              /* chain on ln_nis */
+        cfs_list_t        ni_txq;               /* messages waiting for tx credits */
         int               ni_maxtxcredits;      /* # tx credits  */
         int               ni_txcredits;         /* # tx credits free */
         int               ni_mintxcredits;      /* lowest it's been */
@@ -418,16 +418,16 @@ typedef struct {
 #define LNET_MAX_RTR_NIS   16
 #define LNET_PINGINFO_SIZE offsetof(lnet_ping_info_t, pi_ni[LNET_MAX_RTR_NIS])
 typedef struct {
-        struct list_head  rcd_list;             /* chain on the_lnet.ln_zombie_rcd */
+        cfs_list_t        rcd_list;             /* chain on the_lnet.ln_zombie_rcd */
         lnet_handle_md_t  rcd_mdh;              /* ping buffer MD */
         lnet_ping_info_t *rcd_pinginfo;         /* ping buffer */
 } lnet_rc_data_t;
 
 typedef struct lnet_peer {
-        struct list_head  lp_hashlist;          /* chain on peer hash */
-        struct list_head  lp_txq;               /* messages blocking for tx credits */
-        struct list_head  lp_rtrq;              /* messages blocking for router credits */
-        struct list_head  lp_rtr_list;          /* chain on router list */
+        cfs_list_t        lp_hashlist;          /* chain on peer hash */
+        cfs_list_t        lp_txq;               /* messages blocking for tx credits */
+        cfs_list_t        lp_rtrq;              /* messages blocking for router credits */
+        cfs_list_t        lp_rtr_list;          /* chain on router list */
         int               lp_txcredits;         /* # tx credits available */
         int               lp_mintxcredits;      /* low water mark */
         int               lp_rtrcredits;        /* # router credits */
@@ -454,30 +454,30 @@ typedef struct lnet_peer {
 #define lnet_peer_aliveness_enabled(lp) ((lp)->lp_ni->ni_peertimeout > 0)
 
 typedef struct {
-        struct list_head  lr_list;              /* chain on net */
+        cfs_list_t        lr_list;              /* chain on net */
         lnet_peer_t      *lr_gateway;           /* router node */
         unsigned int      lr_hops;              /* how far I am */
 } lnet_route_t;
 
 typedef struct {
-        struct list_head        lrn_list;       /* chain on ln_remote_nets */
-        struct list_head        lrn_routes;     /* routes to me */
+        cfs_list_t              lrn_list;       /* chain on ln_remote_nets */
+        cfs_list_t              lrn_routes;     /* routes to me */
         __u32                   lrn_net;        /* my net number */
 } lnet_remotenet_t;
 
 typedef struct {
-        struct list_head  rbp_bufs;             /* my free buffer pool */
-        struct list_head  rbp_msgs;             /* messages blocking for a buffer */
-        int               rbp_npages;           /* # pages in each buffer */
-        int               rbp_nbuffers;         /* # buffers */
-        int               rbp_credits;          /* # free buffers / blocked messages */
-        int               rbp_mincredits;       /* low water mark */
+        cfs_list_t rbp_bufs;             /* my free buffer pool */
+        cfs_list_t rbp_msgs;             /* messages blocking for a buffer */
+        int        rbp_npages;           /* # pages in each buffer */
+        int        rbp_nbuffers;         /* # buffers */
+        int        rbp_credits;          /* # free buffers / blocked messages */
+        int        rbp_mincredits;       /* low water mark */
 } lnet_rtrbufpool_t;
 
 typedef struct {
-        struct list_head   rb_list;             /* chain on rbp_bufs */
-        lnet_rtrbufpool_t *rb_pool;             /* owning pool */
-        lnet_kiov_t        rb_kiov[0];          /* the buffer space */
+        cfs_list_t             rb_list;             /* chain on rbp_bufs */
+        lnet_rtrbufpool_t     *rb_pool;             /* owning pool */
+        lnet_kiov_t            rb_kiov[0];          /* the buffer space */
 } lnet_rtrbuf_t;
 
 #include <libcfs/libcfs_pack.h>
@@ -503,11 +503,11 @@ typedef struct {
 /* Options for lnet_portal_t::ptl_options */
 #define LNET_PTL_LAZY               (1 << 0)
 typedef struct {
-        struct list_head ptl_ml;  /* match list */
-        struct list_head ptl_msgq; /* messages blocking for MD */
-        __u64            ptl_ml_version;    /* validity stamp, only changed for new attached MD */
-        __u64            ptl_msgq_version;  /* validity stamp */
-        unsigned int     ptl_options;
+        cfs_list_t           ptl_ml;   /* match list */
+        cfs_list_t           ptl_msgq; /* messages blocking for MD */
+        __u64                ptl_ml_version;    /* validity stamp, only changed for new attached MD */
+        __u64                ptl_msgq_version;  /* validity stamp */
+        unsigned int         ptl_options;
 } lnet_portal_t;
 
 /* Router Checker states */
@@ -520,99 +520,99 @@ typedef struct {
 typedef struct
 {
         /* Stuff initialised at LNetInit() */
-        int                ln_init;             /* LNetInit() called? */
-        int                ln_refcount;         /* LNetNIInit/LNetNIFini counter */
-        int                ln_niinit_self;      /* Have I called LNetNIInit myself? */
+        int                    ln_init;             /* LNetInit() called? */
+        int                    ln_refcount;         /* LNetNIInit/LNetNIFini counter */
+        int                    ln_niinit_self;      /* Have I called LNetNIInit myself? */
 
-        struct list_head   ln_lnds;             /* registered LNDs */
+        cfs_list_t             ln_lnds;             /* registered LNDs */
 
 #ifdef __KERNEL__
-        spinlock_t         ln_lock;
-        cfs_waitq_t        ln_waitq;
-        struct semaphore   ln_api_mutex;
-        struct semaphore   ln_lnd_mutex;
+        cfs_spinlock_t         ln_lock;
+        cfs_waitq_t            ln_waitq;
+        cfs_semaphore_t        ln_api_mutex;
+        cfs_semaphore_t        ln_lnd_mutex;
 #else
 # ifndef HAVE_LIBPTHREAD
-        int                ln_lock;
-        int                ln_api_mutex;
-        int                ln_lnd_mutex;
+        int                    ln_lock;
+        int                    ln_api_mutex;
+        int                    ln_lnd_mutex;
 # else
-        pthread_cond_t     ln_cond;
-        pthread_mutex_t    ln_lock;
-        pthread_mutex_t    ln_api_mutex;
-        pthread_mutex_t    ln_lnd_mutex;
+        pthread_cond_t         ln_cond;
+        pthread_mutex_t        ln_lock;
+        pthread_mutex_t        ln_api_mutex;
+        pthread_mutex_t        ln_lnd_mutex;
 # endif
 #endif
 
         /* Stuff initialised at LNetNIInit() */
 
-        int                ln_shutdown;         /* shutdown in progress */
-        int                ln_nportals;         /* # portals */
-        lnet_portal_t     *ln_portals;          /* the vector of portals */
+        int                    ln_shutdown;         /* shutdown in progress */
+        int                    ln_nportals;         /* # portals */
+        lnet_portal_t         *ln_portals;          /* the vector of portals */
 
-        lnet_pid_t         ln_pid;              /* requested pid */
+        lnet_pid_t             ln_pid;              /* requested pid */
 
-        struct list_head   ln_nis;              /* LND instances */
-        lnet_ni_t         *ln_loni;             /* the loopback NI */
-        lnet_ni_t         *ln_eqwaitni;         /* NI to wait for events in */
-        struct list_head   ln_zombie_nis;       /* dying LND instances */
-        int                ln_nzombie_nis;      /* # of NIs to wait for */
+        cfs_list_t             ln_nis;              /* LND instances */
+        lnet_ni_t             *ln_loni;             /* the loopback NI */
+        lnet_ni_t             *ln_eqwaitni;         /* NI to wait for events in */
+        cfs_list_t             ln_zombie_nis;       /* dying LND instances */
+        int                    ln_nzombie_nis;      /* # of NIs to wait for */
 
-        struct list_head   ln_remote_nets;      /* remote networks with routes to them */
-        __u64              ln_remote_nets_version; /* validity stamp */
+        cfs_list_t             ln_remote_nets;      /* remote networks with routes to them */
+        __u64                  ln_remote_nets_version; /* validity stamp */
 
-        struct list_head   ln_routers;          /* list of all known routers */
-        __u64              ln_routers_version;  /* validity stamp */
+        cfs_list_t             ln_routers;       /* list of all known routers */
+        __u64                  ln_routers_version;  /* validity stamp */
 
-        struct list_head  *ln_peer_hash;        /* NID->peer hash */
-        int                ln_npeers;           /* # peers extant */
-        int                ln_peertable_version; /* /proc validity stamp */
+        cfs_list_t            *ln_peer_hash;        /* NID->peer hash */
+        int                    ln_npeers;           /* # peers extant */
+        int                    ln_peertable_version; /* /proc validity stamp */
 
-        int                ln_routing;          /* am I a router? */
-        lnet_rtrbufpool_t  ln_rtrpools[LNET_NRBPOOLS]; /* router buffer pools */
+        int                    ln_routing;          /* am I a router? */
+        lnet_rtrbufpool_t      ln_rtrpools[LNET_NRBPOOLS]; /* router buffer pools */
 
-        int                ln_lh_hash_size;     /* size of lib handle hash table */
-        struct list_head  *ln_lh_hash_table;    /* all extant lib handles, this interface */
-        __u64              ln_next_object_cookie; /* cookie generator */
-        __u64              ln_interface_cookie; /* uniquely identifies this ni in this epoch */
+        int                    ln_lh_hash_size;     /* size of lib handle hash table */
+        cfs_list_t            *ln_lh_hash_table;    /* all extant lib handles, this interface */
+        __u64                  ln_next_object_cookie; /* cookie generator */
+        __u64                  ln_interface_cookie; /* uniquely identifies this ni in this epoch */
 
-        char              *ln_network_tokens;   /* space for network names */
-        int                ln_network_tokens_nob;
+        char                  *ln_network_tokens;   /* space for network names */
+        int                    ln_network_tokens_nob;
 
-        int                ln_testprotocompat;  /* test protocol compatibility flags */
+        int                    ln_testprotocompat;  /* test protocol compatibility flags */
 
-        struct list_head   ln_finalizeq;        /* msgs waiting to complete finalizing */
+        cfs_list_t             ln_finalizeq;        /* msgs waiting to complete finalizing */
 #ifdef __KERNEL__
-        void             **ln_finalizers;       /* threads doing finalization */
-        int                ln_nfinalizers;      /* max # threads finalizing */
+        void                 **ln_finalizers;       /* threads doing finalization */
+        int                    ln_nfinalizers;      /* max # threads finalizing */
 #else
-        int                ln_finalizing;
+        int                    ln_finalizing;
 #endif
-        struct list_head   ln_test_peers;       /* failure simulation */
+        cfs_list_t             ln_test_peers;       /* failure simulation */
 
-        lnet_handle_md_t   ln_ping_target_md;
-        lnet_handle_eq_t   ln_ping_target_eq;
-        lnet_ping_info_t  *ln_ping_info;
+        lnet_handle_md_t       ln_ping_target_md;
+        lnet_handle_eq_t       ln_ping_target_eq;
+        lnet_ping_info_t      *ln_ping_info;
 
 #ifdef __KERNEL__
-        struct semaphore   ln_rc_signal;        /* serialise startup/shutdown */
+        cfs_semaphore_t    ln_rc_signal;        /* serialise startup/shutdown */
 #endif
         int                ln_rc_state;         /* router checker startup/shutdown state */
         lnet_handle_eq_t   ln_rc_eqh;           /* router checker's event queue */
         lnet_handle_md_t   ln_rc_mdh;
-        struct list_head   ln_zombie_rcd;
+        cfs_list_t         ln_zombie_rcd;
 
 #ifdef LNET_USE_LIB_FREELIST
-        lnet_freelist_t    ln_free_mes;
-        lnet_freelist_t    ln_free_msgs;
-        lnet_freelist_t    ln_free_mds;
-        lnet_freelist_t    ln_free_eqs;
+        lnet_freelist_t        ln_free_mes;
+        lnet_freelist_t        ln_free_msgs;
+        lnet_freelist_t        ln_free_mds;
+        lnet_freelist_t        ln_free_eqs;
 #endif
-        struct list_head   ln_active_msgs;
-        struct list_head   ln_active_mds;
-        struct list_head   ln_active_eqs;
+        cfs_list_t             ln_active_msgs;
+        cfs_list_t             ln_active_mds;
+        cfs_list_t             ln_active_eqs;
 
-        lnet_counters_t    ln_counters;
+        lnet_counters_t        ln_counters;
 
 #ifndef __KERNEL__
         /* Temporary workaround to allow uOSS and test programs force
@@ -620,7 +620,7 @@ typedef struct
          * lnet_prepare(). The only way to turn this flag on is to
          * call lnet_server_mode() */
 
-        int                ln_server_mode_flag;
+        int                    ln_server_mode_flag;
 #endif
 } lnet_t;
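
For orientation, a minimal sketch of the renamed list API these conversions
rely on; the my_msg_t type and drain_queue() helper below are hypothetical,
but the cfs_ calls mirror the kernel's struct list_head primitives
one-for-one:

    typedef struct my_msg {
            cfs_list_t  msg_list;               /* link on a queue */
            int         msg_id;
    } my_msg_t;

    static void drain_queue(cfs_list_t *q)  /* q set up with CFS_INIT_LIST_HEAD() */
    {
            while (!cfs_list_empty(q)) {
                    my_msg_t *msg = cfs_list_entry(q->next, my_msg_t, msg_list);

                    cfs_list_del(&msg->msg_list);
                    /* ... process msg ... */
            }
    }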
 
index 8ae417f..312a237 100644 (file)
@@ -120,7 +120,7 @@ typedef struct {
                                                          *** for list_batch command */
 
 typedef struct {
-        struct list_head        rpe_link;               /* link chain */
+        cfs_list_t              rpe_link;               /* link chain */
         lnet_process_id_t       rpe_peer;               /* peer's id */
         struct timeval          rpe_stamp;              /* time stamp of RPC */
         int                     rpe_state;              /* peer's state */
@@ -270,7 +270,7 @@ typedef struct {
         char                   *lstio_dbg_namep;        /* IN: name of group|batch */
         int                     lstio_dbg_count;        /* IN: # of test nodes to debug */
         lnet_process_id_t      *lstio_dbg_idsp;         /* IN: id of test nodes */
-        struct list_head       *lstio_dbg_resultp;      /* OUT: list head of result buffer */
+        cfs_list_t             *lstio_dbg_resultp;      /* OUT: list head of result buffer */
 } lstio_debug_args_t;
 
 typedef struct {
@@ -297,7 +297,7 @@ typedef struct {
         char                   *lstio_grp_namep;        /* IN: group name */
         int                     lstio_grp_count;        /* IN: # of nodes id */
         lnet_process_id_t      *lstio_grp_idsp;         /* IN: array of nodes */
-        struct list_head       *lstio_grp_resultp;      /* OUT: list head of result buffer */
+        cfs_list_t             *lstio_grp_resultp;      /* OUT: list head of result buffer */
 } lstio_group_update_args_t;
 
 typedef struct {
@@ -306,7 +306,7 @@ typedef struct {
         char                   *lstio_grp_namep;        /* IN: group name */
         int                     lstio_grp_count;        /* IN: # of nodes */
         lnet_process_id_t      *lstio_grp_idsp;         /* IN: nodes */
-        struct list_head       *lstio_grp_resultp;      /* OUT: list head of result buffer */
+        cfs_list_t             *lstio_grp_resultp;      /* OUT: list head of result buffer */
 } lstio_group_nodes_args_t;
 
 typedef struct {
@@ -346,7 +346,7 @@ typedef struct {
         int                     lstio_bat_timeout;      /* IN: timeout for the batch */
         int                     lstio_bat_nmlen;        /* IN: name length */
         char                   *lstio_bat_namep;        /* IN: batch name */
-        struct list_head       *lstio_bat_resultp;      /* OUT: list head of result buffer */
+        cfs_list_t             *lstio_bat_resultp;      /* OUT: list head of result buffer */
 } lstio_batch_run_args_t;
 
 typedef struct {
@@ -354,7 +354,7 @@ typedef struct {
         int                     lstio_bat_force;        /* IN: abort unfinished test RPC */
         int                     lstio_bat_nmlen;        /* IN: name length */
         char                   *lstio_bat_namep;        /* IN: batch name */
-        struct list_head       *lstio_bat_resultp;      /* OUT: list head of result buffer */
+        cfs_list_t             *lstio_bat_resultp;      /* OUT: list head of result buffer */
 } lstio_batch_stop_args_t;
 
 typedef struct {
@@ -364,7 +364,7 @@ typedef struct {
         int                     lstio_bat_timeout;      /* IN: timeout for waiting */
         int                     lstio_bat_nmlen;        /* IN: name length */
         char                   *lstio_bat_namep;        /* IN: batch name */
-        struct list_head       *lstio_bat_resultp;      /* OUT: list head of result buffer */
+        cfs_list_t             *lstio_bat_resultp;      /* OUT: list head of result buffer */
 } lstio_batch_query_args_t;
 
 typedef struct {
@@ -395,7 +395,7 @@ typedef struct {
         char                   *lstio_sta_namep;        /* IN: group name */
         int                     lstio_sta_count;        /* IN: # of pid */
         lnet_process_id_t      *lstio_sta_idsp;         /* IN: pid */
-        struct list_head       *lstio_sta_resultp;      /* OUT: list head of result buffer */
+        cfs_list_t             *lstio_sta_resultp;      /* OUT: list head of result buffer */
 } lstio_stat_args_t;
 
 typedef enum {
@@ -428,7 +428,7 @@ typedef struct {
                                                                lstio_ping_param_t,
                                                                ... more */
         int                    *lstio_tes_retp;         /* OUT: private returned value */
-        struct list_head       *lstio_tes_resultp;      /* OUT: list head of result buffer */
+        cfs_list_t             *lstio_tes_resultp;      /* OUT: list head of result buffer */
 } lstio_test_args_t;
 
 typedef enum {
index d996857..ea265c1 100644 (file)
@@ -137,13 +137,13 @@ typedef struct netbuf {
 #define GMNAL_NETBUF_LOCAL_NETADDR(nb)  ((void *)((unsigned long)(nb)->nb_netaddr))
 
 typedef struct gmnal_txbuf {
-        struct list_head         txb_list;      /* queue on gmni_idle_ltxbs */
+        cfs_list_t               txb_list;      /* queue on gmni_idle_ltxbs */
         struct gmnal_txbuf      *txb_next;      /* stash on gmni_ltxs */
         gmnal_netbuf_t           txb_buf;       /* space */
 } gmnal_txbuf_t;
 
 typedef struct gmnal_tx {
-        struct list_head         tx_list;       /* queue */
+        cfs_list_t               tx_list;       /* queue */
         int                      tx_credit:1;   /* consumed a credit? */
         int                      tx_large_iskiov:1; /* large is in kiovs? */
         struct gmnal_ni         *tx_gmni;       /* owning NI */
@@ -168,7 +168,7 @@ typedef struct gmnal_tx {
 } gmnal_tx_t;
 
 typedef struct gmnal_rx {
-        struct list_head         rx_list;      /* enqueue on gmni_rxq for handling */
+        cfs_list_t               rx_list;      /* enqueue on gmni_rxq for handling */
         int                      rx_islarge:1;  /* large receive buffer? */
         unsigned int             rx_recv_nob;  /* bytes received */
         __u16                    rx_recv_gmid; /* sender */
@@ -179,34 +179,34 @@ typedef struct gmnal_rx {
 } gmnal_rx_t;
 
 typedef struct gmnal_ni {
-        lnet_ni_t        *gmni_ni;              /* generic NI */
-        struct gm_port   *gmni_port;            /* GM port */
-        spinlock_t        gmni_gm_lock;         /* serialise GM calls */
-        int               gmni_large_pages;     /* # pages in a large message buffer */
-        int               gmni_large_msgsize;   /* nob in large message buffers */
-        int               gmni_large_gmsize;    /* large message GM bucket */
-        int               gmni_small_msgsize;   /* nob in small message buffers */
-        int               gmni_small_gmsize;    /* small message GM bucket */
-        __u64             gmni_netaddr_base;    /* base of mapped network VM */
-        int               gmni_netaddr_size;    /* # bytes of mapped network VM */
-
-        gmnal_tx_t       *gmni_txs;             /* all txs */
-        gmnal_rx_t       *gmni_rxs;            /* all rx descs */
-        gmnal_txbuf_t    *gmni_ltxbs;           /* all large tx bufs */
-
-        atomic_t          gmni_nthreads;        /* total # threads */
-        gm_alarm_t        gmni_alarm;           /* alarm to wake caretaker */
-        int               gmni_shutdown;       /* tell all threads to exit */
-
-        struct list_head  gmni_idle_txs;        /* idle tx's */
-        int               gmni_tx_credits;      /* # transmits still possible */
-        struct list_head  gmni_idle_ltxbs;      /* idle large tx buffers */
-        struct list_head  gmni_buf_txq;         /* tx's waiting for buffers */
-        struct list_head  gmni_cred_txq;        /* tx's waiting for credits */
-        spinlock_t        gmni_tx_lock;         /* serialise */
-
-        struct gm_hash   *gmni_rx_hash;                /* buffer->rx lookup */
-        struct semaphore  gmni_rx_mutex;        /* serialise blocking on GM */
+        lnet_ni_t           *gmni_ni;           /* generic NI */
+        struct gm_port      *gmni_port;         /* GM port */
+        cfs_spinlock_t       gmni_gm_lock;      /* serialise GM calls */
+        int                  gmni_large_pages;  /* # pages in a large message buffer */
+        int                  gmni_large_msgsize;/* nob in large message buffers */
+        int                  gmni_large_gmsize; /* large message GM bucket */
+        int                  gmni_small_msgsize;/* nob in small message buffers */
+        int                  gmni_small_gmsize; /* small message GM bucket */
+        __u64                gmni_netaddr_base; /* base of mapped network VM */
+        int                  gmni_netaddr_size; /* # bytes of mapped network VM */
+
+        gmnal_tx_t          *gmni_txs;          /* all txs */
+        gmnal_rx_t          *gmni_rxs;          /* all rx descs */
+        gmnal_txbuf_t       *gmni_ltxbs;        /* all large tx bufs */
+
+        cfs_atomic_t         gmni_nthreads;     /* total # threads */
+        gm_alarm_t           gmni_alarm;        /* alarm to wake caretaker */
+        int                  gmni_shutdown;     /* tell all threads to exit */
+
+        cfs_list_t           gmni_idle_txs;     /* idle tx's */
+        int                  gmni_tx_credits;   /* # transmits still possible */
+        cfs_list_t           gmni_idle_ltxbs;   /* idle large tx buffers */
+        cfs_list_t           gmni_buf_txq;      /* tx's waiting for buffers */
+        cfs_list_t           gmni_cred_txq;     /* tx's waiting for credits */
+        cfs_spinlock_t       gmni_tx_lock;      /* serialise */
+
+        struct gm_hash      *gmni_rx_hash;      /* buffer->rx lookup */
+        cfs_semaphore_t      gmni_rx_mutex;     /* serialise blocking on GM */
 } gmnal_ni_t;
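
The gmni_rx_mutex field above is set up with cfs_sema_init() in
gmnal_startup() and released with cfs_up() in gmnal_rx_thread() below; a
condensed sketch of the idiom, assuming the matching cfs_down() wrapper from
this same rename:

    cfs_sema_init(&gmni->gmni_rx_mutex, 1);  /* at startup: binary semaphore */

    cfs_down(&gmni->gmni_rx_mutex);          /* assumed cfs_ wrapper for down();
                                              * at most one thread blocks in GM */
    /* ... gm_blocking_receive_no_spin() and event handling ... */
    cfs_up(&gmni->gmni_rx_mutex);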
 
 typedef struct {
index f6b4001..27d7dd7 100644 (file)
@@ -159,13 +159,13 @@ gmnal_startup(lnet_ni_t *ni)
 
         memset(gmni, 0, sizeof(*gmni));
         gmni->gmni_ni = ni;
-        spin_lock_init(&gmni->gmni_tx_lock);
-        spin_lock_init(&gmni->gmni_gm_lock);
-        INIT_LIST_HEAD(&gmni->gmni_idle_txs);
-        INIT_LIST_HEAD(&gmni->gmni_idle_ltxbs);
-        INIT_LIST_HEAD(&gmni->gmni_buf_txq);
-        INIT_LIST_HEAD(&gmni->gmni_cred_txq);
-        sema_init(&gmni->gmni_rx_mutex, 1);
+        cfs_spin_lock_init(&gmni->gmni_tx_lock);
+        cfs_spin_lock_init(&gmni->gmni_gm_lock);
+        CFS_INIT_LIST_HEAD(&gmni->gmni_idle_txs);
+        CFS_INIT_LIST_HEAD(&gmni->gmni_idle_ltxbs);
+        CFS_INIT_LIST_HEAD(&gmni->gmni_buf_txq);
+        CFS_INIT_LIST_HEAD(&gmni->gmni_cred_txq);
+        cfs_sema_init(&gmni->gmni_rx_mutex, 1);
         PORTAL_MODULE_USE;
 
         /*
index 3608790..db31d67 100644 (file)
@@ -166,12 +166,12 @@ gmnal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         LASSERT(tx->tx_lntmsg == NULL);
         tx->tx_lntmsg = lntmsg;
 
-        spin_lock(&gmni->gmni_tx_lock);
+        cfs_spin_lock(&gmni->gmni_tx_lock);
 
-        list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
+        cfs_list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
         gmnal_check_txqueues_locked(gmni);
 
-        spin_unlock(&gmni->gmni_tx_lock);
+        cfs_spin_unlock(&gmni->gmni_tx_lock);
 
         return 0;
 }
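
A condensed sketch of the locked-enqueue idiom gmnal_send() now expresses
with the cfs_ wrappers (the gmnal_queue_tx() name is hypothetical; the fields
are from the gmnal_ni_t struct above):

    static void gmnal_queue_tx(gmnal_ni_t *gmni, gmnal_tx_t *tx)
    {
            cfs_spin_lock(&gmni->gmni_tx_lock);
            cfs_list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
            gmnal_check_txqueues_locked(gmni);  /* must run under gmni_tx_lock */
            cfs_spin_unlock(&gmni->gmni_tx_lock);
    }
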
index 8334e71..8cb865e 100644 (file)
@@ -156,18 +156,18 @@ gmnal_get_tx(gmnal_ni_t *gmni)
 {
         gmnal_tx_t *tx = NULL;
 
-        spin_lock(&gmni->gmni_tx_lock);
+        cfs_spin_lock(&gmni->gmni_tx_lock);
 
         if (gmni->gmni_shutdown ||
-            list_empty(&gmni->gmni_idle_txs)) {
-                spin_unlock(&gmni->gmni_tx_lock);
+            cfs_list_empty(&gmni->gmni_idle_txs)) {
+                cfs_spin_unlock(&gmni->gmni_tx_lock);
                 return NULL;
         }
 
-        tx = list_entry(gmni->gmni_idle_txs.next, gmnal_tx_t, tx_list);
-        list_del(&tx->tx_list);
+        tx = cfs_list_entry(gmni->gmni_idle_txs.next, gmnal_tx_t, tx_list);
+        cfs_list_del(&tx->tx_list);
 
-        spin_unlock(&gmni->gmni_tx_lock);
+        cfs_spin_unlock(&gmni->gmni_tx_lock);
 
         LASSERT (tx->tx_lntmsg == NULL);
         LASSERT (tx->tx_ltxb == NULL);
@@ -185,11 +185,12 @@ gmnal_tx_done(gmnal_tx_t *tx, int rc)
 
         tx->tx_lntmsg = NULL;
 
-        spin_lock(&gmni->gmni_tx_lock);
+        cfs_spin_lock(&gmni->gmni_tx_lock);
 
         if (tx->tx_ltxb != NULL) {
                 wake_sched = 1;
-                list_add_tail(&tx->tx_ltxb->txb_list, &gmni->gmni_idle_ltxbs);
+                cfs_list_add_tail(&tx->tx_ltxb->txb_list,
+                                  &gmni->gmni_idle_ltxbs);
                 tx->tx_ltxb = NULL;
         }
 
@@ -199,12 +200,12 @@ gmnal_tx_done(gmnal_tx_t *tx, int rc)
                 tx->tx_credit = 0;
         }
 
-        list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
+        cfs_list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
 
         if (wake_sched)
                 gmnal_check_txqueues_locked(gmni);
 
-        spin_unlock(&gmni->gmni_tx_lock);
+        cfs_spin_unlock(&gmni->gmni_tx_lock);
 
         /* Delay finalize until tx is free */
         if (lnetmsg != NULL)
@@ -217,7 +218,7 @@ gmnal_drop_sends_callback(struct gm_port *gm_port, void *context,
 {
         gmnal_tx_t *tx = (gmnal_tx_t*)context;
 
-        LASSERT(!in_interrupt());
+        LASSERT(!cfs_in_interrupt());
 
         CDEBUG(D_NET, "status for tx [%p] is [%d][%s], nid %s\n",
                tx, status, gmnal_gmstatus2str(status),
@@ -232,7 +233,7 @@ gmnal_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
         gmnal_tx_t *tx = (gmnal_tx_t*)context;
         gmnal_ni_t *gmni = tx->tx_gmni;
 
-        LASSERT(!in_interrupt());
+        LASSERT(!cfs_in_interrupt());
 
         switch(status) {
         case GM_SUCCESS:
@@ -256,13 +257,13 @@ gmnal_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
 
                 gmnal_notify_peer_down(tx);
 
-                spin_lock(&gmni->gmni_gm_lock);
+                cfs_spin_lock(&gmni->gmni_gm_lock);
                 gm_drop_sends(gmni->gmni_port,
                               tx->tx_ltxb != NULL ?
                               GMNAL_LARGE_PRIORITY : GMNAL_SMALL_PRIORITY,
                               tx->tx_gmlid, *gmnal_tunables.gm_port,
                               gmnal_drop_sends_callback, tx);
-                spin_unlock(&gmni->gmni_gm_lock);
+                cfs_spin_unlock(&gmni->gmni_gm_lock);
                 return;
         }
 
@@ -279,26 +280,26 @@ gmnal_check_txqueues_locked (gmnal_ni_t *gmni)
         int            pri;
         void          *netaddr;
 
-        tx = list_empty(&gmni->gmni_buf_txq) ? NULL :
-             list_entry(gmni->gmni_buf_txq.next, gmnal_tx_t, tx_list);
+        tx = cfs_list_empty(&gmni->gmni_buf_txq) ? NULL :
+             cfs_list_entry(gmni->gmni_buf_txq.next, gmnal_tx_t, tx_list);
 
         if (tx != NULL &&
             (tx->tx_large_nob == 0 ||
-             !list_empty(&gmni->gmni_idle_ltxbs))) {
+             !cfs_list_empty(&gmni->gmni_idle_ltxbs))) {
 
                 /* consume tx */
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
 
                 LASSERT (tx->tx_ltxb == NULL);
 
                 if (tx->tx_large_nob != 0) {
-                        ltxb = list_entry(gmni->gmni_idle_ltxbs.next,
-                                          gmnal_txbuf_t, txb_list);
+                        ltxb = cfs_list_entry(gmni->gmni_idle_ltxbs.next,
+                                              gmnal_txbuf_t, txb_list);
 
                         /* consume large buffer */
-                        list_del(&ltxb->txb_list);
+                        cfs_list_del(&ltxb->txb_list);
 
-                        spin_unlock(&gmni->gmni_tx_lock);
+                        cfs_spin_unlock(&gmni->gmni_tx_lock);
 
                         /* Unlocking here allows sends to get re-ordered,
                          * but we want to allow other CPUs to progress... */
@@ -332,22 +333,23 @@ gmnal_check_txqueues_locked (gmnal_ni_t *gmni)
 
                         tx->tx_msgnob += tx->tx_large_nob;
 
-                        spin_lock(&gmni->gmni_tx_lock);
+                        cfs_spin_lock(&gmni->gmni_tx_lock);
                 }
 
-                list_add_tail(&tx->tx_list, &gmni->gmni_cred_txq);
+                cfs_list_add_tail(&tx->tx_list, &gmni->gmni_cred_txq);
         }
 
-        if (!list_empty(&gmni->gmni_cred_txq) &&
+        if (!cfs_list_empty(&gmni->gmni_cred_txq) &&
             gmni->gmni_tx_credits != 0) {
 
-                tx = list_entry(gmni->gmni_cred_txq.next, gmnal_tx_t, tx_list);
+                tx = cfs_list_entry(gmni->gmni_cred_txq.next, gmnal_tx_t,
+                                    tx_list);
 
                 /* consume tx and 1 credit */
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 gmni->gmni_tx_credits--;
 
-                spin_unlock(&gmni->gmni_tx_lock);
+                cfs_spin_unlock(&gmni->gmni_tx_lock);
 
                 /* Unlocking here allows sends to get re-ordered, but we want
                  * to allow other CPUs to progress... */
@@ -369,7 +371,7 @@ gmnal_check_txqueues_locked (gmnal_ni_t *gmni)
                         pri = GMNAL_LARGE_PRIORITY;
                 }
 
-                spin_lock(&gmni->gmni_gm_lock);
+                cfs_spin_lock(&gmni->gmni_gm_lock);
 
                 gm_send_to_peer_with_callback(gmni->gmni_port,
                                               netaddr, gmsize,
@@ -379,8 +381,8 @@ gmnal_check_txqueues_locked (gmnal_ni_t *gmni)
                                               gmnal_tx_callback,
                                               (void*)tx);
 
-                spin_unlock(&gmni->gmni_gm_lock);
-                spin_lock(&gmni->gmni_tx_lock);
+                cfs_spin_unlock(&gmni->gmni_gm_lock);
+                cfs_spin_lock(&gmni->gmni_tx_lock);
         }
 }
 
@@ -395,10 +397,10 @@ gmnal_post_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
 
         CDEBUG(D_NET, "posting rx %p buf %p\n", rx, buffer);
 
-        spin_lock(&gmni->gmni_gm_lock);
+        cfs_spin_lock(&gmni->gmni_gm_lock);
         gm_provide_receive_buffer_with_tag(gmni->gmni_port,
                                            buffer, gmsize, pri, 0);
-        spin_unlock(&gmni->gmni_gm_lock);
+        cfs_spin_unlock(&gmni->gmni_gm_lock);
 }
 
 void
@@ -430,12 +432,12 @@ gmnal_version_reply (gmnal_ni_t *gmni, gmnal_rx_t *rx)
         tx->tx_msgnob = offsetof(gmnal_msg_t, gmm_type);
         tx->tx_large_nob = 0;
 
-        spin_lock(&gmni->gmni_tx_lock);
+        cfs_spin_lock(&gmni->gmni_tx_lock);
 
-        list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
+        cfs_list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
         gmnal_check_txqueues_locked(gmni);
 
-        spin_unlock(&gmni->gmni_tx_lock);
+        cfs_spin_unlock(&gmni->gmni_tx_lock);
 }
 
 int
@@ -455,14 +457,14 @@ gmnal_rx_thread(void *arg)
                 if (rc != 0)
                         continue;
 
-                spin_lock(&gmni->gmni_gm_lock);
+                cfs_spin_lock(&gmni->gmni_gm_lock);
                 rxevent = gm_blocking_receive_no_spin(gmni->gmni_port);
-                spin_unlock(&gmni->gmni_gm_lock);
+                cfs_spin_unlock(&gmni->gmni_gm_lock);
 
                 switch (GM_RECV_EVENT_TYPE(rxevent)) {
                 default:
                         gm_unknown(gmni->gmni_port, rxevent);
-                        up(&gmni->gmni_rx_mutex);
+                        cfs_up(&gmni->gmni_rx_mutex);
                         continue;
 
                 case GM_FAST_RECV_EVENT:
@@ -498,7 +500,7 @@ gmnal_rx_thread(void *arg)
                         break;
                 }
 
-                up(&gmni->gmni_rx_mutex);
+                cfs_up(&gmni->gmni_rx_mutex);
 
                 CDEBUG (D_NET, "rx %p: buf %p(%p) nob %d\n", rx,
                         GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf),
@@ -525,7 +527,7 @@ gmnal_rx_thread(void *arg)
         }
 
         CDEBUG(D_NET, "exiting\n");
-        atomic_dec(&gmni->gmni_nthreads);
+        cfs_atomic_dec(&gmni->gmni_nthreads);
         return 0;
 }
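
The cfs_atomic_dec() above pairs with the cfs_atomic_inc() in
gmnal_start_threads() below; the thread accounting, condensed:

    cfs_atomic_inc(&gmni->gmni_nthreads);    /* before spawning each thread */

    /* in each thread, on exit: */
    cfs_atomic_dec(&gmni->gmni_nthreads);

    /* in gmnal_stop_threads(): spin until every thread has checked out */
    while (cfs_atomic_read(&gmni->gmni_nthreads) != 0)
            gmnal_yield(1);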
 
@@ -535,18 +537,18 @@ gmnal_stop_threads(gmnal_ni_t *gmni)
         int count = 2;
 
         gmni->gmni_shutdown = 1;
-        mb();
+        cfs_mb();
 
         /* wake rxthread owning gmni_rx_mutex with an alarm. */
-        spin_lock(&gmni->gmni_gm_lock);
+        cfs_spin_lock(&gmni->gmni_gm_lock);
         gm_set_alarm(gmni->gmni_port, &gmni->gmni_alarm, 0, NULL, NULL);
-        spin_unlock(&gmni->gmni_gm_lock);
+        cfs_spin_unlock(&gmni->gmni_gm_lock);
 
-        while (atomic_read(&gmni->gmni_nthreads) != 0) {
+        while (cfs_atomic_read(&gmni->gmni_nthreads) != 0) {
                 count++;
                 if ((count & (count - 1)) == 0)
                         CWARN("Waiting for %d threads to stop\n",
-                              atomic_read(&gmni->gmni_nthreads));
+                              cfs_atomic_read(&gmni->gmni_nthreads));
                 gmnal_yield(1);
         }
 }
@@ -558,20 +560,20 @@ gmnal_start_threads(gmnal_ni_t *gmni)
         int     pid;
 
         LASSERT (!gmni->gmni_shutdown);
-        LASSERT (atomic_read(&gmni->gmni_nthreads) == 0);
+        LASSERT (cfs_atomic_read(&gmni->gmni_nthreads) == 0);
 
         gm_initialize_alarm(&gmni->gmni_alarm);
 
-        for (i = 0; i < num_online_cpus(); i++) {
+        for (i = 0; i < cfs_num_online_cpus(); i++) {
 
-                pid = kernel_thread(gmnal_rx_thread, (void*)gmni, 0);
+                pid = cfs_kernel_thread(gmnal_rx_thread, (void*)gmni, 0);
                 if (pid < 0) {
                         CERROR("rx thread failed to start: %d\n", pid);
                         gmnal_stop_threads(gmni);
                         return pid;
                 }
 
-                atomic_inc(&gmni->gmni_nthreads);
+                cfs_atomic_inc(&gmni->gmni_nthreads);
         }
 
         return 0;
index aa0a139..cf67ef0 100644 (file)
@@ -127,7 +127,7 @@ gmnal_alloc_ltxbuf (gmnal_ni_t *gmni)
                 return rc;
         }
 
-        list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);
+        cfs_list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);
 
         txb->txb_next = gmni->gmni_ltxbs;
         gmni->gmni_ltxbs = txb;
@@ -166,7 +166,7 @@ gmnal_alloc_tx (gmnal_ni_t *gmni)
 
         tx->tx_gmni = gmni;
         
-        list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
+        cfs_list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
 
         tx->tx_next = gmni->gmni_txs;
         gmni->gmni_txs = tx;
@@ -590,6 +590,6 @@ gmnal_rxevent2str(gm_recv_event_t *ev)
 void
 gmnal_yield(int delay)
 {
-       set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout(delay);
+       cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+       cfs_schedule_timeout(delay);
 }
index 4c82e6e..34faab7 100644 (file)
@@ -65,9 +65,9 @@ mxlnd_free_pages(kmx_pages_t *p)
         for (i = 0; i < npages; i++) {
                 if (p->mxg_pages[i] != NULL) {
                         __free_page(p->mxg_pages[i]);
-                        spin_lock(&kmxlnd_data.kmx_mem_lock);
+                        cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
                         kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
-                        spin_unlock(&kmxlnd_data.kmx_mem_lock);
+                        cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
                 }
         }
 
@@ -98,9 +98,9 @@ mxlnd_alloc_pages(kmx_pages_t **pp, int npages)
                         mxlnd_free_pages(p);
                         return -ENOMEM;
                 }
-                spin_lock(&kmxlnd_data.kmx_mem_lock);
+                cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
                 kmxlnd_data.kmx_mem_used += PAGE_SIZE;
-                spin_unlock(&kmxlnd_data.kmx_mem_lock);
+                cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
         }
 
         *pp = p;
@@ -120,8 +120,8 @@ mxlnd_ctx_init(kmx_ctx_t *ctx)
         ctx->mxc_incarnation = 0;
         ctx->mxc_deadline = 0;
         ctx->mxc_state = MXLND_CTX_IDLE;
-        if (!list_empty(&ctx->mxc_list))
-                list_del_init(&ctx->mxc_list);
+        if (!cfs_list_empty(&ctx->mxc_list))
+                cfs_list_del_init(&ctx->mxc_list);
         /* ignore mxc_rx_list */
         if (ctx->mxc_type == MXLND_REQ_TX) {
                 ctx->mxc_nid = 0;
@@ -230,7 +230,7 @@ mxlnd_init_txs(void)
                 tx = &kmxlnd_data.kmx_txs[i];
                 tx->mxc_type = MXLND_REQ_TX;
 
-                INIT_LIST_HEAD(&tx->mxc_list);
+                CFS_INIT_LIST_HEAD(&tx->mxc_list);
 
                 /* map mxc_msg to page */
                 page = pages->mxg_pages[ipage];
@@ -251,7 +251,7 @@ mxlnd_init_txs(void)
                 }
 
                 /* in startup(), no locks required */
-                list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
+                cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
         }
 
         return 0;
@@ -271,8 +271,10 @@ mxlnd_free_peers(void)
         kmx_peer_t     *next   = NULL;
 
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                list_for_each_entry_safe(peer, next, &kmxlnd_data.kmx_peers[i], mxp_list) {
-                        list_del_init(&peer->mxp_list);
+                cfs_list_for_each_entry_safe(peer, next,
+                                             &kmxlnd_data.kmx_peers[i],
+                                             mxp_list) {
+                        cfs_list_del_init(&peer->mxp_list);
                         if (peer->mxp_conn) mxlnd_conn_decref(peer->mxp_conn);
                         mxlnd_peer_decref(peer);
                         count++;
@@ -342,8 +344,9 @@ mxlnd_init_mx(lnet_ni_t *ni)
 
         mx_get_endpoint_addr(kmxlnd_data.kmx_endpt, &kmxlnd_data.kmx_epa);
         mx_decompose_endpoint_addr(kmxlnd_data.kmx_epa, &nic_id, &ep_id);
-        mxret = mx_connect(kmxlnd_data.kmx_endpt, nic_id, ep_id, MXLND_MSG_MAGIC,
-                           MXLND_CONNECT_TIMEOUT/HZ*1000, &kmxlnd_data.kmx_epa);
+        mxret = mx_connect(kmxlnd_data.kmx_endpt, nic_id, ep_id,
+                           MXLND_MSG_MAGIC, MXLND_CONNECT_TIMEOUT/CFS_HZ*1000,
+                           &kmxlnd_data.kmx_epa);
         if (mxret != MX_SUCCESS) {
                 CDEBUG(D_NETERROR, "unable to connect to myself (%s)\n", mx_strerror(mxret));
                 goto failed_with_endpoint;
@@ -361,7 +364,8 @@ mxlnd_init_mx(lnet_ni_t *ni)
                          mx_strerror(mxret));
                 goto failed_with_endpoint;
         }
-        mxret = mx_set_request_timeout(kmxlnd_data.kmx_endpt, NULL, MXLND_COMM_TIMEOUT/HZ*1000);
+        mxret = mx_set_request_timeout(kmxlnd_data.kmx_endpt, NULL,
+                                       MXLND_COMM_TIMEOUT/CFS_HZ*1000);
         if (mxret != MX_SUCCESS) {
                 CERROR("mx_set_request_timeout() failed with %s\n",
                         mx_strerror(mxret));
@@ -390,13 +394,13 @@ mxlnd_thread_start(int (*fn)(void *arg), void *arg)
         int     pid = 0;
         int     i   = (int) ((long) arg);
 
-        atomic_inc(&kmxlnd_data.kmx_nthreads);
-        init_completion(&kmxlnd_data.kmx_completions[i]);
+        cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
+        cfs_init_completion(&kmxlnd_data.kmx_completions[i]);
 
-        pid = kernel_thread (fn, arg, 0);
+        pid = cfs_kernel_thread (fn, arg, 0);
         if (pid < 0) {
-                CERROR("kernel_thread() failed with %d\n", pid);
-                atomic_dec(&kmxlnd_data.kmx_nthreads);
+                CERROR("cfs_kernel_thread() failed with %d\n", pid);
+                cfs_atomic_dec(&kmxlnd_data.kmx_nthreads);
         }
         return pid;
 }
@@ -411,8 +415,8 @@ void
 mxlnd_thread_stop(long id)
 {
         int     i       = (int) id;
-        atomic_dec (&kmxlnd_data.kmx_nthreads);
-        complete(&kmxlnd_data.kmx_completions[i]);
+        cfs_atomic_dec (&kmxlnd_data.kmx_nthreads);
+        cfs_complete(&kmxlnd_data.kmx_completions[i]);
 }
 
 /**
@@ -433,12 +437,12 @@ mxlnd_shutdown (lnet_ni_t *ni)
         CDEBUG(D_NET, "in shutdown()\n");
 
         CDEBUG(D_MALLOC, "before MXLND cleanup: libcfs_kmemory %d "
-                         "kmx_mem_used %ld\n", atomic_read (&libcfs_kmemory),
+                         "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
                          kmxlnd_data.kmx_mem_used);
 
 
         CDEBUG(D_NET, "setting shutdown = 1\n");
-        atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+        cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
 
         switch (kmxlnd_data.kmx_init) {
 
@@ -449,9 +453,9 @@ mxlnd_shutdown (lnet_ni_t *ni)
 
                 /* wakeup request_waitds */
                 mx_wakeup(kmxlnd_data.kmx_endpt);
-                up(&kmxlnd_data.kmx_tx_queue_sem);
-                up(&kmxlnd_data.kmx_conn_sem);
-                mxlnd_sleep(2 * HZ);
+                cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
+                cfs_up(&kmxlnd_data.kmx_conn_sem);
+                mxlnd_sleep(2 * CFS_HZ);
 
                 /* fall through */
 
@@ -460,13 +464,13 @@ mxlnd_shutdown (lnet_ni_t *ni)
                 CDEBUG(D_NET, "waiting on threads\n");
                 /* wait for threads to complete */
                 for (i = 0; i < nthreads; i++) {
-                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                        cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                 }
-                LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
 
                 CDEBUG(D_NET, "freeing completions\n");
                 MXLND_FREE(kmxlnd_data.kmx_completions,
-                            nthreads * sizeof(struct completion));
+                            nthreads * sizeof(cfs_completion_t));
 
                 /* fall through */
 
@@ -507,7 +511,7 @@ mxlnd_shutdown (lnet_ni_t *ni)
         CDEBUG(D_NET, "shutdown complete\n");
 
         CDEBUG(D_MALLOC, "after MXLND cleanup: libcfs_kmemory %d "
-                         "kmx_mem_used %ld\n", atomic_read (&libcfs_kmemory),
+                         "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
                          kmxlnd_data.kmx_mem_used);
 
         kmxlnd_data.kmx_init = MXLND_INIT_NOTHING;
@@ -538,7 +542,7 @@ mxlnd_startup (lnet_ni_t *ni)
                 return -EPERM;
         }
         CDEBUG(D_MALLOC, "before MXLND startup: libcfs_kmemory %d "
-                         "kmx_mem_used %ld\n", atomic_read (&libcfs_kmemory),
+                         "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
                          kmxlnd_data.kmx_mem_used);
 
         ni->ni_maxtxcredits = MXLND_TX_MSGS();
@@ -552,29 +556,29 @@ mxlnd_startup (lnet_ni_t *ni)
         kmxlnd_data.kmx_ni = ni;
         ni->ni_data = &kmxlnd_data;
 
-        do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
         kmxlnd_data.kmx_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
         CDEBUG(D_NET, "my incarnation is %llu\n", kmxlnd_data.kmx_incarnation);
 
-        rwlock_init (&kmxlnd_data.kmx_global_lock);
-        spin_lock_init (&kmxlnd_data.kmx_mem_lock);
+        cfs_rwlock_init (&kmxlnd_data.kmx_global_lock);
+        cfs_spin_lock_init (&kmxlnd_data.kmx_mem_lock);
 
-        INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_reqs);
-        INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_zombies);
-        INIT_LIST_HEAD (&kmxlnd_data.kmx_orphan_msgs);
-        spin_lock_init (&kmxlnd_data.kmx_conn_lock);
-        sema_init(&kmxlnd_data.kmx_conn_sem, 0);
+        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_reqs);
+        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_zombies);
+        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_orphan_msgs);
+        cfs_spin_lock_init (&kmxlnd_data.kmx_conn_lock);
+        cfs_sema_init(&kmxlnd_data.kmx_conn_sem, 0);
 
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                INIT_LIST_HEAD (&kmxlnd_data.kmx_peers[i]);
+                CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_peers[i]);
         }
 
-        INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_idle);
-        spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
+        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_idle);
+        cfs_spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
         kmxlnd_data.kmx_tx_next_cookie = 1;
-        INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
-        spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
-        sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);
+        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
+        cfs_spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
+        cfs_sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);
 
         kmxlnd_data.kmx_init = MXLND_INIT_DATA;
         /*****************************************************/
@@ -599,13 +603,13 @@ mxlnd_startup (lnet_ni_t *ni)
         /* start threads */
 
         MXLND_ALLOC(kmxlnd_data.kmx_completions,
-                     nthreads * sizeof(struct completion));
+                     nthreads * sizeof(cfs_completion_t));
         if (kmxlnd_data.kmx_completions == NULL) {
                 CERROR("failed to alloc kmxlnd_data.kmx_completions\n");
                 goto failed;
         }
         memset(kmxlnd_data.kmx_completions, 0,
-               nthreads * sizeof(struct completion));
+               nthreads * sizeof(cfs_completion_t));
 
         CDEBUG(D_NET, "using %d %s in mx_wait_any()\n",
                 *kmxlnd_tunables.kmx_n_waitd,
@@ -615,14 +619,14 @@ mxlnd_startup (lnet_ni_t *ni)
                 ret = mxlnd_thread_start(mxlnd_request_waitd, (void*)((long)i));
                 if (ret < 0) {
                         CERROR("Starting mxlnd_request_waitd[%d] failed with %d\n", i, ret);
-                        atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+                        cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                         mx_wakeup(kmxlnd_data.kmx_endpt);
                         for (--i; i >= 0; i--) {
-                                wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                                cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                         }
-                        LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+                        LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                         MXLND_FREE(kmxlnd_data.kmx_completions,
-                                nthreads * sizeof(struct completion));
+                                nthreads * sizeof(cfs_completion_t));
 
                         goto failed;
                 }
@@ -630,42 +634,42 @@ mxlnd_startup (lnet_ni_t *ni)
         ret = mxlnd_thread_start(mxlnd_tx_queued, (void*)((long)i++));
         if (ret < 0) {
                 CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
-                atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                 mx_wakeup(kmxlnd_data.kmx_endpt);
                 for (--i; i >= 0; i--) {
-                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                        cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                 }
-                LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                 MXLND_FREE(kmxlnd_data.kmx_completions,
-                        nthreads * sizeof(struct completion));
+                        nthreads * sizeof(cfs_completion_t));
                 goto failed;
         }
         ret = mxlnd_thread_start(mxlnd_timeoutd, (void*)((long)i++));
         if (ret < 0) {
                 CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
-                atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                 mx_wakeup(kmxlnd_data.kmx_endpt);
-                up(&kmxlnd_data.kmx_tx_queue_sem);
+                cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
                 for (--i; i >= 0; i--) {
-                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                        cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                 }
-                LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                 MXLND_FREE(kmxlnd_data.kmx_completions,
-                        nthreads * sizeof(struct completion));
+                        nthreads * sizeof(cfs_completion_t));
                 goto failed;
         }
         ret = mxlnd_thread_start(mxlnd_connd, (void*)((long)i++));
         if (ret < 0) {
                 CERROR("Starting mxlnd_connd failed with %d\n", ret);
-                atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                 mx_wakeup(kmxlnd_data.kmx_endpt);
-                up(&kmxlnd_data.kmx_tx_queue_sem);
+                cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
                 for (--i; i >= 0; i--) {
-                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                        cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                 }
-                LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                 MXLND_FREE(kmxlnd_data.kmx_completions,
-                        nthreads * sizeof(struct completion));
+                        nthreads * sizeof(cfs_completion_t));
                 goto failed;
         }
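
The repeated failure paths above all unwind one startup pattern; condensed,
using the cfs_ completion primitives introduced by this rename:

    /* startup: one completion per thread (mxlnd_thread_start) */
    cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
    cfs_init_completion(&kmxlnd_data.kmx_completions[i]);
    pid = cfs_kernel_thread(fn, arg, 0);

    /* teardown: each thread signals, shutdown waits
     * (mxlnd_thread_stop / mxlnd_shutdown) */
    cfs_complete(&kmxlnd_data.kmx_completions[i]);
    cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);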
 
index dea4a0c..8b1941f 100644 (file)
 #define MXLND_NDAEMONS          3               /* connd, timeoutd, tx_queued */
 #define MXLND_MX_BOARD          0               /* Use the first MX NIC if more than 1 avail */
 #define MXLND_MX_EP_ID          0               /* MX endpoint ID */
-#define MXLND_COMM_TIMEOUT      (20 * HZ)       /* timeout for send/recv (jiffies) */
-#define MXLND_WAIT_TIMEOUT      HZ              /* timeout for wait (jiffies) */
-#define MXLND_CONNECT_TIMEOUT   (5 * HZ)        /* timeout for connections (jiffies) */
+#define MXLND_COMM_TIMEOUT      (20 * CFS_HZ)   /* timeout for send/recv (jiffies) */
+#define MXLND_WAIT_TIMEOUT      CFS_HZ          /* timeout for wait (jiffies) */
+#define MXLND_CONNECT_TIMEOUT   (5 * CFS_HZ)    /* timeout for connections (jiffies) */
 #define MXLND_POLLING           1000            /* poll iterations before blocking */
 #define MXLND_LOOKUP_COUNT      5               /* how many times to try to resolve MAC */
 #define MXLND_MAX_PEERS         1024            /* number of nodes talking to me */
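
MX's timeout arguments are in milliseconds while the constants above are in
jiffies, hence the CFS_HZ arithmetic in mxlnd_init_mx() earlier, e.g.:

    /* jiffies -> ms: (5 * CFS_HZ) / CFS_HZ * 1000 == 5000 ms for mx_connect() */
    unsigned int connect_ms = MXLND_CONNECT_TIMEOUT / CFS_HZ * 1000;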
 
 #define MXLND_ALLOC(x, size) \
         do { \
-                spin_lock(&kmxlnd_data.kmx_mem_lock); \
+                cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
                 kmxlnd_data.kmx_mem_used += size; \
-                spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+                cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
                 LIBCFS_ALLOC(x, size); \
                 if (unlikely(x == NULL)) { \
-                        spin_lock(&kmxlnd_data.kmx_mem_lock); \
+                        cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
                         kmxlnd_data.kmx_mem_used -= size; \
-                        spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+                        cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
                 } \
         } while (0)
 
 #define MXLND_FREE(x, size) \
         do { \
-                spin_lock(&kmxlnd_data.kmx_mem_lock); \
+                cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
                 kmxlnd_data.kmx_mem_used -= size; \
-                spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+                cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
                 LIBCFS_FREE(x, size); \
         } while (0)
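
A brief usage sketch of the accounting macros above; note MXLND_ALLOC already
backs the charge out of kmx_mem_used when LIBCFS_ALLOC() fails (the ctx
variable here is hypothetical):

    kmx_ctx_t *ctx;

    MXLND_ALLOC(ctx, sizeof(*ctx));     /* charge kmx_mem_used, then allocate */
    if (ctx == NULL)
            return -ENOMEM;             /* charge was already rolled back */
    /* ... use ctx ... */
    MXLND_FREE(ctx, sizeof(*ctx));      /* uncharge, then free */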
 
@@ -228,38 +228,38 @@ typedef struct
 typedef struct kmx_data
 {
         int                 kmx_init;           /* initialization state */
-        atomic_t            kmx_shutdown;       /* shutting down? */
-        atomic_t            kmx_nthreads;       /* number of threads */
-        struct completion  *kmx_completions;    /* array of completion structs */
+        cfs_atomic_t        kmx_shutdown;       /* shutting down? */
+        cfs_atomic_t        kmx_nthreads;       /* number of threads */
+        cfs_completion_t   *kmx_completions;    /* array of completion structs */
         lnet_ni_t          *kmx_ni;             /* the LND instance */
         u64                 kmx_incarnation;    /* my incarnation value */
         long                kmx_mem_used;       /* memory used */
         mx_endpoint_t       kmx_endpt;          /* the MX endpoint */
         mx_endpoint_addr_t  kmx_epa;            /* the MX endpoint address */
 
-        rwlock_t            kmx_global_lock;    /* global lock */
-        spinlock_t          kmx_mem_lock;       /* memory accounting lock */
+        cfs_rwlock_t        kmx_global_lock;    /* global lock */
+        cfs_spinlock_t      kmx_mem_lock;       /* memory accounting lock */
 
-        struct list_head    kmx_conn_reqs;      /* list of connection requests */
-        spinlock_t          kmx_conn_lock;      /* connection list lock */
-        struct semaphore    kmx_conn_sem;       /* semaphore for connection request list */
-        struct list_head    kmx_conn_zombies;   /* list of zombie connections */
-        struct list_head    kmx_orphan_msgs;    /* list of txs to cancel */
+        cfs_list_t          kmx_conn_reqs;      /* list of connection requests */
+        cfs_spinlock_t      kmx_conn_lock;      /* connection list lock */
+        cfs_semaphore_t     kmx_conn_sem;       /* semaphore for connection request list */
+        cfs_list_t          kmx_conn_zombies;   /* list of zombie connections */
+        cfs_list_t          kmx_orphan_msgs;    /* list of txs to cancel */
 
                                                 /* list of all known peers */
-        struct list_head    kmx_peers[MXLND_HASH_SIZE];
-        atomic_t            kmx_npeers;         /* number of peers */
+        cfs_list_t          kmx_peers[MXLND_HASH_SIZE];
+        cfs_atomic_t        kmx_npeers;         /* number of peers */
 
         kmx_pages_t        *kmx_tx_pages;       /* tx msg pages */
 
         struct kmx_ctx     *kmx_txs;            /* all tx descriptors */
-        struct list_head    kmx_tx_idle;        /* list of idle tx */
-        spinlock_t          kmx_tx_idle_lock;   /* lock for idle tx list */
+        cfs_list_t          kmx_tx_idle;        /* list of idle tx */
+        cfs_spinlock_t      kmx_tx_idle_lock;   /* lock for idle tx list */
         s32                 kmx_tx_used;        /* txs in use */
         u64                 kmx_tx_next_cookie; /* unique id for tx */
-        struct list_head    kmx_tx_queue;       /* generic send queue */
-        spinlock_t          kmx_tx_queue_lock;  /* lock for generic sends */
-        struct semaphore    kmx_tx_queue_sem;   /* semaphore for tx queue */
+        cfs_list_t          kmx_tx_queue;       /* generic send queue */
+        cfs_spinlock_t      kmx_tx_queue_lock;  /* lock for generic sends */
+        cfs_semaphore_t     kmx_tx_queue_sem;   /* semaphore for tx queue */
 } kmx_data_t;
 
 #define MXLND_INIT_NOTHING      0       /* in the beginning, there was nothing... */
@@ -361,8 +361,8 @@ typedef struct kmx_ctx
                                                    control credits after completion */
         unsigned long       mxc_deadline;       /* request time out in absolute jiffies */
         enum kmx_req_state  mxc_state;          /* what is the state of the request? */
-        struct list_head    mxc_list;           /* place on rx/tx idle list, tx q, peer tx */
-        struct list_head    mxc_rx_list;        /* place on mxp_rx_posted list */
+        cfs_list_t          mxc_list;           /* place on rx/tx idle list, tx q, peer tx */
+        cfs_list_t          mxc_rx_list;        /* place on mxp_rx_posted list */
 
         lnet_nid_t          mxc_nid;            /* dst's NID if peer is not known */
         struct kmx_peer    *mxc_peer;           /* owning peer */
@@ -396,7 +396,7 @@ typedef struct kmx_ctx
 /* store all data from an unexpected CONN_[REQ|ACK] receive */
 typedef struct kmx_connparams
 {
-        struct list_head        mxr_list;       /* list to hang on kmx_conn_reqs */
+        cfs_list_t              mxr_list;       /* list to hang on kmx_conn_reqs */
         void                   *mxr_context;    /* context - unused - will hold net */
         mx_endpoint_addr_t      mxr_epa;        /* the peer's epa */
         u64                     mxr_match;      /* the CONN_REQ's match bits */
@@ -410,48 +410,48 @@ typedef struct kmx_connparams
 typedef struct kmx_conn
 {
         struct kmx_peer    *mxk_peer;           /* owning peer */
-        struct list_head    mxk_list;           /* for placing on mxp_conns */
-        struct list_head    mxk_zombie;         /* for placing on zombies list */
+        cfs_list_t          mxk_list;           /* for placing on mxp_conns */
+        cfs_list_t          mxk_zombie;         /* for placing on zombies list */
         u64                 mxk_incarnation;    /* connection's incarnation value */
         u32                 mxk_sid;            /* peer's MX session id */
-        atomic_t            mxk_refcount;       /* reference counting */
+        cfs_atomic_t        mxk_refcount;       /* reference counting */
         int                 mxk_status;         /* can we send messages? MXLND_CONN_* */
 
         mx_endpoint_addr_t  mxk_epa;            /* peer's endpoint address */
 
-        spinlock_t          mxk_lock;           /* lock */
+        cfs_spinlock_t      mxk_lock;           /* lock */
         unsigned long       mxk_timeout;        /* expiration of oldest pending tx/rx */
         unsigned long       mxk_last_tx;        /* when last tx completed with success */
         unsigned long       mxk_last_rx;        /* when last rx completed */
 
         kmx_pages_t        *mxk_rx_pages;       /* rx msg pages */
         kmx_ctx_t          *mxk_rxs;            /* the rx descriptors */
-        struct list_head    mxk_rx_idle;        /* list of idle rx */
+        cfs_list_t          mxk_rx_idle;        /* list of idle rx */
 
         int                 mxk_credits;        /* # of my credits for sending to peer */
         int                 mxk_outstanding;    /* # of credits to return */
 
-        struct list_head    mxk_tx_credit_queue; /* send queue for peer */
-        struct list_head    mxk_tx_free_queue;  /* send queue for peer */
+        cfs_list_t          mxk_tx_credit_queue; /* send queue for peer */
+        cfs_list_t          mxk_tx_free_queue;  /* send queue for peer */
         int                 mxk_ntx_msgs;       /* # of msgs on tx queues */
         int                 mxk_ntx_data;       /* # of DATA on tx queues */
         int                 mxk_ntx_posted;     /* # of tx msgs in flight */
         int                 mxk_data_posted;    /* # of tx data payloads in flight */
 
-        struct list_head    mxk_pending;        /* in flight rxs and txs */
+        cfs_list_t          mxk_pending;        /* in flight rxs and txs */
 } kmx_conn_t;
 
 /* peer state */
 typedef struct kmx_peer
 {
-        struct list_head    mxp_list;           /* for placing on kmx_peers */
+        cfs_list_t          mxp_list;           /* for placing on kmx_peers */
         lnet_nid_t          mxp_nid;            /* peer's LNET NID */
         lnet_ni_t          *mxp_ni;             /* LNET interface */
-        atomic_t            mxp_refcount;       /* reference counts */
+        cfs_atomic_t        mxp_refcount;       /* reference counts */
 
-        struct list_head    mxp_conns;          /* list of connections */
+        cfs_list_t          mxp_conns;          /* list of connections */
         kmx_conn_t         *mxp_conn;           /* current connection */
-        struct list_head    mxp_tx_queue;       /* msgs waiting for a conn */
+        cfs_list_t          mxp_tx_queue;       /* msgs waiting for a conn */
 
         u32                 mxp_board;          /* peer's board rank */
         u32                 mxp_ep_id;          /* peer's MX endpoint ID */
@@ -520,39 +520,39 @@ mxlnd_nid_to_hash(lnet_nid_t nid)
 #define mxlnd_peer_addref(peer)                                 \
 do {                                                            \
         LASSERT(peer != NULL);                                  \
-        LASSERT(atomic_read(&(peer)->mxp_refcount) > 0);        \
-        atomic_inc(&(peer)->mxp_refcount);                      \
+        LASSERT(cfs_atomic_read(&(peer)->mxp_refcount) > 0);    \
+        cfs_atomic_inc(&(peer)->mxp_refcount);                  \
 } while (0)
 
 
 #define mxlnd_peer_decref(peer)                                 \
 do {                                                            \
-        LASSERT(atomic_read(&(peer)->mxp_refcount) > 0);        \
-        if (atomic_dec_and_test(&(peer)->mxp_refcount))         \
+        LASSERT(cfs_atomic_read(&(peer)->mxp_refcount) > 0);    \
+        if (cfs_atomic_dec_and_test(&(peer)->mxp_refcount))     \
                 mxlnd_peer_free(peer);                          \
 } while (0)
 
 #define mxlnd_conn_addref(conn)                                 \
 do {                                                            \
         LASSERT(conn != NULL);                                  \
-        LASSERT(atomic_read(&(conn)->mxk_refcount) > 0);        \
-        atomic_inc(&(conn)->mxk_refcount);                      \
+        LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0);    \
+        cfs_atomic_inc(&(conn)->mxk_refcount);                  \
 } while (0)
 
 
-#define mxlnd_conn_decref(conn)                                 \
-do {                                                            \
-        LASSERT(conn != NULL);                                  \
-        LASSERT(atomic_read(&(conn)->mxk_refcount) > 0);        \
-        if (atomic_dec_and_test(&(conn)->mxk_refcount)) {       \
-                spin_lock(&kmxlnd_data.kmx_conn_lock);          \
+#define mxlnd_conn_decref(conn)                                       \
+do {                                                                  \
+        LASSERT(conn != NULL);                                        \
+        LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0);          \
+        if (cfs_atomic_dec_and_test(&(conn)->mxk_refcount)) {         \
+                cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);            \
                 LASSERT((conn)->mxk_status == MXLND_CONN_DISCONNECT); \
                 CDEBUG(D_NET, "adding conn %p to zombies\n", (conn)); \
-                list_add_tail(&(conn)->mxk_zombie,              \
-                              &kmxlnd_data.kmx_conn_zombies);   \
-                spin_unlock(&kmxlnd_data.kmx_conn_lock);        \
-                up(&kmxlnd_data.kmx_conn_sem);                  \
-        }                                                       \
+                cfs_list_add_tail(&(conn)->mxk_zombie,                \
+                                  &kmxlnd_data.kmx_conn_zombies);     \
+                cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);          \
+                cfs_up(&kmxlnd_data.kmx_conn_sem);                    \
+        }                                                             \
 } while (0)
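
A sketch of the refcount discipline these macros implement: the final
mxlnd_conn_decref() requires the connection to be in MXLND_CONN_DISCONNECT
(per the LASSERT) and hands it to the connd thread via the zombie list:

    mxlnd_conn_addref(conn);    /* take a ref before touching conn */
    /* ... use conn ... */
    mxlnd_conn_decref(conn);    /* last ref: conn queued on kmx_conn_zombies,
                                 * cfs_up(&kmx_conn_sem) wakes the connd */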
 
 #define mxlnd_valid_msg_type(type)                              \
index ddc3bed..0c98b45 100644 (file)
@@ -166,23 +166,23 @@ mxlnd_parse_match(u64 match, u8 *msg_type, u8 *error, u64 *cookie)
 kmx_ctx_t *
 mxlnd_get_idle_rx(kmx_conn_t *conn)
 {
-        struct list_head        *rxs    = NULL;
+        cfs_list_t              *rxs    = NULL;
         kmx_ctx_t               *rx     = NULL;
 
         LASSERT(conn != NULL);
 
         rxs = &conn->mxk_rx_idle;
 
-        spin_lock(&conn->mxk_lock);
+        cfs_spin_lock(&conn->mxk_lock);
 
-        if (list_empty (rxs)) {
-                spin_unlock(&conn->mxk_lock);
+        if (cfs_list_empty (rxs)) {
+                cfs_spin_unlock(&conn->mxk_lock);
                 return NULL;
         }
 
-        rx = list_entry (rxs->next, kmx_ctx_t, mxc_list);
-        list_del_init(&rx->mxc_list);
-        spin_unlock(&conn->mxk_lock);
+        rx = cfs_list_entry (rxs->next, kmx_ctx_t, mxc_list);
+        cfs_list_del_init(&rx->mxc_list);
+        cfs_spin_unlock(&conn->mxk_lock);
 
 #if MXLND_DEBUG
         if (rx->mxc_get != rx->mxc_put) {
@@ -190,7 +190,7 @@ mxlnd_get_idle_rx(kmx_conn_t *conn)
                 CDEBUG(D_NETERROR, "*** incarnation= %lld ***\n", rx->mxc_incarnation);
                 CDEBUG(D_NETERROR, "*** deadline= %ld ***\n", rx->mxc_deadline);
                 CDEBUG(D_NETERROR, "*** state= %s ***\n", mxlnd_ctxstate_to_str(rx->mxc_state));
-                CDEBUG(D_NETERROR, "*** listed?= %d ***\n", !list_empty(&rx->mxc_list));
+                CDEBUG(D_NETERROR, "*** listed?= %d ***\n", !cfs_list_empty(&rx->mxc_list));
                 CDEBUG(D_NETERROR, "*** nid= 0x%llx ***\n", rx->mxc_nid);
                 CDEBUG(D_NETERROR, "*** peer= 0x%p ***\n", rx->mxc_peer);
                 CDEBUG(D_NETERROR, "*** msg_type= %s ***\n", mxlnd_msgtype_to_str(rx->mxc_msg_type));
@@ -213,7 +213,7 @@ int
 mxlnd_put_idle_rx(kmx_ctx_t *rx)
 {
         kmx_conn_t              *conn   = rx->mxc_conn;
-        struct list_head        *rxs    = &conn->mxk_rx_idle;
+        cfs_list_t              *rxs    = &conn->mxk_rx_idle;
 
         LASSERT(rx->mxc_type == MXLND_REQ_RX);
 
@@ -222,29 +222,29 @@ mxlnd_put_idle_rx(kmx_ctx_t *rx)
         rx->mxc_put++;
         LASSERT(rx->mxc_get == rx->mxc_put);
 
-        spin_lock(&conn->mxk_lock);
-        list_add(&rx->mxc_list, rxs);
-        spin_unlock(&conn->mxk_lock);
+        cfs_spin_lock(&conn->mxk_lock);
+        cfs_list_add(&rx->mxc_list, rxs);
+        cfs_spin_unlock(&conn->mxk_lock);
         return 0;
 }
 
 kmx_ctx_t *
 mxlnd_get_idle_tx(void)
 {
-        struct list_head        *tmp    = &kmxlnd_data.kmx_tx_idle;
+        cfs_list_t              *tmp    = &kmxlnd_data.kmx_tx_idle;
         kmx_ctx_t               *tx     = NULL;
 
-        spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
+        cfs_spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
 
-        if (list_empty (&kmxlnd_data.kmx_tx_idle)) {
+        if (cfs_list_empty (&kmxlnd_data.kmx_tx_idle)) {
                 CDEBUG(D_NETERROR, "%d txs in use\n", kmxlnd_data.kmx_tx_used);
-                spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+                cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
                 return NULL;
         }
 
         tmp = &kmxlnd_data.kmx_tx_idle;
-        tx = list_entry (tmp->next, kmx_ctx_t, mxc_list);
-        list_del_init(&tx->mxc_list);
+        tx = cfs_list_entry (tmp->next, kmx_ctx_t, mxc_list);
+        cfs_list_del_init(&tx->mxc_list);
 
         /* Allocate a new completion cookie.  It might not be needed,
          * but we've got a lock right now and we're unlikely to
@@ -254,7 +254,7 @@ mxlnd_get_idle_tx(void)
                 kmxlnd_data.kmx_tx_next_cookie = 1;
         }
         kmxlnd_data.kmx_tx_used++;
-        spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+        cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
 
         LASSERT (tx->mxc_get == tx->mxc_put);
 
@@ -298,10 +298,10 @@ mxlnd_put_idle_tx(kmx_ctx_t *tx)
         tx->mxc_put++;
         LASSERT(tx->mxc_get == tx->mxc_put);
 
-        spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
-        list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
+        cfs_spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
+        cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
         kmxlnd_data.kmx_tx_used--;
-        spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+        cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
 
         if (lntmsg[0] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[0], result);
         if (lntmsg[1] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[1], result);
@@ -312,7 +312,7 @@ mxlnd_put_idle_tx(kmx_ctx_t *tx)
 void
 mxlnd_connparams_free(kmx_connparams_t *cp)
 {
-        LASSERT(list_empty(&cp->mxr_list));
+        LASSERT(cfs_list_empty(&cp->mxr_list));
         MXLND_FREE(cp, sizeof(*cp));
         return;
 }
@@ -327,7 +327,7 @@ mxlnd_connparams_alloc(kmx_connparams_t **cp, void *context,
         MXLND_ALLOC(c, sizeof(*c));
         if (!c) return -ENOMEM;
 
-        INIT_LIST_HEAD(&c->mxr_list);
+        CFS_INIT_LIST_HEAD(&c->mxr_list);
         c->mxr_context = context;
         c->mxr_epa = epa;
         c->mxr_match = match;
@@ -344,7 +344,7 @@ static inline void
 mxlnd_set_conn_status(kmx_conn_t *conn, int status)
 {
         conn->mxk_status = status;
-        mb();
+        cfs_mb();
 }
 
 /**
@@ -361,11 +361,11 @@ mxlnd_conn_free_locked(kmx_conn_t *conn)
         kmx_peer_t      *peer   = conn->mxk_peer;
 
         CDEBUG(D_NET, "freeing conn 0x%p *****\n", conn);
-        LASSERT (list_empty (&conn->mxk_tx_credit_queue) &&
-                 list_empty (&conn->mxk_tx_free_queue) &&
-                 list_empty (&conn->mxk_pending));
-        if (!list_empty(&conn->mxk_list)) {
-                list_del_init(&conn->mxk_list);
+        LASSERT (cfs_list_empty (&conn->mxk_tx_credit_queue) &&
+                 cfs_list_empty (&conn->mxk_tx_free_queue) &&
+                 cfs_list_empty (&conn->mxk_pending));
+        if (!cfs_list_empty(&conn->mxk_list)) {
+                cfs_list_del_init(&conn->mxk_list);
                 if (peer->mxp_conn == conn) {
                         peer->mxp_conn = NULL;
                         if (valid) {
@@ -379,7 +379,7 @@ mxlnd_conn_free_locked(kmx_conn_t *conn)
                                 }
                         }
                         /* unlink from global list and drop its ref */
-                        list_del_init(&peer->mxp_list);
+                        cfs_list_del_init(&peer->mxp_list);
                         mxlnd_peer_decref(peer);
                 }
         }
@@ -421,9 +421,10 @@ mxlnd_conn_cancel_pending_rxs(kmx_conn_t *conn)
 
         do {
                 found = 0;
-                spin_lock(&conn->mxk_lock);
-                list_for_each_entry_safe(ctx, next, &conn->mxk_pending, mxc_list) {
-                        list_del_init(&ctx->mxc_list);
+                cfs_spin_lock(&conn->mxk_lock);
+                cfs_list_for_each_entry_safe(ctx, next, &conn->mxk_pending,
+                                             mxc_list) {
+                        cfs_list_del_init(&ctx->mxc_list);
                         if (ctx->mxc_type == MXLND_REQ_RX) {
                                 found = 1;
                                 mxret = mx_cancel(kmxlnd_data.kmx_endpt,
@@ -435,20 +436,20 @@ mxlnd_conn_cancel_pending_rxs(kmx_conn_t *conn)
                                 if (result == 1) {
                                         ctx->mxc_errno = -ECONNABORTED;
                                         ctx->mxc_state = MXLND_CTX_CANCELED;
-                                        spin_unlock(&conn->mxk_lock);
-                                        spin_lock(&kmxlnd_data.kmx_conn_lock);
+                                        cfs_spin_unlock(&conn->mxk_lock);
+                                        cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
                                         /* we may be holding the global lock,
                                          * move to orphan list so that it can free it */
-                                        list_add_tail(&ctx->mxc_list,
-                                                      &kmxlnd_data.kmx_orphan_msgs);
+                                        cfs_list_add_tail(&ctx->mxc_list,
+                                                          &kmxlnd_data.kmx_orphan_msgs);
                                         count++;
-                                        spin_unlock(&kmxlnd_data.kmx_conn_lock);
-                                        spin_lock(&conn->mxk_lock);
+                                        cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
+                                        cfs_spin_lock(&conn->mxk_lock);
                                 }
                                 break;
                         }
                 }
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
         while (found);
 
@@ -459,33 +460,33 @@ int
 mxlnd_cancel_queued_txs(kmx_conn_t *conn)
 {
         int                     count   = 0;
-        struct list_head        *tmp    = NULL;
+        cfs_list_t             *tmp    = NULL;
 
-        spin_lock(&conn->mxk_lock);
-        while (!list_empty(&conn->mxk_tx_free_queue) ||
-               !list_empty(&conn->mxk_tx_credit_queue)) {
+        cfs_spin_lock(&conn->mxk_lock);
+        while (!cfs_list_empty(&conn->mxk_tx_free_queue) ||
+               !cfs_list_empty(&conn->mxk_tx_credit_queue)) {
 
                 kmx_ctx_t       *tx     = NULL;
 
-                if (!list_empty(&conn->mxk_tx_free_queue)) {
+                if (!cfs_list_empty(&conn->mxk_tx_free_queue)) {
                         tmp = &conn->mxk_tx_free_queue;
                 } else {
                         tmp = &conn->mxk_tx_credit_queue;
                 }
 
-                tx = list_entry(tmp->next, kmx_ctx_t, mxc_list);
-                list_del_init(&tx->mxc_list);
-                spin_unlock(&conn->mxk_lock);
+                tx = cfs_list_entry(tmp->next, kmx_ctx_t, mxc_list);
+                cfs_list_del_init(&tx->mxc_list);
+                cfs_spin_unlock(&conn->mxk_lock);
                 tx->mxc_errno = -ECONNABORTED;
                 tx->mxc_state = MXLND_CTX_CANCELED;
                 /* move to orphan list and then abort */
-                spin_lock(&kmxlnd_data.kmx_conn_lock);
-                list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs);
-                spin_unlock(&kmxlnd_data.kmx_conn_lock);
+                cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
+                cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs);
+                cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
                 count++;
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
         }
-        spin_unlock(&conn->mxk_lock);
+        cfs_spin_unlock(&conn->mxk_lock);
 
         return count;
 }
@@ -518,20 +519,20 @@ mxlnd_conn_disconnect(kmx_conn_t *conn, int mx_dis, int send_bye)
         int                     valid   = !mxlnd_endpoint_addr_null(epa);
         int                     count   = 0;
 
-        spin_lock(&conn->mxk_lock);
+        cfs_spin_lock(&conn->mxk_lock);
         if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
                 return;
         }
         mxlnd_set_conn_status(conn, MXLND_CONN_DISCONNECT);
         conn->mxk_timeout = 0;
-        spin_unlock(&conn->mxk_lock);
+        cfs_spin_unlock(&conn->mxk_lock);
 
         count = mxlnd_cancel_queued_txs(conn);
         count += mxlnd_conn_cancel_pending_rxs(conn);
 
         if (count)
-                up(&kmxlnd_data.kmx_conn_sem); /* let connd call kmxlnd_abort_msgs() */
+                cfs_up(&kmxlnd_data.kmx_conn_sem); /* let connd call kmxlnd_abort_msgs() */
 
         if (send_bye && valid &&
             conn->mxk_peer->mxp_nid != kmxlnd_data.kmx_ni->ni_nid) {
@@ -543,11 +544,11 @@ mxlnd_conn_disconnect(kmx_conn_t *conn, int mx_dis, int send_bye)
                 mxlnd_sleep(msecs_to_jiffies(20));
         }
 
-        if (atomic_read(&kmxlnd_data.kmx_shutdown) != 1) {
+        if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown) != 1) {
                 unsigned long   last_msg        = 0;
 
                 /* notify LNET that we are giving up on this peer */
-                if (time_after(conn->mxk_last_rx, conn->mxk_last_tx))
+                if (cfs_time_after(conn->mxk_last_rx, conn->mxk_last_tx))
                         last_msg = conn->mxk_last_rx;
                 else
                         last_msg = conn->mxk_last_tx;
@@ -613,9 +614,9 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer)
         memset(conn->mxk_rxs, 0, MXLND_RX_MSGS() * sizeof(kmx_ctx_t));
 
         conn->mxk_peer = peer;
-        INIT_LIST_HEAD(&conn->mxk_list);
-        INIT_LIST_HEAD(&conn->mxk_zombie);
-        atomic_set(&conn->mxk_refcount, 2);     /* ref for owning peer
+        CFS_INIT_LIST_HEAD(&conn->mxk_list);
+        CFS_INIT_LIST_HEAD(&conn->mxk_zombie);
+        cfs_atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer
                                                    and one for the caller */
         if (peer->mxp_nid == kmxlnd_data.kmx_ni->ni_nid) {
                 u64     nic_id  = 0ULL;
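A new conn starts with a reference count of two: one held by the owning peer that links it and one belonging to the caller of mxlnd_conn_alloc_locked(), and each side must drop its own reference. A sketch of the matching release side (the zombie handling is inferred from the mxk_zombie field initialized above; the global zombie-list name is hypothetical):

        static inline void
        conn_decref(kmx_conn_t *conn)
        {
                LASSERT(cfs_atomic_read(&conn->mxk_refcount) > 0);
                if (cfs_atomic_dec_and_test(&conn->mxk_refcount)) {
                        /* last reference: hand the conn to connd to free */
                        cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
                        cfs_list_add_tail(&conn->mxk_zombie,
                                          &kmxlnd_data.kmx_conn_zombies);
                        cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
                        cfs_up(&kmxlnd_data.kmx_conn_sem);
                }
        }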
@@ -637,28 +638,28 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer)
                 mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
                 /* mxk_epa - to be set after mx_iconnect() */
         }
-        spin_lock_init(&conn->mxk_lock);
+        cfs_spin_lock_init(&conn->mxk_lock);
         /* conn->mxk_timeout = 0 */
         /* conn->mxk_last_tx = 0 */
         /* conn->mxk_last_rx = 0 */
-        INIT_LIST_HEAD(&conn->mxk_rx_idle);
+        CFS_INIT_LIST_HEAD(&conn->mxk_rx_idle);
 
         conn->mxk_credits = *kmxlnd_tunables.kmx_peercredits;
         /* mxk_outstanding = 0 */
 
-        INIT_LIST_HEAD(&conn->mxk_tx_credit_queue);
-        INIT_LIST_HEAD(&conn->mxk_tx_free_queue);
+        CFS_INIT_LIST_HEAD(&conn->mxk_tx_credit_queue);
+        CFS_INIT_LIST_HEAD(&conn->mxk_tx_free_queue);
         /* conn->mxk_ntx_msgs = 0 */
         /* conn->mxk_ntx_data = 0 */
         /* conn->mxk_ntx_posted = 0 */
         /* conn->mxk_data_posted = 0 */
-        INIT_LIST_HEAD(&conn->mxk_pending);
+        CFS_INIT_LIST_HEAD(&conn->mxk_pending);
 
         for (i = 0; i < MXLND_RX_MSGS(); i++) {
 
                 rx = &conn->mxk_rxs[i];
                 rx->mxc_type = MXLND_REQ_RX;
-                INIT_LIST_HEAD(&rx->mxc_list);
+                CFS_INIT_LIST_HEAD(&rx->mxc_list);
 
                 /* map mxc_msg to page */
                 page = pages->mxg_pages[ipage];
@@ -682,7 +683,7 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer)
                         LASSERT (ipage <= MXLND_TX_MSG_PAGES());
                 }
 
-                list_add_tail(&rx->mxc_list, &conn->mxk_rx_idle);
+                cfs_list_add_tail(&rx->mxc_list, &conn->mxk_rx_idle);
         }
 
         *connp = conn;
@@ -690,7 +691,7 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer)
         mxlnd_peer_addref(peer);        /* add a ref for this conn */
 
         /* add to front of peer's conns list */
-        list_add(&conn->mxk_list, &peer->mxp_conns);
+        cfs_list_add(&conn->mxk_list, &peer->mxp_conns);
         peer->mxp_conn = conn;
         return 0;
 }
@@ -699,11 +700,11 @@ int
 mxlnd_conn_alloc(kmx_conn_t **connp, kmx_peer_t *peer)
 {
         int             ret     = 0;
-        rwlock_t       *g_lock  = &kmxlnd_data.kmx_global_lock;
+        cfs_rwlock_t   *g_lock  = &kmxlnd_data.kmx_global_lock;
 
-        write_lock(g_lock);
+        cfs_write_lock(g_lock);
         ret = mxlnd_conn_alloc_locked(connp, peer);
-        write_unlock(g_lock);
+        cfs_write_unlock(g_lock);
         return ret;
 }
 
@@ -715,9 +716,9 @@ mxlnd_q_pending_ctx(kmx_ctx_t *ctx)
 
         ctx->mxc_state = MXLND_CTX_PENDING;
         if (conn != NULL) {
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 if (conn->mxk_status >= MXLND_CONN_INIT) {
-                        list_add_tail(&ctx->mxc_list, &conn->mxk_pending);
+                        cfs_list_add_tail(&ctx->mxc_list, &conn->mxk_pending);
                         if (conn->mxk_timeout == 0 || ctx->mxc_deadline < conn->mxk_timeout) {
                                 conn->mxk_timeout = ctx->mxc_deadline;
                         }
@@ -725,7 +726,7 @@ mxlnd_q_pending_ctx(kmx_ctx_t *ctx)
                         ctx->mxc_state = MXLND_CTX_COMPLETED;
                         ret = -1;
                 }
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
         return ret;
 }
@@ -741,19 +742,20 @@ mxlnd_deq_pending_ctx(kmx_ctx_t *ctx)
                        mxlnd_ctxstate_to_str(ctx->mxc_state));
         }
         ctx->mxc_state = MXLND_CTX_COMPLETED;
-        if (!list_empty(&ctx->mxc_list)) {
+        if (!cfs_list_empty(&ctx->mxc_list)) {
                 kmx_conn_t      *conn = ctx->mxc_conn;
                 kmx_ctx_t       *next = NULL;
 
                 LASSERT(conn != NULL);
-                spin_lock(&conn->mxk_lock);
-                list_del_init(&ctx->mxc_list);
+                cfs_spin_lock(&conn->mxk_lock);
+                cfs_list_del_init(&ctx->mxc_list);
                 conn->mxk_timeout = 0;
-                if (!list_empty(&conn->mxk_pending)) {
-                        next = list_entry(conn->mxk_pending.next, kmx_ctx_t, mxc_list);
+                if (!cfs_list_empty(&conn->mxk_pending)) {
+                        next = cfs_list_entry(conn->mxk_pending.next,
+                                              kmx_ctx_t, mxc_list);
                         conn->mxk_timeout = next->mxc_deadline;
                 }
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
         return 0;
 }
@@ -770,15 +772,15 @@ mxlnd_peer_free(kmx_peer_t *peer)
 {
         CDEBUG(D_NET, "freeing peer 0x%p %s\n", peer, libcfs_nid2str(peer->mxp_nid));
 
-        LASSERT (atomic_read(&peer->mxp_refcount) == 0);
+        LASSERT (cfs_atomic_read(&peer->mxp_refcount) == 0);
 
-        if (!list_empty(&peer->mxp_list)) {
+        if (!cfs_list_empty(&peer->mxp_list)) {
                 /* assume we are locked */
-                list_del_init(&peer->mxp_list);
+                cfs_list_del_init(&peer->mxp_list);
         }
 
         MXLND_FREE(peer, sizeof (*peer));
-        atomic_dec(&kmxlnd_data.kmx_npeers);
+        cfs_atomic_dec(&kmxlnd_data.kmx_npeers);
         return;
 }
 
@@ -842,8 +844,9 @@ mxlnd_ip2nic_id(u32 ip, u64 *nic_id, int tries)
                                 break;
                         } else if (ret == -EHOSTUNREACH && try < tries) {
                                 /* add a little backoff */
-                                CDEBUG(D_NET, "sleeping for %d jiffies\n", HZ/4);
-                                mxlnd_sleep(HZ/4);
+                                CDEBUG(D_NET, "sleeping for %d jiffies\n",
+                                       CFS_HZ/4);
+                                mxlnd_sleep(CFS_HZ/4);
                         }
                 }
         } while (try++ < tries);
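CFS_HZ is the libcfs spelling of the scheduler tick rate, so CFS_HZ/4 is roughly 250 ms whatever the kernel's HZ configuration. Illustrative tick arithmetic (sketch only, not from the patch):

        static void
        backoff_example(void)
        {
                unsigned long deadline = jiffies + 5 * CFS_HZ;  /* ~5 s out */

                mxlnd_sleep(CFS_HZ / 4);                        /* ~250 ms  */
                if (cfs_time_after(jiffies, deadline))
                        CDEBUG(D_NET, "deadline already passed\n");
        }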
@@ -877,29 +880,30 @@ mxlnd_peer_alloc(kmx_peer_t **peerp, lnet_nid_t nid, u32 board, u32 ep_id, u64 n
 
         MXLND_ALLOC(peer, sizeof (*peer));
         if (peer == NULL) {
-                CDEBUG(D_NETERROR, "Cannot allocate peer for NID 0x%llx\n", nid);
+                CDEBUG(D_NETERROR, "Cannot allocate peer for NID 0x%llx\n",
+                       nid);
                 return -ENOMEM;
         }
         CDEBUG(D_NET, "allocated peer 0x%p for NID 0x%llx\n", peer, nid);
 
         memset(peer, 0, sizeof(*peer));
 
-        INIT_LIST_HEAD(&peer->mxp_list);
+        CFS_INIT_LIST_HEAD(&peer->mxp_list);
         peer->mxp_nid = nid;
         /* peer->mxp_ni unused - may be used for multi-rail */
-        atomic_set(&peer->mxp_refcount, 1);     /* ref for kmx_peers list */
+        cfs_atomic_set(&peer->mxp_refcount, 1);     /* ref for kmx_peers list */
 
         peer->mxp_board = board;
         peer->mxp_ep_id = ep_id;
         peer->mxp_nic_id = nic_id;
 
-        INIT_LIST_HEAD(&peer->mxp_conns);
+        CFS_INIT_LIST_HEAD(&peer->mxp_conns);
         ret = mxlnd_conn_alloc(&peer->mxp_conn, peer); /* adds 2nd conn ref here... */
         if (ret != 0) {
                 mxlnd_peer_decref(peer);
                 return ret;
         }
-        INIT_LIST_HEAD(&peer->mxp_tx_queue);
+        CFS_INIT_LIST_HEAD(&peer->mxp_tx_queue);
 
         if (peer->mxp_nic_id != 0ULL)
                 nic_id = peer->mxp_nic_id;
@@ -930,7 +934,7 @@ mxlnd_find_peer_by_nid_locked(lnet_nid_t nid)
 
         hash = mxlnd_nid_to_hash(nid);
 
-        list_for_each_entry(peer, &kmxlnd_data.kmx_peers[hash], mxp_list) {
+        cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[hash], mxp_list) {
                 if (peer->mxp_nid == nid) {
                         found = 1;
                         mxlnd_peer_addref(peer);
@@ -947,23 +951,23 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
         int             hash    = 0;
         kmx_peer_t      *peer   = NULL;
         kmx_peer_t      *old    = NULL;
-        rwlock_t        *g_lock = &kmxlnd_data.kmx_global_lock;
+        cfs_rwlock_t    *g_lock = &kmxlnd_data.kmx_global_lock;
 
-        read_lock(g_lock);
+        cfs_read_lock(g_lock);
         peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */
 
         if ((peer && peer->mxp_conn) || /* found peer with conn or */
             (!peer && !create)) {       /* did not find peer and do not create one */
-                read_unlock(g_lock);
+                cfs_read_unlock(g_lock);
                 return peer;
         }
 
-        read_unlock(g_lock);
+        cfs_read_unlock(g_lock);
 
         /* if peer but _not_ conn */
         if (peer && !peer->mxp_conn) {
                 if (create) {
-                        write_lock(g_lock);
+                        cfs_write_lock(g_lock);
                         if (!peer->mxp_conn) { /* check again */
                                 /* create the conn */
                                 ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer);
@@ -977,7 +981,7 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
                                         mxlnd_conn_decref(peer->mxp_conn);
                                 }
                         }
-                        write_unlock(g_lock);
+                        cfs_write_unlock(g_lock);
                 }
                 return peer;
         }
@@ -992,7 +996,7 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
         if (ret != 0) /* no memory, peer is NULL */
                 return NULL;
 
-        write_lock(g_lock);
+        cfs_write_lock(g_lock);
 
         /* look again */
         old = mxlnd_find_peer_by_nid_locked(nid);
@@ -1004,13 +1008,14 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
                 peer = old;
         } else {
                 /* no other peer, use this one */
-                list_add_tail(&peer->mxp_list, &kmxlnd_data.kmx_peers[hash]);
-                atomic_inc(&kmxlnd_data.kmx_npeers);
+                cfs_list_add_tail(&peer->mxp_list,
+                                  &kmxlnd_data.kmx_peers[hash]);
+                cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
                 mxlnd_peer_addref(peer);
                 mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */
         }
 
-        write_unlock(g_lock);
+        cfs_write_unlock(g_lock);
 
         return peer;
 }
@@ -1332,13 +1337,13 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source,
 
         mx_decompose_endpoint_addr2(source, &nic_id, &ep_id, &sid);
         mxlnd_parse_match(match_value, &msg_type, &error, &cookie);
-        read_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_read_lock(&kmxlnd_data.kmx_global_lock);
         mx_get_endpoint_addr_context(source, (void **) &conn);
         if (conn) {
                 mxlnd_conn_addref(conn); /* add ref for this function */
                 peer = conn->mxk_peer;
         }
-        read_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
 
         if (msg_type == MXLND_MSG_BYE) {
                 if (conn) {
@@ -1371,10 +1376,10 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source,
                         mxlnd_send_message(source, MXLND_MSG_CONN_ACK, ENOMEM, 0);
                         return MX_RECV_FINISHED;
                 }
-                spin_lock(&kmxlnd_data.kmx_conn_lock);
-                list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
-                spin_unlock(&kmxlnd_data.kmx_conn_lock);
-                up(&kmxlnd_data.kmx_conn_sem);
+                cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
+                cfs_list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
+                cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
+                cfs_up(&kmxlnd_data.kmx_conn_sem);
                 return MX_RECV_FINISHED;
         }
         if (msg_type == MXLND_MSG_CONN_ACK) {
@@ -1403,10 +1408,11 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source,
                                                " from %llx:%d\n", nic_id, ep_id);
                                 mxlnd_conn_disconnect(conn, 1, 1);
                         } else {
-                                spin_lock(&kmxlnd_data.kmx_conn_lock);
-                                list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
-                                spin_unlock(&kmxlnd_data.kmx_conn_lock);
-                                up(&kmxlnd_data.kmx_conn_sem);
+                                cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
+                                cfs_list_add_tail(&cp->mxr_list,
+                                                  &kmxlnd_data.kmx_conn_reqs);
+                                cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
+                                cfs_up(&kmxlnd_data.kmx_conn_sem);
                         }
                 }
                 mxlnd_conn_decref(conn); /* drop ref taken above */
@@ -1471,18 +1477,19 @@ mxlnd_get_peer_info(int index, lnet_nid_t *nidp, int *count)
         int              ret    = -ENOENT;
         kmx_peer_t      *peer   = NULL;
 
-        read_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_read_lock(&kmxlnd_data.kmx_global_lock);
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) {
+                cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
+                                        mxp_list) {
                         if (index-- == 0) {
                                 *nidp = peer->mxp_nid;
-                                *count = atomic_read(&peer->mxp_refcount);
+                                *count = cfs_atomic_read(&peer->mxp_refcount);
                                 ret = 0;
                                 break;
                         }
                 }
         }
-        read_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
 
         return ret;
 }
@@ -1493,7 +1500,7 @@ mxlnd_del_peer_locked(kmx_peer_t *peer)
         if (peer->mxp_conn) {
                 mxlnd_conn_disconnect(peer->mxp_conn, 1, 1);
         } else {
-                list_del_init(&peer->mxp_list); /* remove from the global list */
+                cfs_list_del_init(&peer->mxp_list); /* remove from the global list */
                 mxlnd_peer_decref(peer); /* drop global list ref */
         }
         return;
@@ -1510,7 +1517,7 @@ mxlnd_del_peer(lnet_nid_t nid)
         if (nid != LNET_NID_ANY) {
                 peer = mxlnd_find_peer_by_nid(nid, 0); /* adds peer ref */
         }
-        write_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
         if (nid != LNET_NID_ANY) {
                 if (peer == NULL) {
                         ret = -ENOENT;
@@ -1520,13 +1527,14 @@ mxlnd_del_peer(lnet_nid_t nid)
                 }
         } else { /* LNET_NID_ANY */
                 for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                        list_for_each_entry_safe(peer, next,
-                                                 &kmxlnd_data.kmx_peers[i], mxp_list) {
+                        cfs_list_for_each_entry_safe(peer, next,
+                                                     &kmxlnd_data.kmx_peers[i],
+                                                     mxp_list) {
                                 mxlnd_del_peer_locked(peer);
                         }
                 }
         }
-        write_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
 
         return ret;
 }
@@ -1538,21 +1546,23 @@ mxlnd_get_conn_by_idx(int index)
         kmx_peer_t      *peer   = NULL;
         kmx_conn_t      *conn   = NULL;
 
-        read_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_read_lock(&kmxlnd_data.kmx_global_lock);
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) {
-                        list_for_each_entry(conn, &peer->mxp_conns, mxk_list) {
+                cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
+                                        mxp_list) {
+                        cfs_list_for_each_entry(conn, &peer->mxp_conns,
+                                                mxk_list) {
                                 if (index-- > 0) {
                                         continue;
                                 }
 
                                 mxlnd_conn_addref(conn); /* add ref here, dec in ctl() */
-                                read_unlock(&kmxlnd_data.kmx_global_lock);
+                                cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
                                 return conn;
                         }
                 }
         }
-        read_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
 
         return NULL;
 }
@@ -1563,7 +1573,7 @@ mxlnd_close_matching_conns_locked(kmx_peer_t *peer)
         kmx_conn_t      *conn   = NULL;
         kmx_conn_t      *next   = NULL;
 
-        list_for_each_entry_safe(conn, next, &peer->mxp_conns, mxk_list)
+        cfs_list_for_each_entry_safe(conn, next, &peer->mxp_conns, mxk_list)
                 mxlnd_conn_disconnect(conn, 0, 1);
 
         return;
@@ -1576,7 +1586,7 @@ mxlnd_close_matching_conns(lnet_nid_t nid)
         int             ret     = 0;
         kmx_peer_t      *peer   = NULL;
 
-        write_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
         if (nid != LNET_NID_ANY) {
                 peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */
                 if (peer == NULL) {
@@ -1587,11 +1597,12 @@ mxlnd_close_matching_conns(lnet_nid_t nid)

                 }
         } else { /* LNET_NID_ANY */
                 for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                        list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list)
+                        cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],                                                mxp_list)
                                 mxlnd_close_matching_conns_locked(peer);
                 }
         }
-        write_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
 
         return ret;
 }
@@ -1672,20 +1682,22 @@ mxlnd_peer_queue_tx_locked(kmx_ctx_t *tx)
             msg_type != MXLND_MSG_GET_DATA) {
                 /* msg style tx */
                 if (mxlnd_tx_requires_credit(tx)) {
-                        list_add_tail(&tx->mxc_list, &conn->mxk_tx_credit_queue);
+                        cfs_list_add_tail(&tx->mxc_list,
+                                          &conn->mxk_tx_credit_queue);
                         conn->mxk_ntx_msgs++;
                 } else if (msg_type == MXLND_MSG_CONN_REQ ||
                            msg_type == MXLND_MSG_CONN_ACK) {
                         /* put conn msgs at the front of the queue */
-                        list_add(&tx->mxc_list, &conn->mxk_tx_free_queue);
+                        cfs_list_add(&tx->mxc_list, &conn->mxk_tx_free_queue);
                 } else {
                         /* PUT_ACK, PUT_NAK */
-                        list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue);
+                        cfs_list_add_tail(&tx->mxc_list,
+                                          &conn->mxk_tx_free_queue);
                         conn->mxk_ntx_msgs++;
                 }
         } else {
                 /* data style tx */
-                list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue);
+                cfs_list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue);
                 conn->mxk_ntx_data++;
         }
 
@@ -1703,9 +1715,9 @@ mxlnd_peer_queue_tx(kmx_ctx_t *tx)
 {
         LASSERT(tx->mxc_peer != NULL);
         LASSERT(tx->mxc_conn != NULL);
-        spin_lock(&tx->mxc_conn->mxk_lock);
+        cfs_spin_lock(&tx->mxc_conn->mxk_lock);
         mxlnd_peer_queue_tx_locked(tx);
-        spin_unlock(&tx->mxc_conn->mxk_lock);
+        cfs_spin_unlock(&tx->mxc_conn->mxk_lock);
 
         return;
 }
@@ -1748,10 +1760,10 @@ mxlnd_queue_tx(kmx_ctx_t *tx)
                 mxlnd_peer_queue_tx(tx);
                 mxlnd_check_sends(peer);
         } else {
-                spin_lock(&kmxlnd_data.kmx_tx_queue_lock);
-                list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue);
-                spin_unlock(&kmxlnd_data.kmx_tx_queue_lock);
-                up(&kmxlnd_data.kmx_tx_queue_sem);
+                cfs_spin_lock(&kmxlnd_data.kmx_tx_queue_lock);
+                cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue);
+                cfs_spin_unlock(&kmxlnd_data.kmx_tx_queue_lock);
+                cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
         }
 done:
         return;
@@ -2110,7 +2122,7 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         int                     nob             = 0;
         uint32_t                length          = 0;
         kmx_peer_t             *peer            = NULL;
-        rwlock_t               *g_lock          = &kmxlnd_data.kmx_global_lock;
+        cfs_rwlock_t           *g_lock          = &kmxlnd_data.kmx_global_lock;
 
         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
                        payload_nob, payload_niov, libcfs_id2str(target));
@@ -2142,14 +2154,14 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         if (unlikely(peer->mxp_incompatible)) {
                 mxlnd_peer_decref(peer); /* drop ref taken above */
         } else {
-                read_lock(g_lock);
+                cfs_read_lock(g_lock);
                 conn = peer->mxp_conn;
                 if (conn && conn->mxk_status != MXLND_CONN_DISCONNECT) {
                         mxlnd_conn_addref(conn);
                 } else {
                         conn = NULL;
                 }
-                read_unlock(g_lock);
+                cfs_read_unlock(g_lock);
                 mxlnd_peer_decref(peer); /* drop peer ref taken above */
                 if (!conn)
                         return -ENOTCONN;
@@ -2498,9 +2510,9 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
         if (repost) {
                 /* we received a message, increment peer's outstanding credits */
                 if (credit == 1) {
-                        spin_lock(&conn->mxk_lock);
+                        cfs_spin_lock(&conn->mxk_lock);
                         conn->mxk_outstanding++;
-                        spin_unlock(&conn->mxk_lock);
+                        cfs_spin_unlock(&conn->mxk_lock);
                 }
                 /* we are done with the rx */
                 mxlnd_put_idle_rx(rx);
@@ -2518,8 +2530,8 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 void
 mxlnd_sleep(unsigned long timeout)
 {
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(timeout);
+        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+        cfs_schedule_timeout(timeout);
         return;
 }
 
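The state must be set before cfs_schedule_timeout() is called; with the task still runnable the timeout may return almost immediately without sleeping. A hypothetical polling wait built from the same two primitives (not part of the patch):

        static void
        poll_until_shutdown(void)
        {
                while (!cfs_atomic_read(&kmxlnd_data.kmx_shutdown)) {
                        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                        cfs_schedule_timeout(CFS_HZ);   /* sleep up to ~1 s */
                }
        }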
@@ -2539,39 +2551,39 @@ mxlnd_tx_queued(void *arg)
         int                     found   = 0;
         kmx_ctx_t              *tx      = NULL;
         kmx_peer_t             *peer    = NULL;
-        struct list_head       *queue   = &kmxlnd_data.kmx_tx_queue;
-        spinlock_t             *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
-        rwlock_t               *g_lock  = &kmxlnd_data.kmx_global_lock;
+        cfs_list_t             *queue   = &kmxlnd_data.kmx_tx_queue;
+        cfs_spinlock_t         *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
+        cfs_rwlock_t           *g_lock  = &kmxlnd_data.kmx_global_lock;
 
         cfs_daemonize("mxlnd_tx_queued");
 
-        while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+        while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
-                ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
+                ret = cfs_down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
-                if (atomic_read(&kmxlnd_data.kmx_shutdown))
+                if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
                         break;
                 if (ret != 0) // Should we check for -EINTR?
                         continue;
-                spin_lock(tx_q_lock);
-                if (list_empty (&kmxlnd_data.kmx_tx_queue)) {
-                        spin_unlock(tx_q_lock);
+                cfs_spin_lock(tx_q_lock);
+                if (cfs_list_empty (&kmxlnd_data.kmx_tx_queue)) {
+                        cfs_spin_unlock(tx_q_lock);
                         continue;
                 }
-                tx = list_entry (queue->next, kmx_ctx_t, mxc_list);
-                list_del_init(&tx->mxc_list);
-                spin_unlock(tx_q_lock);
+                tx = cfs_list_entry (queue->next, kmx_ctx_t, mxc_list);
+                cfs_list_del_init(&tx->mxc_list);
+                cfs_spin_unlock(tx_q_lock);
 
                 found = 0;
                 peer = mxlnd_find_peer_by_nid(tx->mxc_nid, 0); /* adds peer ref */
                 if (peer != NULL) {
                         tx->mxc_peer = peer;
-                        write_lock(g_lock);
+                        cfs_write_lock(g_lock);
                         if (peer->mxp_conn == NULL) {
                                 ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer);
                                 if (ret != 0) {
                                         /* out of memory, give up and fail tx */
                                         tx->mxc_errno = -ENOMEM;
                                         mxlnd_peer_decref(peer);
-                                        write_unlock(g_lock);
+                                        cfs_write_unlock(g_lock);
                                         mxlnd_put_idle_tx(tx);
                                         continue;
                                 }
@@ -2579,7 +2591,7 @@ mxlnd_tx_queued(void *arg)
                         tx->mxc_conn = peer->mxp_conn;
                         mxlnd_conn_addref(tx->mxc_conn); /* for this tx */
                         mxlnd_peer_decref(peer); /* drop peer ref taken above */
-                        write_unlock(g_lock);
+                        cfs_write_unlock(g_lock);
                         mxlnd_queue_tx(tx);
                         found = 1;
                 }
@@ -2610,7 +2622,7 @@ mxlnd_tx_queued(void *arg)
                         /* add peer to global peer list, but look to see
                          * if someone already created it after we released
                          * the read lock */
-                        write_lock(g_lock);
+                        cfs_write_lock(g_lock);
                         old = mxlnd_find_peer_by_nid_locked(peer->mxp_nid);
                         if (old) {
                                 /* we have a peer ref on old */
@@ -2626,8 +2638,9 @@ mxlnd_tx_queued(void *arg)
                         }
 
                         if (found == 0) {
-                                list_add_tail(&peer->mxp_list, &kmxlnd_data.kmx_peers[hash]);
-                                atomic_inc(&kmxlnd_data.kmx_npeers);
+                                cfs_list_add_tail(&peer->mxp_list,
+                                                  &kmxlnd_data.kmx_peers[hash]);
+                                cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
                         } else {
                                 tx->mxc_peer = old;
                                 tx->mxc_conn = old->mxp_conn;
@@ -2637,7 +2650,7 @@ mxlnd_tx_queued(void *arg)
                                 mxlnd_conn_decref(peer->mxp_conn); /* drop peer's ref */
                                 mxlnd_peer_decref(peer);
                         }
-                        write_unlock(g_lock);
+                        cfs_write_unlock(g_lock);
 
                         mxlnd_queue_tx(tx);
                 }
@@ -2675,13 +2688,14 @@ mxlnd_iconnect(kmx_peer_t *peer, u8 msg_type)
                 }
                 if (peer->mxp_nic_id == 0ULL && conn->mxk_status == MXLND_CONN_WAIT) {
                         /* not mapped yet, return */
-                        spin_lock(&conn->mxk_lock);
+                        cfs_spin_lock(&conn->mxk_lock);
                         mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
-                        spin_unlock(&conn->mxk_lock);
+                        cfs_spin_unlock(&conn->mxk_lock);
                 }
         }
 
-        if (time_after(jiffies, peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT) &&
+        if (cfs_time_after(jiffies,
+                           peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT) &&
             conn->mxk_status != MXLND_CONN_DISCONNECT) {
                 /* give up and notify LNET */
                 CDEBUG(D_NET, "timeout trying to connect to %s\n",
@@ -2695,14 +2709,15 @@ mxlnd_iconnect(kmx_peer_t *peer, u8 msg_type)
                             peer->mxp_ep_id, MXLND_MSG_MAGIC, match,
                             (void *) peer, &request);
         if (unlikely(mxret != MX_SUCCESS)) {
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
                 CDEBUG(D_NETERROR, "mx_iconnect() failed with %s (%d) to %s\n",
                        mx_strerror(mxret), mxret, libcfs_nid2str(peer->mxp_nid));
                 mxlnd_conn_decref(conn);
         }
-        mx_set_request_timeout(kmxlnd_data.kmx_endpt, request, MXLND_CONNECT_TIMEOUT/HZ*1000);
+        mx_set_request_timeout(kmxlnd_data.kmx_endpt, request,
+                               MXLND_CONNECT_TIMEOUT/CFS_HZ*1000);
         return;
 }
 
@@ -2729,18 +2744,18 @@ mxlnd_check_sends(kmx_peer_t *peer)
                 LASSERT(peer != NULL);
                 return -1;
         }
-        write_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
         conn = peer->mxp_conn;
         /* NOTE take a ref for the duration of this function since it is called
          * when there might not be any queued txs for this peer */
         if (conn) {
                 if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
-                        write_unlock(&kmxlnd_data.kmx_global_lock);
+                        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
                         return -1;
                 }
                 mxlnd_conn_addref(conn); /* for duration of this function */
         }
-        write_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
 
         /* do not add another ref for this tx */
 
@@ -2751,8 +2766,8 @@ mxlnd_check_sends(kmx_peer_t *peer)
         }
 
 #if MXLND_STATS
-        if (time_after(jiffies, last)) {
-                last = jiffies + HZ;
+        if (cfs_time_after(jiffies, last)) {
+                last = jiffies + CFS_HZ;
                 CDEBUG(D_NET, "status= %s credits= %d outstanding= %d ntx_msgs= %d "
                               "ntx_posted= %d ntx_data= %d data_posted= %d\n",
                               mxlnd_connstatus_to_str(conn->mxk_status), conn->mxk_credits,
@@ -2761,7 +2776,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
         }
 #endif
 
-        spin_lock(&conn->mxk_lock);
+        cfs_spin_lock(&conn->mxk_lock);
         ntx_posted = conn->mxk_ntx_posted;
         credits = conn->mxk_credits;
 
@@ -2774,7 +2789,8 @@ mxlnd_check_sends(kmx_peer_t *peer)
         /* check number of queued msgs, ignore data */
         if (conn->mxk_outstanding >= MXLND_CREDIT_HIGHWATER()) {
                 /* check if any txs queued that could return credits... */
-                if (list_empty(&conn->mxk_tx_credit_queue) || conn->mxk_ntx_msgs == 0) {
+                if (cfs_list_empty(&conn->mxk_tx_credit_queue) ||
+                    conn->mxk_ntx_msgs == 0) {
                         /* if not, send a NOOP */
                         tx = mxlnd_get_idle_tx();
                         if (likely(tx != NULL)) {
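Once outstanding receipts reach the high-water mark and no queued tx could carry the credits back, a NOOP is sent purely to return credits to the peer. The test above, condensed into a predicate (hypothetical helper; MXLND_CREDIT_HIGHWATER() is assumed to derive from kmx_peercredits):

        static int
        need_credit_noop(kmx_conn_t *conn)      /* caller holds mxk_lock */
        {
                return conn->mxk_outstanding >= MXLND_CREDIT_HIGHWATER() &&
                       (cfs_list_empty(&conn->mxk_tx_credit_queue) ||
                        conn->mxk_ntx_msgs == 0);
        }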
@@ -2795,24 +2811,24 @@ mxlnd_check_sends(kmx_peer_t *peer)
             conn->mxk_status == MXLND_CONN_FAIL)) {
                 CDEBUG(D_NET, "status=%s\n", mxlnd_connstatus_to_str(conn->mxk_status));
                 mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
                 mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_REQ);
                 goto done;
         }
 
-        while (!list_empty(&conn->mxk_tx_free_queue) ||
-               !list_empty(&conn->mxk_tx_credit_queue)) {
+        while (!cfs_list_empty(&conn->mxk_tx_free_queue) ||
+               !cfs_list_empty(&conn->mxk_tx_credit_queue)) {
                 /* We have something to send. If we have a queued tx that does not
                  * require a credit (free), choose it since its completion will
                  * return a credit (here or at the peer), complete a DATA or
                  * CONN_REQ or CONN_ACK. */
-                struct list_head *tmp_tx = NULL;
-                if (!list_empty(&conn->mxk_tx_free_queue)) {
+                cfs_list_t *tmp_tx = NULL;
+                if (!cfs_list_empty(&conn->mxk_tx_free_queue)) {
                         tmp_tx = &conn->mxk_tx_free_queue;
                 } else {
                         tmp_tx = &conn->mxk_tx_credit_queue;
                 }
-                tx = list_entry(tmp_tx->next, kmx_ctx_t, mxc_list);
+                tx = cfs_list_entry(tmp_tx->next, kmx_ctx_t, mxc_list);
 
                 msg_type = tx->mxc_msg_type;
 
@@ -2863,10 +2879,10 @@ mxlnd_check_sends(kmx_peer_t *peer)
                                              tx->mxc_cookie,
                                              mxlnd_msgtype_to_str(tx->mxc_msg_type));
                                 if (conn->mxk_status == MXLND_CONN_DISCONNECT ||
-                                    time_after_eq(jiffies, tx->mxc_deadline)) {
-                                        list_del_init(&tx->mxc_list);
+                                    cfs_time_aftereq(jiffies, tx->mxc_deadline)) {
+                                        cfs_list_del_init(&tx->mxc_list);
                                         tx->mxc_errno = -ECONNABORTED;
-                                        spin_unlock(&conn->mxk_lock);
+                                        cfs_spin_unlock(&conn->mxk_lock);
                                         mxlnd_put_idle_tx(tx);
                                         mxlnd_conn_decref(conn);
                                         goto done;
@@ -2875,7 +2891,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
                         }
                 }
 
-                list_del_init(&tx->mxc_list);
+                cfs_list_del_init(&tx->mxc_list);
 
                 /* handle credits, etc now while we have the lock to avoid races */
                 if (credit) {
@@ -2902,7 +2918,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
                             (conn->mxk_ntx_msgs >= 1)) {
                                 conn->mxk_credits++;
                                 conn->mxk_ntx_posted--;
-                                spin_unlock(&conn->mxk_lock);
+                                cfs_spin_unlock(&conn->mxk_lock);
                                 /* redundant NOOP */
                                 mxlnd_put_idle_tx(tx);
                                 mxlnd_conn_decref(conn);
@@ -2922,7 +2938,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
                 mxret = MX_SUCCESS;
 
                 status = conn->mxk_status;
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
 
                 if (likely((status == MXLND_CONN_READY) ||
                     (msg_type == MXLND_MSG_CONN_REQ) ||
@@ -2956,10 +2972,10 @@ mxlnd_check_sends(kmx_peer_t *peer)
                                                           &tx->mxc_mxreq);
                                 } else {
                                         /* send a DATA tx */
-                                        spin_lock(&conn->mxk_lock);
+                                        cfs_spin_lock(&conn->mxk_lock);
                                         conn->mxk_ntx_data--;
                                         conn->mxk_data_posted++;
-                                        spin_unlock(&conn->mxk_lock);
+                                        cfs_spin_unlock(&conn->mxk_lock);
                                         CDEBUG(D_NET, "sending %s 0x%llx\n",
                                                mxlnd_msgtype_to_str(msg_type),
                                                tx->mxc_cookie);
@@ -2990,23 +3006,23 @@ mxlnd_check_sends(kmx_peer_t *peer)
                                         tx->mxc_errno = -ECONNABORTED;
                                 }
                                 if (credit) {
-                                        spin_lock(&conn->mxk_lock);
+                                        cfs_spin_lock(&conn->mxk_lock);
                                         conn->mxk_ntx_posted--;
                                         conn->mxk_credits++;
-                                        spin_unlock(&conn->mxk_lock);
+                                        cfs_spin_unlock(&conn->mxk_lock);
                                 } else if (msg_type == MXLND_MSG_PUT_DATA ||
                                         msg_type == MXLND_MSG_GET_DATA) {
-                                        spin_lock(&conn->mxk_lock);
+                                        cfs_spin_lock(&conn->mxk_lock);
                                         conn->mxk_data_posted--;
-                                        spin_unlock(&conn->mxk_lock);
+                                        cfs_spin_unlock(&conn->mxk_lock);
                                 }
                                 if (msg_type != MXLND_MSG_PUT_DATA &&
                                     msg_type != MXLND_MSG_GET_DATA &&
                                     msg_type != MXLND_MSG_CONN_REQ &&
                                     msg_type != MXLND_MSG_CONN_ACK) {
-                                        spin_lock(&conn->mxk_lock);
+                                        cfs_spin_lock(&conn->mxk_lock);
                                         conn->mxk_outstanding += tx->mxc_msg->mxm_credits;
-                                        spin_unlock(&conn->mxk_lock);
+                                        cfs_spin_unlock(&conn->mxk_lock);
                                 }
                                 if (msg_type != MXLND_MSG_CONN_REQ &&
                                     msg_type != MXLND_MSG_CONN_ACK) {
@@ -3017,10 +3033,10 @@ mxlnd_check_sends(kmx_peer_t *peer)
                                 mxlnd_conn_decref(conn);
                         }
                 }
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
         }
 done_locked:
-        spin_unlock(&conn->mxk_lock);
+        cfs_spin_unlock(&conn->mxk_lock);
 done:
         mxlnd_conn_decref(conn); /* drop ref taken at start of function */
         return found;
@@ -3060,28 +3076,28 @@ mxlnd_handle_tx_completion(kmx_ctx_t *tx)
         if (failed) {
                 if (tx->mxc_errno == 0) tx->mxc_errno = -EIO;
         } else {
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 conn->mxk_last_tx = cfs_time_current(); /* jiffies */
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
 
         switch (type) {
 
         case MXLND_MSG_GET_DATA:
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 if (conn->mxk_incarnation == tx->mxc_incarnation) {
                         conn->mxk_outstanding++;
                         conn->mxk_data_posted--;
                 }
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
                 break;
 
         case MXLND_MSG_PUT_DATA:
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 if (conn->mxk_incarnation == tx->mxc_incarnation) {
                         conn->mxk_data_posted--;
                 }
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
                 break;
 
         case MXLND_MSG_NOOP:
@@ -3104,12 +3120,12 @@ mxlnd_handle_tx_completion(kmx_ctx_t *tx)
                                mx_strstatus(code), code, tx->mxc_errno,
                                libcfs_nid2str(tx->mxc_nid));
                         if (!peer->mxp_incompatible) {
-                                spin_lock(&conn->mxk_lock);
+                                cfs_spin_lock(&conn->mxk_lock);
                                 if (code == MX_STATUS_BAD_SESSION)
                                         mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
                                 else
                                         mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                                spin_unlock(&conn->mxk_lock);
+                                cfs_spin_unlock(&conn->mxk_lock);
                         }
                 }
                 break;
@@ -3120,11 +3136,11 @@ mxlnd_handle_tx_completion(kmx_ctx_t *tx)
         }
 
         if (credit) {
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 if (conn->mxk_incarnation == tx->mxc_incarnation) {
                         conn->mxk_ntx_posted--;
                 }
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
 
         mxlnd_put_idle_tx(tx);
@@ -3175,7 +3191,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
         } /* else peer and conn == NULL */
 
         if (conn == NULL && peer != NULL) {
-                write_lock(&kmxlnd_data.kmx_global_lock);
+                cfs_write_lock(&kmxlnd_data.kmx_global_lock);
                 conn = peer->mxp_conn;
                 if (conn) {
                         mxlnd_conn_addref(conn); /* conn takes ref... */
@@ -3183,7 +3199,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
                         conn_ref = 1;
                         peer_ref = 0;
                 }
-                write_unlock(&kmxlnd_data.kmx_global_lock);
+                cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
                 rx->mxc_conn = conn;
         }
 
@@ -3267,7 +3283,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
 
         LASSERT(peer != NULL && conn != NULL);
         if (msg->mxm_credits != 0) {
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 if (msg->mxm_srcstamp == conn->mxk_incarnation) {
                         if ((conn->mxk_credits + msg->mxm_credits) >
                              *kmxlnd_tunables.kmx_peercredits) {
@@ -3278,7 +3294,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
                         LASSERT(conn->mxk_credits >= 0);
                         LASSERT(conn->mxk_credits <= *kmxlnd_tunables.kmx_peercredits);
                 }
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
 
         CDEBUG(D_NET, "switch %s for rx (0x%llx)\n", mxlnd_msgtype_to_str(type), seq);
@@ -3328,16 +3344,16 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
 
         if (ret < 0) {
                 CDEBUG(D_NET, "setting PEER_CONN_FAILED\n");
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
 
 cleanup:
         if (conn != NULL) {
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 conn->mxk_last_rx = cfs_time_current(); /* jiffies */
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
 
         if (repost) {
@@ -3348,9 +3364,9 @@ cleanup:
                             type == MXLND_MSG_EAGER ||
                             type == MXLND_MSG_PUT_REQ ||
                             type == MXLND_MSG_NOOP) {
-                                spin_lock(&conn->mxk_lock);
+                                cfs_spin_lock(&conn->mxk_lock);
                                 conn->mxk_outstanding++;
-                                spin_unlock(&conn->mxk_lock);
+                                cfs_spin_unlock(&conn->mxk_lock);
                         }
                 }
                 if (conn_ref) mxlnd_conn_decref(conn);
@@ -3398,11 +3414,12 @@ mxlnd_handle_connect_msg(kmx_peer_t *peer, u8 msg_type, mx_status_t status)
                         peer->mxp_nid,
                         peer->mxp_nic_id,
                         peer->mxp_ep_id);
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
 
-                if (time_after(jiffies, peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT)) {
+                if (cfs_time_after(jiffies, peer->mxp_reconnect_time +
+                                   MXLND_CONNECT_TIMEOUT)) {
                         CDEBUG(D_NETERROR, "timeout, calling conn_disconnect()\n");
                         mxlnd_conn_disconnect(conn, 0, send_bye);
                 }
@@ -3411,21 +3428,21 @@ mxlnd_handle_connect_msg(kmx_peer_t *peer, u8 msg_type, mx_status_t status)
                 return;
         }
         mx_decompose_endpoint_addr2(status.source, &nic_id, &ep_id, &sid);
-        write_lock(&kmxlnd_data.kmx_global_lock);
-        spin_lock(&conn->mxk_lock);
+        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_spin_lock(&conn->mxk_lock);
         conn->mxk_epa = status.source;
         mx_set_endpoint_addr_context(conn->mxk_epa, (void *) conn);
         if (msg_type == MXLND_MSG_ICON_ACK && likely(!peer->mxp_incompatible)) {
                 mxlnd_set_conn_status(conn, MXLND_CONN_READY);
         }
-        spin_unlock(&conn->mxk_lock);
-        write_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_spin_unlock(&conn->mxk_lock);
+        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
 
         /* mx_iconnect() succeeded, reset delay to 0 */
-        write_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
         peer->mxp_reconnect_time = 0;
         peer->mxp_conn->mxk_sid = sid;
-        write_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
 
         /* marshal CONN_REQ or CONN_ACK msg */
         /* we are still using the conn ref from iconnect() - do not take another */
@@ -3434,9 +3451,9 @@ mxlnd_handle_connect_msg(kmx_peer_t *peer, u8 msg_type, mx_status_t status)
                 CDEBUG(D_NETERROR, "Can't obtain %s tx for %s\n",
                        mxlnd_msgtype_to_str(type),
                        libcfs_nid2str(peer->mxp_nid));
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
                 mxlnd_conn_decref(conn);
                 return;
         }
@@ -3486,7 +3503,7 @@ mxlnd_request_waitd(void *arg)
 
         CDEBUG(D_NET, "%s starting\n", name);
 
-        while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+        while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
                 u8      msg_type        = 0;
 
                 mxret = MX_SUCCESS;
@@ -3504,7 +3521,7 @@ mxlnd_request_waitd(void *arg)
                 mxret = mx_wait_any(kmxlnd_data.kmx_endpt, MXLND_WAIT_TIMEOUT,
                                     0ULL, 0ULL, &status, &result);
 #endif
-                if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown)))
+                if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown)))
                         break;
 
                 if (result != 1) {
@@ -3585,14 +3602,15 @@ mxlnd_check_timeouts(unsigned long now)
         unsigned long   next            = 0; /* jiffies */
         kmx_peer_t      *peer           = NULL;
         kmx_conn_t      *conn           = NULL;
-        rwlock_t        *g_lock         = &kmxlnd_data.kmx_global_lock;
+        cfs_rwlock_t    *g_lock         = &kmxlnd_data.kmx_global_lock;
 
-        read_lock(g_lock);
+        cfs_read_lock(g_lock);
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) {
+                cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
+                                        mxp_list) {
 
-                        if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown))) {
-                                read_unlock(g_lock);
+                        if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+                                cfs_read_unlock(g_lock);
                                 return next;
                         }
 
@@ -3603,14 +3621,14 @@ mxlnd_check_timeouts(unsigned long now)
                                 continue;
                         }
 
-                        spin_lock(&conn->mxk_lock);
+                        cfs_spin_lock(&conn->mxk_lock);
 
                         /* if nothing pending (timeout == 0) or
                          * if conn is already disconnected,
                          * skip this conn */
                         if (conn->mxk_timeout == 0 ||
                             conn->mxk_status == MXLND_CONN_DISCONNECT) {
-                                spin_unlock(&conn->mxk_lock);
+                                cfs_spin_unlock(&conn->mxk_lock);
                                 mxlnd_conn_decref(conn);
                                 continue;
                         }
@@ -3619,16 +3637,17 @@ mxlnd_check_timeouts(unsigned long now)
                          * if it is in the future, we will sleep until then.
                          * if it is in the past, then we will sleep one
                          * second and repeat the process. */
-                        if ((next == 0) || (time_before(conn->mxk_timeout, next))) {
+                        if ((next == 0) ||
+                            (cfs_time_before(conn->mxk_timeout, next))) {
                                 next = conn->mxk_timeout;
                         }
 
                         disconnect = 0;
 
-                        if (time_after_eq(now, conn->mxk_timeout))  {
+                        if (cfs_time_aftereq(now, conn->mxk_timeout))  {
                                 disconnect = 1;
                         }
-                        spin_unlock(&conn->mxk_lock);
+                        cfs_spin_unlock(&conn->mxk_lock);
 
                         if (disconnect) {
                                 mxlnd_conn_disconnect(conn, 1, 1);
@@ -3636,7 +3655,7 @@ mxlnd_check_timeouts(unsigned long now)
                         mxlnd_conn_decref(conn);
                 }
         }
-        read_unlock(g_lock);
+        cfs_read_unlock(g_lock);
         if (next == 0) next = now + MXLND_COMM_TIMEOUT;
 
         return next;
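
The cfs_time_* conversions in the hunks above are drop-in replacements: the cfs_ names are expected to follow the kernel's time_after() idiom, which stays correct across jiffies wraparound by comparing through signed subtraction. A minimal sketch of the assumed definitions (illustrative only, not taken from this patch):

        /* Assumed shape of the cfs_ time comparisons: signed subtraction
         * keeps the result correct even when the jiffies counter wraps. */
        #define cfs_time_after(a, b)    ((long)(b) - (long)(a) < 0)
        #define cfs_time_aftereq(a, b)  ((long)(a) - (long)(b) >= 0)
        #define cfs_time_before(a, b)   cfs_time_after(b, a)
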
@@ -3654,7 +3673,7 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
         kmx_msg_t       *msg            = &cp->mxr_msg;
         kmx_peer_t      *peer           = cp->mxr_peer;
         kmx_conn_t      *conn           = NULL;
-        rwlock_t        *g_lock         = &kmxlnd_data.kmx_global_lock;
+        cfs_rwlock_t    *g_lock         = &kmxlnd_data.kmx_global_lock;
 
         mx_decompose_endpoint_addr2(cp->mxr_epa, &nic_id, &ep_id, &sid);
 
@@ -3711,7 +3730,7 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
                         }
                         peer->mxp_conn->mxk_sid = sid;
                         LASSERT(peer->mxp_ep_id == ep_id);
-                        write_lock(g_lock);
+                        cfs_write_lock(g_lock);
                         existing_peer = mxlnd_find_peer_by_nid_locked(msg->mxm_srcnid);
                         if (existing_peer) {
                                 mxlnd_conn_decref(peer->mxp_conn);
@@ -3720,16 +3739,16 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
                                 mxlnd_conn_addref(peer->mxp_conn);
                                 conn = peer->mxp_conn;
                         } else {
-                                list_add_tail(&peer->mxp_list,
-                                              &kmxlnd_data.kmx_peers[hash]);
-                                atomic_inc(&kmxlnd_data.kmx_npeers);
+                                cfs_list_add_tail(&peer->mxp_list,
+                                                  &kmxlnd_data.kmx_peers[hash]);
+                                cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
                         }
-                        write_unlock(g_lock);
+                        cfs_write_unlock(g_lock);
                 } else {
                         ret = mxlnd_conn_alloc(&conn, peer); /* adds 2nd ref */
-                        write_lock(g_lock);
+                        cfs_write_lock(g_lock);
                         mxlnd_peer_decref(peer); /* drop ref taken above */
-                        write_unlock(g_lock);
+                        cfs_write_unlock(g_lock);
                         if (ret != 0) {
                                 CDEBUG(D_NETERROR, "Cannot allocate mxp_conn\n");
                                 goto cleanup;
@@ -3763,13 +3782,13 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
                         conn = peer->mxp_conn;
                 }
         }
-        write_lock(g_lock);
+        cfs_write_lock(g_lock);
         peer->mxp_incompatible = incompatible;
-        write_unlock(g_lock);
-        spin_lock(&conn->mxk_lock);
+        cfs_write_unlock(g_lock);
+        cfs_spin_lock(&conn->mxk_lock);
         conn->mxk_incarnation = msg->mxm_srcstamp;
         mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
-        spin_unlock(&conn->mxk_lock);
+        cfs_spin_unlock(&conn->mxk_lock);
 
         /* handle_conn_ack() will create the CONN_ACK msg */
         mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_ACK);
@@ -3835,10 +3854,10 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp)
                 ret = -1;
                 goto failed;
         }
-        write_lock(&kmxlnd_data.kmx_global_lock);
+        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
         peer->mxp_incompatible = incompatible;
-        write_unlock(&kmxlnd_data.kmx_global_lock);
-        spin_lock(&conn->mxk_lock);
+        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+        cfs_spin_lock(&conn->mxk_lock);
         conn->mxk_credits = *kmxlnd_tunables.kmx_peercredits;
         conn->mxk_outstanding = 0;
         conn->mxk_incarnation = msg->mxm_srcstamp;
@@ -3848,16 +3867,16 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp)
                        libcfs_nid2str(msg->mxm_srcnid));
                 mxlnd_set_conn_status(conn, MXLND_CONN_READY);
         }
-        spin_unlock(&conn->mxk_lock);
+        cfs_spin_unlock(&conn->mxk_lock);
 
         if (!incompatible)
                 mxlnd_check_sends(peer);
 
 failed:
         if (ret < 0) {
-                spin_lock(&conn->mxk_lock);
+                cfs_spin_lock(&conn->mxk_lock);
                 mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                spin_unlock(&conn->mxk_lock);
+                cfs_spin_unlock(&conn->mxk_lock);
         }
 
         if (incompatible) mxlnd_conn_disconnect(conn, 0, 0);
@@ -3870,18 +3889,18 @@ int
 mxlnd_abort_msgs(void)
 {
         int                     count           = 0;
-        struct list_head        *orphans        = &kmxlnd_data.kmx_orphan_msgs;
-        spinlock_t              *g_conn_lock    = &kmxlnd_data.kmx_conn_lock;
+        cfs_list_t              *orphans        = &kmxlnd_data.kmx_orphan_msgs;
+        cfs_spinlock_t          *g_conn_lock    = &kmxlnd_data.kmx_conn_lock;
 
         /* abort orphans */
-        spin_lock(g_conn_lock);
-        while (!list_empty(orphans)) {
+        cfs_spin_lock(g_conn_lock);
+        while (!cfs_list_empty(orphans)) {
                 kmx_ctx_t       *ctx     = NULL;
                 kmx_conn_t      *conn   = NULL;
 
-                ctx = list_entry(orphans->next, kmx_ctx_t, mxc_list);
-                list_del_init(&ctx->mxc_list);
-                spin_unlock(g_conn_lock);
+                ctx = cfs_list_entry(orphans->next, kmx_ctx_t, mxc_list);
+                cfs_list_del_init(&ctx->mxc_list);
+                cfs_spin_unlock(g_conn_lock);
 
                 ctx->mxc_errno = -ECONNABORTED;
                 conn = ctx->mxc_conn;
@@ -3898,9 +3917,9 @@ mxlnd_abort_msgs(void)
                 }
 
                 count++;
-                spin_lock(g_conn_lock);
+                cfs_spin_lock(g_conn_lock);
         }
-        spin_unlock(g_conn_lock);
+        cfs_spin_unlock(g_conn_lock);
 
         return count;
 }
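
mxlnd_abort_msgs() above is an instance of the drain idiom this file applies to every shared queue: take one entry off the list while holding the spinlock, drop the lock to do the possibly-blocking work, then retake it before testing the list again. The idiom in isolation, as a sketch (item_t, link, and process() are hypothetical stand-ins; the lock is never held across the work):

        static void
        drain_queue(cfs_spinlock_t *lock, cfs_list_t *queue)
        {
                cfs_spin_lock(lock);
                while (!cfs_list_empty(queue)) {
                        item_t *item = cfs_list_entry(queue->next,
                                                      item_t, link);

                        cfs_list_del_init(&item->link);
                        cfs_spin_unlock(lock);

                        process(item);          /* may block; lock dropped */

                        cfs_spin_lock(lock);
                }
                cfs_spin_unlock(lock);
        }
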
@@ -3909,27 +3928,27 @@ int
 mxlnd_free_conn_zombies(void)
 {
         int                     count           = 0;
-        struct list_head        *zombies        = &kmxlnd_data.kmx_conn_zombies;
-        spinlock_t              *g_conn_lock    = &kmxlnd_data.kmx_conn_lock;
-        rwlock_t                *g_lock         = &kmxlnd_data.kmx_global_lock;
+        cfs_list_t             *zombies        = &kmxlnd_data.kmx_conn_zombies;
+        cfs_spinlock_t         *g_conn_lock    = &kmxlnd_data.kmx_conn_lock;
+        cfs_rwlock_t           *g_lock         = &kmxlnd_data.kmx_global_lock;
 
         /* cleanup any zombies */
-        spin_lock(g_conn_lock);
-        while (!list_empty(zombies)) {
+        cfs_spin_lock(g_conn_lock);
+        while (!cfs_list_empty(zombies)) {
                 kmx_conn_t      *conn   = NULL;
 
-                conn = list_entry(zombies->next, kmx_conn_t, mxk_zombie);
-                list_del_init(&conn->mxk_zombie);
-                spin_unlock(g_conn_lock);
+                conn = cfs_list_entry(zombies->next, kmx_conn_t, mxk_zombie);
+                cfs_list_del_init(&conn->mxk_zombie);
+                cfs_spin_unlock(g_conn_lock);
 
-                write_lock(g_lock);
+                cfs_write_lock(g_lock);
                 mxlnd_conn_free_locked(conn);
-                write_unlock(g_lock);
+                cfs_write_unlock(g_lock);
 
                 count++;
-                spin_lock(g_conn_lock);
+                cfs_spin_lock(g_conn_lock);
         }
-        spin_unlock(g_conn_lock);
+        cfs_spin_unlock(g_conn_lock);
         CDEBUG(D_NET, "%s: freed %d zombies\n", __func__, count);
         return count;
 }
@@ -3949,15 +3968,15 @@ mxlnd_connd(void *arg)
 
         CDEBUG(D_NET, "connd starting\n");
 
-        while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
-                int                     ret             = 0;
-                kmx_connparams_t       *cp              = NULL;
-                spinlock_t             *g_conn_lock     = &kmxlnd_data.kmx_conn_lock;
-                struct list_head       *conn_reqs       = &kmxlnd_data.kmx_conn_reqs;
+        while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+                int                ret             = 0;
+                kmx_connparams_t  *cp              = NULL;
+                cfs_spinlock_t    *g_conn_lock     = &kmxlnd_data.kmx_conn_lock;
+                cfs_list_t        *conn_reqs       = &kmxlnd_data.kmx_conn_reqs;
 
                 ret = down_interruptible(&kmxlnd_data.kmx_conn_sem);
 
-                if (atomic_read(&kmxlnd_data.kmx_shutdown))
+                if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
                         break;
 
                 if (ret != 0)
@@ -3966,17 +3985,18 @@ mxlnd_connd(void *arg)
                 ret = mxlnd_abort_msgs();
                 ret += mxlnd_free_conn_zombies();
 
-                spin_lock(g_conn_lock);
-                if (list_empty(conn_reqs)) {
+                cfs_spin_lock(g_conn_lock);
+                if (cfs_list_empty(conn_reqs)) {
                         if (ret == 0)
                                 CDEBUG(D_NETERROR, "connd woke up but did not "
                                        "find a kmx_connparams_t or zombie conn\n");
-                        spin_unlock(g_conn_lock);
+                        cfs_spin_unlock(g_conn_lock);
                         continue;
                 }
-                cp = list_entry(conn_reqs->next, kmx_connparams_t, mxr_list);
-                list_del_init(&cp->mxr_list);
-                spin_unlock(g_conn_lock);
+                cp = cfs_list_entry(conn_reqs->next, kmx_connparams_t,
+                                    mxr_list);
+                cfs_list_del_init(&cp->mxr_list);
+                cfs_spin_unlock(g_conn_lock);
 
                 switch (MXLND_MSG_TYPE(cp->mxr_match)) {
                 case MXLND_MSG_CONN_REQ:
@@ -4013,33 +4033,34 @@ mxlnd_timeoutd(void *arg)
         long            id      = (long) arg;
         unsigned long   now     = 0;
         unsigned long   next    = 0;
-        unsigned long   delay   = HZ;
+        unsigned long   delay   = CFS_HZ;
         kmx_peer_t     *peer    = NULL;
         kmx_peer_t     *temp    = NULL;
         kmx_conn_t     *conn    = NULL;
-        rwlock_t       *g_lock  = &kmxlnd_data.kmx_global_lock;
+        cfs_rwlock_t   *g_lock  = &kmxlnd_data.kmx_global_lock;
 
         cfs_daemonize("mxlnd_timeoutd");
 
         CDEBUG(D_NET, "timeoutd starting\n");
 
-        while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+        while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
 
                 now = jiffies;
                 /* if the next timeout has not arrived, go back to sleep */
-                if (time_after(now, next)) {
+                if (cfs_time_after(now, next)) {
                         next = mxlnd_check_timeouts(now);
                 }
 
                 /* try to progress peers' txs */
-               write_lock(g_lock);
+               cfs_write_lock(g_lock);
                 for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                        struct list_head *peers = &kmxlnd_data.kmx_peers[i];
+                        cfs_list_t *peers = &kmxlnd_data.kmx_peers[i];
 
                         /* NOTE we are safe against the removal of peer, but
                          * not against the removal of temp */
-                        list_for_each_entry_safe(peer, temp, peers, mxp_list) {
-                                if (atomic_read(&kmxlnd_data.kmx_shutdown))
+                        cfs_list_for_each_entry_safe(peer, temp, peers,
+                                                     mxp_list) {
+                                if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
                                         break;
                                 mxlnd_peer_addref(peer); /* add ref... */
                                 conn = peer->mxp_conn;
@@ -4054,16 +4075,18 @@ mxlnd_timeoutd(void *arg)
 
                                 if ((conn->mxk_status == MXLND_CONN_READY ||
                                     conn->mxk_status == MXLND_CONN_FAIL) &&
-                                    time_after(now, conn->mxk_last_tx + HZ)) {
-                                        write_unlock(g_lock);
+                                    cfs_time_after(now,
+                                                   conn->mxk_last_tx +
+                                                   CFS_HZ)) {
+                                        cfs_write_unlock(g_lock);
                                         mxlnd_check_sends(peer);
-                                        write_lock(g_lock);
+                                        cfs_write_lock(g_lock);
                                 }
                                 mxlnd_conn_decref(conn); /* until here */
                                 mxlnd_peer_decref(peer); /* ...to here */
                         }
                 }
-                write_unlock(g_lock);
+                cfs_write_unlock(g_lock);
 
                 mxlnd_sleep(delay);
         }
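
All of the mxlnd changes above are mechanical renames; behavior is unchanged because, on Linux, each cfs_ name is expected to be a thin alias over the corresponding native primitive. A plausible shim consistent with these call sites (hypothetical; the authoritative definitions live in the libcfs headers this patch also touches):

        typedef spinlock_t      cfs_spinlock_t;
        typedef rwlock_t        cfs_rwlock_t;
        typedef atomic_t        cfs_atomic_t;

        #define cfs_spin_lock(l)        spin_lock(l)
        #define cfs_spin_unlock(l)      spin_unlock(l)
        #define cfs_write_lock(l)       write_lock(l)
        #define cfs_write_unlock(l)     write_unlock(l)
        #define cfs_atomic_read(a)      atomic_read(a)
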
diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index d10041a..2251f23 100644
@@ -345,21 +345,21 @@ kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
         peer->ibp_nid = nid;
         peer->ibp_error = 0;
         peer->ibp_last_alive = 0;
-        atomic_set(&peer->ibp_refcount, 1);     /* 1 ref for caller */
+        cfs_atomic_set(&peer->ibp_refcount, 1);  /* 1 ref for caller */
 
-        INIT_LIST_HEAD(&peer->ibp_list);       /* not in the peer table yet */
-        INIT_LIST_HEAD(&peer->ibp_conns);
-        INIT_LIST_HEAD(&peer->ibp_tx_queue);
+        CFS_INIT_LIST_HEAD(&peer->ibp_list);     /* not in the peer table yet */
+        CFS_INIT_LIST_HEAD(&peer->ibp_conns);
+        CFS_INIT_LIST_HEAD(&peer->ibp_tx_queue);
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         /* always called with a ref on ni, which prevents ni being shutdown */
         LASSERT (net->ibn_shutdown == 0);
 
         /* npeers only grows with the global lock held */
-        atomic_inc(&net->ibn_npeers);
+        cfs_atomic_inc(&net->ibn_npeers);
 
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         *peerp = peer;
         return 0;
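
kiblnd_create_peer() hands its caller the object's only reference; everything afterwards is driven by the counter reaching zero. The matching decref, sketched under the assumption that it follows the usual dec-and-test shape (the real helper lives in o2iblnd.h and may differ in detail; cfs_atomic_dec_and_test is assumed):

        static inline void
        kiblnd_peer_decref_sketch(kib_peer_t *peer)
        {
                /* dropping the last reference destroys the peer */
                if (cfs_atomic_dec_and_test(&peer->ibp_refcount))
                        kiblnd_destroy_peer(peer);
        }
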
@@ -371,12 +371,12 @@ kiblnd_destroy_peer (kib_peer_t *peer)
         kib_net_t *net = peer->ibp_ni->ni_data;
 
         LASSERT (net != NULL);
-        LASSERT (atomic_read(&peer->ibp_refcount) == 0);
+        LASSERT (cfs_atomic_read(&peer->ibp_refcount) == 0);
         LASSERT (!kiblnd_peer_active(peer));
         LASSERT (peer->ibp_connecting == 0);
         LASSERT (peer->ibp_accepting == 0);
-        LASSERT (list_empty(&peer->ibp_conns));
-        LASSERT (list_empty(&peer->ibp_tx_queue));
+        LASSERT (cfs_list_empty(&peer->ibp_conns));
+        LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
 
         LIBCFS_FREE(peer, sizeof(*peer));
 
@@ -384,7 +384,7 @@ kiblnd_destroy_peer (kib_peer_t *peer)
          * they are destroyed, so we can be assured that _all_ state to do
          * with this peer has been cleaned up when its refcount drops to
          * zero. */
-        atomic_dec(&net->ibn_npeers);
+        cfs_atomic_dec(&net->ibn_npeers);
 }
 
 kib_peer_t *
@@ -392,24 +392,24 @@ kiblnd_find_peer_locked (lnet_nid_t nid)
 {
         /* the caller is responsible for accounting the additional reference
          * that this creates */
-        struct list_head *peer_list = kiblnd_nid2peerlist(nid);
-        struct list_head *tmp;
+        cfs_list_t       *peer_list = kiblnd_nid2peerlist(nid);
+        cfs_list_t       *tmp;
         kib_peer_t       *peer;
 
-        list_for_each (tmp, peer_list) {
+        cfs_list_for_each (tmp, peer_list) {
 
-                peer = list_entry(tmp, kib_peer_t, ibp_list);
+                peer = cfs_list_entry(tmp, kib_peer_t, ibp_list);
 
                 LASSERT (peer->ibp_connecting > 0 || /* creating conns */
                          peer->ibp_accepting > 0 ||
-                         !list_empty(&peer->ibp_conns));  /* active conn */
+                         !cfs_list_empty(&peer->ibp_conns));  /* active conn */
 
                 if (peer->ibp_nid != nid)
                         continue;
 
                 CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
                        peer, libcfs_nid2str(nid),
-                       atomic_read(&peer->ibp_refcount),
+                       cfs_atomic_read(&peer->ibp_refcount),
                        peer->ibp_version);
                 return peer;
         }
@@ -419,10 +419,10 @@ kiblnd_find_peer_locked (lnet_nid_t nid)
 void
 kiblnd_unlink_peer_locked (kib_peer_t *peer)
 {
-        LASSERT (list_empty(&peer->ibp_conns));
+        LASSERT (cfs_list_empty(&peer->ibp_conns));
 
         LASSERT (kiblnd_peer_active(peer));
-        list_del_init(&peer->ibp_list);
+        cfs_list_del_init(&peer->ibp_list);
         /* lose peerlist's ref */
         kiblnd_peer_decref(peer);
 }
@@ -431,21 +431,21 @@ int
 kiblnd_get_peer_info (lnet_ni_t *ni, int index, 
                       lnet_nid_t *nidp, int *count)
 {
-        kib_peer_t        *peer;
-        struct list_head  *ptmp;
-        int                i;
-        unsigned long      flags;
+        kib_peer_t            *peer;
+        cfs_list_t            *ptmp;
+        int                    i;
+        unsigned long          flags;
 
-        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
 
-                list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+                cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
 
-                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
+                        peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
                         LASSERT (peer->ibp_connecting > 0 ||
                                  peer->ibp_accepting > 0 ||
-                                 !list_empty(&peer->ibp_conns));
+                                 !cfs_list_empty(&peer->ibp_conns));
 
                         if (peer->ibp_ni != ni)
                                 continue;
@@ -454,30 +454,30 @@ kiblnd_get_peer_info (lnet_ni_t *ni, int index,
                                 continue;
 
                         *nidp = peer->ibp_nid;
-                        *count = atomic_read(&peer->ibp_refcount);
+                        *count = cfs_atomic_read(&peer->ibp_refcount);
 
-                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                               flags);
+                        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                                   flags);
                         return 0;
                 }
         }
 
-        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
         return -ENOENT;
 }
 
 void
 kiblnd_del_peer_locked (kib_peer_t *peer)
 {
-        struct list_head *ctmp;
-        struct list_head *cnxt;
-        kib_conn_t       *conn;
+        cfs_list_t           *ctmp;
+        cfs_list_t           *cnxt;
+        kib_conn_t           *conn;
 
-        if (list_empty(&peer->ibp_conns)) {
+        if (cfs_list_empty(&peer->ibp_conns)) {
                 kiblnd_unlink_peer_locked(peer);
         } else {
-                list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
-                        conn = list_entry(ctmp, kib_conn_t, ibc_list);
+                cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+                        conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
 
                         kiblnd_close_conn_locked(conn, 0);
                 }
@@ -490,17 +490,17 @@ kiblnd_del_peer_locked (kib_peer_t *peer)
 int
 kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
 {
-        CFS_LIST_HEAD     (zombies);
-        struct list_head  *ptmp;
-        struct list_head  *pnxt;
-        kib_peer_t        *peer;
-        int                lo;
-        int                hi;
-        int                i;
-        unsigned long      flags;
-        int                rc = -ENOENT;
+        CFS_LIST_HEAD         (zombies);
+        cfs_list_t            *ptmp;
+        cfs_list_t            *pnxt;
+        kib_peer_t            *peer;
+        int                    lo;
+        int                    hi;
+        int                    i;
+        unsigned long          flags;
+        int                    rc = -ENOENT;
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         if (nid != LNET_NID_ANY) {
                 lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
@@ -510,11 +510,11 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
-                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
+                cfs_list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+                        peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
                         LASSERT (peer->ibp_connecting > 0 ||
                                  peer->ibp_accepting > 0 ||
-                                 !list_empty(&peer->ibp_conns));
+                                 !cfs_list_empty(&peer->ibp_conns));
 
                         if (peer->ibp_ni != ni)
                                 continue;
@@ -522,10 +522,11 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
                         if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
                                 continue;
 
-                        if (!list_empty(&peer->ibp_tx_queue)) {
-                                LASSERT (list_empty(&peer->ibp_conns));
+                        if (!cfs_list_empty(&peer->ibp_tx_queue)) {
+                                LASSERT (cfs_list_empty(&peer->ibp_conns));
 
-                                list_splice_init(&peer->ibp_tx_queue, &zombies);
+                                cfs_list_splice_init(&peer->ibp_tx_queue,
+                                                     &zombies);
                         }
 
                         kiblnd_del_peer_locked(peer);
@@ -533,7 +534,7 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
                 }
         }
 
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         kiblnd_txlist_done(ni, &zombies, -EIO);
 
@@ -543,40 +544,41 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
 kib_conn_t *
 kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
 {
-        kib_peer_t        *peer;
-        struct list_head  *ptmp;
-        kib_conn_t        *conn;
-        struct list_head  *ctmp;
-        int                i;
-        unsigned long      flags;
+        kib_peer_t            *peer;
+        cfs_list_t            *ptmp;
+        kib_conn_t            *conn;
+        cfs_list_t            *ctmp;
+        int                    i;
+        unsigned long          flags;
 
-        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
-                list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+                cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
 
-                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
+                        peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
                         LASSERT (peer->ibp_connecting > 0 ||
                                  peer->ibp_accepting > 0 ||
-                                 !list_empty(&peer->ibp_conns));
+                                 !cfs_list_empty(&peer->ibp_conns));
 
                         if (peer->ibp_ni != ni)
                                 continue;
 
-                        list_for_each (ctmp, &peer->ibp_conns) {
+                        cfs_list_for_each (ctmp, &peer->ibp_conns) {
                                 if (index-- > 0)
                                         continue;
 
-                                conn = list_entry(ctmp, kib_conn_t, ibc_list);
+                                conn = cfs_list_entry(ctmp, kib_conn_t,
+                                                      ibc_list);
                                 kiblnd_conn_addref(conn);
-                                read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                                       flags);
+                                cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                                           flags);
                                 return conn;
                         }
                 }
         }
 
-        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
         return NULL;
 }
 
@@ -603,13 +605,13 @@ kiblnd_debug_tx (kib_tx_t *tx)
 void
 kiblnd_debug_conn (kib_conn_t *conn)
 {
-        struct list_head *tmp;
-        int               i;
+        cfs_list_t           *tmp;
+        int                   i;
 
-        spin_lock(&conn->ibc_lock);
+        cfs_spin_lock(&conn->ibc_lock);
 
         CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
-               atomic_read(&conn->ibc_refcount), conn,
+               cfs_atomic_read(&conn->ibc_refcount), conn,
                conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
         CDEBUG(D_CONSOLE, "   state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
                conn->ibc_state, conn->ibc_noops_posted,
@@ -618,30 +620,30 @@ kiblnd_debug_conn (kib_conn_t *conn)
         CDEBUG(D_CONSOLE, "   comms_err %d\n", conn->ibc_comms_error);
 
         CDEBUG(D_CONSOLE, "   early_rxs:\n");
-        list_for_each(tmp, &conn->ibc_early_rxs)
-                kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
+        cfs_list_for_each(tmp, &conn->ibc_early_rxs)
+                kiblnd_debug_rx(cfs_list_entry(tmp, kib_rx_t, rx_list));
 
         CDEBUG(D_CONSOLE, "   tx_queue_nocred:\n");
-        list_for_each(tmp, &conn->ibc_tx_queue_nocred)
-                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+        cfs_list_for_each(tmp, &conn->ibc_tx_queue_nocred)
+                kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
 
         CDEBUG(D_CONSOLE, "   tx_queue_rsrvd:\n");
-        list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
-                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+        cfs_list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
+                kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
 
         CDEBUG(D_CONSOLE, "   tx_queue:\n");
-        list_for_each(tmp, &conn->ibc_tx_queue)
-                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+        cfs_list_for_each(tmp, &conn->ibc_tx_queue)
+                kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
 
         CDEBUG(D_CONSOLE, "   active_txs:\n");
-        list_for_each(tmp, &conn->ibc_active_txs)
-                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+        cfs_list_for_each(tmp, &conn->ibc_active_txs)
+                kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
 
         CDEBUG(D_CONSOLE, "   rxs:\n");
         for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
                 kiblnd_debug_rx(&conn->ibc_rxs[i]);
 
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
 }
 
 int
@@ -675,14 +677,14 @@ kiblnd_setup_mtu(struct rdma_cm_id *cmid)
         if (cmid->route.path_rec == NULL)
                 return;
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
         LASSERT (mtu >= 0);
         if (mtu != 0)
                 cmid->route.path_rec->mtu = mtu;
 
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }
 
 kib_conn_t *
@@ -705,7 +707,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         int                     i;
 
         LASSERT (net != NULL);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
 
         LIBCFS_ALLOC(init_qp_attr, sizeof(*init_qp_attr));
         if (init_qp_attr == NULL) {
@@ -729,12 +731,12 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         cmid->context = conn;                   /* for future CM callbacks */
         conn->ibc_cmid = cmid;
 
-        INIT_LIST_HEAD(&conn->ibc_early_rxs);
-        INIT_LIST_HEAD(&conn->ibc_tx_queue);
-        INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
-        INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
-        INIT_LIST_HEAD(&conn->ibc_active_txs);
-        spin_lock_init(&conn->ibc_lock);
+        CFS_INIT_LIST_HEAD(&conn->ibc_early_rxs);
+        CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue);
+        CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
+        CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
+        CFS_INIT_LIST_HEAD(&conn->ibc_active_txs);
+        cfs_spin_lock_init(&conn->ibc_lock);
 
         LIBCFS_ALLOC(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
         if (conn->ibc_connvars == NULL) {
@@ -805,7 +807,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 
         /* 1 ref for caller and each rxmsg */
-        atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
+        cfs_atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
         conn->ibc_nrx = IBLND_RX_MSGS(version);
 
         /* post receives */
@@ -818,12 +820,13 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                         /* Make posted receives complete */
                         kiblnd_abort_receives(conn);
 
-                        /* correct # of posted buffers 
+                        /* correct # of posted buffers
                          * NB locking needed now I'm racing with completion */
-                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+                        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
+                                              flags);
                         conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
-                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
-                                               flags);
+                        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
+                                                   flags);
 
                         /* cmid will be destroyed by CM(ofed) after cm_callback
                          * returned, so we can't refer it anymore
@@ -845,7 +848,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         conn->ibc_state = state;
 
         /* 1 more conn */
-        atomic_inc(&net->ibn_nconns);
+        cfs_atomic_inc(&net->ibn_nconns);
         return conn;
 
  failed_2:
@@ -863,13 +866,13 @@ kiblnd_destroy_conn (kib_conn_t *conn)
         kib_peer_t        *peer = conn->ibc_peer;
         int                rc;
 
-        LASSERT (!in_interrupt());
-        LASSERT (atomic_read(&conn->ibc_refcount) == 0);
-        LASSERT (list_empty(&conn->ibc_early_rxs));
-        LASSERT (list_empty(&conn->ibc_tx_queue));
-        LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd));
-        LASSERT (list_empty(&conn->ibc_tx_queue_nocred));
-        LASSERT (list_empty(&conn->ibc_active_txs));
+        LASSERT (!cfs_in_interrupt());
+        LASSERT (cfs_atomic_read(&conn->ibc_refcount) == 0);
+        LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
+        LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
+        LASSERT (cfs_list_empty(&conn->ibc_tx_queue_rsrvd));
+        LASSERT (cfs_list_empty(&conn->ibc_tx_queue_nocred));
+        LASSERT (cfs_list_empty(&conn->ibc_active_txs));
         LASSERT (conn->ibc_noops_posted == 0);
         LASSERT (conn->ibc_nsends_posted == 0);
 
@@ -914,7 +917,7 @@ kiblnd_destroy_conn (kib_conn_t *conn)
 
                 kiblnd_peer_decref(peer);
                 rdma_destroy_id(cmid);
-                atomic_dec(&net->ibn_nconns);
+                cfs_atomic_dec(&net->ibn_nconns);
         }
 
         LIBCFS_FREE(conn, sizeof(*conn));
@@ -923,13 +926,13 @@ kiblnd_destroy_conn (kib_conn_t *conn)
 int
 kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
 {
-        kib_conn_t         *conn;
-        struct list_head   *ctmp;
-        struct list_head   *cnxt;
-        int                 count = 0;
+        kib_conn_t             *conn;
+        cfs_list_t             *ctmp;
+        cfs_list_t             *cnxt;
+        int                     count = 0;
 
-        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
-                conn = list_entry(ctmp, kib_conn_t, ibc_list);
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+                conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
 
                 CDEBUG(D_NET, "Closing conn -> %s, "
                               "version: %x, reason: %d\n",
@@ -947,13 +950,13 @@ int
 kiblnd_close_stale_conns_locked (kib_peer_t *peer,
                                  int version, __u64 incarnation)
 {
-        kib_conn_t         *conn;
-        struct list_head   *ctmp;
-        struct list_head   *cnxt;
-        int                 count = 0;
+        kib_conn_t             *conn;
+        cfs_list_t             *ctmp;
+        cfs_list_t             *cnxt;
+        int                     count = 0;
 
-        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
-                conn = list_entry(ctmp, kib_conn_t, ibc_list);
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+                conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
 
                 if (conn->ibc_version     == version &&
                     conn->ibc_incarnation == incarnation)
@@ -975,16 +978,16 @@ kiblnd_close_stale_conns_locked (kib_peer_t *peer,
 int
 kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
 {
-        kib_peer_t         *peer;
-        struct list_head   *ptmp;
-        struct list_head   *pnxt;
-        int                 lo;
-        int                 hi;
-        int                 i;
-        unsigned long       flags;
-        int                 count = 0;
+        kib_peer_t             *peer;
+        cfs_list_t             *ptmp;
+        cfs_list_t             *pnxt;
+        int                     lo;
+        int                     hi;
+        int                     i;
+        unsigned long           flags;
+        int                     count = 0;
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         if (nid != LNET_NID_ANY)
                 lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
@@ -994,12 +997,12 @@ kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+                cfs_list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
 
-                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
+                        peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
                         LASSERT (peer->ibp_connecting > 0 ||
                                  peer->ibp_accepting > 0 ||
-                                 !list_empty(&peer->ibp_conns));
+                                 !cfs_list_empty(&peer->ibp_conns));
 
                         if (peer->ibp_ni != ni)
                                 continue;
@@ -1011,7 +1014,7 @@ kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
                 }
         }
 
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         /* wildcards always succeed */
         if (nid == LNET_NID_ANY)
@@ -1077,22 +1080,22 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 void
 kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
 {
-        cfs_time_t     last_alive = 0;
-        rwlock_t      *glock = &kiblnd_data.kib_global_lock;
-        kib_peer_t    *peer;
-        unsigned long  flags;
+        cfs_time_t         last_alive = 0;
+        cfs_rwlock_t      *glock = &kiblnd_data.kib_global_lock;
+        kib_peer_t        *peer;
+        unsigned long      flags;
 
-        read_lock_irqsave(glock, flags);
+        cfs_read_lock_irqsave(glock, flags);
 
         peer = kiblnd_find_peer_locked(nid);
         if (peer != NULL) {
                 LASSERT (peer->ibp_connecting > 0 || /* creating conns */
                          peer->ibp_accepting > 0 ||
-                         !list_empty(&peer->ibp_conns));  /* active conn */
+                         !cfs_list_empty(&peer->ibp_conns));  /* active conn */
                 last_alive = peer->ibp_last_alive;
         }
 
-        read_unlock_irqrestore(glock, flags);
+        cfs_read_unlock_irqrestore(glock, flags);
 
         if (last_alive != 0)
                 *when = last_alive;
@@ -1272,7 +1275,7 @@ kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
                                                    tx->tx_msgaddr));
                 KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
 
-                list_add(&tx->tx_list, &pool->po_free_list);
+                cfs_list_add(&tx->tx_list, &pool->po_free_list);
 
                 page_offset += IBLND_MSG_SIZE;
                 LASSERT (page_offset <= PAGE_SIZE);
@@ -1350,13 +1353,13 @@ kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
 }
 
 void
-kiblnd_destroy_fmr_pool_list(struct list_head *head)
+kiblnd_destroy_fmr_pool_list(cfs_list_t *head)
 {
         kib_fmr_pool_t *pool;
 
-        while (!list_empty(head)) {
-                pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
-                list_del(&pool->fpo_list);
+        while (!cfs_list_empty(head)) {
+                pool = cfs_list_entry(head->next, kib_fmr_pool_t, fpo_list);
+                cfs_list_del(&pool->fpo_list);
                 kiblnd_destroy_fmr_pool(pool);
         }
 }
@@ -1417,11 +1420,11 @@ kiblnd_init_fmr_pool_set(kib_fmr_poolset_t *fps, kib_net_t *net)
         memset(fps, 0, sizeof(kib_fmr_poolset_t));
 
         fps->fps_net = net;
-        spin_lock_init(&fps->fps_lock);
+        cfs_spin_lock_init(&fps->fps_lock);
         CFS_INIT_LIST_HEAD(&fps->fps_pool_list);
         rc = kiblnd_create_fmr_pool(fps, &fpo);
         if (rc == 0)
-                list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+                cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
 
         return rc;
 }
@@ -1446,23 +1449,23 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
         fmr->fmr_pool = NULL;
         fmr->fmr_pfmr = NULL;
 
-        spin_lock(&fps->fps_lock);
+        cfs_spin_lock(&fps->fps_lock);
         fpo->fpo_map_count --;  /* decref the pool */
 
-        list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
+        cfs_list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
                 /* the first pool is persistent */
                 if (fps->fps_pool_list.next == &fpo->fpo_list)
                         continue;
 
                 if (fpo->fpo_map_count == 0 &&  /* no more reference */
                     cfs_time_aftereq(cfs_time_current(), fpo->fpo_deadline)) {
-                        list_move(&fpo->fpo_list, &zombies);
+                        cfs_list_move(&fpo->fpo_list, &zombies);
                         fps->fps_version ++;
                 }
         }
-        spin_unlock(&fps->fps_lock);
+        cfs_spin_unlock(&fps->fps_lock);
 
-        if (!list_empty(&zombies))
+        if (!cfs_list_empty(&zombies))
                 kiblnd_destroy_fmr_pool_list(&zombies);
 }
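
kiblnd_fmr_pool_unmap() retires idle pools in two phases: expired pools are unlinked onto a private zombies list while fps_lock is held, and only destroyed after the lock is dropped (the real function additionally always keeps the first, persistent pool). The same move-then-destroy shape in miniature (sketch; pool_t, link, expired(), and destroy() are hypothetical):

        static void
        reap_expired_pools(cfs_spinlock_t *lock, cfs_list_t *pools)
        {
                CFS_LIST_HEAD    (zombies);
                pool_t           *pool;
                pool_t           *tmp;

                cfs_spin_lock(lock);
                cfs_list_for_each_entry_safe(pool, tmp, pools, link) {
                        if (pool->refs == 0 && expired(pool))
                                cfs_list_move(&pool->link, &zombies);
                }
                cfs_spin_unlock(lock);

                while (!cfs_list_empty(&zombies)) {
                        pool = cfs_list_entry(zombies.next, pool_t, link);
                        cfs_list_del(&pool->link);
                        destroy(pool);          /* safe: no lock held */
                }
        }
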
 
@@ -1477,12 +1480,12 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
 
         LASSERT (fps->fps_net->ibn_with_fmr);
  again:
-        spin_lock(&fps->fps_lock);
+        cfs_spin_lock(&fps->fps_lock);
         version = fps->fps_version;
-        list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
+        cfs_list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
                 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
                 fpo->fpo_map_count ++;
-                spin_unlock(&fps->fps_lock);
+                cfs_spin_unlock(&fps->fps_lock);
 
                 pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
                                             pages, npages, iov);
@@ -1492,49 +1495,49 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
                         return 0;
                 }
 
-                spin_lock(&fps->fps_lock);
+                cfs_spin_lock(&fps->fps_lock);
                 fpo->fpo_map_count --;
                 if (PTR_ERR(pfmr) != -EAGAIN) {
-                        spin_unlock(&fps->fps_lock);
+                        cfs_spin_unlock(&fps->fps_lock);
                         return PTR_ERR(pfmr);
                 }
 
                 /* EAGAIN and ... */
                 if (version != fps->fps_version) {
-                        spin_unlock(&fps->fps_lock);
+                        cfs_spin_unlock(&fps->fps_lock);
                         goto again;
                 }
         }
 
         if (fps->fps_increasing) {
-                spin_unlock(&fps->fps_lock);
+                cfs_spin_unlock(&fps->fps_lock);
                 CDEBUG(D_NET, "Another thread is allocating new "
                               "FMR pool, waiting for her to complete\n");
-                schedule();
+                cfs_schedule();
                 goto again;
 
         }
 
         if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
                 /* someone failed recently */
-                spin_unlock(&fps->fps_lock);
+                cfs_spin_unlock(&fps->fps_lock);
                 return -EAGAIN;
         }
 
         fps->fps_increasing = 1;
-        spin_unlock(&fps->fps_lock);
+        cfs_spin_unlock(&fps->fps_lock);
 
         CDEBUG(D_NET, "Allocate new FMR pool\n");
         rc = kiblnd_create_fmr_pool(fps, &fpo);
-        spin_lock(&fps->fps_lock);
+        cfs_spin_lock(&fps->fps_lock);
         fps->fps_increasing = 0;
         if (rc == 0) {
                 fps->fps_version ++;
-                list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+                cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
         } else {
                 fps->fps_next_retry = cfs_time_shift(10);
         }
-        spin_unlock(&fps->fps_lock);
+        cfs_spin_unlock(&fps->fps_lock);
 
         goto again;
 }
@@ -1542,7 +1545,7 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
 static void
 kiblnd_fini_pool(kib_pool_t *pool)
 {
-        LASSERT (list_empty(&pool->po_free_list));
+        LASSERT (cfs_list_empty(&pool->po_free_list));
         LASSERT (pool->po_allocated == 0);
 
         CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
@@ -1561,13 +1564,13 @@ kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
 }
 
 void
-kiblnd_destroy_pool_list(kib_poolset_t *ps, struct list_head *head)
+kiblnd_destroy_pool_list(kib_poolset_t *ps, cfs_list_t *head)
 {
         kib_pool_t *pool;
 
-        while (!list_empty(head)) {
-                pool = list_entry(head->next, kib_pool_t, po_list);
-                list_del(&pool->po_list);
+        while (!cfs_list_empty(head)) {
+                pool = cfs_list_entry(head->next, kib_pool_t, po_list);
+                cfs_list_del(&pool->po_list);
                 ps->ps_pool_destroy(pool);
         }
 }
@@ -1598,12 +1601,12 @@ kiblnd_init_pool_set(kib_poolset_t *ps, kib_net_t *net,
         ps->ps_node_fini    = nd_fini;
         ps->ps_pool_size    = size;
         strncpy(ps->ps_name, name, IBLND_POOL_NAME_LEN);
-        spin_lock_init(&ps->ps_lock);
+        cfs_spin_lock_init(&ps->ps_lock);
         CFS_INIT_LIST_HEAD(&ps->ps_pool_list);
 
         rc = ps->ps_pool_create(ps, size, &pool);
         if (rc == 0)
-                list_add(&pool->po_list, &ps->ps_pool_list);
+                cfs_list_add(&pool->po_list, &ps->ps_pool_list);
         else
                 CERROR("Failed to create the first pool for %s\n", ps->ps_name);
 
@@ -1611,98 +1614,98 @@ kiblnd_init_pool_set(kib_poolset_t *ps, kib_net_t *net,
 }
 
 void
-kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
+kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node)
 {
         CFS_LIST_HEAD  (zombies);
         kib_poolset_t  *ps = pool->po_owner;
         kib_pool_t     *tmp;
         cfs_time_t      now = cfs_time_current();
 
-        spin_lock(&ps->ps_lock);
+        cfs_spin_lock(&ps->ps_lock);
 
         if (ps->ps_node_fini != NULL)
                 ps->ps_node_fini(pool, node);
 
         LASSERT (pool->po_allocated > 0);
-        list_add(node, &pool->po_free_list);
+        cfs_list_add(node, &pool->po_free_list);
         pool->po_allocated --;
 
-        list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
+        cfs_list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
                 /* the first pool is persistent */
                 if (ps->ps_pool_list.next == &pool->po_list)
                         continue;
 
                 if (pool->po_allocated == 0 &&
                     cfs_time_aftereq(now, pool->po_deadline))
-                        list_move(&pool->po_list, &zombies);
+                        cfs_list_move(&pool->po_list, &zombies);
         }
-        spin_unlock(&ps->ps_lock);
+        cfs_spin_unlock(&ps->ps_lock);
 
-        if (!list_empty(&zombies))
+        if (!cfs_list_empty(&zombies))
                 kiblnd_destroy_pool_list(ps, &zombies);
 }
 
-struct list_head *
+cfs_list_t *
 kiblnd_pool_alloc_node(kib_poolset_t *ps)
 {
-        struct list_head  *node;
-        kib_pool_t        *pool;
-        int                rc;
+        cfs_list_t            *node;
+        kib_pool_t            *pool;
+        int                    rc;
 
  again:
-        spin_lock(&ps->ps_lock);
-        list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
-                if (list_empty(&pool->po_free_list))
+        cfs_spin_lock(&ps->ps_lock);
+        cfs_list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
+                if (cfs_list_empty(&pool->po_free_list))
                         continue;
 
                 pool->po_allocated ++;
                 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
                 node = pool->po_free_list.next;
-                list_del(node);
+                cfs_list_del(node);
 
                 if (ps->ps_node_init != NULL) {
                         /* still hold the lock */
                         ps->ps_node_init(pool, node);
                 }
-                spin_unlock(&ps->ps_lock);
+                cfs_spin_unlock(&ps->ps_lock);
                 return node;
         }
 
         /* no available tx pool and ... */
         if (ps->ps_increasing) {
                 /* another thread is allocating a new pool */
-                spin_unlock(&ps->ps_lock);
+                cfs_spin_unlock(&ps->ps_lock);
                 CDEBUG(D_NET, "Another thread is allocating new "
                        "%s pool, waiting for her to complete\n",
                        ps->ps_name);
-                schedule();
+                cfs_schedule();
                 goto again;
         }
 
         if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
                 /* someone failed recently */
-                spin_unlock(&ps->ps_lock);
+                cfs_spin_unlock(&ps->ps_lock);
                 return NULL;
         }
 
         ps->ps_increasing = 1;
-        spin_unlock(&ps->ps_lock);
+        cfs_spin_unlock(&ps->ps_lock);
 
         CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
 
         rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
 
-        spin_lock(&ps->ps_lock);
+        cfs_spin_lock(&ps->ps_lock);
         ps->ps_increasing = 0;
         if (rc == 0) {
-                list_add_tail(&pool->po_list, &ps->ps_pool_list);
+                cfs_list_add_tail(&pool->po_list, &ps->ps_pool_list);
         } else {
                 /* retry 10 seconds later */
                 ps->ps_next_retry = cfs_time_shift(10);
                 CERROR("Can't allocate new %s pool because out of memory\n",
                        ps->ps_name);
         }
-        spin_unlock(&ps->ps_lock);
+        cfs_spin_unlock(&ps->ps_lock);
 
         goto again;
 }
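
kiblnd_pool_alloc_node() lets exactly one thread grow the pool set at a time: ps_increasing marks the grower, losers cfs_schedule() and rescan, and a recent failure fails fast until ps_next_retry. The whole control flow condensed into a generic sketch (poolset_t, try_take_free_node(), and create_pool() are hypothetical stand-ins for the code above):

        void *
        alloc_node(poolset_t *ps)
        {
                void *node;
                int   rc;
        again:
                cfs_spin_lock(&ps->ps_lock);
                node = try_take_free_node(ps);
                if (node != NULL) {
                        cfs_spin_unlock(&ps->ps_lock);
                        return node;
                }
                if (ps->ps_increasing) {        /* another thread grows */
                        cfs_spin_unlock(&ps->ps_lock);
                        cfs_schedule();
                        goto again;
                }
                if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
                        cfs_spin_unlock(&ps->ps_lock);
                        return NULL;            /* recent grow failed */
                }
                ps->ps_increasing = 1;          /* we are the grower */
                cfs_spin_unlock(&ps->ps_lock);

                rc = create_pool(ps);           /* may block */

                cfs_spin_lock(&ps->ps_lock);
                ps->ps_increasing = 0;
                if (rc != 0)                    /* back off 10 seconds */
                        ps->ps_next_retry = cfs_time_shift(10);
                cfs_spin_unlock(&ps->ps_lock);
                goto again;
        }
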
@@ -1723,10 +1726,10 @@ int
 kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_rdma_desc_t *rd,
                     __u64 *iova, kib_phys_mr_t **pp_pmr)
 {
-        kib_phys_mr_t       *pmr;
-        struct list_head    *node;
-        int                  rc;
-        int                  i;
+        kib_phys_mr_t *pmr;
+        cfs_list_t    *node;
+        int            rc;
+        int            i;
 
         node = kiblnd_pool_alloc_node(&pps->pps_poolset);
         if (node == NULL) {
@@ -1768,12 +1771,12 @@ kiblnd_destroy_pmr_pool(kib_pool_t *pool)
 
         LASSERT (pool->po_allocated == 0);
 
-        while (!list_empty(&pool->po_free_list)) {
-                pmr = list_entry(pool->po_free_list.next,
-                                 kib_phys_mr_t, pmr_list);
+        while (!cfs_list_empty(&pool->po_free_list)) {
+                pmr = cfs_list_entry(pool->po_free_list.next,
+                                     kib_phys_mr_t, pmr_list);
 
                 LASSERT (pmr->pmr_mr == NULL);
-                list_del(&pmr->pmr_list);
+                cfs_list_del(&pmr->pmr_list);
 
                 if (pmr->pmr_ipb != NULL) {
                         LIBCFS_FREE(pmr->pmr_ipb,
@@ -1818,7 +1821,7 @@ kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po)
                 if (pmr->pmr_ipb == NULL)
                         break;
 
-                list_add(&pmr->pmr_list, &pool->po_free_list);
+                cfs_list_add(&pmr->pmr_list, &pool->po_free_list);
         }
 
         if (i < size) {
@@ -1850,7 +1853,7 @@ kiblnd_destroy_tx_pool(kib_pool_t *pool)
         for (i = 0; i < pool->po_size; i++) {
                 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 if (tx->tx_pages != NULL)
                         LIBCFS_FREE(tx->tx_pages,
                                     LNET_MAX_IOV *
@@ -1962,10 +1965,11 @@ kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po)
 }
 
 static void
-kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
+kiblnd_tx_init(kib_pool_t *pool, cfs_list_t *node)
 {
-        kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, tps_poolset);
-        kib_tx_t         *tx  = list_entry(node, kib_tx_t, tx_list);
+        kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
+                                             tps_poolset);
+        kib_tx_t         *tx  = cfs_list_entry(node, kib_tx_t, tx_list);
 
         tx->tx_cookie = tps->tps_next_tx_cookie ++;
 }
@@ -2196,7 +2200,7 @@ out:
                           LPX64", array size: %d\n",
                           ibdev->ibd_mr_size, ibdev->ibd_nmrs);
 
-        list_add_tail(&ibdev->ibd_list,
+        cfs_list_add_tail(&ibdev->ibd_list,
                       &kiblnd_data.kib_devs);
         return 0;
 }
@@ -2206,8 +2210,8 @@ kiblnd_destroy_dev (kib_dev_t *dev)
 {
         LASSERT (dev->ibd_nnets == 0);
 
-        if (!list_empty(&dev->ibd_list)) /* on kib_devs? */
-                list_del_init(&dev->ibd_list);
+        if (!cfs_list_empty(&dev->ibd_list)) /* on kib_devs? */
+                cfs_list_del_init(&dev->ibd_list);
 
         kiblnd_dev_cleanup(dev);
 
@@ -2225,10 +2229,10 @@ kiblnd_base_shutdown (void)
 {
         int i;
 
-        LASSERT (list_empty(&kiblnd_data.kib_devs));
+        LASSERT (cfs_list_empty(&kiblnd_data.kib_devs));
 
         CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
-               atomic_read(&libcfs_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
         switch (kiblnd_data.kib_init) {
         default:
@@ -2238,22 +2242,22 @@ kiblnd_base_shutdown (void)
         case IBLND_INIT_DATA:
                 LASSERT (kiblnd_data.kib_peers != NULL);
                 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
-                        LASSERT (list_empty(&kiblnd_data.kib_peers[i]));
+                        LASSERT (cfs_list_empty(&kiblnd_data.kib_peers[i]));
                 }
-                LASSERT (list_empty(&kiblnd_data.kib_connd_zombies));
-                LASSERT (list_empty(&kiblnd_data.kib_connd_conns));
+                LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
+                LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
 
                 /* flag threads to terminate; wake and wait for them to die */
                 kiblnd_data.kib_shutdown = 1;
-                wake_up_all(&kiblnd_data.kib_sched_waitq);
-                wake_up_all(&kiblnd_data.kib_connd_waitq);
+                cfs_waitq_broadcast(&kiblnd_data.kib_sched_waitq);
+                cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
 
                 i = 2;
-                while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+                while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "Waiting for %d threads to terminate\n",
-                               atomic_read(&kiblnd_data.kib_nthreads));
+                               cfs_atomic_read(&kiblnd_data.kib_nthreads));
                         cfs_pause(cfs_time_seconds(1));
                 }
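
The (i & (-i)) == i test in the wait loop above (and again in
kiblnd_shutdown below) is a power-of-two check used to throttle log
noise; a small worked sketch:

    /* (i & -i) isolates the lowest set bit of i, so it equals i exactly
     * when i is a power of two.  The loop polls once per second but
     * promotes its message to D_WARNING only on iterations 2, 4, 8,
     * 16, ..., backing off the log rate exponentially. */
    static inline int power_of_two(int i)
    {
            return i > 0 && (i & (-i)) == i;
    }
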
 
@@ -2265,11 +2269,11 @@ kiblnd_base_shutdown (void)
 
         if (kiblnd_data.kib_peers != NULL)
                 LIBCFS_FREE(kiblnd_data.kib_peers,
-                            sizeof(struct list_head) *
+                            sizeof(cfs_list_t) *
                             kiblnd_data.kib_peer_hash_size);
 
         CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
-               atomic_read(&libcfs_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
         kiblnd_data.kib_init = IBLND_INIT_NOTHING;
         PORTAL_MODULE_UNUSE;
@@ -2279,7 +2283,7 @@ void
 kiblnd_shutdown (lnet_ni_t *ni)
 {
         kib_net_t        *net = ni->ni_data;
-        rwlock_t         *g_lock = &kiblnd_data.kib_global_lock;
+        cfs_rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
         int               i;
         unsigned long     flags;
 
@@ -2289,11 +2293,11 @@ kiblnd_shutdown (lnet_ni_t *ni)
                 goto out;
 
         CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
-               atomic_read(&libcfs_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
-        write_lock_irqsave(g_lock, flags);
+        cfs_write_lock_irqsave(g_lock, flags);
         net->ibn_shutdown = 1;
-        write_unlock_irqrestore(g_lock, flags);
+        cfs_write_unlock_irqrestore(g_lock, flags);
 
         switch (net->ibn_init) {
         default:
@@ -2305,12 +2309,12 @@ kiblnd_shutdown (lnet_ni_t *ni)
 
                 /* Wait for all peer state to clean up */
                 i = 2;
-                while (atomic_read(&net->ibn_npeers) != 0) {
+                while (cfs_atomic_read(&net->ibn_npeers) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
                                "%s: waiting for %d peers to disconnect\n",
                                libcfs_nid2str(ni->ni_nid),
-                               atomic_read(&net->ibn_npeers));
+                               cfs_atomic_read(&net->ibn_npeers));
                         cfs_pause(cfs_time_seconds(1));
                 }
 
@@ -2322,7 +2326,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
                 /* fall through */
 
         case IBLND_INIT_NOTHING:
-                LASSERT (atomic_read(&net->ibn_nconns) == 0);
+                LASSERT (cfs_atomic_read(&net->ibn_nconns) == 0);
 
                 if (net->ibn_dev != NULL &&
                     net->ibn_dev->ibd_nnets == 0)
@@ -2332,7 +2336,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
         }
 
         CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
-               atomic_read(&libcfs_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
         net->ibn_init = IBLND_INIT_NOTHING;
         ni->ni_data = NULL;
@@ -2340,7 +2344,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
         LIBCFS_FREE(net, sizeof(*net));
 
 out:
-        if (list_empty(&kiblnd_data.kib_devs))
+        if (cfs_list_empty(&kiblnd_data.kib_devs))
                 kiblnd_base_shutdown();
         return;
 }
@@ -2356,27 +2360,28 @@ kiblnd_base_startup (void)
         PORTAL_MODULE_USE;
         memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
 
-        rwlock_init(&kiblnd_data.kib_global_lock);
+        cfs_rwlock_init(&kiblnd_data.kib_global_lock);
 
-        INIT_LIST_HEAD(&kiblnd_data.kib_devs);
+        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_devs);
 
         kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
         LIBCFS_ALLOC(kiblnd_data.kib_peers,
-                     sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
+                     sizeof(cfs_list_t) *
+                            kiblnd_data.kib_peer_hash_size);
         if (kiblnd_data.kib_peers == NULL) {
                 goto failed;
         }
         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
-                INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
+                CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
 
-        spin_lock_init(&kiblnd_data.kib_connd_lock);
-        INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
-        INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
-        init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+        cfs_spin_lock_init(&kiblnd_data.kib_connd_lock);
+        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+        cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
 
-        spin_lock_init(&kiblnd_data.kib_sched_lock);
-        INIT_LIST_HEAD(&kiblnd_data.kib_sched_conns);
-        init_waitqueue_head(&kiblnd_data.kib_sched_waitq);
+        cfs_spin_lock_init(&kiblnd_data.kib_sched_lock);
+        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_sched_conns);
+        cfs_waitq_init(&kiblnd_data.kib_sched_waitq);
 
         kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
 
@@ -2416,7 +2421,7 @@ kiblnd_startup (lnet_ni_t *ni)
         char                     *ifname;
         kib_dev_t                *ibdev = NULL;
         kib_net_t                *net;
-        struct list_head         *tmp;
+        cfs_list_t               *tmp;
         struct timeval            tv;
         int                       rc;
 
@@ -2435,7 +2440,7 @@ kiblnd_startup (lnet_ni_t *ni)
 
         memset(net, 0, sizeof(*net));
 
-        do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
         net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
 
         ni->ni_peertimeout    = *kiblnd_tunables.kib_peertimeout;
@@ -2462,8 +2467,8 @@ kiblnd_startup (lnet_ni_t *ni)
                 goto failed;
         }
 
-        list_for_each (tmp, &kiblnd_data.kib_devs) {
-                ibdev = list_entry(tmp, kib_dev_t, ibd_list);
+        cfs_list_for_each (tmp, &kiblnd_data.kib_devs) {
+                ibdev = cfs_list_entry(tmp, kib_dev_t, ibd_list);
 
                 if (!strcmp(&ibdev->ibd_ifname[0], ifname))
                         break;
diff --git a/lnet/klnds/o2iblnd/o2iblnd.h b/lnet/klnds/o2iblnd/o2iblnd.h
index 8ccb4df..ec44b8d 100644
@@ -89,7 +89,7 @@ typedef int gfp_t;
 
 /* tunables fixed at compile time */
 #ifdef CONFIG_SMP
-# define IBLND_N_SCHED      num_online_cpus()   /* # schedulers */
+# define IBLND_N_SCHED      cfs_num_online_cpus()   /* # schedulers */
 #else
 # define IBLND_N_SCHED      1                   /* # schedulers */
 #endif
@@ -187,7 +187,7 @@ kiblnd_concurrent_sends_v1(void)
 
 typedef struct
 {
-        struct list_head     ibd_list;          /* chain on kib_devs */
+        cfs_list_t           ibd_list;          /* chain on kib_devs */
         __u32                ibd_ifip;          /* IPoIB interface IP */
         char                 ibd_ifname[32];    /* IPoIB interface name */
         int                  ibd_nnets;         /* # nets extant */
@@ -216,7 +216,7 @@ typedef struct
 struct kib_pmr_pool;
 
 typedef struct {
-        struct list_head        pmr_list;               /* chain node */
+        cfs_list_t              pmr_list;               /* chain node */
         struct ib_phys_buf     *pmr_ipb;                /* physical buffer */
         struct ib_mr           *pmr_mr;                 /* IB MR */
         struct kib_pmr_pool    *pmr_pool;               /* owner of this MR */
@@ -229,8 +229,10 @@ struct kib_poolset;
 
 typedef int  (*kib_ps_pool_create_t)(struct kib_poolset *ps, int inc, struct kib_pool **pp_po);
 typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
-typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
-typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
+typedef void (*kib_ps_node_init_t)(struct kib_pool *po,
+                                   cfs_list_t *node);
+typedef void (*kib_ps_node_fini_t)(struct kib_pool *po,
+                                   cfs_list_t *node);
 
 struct kib_net;
 
@@ -238,10 +240,10 @@ struct kib_net;
 
 typedef struct kib_poolset
 {
-        spinlock_t              ps_lock;                /* serialize */
+        cfs_spinlock_t          ps_lock;                /* serialize */
         struct kib_net         *ps_net;                 /* network it belongs to */
         char                    ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
-        struct list_head        ps_pool_list;           /* list of pools */
+        cfs_list_t              ps_pool_list;           /* list of pools */
         cfs_time_t              ps_next_retry;          /* time stamp for retry if failed to allocate */
         int                     ps_increasing;          /* is allocating new pool */
         int                     ps_pool_size;           /* new pool size */
@@ -254,8 +256,8 @@ typedef struct kib_poolset
 
 typedef struct kib_pool
 {
-        struct list_head        po_list;                /* chain on pool list */
-        struct list_head        po_free_list;           /* pre-allocated node */
+        cfs_list_t              po_list;                /* chain on pool list */
+        cfs_list_t              po_free_list;           /* pre-allocated node */
         kib_poolset_t          *po_owner;               /* pool_set of this pool */
         cfs_time_t              po_deadline;            /* deadline of this pool */
         int                     po_allocated;           /* # of elements in use */
@@ -283,9 +285,9 @@ typedef struct kib_pmr_pool {
 
 typedef struct
 {
-        spinlock_t              fps_lock;               /* serialize */
+        cfs_spinlock_t          fps_lock;               /* serialize */
         struct kib_net         *fps_net;                /* IB network */
-        struct list_head        fps_pool_list;          /* FMR pool list */
+        cfs_list_t              fps_pool_list;          /* FMR pool list */
         __u64                   fps_version;            /* validity stamp */
         int                     fps_increasing;         /* is allocating new pool */
         cfs_time_t              fps_next_retry;         /* time stamp for retry if failed to allocate */
@@ -293,7 +295,7 @@ typedef struct
 
 typedef struct
 {
-        struct list_head        fpo_list;               /* chain on pool list */
+        cfs_list_t              fpo_list;               /* chain on pool list */
         kib_fmr_poolset_t      *fpo_owner;              /* owner of this pool */
         struct ib_fmr_pool     *fpo_fmr_pool;           /* IB FMR pool */
         cfs_time_t              fpo_deadline;           /* deadline of this pool */
@@ -313,8 +315,8 @@ typedef struct kib_net
         unsigned int         ibn_with_fmr:1;    /* FMR? */
         unsigned int         ibn_with_pmr:1;    /* PMR? */
 
-        atomic_t             ibn_npeers;        /* # peers extant */
-        atomic_t             ibn_nconns;        /* # connections extant */
+        cfs_atomic_t         ibn_npeers;        /* # peers extant */
+        cfs_atomic_t         ibn_nconns;        /* # connections extant */
 
         kib_tx_poolset_t     ibn_tx_ps;         /* tx pool-set */
         kib_fmr_poolset_t    ibn_fmr_ps;        /* fmr pool-set */
@@ -325,26 +327,26 @@ typedef struct kib_net
 
 typedef struct
 {
-        int                  kib_init;          /* initialisation state */
-        int                  kib_shutdown;      /* shut down? */
-        struct list_head     kib_devs;          /* IB devices extant */
-        atomic_t             kib_nthreads;      /* # live threads */
-        rwlock_t             kib_global_lock;   /* stabilize net/dev/peer/conn ops */
+        int               kib_init;        /* initialisation state */
+        int               kib_shutdown;    /* shut down? */
+        cfs_list_t        kib_devs;        /* IB devices extant */
+        cfs_atomic_t      kib_nthreads;    /* # live threads */
+        cfs_rwlock_t      kib_global_lock; /* stabilize net/dev/peer/conn ops */
 
-        struct list_head    *kib_peers;         /* hash table of all my known peers */
-        int                  kib_peer_hash_size; /* size of kib_peers */
+        cfs_list_t       *kib_peers;  /* hash table of all my known peers */
+        int               kib_peer_hash_size; /* size of kib_peers */
 
-        void                *kib_connd;         /* the connd task (serialisation assertions) */
-        struct list_head     kib_connd_conns;   /* connections to setup/teardown */
-        struct list_head     kib_connd_zombies; /* connections with zero refcount */
-        wait_queue_head_t    kib_connd_waitq;   /* connection daemon sleeps here */
-        spinlock_t           kib_connd_lock;    /* serialise */
+        void             *kib_connd;       /* the connd task (serialisation assertions) */
+        cfs_list_t        kib_connd_conns; /* connections to setup/teardown */
+        cfs_list_t        kib_connd_zombies; /* connections with zero refcount */
+        cfs_waitq_t       kib_connd_waitq; /* connection daemon sleeps here */
+        cfs_spinlock_t    kib_connd_lock;  /* serialise */
 
-        wait_queue_head_t    kib_sched_waitq;   /* schedulers sleep here */
-        struct list_head     kib_sched_conns;   /* conns to check for rx completions */
-        spinlock_t           kib_sched_lock;    /* serialise */
+        cfs_waitq_t       kib_sched_waitq; /* schedulers sleep here */
+        cfs_list_t        kib_sched_conns; /* conns to check for rx completions */
+        cfs_spinlock_t    kib_sched_lock;  /* serialise */
 
-        struct ib_qp_attr    kib_error_qpa;      /* QP->ERROR */
+        struct ib_qp_attr kib_error_qpa;   /* QP->ERROR */
 } kib_data_t;
 
 #define IBLND_INIT_NOTHING         0
@@ -474,7 +476,7 @@ typedef struct {
 
 typedef struct kib_rx                           /* receive message */
 {
-        struct list_head          rx_list;      /* queue for attention */
+        cfs_list_t                rx_list;      /* queue for attention */
         struct kib_conn          *rx_conn;      /* owning conn */
         int                       rx_nob;       /* # bytes received (-1 while posted) */
         enum ib_wc_status         rx_status;    /* completion status */
@@ -492,7 +494,7 @@ typedef struct kib_rx                           /* receive message */
 
 typedef struct kib_tx                           /* transmit message */
 {
-        struct list_head          tx_list;      /* queue on idle_txs ibc_tx_queue etc. */
+        cfs_list_t                tx_list;      /* queue on idle_txs ibc_tx_queue etc. */
         kib_tx_pool_t            *tx_pool;      /* pool I'm from */
         struct kib_conn          *tx_conn;      /* owning conn */
         short                     tx_sending;   /* # tx callbacks outstanding */
@@ -527,36 +529,36 @@ typedef struct kib_connvars
 
 typedef struct kib_conn
 {
-        struct kib_peer    *ibc_peer;           /* owning peer */
-        struct list_head    ibc_list;           /* stash on peer's conn list */
-        struct list_head    ibc_sched_list;     /* schedule for attention */
-        __u16               ibc_version;        /* version of connection */
-        __u64               ibc_incarnation;    /* which instance of the peer */
-        atomic_t            ibc_refcount;       /* # users */
-        int                 ibc_state;          /* what's happening */
-        int                 ibc_nsends_posted;  /* # uncompleted sends */
-        int                 ibc_noops_posted;   /* # uncompleted NOOPs */
-        int                 ibc_credits;        /* # credits I have */
-        int                 ibc_outstanding_credits; /* # credits to return */
-        int                 ibc_reserved_credits;/* # ACK/DONE msg credits */
-        int                 ibc_comms_error;    /* set on comms error */
-        int                 ibc_nrx:16;         /* receive buffers owned */
-        int                 ibc_scheduled:1;    /* scheduled for attention */
-        int                 ibc_ready:1;        /* CQ callback fired */
-        unsigned long       ibc_last_send;      /* time of last send */
-        struct list_head    ibc_early_rxs;      /* rxs completed before ESTABLISHED */
-        struct list_head    ibc_tx_queue;       /* sends that need a credit */
-        struct list_head    ibc_tx_queue_nocred;/* sends that don't need a credit */
-        struct list_head    ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
-        struct list_head    ibc_active_txs;     /* active tx awaiting completion */
-        spinlock_t          ibc_lock;           /* serialise */
-        kib_rx_t           *ibc_rxs;            /* the rx descs */
-        kib_pages_t        *ibc_rx_pages;       /* premapped rx msg pages */
-
-        struct rdma_cm_id  *ibc_cmid;           /* CM id */
-        struct ib_cq       *ibc_cq;             /* completion queue */
-
-        kib_connvars_t     *ibc_connvars;       /* in-progress connection state */
+        struct kib_peer     *ibc_peer;          /* owning peer */
+        cfs_list_t           ibc_list;          /* stash on peer's conn list */
+        cfs_list_t           ibc_sched_list;    /* schedule for attention */
+        __u16                ibc_version;       /* version of connection */
+        __u64                ibc_incarnation;   /* which instance of the peer */
+        cfs_atomic_t         ibc_refcount;      /* # users */
+        int                  ibc_state;         /* what's happening */
+        int                  ibc_nsends_posted; /* # uncompleted sends */
+        int                  ibc_noops_posted;  /* # uncompleted NOOPs */
+        int                  ibc_credits;       /* # credits I have */
+        int                  ibc_outstanding_credits; /* # credits to return */
+        int                  ibc_reserved_credits; /* # ACK/DONE msg credits */
+        int                  ibc_comms_error;   /* set on comms error */
+        int                  ibc_nrx:16;        /* receive buffers owned */
+        int                  ibc_scheduled:1;   /* scheduled for attention */
+        int                  ibc_ready:1;       /* CQ callback fired */
+        unsigned long        ibc_last_send;     /* time of last send */
+        cfs_list_t           ibc_early_rxs;     /* rxs completed before ESTABLISHED */
+        cfs_list_t           ibc_tx_queue;       /* sends that need a credit */
+        cfs_list_t           ibc_tx_queue_nocred;/* sends that don't need a credit */
+        cfs_list_t           ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
+        cfs_list_t           ibc_active_txs;     /* active tx awaiting completion */
+        cfs_spinlock_t       ibc_lock;           /* serialise */
+        kib_rx_t            *ibc_rxs;            /* the rx descs */
+        kib_pages_t         *ibc_rx_pages;       /* premapped rx msg pages */
+
+        struct rdma_cm_id   *ibc_cmid;           /* CM id */
+        struct ib_cq        *ibc_cq;             /* completion queue */
+
+        kib_connvars_t      *ibc_connvars;       /* in-progress connection state */
 } kib_conn_t;
 
 #define IBLND_CONN_INIT               0         /* being initialised */
@@ -568,18 +570,18 @@ typedef struct kib_conn
 
 typedef struct kib_peer
 {
-        struct list_head    ibp_list;           /* stash on global peer list */
-        lnet_nid_t          ibp_nid;            /* who's on the other end(s) */
-        lnet_ni_t          *ibp_ni;             /* LNet interface */
-        atomic_t            ibp_refcount;       /* # users */
-        struct list_head    ibp_conns;          /* all active connections */
-        struct list_head    ibp_tx_queue;       /* msgs waiting for a conn */
-        __u16               ibp_version;        /* version of peer */
-        __u64               ibp_incarnation;    /* incarnation of peer */
-        int                 ibp_connecting;     /* current active connection attempts */
-        int                 ibp_accepting;      /* current passive connection attempts */
-        int                 ibp_error;          /* errno on closing this peer */
-        cfs_time_t          ibp_last_alive;     /* when (in jiffies) I was last alive */
+        cfs_list_t           ibp_list;           /* stash on global peer list */
+        lnet_nid_t           ibp_nid;            /* who's on the other end(s) */
+        lnet_ni_t           *ibp_ni;             /* LNet interface */
+        cfs_atomic_t         ibp_refcount;       /* # users */
+        cfs_list_t           ibp_conns;          /* all active connections */
+        cfs_list_t           ibp_tx_queue;       /* msgs waiting for a conn */
+        __u16                ibp_version;        /* version of peer */
+        __u64                ibp_incarnation;    /* incarnation of peer */
+        int                  ibp_connecting;     /* current active connection attempts */
+        int                  ibp_accepting;      /* current passive connection attempts */
+        int                  ibp_error;          /* errno on closing this peer */
+        cfs_time_t           ibp_last_alive;     /* when (in jiffies) I was last alive */
 } kib_peer_t;
 
 extern kib_data_t      kiblnd_data;
@@ -587,50 +589,51 @@ extern kib_data_t      kiblnd_data;
 #define kiblnd_conn_addref(conn)                                \
 do {                                                            \
         CDEBUG(D_NET, "conn[%p] (%d)++\n",                      \
-               (conn), atomic_read(&(conn)->ibc_refcount));     \
-        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);        \
-        atomic_inc(&(conn)->ibc_refcount);                      \
+               (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+        LASSERT(cfs_atomic_read(&(conn)->ibc_refcount) > 0);    \
+        cfs_atomic_inc(&(conn)->ibc_refcount);                  \
 } while (0)
 
-#define kiblnd_conn_decref(conn)                                              \
-do {                                                                          \
-        unsigned long   flags;                                                \
-                                                                              \
-        CDEBUG(D_NET, "conn[%p] (%d)--\n",                                    \
-               (conn), atomic_read(&(conn)->ibc_refcount));                   \
-        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);                      \
-        if (atomic_dec_and_test(&(conn)->ibc_refcount)) {                     \
-                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);        \
-                list_add_tail(&(conn)->ibc_list,                              \
-                              &kiblnd_data.kib_connd_zombies);                \
-                wake_up(&kiblnd_data.kib_connd_waitq);                        \
-                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);   \
-        }                                                                     \
+#define kiblnd_conn_decref(conn)                                               \
+do {                                                                           \
+        unsigned long   flags;                                                 \
+                                                                               \
+        CDEBUG(D_NET, "conn[%p] (%d)--\n",                                     \
+               (conn), cfs_atomic_read(&(conn)->ibc_refcount));                \
+        LASSERT(cfs_atomic_read(&(conn)->ibc_refcount) > 0);                   \
+        if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) {                  \
+                cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);     \
+                cfs_list_add_tail(&(conn)->ibc_list,                           \
+                                  &kiblnd_data.kib_connd_zombies);             \
+                cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);                \
+                cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
+        }                                                                      \
 } while (0)
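
Note the rewritten decref defers teardown: the final reference queues
the conn on kib_connd_zombies and signals connd, so destruction never
runs in the (possibly atomic) context that dropped the last ref.  A
sketch of the assumed consumer side (connd itself is outside this hunk;
kiblnd_reap_zombies is a hypothetical name, kiblnd_destroy_conn the
assumed teardown helper):

    static void kiblnd_reap_zombies(void)   /* hypothetical helper */
    {
            kib_conn_t    *conn;
            unsigned long  flags;

            /* Drain zombies under the connd lock, destroying each conn
             * back in thread context. */
            cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
            while (!cfs_list_empty(&kiblnd_data.kib_connd_zombies)) {
                    conn = cfs_list_entry(kiblnd_data.kib_connd_zombies.next,
                                          kib_conn_t, ibc_list);
                    cfs_list_del(&conn->ibc_list);
                    cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
                                               flags);
                    kiblnd_destroy_conn(conn);  /* assumed teardown helper */
                    cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
            }
            cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
    }
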
 
 #define kiblnd_peer_addref(peer)                                \
 do {                                                            \
         CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                \
                (peer), libcfs_nid2str((peer)->ibp_nid),         \
-               atomic_read (&(peer)->ibp_refcount));            \
-        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);        \
-        atomic_inc(&(peer)->ibp_refcount);                      \
+               cfs_atomic_read (&(peer)->ibp_refcount));        \
+        LASSERT(cfs_atomic_read(&(peer)->ibp_refcount) > 0);    \
+        cfs_atomic_inc(&(peer)->ibp_refcount);                  \
 } while (0)
 
 #define kiblnd_peer_decref(peer)                                \
 do {                                                            \
         CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                \
                (peer), libcfs_nid2str((peer)->ibp_nid),         \
-               atomic_read (&(peer)->ibp_refcount));            \
-        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);        \
-        if (atomic_dec_and_test(&(peer)->ibp_refcount))         \
+               cfs_atomic_read (&(peer)->ibp_refcount));        \
+        LASSERT(cfs_atomic_read(&(peer)->ibp_refcount) > 0);    \
+        if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount))     \
                 kiblnd_destroy_peer(peer);                      \
 } while (0)
 
-static inline struct list_head *
+static inline cfs_list_t *
 kiblnd_nid2peerlist (lnet_nid_t nid)
 {
-        unsigned int hash = ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
+        unsigned int hash =
+                ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
 
         return (&kiblnd_data.kib_peers [hash]);
 }
@@ -639,24 +642,24 @@ static inline int
 kiblnd_peer_active (kib_peer_t *peer)
 {
         /* Am I in the peer hash table? */
-        return (!list_empty(&peer->ibp_list));
+        return (!cfs_list_empty(&peer->ibp_list));
 }
 
 static inline kib_conn_t *
 kiblnd_get_conn_locked (kib_peer_t *peer)
 {
-        LASSERT (!list_empty(&peer->ibp_conns));
+        LASSERT (!cfs_list_empty(&peer->ibp_conns));
 
         /* just return the first connection */
-        return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+        return cfs_list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
 }
 
 static inline int
 kiblnd_send_keepalive(kib_conn_t *conn)
 {
         return (*kiblnd_tunables.kib_keepalive > 0) &&
-                time_after(jiffies, conn->ibc_last_send +
-                           *kiblnd_tunables.kib_keepalive*HZ);
+                cfs_time_after(jiffies, conn->ibc_last_send +
+                               *kiblnd_tunables.kib_keepalive*CFS_HZ);
 }
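
The keepalive test above assumes cfs_time_after mirrors the kernel's
wrap-safe time_after and that CFS_HZ stands in for HZ; a usage sketch:

    /* Sketch: true once more than 'keepalive' seconds of jiffies have
     * elapsed since 'last_send', safe across jiffies wraparound. */
    static inline int keepalive_due(unsigned long last_send, int keepalive)
    {
            return keepalive > 0 &&
                   cfs_time_after(jiffies, last_send + keepalive * CFS_HZ);
    }
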
 
 static inline int
@@ -669,14 +672,15 @@ kiblnd_send_noop(kib_conn_t *conn)
             !kiblnd_send_keepalive(conn))
                 return 0; /* No need to send NOOP */
 
-        if (!list_empty(&conn->ibc_tx_queue_nocred))
+        if (!cfs_list_empty(&conn->ibc_tx_queue_nocred))
                 return 0; /* NOOP can be piggybacked */
 
         if (!IBLND_OOB_CAPABLE(conn->ibc_version))
-                return list_empty(&conn->ibc_tx_queue); /* can't piggyback? */
+                /* can't piggyback? */
+                return cfs_list_empty(&conn->ibc_tx_queue);
 
         /* No tx to piggyback NOOP onto or no credit to send a tx */
-        return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0);
+        return (cfs_list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0);
 }
 
 static inline void
@@ -687,7 +691,7 @@ kiblnd_abort_receives(kib_conn_t *conn)
 }
 
 static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str (kib_conn_t *conn, cfs_list_t *q)
 {
         if (q == &conn->ibc_tx_queue)
                 return "tx_queue";
@@ -739,7 +743,7 @@ static inline void
 kiblnd_set_conn_state (kib_conn_t *conn, int state)
 {
         conn->ibc_state = state;
-        mb();
+        cfs_mb();
 }
 
 static inline void
@@ -928,8 +932,8 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn);
 int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
                   kib_rdma_desc_t *rd, int nfrags);
 void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+void kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node);
+cfs_list_t *kiblnd_pool_alloc_node(kib_poolset_t *ps);
 
 int  kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
                          int npages, __u64 iov, kib_fmr_t *fmr);
@@ -983,7 +987,8 @@ void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
 void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
 void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
 void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status);
+void kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
+                         int status);
 void kiblnd_check_sends (kib_conn_t *conn);
 
 void kiblnd_qp_event(struct ib_event *event, void *arg);
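
The substitutions throughout this change (wake_up to cfs_waitq_signal,
wake_up_all to cfs_waitq_broadcast, init_waitqueue_head to
cfs_waitq_init) imply a thin aliasing layer on Linux; an illustrative
sketch of the assumed mapping:

    /* Assumed Linux mapping for the renamed wait-queue primitives. */
    typedef wait_queue_head_t cfs_waitq_t;

    #define cfs_waitq_init(wq)       init_waitqueue_head(wq)
    #define cfs_waitq_signal(wq)     wake_up(wq)
    #define cfs_waitq_broadcast(wq)  wake_up_all(wq)
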
diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 01c4621..f869420 100644
@@ -49,7 +49,7 @@ kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
         int         i;
 
         LASSERT (net != NULL);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
         LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
         LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer response */
@@ -84,14 +84,14 @@ kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
 }
 
 void
-kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
+kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int status)
 {
         kib_tx_t *tx;
 
-        while (!list_empty (txlist)) {
-                tx = list_entry (txlist->next, kib_tx_t, tx_list);
+        while (!cfs_list_empty (txlist)) {
+                tx = cfs_list_entry (txlist->next, kib_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 /* complete now */
                 tx->tx_waiting = 0;
                 tx->tx_status = status;
@@ -102,9 +102,9 @@ kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
 kib_tx_t *
 kiblnd_get_idle_tx (lnet_ni_t *ni)
 {
-        kib_net_t        *net = (kib_net_t *)ni->ni_data;
-        struct list_head *node;
-        kib_tx_t         *tx;
+        kib_net_t            *net = (kib_net_t *)ni->ni_data;
+        cfs_list_t           *node;
+        kib_tx_t             *tx;
 
         node = kiblnd_pool_alloc_node(&net->ibn_tx_ps.tps_poolset);
         if (node == NULL)
@@ -131,10 +131,10 @@ kiblnd_drop_rx (kib_rx_t *rx)
         kib_conn_t         *conn = rx->rx_conn;
         unsigned long       flags;
         
-        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
         LASSERT (conn->ibc_nrx > 0);
         conn->ibc_nrx--;
-        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
 
         kiblnd_conn_decref(conn);
 }
@@ -149,7 +149,7 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
         int                 rc;
 
         LASSERT (net != NULL);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
                  credit == IBLND_POSTRX_PEER_CREDIT ||
                  credit == IBLND_POSTRX_RSRVD_CREDIT);
@@ -195,12 +195,12 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
         if (credit == IBLND_POSTRX_NO_CREDIT)
                 return 0;
 
-        spin_lock(&conn->ibc_lock);
+        cfs_spin_lock(&conn->ibc_lock);
         if (credit == IBLND_POSTRX_PEER_CREDIT)
                 conn->ibc_outstanding_credits++;
         else
                 conn->ibc_reserved_credits++;
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
 
         kiblnd_check_sends(conn);
         return 0;
@@ -209,10 +209,10 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
 kib_tx_t *
 kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 {
-        struct list_head   *tmp;
+        cfs_list_t   *tmp;
 
-        list_for_each(tmp, &conn->ibc_active_txs) {
-                kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+        cfs_list_for_each(tmp, &conn->ibc_active_txs) {
+                kib_tx_t *tx = cfs_list_entry(tmp, kib_tx_t, tx_list);
 
                 LASSERT (!tx->tx_queued);
                 LASSERT (tx->tx_sending != 0 || tx->tx_waiting);
@@ -238,11 +238,11 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
         lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
         int          idle;
 
-        spin_lock(&conn->ibc_lock);
+        cfs_spin_lock(&conn->ibc_lock);
 
         tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
         if (tx == NULL) {
-                spin_unlock(&conn->ibc_lock);
+                cfs_spin_unlock(&conn->ibc_lock);
 
                 CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
                       txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
@@ -262,9 +262,9 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 
         idle = !tx->tx_queued && (tx->tx_sending == 0);
         if (idle)
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
 
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
 
         if (idle)
                 kiblnd_tx_done(ni, tx);
@@ -304,20 +304,22 @@ kiblnd_handle_rx (kib_rx_t *rx)
         LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
         CDEBUG (D_NET, "Received %x[%d] from %s\n",
-                msg->ibm_type, credits, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+                msg->ibm_type, credits,
+                libcfs_nid2str(conn->ibc_peer->ibp_nid));
 
         if (credits != 0) {
                 /* Have I received credits that will let me send? */
-                spin_lock(&conn->ibc_lock);
+                cfs_spin_lock(&conn->ibc_lock);
 
                 if (conn->ibc_credits + credits >
                     IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
                         rc2 = conn->ibc_credits;
-                        spin_unlock(&conn->ibc_lock);
+                        cfs_spin_unlock(&conn->ibc_lock);
 
                         CERROR("Bad credits from %s: %d + %d > %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
-                               rc2, credits, IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+                               rc2, credits,
+                               IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
 
                         kiblnd_close_conn(conn, -EPROTO);
                         kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
@@ -326,7 +328,7 @@ kiblnd_handle_rx (kib_rx_t *rx)
 
                 conn->ibc_credits += credits;
 
-                spin_unlock(&conn->ibc_lock);
+                cfs_spin_unlock(&conn->ibc_lock);
                 kiblnd_check_sends(conn);
         }
 
@@ -362,7 +364,8 @@ kiblnd_handle_rx (kib_rx_t *rx)
                 break;
 
         case IBLND_MSG_PUT_NAK:
-                CWARN ("PUT_NACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
+                CWARN ("PUT_NACK from %s\n",
+                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                 post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                 kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                          msg->ibm_u.completion.ibcm_status,
@@ -372,12 +375,12 @@ kiblnd_handle_rx (kib_rx_t *rx)
         case IBLND_MSG_PUT_ACK:
                 post_credit = IBLND_POSTRX_RSRVD_CREDIT;
 
-                spin_lock(&conn->ibc_lock);
+                cfs_spin_lock(&conn->ibc_lock);
                 tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                                    msg->ibm_u.putack.ibpam_src_cookie);
                 if (tx != NULL)
-                        list_del(&tx->tx_list);
-                spin_unlock(&conn->ibc_lock);
+                        cfs_list_del(&tx->tx_list);
+                cfs_spin_unlock(&conn->ibc_lock);
 
                 if (tx == NULL) {
                         CERROR("Unmatched PUT_ACK from %s\n",
@@ -401,10 +404,10 @@ kiblnd_handle_rx (kib_rx_t *rx)
                         CERROR("Can't setup rdma for PUT to %s: %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
 
-                spin_lock(&conn->ibc_lock);
+                cfs_spin_lock(&conn->ibc_lock);
                 tx->tx_waiting = 0;             /* clear waiting and queue atomically */
                 kiblnd_queue_tx_locked(tx, conn);
-                spin_unlock(&conn->ibc_lock);
+                cfs_spin_unlock(&conn->ibc_lock);
                 break;
 
         case IBLND_MSG_PUT_DONE:
@@ -486,17 +489,17 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
         /* racing with connection establishment/teardown! */
 
         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-                rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
+                cfs_rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
                 unsigned long  flags;
 
-                write_lock_irqsave(g_lock, flags);
+                cfs_write_lock_irqsave(g_lock, flags);
                 /* must check holding global lock to eliminate race */
                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
                         return;
                 }
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
         }
         kiblnd_handle_rx(rx);
         return;
@@ -794,7 +797,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         }
 
         /* NB don't drop ibc_lock before bumping tx_sending */
-        list_del(&tx->tx_list);
+        cfs_list_del(&tx->tx_list);
         tx->tx_queued = 0;
 
         if (msg->ibm_type == IBLND_MSG_NOOP &&
@@ -804,9 +807,9 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
                 /* OK to drop when posted enough NOOPs, since
                  * kiblnd_check_sends will queue NOOP again when
                  * posted NOOPs complete */
-                spin_unlock(&conn->ibc_lock);
+                cfs_spin_unlock(&conn->ibc_lock);
                 kiblnd_tx_done(peer->ibp_ni, tx);
-                spin_lock(&conn->ibc_lock);
+                cfs_spin_lock(&conn->ibc_lock);
                 CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                        libcfs_nid2str(peer->ibp_nid),
                        conn->ibc_noops_posted);
@@ -829,7 +832,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
          * tx_sending is non-zero if we've not done the tx_complete()
          * from the first send; hence the ++ rather than = below. */
         tx->tx_sending++;
-        list_add(&tx->tx_list, &conn->ibc_active_txs);
+        cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
 
         /* I'm still holding ibc_lock! */
         if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
@@ -856,9 +859,9 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
 
         done = (tx->tx_sending == 0);
         if (done)
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
 
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
 
         if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                 CERROR("Error %d posting transmit to %s\n",
@@ -888,7 +891,7 @@ kiblnd_check_sends (kib_conn_t *conn)
                 return;
         }
 
-        spin_lock(&conn->ibc_lock);
+        cfs_spin_lock(&conn->ibc_lock);
 
         LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
         LASSERT (!IBLND_OOB_CAPABLE(ver) ||
@@ -896,22 +899,22 @@ kiblnd_check_sends (kib_conn_t *conn)
         LASSERT (conn->ibc_reserved_credits >= 0);
 
         while (conn->ibc_reserved_credits > 0 &&
-               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
-                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
-                                kib_tx_t, tx_list);
-                list_del(&tx->tx_list);
-                list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+               !cfs_list_empty(&conn->ibc_tx_queue_rsrvd)) {
+                tx = cfs_list_entry(conn->ibc_tx_queue_rsrvd.next,
+                                    kib_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);
+                cfs_list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
                 conn->ibc_reserved_credits--;
         }
 
         if (kiblnd_send_noop(conn)) {
-                spin_unlock(&conn->ibc_lock);
+                cfs_spin_unlock(&conn->ibc_lock);
 
                 tx = kiblnd_get_idle_tx(ni);
                 if (tx != NULL)
                         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
 
-                spin_lock(&conn->ibc_lock);
+                cfs_spin_lock(&conn->ibc_lock);
                 if (tx != NULL)
                         kiblnd_queue_tx_locked(tx, conn);
         }
@@ -919,14 +922,14 @@ kiblnd_check_sends (kib_conn_t *conn)
         for (;;) {
                 int credit;
 
-                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
+                if (!cfs_list_empty(&conn->ibc_tx_queue_nocred)) {
                         credit = 0;
-                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
-                                        kib_tx_t, tx_list);
-                } else if (!list_empty(&conn->ibc_tx_queue)) {
+                        tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next,
+                                            kib_tx_t, tx_list);
+                } else if (!cfs_list_empty(&conn->ibc_tx_queue)) {
                         credit = 1;
-                        tx = list_entry(conn->ibc_tx_queue.next,
-                                        kib_tx_t, tx_list);
+                        tx = cfs_list_entry(conn->ibc_tx_queue.next,
+                                            kib_tx_t, tx_list);
                 } else
                         break;
 
@@ -934,7 +937,7 @@ kiblnd_check_sends (kib_conn_t *conn)
                         break;
         }
 
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
 }
 
 void
@@ -959,7 +962,7 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
                 kiblnd_peer_alive(conn->ibc_peer);
         }
 
-        spin_lock(&conn->ibc_lock);
+        cfs_spin_lock(&conn->ibc_lock);
 
         /* I could be racing with rdma completion.  Whoever makes 'tx' idle
          * gets to free it, which also drops its ref on 'conn'. */
@@ -978,11 +981,11 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
                !tx->tx_waiting &&               /* Not waiting for peer */
                !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
         if (idle)
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
 
         kiblnd_conn_addref(conn);               /* 1 ref for me.... */
 
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
 
         if (idle)
                 kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
@@ -1040,7 +1043,7 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
         int                dstidx;
         int                wrknob;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (tx->tx_nwrq == 0);
         LASSERT (type == IBLND_MSG_GET_DONE ||
                  type == IBLND_MSG_PUT_DONE);
@@ -1115,14 +1118,14 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
 void
 kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
 {
-        struct list_head   *q;
+        cfs_list_t   *q;
 
         LASSERT (tx->tx_nwrq > 0);              /* work items set up */
         LASSERT (!tx->tx_queued);               /* not queued for sending already */
         LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
         tx->tx_queued = 1;
-        tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
+        tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * CFS_HZ);
 
         if (tx->tx_conn == NULL) {
                 kiblnd_conn_addref(conn);
@@ -1162,15 +1165,15 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
                 break;
         }
 
-        list_add_tail(&tx->tx_list, q);
+        cfs_list_add_tail(&tx->tx_list, q);
 }
 
 void
 kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
 {
-        spin_lock(&conn->ibc_lock);
+        cfs_spin_lock(&conn->ibc_lock);
         kiblnd_queue_tx_locked(tx, conn);
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
 
         kiblnd_check_sends(conn);
 }
@@ -1236,7 +1239,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
         kib_peer_t        *peer;
         kib_peer_t        *peer2;
         kib_conn_t        *conn;
-        rwlock_t          *g_lock = &kiblnd_data.kib_global_lock;
+        cfs_rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
         unsigned long      flags;
         int                rc;
 
@@ -1248,15 +1251,15 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 
         /* First time, just use a read lock since I expect to find my peer
          * connected */
-        read_lock_irqsave(g_lock, flags);
+        cfs_read_lock_irqsave(g_lock, flags);
 
         peer = kiblnd_find_peer_locked(nid);
-        if (peer != NULL && !list_empty(&peer->ibp_conns)) {
+        if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) {
                 /* Found a peer with an established connection */
                 conn = kiblnd_get_conn_locked(peer);
                 kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                read_unlock_irqrestore(g_lock, flags);
+                cfs_read_unlock_irqrestore(g_lock, flags);
 
                 if (tx != NULL)
                         kiblnd_queue_tx(tx, conn);
@@ -1264,24 +1267,25 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 return;
         }
 
-        read_unlock(g_lock);
+        cfs_read_unlock(g_lock);
         /* Re-try with a write lock */
-        write_lock(g_lock);
+        cfs_write_lock(g_lock);
 
         peer = kiblnd_find_peer_locked(nid);
         if (peer != NULL) {
-                if (list_empty(&peer->ibp_conns)) {
+                if (cfs_list_empty(&peer->ibp_conns)) {
                         /* found a peer, but it's still connecting... */
                         LASSERT (peer->ibp_connecting != 0 ||
                                  peer->ibp_accepting != 0);
                         if (tx != NULL)
-                                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
-                        write_unlock_irqrestore(g_lock, flags);
+                                cfs_list_add_tail(&tx->tx_list,
+                                                  &peer->ibp_tx_queue);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
                 } else {
                         conn = kiblnd_get_conn_locked(peer);
                         kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
 
                         if (tx != NULL)
                                 kiblnd_queue_tx(tx, conn);
@@ -1290,7 +1294,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 return;
         }
 
-        write_unlock_irqrestore(g_lock, flags);
+        cfs_write_unlock_irqrestore(g_lock, flags);
 
         /* Allocate a peer ready to add to the peer table and retry */
         rc = kiblnd_create_peer(ni, &peer, nid);
@@ -1304,22 +1308,23 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 return;
         }
 
-        write_lock_irqsave(g_lock, flags);
+        cfs_write_lock_irqsave(g_lock, flags);
 
         peer2 = kiblnd_find_peer_locked(nid);
         if (peer2 != NULL) {
-                if (list_empty(&peer2->ibp_conns)) {
+                if (cfs_list_empty(&peer2->ibp_conns)) {
                         /* found a peer, but it's still connecting... */
                         LASSERT (peer2->ibp_connecting != 0 ||
                                  peer2->ibp_accepting != 0);
                         if (tx != NULL)
-                                list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue);
-                        write_unlock_irqrestore(g_lock, flags);
+                                cfs_list_add_tail(&tx->tx_list,
+                                                  &peer2->ibp_tx_queue);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
                 } else {
                         conn = kiblnd_get_conn_locked(peer2);
                         kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
 
                         if (tx != NULL)
                                 kiblnd_queue_tx(tx, conn);
@@ -1338,12 +1343,12 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
         LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
 
         if (tx != NULL)
-                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
+                cfs_list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
 
         kiblnd_peer_addref(peer);
-        list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+        cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
 
-        write_unlock_irqrestore(g_lock, flags);
+        cfs_write_unlock_irqrestore(g_lock, flags);
 
         kiblnd_connect_peer(peer);
         kiblnd_peer_decref(peer);
@@ -1376,7 +1381,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         LASSERT (payload_niov <= LNET_MAX_IOV);
 
         /* Thread context */
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         /* payload is either all vaddrs or all pages */
         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
 
@@ -1597,7 +1602,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
         int          rc = 0;
 
         LASSERT (mlen <= rlen);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         /* Either all pages or all vaddrs */
         LASSERT (!(kiov != NULL && iov != NULL));
 
@@ -1698,19 +1703,19 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 int
 kiblnd_thread_start (int (*fn)(void *arg), void *arg)
 {
-        long    pid = kernel_thread (fn, arg, 0);
+        long    pid = cfs_kernel_thread (fn, arg, 0);
 
         if (pid < 0)
                 return ((int)pid);
 
-        atomic_inc (&kiblnd_data.kib_nthreads);
+        cfs_atomic_inc (&kiblnd_data.kib_nthreads);
         return (0);
 }
 
 void
 kiblnd_thread_fini (void)
 {
-        atomic_dec (&kiblnd_data.kib_nthreads);
+        cfs_atomic_dec (&kiblnd_data.kib_nthreads);
 }
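
kiblnd_thread_start and kiblnd_thread_fini above rely on renamed
process primitives; the assumed thin Linux aliases (illustrative):

    /* Assumed Linux aliases behind the renamed process helpers. */
    #define cfs_kernel_thread(fn, arg, flags)  kernel_thread(fn, arg, flags)
    #define cfs_in_interrupt()                 in_interrupt()
    #define cfs_num_online_cpus()              num_online_cpus()
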
 
 void
@@ -1718,7 +1723,7 @@ kiblnd_peer_alive (kib_peer_t *peer)
 {
         /* This is racy, but everyone's only writing cfs_time_current() */
         peer->ibp_last_alive = cfs_time_current();
-        mb();
+        cfs_mb();
 }
 
 void
@@ -1728,9 +1733,9 @@ kiblnd_peer_notify (kib_peer_t *peer)
         cfs_time_t    last_alive = 0;
         unsigned long flags;
 
-        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        if (list_empty(&peer->ibp_conns) &&
+        if (cfs_list_empty(&peer->ibp_conns) &&
             peer->ibp_accepting == 0 &&
             peer->ibp_connecting == 0 &&
             peer->ibp_error != 0) {
@@ -1740,7 +1745,7 @@ kiblnd_peer_notify (kib_peer_t *peer)
                 last_alive = peer->ibp_last_alive;
         }
 
-        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         if (error != 0)
                 lnet_notify(peer->ibp_ni,
@@ -1768,25 +1773,25 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 return; /* already being handled  */
 
         if (error == 0 &&
-            list_empty(&conn->ibc_tx_queue) &&
-            list_empty(&conn->ibc_tx_queue_rsrvd) &&
-            list_empty(&conn->ibc_tx_queue_nocred) &&
-            list_empty(&conn->ibc_active_txs)) {
+            cfs_list_empty(&conn->ibc_tx_queue) &&
+            cfs_list_empty(&conn->ibc_tx_queue_rsrvd) &&
+            cfs_list_empty(&conn->ibc_tx_queue_nocred) &&
+            cfs_list_empty(&conn->ibc_active_txs)) {
                 CDEBUG(D_NET, "closing conn to %s\n", 
                        libcfs_nid2str(peer->ibp_nid));
         } else {
                 CDEBUG(D_NETERROR, "Closing conn to %s: error %d%s%s%s%s\n",
                        libcfs_nid2str(peer->ibp_nid), error,
-                       list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
-                       list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
-                       list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
-                       list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+                       cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+                       cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+                       cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+                       cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
         }
 
-        list_del(&conn->ibc_list);
+        cfs_list_del(&conn->ibc_list);
         /* connd (see below) takes over ibc_list's ref */
 
-        if (list_empty (&peer->ibp_conns) &&    /* no more conns */
+        if (cfs_list_empty (&peer->ibp_conns) &&    /* no more conns */
             kiblnd_peer_active(peer)) {         /* still in peer table */
                 kiblnd_unlink_peer_locked(peer);
 
@@ -1796,12 +1801,12 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
 
         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
-        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
-        list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
-        wake_up (&kiblnd_data.kib_connd_waitq);
+        cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+        cfs_waitq_signal (&kiblnd_data.kib_connd_waitq);
 
-        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 }
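
The tail of kiblnd_close_conn_locked() is the producer half of a hand-off to the connd daemon: queue the item under the daemon's spinlock, then signal its wait queue; cfs_waitq_signal() here replaces the old wake_up(). A sketch with illustrative parameter names:

    static void
    example_queue_for_daemon(cfs_list_t *item, cfs_list_t *queue,
                             cfs_spinlock_t *lock, cfs_waitq_t *waitq)
    {
            unsigned long flags;

            cfs_spin_lock_irqsave(lock, flags);
            cfs_list_add_tail(item, queue);  /* daemon takes over the ref */
            cfs_waitq_signal(waitq);         /* wake one sleeping daemon */
            cfs_spin_unlock_irqrestore(lock, flags);
    }

The matching consumer side appears in kiblnd_connd() further down.
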
 
 void
@@ -1809,11 +1814,11 @@ kiblnd_close_conn (kib_conn_t *conn, int error)
 {
         unsigned long flags;
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         kiblnd_close_conn_locked(conn, error);
 
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }
 
 void
@@ -1822,35 +1827,36 @@ kiblnd_handle_early_rxs(kib_conn_t *conn)
         unsigned long    flags;
         kib_rx_t        *rx;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        while (!list_empty(&conn->ibc_early_rxs)) {
-                rx = list_entry(conn->ibc_early_rxs.next,
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        while (!cfs_list_empty(&conn->ibc_early_rxs)) {
+                rx = cfs_list_entry(conn->ibc_early_rxs.next,
                                 kib_rx_t, rx_list);
-                list_del(&rx->rx_list);
-                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+                cfs_list_del(&rx->rx_list);
+                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                            flags);
 
                 kiblnd_handle_rx(rx);
 
-                write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+                cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
         }
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }
 
 void
-kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
+kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs)
 {
-        LIST_HEAD           (zombies);
-        struct list_head    *tmp;
-        struct list_head    *nxt;
+        CFS_LIST_HEAD       (zombies);
+        cfs_list_t          *tmp;
+        cfs_list_t          *nxt;
         kib_tx_t            *tx;
 
-        spin_lock(&conn->ibc_lock);
+        cfs_spin_lock(&conn->ibc_lock);
 
-        list_for_each_safe (tmp, nxt, txs) {
-                tx = list_entry (tmp, kib_tx_t, tx_list);
+        cfs_list_for_each_safe (tmp, nxt, txs) {
+                tx = cfs_list_entry (tmp, kib_tx_t, tx_list);
 
                 if (txs == &conn->ibc_active_txs) {
                         LASSERT (!tx->tx_queued);
@@ -1865,12 +1871,12 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
 
                 if (tx->tx_sending == 0) {
                         tx->tx_queued = 0;
-                        list_del (&tx->tx_list);
-                        list_add (&tx->tx_list, &zombies);
+                        cfs_list_del (&tx->tx_list);
+                        cfs_list_add (&tx->tx_list, &zombies);
                 }
         }
 
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
 
         kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
                            &zombies, -ECONNABORTED);
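
kiblnd_abort_txs() uses the "zombies" idiom: detach doomed entries onto a stack-local list while holding the lock, then run their completions after dropping it, so no completion callback ever fires under a spinlock. Reduced to a skeleton (hypothetical helper name, same cfs_ calls):

    static void
    example_drain_to_zombies(cfs_spinlock_t *lock, cfs_list_t *src)
    {
            CFS_LIST_HEAD(zombies);          /* declare + initialise */
            cfs_list_t *tmp;
            cfs_list_t *nxt;

            cfs_spin_lock(lock);
            cfs_list_for_each_safe(tmp, nxt, src) {
                    cfs_list_del(tmp);
                    cfs_list_add(tmp, &zombies);
            }
            cfs_spin_unlock(lock);

            /* complete everything on 'zombies' here, lock dropped */
    }
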
@@ -1879,7 +1885,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
 void
 kiblnd_finalise_conn (kib_conn_t *conn)
 {
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (conn->ibc_state > IBLND_CONN_INIT);
 
         kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
@@ -1903,13 +1909,13 @@ kiblnd_finalise_conn (kib_conn_t *conn)
 void
 kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
 {
-        LIST_HEAD        (zombies);
+        CFS_LIST_HEAD    (zombies);
         unsigned long     flags;
 
         LASSERT (error != 0);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         if (active) {
                 LASSERT (peer->ibp_connecting > 0);
@@ -1922,14 +1928,15 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
         if (peer->ibp_connecting != 0 ||
             peer->ibp_accepting != 0) {
                 /* another connection attempt under way... */
-                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                            flags);
                 return;
         }
 
-        if (list_empty(&peer->ibp_conns)) {
+        if (cfs_list_empty(&peer->ibp_conns)) {
                 /* Take peer's blocked transmits to complete with error */
-                list_add(&zombies, &peer->ibp_tx_queue);
-                list_del_init(&peer->ibp_tx_queue);
+                cfs_list_add(&zombies, &peer->ibp_tx_queue);
+                cfs_list_del_init(&peer->ibp_tx_queue);
 
                 if (kiblnd_peer_active(peer))
                         kiblnd_unlink_peer_locked(peer);
@@ -1937,14 +1944,14 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
                 peer->ibp_error = error;
         } else {
                 /* Can't have blocked transmits if there are connections */
-                LASSERT (list_empty(&peer->ibp_tx_queue));
+                LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
         }
 
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         kiblnd_peer_notify(peer);
 
-        if (list_empty (&zombies))
+        if (cfs_list_empty (&zombies))
                 return;
 
         CDEBUG (D_NETERROR, "Deleting messages for %s: connection failed\n",
@@ -1958,7 +1965,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
 {
         kib_peer_t        *peer = conn->ibc_peer;
         kib_tx_t          *tx;
-        struct list_head   txs;
+        cfs_list_t         txs;
         unsigned long      flags;
         int                active;
 
@@ -1968,7 +1975,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
                libcfs_nid2str(peer->ibp_nid), active,
                conn->ibc_version, status);
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
                   peer->ibp_connecting > 0) ||
                  (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
@@ -1985,7 +1992,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
         }
 
         /* connection established */
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         conn->ibc_last_send = jiffies;
         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
@@ -1994,7 +2001,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
         /* Add conn to peer's list and nuke any dangling conns from a different
          * peer instance... */
         kiblnd_conn_addref(conn);               /* +1 ref for ibc_list */
-        list_add(&conn->ibc_list, &peer->ibp_conns);
+        cfs_list_add(&conn->ibc_list, &peer->ibp_conns);
         if (active)
                 peer->ibp_connecting--;
         else
@@ -2014,8 +2021,8 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
         }
 
         /* grab pending txs while I have the lock */
-        list_add(&txs, &peer->ibp_tx_queue);
-        list_del_init(&peer->ibp_tx_queue);
+        cfs_list_add(&txs, &peer->ibp_tx_queue);
+        cfs_list_del_init(&peer->ibp_tx_queue);
 
         if (!kiblnd_peer_active(peer) ||        /* peer has been deleted */
             conn->ibc_comms_error != 0) {       /* error has happened already */
@@ -2023,24 +2030,25 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
 
                 /* start to shut down connection */
                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
-                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                            flags);
 
                 kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
 
                 return;
         }
 
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         /* Schedule blocked txs */
-        spin_lock (&conn->ibc_lock);
-        while (!list_empty (&txs)) {
-                tx = list_entry (txs.next, kib_tx_t, tx_list);
-                list_del(&tx->tx_list);
+        cfs_spin_lock (&conn->ibc_lock);
+        while (!cfs_list_empty (&txs)) {
+                tx = cfs_list_entry (txs.next, kib_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);
 
                 kiblnd_queue_tx_locked(tx, conn);
         }
-        spin_unlock (&conn->ibc_lock);
+        cfs_spin_unlock (&conn->ibc_lock);
 
         kiblnd_check_sends(conn);
 
@@ -2062,7 +2070,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
 int
 kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
 {
-        rwlock_t              *g_lock = &kiblnd_data.kib_global_lock;
+        cfs_rwlock_t          *g_lock = &kiblnd_data.kib_global_lock;
         kib_msg_t             *reqmsg = priv;
         kib_msg_t             *ackmsg;
         kib_dev_t             *ibdev;
@@ -2078,7 +2086,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         unsigned long          flags;
         int                    rc;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
 
         /* cmid inherits 'context' from the corresponding listener id */
         ibdev = (kib_dev_t *)cmid->context;
@@ -2199,7 +2207,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 goto failed;
         }
 
-        write_lock_irqsave(g_lock, flags);
+        cfs_write_lock_irqsave(g_lock, flags);
 
         peer2 = kiblnd_find_peer_locked(nid);
         if (peer2 != NULL) {
@@ -2212,7 +2220,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
                     peer2->ibp_version     != version) {
                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
 
                         CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
                               libcfs_nid2str(nid), peer2->ibp_version, version);
@@ -2225,7 +2233,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 /* tie-break connection race in favour of the higher NID */
                 if (peer2->ibp_connecting != 0 &&
                     nid < ni->ni_nid) {
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
 
                         CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
 
@@ -2237,7 +2245,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 peer2->ibp_accepting++;
                 kiblnd_peer_addref(peer2);
 
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
                 kiblnd_peer_decref(peer);
                 peer = peer2;
         } else {
@@ -2254,9 +2262,9 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 LASSERT (net->ibn_shutdown == 0);
 
                 kiblnd_peer_addref(peer);
-                list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+                cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
 
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
         }
 
         conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
@@ -2336,11 +2344,11 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
         LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         /* retry connection if it's still needed and no other connection
          * attempts (active or passive) are in progress */
-        if (!list_empty(&peer->ibp_tx_queue) &&
+        if (!cfs_list_empty(&peer->ibp_tx_queue) &&
             peer->ibp_connecting == 1 &&
             peer->ibp_accepting == 0) {
                 retry = 1;
@@ -2350,7 +2358,7 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
                 peer->ibp_incarnation = incarnation;
         }
 
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         if (!retry)
                 return;
@@ -2389,7 +2397,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
 {
         kib_peer_t    *peer = conn->ibc_peer;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 
         switch (reason) {
@@ -2582,13 +2590,13 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
                 goto failed;
         }
 
-        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
         if (msg->ibm_dstnid == ni->ni_nid &&
             msg->ibm_dststamp == net->ibn_incarnation)
                 rc = 0;
         else
                 rc = -ESTALE;
-        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         if (rc != 0) {
                 CERROR("Bad connection reply from %s, rc = %d, "
@@ -2630,12 +2638,12 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
         unsigned long            flags;
         int                      rc;
 
-        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         incarnation = peer->ibp_incarnation;
         version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
 
-        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
         if (conn == NULL) {
@@ -2856,16 +2864,16 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 }
 
 int
-kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
+kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs)
 {
         kib_tx_t          *tx;
-        struct list_head  *ttmp;
+        cfs_list_t        *ttmp;
         int                timed_out = 0;
 
-        spin_lock(&conn->ibc_lock);
+        cfs_spin_lock(&conn->ibc_lock);
 
-        list_for_each (ttmp, txs) {
-                tx = list_entry (ttmp, kib_tx_t, tx_list);
+        cfs_list_for_each (ttmp, txs) {
+                tx = cfs_list_entry (ttmp, kib_tx_t, tx_list);
 
                 if (txs != &conn->ibc_active_txs) {
                         LASSERT (tx->tx_queued);
@@ -2874,7 +2882,7 @@ kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
                         LASSERT (tx->tx_waiting || tx->tx_sending != 0);
                 }
 
-                if (time_after_eq (jiffies, tx->tx_deadline)) {
+                if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
                         timed_out = 1;
                         CERROR("Timed out tx: %s, %lu seconds\n",
                                kiblnd_queue2str(conn, txs),
@@ -2883,7 +2891,7 @@ kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
                 }
         }
 
-        spin_unlock(&conn->ibc_lock);
+        cfs_spin_unlock(&conn->ibc_lock);
         return timed_out;
 }
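
cfs_time_aftereq() above is the portable spelling of time_after_eq(): it compares via signed difference, so the deadline test stays correct when the jiffy counter wraps, where a naive ">=" would misfire. For instance:

    /* Illustrative helper: wrap-safe deadline check. */
    static int
    example_expired(cfs_time_t deadline)
    {
            return cfs_time_aftereq(cfs_time_current(), deadline);
    }
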
 
@@ -2899,24 +2907,24 @@ kiblnd_conn_timed_out (kib_conn_t *conn)
 void
 kiblnd_check_conns (int idx)
 {
-        struct list_head  *peers = &kiblnd_data.kib_peers[idx];
-        struct list_head  *ptmp;
+        cfs_list_t        *peers = &kiblnd_data.kib_peers[idx];
+        cfs_list_t        *ptmp;
         kib_peer_t        *peer;
         kib_conn_t        *conn;
-        struct list_head  *ctmp;
+        cfs_list_t        *ctmp;
         unsigned long      flags;
 
  again:
         /* NB. We expect to have a look at all the peers and not find any
          * rdmas to time out, so we just use a shared lock while we
          * take a look... */
-        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        list_for_each (ptmp, peers) {
-                peer = list_entry (ptmp, kib_peer_t, ibp_list);
+        cfs_list_for_each (ptmp, peers) {
+                peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
 
-                list_for_each (ctmp, &peer->ibp_conns) {
-                        conn = list_entry (ctmp, kib_conn_t, ibc_list);
+                cfs_list_for_each (ctmp, &peer->ibp_conns) {
+                        conn = cfs_list_entry (ctmp, kib_conn_t, ibc_list);
 
                         LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
 
@@ -2934,8 +2942,8 @@ kiblnd_check_conns (int idx)
 
                         kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                               flags);
+                        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                                   flags);
 
                         CERROR("Timed out RDMA with %s (%lu)\n",
                                libcfs_nid2str(peer->ibp_nid),
@@ -2950,13 +2958,13 @@ kiblnd_check_conns (int idx)
                 }
         }
 
-        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }
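
kiblnd_check_conns() explains its own locking in the comment at 'again': the walk holds only the shared lock, so once that is dropped to close a timed-out connection the table may have changed and the whole scan restarts. Condensed to a skeleton (control flow simplified; the error code is illustrative):

    static void
    example_check_conns(cfs_list_t *peers)
    {
            cfs_list_t    *ptmp;
            kib_conn_t    *conn = NULL;
            unsigned long  flags;

     again:
            cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
            cfs_list_for_each(ptmp, peers) {
                    /* ...on finding a timed-out conn: pin it, drop the
                     * shared lock, close it, then rescan from the top */
                    kiblnd_conn_addref(conn);
                    cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
                    kiblnd_close_conn(conn, -ETIMEDOUT);
                    kiblnd_conn_decref(conn);
                    goto again;
            }
            cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
    }
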
 
 void
 kiblnd_disconnect_conn (kib_conn_t *conn)
 {
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (current == kiblnd_data.kib_connd);
         LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
 
@@ -2969,7 +2977,7 @@ kiblnd_disconnect_conn (kib_conn_t *conn)
 int
 kiblnd_connd (void *arg)
 {
-        wait_queue_t       wait;
+        cfs_waitlink_t     wait;
         unsigned long      flags;
         kib_conn_t        *conn;
         int                timeout;
@@ -2981,40 +2989,45 @@ kiblnd_connd (void *arg)
         cfs_daemonize ("kiblnd_connd");
         cfs_block_allsigs ();
 
-        init_waitqueue_entry (&wait, current);
+        cfs_waitlink_init (&wait);
         kiblnd_data.kib_connd = current;
 
-        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
         while (!kiblnd_data.kib_shutdown) {
 
                 dropped_lock = 0;
 
-                if (!list_empty (&kiblnd_data.kib_connd_zombies)) {
-                        conn = list_entry (kiblnd_data.kib_connd_zombies.next,
-                                           kib_conn_t, ibc_list);
-                        list_del(&conn->ibc_list);
+                if (!cfs_list_empty (&kiblnd_data.kib_connd_zombies)) {
+                        conn = cfs_list_entry(kiblnd_data. \
+                                              kib_connd_zombies.next,
+                                              kib_conn_t, ibc_list);
+                        cfs_list_del(&conn->ibc_list);
 
-                        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+                        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
+                                                   flags);
                         dropped_lock = 1;
 
                         kiblnd_destroy_conn(conn);
 
-                        spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
+                        cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
+                                               flags);
                 }
 
-                if (!list_empty (&kiblnd_data.kib_connd_conns)) {
-                        conn = list_entry (kiblnd_data.kib_connd_conns.next,
-                                           kib_conn_t, ibc_list);
-                        list_del(&conn->ibc_list);
+                if (!cfs_list_empty (&kiblnd_data.kib_connd_conns)) {
+                        conn = cfs_list_entry (kiblnd_data.kib_connd_conns.next,
+                                               kib_conn_t, ibc_list);
+                        cfs_list_del(&conn->ibc_list);
 
-                        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+                        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
+                                                    flags);
                         dropped_lock = 1;
 
                         kiblnd_disconnect_conn(conn);
                         kiblnd_conn_decref(conn);
 
-                        spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
+                        cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
+                                               flags);
                 }
 
                 /* careful with the jiffy wrap... */
@@ -3024,7 +3037,7 @@ kiblnd_connd (void *arg)
                         const int p = 1;
                         int       chunk = kiblnd_data.kib_peer_hash_size;
 
-                        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+                        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
                         dropped_lock = 1;
 
                         /* Time to check for RDMA timeouts on a few more
@@ -3047,26 +3060,27 @@ kiblnd_connd (void *arg)
                                              kiblnd_data.kib_peer_hash_size;
                         }
 
-                        deadline += p * HZ;
-                        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+                        deadline += p * CFS_HZ;
+                        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock,
+                                              flags);
                 }
 
                 if (dropped_lock)
                         continue;
 
                 /* Nothing to do for 'timeout'  */
-                set_current_state (TASK_INTERRUPTIBLE);
-                add_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
-                spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+                cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add (&kiblnd_data.kib_connd_waitq, &wait);
+                cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
 
-                schedule_timeout (timeout);
+                cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
 
-                set_current_state (TASK_RUNNING);
-                remove_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
-                spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
+                cfs_set_current_state (CFS_TASK_RUNNING);
+                cfs_waitq_del (&kiblnd_data.kib_connd_waitq, &wait);
+                cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
         }
 
-        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
 
         kiblnd_thread_fini();
         return (0);
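
The bottom of the connd loop is the consumer half of the hand-off: prepare to wait, drop the lock, sleep with a timeout, then tear the wait link down again; cfs_waitq_timedwait() replaces schedule_timeout() and cfs_waitlink_init() replaces init_waitqueue_entry(). Spelled out once in isolation (lock and timeout parameters are illustrative):

    static void
    example_sleep_for_work(cfs_waitq_t *waitq, cfs_spinlock_t *lock,
                           long timeout)
    {
            cfs_waitlink_t wait;
            unsigned long  flags;

            cfs_waitlink_init(&wait);

            cfs_spin_lock_irqsave(lock, flags);
            cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
            cfs_waitq_add(waitq, &wait);
            cfs_spin_unlock_irqrestore(lock, flags);

            cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);

            cfs_set_current_state(CFS_TASK_RUNNING);
            cfs_waitq_del(waitq, &wait);
    }
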
@@ -3129,10 +3143,10 @@ kiblnd_cq_completion (struct ib_cq *cq, void *arg)
          * and this CQ is about to be destroyed so I NOOP. */
         kib_conn_t     *conn = (kib_conn_t *)arg;
         unsigned long   flags;
-        
+
         LASSERT (cq == conn->ibc_cq);
 
-        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
 
         conn->ibc_ready = 1;
 
@@ -3141,12 +3155,12 @@ kiblnd_cq_completion (struct ib_cq *cq, void *arg)
              conn->ibc_nsends_posted > 0)) {
                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
                 conn->ibc_scheduled = 1;
-                list_add_tail(&conn->ibc_sched_list,
-                              &kiblnd_data.kib_sched_conns);
-                wake_up(&kiblnd_data.kib_sched_waitq);
+                cfs_list_add_tail(&conn->ibc_sched_list,
+                                  &kiblnd_data.kib_sched_conns);
+                cfs_waitq_signal(&kiblnd_data.kib_sched_waitq);
         }
 
-        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
 }
 
 void
@@ -3162,7 +3176,7 @@ int
 kiblnd_scheduler(void *arg)
 {
         long            id = (long)arg;
-        wait_queue_t    wait;
+        cfs_waitlink_t  wait;
         char            name[16];
         unsigned long   flags;
         kib_conn_t     *conn;
@@ -3175,33 +3189,34 @@ kiblnd_scheduler(void *arg)
         cfs_daemonize(name);
         cfs_block_allsigs();
 
-        init_waitqueue_entry(&wait, current);
+        cfs_waitlink_init(&wait);
 
-        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
 
         while (!kiblnd_data.kib_shutdown) {
                 if (busy_loops++ >= IBLND_RESCHED) {
-                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
-                                               flags);
+                        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
+                                                   flags);
 
-                        our_cond_resched();
+                        cfs_cond_resched();
                         busy_loops = 0;
 
-                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+                        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
+                                              flags);
                 }
 
                 did_something = 0;
 
-                if (!list_empty(&kiblnd_data.kib_sched_conns)) {
-                        conn = list_entry(kiblnd_data.kib_sched_conns.next,
-                                          kib_conn_t, ibc_sched_list);
+                if (!cfs_list_empty(&kiblnd_data.kib_sched_conns)) {
+                        conn = cfs_list_entry(kiblnd_data.kib_sched_conns.next,
+                                              kib_conn_t, ibc_sched_list);
                         /* take over kib_sched_conns' ref on conn... */
                         LASSERT(conn->ibc_scheduled);
-                        list_del(&conn->ibc_sched_list);
+                        cfs_list_del(&conn->ibc_sched_list);
                         conn->ibc_ready = 0;
 
-                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
-                                               flags);
+                        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
+                                                   flags);
 
                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                         if (rc == 0) {
@@ -3213,7 +3228,9 @@ kiblnd_scheduler(void *arg)
                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                                         kiblnd_close_conn(conn, -EIO);
                                         kiblnd_conn_decref(conn);
-                                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+                                        cfs_spin_lock_irqsave(&kiblnd_data. \
+                                                              kib_sched_lock,
+                                                              flags);
                                         continue;
                                 }
 
@@ -3223,36 +3240,40 @@ kiblnd_scheduler(void *arg)
                         if (rc < 0) {
                                 CWARN("%s: ib_poll_cq failed: %d, "
                                       "closing connection\n",
-                                      libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+                                      libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                                                     rc);
                                 kiblnd_close_conn(conn, -EIO);
                                 kiblnd_conn_decref(conn);
-                                spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+                                cfs_spin_lock_irqsave(&kiblnd_data. \
+                                                      kib_sched_lock, flags);
                                 continue;
                         }
 
-                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
-                                          flags);
+                        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
+                                              flags);
 
                         if (rc != 0 || conn->ibc_ready) {
                                 /* There may be another completion waiting; get
                                  * another scheduler to check while I handle
                                  * this one... */
                                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
-                                list_add_tail(&conn->ibc_sched_list,
-                                              &kiblnd_data.kib_sched_conns);
-                                wake_up(&kiblnd_data.kib_sched_waitq);
+                                cfs_list_add_tail(&conn->ibc_sched_list,
+                                                  &kiblnd_data.kib_sched_conns);
+                                cfs_waitq_signal(&kiblnd_data.kib_sched_waitq);
                         } else {
                                 conn->ibc_scheduled = 0;
                         }
-                        
+
                         if (rc != 0) {
-                                spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
-                                                       flags);
+                                cfs_spin_unlock_irqrestore(&kiblnd_data. \
+                                                           kib_sched_lock,
+                                                           flags);
 
                                 kiblnd_complete(&wc);
 
-                                spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
-                                                  flags);
+                                cfs_spin_lock_irqsave(&kiblnd_data. \
+                                                      kib_sched_lock,
+                                                      flags);
                         }
 
                         kiblnd_conn_decref(conn); /* ...drop my ref from above */
@@ -3262,19 +3283,19 @@ kiblnd_scheduler(void *arg)
                 if (did_something)
                         continue;
 
-                set_current_state(TASK_INTERRUPTIBLE);
-                add_wait_queue_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
-                spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
+                cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
 
-                schedule();
+                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
                 busy_loops = 0;
 
-                remove_wait_queue(&kiblnd_data.kib_sched_waitq, &wait);
-                set_current_state(TASK_RUNNING);
-                spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+                cfs_waitq_del(&kiblnd_data.kib_sched_waitq, &wait);
+                cfs_set_current_state(CFS_TASK_RUNNING);
+                cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
         }
 
-        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
 
         kiblnd_thread_fini();
         return (0);
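
Note the contrast between the two daemons' sleeps: connd registers as an ordinary waiter and bounds its sleep so it can run periodic timeout checks, while each scheduler registers as an exclusive waiter and sleeps unbounded, so one cfs_waitq_signal() per completion wakes exactly one scheduler rather than the whole pool. Side by side:

    /* connd: single waiter, bounded sleep for periodic housekeeping */
    cfs_waitq_add(&kiblnd_data.kib_connd_waitq, &wait);
    cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);

    /* scheduler: pool of exclusive waiters, unbounded sleep */
    cfs_waitq_add_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
    cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
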
diff --git a/lnet/klnds/ptllnd/ptllnd.c b/lnet/klnds/ptllnd/ptllnd.c
index 0abaa37..1501a6f 100755 (executable)
@@ -62,11 +62,11 @@ kptllnd_ptlid2str(ptl_process_id_t id)
         unsigned long  flags;
         char          *str;
         
-        spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
+        cfs_spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
         str = strs[idx++];
         if (idx >= sizeof(strs)/sizeof(strs[0]))
                 idx = 0;
-        spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
+        cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
 
         snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
         return str;
@@ -493,10 +493,10 @@ kptllnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
         if (kptllnd_find_target(net, id, &peer) != 0)
                 return;
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
         if (peer->peer_last_alive != 0)
                 *when = peer->peer_last_alive;
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
         kptllnd_peer_decref(peer);
         return;
 }
@@ -509,9 +509,9 @@ kptllnd_base_shutdown (void)
         unsigned long     flags;
         lnet_process_id_t process_id;
 
-        read_lock(&kptllnd_data.kptl_net_rw_lock);
-        LASSERT (list_empty(&kptllnd_data.kptl_nets));
-        read_unlock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+        LASSERT (cfs_list_empty(&kptllnd_data.kptl_nets));
+        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         switch (kptllnd_data.kptl_init) {
         default:
@@ -521,22 +521,23 @@ kptllnd_base_shutdown (void)
         case PTLLND_INIT_DATA:
                 /* stop receiving */
                 kptllnd_rx_buffer_pool_fini(&kptllnd_data.kptl_rx_buffer_pool);
-                LASSERT (list_empty(&kptllnd_data.kptl_sched_rxq));
-                LASSERT (list_empty(&kptllnd_data.kptl_sched_rxbq));
+                LASSERT (cfs_list_empty(&kptllnd_data.kptl_sched_rxq));
+                LASSERT (cfs_list_empty(&kptllnd_data.kptl_sched_rxbq));
 
                 /* lock to interleave cleanly with peer birth/death */
-                write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+                cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
                 LASSERT (kptllnd_data.kptl_shutdown == 0);
                 kptllnd_data.kptl_shutdown = 1; /* phase 1 == destroy peers */
                 /* no new peers possible now */
-                write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+                cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+                                            flags);
 
                 /* nuke all existing peers */
                 process_id.nid = LNET_NID_ANY;
                 process_id.pid = LNET_PID_ANY;
                 kptllnd_peer_del(process_id);
 
-                read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+                cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
                 LASSERT (kptllnd_data.kptl_n_active_peers == 0);
 
@@ -547,43 +548,44 @@ kptllnd_base_shutdown (void)
                                "Waiting for %d peers to terminate\n",
                                kptllnd_data.kptl_npeers);
 
-                        read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
-                                               flags);
+                        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+                                                   flags);
 
                         cfs_pause(cfs_time_seconds(1));
 
-                        read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
-                                          flags);
+                        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
+                                              flags);
                 }
 
-                LASSERT (list_empty(&kptllnd_data.kptl_closing_peers));
-                LASSERT (list_empty(&kptllnd_data.kptl_zombie_peers));
+                LASSERT (cfs_list_empty(&kptllnd_data.kptl_closing_peers));
+                LASSERT (cfs_list_empty(&kptllnd_data.kptl_zombie_peers));
                 LASSERT (kptllnd_data.kptl_peers != NULL);
                 for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
-                        LASSERT (list_empty (&kptllnd_data.kptl_peers[i]));
+                        LASSERT (cfs_list_empty (&kptllnd_data.kptl_peers[i]));
 
-                read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+                cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+                                           flags);
                 CDEBUG(D_NET, "All peers deleted\n");
 
                 /* Shutdown phase 2: kill the daemons... */
                 kptllnd_data.kptl_shutdown = 2;
-                mb();
+                cfs_mb();
 
                 i = 2;
-                while (atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
+                while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
                         /* Wake up all threads*/
-                        wake_up_all(&kptllnd_data.kptl_sched_waitq);
-                        wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
+                        cfs_wake_up_all(&kptllnd_data.kptl_sched_waitq);
+                        cfs_wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
 
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "Waiting for %d threads to terminate\n",
-                               atomic_read(&kptllnd_data.kptl_nthreads));
+                               cfs_atomic_read(&kptllnd_data.kptl_nthreads));
                         cfs_pause(cfs_time_seconds(1));
                 }
 
                 CDEBUG(D_NET, "All Threads stopped\n");
-                LASSERT(list_empty(&kptllnd_data.kptl_sched_txq));
+                LASSERT(cfs_list_empty(&kptllnd_data.kptl_sched_txq));
 
                 kptllnd_cleanup_tx_descs();
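
The teardown above is two-phase: kptl_shutdown = 1 stops new peers while existing ones are deleted, then kptl_shutdown = 2 is published with cfs_mb() and every daemon is broadcast-woken until the live-thread counter drains — the shutdown side of the cfs_atomic_t thread accounting seen in kiblnd_thread_start()/_fini() earlier. A compact sketch with hypothetical parameter names:

    static void
    example_phase2_shutdown(volatile int *shutdown_flag,
                            cfs_waitq_t *waitq, cfs_atomic_t *nthreads)
    {
            *shutdown_flag = 2;
            cfs_mb();                        /* flag visible before wakeups */

            while (cfs_atomic_read(nthreads) != 0) {
                    cfs_wake_up_all(waitq);  /* every thread re-checks */
                    cfs_pause(cfs_time_seconds(1));
            }
    }
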
 
@@ -612,15 +614,15 @@ kptllnd_base_shutdown (void)
                                kptllnd_errtype2str(prc), prc);
         }
 
-        LASSERT (atomic_read(&kptllnd_data.kptl_ntx) == 0);
-        LASSERT (list_empty(&kptllnd_data.kptl_idle_txs));
+        LASSERT (cfs_atomic_read(&kptllnd_data.kptl_ntx) == 0);
+        LASSERT (cfs_list_empty(&kptllnd_data.kptl_idle_txs));
 
         if (kptllnd_data.kptl_rx_cache != NULL)
                 cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache);
 
         if (kptllnd_data.kptl_peers != NULL)
                 LIBCFS_FREE(kptllnd_data.kptl_peers,
-                            sizeof (struct list_head) *
+                            sizeof (cfs_list_t) *
                             kptllnd_data.kptl_peer_hash_size);
 
         if (kptllnd_data.kptl_nak_msg != NULL)
@@ -672,23 +674,23 @@ kptllnd_base_startup (void)
         kptllnd_data.kptl_eqh = PTL_INVALID_HANDLE;
         kptllnd_data.kptl_nih = PTL_INVALID_HANDLE;
 
-        rwlock_init(&kptllnd_data.kptl_net_rw_lock);
-        INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
+        cfs_rwlock_init(&kptllnd_data.kptl_net_rw_lock);
+        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
 
         /* Setup the sched locks/lists/waitq */
-        spin_lock_init(&kptllnd_data.kptl_sched_lock);
-        init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
-        INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
-        INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
-        INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
+        cfs_spin_lock_init(&kptllnd_data.kptl_sched_lock);
+        cfs_init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
+        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
+        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
+        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
 
         /* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
-        spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
+        cfs_spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
 
         /* Setup the tx locks/lists */
-        spin_lock_init(&kptllnd_data.kptl_tx_lock);
-        INIT_LIST_HEAD(&kptllnd_data.kptl_idle_txs);
-        atomic_set(&kptllnd_data.kptl_ntx, 0);
+        cfs_spin_lock_init(&kptllnd_data.kptl_tx_lock);
+        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_idle_txs);
+        cfs_atomic_set(&kptllnd_data.kptl_ntx, 0);
 
         /* Uptick the module reference count */
         PORTAL_MODULE_USE;
@@ -696,7 +698,7 @@ kptllnd_base_startup (void)
         kptllnd_data.kptl_expected_peers =
                 *kptllnd_tunables.kptl_max_nodes *
                 *kptllnd_tunables.kptl_max_procs_per_node;
-        
+
         /*
          * Initialize the Network interface instance
          * We use the default because we don't have any
@@ -758,7 +760,7 @@ kptllnd_base_startup (void)
         /* Initialized the incarnation - it must be for-all-time unique, even
          * accounting for the fact that we increment it when we disconnect a
          * peer that's using it */
-        do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
         kptllnd_data.kptl_incarnation = (((__u64)tv.tv_sec) * 1000000) +
                                         tv.tv_usec;
         CDEBUG(D_NET, "Incarnation="LPX64"\n", kptllnd_data.kptl_incarnation);
@@ -771,17 +773,17 @@ kptllnd_base_startup (void)
         kptllnd_data.kptl_nak_msg->ptlm_srcpid   = the_lnet.ln_pid;
         kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
 
-        rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
-        init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
-        atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
-        INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
-        INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
+        cfs_rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
+        cfs_init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
+        cfs_atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
+        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
+        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
 
         /* Allocate and setup the peer hash table */
         kptllnd_data.kptl_peer_hash_size =
                 *kptllnd_tunables.kptl_peer_hash_table_size;
         LIBCFS_ALLOC(kptllnd_data.kptl_peers,
-                     sizeof(struct list_head) *
+                     sizeof(cfs_list_t) *
                      kptllnd_data.kptl_peer_hash_size);
         if (kptllnd_data.kptl_peers == NULL) {
                 CERROR("Failed to allocate space for peer hash table size=%d\n",
@@ -790,11 +792,11 @@ kptllnd_base_startup (void)
                 goto failed;
         }
         for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
-                INIT_LIST_HEAD(&kptllnd_data.kptl_peers[i]);
+                CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_peers[i]);
 
         kptllnd_rx_buffer_pool_init(&kptllnd_data.kptl_rx_buffer_pool);
 
-        kptllnd_data.kptl_rx_cache = 
+        kptllnd_data.kptl_rx_cache =
                 cfs_mem_cache_create("ptllnd_rx",
                                      sizeof(kptl_rx_t) + 
                                      *kptllnd_tunables.kptl_max_msg_size,
@@ -858,7 +860,7 @@ kptllnd_base_startup (void)
 
         if (*kptllnd_tunables.kptl_checksum)
                 CWARN("Checksumming enabled\n");
-        
+
         CDEBUG(D_NET, "<<< kptllnd_base_startup SUCCESS\n");
         return 0;
 
@@ -905,10 +907,10 @@ kptllnd_startup (lnet_ni_t *ni)
          * multiple NIs */
         kptllnd_data.kptl_nak_msg->ptlm_srcnid = ni->ni_nid;
 
-        atomic_set(&net->net_refcount, 1);
-        write_lock(&kptllnd_data.kptl_net_rw_lock);
-        list_add_tail(&net->net_list, &kptllnd_data.kptl_nets);
-        write_unlock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_atomic_set(&net->net_refcount, 1);
+        cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_list_add_tail(&net->net_list, &kptllnd_data.kptl_nets);
+        cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
         return 0;
 
  failed:
@@ -926,45 +928,45 @@ kptllnd_shutdown (lnet_ni_t *ni)
         LASSERT (kptllnd_data.kptl_init == PTLLND_INIT_ALL);
 
         CDEBUG(D_MALLOC, "before LND cleanup: kmem %d\n",
-               atomic_read (&libcfs_kmemory));
+               cfs_atomic_read (&libcfs_kmemory));
 
         if (net == NULL)
                 goto out;
 
         LASSERT (ni == net->net_ni);
         LASSERT (!net->net_shutdown);
-        LASSERT (!list_empty(&net->net_list));
-        LASSERT (atomic_read(&net->net_refcount) != 0);
+        LASSERT (!cfs_list_empty(&net->net_list));
+        LASSERT (cfs_atomic_read(&net->net_refcount) != 0);
         ni->ni_data = NULL;
         net->net_ni = NULL;
 
-        write_lock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
         kptllnd_net_decref(net);
-        list_del_init(&net->net_list);
-        write_unlock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_list_del_init(&net->net_list);
+        cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         /* Can't nuke peers here - they are shared among all NIs */
-                write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
         net->net_shutdown = 1;   /* Order with peer creation */
-        write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
-                i = 2;
-        while (atomic_read(&net->net_refcount) != 0) {
+        i = 2;
+        while (cfs_atomic_read(&net->net_refcount) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                        "Waiting for %d references to drop\n",
-                       atomic_read(&net->net_refcount));
+                       cfs_atomic_read(&net->net_refcount));
 
-                        cfs_pause(cfs_time_seconds(1));
+                       cfs_pause(cfs_time_seconds(1));
                 }
 
         LIBCFS_FREE(net, sizeof(*net));
 out:
         /* NB no locking since I don't race with writers */
-        if (list_empty(&kptllnd_data.kptl_nets))
+        if (cfs_list_empty(&kptllnd_data.kptl_nets))
                 kptllnd_base_shutdown();
         CDEBUG(D_MALLOC, "after LND cleanup: kmem %d\n",
-               atomic_read (&libcfs_kmemory));
+               cfs_atomic_read (&libcfs_kmemory));
         return;
 }
 
diff --git a/lnet/klnds/ptllnd/ptllnd.h b/lnet/klnds/ptllnd/ptllnd.h
index 49b90d3..53df5ee 100755 (executable)
@@ -90,7 +90,7 @@
 //#define PJK_DEBUGGING
 
 #ifdef CONFIG_SMP
-# define PTLLND_N_SCHED         num_online_cpus()   /* # schedulers */
+# define PTLLND_N_SCHED         cfs_num_online_cpus()   /* # schedulers */
 #else
 # define PTLLND_N_SCHED         1                   /* # schedulers */
 #endif
@@ -149,7 +149,7 @@ typedef struct {
 
 typedef struct kptl_rx                          /* receive message */
 {
-        struct list_head        rx_list;        /* queue for attention */
+        cfs_list_t              rx_list;        /* queue for attention */
         kptl_rx_buffer_t       *rx_rxb;         /* the rx buffer pointer */
         kptl_msg_t             *rx_msg;         /* received message */
         int                     rx_nob;         /* received message size */
@@ -168,8 +168,8 @@ typedef struct kptl_rx                          /* receive message */
 
 typedef struct kptl_rx_buffer_pool
 {
-        spinlock_t              rxbp_lock;
-        struct list_head        rxbp_list;      /* all allocated buffers */
+        cfs_spinlock_t          rxbp_lock;
+        cfs_list_t              rxbp_list;      /* all allocated buffers */
         int                     rxbp_count;     /* # allocated buffers */
         int                     rxbp_reserved;  /* # requests to buffer */
         int                     rxbp_shutdown;  /* shutdown flag */
@@ -177,15 +177,15 @@ typedef struct kptl_rx_buffer_pool
 
 struct kptl_rx_buffer
 {
-        kptl_rx_buffer_pool_t  *rxb_pool;
-        struct list_head        rxb_list;       /* for the rxb_pool list */
-        struct list_head        rxb_repost_list;/* for the kptl_sched_rxbq list */
-        int                     rxb_posted:1;   /* on the net */
-        int                     rxb_idle:1;     /* all done */
-        kptl_eventarg_t         rxb_eventarg;   /* event->md.user_ptr */
-        int                     rxb_refcount;   /* reference count */
-        ptl_handle_md_t         rxb_mdh;        /* the portals memory descriptor (MD) handle */
-        char                   *rxb_buffer;     /* the buffer */
+        kptl_rx_buffer_pool_t *rxb_pool;
+        cfs_list_t             rxb_list;       /* for the rxb_pool list */
+        cfs_list_t             rxb_repost_list;/* for the kptl_sched_rxbq list */
+        int                    rxb_posted:1;   /* on the net */
+        int                    rxb_idle:1;     /* all done */
+        kptl_eventarg_t        rxb_eventarg;   /* event->md.user_ptr */
+        int                    rxb_refcount;   /* reference count */
+        ptl_handle_md_t        rxb_mdh;        /* the portals memory descriptor (MD) handle */
+        char                  *rxb_buffer;     /* the buffer */
 
 };
 
@@ -210,8 +210,8 @@ typedef union {
 
 typedef struct kptl_tx                           /* transmit message */
 {
-        struct list_head        tx_list;      /* queue on idle_txs etc */
-        atomic_t                tx_refcount;  /* reference count*/
+        cfs_list_t              tx_list;      /* queue on idle_txs etc */
+        cfs_atomic_t            tx_refcount;  /* reference count*/
         enum kptl_tx_type       tx_type;      /* small msg/{put,get}{req,resp} */
         int                     tx_active:1;  /* queued on the peer */
         int                     tx_idle:1;    /* on the free list */
@@ -243,13 +243,13 @@ enum kptllnd_peer_state
 
 struct kptl_peer
 {
-        struct list_head        peer_list;
-        atomic_t                peer_refcount;          /* The current references */
+        cfs_list_t              peer_list;
+        cfs_atomic_t            peer_refcount;          /* The current references */
         enum kptllnd_peer_state peer_state;
-        spinlock_t              peer_lock;              /* serialize */
-        struct list_head        peer_noops;             /* PTLLND_MSG_TYPE_NOOP txs */
-        struct list_head        peer_sendq;             /* txs waiting for mh handles */
-        struct list_head        peer_activeq;           /* txs awaiting completion */
+        cfs_spinlock_t          peer_lock;              /* serialize */
+        cfs_list_t              peer_noops;             /* PTLLND_MSG_TYPE_NOOP txs */
+        cfs_list_t              peer_sendq;             /* txs waiting for mh handles */
+        cfs_list_t              peer_activeq;           /* txs awaiting completion */
         lnet_process_id_t       peer_id;                /* Peer's LNET id */
         ptl_process_id_t        peer_ptlid;             /* Peer's portals id */
         __u64                   peer_incarnation;       /* peer's incarnation */
@@ -271,49 +271,49 @@ struct kptl_data
 {
         int                     kptl_init;             /* initialisation state */
         volatile int            kptl_shutdown;         /* shut down? */
-        atomic_t                kptl_nthreads;         /* # live threads */
+        cfs_atomic_t            kptl_nthreads;         /* # live threads */
         ptl_handle_ni_t         kptl_nih;              /* network inteface handle */
         ptl_process_id_t        kptl_portals_id;       /* Portals ID of interface */
         __u64                   kptl_incarnation;      /* which one am I */
         ptl_handle_eq_t         kptl_eqh;              /* Event Queue (EQ) */
 
-        rwlock_t                kptl_net_rw_lock;      /* serialise... */
-        struct list_head        kptl_nets;             /* kptl_net instances */
+        cfs_rwlock_t            kptl_net_rw_lock;      /* serialise... */
+        cfs_list_t              kptl_nets;             /* kptl_net instances */
 
-        spinlock_t              kptl_sched_lock;       /* serialise... */
-        wait_queue_head_t       kptl_sched_waitq;      /* schedulers sleep here */
-        struct list_head        kptl_sched_txq;        /* tx requiring attention */
-        struct list_head        kptl_sched_rxq;        /* rx requiring attention */
-        struct list_head        kptl_sched_rxbq;       /* rxb requiring reposting */
+        cfs_spinlock_t          kptl_sched_lock;       /* serialise... */
+        cfs_waitq_t             kptl_sched_waitq;      /* schedulers sleep here */
+        cfs_list_t              kptl_sched_txq;        /* tx requiring attention */
+        cfs_list_t              kptl_sched_rxq;        /* rx requiring attention */
+        cfs_list_t              kptl_sched_rxbq;       /* rxb requiring reposting */
 
-        wait_queue_head_t       kptl_watchdog_waitq;   /* watchdog sleeps here */
-        atomic_t                kptl_needs_ptltrace;   /* watchdog thread to dump ptltrace */
+        cfs_waitq_t             kptl_watchdog_waitq;   /* watchdog sleeps here */
+        cfs_atomic_t            kptl_needs_ptltrace;   /* watchdog thread to dump ptltrace */
 
         kptl_rx_buffer_pool_t   kptl_rx_buffer_pool;   /* rx buffer pool */
         cfs_mem_cache_t*        kptl_rx_cache;         /* rx descripter cache */
 
-        atomic_t                kptl_ntx;              /* # tx descs allocated */
-        spinlock_t              kptl_tx_lock;          /* serialise idle tx list*/
-        struct list_head        kptl_idle_txs;         /* idle tx descriptors */
+        cfs_atomic_t            kptl_ntx;              /* # tx descs allocated */
+        cfs_spinlock_t          kptl_tx_lock;          /* serialise idle tx list*/
+        cfs_list_t              kptl_idle_txs;         /* idle tx descriptors */
 
-        rwlock_t                kptl_peer_rw_lock;     /* lock for peer table */
-        struct list_head       *kptl_peers;            /* hash table of all my known peers */
-        struct list_head        kptl_closing_peers;    /* peers being closed */
-        struct list_head        kptl_zombie_peers;     /* peers waiting for refs to drain */
+        cfs_rwlock_t            kptl_peer_rw_lock;     /* lock for peer table */
+        cfs_list_t             *kptl_peers;            /* hash table of all my known peers */
+        cfs_list_t              kptl_closing_peers;    /* peers being closed */
+        cfs_list_t              kptl_zombie_peers;     /* peers waiting for refs to drain */
         int                     kptl_peer_hash_size;   /* size of kptl_peers */
         int                     kptl_npeers;           /* # peers extant */
         int                     kptl_n_active_peers;   /* # active peers */
         int                     kptl_expected_peers;   /* # peers I can buffer HELLOs from */
 
         kptl_msg_t             *kptl_nak_msg;          /* common NAK message */
-        spinlock_t              kptl_ptlid2str_lock;   /* serialise str ops */
+        cfs_spinlock_t          kptl_ptlid2str_lock;   /* serialise str ops */
 };
 
 struct kptl_net
 {
-        struct list_head  net_list;      /* chain on kptl_data:: kptl_nets */
+        cfs_list_t        net_list;      /* chain on kptl_data:: kptl_nets */
         lnet_ni_t        *net_ni;
-        atomic_t          net_refcount;  /* # current references */
+        cfs_atomic_t      net_refcount;  /* # current references */
         int               net_shutdown;  /* lnd_shutdown called */
 };
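The type substitutions in struct kptl_data and struct kptl_net above are purely mechanical: each native Linux primitive is swapped for its cfs_-prefixed libcfs alias so the same source builds against the linux, posix and winnt back-ends. A minimal sketch of how the Linux back-end can supply these aliases; the typedef names come from this patch, but the header layout shown is an assumption, not the actual libcfs/include contents:

    /* Sketch only: plausible Linux-side aliases for the cfs_ types used
     * above.  The real definitions live under libcfs/include/libcfs/linux/. */
    #include <linux/spinlock.h>
    #include <linux/wait.h>
    #include <linux/list.h>
    #include <asm/atomic.h>

    typedef spinlock_t          cfs_spinlock_t;
    typedef rwlock_t            cfs_rwlock_t;
    typedef atomic_t            cfs_atomic_t;
    typedef wait_queue_head_t   cfs_waitq_t;
    typedef struct list_head    cfs_list_t;

    #define CFS_HZ              HZ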
 
@@ -353,8 +353,8 @@ kptllnd_schedule_ptltrace_dump (void)
 {
 #ifdef CRAY_XT3
         if (*kptllnd_tunables.kptl_ptltrace_on_fail) {
-                atomic_inc(&kptllnd_data.kptl_needs_ptltrace);
-                wake_up(&kptllnd_data.kptl_watchdog_waitq);
+                cfs_atomic_inc(&kptllnd_data.kptl_needs_ptltrace);
+                cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);
         }
 #endif
 }
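kptllnd_schedule_ptltrace_dump() only raises an atomic flag and wakes the watchdog because the dump itself must not run in event-callback context. The producer/consumer handshake, reduced to its essentials; both halves are in this patch, the consumer in kptllnd_watchdog() further down:

    /* producer: safe from any context */
    cfs_atomic_inc(&kptllnd_data.kptl_needs_ptltrace);
    cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);

    /* consumer: watchdog thread; one dump however many requests queued */
    if (cfs_atomic_read(&kptllnd_data.kptl_needs_ptltrace)) {
            kptllnd_dump_ptltrace();
            cfs_atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
    }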
@@ -388,11 +388,11 @@ kptllnd_eventarg2obj (kptl_eventarg_t *eva)
         default:
                 LBUG();
         case PTLLND_EVENTARG_TYPE_BUF:
-                return list_entry(eva, kptl_rx_buffer_t, rxb_eventarg);
+                return cfs_list_entry(eva, kptl_rx_buffer_t, rxb_eventarg);
         case PTLLND_EVENTARG_TYPE_RDMA:
-                return list_entry(eva, kptl_tx_t, tx_rdma_eventarg);
+                return cfs_list_entry(eva, kptl_tx_t, tx_rdma_eventarg);
         case PTLLND_EVENTARG_TYPE_MSG:
-                return list_entry(eva, kptl_tx_t, tx_msg_eventarg);
+                return cfs_list_entry(eva, kptl_tx_t, tx_msg_eventarg);
         }
 }
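kptllnd_eventarg2obj() uses cfs_list_entry() to map a pointer to an embedded kptl_eventarg_t back to the enclosing tx or rx-buffer structure. cfs_list_entry() is the portable spelling of the kernel's list_entry()/container_of(); an illustrative definition under that assumption (the real macro is in libcfs/list.h):

    #include <stddef.h>                     /* offsetof */

    /* illustrative only, not the libcfs source */
    #define my_list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))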
 
@@ -416,23 +416,23 @@ static inline void
 kptllnd_rx_buffer_addref(kptl_rx_buffer_t *rxb)
 {
         unsigned long flags;
-        
-        spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
+
+        cfs_spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
         rxb->rxb_refcount++;
-        spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
+        cfs_spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
 }
 
 static inline void
 kptllnd_rx_buffer_decref_locked(kptl_rx_buffer_t *rxb)
 {
         if (--(rxb->rxb_refcount) == 0) {
-                spin_lock(&kptllnd_data.kptl_sched_lock);
-        
-                list_add_tail(&rxb->rxb_repost_list,
-                              &kptllnd_data.kptl_sched_rxbq);
-                wake_up(&kptllnd_data.kptl_sched_waitq);
+                cfs_spin_lock(&kptllnd_data.kptl_sched_lock);
+
+                cfs_list_add_tail(&rxb->rxb_repost_list,
+                                  &kptllnd_data.kptl_sched_rxbq);
+                cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
 
-                spin_unlock(&kptllnd_data.kptl_sched_lock);
+                cfs_spin_unlock(&kptllnd_data.kptl_sched_lock);
         }
 }
 
@@ -441,10 +441,10 @@ kptllnd_rx_buffer_decref(kptl_rx_buffer_t *rxb)
 {
         unsigned long flags;
         int           count;
-        
-        spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
+
+        cfs_spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
         count = --(rxb->rxb_refcount);
-        spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
+        cfs_spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
 
         if (count == 0)
                 kptllnd_rx_buffer_post(rxb);
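Note that rxb_refcount stays a plain int rather than becoming a cfs_atomic_t: it is only ever mutated under rxbp_lock, and the zero-action runs after the lock is dropped. Sampling the decremented count into a local is what makes the unlocked repost safe; the idiom reduced (pool stands for rxb->rxb_pool):

    cfs_spin_lock_irqsave(&pool->rxbp_lock, flags);
    count = --(rxb->rxb_refcount);          /* mutate only under the lock */
    cfs_spin_unlock_irqrestore(&pool->rxbp_lock, flags);

    if (count == 0)                         /* act on the sampled value */
            kptllnd_rx_buffer_post(rxb);    /* may block; no lock held */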
@@ -486,40 +486,40 @@ void kptllnd_peer_alive(kptl_peer_t *peer);
 static inline void
 kptllnd_peer_addref (kptl_peer_t *peer)
 {
-        atomic_inc(&peer->peer_refcount);
+        cfs_atomic_inc(&peer->peer_refcount);
 }
 
 static inline void
 kptllnd_peer_decref (kptl_peer_t *peer)
 {
-        if (atomic_dec_and_test(&peer->peer_refcount))
+        if (cfs_atomic_dec_and_test(&peer->peer_refcount))
                 kptllnd_peer_destroy(peer);
 }
 
 static inline void
 kptllnd_net_addref (kptl_net_t *net)
 {
-        LASSERT (atomic_read(&net->net_refcount) > 0);
-        atomic_inc(&net->net_refcount);
+        LASSERT (cfs_atomic_read(&net->net_refcount) > 0);
+        cfs_atomic_inc(&net->net_refcount);
 }
 
 static inline void
 kptllnd_net_decref (kptl_net_t *net)
 {
-        LASSERT (atomic_read(&net->net_refcount) > 0);
-        atomic_dec(&net->net_refcount);
+        LASSERT (cfs_atomic_read(&net->net_refcount) > 0);
+        cfs_atomic_dec(&net->net_refcount);
 }
 
 static inline void
-kptllnd_set_tx_peer(kptl_tx_t *tx, kptl_peer_t *peer) 
+kptllnd_set_tx_peer(kptl_tx_t *tx, kptl_peer_t *peer)
 {
         LASSERT (tx->tx_peer == NULL);
-        
+
         kptllnd_peer_addref(peer);
         tx->tx_peer = peer;
 }
 
-static inline struct list_head *
+static inline cfs_list_t *
 kptllnd_nid2peerlist(lnet_nid_t nid)
 {
         /* Only one copy of peer state for all logical peers, so the net part
@@ -536,9 +536,9 @@ kptllnd_id2peer(lnet_process_id_t id)
         kptl_peer_t   *peer;
         unsigned long  flags;
 
-        read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
         peer = kptllnd_id2peer_locked(id);
-        read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         return peer;
 }
@@ -569,8 +569,9 @@ kptllnd_peer_unreserve_buffers(void)
 int  kptllnd_setup_tx_descs(void);
 void kptllnd_cleanup_tx_descs(void);
 void kptllnd_tx_fini(kptl_tx_t *tx);
-void kptllnd_cancel_txlist(struct list_head *peerq, struct list_head *txs);
-void kptllnd_restart_txs(kptl_net_t *net, lnet_process_id_t id, struct list_head *restarts);
+void kptllnd_cancel_txlist(cfs_list_t *peerq, cfs_list_t *txs);
+void kptllnd_restart_txs(kptl_net_t *net, lnet_process_id_t id,
+                         cfs_list_t *restarts);
 kptl_tx_t *kptllnd_get_idle_tx(enum kptl_tx_type purpose);
 void kptllnd_tx_callback(ptl_event_t *ev);
 const char *kptllnd_tx_typestr(int type);
@@ -578,15 +579,15 @@ const char *kptllnd_tx_typestr(int type);
 static inline void
 kptllnd_tx_addref(kptl_tx_t *tx)
 {
-        atomic_inc(&tx->tx_refcount);
+        cfs_atomic_inc(&tx->tx_refcount);
 }
 
-static inline void 
+static inline void
 kptllnd_tx_decref(kptl_tx_t *tx)
 {
-        LASSERT (!in_interrupt());        /* Thread context only */
+        LASSERT (!cfs_in_interrupt());        /* Thread context only */
 
-        if (atomic_dec_and_test(&tx->tx_refcount))
+        if (cfs_atomic_dec_and_test(&tx->tx_refcount))
                 kptllnd_tx_fini(tx);
 }
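The addref/decref pairs above (peer, net, tx) share one cfs_atomic_t reference-count idiom: cfs_atomic_dec_and_test() returns true for exactly one caller, the one dropping the final reference, so teardown runs once without a lock. kptllnd_tx_decref() additionally asserts !cfs_in_interrupt() because kptllnd_tx_fini() may block. A self-contained sketch; obj_t and obj_destroy() are hypothetical names:

    typedef struct {
            cfs_atomic_t ref;
    } obj_t;

    static inline void obj_get(obj_t *o)
    {
            cfs_atomic_inc(&o->ref);
    }

    static inline void obj_put(obj_t *o)
    {
            /* only the thread that takes ref to zero destroys */
            if (cfs_atomic_dec_and_test(&o->ref))
                    obj_destroy(o);         /* hypothetical teardown */
    }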
 
index 8a0d67a..9d22e2d 100644 (file)
@@ -257,20 +257,20 @@ kptllnd_active_rdma(kptl_rx_t *rx, lnet_msg_t *lntmsg, int type,
                 return -EIO;
         }
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         tx->tx_lnet_msg = lntmsg;
         /* lnet_finalize() will be called when tx is torn down, so I must
          * return success from here on... */
 
-        tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * HZ);
+        tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * CFS_HZ);
         tx->tx_rdma_mdh = mdh;
         tx->tx_active = 1;
-        list_add_tail(&tx->tx_list, &peer->peer_activeq);
+        cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
 
         /* peer has now got my ref on 'tx' */
 
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         tx->tx_tposted = jiffies;
 
@@ -333,7 +333,7 @@ kptllnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         LASSERT (payload_niov <= LNET_MAX_IOV);
         LASSERT (payload_niov <= PTL_MD_MAX_IOV); /* !!! */
         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
 
         rc = kptllnd_find_target(net, target, &peer);
         if (rc != 0)
@@ -536,7 +536,7 @@ kptllnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 
         LASSERT (mlen <= rlen);
         LASSERT (mlen >= 0);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (!(kiov != NULL && iov != NULL)); /* never both */
         LASSERT (niov <= PTL_MD_MAX_IOV);       /* !!! */
 
@@ -637,12 +637,12 @@ kptllnd_eq_callback(ptl_event_t *ev)
         switch (eva->eva_type) {
         default:
                 LBUG();
-                
+
         case PTLLND_EVENTARG_TYPE_MSG:
         case PTLLND_EVENTARG_TYPE_RDMA:
                 kptllnd_tx_callback(ev);
                 break;
-                
+
         case PTLLND_EVENTARG_TYPE_BUF:
                 kptllnd_rx_buffer_callback(ev);
                 break;
@@ -652,7 +652,7 @@ kptllnd_eq_callback(ptl_event_t *ev)
 void
 kptllnd_thread_fini (void)
 {
-        atomic_dec(&kptllnd_data.kptl_nthreads);
+        cfs_atomic_dec(&kptllnd_data.kptl_nthreads);
 }
 
 int
@@ -660,13 +660,13 @@ kptllnd_thread_start (int (*fn)(void *arg), void *arg)
 {
         long                pid;
 
-        atomic_inc(&kptllnd_data.kptl_nthreads);
+        cfs_atomic_inc(&kptllnd_data.kptl_nthreads);
 
-        pid = kernel_thread (fn, arg, 0);
+        pid = cfs_kernel_thread (fn, arg, 0);
         if (pid >= 0)
                 return 0;
-        
-        CERROR("Failed to start kernel_thread: error %d\n", (int)pid);
+
+        CERROR("Failed to start cfs_kernel_thread: error %d\n", (int)pid);
         kptllnd_thread_fini();
         return (int)pid;
 }
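kptl_nthreads is raised before cfs_kernel_thread() is invoked and lowered again in kptllnd_thread_fini(), so a failed spawn is undone immediately and shutdown can just wait for the counter to drain. A hedged sketch of that shutdown-side wait; the helper name is hypothetical and the cfs_pause()/cfs_time_seconds() pairing is assumed from the libcfs API:

    static void kptllnd_wait_threads(void)  /* hypothetical helper */
    {
            while (cfs_atomic_read(&kptllnd_data.kptl_nthreads) != 0)
                    cfs_pause(cfs_time_seconds(1));
    }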
@@ -676,7 +676,7 @@ kptllnd_watchdog(void *arg)
 {
         int                 id = (long)arg;
         char                name[16];
-        wait_queue_t        waitlink;
+        cfs_waitlink_t      waitlink;
         int                 stamp = 0;
         int                 peer_index = 0;
         unsigned long       deadline = jiffies;
@@ -687,22 +687,22 @@ kptllnd_watchdog(void *arg)
         cfs_daemonize(name);
         cfs_block_allsigs();
 
-        init_waitqueue_entry(&waitlink, current);
+        cfs_waitlink_init(&waitlink);
 
         /* threads shut down in phase 2 after all peers have been destroyed */
         while (kptllnd_data.kptl_shutdown < 2) {
 
                 /* add a check for needs ptltrace
                  * yes, this is blatant hijacking of this thread
-                 * we can't dump directly from tx or rx _callbacks as it deadlocks portals
-                 * and takes out the node
+                 * we can't dump directly from tx or rx _callbacks as it
+                 * deadlocks portals and takes out the node
                 */
 
-                if (atomic_read(&kptllnd_data.kptl_needs_ptltrace)) {
+                if (cfs_atomic_read(&kptllnd_data.kptl_needs_ptltrace)) {
 #ifdef CRAY_XT3
                         kptllnd_dump_ptltrace();
                         /* we only dump once, no matter how many pending */
-                        atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
+                        cfs_atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
 #else
                         LBUG();
 #endif
@@ -736,21 +736,21 @@ kptllnd_watchdog(void *arg)
                                      kptllnd_data.kptl_peer_hash_size;
                         }
 
-                        deadline += p * HZ;
+                        deadline += p * CFS_HZ;
                         stamp++;
                         continue;
                 }
 
                 kptllnd_handle_closing_peers();
 
-                set_current_state(TASK_INTERRUPTIBLE);
-                add_wait_queue_exclusive(&kptllnd_data.kptl_watchdog_waitq,
-                                         &waitlink);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add_exclusive(&kptllnd_data.kptl_watchdog_waitq,
+                                        &waitlink);
 
-                schedule_timeout(timeout);
-                
-                set_current_state (TASK_RUNNING);
-                remove_wait_queue(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
+                cfs_waitq_timedwait(&waitlink, CFS_TASK_INTERRUPTIBLE, timeout);
+
+                cfs_set_current_state (CFS_TASK_RUNNING);
+                cfs_waitq_del(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
         }
 
         kptllnd_thread_fini();
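The watchdog loop shows the standard libcfs sleep sequence that replaces the native prepare-to-wait dance: initialise a cfs_waitlink_t once, then set task state, enqueue, wait, restore state, dequeue. The bare idiom, with waitq, condition and timeout as placeholders:

    cfs_waitlink_t wl;

    cfs_waitlink_init(&wl);

    cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
    cfs_waitq_add_exclusive(&waitq, &wl);

    if (!condition)     /* re-check after queueing to avoid lost wakeups */
            cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE, timeout);

    cfs_set_current_state(CFS_TASK_RUNNING);
    cfs_waitq_del(&waitq, &wl);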
@@ -763,7 +763,7 @@ kptllnd_scheduler (void *arg)
 {
         int                 id = (long)arg;
         char                name[16];
-        wait_queue_t        waitlink;
+        cfs_waitlink_t      waitlink;
         unsigned long       flags;
         int                 did_something;
         int                 counter = 0;
@@ -775,54 +775,61 @@ kptllnd_scheduler (void *arg)
         cfs_daemonize(name);
         cfs_block_allsigs();
 
-        init_waitqueue_entry(&waitlink, current);
+        cfs_waitlink_init(&waitlink);
 
-        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
         /* threads shut down in phase 2 after all peers have been destroyed */
         while (kptllnd_data.kptl_shutdown < 2) {
 
                 did_something = 0;
 
-                if (!list_empty(&kptllnd_data.kptl_sched_rxq)) {
-                        rx = list_entry (kptllnd_data.kptl_sched_rxq.next,
-                                         kptl_rx_t, rx_list);
-                        list_del(&rx->rx_list);
-                        
-                        spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
-                                               flags);
+                if (!cfs_list_empty(&kptllnd_data.kptl_sched_rxq)) {
+                        rx = cfs_list_entry (kptllnd_data.kptl_sched_rxq.next,
+                                             kptl_rx_t, rx_list);
+                        cfs_list_del(&rx->rx_list);
+
+                        cfs_spin_unlock_irqrestore(
+                                &kptllnd_data.kptl_sched_lock,
+                                flags);
 
                         kptllnd_rx_parse(rx);
                         did_something = 1;
 
-                        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+                        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+                                              flags);
                 }
 
-                if (!list_empty(&kptllnd_data.kptl_sched_rxbq)) {
-                        rxb = list_entry (kptllnd_data.kptl_sched_rxbq.next,
-                                          kptl_rx_buffer_t, rxb_repost_list);
-                        list_del(&rxb->rxb_repost_list);
+                if (!cfs_list_empty(&kptllnd_data.kptl_sched_rxbq)) {
+                        rxb = cfs_list_entry (kptllnd_data.kptl_sched_rxbq.next,
+                                              kptl_rx_buffer_t,
+                                              rxb_repost_list);
+                        cfs_list_del(&rxb->rxb_repost_list);
 
-                        spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
-                                               flags);
+                        cfs_spin_unlock_irqrestore(
+                                &kptllnd_data.kptl_sched_lock,
+                                flags);
 
                         kptllnd_rx_buffer_post(rxb);
                         did_something = 1;
 
-                        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+                        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+                                              flags);
                 }
 
-                if (!list_empty(&kptllnd_data.kptl_sched_txq)) {
-                        tx = list_entry (kptllnd_data.kptl_sched_txq.next,
-                                         kptl_tx_t, tx_list);
-                        list_del_init(&tx->tx_list);
+                if (!cfs_list_empty(&kptllnd_data.kptl_sched_txq)) {
+                        tx = cfs_list_entry (kptllnd_data.kptl_sched_txq.next,
+                                             kptl_tx_t, tx_list);
+                        cfs_list_del_init(&tx->tx_list);
 
-                        spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+                        cfs_spin_unlock_irqrestore(
+                                &kptllnd_data.kptl_sched_lock, flags);
 
                         kptllnd_tx_fini(tx);
                         did_something = 1;
 
-                        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+                        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+                                              flags);
                 }
 
                 if (did_something) {
@@ -830,25 +837,26 @@ kptllnd_scheduler (void *arg)
                                 continue;
                 }
 
-                set_current_state(TASK_INTERRUPTIBLE);
-                add_wait_queue_exclusive(&kptllnd_data.kptl_sched_waitq,
-                                         &waitlink);
-                spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
+                                        &waitlink);
+                cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+                                           flags);
 
                 if (!did_something)
-                        schedule(); 
+                        cfs_waitq_wait(&waitlink, CFS_TASK_INTERRUPTIBLE);
                 else
-                        cond_resched();
+                        cfs_cond_resched();
 
-                set_current_state(TASK_RUNNING);
-                remove_wait_queue(&kptllnd_data.kptl_sched_waitq, &waitlink);
+                cfs_set_current_state(CFS_TASK_RUNNING);
+                cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
 
-                spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+                cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
                 counter = 0;
         }
 
-        spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+        cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
 
         kptllnd_thread_fini();
         return 0;
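The scheduler repeats one pattern for each of its three queues: detach a single entry while holding kptl_sched_lock, drop the lock to do the possibly-blocking work, then re-take it before looking again. Reduced form; item_t, it_list and handle() are placeholders:

    cfs_spin_lock_irqsave(&sched_lock, flags);
    while (!cfs_list_empty(&queue)) {
            item = cfs_list_entry(queue.next, item_t, it_list);
            cfs_list_del(&item->it_list);
            cfs_spin_unlock_irqrestore(&sched_lock, flags);

            handle(item);               /* may sleep; lock not held */

            cfs_spin_lock_irqsave(&sched_lock, flags);
    }
    cfs_spin_unlock_irqrestore(&sched_lock, flags);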
index 62ff41f..eacbe19 100644 (file)
 #include <libcfs/list.h>
 
 static int
-kptllnd_count_queue(struct list_head *q)
+kptllnd_count_queue(cfs_list_t *q)
 {
-        struct list_head *e;
-        int               n = 0;
-        
-        list_for_each(e, q) {
+        cfs_list_t *e;
+        int         n = 0;
+
+        cfs_list_for_each(e, q) {
                 n++;
         }
 
@@ -56,37 +56,37 @@ kptllnd_count_queue(struct list_head *q)
 }
 
 int
-kptllnd_get_peer_info(int index, 
+kptllnd_get_peer_info(int index,
                       lnet_process_id_t *id,
                       int *state, int *sent_hello,
                       int *refcount, __u64 *incarnation,
                       __u64 *next_matchbits, __u64 *last_matchbits_seen,
                       int *nsendq, int *nactiveq,
-                      int *credits, int *outstanding_credits) 
+                      int *credits, int *outstanding_credits)
 {
-        rwlock_t         *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+        cfs_rwlock_t     *g_lock = &kptllnd_data.kptl_peer_rw_lock;
         unsigned long     flags;
-        struct list_head *ptmp;
+        cfs_list_t       *ptmp;
         kptl_peer_t      *peer;
         int               i;
         int               rc = -ENOENT;
 
-        read_lock_irqsave(g_lock, flags);
+        cfs_read_lock_irqsave(g_lock, flags);
 
         for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++) {
-                list_for_each (ptmp, &kptllnd_data.kptl_peers[i]) {
-                        peer = list_entry(ptmp, kptl_peer_t, peer_list);
+                cfs_list_for_each (ptmp, &kptllnd_data.kptl_peers[i]) {
+                        peer = cfs_list_entry(ptmp, kptl_peer_t, peer_list);
 
                         if (index-- > 0)
                                 continue;
-                        
+
                         *id          = peer->peer_id;
                         *state       = peer->peer_state;
                         *sent_hello  = peer->peer_sent_hello;
-                        *refcount    = atomic_read(&peer->peer_refcount);
+                        *refcount    = cfs_atomic_read(&peer->peer_refcount);
                         *incarnation = peer->peer_incarnation;
 
-                        spin_lock(&peer->peer_lock);
+                        cfs_spin_lock(&peer->peer_lock);
 
                         *next_matchbits      = peer->peer_next_matchbits;
                         *last_matchbits_seen = peer->peer_last_matchbits_seen;
@@ -96,15 +96,15 @@ kptllnd_get_peer_info(int index,
                         *nsendq   = kptllnd_count_queue(&peer->peer_sendq);
                         *nactiveq = kptllnd_count_queue(&peer->peer_activeq);
 
-                        spin_unlock(&peer->peer_lock);
+                        cfs_spin_unlock(&peer->peer_lock);
 
                         rc = 0;
                         goto out;
                 }
         }
-        
+
  out:
-        read_unlock_irqrestore(g_lock, flags);
+        cfs_read_unlock_irqrestore(g_lock, flags);
         return rc;
 }
 
@@ -116,13 +116,13 @@ kptllnd_peer_add_peertable_locked (kptl_peer_t *peer)
 
         LASSERT (peer->peer_state == PEER_STATE_WAITING_HELLO ||
                  peer->peer_state == PEER_STATE_ACTIVE);
-        
+
         kptllnd_data.kptl_n_active_peers++;
-        atomic_inc(&peer->peer_refcount);       /* +1 ref for the list */
+        cfs_atomic_inc(&peer->peer_refcount);       /* +1 ref for the list */
 
         /* NB add to HEAD of peer list for MRU order!
          * (see kptllnd_cull_peertable) */
-        list_add(&peer->peer_list, kptllnd_nid2peerlist(peer->peer_id.nid));
+        cfs_list_add(&peer->peer_list, kptllnd_nid2peerlist(peer->peer_id.nid));
 }
 
 void
@@ -131,18 +131,18 @@ kptllnd_cull_peertable_locked (lnet_process_id_t pid)
         /* I'm about to add a new peer with this portals ID to the peer table,
          * so (a) this peer should not exist already and (b) I want to leave at
          * most (max_procs_per_nid - 1) peers with this NID in the table. */
-        struct list_head  *peers = kptllnd_nid2peerlist(pid.nid);
-        int                cull_count = *kptllnd_tunables.kptl_max_procs_per_node;
-        int                count;
-        struct list_head  *tmp;
-        struct list_head  *nxt;
-        kptl_peer_t       *peer;
-        
+        cfs_list_t   *peers = kptllnd_nid2peerlist(pid.nid);
+        int           cull_count = *kptllnd_tunables.kptl_max_procs_per_node;
+        int           count;
+        cfs_list_t   *tmp;
+        cfs_list_t   *nxt;
+        kptl_peer_t  *peer;
+
         count = 0;
-        list_for_each_safe (tmp, nxt, peers) {
+        cfs_list_for_each_safe (tmp, nxt, peers) {
                 /* NB I rely on kptllnd_peer_add_peertable_locked to add peers
                  * in MRU order */
-                peer = list_entry(tmp, kptl_peer_t, peer_list);
+                peer = cfs_list_entry(tmp, kptl_peer_t, peer_list);
                         
                 if (LNET_NIDADDR(peer->peer_id.nid) != LNET_NIDADDR(pid.nid))
                         continue;
@@ -178,10 +178,10 @@ kptllnd_peer_allocate (kptl_net_t *net, lnet_process_id_t lpid, ptl_process_id_t
 
         memset(peer, 0, sizeof(*peer));         /* zero flags etc */
 
-        INIT_LIST_HEAD (&peer->peer_noops);
-        INIT_LIST_HEAD (&peer->peer_sendq);
-        INIT_LIST_HEAD (&peer->peer_activeq);
-        spin_lock_init (&peer->peer_lock);
+        CFS_INIT_LIST_HEAD (&peer->peer_noops);
+        CFS_INIT_LIST_HEAD (&peer->peer_sendq);
+        CFS_INIT_LIST_HEAD (&peer->peer_activeq);
+        cfs_spin_lock_init (&peer->peer_lock);
 
         peer->peer_state = PEER_STATE_ALLOCATED;
         peer->peer_error = 0;
@@ -194,22 +194,23 @@ kptllnd_peer_allocate (kptl_net_t *net, lnet_process_id_t lpid, ptl_process_id_t
         peer->peer_sent_credits = 1;           /* HELLO credit is implicit */
         peer->peer_max_msg_size = PTLLND_MIN_BUFFER_SIZE; /* until we know better */
 
-        atomic_set(&peer->peer_refcount, 1);    /* 1 ref for caller */
+        cfs_atomic_set(&peer->peer_refcount, 1);    /* 1 ref for caller */
 
-        write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         peer->peer_myincarnation = kptllnd_data.kptl_incarnation;
 
         /* Only increase # peers under lock, to guarantee we dont grow it
          * during shutdown */
         if (net->net_shutdown) {
-                write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+                cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+                                            flags);
                 LIBCFS_FREE(peer, sizeof(*peer));
                 return NULL;
         }
 
         kptllnd_data.kptl_npeers++;
-        write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
         return peer;
 }
 
@@ -217,41 +218,41 @@ void
 kptllnd_peer_destroy (kptl_peer_t *peer)
 {
         unsigned long flags;
-        
+
         CDEBUG(D_NET, "Peer=%p\n", peer);
 
-        LASSERT (!in_interrupt());
-        LASSERT (atomic_read(&peer->peer_refcount) == 0);
+        LASSERT (!cfs_in_interrupt());
+        LASSERT (cfs_atomic_read(&peer->peer_refcount) == 0);
         LASSERT (peer->peer_state == PEER_STATE_ALLOCATED ||
                  peer->peer_state == PEER_STATE_ZOMBIE);
-        LASSERT (list_empty(&peer->peer_noops));
-        LASSERT (list_empty(&peer->peer_sendq));
-        LASSERT (list_empty(&peer->peer_activeq));
+        LASSERT (cfs_list_empty(&peer->peer_noops));
+        LASSERT (cfs_list_empty(&peer->peer_sendq));
+        LASSERT (cfs_list_empty(&peer->peer_activeq));
 
-        write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         if (peer->peer_state == PEER_STATE_ZOMBIE)
-                list_del(&peer->peer_list);
+                cfs_list_del(&peer->peer_list);
 
         kptllnd_data.kptl_npeers--;
 
-        write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         LIBCFS_FREE (peer, sizeof (*peer));
 }
 
 void
-kptllnd_cancel_txlist (struct list_head *peerq, struct list_head *txs)
+kptllnd_cancel_txlist (cfs_list_t *peerq, cfs_list_t *txs)
 {
-        struct list_head  *tmp;
-        struct list_head  *nxt;
-        kptl_tx_t         *tx;
+        cfs_list_t  *tmp;
+        cfs_list_t  *nxt;
+        kptl_tx_t   *tx;
 
-        list_for_each_safe (tmp, nxt, peerq) {
-                tx = list_entry(tmp, kptl_tx_t, tx_list);
+        cfs_list_for_each_safe (tmp, nxt, peerq) {
+                tx = cfs_list_entry(tmp, kptl_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
-                list_add_tail(&tx->tx_list, txs);
+                cfs_list_del(&tx->tx_list);
+                cfs_list_add_tail(&tx->tx_list, txs);
 
                 tx->tx_status = -EIO;
                 tx->tx_active = 0;
@@ -259,17 +260,17 @@ kptllnd_cancel_txlist (struct list_head *peerq, struct list_head *txs)
 }
 
 void
-kptllnd_peer_cancel_txs(kptl_peer_t *peer, struct list_head *txs)
+kptllnd_peer_cancel_txs(kptl_peer_t *peer, cfs_list_t *txs)
 {
         unsigned long   flags;
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         kptllnd_cancel_txlist(&peer->peer_noops, txs);
         kptllnd_cancel_txlist(&peer->peer_sendq, txs);
         kptllnd_cancel_txlist(&peer->peer_activeq, txs);
                 
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 }
 
 void
@@ -277,7 +278,7 @@ kptllnd_peer_alive (kptl_peer_t *peer)
 {
         /* This is racy, but everyone's only writing cfs_time_current() */
         peer->peer_last_alive = cfs_time_current();
-        mb();
+        cfs_mb();
 }
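kptllnd_peer_alive() writes peer_last_alive without taking peer_lock; cfs_mb() is the portable full memory barrier that publishes the store before any later reader samples it. Its Linux mapping is presumably just the native barrier:

    #define cfs_mb()    mb()    /* assumed Linux-side definition */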
 
 void
@@ -290,24 +291,24 @@ kptllnd_peer_notify (kptl_peer_t *peer)
         int           nnets = 0;
         int           error = 0;
         cfs_time_t    last_alive = 0;
-        
-        spin_lock_irqsave(&peer->peer_lock, flags);
+
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         if (peer->peer_error != 0) {
                 error = peer->peer_error;
                 peer->peer_error = 0;
                 last_alive = peer->peer_last_alive;
         }
-        
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         if (error == 0)
                 return;
 
-        read_lock(&kptllnd_data.kptl_net_rw_lock);
-        list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list)
+        cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list)
                 nnets++;
-        read_unlock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         if (nnets == 0) /* shutdown in progress */
                 return;
@@ -319,15 +320,15 @@ kptllnd_peer_notify (kptl_peer_t *peer)
         }
         memset(nets, 0, nnets * sizeof(*nets));
 
-        read_lock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
         i = 0;
-        list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
+        cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
                 LASSERT (i < nnets);
                 nets[i] = net;
                 kptllnd_net_addref(net);
                 i++;
         }
-        read_unlock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         for (i = 0; i < nnets; i++) {
                 lnet_nid_t peer_nid;
@@ -352,32 +353,32 @@ void
 kptllnd_handle_closing_peers ()
 {
         unsigned long           flags;
-        struct list_head        txs;
+        cfs_list_t              txs;
         kptl_peer_t            *peer;
-        struct list_head       *tmp;
-        struct list_head       *nxt;
+        cfs_list_t             *tmp;
+        cfs_list_t             *nxt;
         kptl_tx_t              *tx;
         int                     idle;
 
         /* Check with a read lock first to avoid blocking anyone */
 
-        read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
-        idle = list_empty(&kptllnd_data.kptl_closing_peers) &&
-               list_empty(&kptllnd_data.kptl_zombie_peers);
-        read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        idle = cfs_list_empty(&kptllnd_data.kptl_closing_peers) &&
+               cfs_list_empty(&kptllnd_data.kptl_zombie_peers);
+        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         if (idle)
                 return;
 
-        INIT_LIST_HEAD(&txs);
+        CFS_INIT_LIST_HEAD(&txs);
 
-        write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         /* Cancel txs on all zombie peers.  NB anyone dropping the last peer
          * ref removes it from this list, so I mustn't drop the lock while
          * scanning it. */
-        list_for_each (tmp, &kptllnd_data.kptl_zombie_peers) {
-                peer = list_entry (tmp, kptl_peer_t, peer_list);
+        cfs_list_for_each (tmp, &kptllnd_data.kptl_zombie_peers) {
+                peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
 
                 LASSERT (peer->peer_state == PEER_STATE_ZOMBIE);
 
@@ -388,33 +389,34 @@ kptllnd_handle_closing_peers ()
          * I'm the only one removing from this list, but peers can be added on
          * the end any time I drop the lock. */
 
-        list_for_each_safe (tmp, nxt, &kptllnd_data.kptl_closing_peers) {
-                peer = list_entry (tmp, kptl_peer_t, peer_list);
+        cfs_list_for_each_safe (tmp, nxt, &kptllnd_data.kptl_closing_peers) {
+                peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
 
                 LASSERT (peer->peer_state == PEER_STATE_CLOSING);
 
-                list_del(&peer->peer_list);
-                list_add_tail(&peer->peer_list,
-                              &kptllnd_data.kptl_zombie_peers);
+                cfs_list_del(&peer->peer_list);
+                cfs_list_add_tail(&peer->peer_list,
+                                  &kptllnd_data.kptl_zombie_peers);
                 peer->peer_state = PEER_STATE_ZOMBIE;
 
-                write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+                cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+                                            flags);
 
                 kptllnd_peer_notify(peer);
                 kptllnd_peer_cancel_txs(peer, &txs);
                 kptllnd_peer_decref(peer);
 
-                write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+                cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
         }
 
-        write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         /* Drop peer's ref on all cancelled txs.  This will get
          * kptllnd_tx_fini() to abort outstanding comms if necessary. */
 
-        list_for_each_safe (tmp, nxt, &txs) {
-                tx = list_entry(tmp, kptl_tx_t, tx_list);
-                list_del(&tx->tx_list);
+        cfs_list_for_each_safe (tmp, nxt, &txs) {
+                tx = cfs_list_entry(tmp, kptl_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);
                 kptllnd_tx_decref(tx);
         }
 }
@@ -437,16 +439,16 @@ kptllnd_peer_close_locked(kptl_peer_t *peer, int why)
                 kptllnd_data.kptl_n_active_peers--;
                 LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
 
-                list_del(&peer->peer_list);
+                cfs_list_del(&peer->peer_list);
                 kptllnd_peer_unreserve_buffers();
 
                 peer->peer_error = why; /* stash 'why' only on first close */
                 peer->peer_state = PEER_STATE_CLOSING;
 
                 /* Schedule for immediate attention, taking peer table's ref */
-                list_add_tail(&peer->peer_list, 
-                              &kptllnd_data.kptl_closing_peers);
-                wake_up(&kptllnd_data.kptl_watchdog_waitq);
+                cfs_list_add_tail(&peer->peer_list,
+                                  &kptllnd_data.kptl_closing_peers);
+                cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);
                 break;
 
         case PEER_STATE_ZOMBIE:
@@ -460,16 +462,16 @@ kptllnd_peer_close(kptl_peer_t *peer, int why)
 {
         unsigned long      flags;
 
-        write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
         kptllnd_peer_close_locked(peer, why);
-        write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 }
 
 int
 kptllnd_peer_del(lnet_process_id_t id)
 {
-        struct list_head  *ptmp;
-        struct list_head  *pnxt;
+        cfs_list_t        *ptmp;
+        cfs_list_t        *pnxt;
         kptl_peer_t       *peer;
         int                lo;
         int                hi;
@@ -482,23 +484,24 @@ kptllnd_peer_del(lnet_process_id_t id)
          * wildcard (LNET_NID_ANY) then look at all of the buckets
          */
         if (id.nid != LNET_NID_ANY) {
-                struct list_head *l = kptllnd_nid2peerlist(id.nid);
-                
+                cfs_list_t *l = kptllnd_nid2peerlist(id.nid);
+
                 lo = hi =  l - kptllnd_data.kptl_peers;
         } else {
                 if (id.pid != LNET_PID_ANY)
                         return -EINVAL;
-                
+
                 lo = 0;
                 hi = kptllnd_data.kptl_peer_hash_size - 1;
         }
 
 again:
-        read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &kptllnd_data.kptl_peers[i]) {
-                        peer = list_entry (ptmp, kptl_peer_t, peer_list);
+                cfs_list_for_each_safe (ptmp, pnxt,
+                                        &kptllnd_data.kptl_peers[i]) {
+                        peer = cfs_list_entry (ptmp, kptl_peer_t, peer_list);
 
                         if (!(id.nid == LNET_NID_ANY || 
                               (LNET_NIDADDR(peer->peer_id.nid) == LNET_NIDADDR(id.nid) &&
@@ -508,8 +511,9 @@ again:
 
                         kptllnd_peer_addref(peer); /* 1 ref for me... */
 
-                        read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
-                                               flags);
+                        cfs_read_unlock_irqrestore(
+                                &kptllnd_data.kptl_peer_rw_lock,
+                                flags);
 
                         kptllnd_peer_close(peer, 0);
                         kptllnd_peer_decref(peer); /* ...until here */
@@ -521,7 +525,7 @@ again:
                 }
         }
 
-        read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         return (rc);
 }
@@ -532,17 +536,17 @@ kptllnd_queue_tx(kptl_peer_t *peer, kptl_tx_t *tx)
         /* CAVEAT EMPTOR: I take over caller's ref on 'tx' */
         unsigned long flags;
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         /* Ensure HELLO is sent first */
         if (tx->tx_msg->ptlm_type == PTLLND_MSG_TYPE_NOOP)
-                list_add(&tx->tx_list, &peer->peer_noops);
+                cfs_list_add(&tx->tx_list, &peer->peer_noops);
         else if (tx->tx_msg->ptlm_type == PTLLND_MSG_TYPE_HELLO)
-                list_add(&tx->tx_list, &peer->peer_sendq);
+                cfs_list_add(&tx->tx_list, &peer->peer_sendq);
         else
-                list_add_tail(&tx->tx_list, &peer->peer_sendq);
+                cfs_list_add_tail(&tx->tx_list, &peer->peer_sendq);
 
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 }
 
 
@@ -596,7 +600,7 @@ kptllnd_post_tx(kptl_peer_t *peer, kptl_tx_t *tx, int nfrag)
         }
 
 
-        tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * HZ);
+        tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * CFS_HZ);
         tx->tx_active = 1;
         tx->tx_msg_mdh = msg_mdh;
         kptllnd_queue_tx(peer, tx);
@@ -604,24 +608,25 @@ kptllnd_post_tx(kptl_peer_t *peer, kptl_tx_t *tx, int nfrag)
 
 /* NB "restarts" comes from peer_sendq of a single peer */
 void
-kptllnd_restart_txs (kptl_net_t *net, lnet_process_id_t target, struct list_head *restarts)
+kptllnd_restart_txs (kptl_net_t *net, lnet_process_id_t target,
+                     cfs_list_t *restarts)
 {
         kptl_tx_t   *tx;
         kptl_tx_t   *tmp;
         kptl_peer_t *peer;
 
-        LASSERT (!list_empty(restarts));
+        LASSERT (!cfs_list_empty(restarts));
 
         if (kptllnd_find_target(net, target, &peer) != 0)
                 peer = NULL;
 
-        list_for_each_entry_safe (tx, tmp, restarts, tx_list) {
+        cfs_list_for_each_entry_safe (tx, tmp, restarts, tx_list) {
                 LASSERT (tx->tx_peer != NULL);
                 LASSERT (tx->tx_type == TX_TYPE_GET_REQUEST ||
                          tx->tx_type == TX_TYPE_PUT_REQUEST ||
                          tx->tx_type == TX_TYPE_SMALL_MESSAGE);
 
-                list_del_init(&tx->tx_list);
+                cfs_list_del_init(&tx->tx_list);
 
                 if (peer == NULL ||
                     tx->tx_msg->ptlm_type == PTLLND_MSG_TYPE_HELLO) {
@@ -650,12 +655,12 @@ kptllnd_peer_send_noop (kptl_peer_t *peer)
 {
         if (!peer->peer_sent_hello ||
             peer->peer_credits == 0 ||
-            !list_empty(&peer->peer_noops) ||
+            !cfs_list_empty(&peer->peer_noops) ||
             peer->peer_outstanding_credits < PTLLND_CREDIT_HIGHWATER)
                 return 0;
 
         /* No tx to piggyback NOOP onto or no credit to send a tx */
-        return (list_empty(&peer->peer_sendq) || peer->peer_credits == 1);
+        return (cfs_list_empty(&peer->peer_sendq) || peer->peer_credits == 1);
 }
 
 void
@@ -667,15 +672,15 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
         int              msg_type;
         unsigned long    flags;
 
-        LASSERT(!in_interrupt());
+        LASSERT(!cfs_in_interrupt());
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         peer->peer_retry_noop = 0;
 
         if (kptllnd_peer_send_noop(peer)) {
                 /* post a NOOP to return credits */
-                spin_unlock_irqrestore(&peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                 tx = kptllnd_get_idle_tx(TX_TYPE_SMALL_MESSAGE);
                 if (tx == NULL) {
@@ -687,18 +692,18 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
                         kptllnd_post_tx(peer, tx, 0);
                 }
 
-                spin_lock_irqsave(&peer->peer_lock, flags);
+                cfs_spin_lock_irqsave(&peer->peer_lock, flags);
                 peer->peer_retry_noop = (tx == NULL);
         }
 
         for (;;) {
-                if (!list_empty(&peer->peer_noops)) {
+                if (!cfs_list_empty(&peer->peer_noops)) {
                         LASSERT (peer->peer_sent_hello);
-                        tx = list_entry(peer->peer_noops.next,
-                                        kptl_tx_t, tx_list);
-                } else if (!list_empty(&peer->peer_sendq)) {
-                        tx = list_entry(peer->peer_sendq.next,
-                                        kptl_tx_t, tx_list);
+                        tx = cfs_list_entry(peer->peer_noops.next,
+                                            kptl_tx_t, tx_list);
+                } else if (!cfs_list_empty(&peer->peer_sendq)) {
+                        tx = cfs_list_entry(peer->peer_sendq.next,
+                                            kptl_tx_t, tx_list);
                 } else {
                         /* nothing to send right now */
                         break;
@@ -719,7 +724,7 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
 
                 /* Ensure HELLO is sent first */
                 if (!peer->peer_sent_hello) {
-                        LASSERT (list_empty(&peer->peer_noops));
+                        LASSERT (cfs_list_empty(&peer->peer_noops));
                         if (msg_type != PTLLND_MSG_TYPE_HELLO)
                                 break;
                         peer->peer_sent_hello = 1;
@@ -749,7 +754,7 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
                         break;
                 }
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
 
                 /* Discard any NOOP I queued if I'm not at the high-water mark
                  * any more or more messages have been queued */
@@ -757,13 +762,13 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
                     !kptllnd_peer_send_noop(peer)) {
                         tx->tx_active = 0;
 
-                        spin_unlock_irqrestore(&peer->peer_lock, flags);
+                        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                         CDEBUG(D_NET, "%s: redundant noop\n", 
                                libcfs_id2str(peer->peer_id));
                         kptllnd_tx_decref(tx);
 
-                        spin_lock_irqsave(&peer->peer_lock, flags);
+                        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
                         continue;
                 }
 
@@ -790,11 +795,11 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
                        kptllnd_msgtype2str(msg_type), tx, tx->tx_msg->ptlm_nob,
                        tx->tx_msg->ptlm_credits);
 
-                list_add_tail(&tx->tx_list, &peer->peer_activeq);
+                cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
 
                 kptllnd_tx_addref(tx);          /* 1 ref for me... */
 
-                spin_unlock_irqrestore(&peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                 if (tx->tx_type == TX_TYPE_PUT_REQUEST ||
                     tx->tx_type == TX_TYPE_GET_REQUEST) {
@@ -850,10 +855,10 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
 
                 kptllnd_tx_decref(tx);          /* drop my ref */
 
-                spin_lock_irqsave(&peer->peer_lock, flags);
+                cfs_spin_lock_irqsave(&peer->peer_lock, flags);
         }
 
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
         return;
 
  failed:
@@ -867,21 +872,21 @@ kptl_tx_t *
 kptllnd_find_timed_out_tx(kptl_peer_t *peer)
 {
         kptl_tx_t         *tx;
-        struct list_head  *ele;
+        cfs_list_t        *ele;
 
-        list_for_each(ele, &peer->peer_sendq) {
-                tx = list_entry(ele, kptl_tx_t, tx_list);
+        cfs_list_for_each(ele, &peer->peer_sendq) {
+                tx = cfs_list_entry(ele, kptl_tx_t, tx_list);
 
-                if (time_after_eq(jiffies, tx->tx_deadline)) {
+                if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
                         kptllnd_tx_addref(tx);
                         return tx;
                 }
         }
 
-        list_for_each(ele, &peer->peer_activeq) {
-                tx = list_entry(ele, kptl_tx_t, tx_list);
+        cfs_list_for_each(ele, &peer->peer_activeq) {
+                tx = cfs_list_entry(ele, kptl_tx_t, tx_list);
 
-                if (time_after_eq(jiffies, tx->tx_deadline)) {
+                if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
                         kptllnd_tx_addref(tx);
                         return tx;
                 }
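cfs_time_aftereq() stands in for the kernel's time_after_eq(): the deadline comparison must stay correct across jiffies wraparound, which signed subtraction provides. An illustrative definition; the actual libcfs macro may differ:

    /* true iff a is at or after b, correct across wraparound */
    #define my_time_aftereq(a, b)   ((long)(a) - (long)(b) >= 0)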
@@ -894,7 +899,7 @@ kptllnd_find_timed_out_tx(kptl_peer_t *peer)
 void
 kptllnd_peer_check_bucket (int idx, int stamp)
 {
-        struct list_head  *peers = &kptllnd_data.kptl_peers[idx];
+        cfs_list_t        *peers = &kptllnd_data.kptl_peers[idx];
         kptl_peer_t       *peer;
         unsigned long      flags;
 
@@ -902,9 +907,9 @@ kptllnd_peer_check_bucket (int idx, int stamp)
 
  again:
         /* NB. Shared lock while I just look */
-        read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
-        list_for_each_entry (peer, peers, peer_list) {
+        cfs_list_for_each_entry (peer, peers, peer_list) {
                 kptl_tx_t *tx;
                 int        check_sends;
                 int        c = -1, oc = -1, sc = -1;
@@ -915,18 +920,18 @@ kptllnd_peer_check_bucket (int idx, int stamp)
                        libcfs_id2str(peer->peer_id), peer->peer_credits, 
                        peer->peer_outstanding_credits, peer->peer_sent_credits);
 
-                spin_lock(&peer->peer_lock);
+                cfs_spin_lock(&peer->peer_lock);
 
                 if (peer->peer_check_stamp == stamp) {
                         /* checked already this pass */
-                        spin_unlock(&peer->peer_lock);
+                        cfs_spin_unlock(&peer->peer_lock);
                         continue;
                 }
 
                 peer->peer_check_stamp = stamp;
                 tx = kptllnd_find_timed_out_tx(peer);
                 check_sends = peer->peer_retry_noop;
-                
+
                 if (tx != NULL) {
                         c  = peer->peer_credits;
                         sc = peer->peer_sent_credits;
@@ -937,14 +942,15 @@ kptllnd_peer_check_bucket (int idx, int stamp)
                         nactive = kptllnd_count_queue(&peer->peer_activeq);
                 }
 
-                spin_unlock(&peer->peer_lock);
-                
+                cfs_spin_unlock(&peer->peer_lock);
+
                 if (tx == NULL && !check_sends)
                         continue;
 
                 kptllnd_peer_addref(peer); /* 1 ref for me... */
 
-                read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+                cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+                                           flags);
 
                 if (tx == NULL) { /* nothing timed out */
                         kptllnd_peer_check_sends(peer);
@@ -956,8 +962,8 @@ kptllnd_peer_check_bucket (int idx, int stamp)
 
                 LCONSOLE_ERROR_MSG(0x126, "Timing out %s: %s\n",
                                    libcfs_id2str(peer->peer_id),
-                                   (tx->tx_tposted == 0) ? 
-                                   "no free peer buffers" : 
+                                   (tx->tx_tposted == 0) ?
+                                   "no free peer buffers" :
                                    "please check Portals");
 
                if (tx->tx_tposted) {
@@ -1009,18 +1015,18 @@ kptllnd_peer_check_bucket (int idx, int stamp)
                 goto again;
         }
 
-        read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 }
 
 kptl_peer_t *
 kptllnd_id2peer_locked (lnet_process_id_t id)
 {
-        struct list_head *peers = kptllnd_nid2peerlist(id.nid);
-        struct list_head *tmp;
+        cfs_list_t       *peers = kptllnd_nid2peerlist(id.nid);
+        cfs_list_t       *tmp;
         kptl_peer_t      *peer;
 
-        list_for_each (tmp, peers) {
-                peer = list_entry (tmp, kptl_peer_t, peer_list);
+        cfs_list_for_each (tmp, peers) {
+                peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
 
                 LASSERT(peer->peer_state == PEER_STATE_WAITING_HELLO ||
                         peer->peer_state == PEER_STATE_ACTIVE);
@@ -1035,7 +1041,7 @@ kptllnd_id2peer_locked (lnet_process_id_t id)
                 CDEBUG(D_NET, "%s -> %s (%d)\n",
                        libcfs_id2str(id),
                        kptllnd_ptlid2str(peer->peer_ptlid),
-                       atomic_read (&peer->peer_refcount));
+                       cfs_atomic_read (&peer->peer_refcount));
                 return peer;
         }
 
@@ -1056,8 +1062,8 @@ kptllnd_peertable_overflow_msg(char *str, lnet_process_id_t id)
 __u64
 kptllnd_get_last_seen_matchbits_locked(lnet_process_id_t lpid)
 {
-        kptl_peer_t            *peer;
-        struct list_head       *tmp;
+        kptl_peer_t  *peer;
+        cfs_list_t   *tmp;
 
         /* Find the last matchbits I saw this new peer using.  Note..
            A. This peer cannot be in the peer table - she's new!
@@ -1072,16 +1078,16 @@ kptllnd_get_last_seen_matchbits_locked(lnet_process_id_t lpid)
         /* peer's last matchbits can't change after it comes out of the peer
          * table, so first match is fine */
 
-        list_for_each (tmp, &kptllnd_data.kptl_closing_peers) {
-                peer = list_entry (tmp, kptl_peer_t, peer_list);
+        cfs_list_for_each (tmp, &kptllnd_data.kptl_closing_peers) {
+                peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
 
                 if (LNET_NIDADDR(peer->peer_id.nid) == LNET_NIDADDR(lpid.nid) &&
                     peer->peer_id.pid == lpid.pid)
                         return peer->peer_last_matchbits_seen;
         }
 
-        list_for_each (tmp, &kptllnd_data.kptl_zombie_peers) {
-                peer = list_entry (tmp, kptl_peer_t, peer_list);
+        cfs_list_for_each (tmp, &kptllnd_data.kptl_zombie_peers) {
+                peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
 
                 if (LNET_NIDADDR(peer->peer_id.nid) == LNET_NIDADDR(lpid.nid) &&
                     peer->peer_id.pid == lpid.pid)
@@ -1095,7 +1101,7 @@ kptl_peer_t *
 kptllnd_peer_handle_hello (kptl_net_t *net,
                            ptl_process_id_t initiator, kptl_msg_t *msg)
 {
-        rwlock_t           *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+        cfs_rwlock_t       *g_lock = &kptllnd_data.kptl_peer_rw_lock;
         kptl_peer_t        *peer;
         kptl_peer_t        *new_peer;
         lnet_process_id_t   lpid;
@@ -1146,7 +1152,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                 return NULL;
         }
         
-        write_lock_irqsave(g_lock, flags);
+        cfs_write_lock_irqsave(g_lock, flags);
 
         peer = kptllnd_id2peer_locked(lpid);
         if (peer != NULL) {
@@ -1156,7 +1162,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
 
                         if (msg->ptlm_dststamp != 0 &&
                             msg->ptlm_dststamp != peer->peer_myincarnation) {
-                                write_unlock_irqrestore(g_lock, flags);
+                                cfs_write_unlock_irqrestore(g_lock, flags);
 
                                 CERROR("Ignoring HELLO from %s: unexpected "
                                        "dststamp "LPX64" ("LPX64" wanted)\n",
@@ -1174,13 +1180,13 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                         peer->peer_max_msg_size =
                                 msg->ptlm_u.hello.kptlhm_max_msg_size;
                         
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
                         return peer;
                 }
 
                 if (msg->ptlm_dststamp != 0 &&
                     msg->ptlm_dststamp <= peer->peer_myincarnation) {
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
 
                         CERROR("Ignoring stale HELLO from %s: "
                                "dststamp "LPX64" (current "LPX64")\n",
@@ -1197,7 +1203,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
 
         kptllnd_cull_peertable_locked(lpid);
 
-        write_unlock_irqrestore(g_lock, flags);
+        cfs_write_unlock_irqrestore(g_lock, flags);
 
         if (peer != NULL) {
                 CDEBUG(D_NET, "Peer %s (%s) reconnecting:"
@@ -1235,11 +1241,11 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                 return NULL;
         }
 
-        write_lock_irqsave(g_lock, flags);
+        cfs_write_lock_irqsave(g_lock, flags);
 
  again:
         if (net->net_shutdown) {
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
 
                 CERROR ("Shutdown started, refusing connection from %s\n",
                         libcfs_id2str(lpid));
@@ -1261,14 +1267,14 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                         peer->peer_max_msg_size =
                                 msg->ptlm_u.hello.kptlhm_max_msg_size;
 
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
 
                         CWARN("Outgoing instantiated peer %s\n",
                               libcfs_id2str(lpid));
                } else {
                        LASSERT (peer->peer_state == PEER_STATE_ACTIVE);
 
-                        write_unlock_irqrestore(g_lock, flags);
+                        cfs_write_unlock_irqrestore(g_lock, flags);
 
                        /* WOW!  Somehow this peer completed the HELLO
                         * handshake while I slept.  I guess I could have slept
@@ -1288,7 +1294,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
         if (kptllnd_data.kptl_n_active_peers ==
             kptllnd_data.kptl_expected_peers) {
                 /* peer table full */
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
 
                 kptllnd_peertable_overflow_msg("Connection from ", lpid);
 
@@ -1302,7 +1308,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                         return NULL;
                 }
                 
-                write_lock_irqsave(g_lock, flags);
+                cfs_write_lock_irqsave(g_lock, flags);
                 kptllnd_data.kptl_expected_peers++;
                 goto again;
         }
@@ -1322,7 +1328,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
         LASSERT (!net->net_shutdown);
         kptllnd_peer_add_peertable_locked(new_peer);
 
-        write_unlock_irqrestore(g_lock, flags);
+        cfs_write_unlock_irqrestore(g_lock, flags);
 
        /* NB someone else could get in now and post a message before I post
         * the HELLO, but post_tx/check_sends take care of that! */
@@ -1347,7 +1353,7 @@ int
 kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
                     kptl_peer_t **peerp)
 {
-        rwlock_t         *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+        cfs_rwlock_t     *g_lock = &kptllnd_data.kptl_peer_rw_lock;
         ptl_process_id_t  ptl_id;
         kptl_peer_t      *new_peer;
         kptl_tx_t        *hello_tx;
@@ -1356,13 +1362,13 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
         __u64             last_matchbits_seen;
 
         /* I expect to find the peer, so I only take a read lock... */
-        read_lock_irqsave(g_lock, flags);
+        cfs_read_lock_irqsave(g_lock, flags);
         *peerp = kptllnd_id2peer_locked(target);
-        read_unlock_irqrestore(g_lock, flags);
+        cfs_read_unlock_irqrestore(g_lock, flags);
 
         if (*peerp != NULL)
                 return 0;
-        
+
         if ((target.pid & LNET_PID_USERFLAG) != 0) {
                 CWARN("Refusing to create a new connection to %s "
                       "(non-kernel peer)\n", libcfs_id2str(target));
@@ -1395,14 +1401,14 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
         if (rc != 0)
                 goto unwind_1;
 
-        write_lock_irqsave(g_lock, flags);
+        cfs_write_lock_irqsave(g_lock, flags);
  again:
         /* Called only in lnd_send which can't happen after lnd_shutdown */
         LASSERT (!net->net_shutdown);
 
         *peerp = kptllnd_id2peer_locked(target);
         if (*peerp != NULL) {
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
                 goto unwind_2;
         }
 
@@ -1411,7 +1417,7 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
         if (kptllnd_data.kptl_n_active_peers ==
             kptllnd_data.kptl_expected_peers) {
                 /* peer table full */
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
 
                 kptllnd_peertable_overflow_msg("Connection to ", target);
 
@@ -1422,7 +1428,7 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
                         rc = -ENOMEM;
                         goto unwind_2;
                 }
-                write_lock_irqsave(g_lock, flags);
+                cfs_write_lock_irqsave(g_lock, flags);
                 kptllnd_data.kptl_expected_peers++;
                 goto again;
         }
@@ -1438,7 +1444,7 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
         
         kptllnd_peer_add_peertable_locked(new_peer);
 
-        write_unlock_irqrestore(g_lock, flags);
+        cfs_write_unlock_irqrestore(g_lock, flags);
 
        /* NB someone else could get in now and post a message before I post
         * the HELLO, but post_tx/check_sends take care of that! */
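
The hunks above are a mechanical rename: every rwlock primitive used by the ptllnd peer code gains the cfs_ prefix so that all libcfs-exported symbols share one namespace. On Linux the new names are presumably thin aliases over the native kernel API, along the lines of this sketch (names inferred from the call sites in this patch, not quoted from the libcfs headers):

    /* Sketch only: presumed Linux-side aliases behind the cfs_ rwlock names. */
    typedef rwlock_t cfs_rwlock_t;

    #define cfs_rwlock_init(lock)                    rwlock_init(lock)
    #define cfs_read_lock_irqsave(lock, flags)       read_lock_irqsave(lock, flags)
    #define cfs_read_unlock_irqrestore(lock, flags)  read_unlock_irqrestore(lock, flags)
    #define cfs_write_lock_irqsave(lock, flags)      write_lock_irqsave(lock, flags)
    #define cfs_write_unlock_irqrestore(lock, flags) write_unlock_irqrestore(lock, flags)

With aliases like these the conversion preserves behaviour exactly, which is why the hunks are pure text substitutions plus line rewrapping.
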
index e85ab5a..bf226bb 100644
@@ -37,7 +37,7 @@
 #include "ptllnd.h"
 
 #ifdef CRAY_XT3
-static struct semaphore   ptltrace_mutex;
+static cfs_semaphore_t    ptltrace_mutex;
 static cfs_waitq_t        ptltrace_debug_ctlwq;
 
 void
@@ -139,7 +139,7 @@ kptllnd_dump_ptltrace_thread(void *arg)
         libcfs_daemonize("kpt_ptltrace_dump");
 
         /* serialise with other instances of me */
-        mutex_down(&ptltrace_mutex);
+        cfs_mutex_down(&ptltrace_mutex);
 
         snprintf(fname, sizeof(fname), "%s.%ld.%ld",
                  *kptllnd_tunables.kptl_ptltrace_basename,
@@ -147,7 +147,7 @@ kptllnd_dump_ptltrace_thread(void *arg)
 
         kptllnd_ptltrace_to_file(fname);
 
-        mutex_up(&ptltrace_mutex);
+        cfs_mutex_up(&ptltrace_mutex);
 
         /* unblock my creator */
         cfs_waitq_signal(&ptltrace_debug_ctlwq);
@@ -157,13 +157,13 @@ kptllnd_dump_ptltrace_thread(void *arg)
 void
 kptllnd_dump_ptltrace(void)
 {
-        int            rc;     
+        int            rc;
         cfs_waitlink_t wait;
         ENTRY;
 
         /* taken from libcfs_debug_dumplog */
         cfs_waitlink_init(&wait);
-        set_current_state(TASK_INTERRUPTIBLE);
+        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
         cfs_waitq_add(&ptltrace_debug_ctlwq, &wait);
 
         rc = cfs_kernel_thread(kptllnd_dump_ptltrace_thread,
@@ -175,9 +175,9 @@ kptllnd_dump_ptltrace(void)
                 cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
         }
 
-        /* teardown if kernel_thread() failed */
+        /* teardown if cfs_kernel_thread() failed */
         cfs_waitq_del(&ptltrace_debug_ctlwq, &wait);
-        set_current_state(TASK_RUNNING);
+        cfs_set_current_state(CFS_TASK_RUNNING);
         EXIT;
 }
 
@@ -185,6 +185,6 @@ void
 kptllnd_init_ptltrace(void)
 {
         cfs_waitq_init(&ptltrace_debug_ctlwq);
-        init_mutex(&ptltrace_mutex);
+        cfs_init_mutex(&ptltrace_mutex);
 }
 #endif
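
In the ptltrace dumper the serialisation primitive and the scheduler-state constants are renamed as well. A hedged sketch of the presumed Linux definitions (the semaphore-as-mutex style matches the older kernel API this code targets; the exact libcfs definitions may differ):

    /* Sketch only: presumed Linux aliases for the renamed mutex and
     * task-state primitives. */
    typedef struct semaphore cfs_semaphore_t;

    #define cfs_init_mutex(sem)        init_MUTEX(sem)
    #define cfs_mutex_down(sem)        down(sem)
    #define cfs_mutex_up(sem)          up(sem)

    #define CFS_TASK_INTERRUPTIBLE     TASK_INTERRUPTIBLE
    #define CFS_TASK_RUNNING           TASK_RUNNING
    #define cfs_set_current_state(s)   set_current_state(s)
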
index cacd125..535fe86 100644
@@ -44,8 +44,8 @@ void
 kptllnd_rx_buffer_pool_init(kptl_rx_buffer_pool_t *rxbp)
 {
         memset(rxbp, 0, sizeof(*rxbp));
-        spin_lock_init(&rxbp->rxbp_lock);
-        INIT_LIST_HEAD(&rxbp->rxbp_list);
+        cfs_spin_lock_init(&rxbp->rxbp_lock);
+        CFS_INIT_LIST_HEAD(&rxbp->rxbp_list);
 }
 
 void
@@ -58,7 +58,7 @@ kptllnd_rx_buffer_destroy(kptl_rx_buffer_t *rxb)
         LASSERT(!rxb->rxb_posted);
         LASSERT(rxb->rxb_idle);
 
-        list_del(&rxb->rxb_list);
+        cfs_list_del(&rxb->rxb_list);
         rxbp->rxbp_count--;
 
         LIBCFS_FREE(rxb->rxb_buffer, kptllnd_rx_buffer_size());
@@ -80,7 +80,7 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
 
         CDEBUG(D_NET, "kptllnd_rx_buffer_pool_reserve(%d)\n", count);
 
-        spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
         for (;;) {
                 if (rxbp->rxbp_shutdown) {
@@ -94,7 +94,7 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
                         break;
                 }
                 
-                spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                 
                 LIBCFS_ALLOC(rxb, sizeof(*rxb));
                 LIBCFS_ALLOC(buffer, bufsize);
@@ -107,7 +107,7 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
                         if (buffer != NULL)
                                 LIBCFS_FREE(buffer, bufsize);
                         
-                        spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                         rc = -ENOMEM;
                         break;
                 }
@@ -122,33 +122,33 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
                 rxb->rxb_buffer = buffer;
                 rxb->rxb_mdh = PTL_INVALID_HANDLE;
 
-                spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                 
                 if (rxbp->rxbp_shutdown) {
-                        spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                         
                         LIBCFS_FREE(rxb, sizeof(*rxb));
                         LIBCFS_FREE(buffer, bufsize);
 
-                        spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                         rc = -ESHUTDOWN;
                         break;
                 }
                 
-                list_add_tail(&rxb->rxb_list, &rxbp->rxbp_list);
+                cfs_list_add_tail(&rxb->rxb_list, &rxbp->rxbp_list);
                 rxbp->rxbp_count++;
 
-                spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                 
                 kptllnd_rx_buffer_post(rxb);
 
-                spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
         }
 
         if (rc == 0)
                 rxbp->rxbp_reserved += count;
 
-        spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 
         return rc;
 }
@@ -159,12 +159,12 @@ kptllnd_rx_buffer_pool_unreserve(kptl_rx_buffer_pool_t *rxbp,
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
         CDEBUG(D_NET, "kptllnd_rx_buffer_pool_unreserve(%d)\n", count);
         rxbp->rxbp_reserved -= count;
 
-        spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 }
 
 void
@@ -174,11 +174,11 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
         int                     rc;
         int                     i;
         unsigned long           flags;
-        struct list_head       *tmp;
-        struct list_head       *nxt;
+        cfs_list_t             *tmp;
+        cfs_list_t             *nxt;
         ptl_handle_md_t         mdh;
 
-        /* CAVEAT EMPTOR: I'm racing with everything here!!!  
+        /* CAVEAT EMPTOR: I'm racing with everything here!!!
          *
          * Buffers can still be posted after I set rxbp_shutdown because I
          * can't hold rxbp_lock while I'm posting them.
@@ -189,20 +189,20 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
          * different MD) from when the MD is actually unlinked, to when the
          * event callback tells me it has been unlinked. */
 
-        spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
         rxbp->rxbp_shutdown = 1;
 
         for (i = 9;; i++) {
-                list_for_each_safe(tmp, nxt, &rxbp->rxbp_list) {
-                        rxb = list_entry (tmp, kptl_rx_buffer_t, rxb_list);
-                
+                cfs_list_for_each_safe(tmp, nxt, &rxbp->rxbp_list) {
+                        rxb = cfs_list_entry (tmp, kptl_rx_buffer_t, rxb_list);
+
                         if (rxb->rxb_idle) {
-                                spin_unlock_irqrestore(&rxbp->rxbp_lock, 
-                                                       flags);
+                                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock,
+                                                           flags);
                                 kptllnd_rx_buffer_destroy(rxb);
-                                spin_lock_irqsave(&rxbp->rxbp_lock, 
-                                                  flags);
+                                cfs_spin_lock_irqsave(&rxbp->rxbp_lock,
+                                                      flags);
                                 continue;
                         }
 
@@ -210,11 +210,11 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
                         if (PtlHandleIsEqual(mdh, PTL_INVALID_HANDLE))
                                 continue;
                         
-                        spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 
                         rc = PtlMDUnlink(mdh);
 
-                        spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                         
 #ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
                         /* callback clears rxb_mdh and drops net's ref
@@ -230,10 +230,10 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
 #endif
                 }
 
-                if (list_empty(&rxbp->rxbp_list))
+                if (cfs_list_empty(&rxbp->rxbp_list))
                         break;
 
-                spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 
                 /* Wait a bit for references to be dropped */
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
@@ -242,10 +242,10 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
 
                 cfs_pause(cfs_time_seconds(1));
 
-                spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
         }
 
-        spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 }
 
 void
@@ -259,7 +259,7 @@ kptllnd_rx_buffer_post(kptl_rx_buffer_t *rxb)
         kptl_rx_buffer_pool_t  *rxbp = rxb->rxb_pool;
         unsigned long           flags;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (rxb->rxb_refcount == 0);
         LASSERT (!rxb->rxb_idle);
         LASSERT (!rxb->rxb_posted);
@@ -268,18 +268,18 @@ kptllnd_rx_buffer_post(kptl_rx_buffer_t *rxb)
         any.nid = PTL_NID_ANY;
         any.pid = PTL_PID_ANY;
 
-        spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
         if (rxbp->rxbp_shutdown) {
                 rxb->rxb_idle = 1;
-                spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                 return;
         }
 
         rxb->rxb_refcount = 1;                  /* net's ref */
         rxb->rxb_posted = 1;                    /* I'm posting */
         
-        spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 
         rc = PtlMEAttach(kptllnd_data.kptl_nih,
                          *kptllnd_tunables.kptl_portal,
@@ -312,10 +312,10 @@ kptllnd_rx_buffer_post(kptl_rx_buffer_t *rxb)
 
         rc = PtlMDAttach(meh, md, PTL_UNLINK, &mdh);
         if (rc == PTL_OK) {
-                spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                 if (rxb->rxb_posted)            /* Not auto-unlinked yet!!! */
                         rxb->rxb_mdh = mdh;
-                spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                 return;
         }
         
@@ -325,11 +325,11 @@ kptllnd_rx_buffer_post(kptl_rx_buffer_t *rxb)
         LASSERT(rc == PTL_OK);
 
  failed:
-        spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
         rxb->rxb_posted = 0;
         /* XXX this will just try again immediately */
         kptllnd_rx_buffer_decref_locked(rxb);
-        spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 }
 
 kptl_rx_t *
@@ -369,7 +369,7 @@ kptllnd_rx_done(kptl_rx_t *rx, int post_credit)
 
         if (peer != NULL) {
                 /* Update credits (after I've decref-ed the buffer) */
-                spin_lock_irqsave(&peer->peer_lock, flags);
+                cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
                 if (post_credit == PTLLND_POSTRX_PEER_CREDIT)
                         peer->peer_outstanding_credits++;
@@ -383,7 +383,7 @@ kptllnd_rx_done(kptl_rx_t *rx, int post_credit)
                        peer->peer_outstanding_credits, peer->peer_sent_credits,
                        rx);
 
-                spin_unlock_irqrestore(&peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                 /* I might have to send back credits */
                 kptllnd_peer_check_sends(peer);
@@ -410,16 +410,16 @@ kptllnd_rx_buffer_callback (ptl_event_t *ev)
 #endif
 
         CDEBUG(D_NET, "%s: %s(%d) rxb=%p fail=%s(%d) unlink=%d\n",
-               kptllnd_ptlid2str(ev->initiator), 
-               kptllnd_evtype2str(ev->type), ev->type, rxb, 
+               kptllnd_ptlid2str(ev->initiator),
+               kptllnd_evtype2str(ev->type), ev->type, rxb,
                kptllnd_errtype2str(ev->ni_fail_type), ev->ni_fail_type,
                unlinked);
 
         LASSERT (!rxb->rxb_idle);
         LASSERT (ev->md.start == rxb->rxb_buffer);
-        LASSERT (ev->offset + ev->mlength <= 
+        LASSERT (ev->offset + ev->mlength <=
                  PAGE_SIZE * *kptllnd_tunables.kptl_rxb_npages);
-        LASSERT (ev->type == PTL_EVENT_PUT_END || 
+        LASSERT (ev->type == PTL_EVENT_PUT_END ||
                  ev->type == PTL_EVENT_UNLINK);
         LASSERT (ev->type == PTL_EVENT_UNLINK ||
                  ev->match_bits == LNET_MSG_MATCHBITS);
@@ -463,7 +463,7 @@ kptllnd_rx_buffer_callback (ptl_event_t *ev)
                                 /* Portals can't force alignment - copy into
                                  * rx_space (avoiding overflow) to fix */
                                 int maxlen = *kptllnd_tunables.kptl_max_msg_size;
-                                
+
                                 rx->rx_rxb = NULL;
                                 rx->rx_nob = MIN(maxlen, ev->mlength);
                                 rx->rx_msg = (kptl_msg_t *)rx->rx_space;
@@ -481,26 +481,26 @@ kptllnd_rx_buffer_callback (ptl_event_t *ev)
                         rx->rx_uid = ev->uid;
 #endif
                         /* Queue for attention */
-                        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, 
-                                          flags);
+                        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+                                              flags);
 
-                        list_add_tail(&rx->rx_list, 
-                                      &kptllnd_data.kptl_sched_rxq);
-                        wake_up(&kptllnd_data.kptl_sched_waitq);
+                        cfs_list_add_tail(&rx->rx_list,
+                                          &kptllnd_data.kptl_sched_rxq);
+                        cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
 
-                        spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, 
-                                               flags);
+                        cfs_spin_unlock_irqrestore(
+                                &kptllnd_data.kptl_sched_lock, flags);
                 }
         }
 
         if (unlinked) {
-                spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
                 rxb->rxb_posted = 0;
                 rxb->rxb_mdh = PTL_INVALID_HANDLE;
                 kptllnd_rx_buffer_decref_locked(rxb);
 
-                spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
         }
 }
 
@@ -542,17 +542,17 @@ kptllnd_find_net (lnet_nid_t nid)
 {
         kptl_net_t *net;
 
-        read_lock(&kptllnd_data.kptl_net_rw_lock);
-        list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
+        cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
                 LASSERT (!net->net_shutdown);
 
                 if (net->net_ni->ni_nid == nid) {
                         kptllnd_net_addref(net);
-                        read_unlock(&kptllnd_data.kptl_net_rw_lock);
+                        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
                         return net;
                 }
         }
-        read_unlock(&kptllnd_data.kptl_net_rw_lock);
+        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         return NULL;
 }
@@ -565,14 +565,14 @@ kptllnd_rx_parse(kptl_rx_t *rx)
         int                     post_credit = PTLLND_POSTRX_PEER_CREDIT;
         kptl_net_t             *net = NULL;
         kptl_peer_t            *peer;
-        struct list_head        txs;
+        cfs_list_t              txs;
         unsigned long           flags;
         lnet_process_id_t       srcid;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (rx->rx_peer == NULL);
 
-        INIT_LIST_HEAD(&txs);
+        CFS_INIT_LIST_HEAD(&txs);
 
         if ((rx->rx_nob >= 4 &&
              (msg->ptlm_magic == LNET_PROTO_MAGIC ||
@@ -691,9 +691,9 @@ kptllnd_rx_parse(kptl_rx_t *rx)
 
                 if (peer->peer_state == PEER_STATE_WAITING_HELLO) {
                         /* recoverable error - restart txs */
-                        spin_lock_irqsave(&peer->peer_lock, flags);
+                        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
                         kptllnd_cancel_txlist(&peer->peer_sendq, &txs);
-                        spin_unlock_irqrestore(&peer->peer_lock, flags);
+                        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                         CWARN("NAK %s: Unexpected %s message\n",
                               libcfs_id2str(srcid),
@@ -721,7 +721,7 @@ kptllnd_rx_parse(kptl_rx_t *rx)
         LASSERTF (msg->ptlm_srcpid == peer->peer_id.pid, "m %u p %u\n",
                   msg->ptlm_srcpid, peer->peer_id.pid);
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         /* Check peer only sends when I've sent her credits */
         if (peer->peer_sent_credits == 0) {
@@ -729,7 +729,7 @@ kptllnd_rx_parse(kptl_rx_t *rx)
                 int oc = peer->peer_outstanding_credits;
                 int sc = peer->peer_sent_credits;
 
-                spin_unlock_irqrestore(&peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                 CERROR("%s: buffer overrun [%d/%d+%d]\n",
                        libcfs_id2str(peer->peer_id), c, sc, oc);
@@ -748,7 +748,7 @@ kptllnd_rx_parse(kptl_rx_t *rx)
                 post_credit = PTLLND_POSTRX_NO_CREDIT;
         }
 
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         /* See if something can go out now that credits have come in */
         if (msg->ptlm_credits != 0)
@@ -795,14 +795,14 @@ kptllnd_rx_parse(kptl_rx_t *rx)
                          PTL_RESERVED_MATCHBITS);
 
                 /* Update last match bits seen */
-                spin_lock_irqsave(&peer->peer_lock, flags);
+                cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
                 if (msg->ptlm_u.rdma.kptlrm_matchbits >
                     rx->rx_peer->peer_last_matchbits_seen)
                         rx->rx_peer->peer_last_matchbits_seen =
                                 msg->ptlm_u.rdma.kptlrm_matchbits;
 
-                spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
 
                 rc = lnet_parse(net->net_ni,
                                 &msg->ptlm_u.rdma.kptlrm_hdr,
@@ -820,7 +820,7 @@ kptllnd_rx_parse(kptl_rx_t *rx)
         kptllnd_peer_close(peer, rc);
         if (rx->rx_peer == NULL)                /* drop ref on peer */
                 kptllnd_peer_decref(peer);      /* unless rx_done will */
-        if (!list_empty(&txs)) {
+        if (!cfs_list_empty(&txs)) {
                 LASSERT (net != NULL);
                 kptllnd_restart_txs(net, srcid, &txs);
         }
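
The rx-buffer and rx-parse hunks above lean heavily on the renamed list API. cfs_list_t presumably wraps struct list_head on Linux, with each operation delegating one-for-one, so the rename carries no semantic change (sketch under that assumption):

    /* Sketch only: presumed mapping of the cfs_ list API onto Linux lists. */
    typedef struct list_head cfs_list_t;

    #define CFS_INIT_LIST_HEAD(head)             INIT_LIST_HEAD(head)
    #define cfs_list_add_tail(entry, head)       list_add_tail(entry, head)
    #define cfs_list_del(entry)                  list_del(entry)
    #define cfs_list_empty(head)                 list_empty(head)
    #define cfs_list_entry(ptr, type, member)    list_entry(ptr, type, member)
    #define cfs_list_for_each(pos, head)         list_for_each(pos, head)
    #define cfs_list_for_each_safe(pos, n, head) list_for_each_safe(pos, n, head)

cfs_in_interrupt(), used in the LASSERTs above, would similarly wrap the kernel's in_interrupt().
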
index 10be464..625df9e 100644
@@ -51,10 +51,10 @@ kptllnd_free_tx(kptl_tx_t *tx)
 
         LIBCFS_FREE(tx, sizeof(*tx));
 
-        atomic_dec(&kptllnd_data.kptl_ntx);
+        cfs_atomic_dec(&kptllnd_data.kptl_ntx);
 
         /* Keep the tunable in step for visibility */
-        *kptllnd_tunables.kptl_ntx = atomic_read(&kptllnd_data.kptl_ntx);
+        *kptllnd_tunables.kptl_ntx = cfs_atomic_read(&kptllnd_data.kptl_ntx);
 }
 
 kptl_tx_t *
@@ -68,10 +68,10 @@ kptllnd_alloc_tx(void)
                 return NULL;
         }
 
-        atomic_inc(&kptllnd_data.kptl_ntx);
+        cfs_atomic_inc(&kptllnd_data.kptl_ntx);
 
         /* Keep the tunable in step for visibility */
-        *kptllnd_tunables.kptl_ntx = atomic_read(&kptllnd_data.kptl_ntx);
+        *kptllnd_tunables.kptl_ntx = cfs_atomic_read(&kptllnd_data.kptl_ntx);
 
         tx->tx_idle = 1;
         tx->tx_rdma_mdh = PTL_INVALID_HANDLE;
@@ -106,17 +106,17 @@ kptllnd_setup_tx_descs()
 {
         int       n = *kptllnd_tunables.kptl_ntx;
         int       i;
-        
+
         for (i = 0; i < n; i++) {
                 kptl_tx_t *tx = kptllnd_alloc_tx();
                 if (tx == NULL)
                         return -ENOMEM;
-                
-                spin_lock(&kptllnd_data.kptl_tx_lock);
-                list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
-                spin_unlock(&kptllnd_data.kptl_tx_lock);
+
+                cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+                cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
+                cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
         }
-        
+
         return 0;
 }
 
@@ -128,15 +128,15 @@ kptllnd_cleanup_tx_descs()
         /* No locking; single threaded now */
         LASSERT (kptllnd_data.kptl_shutdown == 2);
 
-        while (!list_empty(&kptllnd_data.kptl_idle_txs)) {
-                tx = list_entry(kptllnd_data.kptl_idle_txs.next,
-                                kptl_tx_t, tx_list);
-                
-                list_del(&tx->tx_list);
+        while (!cfs_list_empty(&kptllnd_data.kptl_idle_txs)) {
+                tx = cfs_list_entry(kptllnd_data.kptl_idle_txs.next,
+                                    kptl_tx_t, tx_list);
+
+                cfs_list_del(&tx->tx_list);
                 kptllnd_free_tx(tx);
         }
 
-        LASSERT (atomic_read(&kptllnd_data.kptl_ntx) == 0);
+        LASSERT (cfs_atomic_read(&kptllnd_data.kptl_ntx) == 0);
 }
 
 kptl_tx_t *
@@ -144,13 +144,13 @@ kptllnd_get_idle_tx(enum kptl_tx_type type)
 {
         kptl_tx_t      *tx = NULL;
 
-        if (IS_SIMULATION_ENABLED(FAIL_TX_PUT_ALLOC) && 
+        if (IS_SIMULATION_ENABLED(FAIL_TX_PUT_ALLOC) &&
             type == TX_TYPE_PUT_REQUEST) {
                 CERROR("FAIL_TX_PUT_ALLOC SIMULATION triggered\n");
                 return NULL;
         }
 
-        if (IS_SIMULATION_ENABLED(FAIL_TX_GET_ALLOC) && 
+        if (IS_SIMULATION_ENABLED(FAIL_TX_GET_ALLOC) &&
             type == TX_TYPE_GET_REQUEST) {
                 CERROR ("FAIL_TX_GET_ALLOC SIMULATION triggered\n");
                 return NULL;
@@ -161,23 +161,23 @@ kptllnd_get_idle_tx(enum kptl_tx_type type)
                 return NULL;
         }
 
-        spin_lock(&kptllnd_data.kptl_tx_lock);
+        cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
 
-        if (list_empty (&kptllnd_data.kptl_idle_txs)) {
-                spin_unlock(&kptllnd_data.kptl_tx_lock);
+        if (cfs_list_empty (&kptllnd_data.kptl_idle_txs)) {
+                cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
 
                 tx = kptllnd_alloc_tx();
                 if (tx == NULL)
                         return NULL;
         } else {
-                tx = list_entry(kptllnd_data.kptl_idle_txs.next, 
-                                kptl_tx_t, tx_list);
-                list_del(&tx->tx_list);
+                tx = cfs_list_entry(kptllnd_data.kptl_idle_txs.next, 
+                                    kptl_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);
 
-                spin_unlock(&kptllnd_data.kptl_tx_lock);
+                cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
         }
 
-        LASSERT (atomic_read(&tx->tx_refcount)== 0);
+        LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
         LASSERT (tx->tx_idle);
         LASSERT (!tx->tx_active);
         LASSERT (tx->tx_lnet_msg == NULL);
@@ -187,7 +187,7 @@ kptllnd_get_idle_tx(enum kptl_tx_type type)
         LASSERT (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE));
         
         tx->tx_type = type;
-        atomic_set(&tx->tx_refcount, 1);
+        cfs_atomic_set(&tx->tx_refcount, 1);
         tx->tx_status = 0;
         tx->tx_idle = 0;
         tx->tx_tposted = 0;
@@ -206,17 +206,17 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
         ptl_handle_md_t  rdma_mdh;
         unsigned long    flags;
 
-        LASSERT (atomic_read(&tx->tx_refcount) == 0);
+        LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
         LASSERT (!tx->tx_active);
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         msg_mdh = tx->tx_msg_mdh;
         rdma_mdh = tx->tx_rdma_mdh;
 
         if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
             PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
-                spin_unlock_irqrestore(&peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
                 return 0;
         }
         
@@ -227,11 +227,11 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
                   tx->tx_lnet_replymsg == NULL));
 
         /* stash the tx on its peer until it completes */
-        atomic_set(&tx->tx_refcount, 1);
+        cfs_atomic_set(&tx->tx_refcount, 1);
         tx->tx_active = 1;
-        list_add_tail(&tx->tx_list, &peer->peer_activeq);
+        cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
         
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         /* These unlinks will ensure completion events (normal or unlink) will
          * happen ASAP */
@@ -254,17 +254,17 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
         unsigned long    flags;
         ptl_err_t        prc;
 
-        LASSERT (atomic_read(&tx->tx_refcount) == 0);
+        LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
         LASSERT (!tx->tx_active);
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         msg_mdh = tx->tx_msg_mdh;
         rdma_mdh = tx->tx_rdma_mdh;
 
         if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
             PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
-                spin_unlock_irqrestore(&peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
                 return 0;
         }
         
@@ -274,7 +274,7 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
                  (tx->tx_lnet_msg == NULL && 
                   tx->tx_replymsg == NULL));
 
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         if (!PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE)) {
                 prc = PtlMDUnlink(msg_mdh);
@@ -288,7 +288,7 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
                         rdma_mdh = PTL_INVALID_HANDLE;
         }
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         /* update tx_???_mdh if callback hasn't fired */
         if (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE))
@@ -303,18 +303,18 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
 
         if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
             PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
-                spin_unlock_irqrestore(&peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
                 return 0;
         }
 
         /* stash the tx on its peer until it completes */
-        atomic_set(&tx->tx_refcount, 1);
+        cfs_atomic_set(&tx->tx_refcount, 1);
         tx->tx_active = 1;
-        list_add_tail(&tx->tx_list, &peer->peer_activeq);
+        cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
 
         kptllnd_peer_addref(peer);              /* extra ref for me... */
 
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         /* This will get the watchdog thread to try aborting all the peer's
          * comms again.  NB, this deems it fair that 1 failing tx which can't
@@ -337,8 +337,8 @@ kptllnd_tx_fini (kptl_tx_t *tx)
         int             status   = tx->tx_status;
         int             rc;
 
-        LASSERT (!in_interrupt());
-        LASSERT (atomic_read(&tx->tx_refcount) == 0);
+        LASSERT (!cfs_in_interrupt());
+        LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
         LASSERT (!tx->tx_idle);
         LASSERT (!tx->tx_active);
 
@@ -357,9 +357,9 @@ kptllnd_tx_fini (kptl_tx_t *tx)
         tx->tx_peer = NULL;
         tx->tx_idle = 1;
 
-        spin_lock(&kptllnd_data.kptl_tx_lock);
-        list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
-        spin_unlock(&kptllnd_data.kptl_tx_lock);
+        cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+        cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
+        cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
 
         /* Must finalize AFTER freeing 'tx' */
         if (msg != NULL)
@@ -494,7 +494,7 @@ kptllnd_tx_callback(ptl_event_t *ev)
         if (!unlinked)
                 return;
 
-        spin_lock_irqsave(&peer->peer_lock, flags);
+        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
 
         if (ismsg)
                 tx->tx_msg_mdh = PTL_INVALID_HANDLE;
@@ -504,23 +504,24 @@ kptllnd_tx_callback(ptl_event_t *ev)
         if (!PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE) ||
             !PtlHandleIsEqual(tx->tx_rdma_mdh, PTL_INVALID_HANDLE) ||
             !tx->tx_active) {
-                spin_unlock_irqrestore(&peer->peer_lock, flags);
+                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
                 return;
         }
 
-        list_del(&tx->tx_list);
+        cfs_list_del(&tx->tx_list);
         tx->tx_active = 0;
 
-        spin_unlock_irqrestore(&peer->peer_lock, flags);
+        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         /* drop peer's ref, but if it was the last one... */
-        if (atomic_dec_and_test(&tx->tx_refcount)) {
+        if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
                 /* ...finalize it in thread context! */
-                spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+                cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
-                list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
-                wake_up(&kptllnd_data.kptl_sched_waitq);
+                cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
+                cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
 
-                spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+                cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+                                           flags);
         }
 }
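
The tx-descriptor accounting above uses the renamed atomic counters for refcounts and the kptl_ntx tally. On Linux these presumably alias the kernel's atomic_t directly (sketch only; assumed definitions):

    /* Sketch only: presumed Linux aliases for the cfs_ atomic operations. */
    typedef atomic_t cfs_atomic_t;

    #define cfs_atomic_read(a)          atomic_read(a)
    #define cfs_atomic_set(a, v)        atomic_set(a, v)
    #define cfs_atomic_inc(a)           atomic_inc(a)
    #define cfs_atomic_dec(a)           atomic_dec(a)
    #define cfs_atomic_dec_and_test(a)  atomic_dec_and_test(a)
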
index 9a49984..0e8a931 100644
@@ -50,7 +50,7 @@
  * assertions generated here (but fails-safe if it ever does) */
 typedef struct {
         int     counter;
-} atomic_t;
+} cfs_atomic_t;
 
 #include <lnet/lib-types.h>
 #include <lnet/ptllnd_wire.h>
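
This hunk is the userspace counterpart: with no kernel atomic_t available, the header defines cfs_atomic_t as a bare counter. The accessors pairing with it would be plain, non-atomic operations, safe only if nothing races on the counter, which the surrounding comment suggests is the expectation. A hypothetical sketch (not quoted from the patch):

    /* Sketch only: plain accessors one would pair with the struct above. */
    #define cfs_atomic_read(a)          ((a)->counter)
    #define cfs_atomic_set(a, v)        ((a)->counter = (v))
    #define cfs_atomic_inc(a)           ((a)->counter++)
    #define cfs_atomic_dec(a)           ((a)->counter--)
    #define cfs_atomic_dec_and_test(a)  (--(a)->counter == 0)
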
index 1a42b09..0e16e86 100644
@@ -57,19 +57,19 @@ int
 kqswnal_get_tx_desc (struct libcfs_ioctl_data *data)
 {
        unsigned long      flags;
-       struct list_head  *tmp;
+       cfs_list_t        *tmp;
        kqswnal_tx_t      *ktx;
        lnet_hdr_t        *hdr;
        int                index = data->ioc_count;
        int                rc = -ENOENT;
 
-       spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+       cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
 
-       list_for_each (tmp, &kqswnal_data.kqn_activetxds) {
+       cfs_list_for_each (tmp, &kqswnal_data.kqn_activetxds) {
                if (index-- != 0)
                        continue;
 
-               ktx = list_entry (tmp, kqswnal_tx_t, ktx_list);
+               ktx = cfs_list_entry (tmp, kqswnal_tx_t, ktx_list);
                hdr = (lnet_hdr_t *)ktx->ktx_buffer;
 
                data->ioc_count  = le32_to_cpu(hdr->payload_length);
@@ -77,13 +77,14 @@ kqswnal_get_tx_desc (struct libcfs_ioctl_data *data)
                data->ioc_u64[0] = ktx->ktx_nid;
                data->ioc_u32[0] = le32_to_cpu(hdr->type);
                data->ioc_u32[1] = ktx->ktx_launcher;
-               data->ioc_flags  = (list_empty (&ktx->ktx_schedlist) ? 0 : 1) |
-                                  (ktx->ktx_state << 2);
+               data->ioc_flags  =
+                       (cfs_list_empty (&ktx->ktx_schedlist) ? 0 : 1) |
+                       (ktx->ktx_state << 2);
                rc = 0;
                break;
        }
-       
-       spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+
+       cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
        return (rc);
 }
 
@@ -101,14 +102,14 @@ kqswnal_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg)
        case IOC_LIBCFS_REGISTER_MYNID:
                if (data->ioc_nid == ni->ni_nid)
                        return 0;
-               
+
                LASSERT (LNET_NIDNET(data->ioc_nid) == LNET_NIDNET(ni->ni_nid));
 
                CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID for %s(%s)\n",
                       libcfs_nid2str(data->ioc_nid),
                       libcfs_nid2str(ni->ni_nid));
                return 0;
-               
+
        default:
                return (-EINVAL);
        }
@@ -137,15 +138,15 @@ kqswnal_shutdown(lnet_ni_t *ni)
 
        /**********************************************************************/
        /* Signal the start of shutdown... */
-       spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
+       cfs_spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
        kqswnal_data.kqn_shuttingdown = 1;
-       spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
+       cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
 
        /**********************************************************************/
        /* wait for sends that have allocated a tx desc to launch or give up */
-       while (atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
+       while (cfs_atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
                CDEBUG(D_NET, "waiting for %d pending sends\n",
-                      atomic_read (&kqswnal_data.kqn_pending_txs));
+                      cfs_atomic_read (&kqswnal_data.kqn_pending_txs));
                cfs_pause(cfs_time_seconds(1));
        }
 
@@ -168,16 +169,16 @@ kqswnal_shutdown(lnet_ni_t *ni)
 
        /* NB ep_free_xmtr() returns only after all outstanding transmits
         * have called their callback... */
-       LASSERT(list_empty(&kqswnal_data.kqn_activetxds));
+       LASSERT(cfs_list_empty(&kqswnal_data.kqn_activetxds));
 
        /**********************************************************************/
        /* flag threads to terminate, wake them and wait for them to die */
        kqswnal_data.kqn_shuttingdown = 2;
-       wake_up_all (&kqswnal_data.kqn_sched_waitq);
+       cfs_waitq_broadcast (&kqswnal_data.kqn_sched_waitq);
 
-       while (atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
+       while (cfs_atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
                CDEBUG(D_NET, "waiting for %d threads to terminate\n",
-                      atomic_read (&kqswnal_data.kqn_nthreads));
+                      cfs_atomic_read (&kqswnal_data.kqn_nthreads));
                cfs_pause(cfs_time_seconds(1));
        }
 
@@ -186,9 +187,9 @@ kqswnal_shutdown(lnet_ni_t *ni)
         * I control the horizontals and the verticals...
         */
 
-       LASSERT (list_empty (&kqswnal_data.kqn_readyrxds));
-       LASSERT (list_empty (&kqswnal_data.kqn_donetxds));
-       LASSERT (list_empty (&kqswnal_data.kqn_delayedtxds));
+       LASSERT (cfs_list_empty (&kqswnal_data.kqn_readyrxds));
+       LASSERT (cfs_list_empty (&kqswnal_data.kqn_donetxds));
+       LASSERT (cfs_list_empty (&kqswnal_data.kqn_delayedtxds));
 
        /**********************************************************************/
        /* Unmap message buffers and free all descriptors and buffers
@@ -251,7 +252,7 @@ kqswnal_shutdown(lnet_ni_t *ni)
        /* resets flags, pointers to NULL etc */
        memset(&kqswnal_data, 0, sizeof (kqswnal_data));
 
-       CDEBUG (D_MALLOC, "done kmem %d\n", atomic_read(&libcfs_kmemory));
+       CDEBUG (D_MALLOC, "done kmem %d\n", cfs_atomic_read(&libcfs_kmemory));
 
        PORTAL_MODULE_UNUSE;
 }
@@ -287,7 +288,7 @@ kqswnal_startup (lnet_ni_t *ni)
                                   *kqswnal_tunables.kqn_credits);
        }
         
-       CDEBUG (D_MALLOC, "start kmem %d\n", atomic_read(&libcfs_kmemory));
+       CDEBUG (D_MALLOC, "start kmem %d\n", cfs_atomic_read(&libcfs_kmemory));
        
        /* ensure all pointers NULL etc */
        memset (&kqswnal_data, 0, sizeof (kqswnal_data));
@@ -297,16 +298,16 @@ kqswnal_startup (lnet_ni_t *ni)
        ni->ni_peertxcredits = *kqswnal_tunables.kqn_peercredits;
        ni->ni_maxtxcredits = *kqswnal_tunables.kqn_credits;
 
-       INIT_LIST_HEAD (&kqswnal_data.kqn_idletxds);
-       INIT_LIST_HEAD (&kqswnal_data.kqn_activetxds);
-       spin_lock_init (&kqswnal_data.kqn_idletxd_lock);
+       CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_idletxds);
+       CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_activetxds);
+       cfs_spin_lock_init (&kqswnal_data.kqn_idletxd_lock);
 
-       INIT_LIST_HEAD (&kqswnal_data.kqn_delayedtxds);
-       INIT_LIST_HEAD (&kqswnal_data.kqn_donetxds);
-       INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
+       CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_delayedtxds);
+       CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_donetxds);
+       CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
 
-       spin_lock_init (&kqswnal_data.kqn_sched_lock);
-       init_waitqueue_head (&kqswnal_data.kqn_sched_waitq);
+       cfs_spin_lock_init (&kqswnal_data.kqn_sched_lock);
+       cfs_waitq_init (&kqswnal_data.kqn_sched_waitq);
 
        /* pointers/lists/locks initialised */
        kqswnal_data.kqn_init = KQN_INIT_DATA;
@@ -432,12 +433,12 @@ kqswnal_startup (lnet_ni_t *ni)
                ktx->ktx_basepage = basepage + premapped_pages; /* message mapping starts here */
                ktx->ktx_npages = KQSW_NTXMSGPAGES - premapped_pages; /* for this many pages */
 
-               INIT_LIST_HEAD (&ktx->ktx_schedlist);
+               CFS_INIT_LIST_HEAD (&ktx->ktx_schedlist);
 
                ktx->ktx_state = KTX_IDLE;
                ktx->ktx_rail = -1;             /* unset rail */
 
-               list_add_tail (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
+               cfs_list_add_tail (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
        }
 
        /**********************************************************************/
@@ -524,7 +525,7 @@ kqswnal_startup (lnet_ni_t *ni)
 
        /**********************************************************************/
        /* Spawn scheduling threads */
-       for (i = 0; i < num_online_cpus(); i++) {
+       for (i = 0; i < cfs_num_online_cpus(); i++) {
                rc = kqswnal_thread_start (kqswnal_scheduler, NULL);
                if (rc != 0)
                {
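
The kqswnal startup, shutdown and ioctl paths above follow the same pattern for spinlocks and CPU-topology queries. Presumed Linux aliases, inferred from the call sites (sketch only):

    /* Sketch only: presumed Linux aliases used by the kqswnal hunks. */
    typedef spinlock_t cfs_spinlock_t;

    #define cfs_spin_lock_init(lock)                 spin_lock_init(lock)
    #define cfs_spin_lock(lock)                      spin_lock(lock)
    #define cfs_spin_unlock(lock)                    spin_unlock(lock)
    #define cfs_spin_lock_irqsave(lock, flags)       spin_lock_irqsave(lock, flags)
    #define cfs_spin_unlock_irqrestore(lock, flags)  spin_unlock_irqrestore(lock, flags)
    #define cfs_num_online_cpus()                    num_online_cpus()
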
index aa27e64..d93cd7b 100644
@@ -184,21 +184,21 @@ typedef union {
 
 typedef struct kqswnal_rx
 {
-        struct list_head krx_list;              /* enqueue -> thread */
-        struct kqswnal_rx *krx_alloclist;       /* stack in kqn_rxds */
-        EP_RCVR         *krx_eprx;              /* port to post receives to */
-        EP_RXD          *krx_rxd;               /* receive descriptor (for repost) */
-        EP_NMD           krx_elanbuffer;        /* contiguous Elan buffer */
-        int              krx_npages;            /* # pages in receive buffer */
-        int              krx_nob;               /* Number Of Bytes received into buffer */
-        int              krx_rpc_reply_needed:1; /* peer waiting for EKC RPC reply */
-        int              krx_state;             /* what this RX is doing */
-        atomic_t         krx_refcount;          /* how to tell when rpc is done */
+        cfs_list_t           krx_list;     /* enqueue -> thread */
+        struct kqswnal_rx   *krx_alloclist;/* stack in kqn_rxds */
+        EP_RCVR             *krx_eprx;     /* port to post receives to */
+        EP_RXD              *krx_rxd;      /* receive descriptor (for repost) */
+        EP_NMD               krx_elanbuffer;/* contiguous Elan buffer */
+        int                  krx_npages;    /* # pages in receive buffer */
+        int                  krx_nob;       /* Number Of Bytes received into buffer */
+        int                  krx_rpc_reply_needed:1; /* peer waiting for EKC RPC reply */
+        int                  krx_state;     /* what this RX is doing */
+        cfs_atomic_t         krx_refcount;  /* how to tell when rpc is done */
 #if KQSW_CKSUM
-        __u32            krx_cksum;             /* checksum */
+        __u32                krx_cksum;     /* checksum */
 #endif
-        kqswnal_rpc_reply_t krx_rpc_reply;      /* rpc reply status block */
-        lnet_kiov_t      krx_kiov[KQSW_NRXMSGPAGES_LARGE]; /* buffer frags */
+        kqswnal_rpc_reply_t  krx_rpc_reply; /* rpc reply status block */
+        lnet_kiov_t          krx_kiov[KQSW_NRXMSGPAGES_LARGE];/* buffer frags */
 }  kqswnal_rx_t;
 
 #define KRX_POSTED       1                      /* receiving */
@@ -208,31 +208,31 @@ typedef struct kqswnal_rx
 
 typedef struct kqswnal_tx
 {
-        struct list_head  ktx_list;             /* enqueue idle/active */
-        struct list_head  ktx_schedlist;        /* enqueue on scheduler */
-        struct kqswnal_tx *ktx_alloclist;       /* stack in kqn_txds */
-        unsigned int      ktx_state:7;          /* What I'm doing */
-        unsigned int      ktx_firsttmpfrag:1;   /* ktx_frags[0] is in my ebuffer ? 0 : 1 */
-        __u32             ktx_basepage;         /* page offset in reserved elan tx vaddrs for mapping pages */
-        int               ktx_npages;           /* pages reserved for mapping messages */
-        int               ktx_nmappedpages;     /* # pages mapped for current message */
-        int               ktx_port;             /* destination ep port */
-        lnet_nid_t        ktx_nid;              /* destination node */
-        void             *ktx_args[3];          /* completion passthru */
-        char             *ktx_buffer;           /* pre-allocated contiguous buffer for hdr + small payloads */
-        cfs_time_t        ktx_launchtime;       /*  when (in jiffies) the transmit
-                                                 *  was launched */
-        int               ktx_status;           /* completion status */
+        cfs_list_t            ktx_list;         /* enqueue idle/active */
+        cfs_list_t            ktx_schedlist;    /* enqueue on scheduler */
+        struct kqswnal_tx    *ktx_alloclist;    /* stack in kqn_txds */
+        unsigned int          ktx_state:7;      /* What I'm doing */
+        unsigned int          ktx_firsttmpfrag:1;  /* ktx_frags[0] is in my ebuffer ? 0 : 1 */
+        __u32                 ktx_basepage;     /* page offset in reserved elan tx vaddrs for mapping pages */
+        int                   ktx_npages;       /* pages reserved for mapping messages */
+        int                   ktx_nmappedpages; /* # pages mapped for current message */
+        int                   ktx_port;         /* destination ep port */
+        lnet_nid_t            ktx_nid;          /* destination node */
+        void                 *ktx_args[3];      /* completion passthru */
+        char                 *ktx_buffer;       /* pre-allocated contiguous buffer for hdr + small payloads */
+        cfs_time_t            ktx_launchtime;   /* when (in jiffies) the
+                                                 * transmit was launched */
+        int                   ktx_status;       /* completion status */
 #if KQSW_CKSUM
-        __u32             ktx_cksum;            /* optimized GET payload checksum */
+        __u32                 ktx_cksum;        /* optimized GET payload checksum */
 #endif
         /* debug/info fields */
-        pid_t             ktx_launcher;         /* pid of launching process */
+        pid_t                 ktx_launcher;     /* pid of launching process */
 
-        int               ktx_nfrag;            /* # message frags */
-        int               ktx_rail;             /* preferred rail */
-        EP_NMD            ktx_ebuffer;          /* elan mapping of ktx_buffer */
-        EP_NMD            ktx_frags[EP_MAXFRAG];/* elan mapping of msg frags */
+        int                   ktx_nfrag;        /* # message frags */
+        int                   ktx_rail;         /* preferred rail */
+        EP_NMD                ktx_ebuffer;      /* elan mapping of ktx_buffer */
+        EP_NMD                ktx_frags[EP_MAXFRAG];/* elan mapping of msg frags */
 } kqswnal_tx_t;
 
 #define KTX_IDLE        0                       /* on kqn_idletxds */
@@ -265,40 +265,40 @@ typedef struct
 
 typedef struct
 {
-        char               kqn_init;            /* what's been initialised */
-        char               kqn_shuttingdown;    /* I'm trying to shut down */
-        atomic_t           kqn_nthreads;        /* # threads running */
-        lnet_ni_t         *kqn_ni;              /* _the_ instance of me */
-
-        kqswnal_rx_t      *kqn_rxds;            /* stack of all the receive descriptors */
-        kqswnal_tx_t      *kqn_txds;            /* stack of all the transmit descriptors */
-
-        struct list_head   kqn_idletxds;        /* transmit descriptors free to use */
-        struct list_head   kqn_activetxds;      /* transmit descriptors being used */
-        spinlock_t         kqn_idletxd_lock;    /* serialise idle txd access */
-        atomic_t           kqn_pending_txs;     /* # transmits being prepped */
-
-        spinlock_t         kqn_sched_lock;      /* serialise packet schedulers */
-        wait_queue_head_t  kqn_sched_waitq;     /* scheduler blocks here */
-
-        struct list_head   kqn_readyrxds;       /* rxds full of data */
-        struct list_head   kqn_donetxds;        /* completed transmits */
-        struct list_head   kqn_delayedtxds;     /* delayed transmits */
-
-        EP_SYS            *kqn_ep;              /* elan system */
-        EP_NMH            *kqn_ep_tx_nmh;       /* elan reserved tx vaddrs */
-        EP_NMH            *kqn_ep_rx_nmh;       /* elan reserved rx vaddrs */
-        EP_XMTR           *kqn_eptx;            /* elan transmitter */
-        EP_RCVR           *kqn_eprx_small;      /* elan receiver (small messages) */
-        EP_RCVR           *kqn_eprx_large;      /* elan receiver (large messages) */
-
-        int                kqn_nnodes;          /* this cluster's size */
-        int                kqn_elanid;          /* this nodes's elan ID */
-
-        EP_STATUSBLK       kqn_rpc_success;     /* preset RPC reply status blocks */
-        EP_STATUSBLK       kqn_rpc_failed;
-        EP_STATUSBLK       kqn_rpc_version;     /* reply to future version query */
-        EP_STATUSBLK       kqn_rpc_magic;       /* reply to future version query */
+        char                 kqn_init;        /* what's been initialised */
+        char                 kqn_shuttingdown;/* I'm trying to shut down */
+        cfs_atomic_t         kqn_nthreads;    /* # threads running */
+        lnet_ni_t           *kqn_ni;          /* _the_ instance of me */
+
+        kqswnal_rx_t        *kqn_rxds;        /* stack of all the receive descriptors */
+        kqswnal_tx_t        *kqn_txds;        /* stack of all the transmit descriptors */
+
+        cfs_list_t           kqn_idletxds;    /* transmit descriptors free to use */
+        cfs_list_t           kqn_activetxds;  /* transmit descriptors being used */
+        cfs_spinlock_t       kqn_idletxd_lock; /* serialise idle txd access */
+        cfs_atomic_t         kqn_pending_txs;/* # transmits being prepped */
+
+        cfs_spinlock_t       kqn_sched_lock; /* serialise packet schedulers */
+        cfs_waitq_t          kqn_sched_waitq;/* scheduler blocks here */
+
+        cfs_list_t           kqn_readyrxds;  /* rxds full of data */
+        cfs_list_t           kqn_donetxds;   /* completed transmits */
+        cfs_list_t           kqn_delayedtxds;/* delayed transmits */
+
+        EP_SYS              *kqn_ep;         /* elan system */
+        EP_NMH              *kqn_ep_tx_nmh;  /* elan reserved tx vaddrs */
+        EP_NMH              *kqn_ep_rx_nmh;  /* elan reserved rx vaddrs */
+        EP_XMTR             *kqn_eptx;       /* elan transmitter */
+        EP_RCVR             *kqn_eprx_small; /* elan receiver (small messages) */
+        EP_RCVR             *kqn_eprx_large; /* elan receiver (large messages) */
+
+        int                  kqn_nnodes;     /* this cluster's size */
+        int                  kqn_elanid;     /* this node's elan ID */
+
+        EP_STATUSBLK         kqn_rpc_success;/* preset RPC reply status blocks */
+        EP_STATUSBLK         kqn_rpc_failed;
+        EP_STATUSBLK         kqn_rpc_version;/* reply to future version query */
+        EP_STATUSBLK         kqn_rpc_magic;  /* reply to future version query */
 }  kqswnal_data_t;
 
 /* kqn_init state */
@@ -347,8 +347,8 @@ kqswnal_pages_spanned (void *base, int nob)
 
 static inline void kqswnal_rx_decref (kqswnal_rx_t *krx)
 {
-        LASSERT (atomic_read (&krx->krx_refcount) > 0);
-        if (atomic_dec_and_test (&krx->krx_refcount))
+        LASSERT (cfs_atomic_read (&krx->krx_refcount) > 0);
+        if (cfs_atomic_dec_and_test (&krx->krx_refcount))
                 kqswnal_rx_done(krx);
 }
 
index ff50456..3134ab1 100644
@@ -357,12 +357,12 @@ kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
         kqswnal_unmap_tx (ktx);                 /* release temporary mappings */
         ktx->ktx_state = KTX_IDLE;
 
-        spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+        cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
 
-        list_del (&ktx->ktx_list);              /* take off active list */
-        list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
+        cfs_list_del (&ktx->ktx_list);              /* take off active list */
+        cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
 
-        spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+        cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
 }
 
 kqswnal_tx_t *
@@ -371,23 +371,25 @@ kqswnal_get_idle_tx (void)
         unsigned long  flags;
         kqswnal_tx_t  *ktx;
 
-        spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+        cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
 
         if (kqswnal_data.kqn_shuttingdown ||
-            list_empty (&kqswnal_data.kqn_idletxds)) {
-                spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+            cfs_list_empty (&kqswnal_data.kqn_idletxds)) {
+                cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock,
+                                            flags);
 
                 return NULL;
         }
 
-        ktx = list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t, ktx_list);
-        list_del (&ktx->ktx_list);
+        ktx = cfs_list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t,
+                              ktx_list);
+        cfs_list_del (&ktx->ktx_list);
 
-        list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
+        cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
         ktx->ktx_launcher = current->pid;
-        atomic_inc(&kqswnal_data.kqn_pending_txs);
+        cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
 
-        spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+        cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
 
         /* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
         LASSERT (ktx->ktx_nmappedpages == 0);
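
kqswnal_get_idle_tx() keeps the usual freelist idiom (take the lock,
bail out on shutdown or an empty list, unlink the head and move it to
the active list) and only swaps in the prefixed lock and list helpers.
The spinlock wrappers would plausibly reduce to the native calls (a
sketch, assuming Linux):

        /* Assumed Linux mappings for the cfs_ spinlock wrappers. */
        #define cfs_spin_lock_init(l)             spin_lock_init(l)
        #define cfs_spin_lock_irqsave(l, f)       spin_lock_irqsave(l, f)
        #define cfs_spin_unlock_irqrestore(l, f)  spin_unlock_irqrestore(l, f)
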
@@ -402,9 +404,9 @@ kqswnal_tx_done_in_thread_context (kqswnal_tx_t *ktx)
         int            status0  = 0;
         int            status1  = 0;
         kqswnal_rx_t  *krx;
-        
-        LASSERT (!in_interrupt());
-        
+
+        LASSERT (!cfs_in_interrupt());
+
         if (ktx->ktx_status == -EHOSTDOWN)
                 kqswnal_notify_peer_down(ktx);
 
@@ -507,19 +509,19 @@ kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
 
         ktx->ktx_status = status;
 
-        if (!in_interrupt()) {
+        if (!cfs_in_interrupt()) {
                 kqswnal_tx_done_in_thread_context(ktx);
                 return;
         }
 
         /* Complete the send in thread context */
-        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
-        
-        list_add_tail(&ktx->ktx_schedlist, 
-                      &kqswnal_data.kqn_donetxds);
-        wake_up(&kqswnal_data.kqn_sched_waitq);
-        
-        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
+        cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
+
+        cfs_list_add_tail(&ktx->ktx_schedlist,
+                          &kqswnal_data.kqn_donetxds);
+        cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+
+        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
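
kqswnal_tx_done() shows the interrupt-context hand-off: when called
from interrupt context the completion is queued on kqn_donetxds and
the scheduler thread is woken, with wake_up() becoming
cfs_waitq_signal() (and wake_up_all() becoming cfs_waitq_broadcast()
later in this patch).  The assumed Linux mappings, sketched:

        /* Assumed Linux mappings for the waitqueue wrappers and the
         * interrupt-context test. */
        #define cfs_waitq_init(wq)       init_waitqueue_head(wq)
        #define cfs_waitq_signal(wq)     wake_up(wq)
        #define cfs_waitq_broadcast(wq)  wake_up_all(wq)
        #define cfs_in_interrupt()       in_interrupt()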
 
 static void
@@ -602,7 +604,7 @@ int
 kqswnal_launch (kqswnal_tx_t *ktx)
 {
         /* Don't block for transmit descriptor if we're in interrupt context */
-        int   attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
+        int   attr = cfs_in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
         int   dest = kqswnal_nid2elanid (ktx->ktx_nid);
         unsigned long flags;
         int   rc;
@@ -652,7 +654,7 @@ kqswnal_launch (kqswnal_tx_t *ktx)
                                          kqswnal_txhandler, ktx,
                                          NULL, ktx->ktx_frags, ktx->ktx_nfrag);
                 break;
-                
+
         default:
                 LBUG();
                 rc = -EINVAL;                   /* no compiler warning please */
@@ -664,16 +666,19 @@ kqswnal_launch (kqswnal_tx_t *ktx)
                 return (0);
 
         case EP_ENOMEM: /* can't allocate ep txd => queue for later */
-                spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+                cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
 
-                list_add_tail (&ktx->ktx_schedlist, &kqswnal_data.kqn_delayedtxds);
-                wake_up (&kqswnal_data.kqn_sched_waitq);
+                cfs_list_add_tail (&ktx->ktx_schedlist,
+                                   &kqswnal_data.kqn_delayedtxds);
+                cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
 
-                spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
+                cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock,
+                                            flags);
                 return (0);
 
         default: /* fatal error */
-                CDEBUG (D_NETERROR, "Tx to %s failed: %d\n", libcfs_nid2str(ktx->ktx_nid), rc);
+                CDEBUG (D_NETERROR, "Tx to %s failed: %d\n",
+                        libcfs_nid2str(ktx->ktx_nid), rc);
                 kqswnal_notify_peer_down(ktx);
                 return (-EHOSTUNREACH);
         }
@@ -895,9 +900,9 @@ kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg,
         ktx->ktx_args[0] = krx;
         ktx->ktx_args[1] = lntmsg;
 
-        LASSERT (atomic_read(&krx->krx_refcount) > 0);
+        LASSERT (cfs_atomic_read(&krx->krx_refcount) > 0);
         /* Take an extra ref for the completion callback */
-        atomic_inc(&krx->krx_refcount);
+        cfs_atomic_inc(&krx->krx_refcount);
 
         /* Map on the rail the RPC prefers */
         ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx,
@@ -974,7 +979,7 @@ kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg,
                 kqswnal_put_idle_tx (ktx);
         }
 
-        atomic_dec(&kqswnal_data.kqn_pending_txs);
+        cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
         return (rc);
 }
 
@@ -1005,7 +1010,7 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         LASSERT (payload_niov <= LNET_MAX_IOV);
 
         /* It must be OK to kmap() if required */
-        LASSERT (payload_kiov == NULL || !in_interrupt ());
+        LASSERT (payload_kiov == NULL || !cfs_in_interrupt ());
         /* payload is either all vaddrs or all pages */
         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
 
@@ -1250,14 +1255,14 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                 
         }
         
-        atomic_dec(&kqswnal_data.kqn_pending_txs);
+        cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
         return (rc == 0 ? 0 : -EIO);
 }
 
 void
 kqswnal_requeue_rx (kqswnal_rx_t *krx)
 {
-        LASSERT (atomic_read(&krx->krx_refcount) == 0);
+        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
         LASSERT (!krx->krx_rpc_reply_needed);
 
         krx->krx_state = KRX_POSTED;
@@ -1294,14 +1299,14 @@ kqswnal_rx_done (kqswnal_rx_t *krx)
 {
         int           rc;
 
-        LASSERT (atomic_read(&krx->krx_refcount) == 0);
+        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
 
         if (krx->krx_rpc_reply_needed) {
                 /* We've not completed the peer's RPC yet... */
                 krx->krx_rpc_reply.msg.magic   = LNET_PROTO_QSW_MAGIC;
                 krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
 
-                LASSERT (!in_interrupt());
+                LASSERT (!cfs_in_interrupt());
 
                 rc = ep_complete_rpc(krx->krx_rxd, 
                                      kqswnal_rpc_complete, krx,
@@ -1329,7 +1334,7 @@ kqswnal_parse (kqswnal_rx_t *krx)
         int             nob;
         int             rc;
 
-        LASSERT (atomic_read(&krx->krx_refcount) == 1);
+        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 1);
 
         if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) {
                 CERROR("Short message %d received from %s\n",
@@ -1517,7 +1522,7 @@ kqswnal_rxhandler(EP_RXD *rxd)
 
         /* Default to failure if an RPC reply is requested but not handled */
         krx->krx_rpc_reply.msg.status = -EPROTO;
-        atomic_set (&krx->krx_refcount, 1);
+        cfs_atomic_set (&krx->krx_refcount, 1);
 
         if (status != EP_SUCCESS) {
                 /* receives complete with failure when receiver is removed */
@@ -1530,17 +1535,17 @@ kqswnal_rxhandler(EP_RXD *rxd)
                 return;
         }
 
-        if (!in_interrupt()) {
+        if (!cfs_in_interrupt()) {
                 kqswnal_parse(krx);
                 return;
         }
 
-        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
 
-        list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
-        wake_up (&kqswnal_data.kqn_sched_waitq);
+        cfs_list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
+        cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
 
-        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
+        cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
 }
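
kqswnal_rxhandler() ends with the same hand-off shape as
kqswnal_tx_done(): queue the descriptor under kqn_sched_lock, then
poke the scheduler.  The patch keeps this open-coded at each call
site; factored out, the pattern would look like this hypothetical
helper (kqswnal_sched_queue is not in the patch):

        /* Hypothetical helper illustrating the scheduler hand-off:
         * queue an item under the scheduler lock and wake one
         * scheduler thread. */
        static void
        kqswnal_sched_queue (cfs_list_t *item, cfs_list_t *queue)
        {
                unsigned long flags;

                cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
                cfs_list_add_tail(item, queue);
                cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
                cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                           flags);
        }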
 
 int
@@ -1563,7 +1568,7 @@ kqswnal_recv (lnet_ni_t     *ni,
         int                 msg_offset;
         int                 rc;
 
-        LASSERT (!in_interrupt ());             /* OK to map */
+        LASSERT (!cfs_in_interrupt ());             /* OK to map */
         /* Either all pages or all vaddrs */
         LASSERT (!(kiov != NULL && iov != NULL));
 
@@ -1653,19 +1658,19 @@ kqswnal_recv (lnet_ni_t     *ni,
 int
 kqswnal_thread_start (int (*fn)(void *arg), void *arg)
 {
-        long    pid = kernel_thread (fn, arg, 0);
+        long    pid = cfs_kernel_thread (fn, arg, 0);
 
         if (pid < 0)
                 return ((int)pid);
 
-        atomic_inc (&kqswnal_data.kqn_nthreads);
+        cfs_atomic_inc (&kqswnal_data.kqn_nthreads);
         return (0);
 }
 
 void
 kqswnal_thread_fini (void)
 {
-        atomic_dec (&kqswnal_data.kqn_nthreads);
+        cfs_atomic_dec (&kqswnal_data.kqn_nthreads);
 }
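
kqswnal_thread_start() and kqswnal_thread_fini() track the thread
count with the prefixed atomics, and thread creation goes through
cfs_kernel_thread().  On Linux the thread and scheduling wrappers
presumably map directly onto the kernel primitives (a sketch):

        /* Assumed Linux mappings for thread creation and yielding. */
        #define cfs_kernel_thread(fn, arg, flags)  kernel_thread(fn, arg, flags)
        #define cfs_schedule()                     schedule()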
 
 int
@@ -1680,49 +1685,51 @@ kqswnal_scheduler (void *arg)
 
         cfs_daemonize ("kqswnal_sched");
         cfs_block_allsigs ();
-        
-        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+
+        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
 
         for (;;)
         {
                 did_something = 0;
 
-                if (!list_empty (&kqswnal_data.kqn_readyrxds))
+                if (!cfs_list_empty (&kqswnal_data.kqn_readyrxds))
                 {
-                        krx = list_entry(kqswnal_data.kqn_readyrxds.next,
-                                         kqswnal_rx_t, krx_list);
-                        list_del (&krx->krx_list);
-                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
-                                               flags);
+                        krx = cfs_list_entry(kqswnal_data.kqn_readyrxds.next,
+                                             kqswnal_rx_t, krx_list);
+                        cfs_list_del (&krx->krx_list);
+                        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                                   flags);
 
                         LASSERT (krx->krx_state == KRX_PARSE);
                         kqswnal_parse (krx);
 
                         did_something = 1;
-                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
+                        cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+                                              flags);
                 }
 
-                if (!list_empty (&kqswnal_data.kqn_donetxds))
+                if (!cfs_list_empty (&kqswnal_data.kqn_donetxds))
                 {
-                        ktx = list_entry(kqswnal_data.kqn_donetxds.next,
-                                         kqswnal_tx_t, ktx_schedlist);
-                        list_del_init (&ktx->ktx_schedlist);
-                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
-                                               flags);
+                        ktx = cfs_list_entry(kqswnal_data.kqn_donetxds.next,
+                                             kqswnal_tx_t, ktx_schedlist);
+                        cfs_list_del_init (&ktx->ktx_schedlist);
+                        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                                   flags);
 
                         kqswnal_tx_done_in_thread_context(ktx);
 
                         did_something = 1;
-                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+                        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+                                               flags);
                 }
 
-                if (!list_empty (&kqswnal_data.kqn_delayedtxds))
+                if (!cfs_list_empty (&kqswnal_data.kqn_delayedtxds))
                 {
-                        ktx = list_entry(kqswnal_data.kqn_delayedtxds.next,
-                                         kqswnal_tx_t, ktx_schedlist);
-                        list_del_init (&ktx->ktx_schedlist);
-                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
-                                               flags);
+                        ktx = cfs_list_entry(kqswnal_data.kqn_delayedtxds.next,
+                                             kqswnal_tx_t, ktx_schedlist);
+                        cfs_list_del_init (&ktx->ktx_schedlist);
+                        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                                   flags);
 
                         rc = kqswnal_launch (ktx);
                         if (rc != 0) {
@@ -1730,36 +1737,41 @@ kqswnal_scheduler (void *arg)
                                        libcfs_nid2str(ktx->ktx_nid), rc);
                                 kqswnal_tx_done (ktx, rc);
                         }
-                        atomic_dec (&kqswnal_data.kqn_pending_txs);
+                        cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
 
                         did_something = 1;
-                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+                        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+                                               flags);
                 }
 
                 /* nothing to do or hogging CPU */
                 if (!did_something || counter++ == KQSW_RESCHED) {
-                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
-                                               flags);
+                        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                                   flags);
 
                         counter = 0;
 
                         if (!did_something) {
                                 if (kqswnal_data.kqn_shuttingdown == 2) {
-                                        /* We only exit in stage 2 of shutdown when 
-                                         * there's nothing left to do */
+                                        /* We only exit in stage 2 of shutdown
+                                         * when there's nothing left to do */
                                         break;
                                 }
-                                rc = wait_event_interruptible_exclusive (
+                                cfs_wait_event_interruptible_exclusive (
                                         kqswnal_data.kqn_sched_waitq,
                                         kqswnal_data.kqn_shuttingdown == 2 ||
-                                        !list_empty(&kqswnal_data.kqn_readyrxds) ||
-                                        !list_empty(&kqswnal_data.kqn_donetxds) ||
-                                        !list_empty(&kqswnal_data.kqn_delayedtxds));
+                                        !cfs_list_empty(&kqswnal_data. \
+                                                        kqn_readyrxds) ||
+                                        !cfs_list_empty(&kqswnal_data. \
+                                                        kqn_donetxds) ||
+                                        !cfs_list_empty(&kqswnal_data. \
+                                                        kqn_delayedtxds, rc));
                                 LASSERT (rc == 0);
                         } else if (need_resched())
-                                schedule ();
+                                cfs_schedule ();
 
-                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+                        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+                                               flags);
                 }
         }
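
One conversion in the scheduler loop is more than a rename: the call
site drops the "rc =" assignment and instead passes rc as the last
argument, so cfs_wait_event_interruptible_exclusive() evidently
returns its status through an out parameter.  A plausible shape for
such a macro, assuming Linux (not the verbatim definition):

        /* Sketch: wait_event wrapper reporting status via "rc"
         * instead of a return value. */
        #define cfs_wait_event_interruptible_exclusive(wq, cond, rc)       \
        do {                                                               \
                rc = wait_event_interruptible_exclusive(wq, cond);         \
        } while (0)

With this shape the LASSERT (rc == 0) after the call works unchanged.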
 
index 0344f13..d235aba 100644
@@ -181,15 +181,15 @@ int
 kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
 {
         kra_conn_t         *conn;
-        struct list_head   *ctmp;
-        struct list_head   *cnxt;
+        cfs_list_t         *ctmp;
+        cfs_list_t         *cnxt;
         int                 loopback;
         int                 count = 0;
 
         loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
 
-        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
-                conn = list_entry(ctmp, kra_conn_t, rac_list);
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
+                conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
 
                 if (conn == newconn)
                         continue;
@@ -231,13 +231,13 @@ int
 kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
 {
         kra_conn_t       *conn;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         int               loopback;
 
         loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
 
-        list_for_each(tmp, &peer->rap_conns) {
-                conn = list_entry(tmp, kra_conn_t, rac_list);
+        cfs_list_for_each(tmp, &peer->rap_conns) {
+                conn = cfs_list_entry(tmp, kra_conn_t, rac_list);
 
                 /* 'newconn' is from an earlier version of 'peer'!!! */
                 if (newconn->rac_peerstamp < conn->rac_peerstamp)
@@ -280,7 +280,7 @@ kranal_set_conn_uniqueness (kra_conn_t *conn)
 {
         unsigned long  flags;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         conn->rac_my_connstamp = kranal_data.kra_connstamp++;
 
@@ -288,7 +288,7 @@ kranal_set_conn_uniqueness (kra_conn_t *conn)
                 conn->rac_cqid = kranal_data.kra_next_cqid++;
         } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 }
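
kranal_set_conn_uniqueness() serialises connstamp and CQ-id
generation under kra_global_lock, now taken through the prefixed
rwlock calls.  The assumed Linux mappings, sketched:

        /* Assumed Linux mappings for the cfs_ rwlock wrappers. */
        #define cfs_rwlock_init(l)                 rwlock_init(l)
        #define cfs_read_lock(l)                   read_lock(l)
        #define cfs_read_unlock(l)                 read_unlock(l)
        #define cfs_write_lock_irqsave(l, f)       write_lock_irqsave(l, f)
        #define cfs_write_unlock_irqrestore(l, f)  write_unlock_irqrestore(l, f)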
 
 int
@@ -297,21 +297,21 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
         kra_conn_t    *conn;
         RAP_RETURN     rrc;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LIBCFS_ALLOC(conn, sizeof(*conn));
 
         if (conn == NULL)
                 return -ENOMEM;
 
         memset(conn, 0, sizeof(*conn));
-        atomic_set(&conn->rac_refcount, 1);
-        INIT_LIST_HEAD(&conn->rac_list);
-        INIT_LIST_HEAD(&conn->rac_hashlist);
-        INIT_LIST_HEAD(&conn->rac_schedlist);
-        INIT_LIST_HEAD(&conn->rac_fmaq);
-        INIT_LIST_HEAD(&conn->rac_rdmaq);
-        INIT_LIST_HEAD(&conn->rac_replyq);
-        spin_lock_init(&conn->rac_lock);
+        cfs_atomic_set(&conn->rac_refcount, 1);
+        CFS_INIT_LIST_HEAD(&conn->rac_list);
+        CFS_INIT_LIST_HEAD(&conn->rac_hashlist);
+        CFS_INIT_LIST_HEAD(&conn->rac_schedlist);
+        CFS_INIT_LIST_HEAD(&conn->rac_fmaq);
+        CFS_INIT_LIST_HEAD(&conn->rac_rdmaq);
+        CFS_INIT_LIST_HEAD(&conn->rac_replyq);
+        cfs_spin_lock_init(&conn->rac_lock);
 
         kranal_set_conn_uniqueness(conn);
 
@@ -327,7 +327,7 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
                 return -ENETDOWN;
         }
 
-        atomic_inc(&kranal_data.kra_nconns);
+        cfs_atomic_inc(&kranal_data.kra_nconns);
         *connp = conn;
         return 0;
 }
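
kranal_create_conn() initialises every queue with CFS_INIT_LIST_HEAD()
in place of INIT_LIST_HEAD().  The list API presumably converts
one-to-one (a sketch, assuming Linux):

        /* Assumed Linux mappings for the cfs_ list wrappers. */
        #define CFS_INIT_LIST_HEAD(h)            INIT_LIST_HEAD(h)
        #define cfs_list_add(n, h)               list_add(n, h)
        #define cfs_list_add_tail(n, h)          list_add_tail(n, h)
        #define cfs_list_del(n)                  list_del(n)
        #define cfs_list_del_init(n)             list_del_init(n)
        #define cfs_list_empty(h)                list_empty(h)
        #define cfs_list_entry(p, t, m)          list_entry(p, t, m)
        #define cfs_list_for_each(p, h)          list_for_each(p, h)
        #define cfs_list_for_each_safe(p, n, h)  list_for_each_safe(p, n, h)
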
@@ -337,15 +337,15 @@ kranal_destroy_conn(kra_conn_t *conn)
 {
         RAP_RETURN         rrc;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (!conn->rac_scheduled);
-        LASSERT (list_empty(&conn->rac_list));
-        LASSERT (list_empty(&conn->rac_hashlist));
-        LASSERT (list_empty(&conn->rac_schedlist));
-        LASSERT (atomic_read(&conn->rac_refcount) == 0);
-        LASSERT (list_empty(&conn->rac_fmaq));
-        LASSERT (list_empty(&conn->rac_rdmaq));
-        LASSERT (list_empty(&conn->rac_replyq));
+        LASSERT (cfs_list_empty(&conn->rac_list));
+        LASSERT (cfs_list_empty(&conn->rac_hashlist));
+        LASSERT (cfs_list_empty(&conn->rac_schedlist));
+        LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
+        LASSERT (cfs_list_empty(&conn->rac_fmaq));
+        LASSERT (cfs_list_empty(&conn->rac_rdmaq));
+        LASSERT (cfs_list_empty(&conn->rac_replyq));
 
         rrc = RapkDestroyRi(conn->rac_device->rad_handle,
                             conn->rac_rihandle);
@@ -355,19 +355,19 @@ kranal_destroy_conn(kra_conn_t *conn)
                 kranal_peer_decref(conn->rac_peer);
 
         LIBCFS_FREE(conn, sizeof(*conn));
-        atomic_dec(&kranal_data.kra_nconns);
+        cfs_atomic_dec(&kranal_data.kra_nconns);
 }
 
 void
 kranal_terminate_conn_locked (kra_conn_t *conn)
 {
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
-        LASSERT (!list_empty(&conn->rac_hashlist));
-        LASSERT (list_empty(&conn->rac_list));
+        LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+        LASSERT (cfs_list_empty(&conn->rac_list));
 
         /* Remove from conn hash table: no new callbacks */
-        list_del_init(&conn->rac_hashlist);
+        cfs_list_del_init(&conn->rac_hashlist);
         kranal_conn_decref(conn);
 
         conn->rac_state = RANAL_CONN_CLOSED;
@@ -386,14 +386,14 @@ kranal_close_conn_locked (kra_conn_t *conn, int error)
                "closing conn to %s: error %d\n", 
                libcfs_nid2str(peer->rap_nid), error);
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
-        LASSERT (!list_empty(&conn->rac_hashlist));
-        LASSERT (!list_empty(&conn->rac_list));
+        LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+        LASSERT (!cfs_list_empty(&conn->rac_list));
 
-        list_del_init(&conn->rac_list);
+        cfs_list_del_init(&conn->rac_list);
 
-        if (list_empty(&peer->rap_conns) &&
+        if (cfs_list_empty(&peer->rap_conns) &&
             peer->rap_persistence == 0) {
                 /* Non-persistent peer with no more conns... */
                 kranal_unlink_peer_locked(peer);
@@ -404,7 +404,7 @@ kranal_close_conn_locked (kra_conn_t *conn, int error)
          * RDMA.  Otherwise if we wait for the full timeout we can also be sure
          * all RDMA has stopped. */
         conn->rac_last_rx = jiffies;
-        mb();
+        cfs_mb();
 
         conn->rac_state = RANAL_CONN_CLOSING;
         kranal_schedule_conn(conn);             /* schedule sending CLOSE */
@@ -418,12 +418,12 @@ kranal_close_conn (kra_conn_t *conn, int error)
         unsigned long    flags;
 
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                 kranal_close_conn_locked(conn, error);
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 }
 
 int
@@ -448,10 +448,10 @@ kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
 
         /* Schedule conn on rad_new_conns */
         kranal_conn_addref(conn);
-        spin_lock_irqsave(&dev->rad_lock, flags);
-        list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
-        wake_up(&dev->rad_waitq);
-        spin_unlock_irqrestore(&dev->rad_lock, flags);
+        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+        cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
+        cfs_waitq_signal(&dev->rad_waitq);
+        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
 
         rrc = RapkWaitToConnect(conn->rac_rihandle);
         if (rrc != RAP_SUCCESS) {
@@ -653,8 +653,8 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
 {
         kra_peer_t        *peer2;
         kra_tx_t          *tx;
-        lnet_nid_t          peer_nid;
-        lnet_nid_t          dst_nid;
+        lnet_nid_t         peer_nid;
+        lnet_nid_t         dst_nid;
         unsigned long      flags;
         kra_conn_t        *conn;
         int                rc;
@@ -670,12 +670,13 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
                 if (rc != 0)
                         return rc;
 
-                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 if (!kranal_peer_active(peer)) {
                         /* raced with peer getting unlinked */
-                        write_unlock_irqrestore(&kranal_data.kra_global_lock,
-                                                flags);
+                        cfs_write_unlock_irqrestore(&kranal_data. \
+                                                    kra_global_lock,
+                                                    flags);
                         kranal_conn_decref(conn);
                         return -ESTALE;
                 }
@@ -699,7 +700,7 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
                         return -ENOMEM;
                 }
 
-                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 peer2 = kranal_find_peer_locked(peer_nid);
                 if (peer2 == NULL) {
@@ -717,7 +718,8 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
          * this while holding the global lock, to synch with connection
          * destruction on NID change. */
         if (kranal_data.kra_ni->ni_nid != dst_nid) {
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
 
                 CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
                        libcfs_nid2str(peer_nid), libcfs_nid2str(dst_nid), 
@@ -731,9 +733,10 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
          * _don't_ have any blocked txs to complete with failure. */
         rc = kranal_conn_isdup_locked(peer, conn);
         if (rc != 0) {
-                LASSERT (!list_empty(&peer->rap_conns));
-                LASSERT (list_empty(&peer->rap_tx_queue));
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+                LASSERT (!cfs_list_empty(&peer->rap_conns));
+                LASSERT (cfs_list_empty(&peer->rap_tx_queue));
+                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
                 CWARN("Not creating duplicate connection to %s: %d\n",
                       libcfs_nid2str(peer_nid), rc);
                 rc = 0;
@@ -742,8 +745,8 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
 
         if (new_peer) {
                 /* peer table takes my ref on the new peer */
-                list_add_tail(&peer->rap_list,
-                              kranal_nid2peerlist(peer_nid));
+                cfs_list_add_tail(&peer->rap_list,
+                                  kranal_nid2peerlist(peer_nid));
         }
 
         /* initialise timestamps before reaper looks at them */
@@ -751,24 +754,24 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
 
         kranal_peer_addref(peer);               /* +1 ref for conn */
         conn->rac_peer = peer;
-        list_add_tail(&conn->rac_list, &peer->rap_conns);
+        cfs_list_add_tail(&conn->rac_list, &peer->rap_conns);
 
         kranal_conn_addref(conn);               /* +1 ref for conn table */
-        list_add_tail(&conn->rac_hashlist,
-                      kranal_cqid2connlist(conn->rac_cqid));
+        cfs_list_add_tail(&conn->rac_hashlist,
+                          kranal_cqid2connlist(conn->rac_cqid));
 
         /* Schedule all packets blocking for a connection */
-        while (!list_empty(&peer->rap_tx_queue)) {
-                tx = list_entry(peer->rap_tx_queue.next,
-                                kra_tx_t, tx_list);
+        while (!cfs_list_empty(&peer->rap_tx_queue)) {
+                tx = cfs_list_entry(peer->rap_tx_queue.next,
+                                    kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 kranal_post_fma(conn, tx);
         }
 
         nstale = kranal_close_stale_conns_locked(peer, conn);
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         /* CAVEAT EMPTOR: passive peer can disappear NOW */
 
@@ -797,7 +800,7 @@ kranal_connect (kra_peer_t *peer)
 {
         kra_tx_t          *tx;
         unsigned long      flags;
-        struct list_head   zombies;
+        cfs_list_t         zombies;
         int                rc;
 
         LASSERT (peer->rap_connecting);
@@ -810,7 +813,7 @@ kranal_connect (kra_peer_t *peer)
         CDEBUG(D_NET, "Done handshake %s:%d \n", 
                libcfs_nid2str(peer->rap_nid), rc);
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         LASSERT (peer->rap_connecting);
         peer->rap_connecting = 0;
@@ -818,11 +821,12 @@ kranal_connect (kra_peer_t *peer)
         if (rc == 0) {
                 /* kranal_conn_handshake() queues blocked txs immediately on
                  * success to avoid messages jumping the queue */
-                LASSERT (list_empty(&peer->rap_tx_queue));
+                LASSERT (cfs_list_empty(&peer->rap_tx_queue));
 
                 peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */
 
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
                 return;
         }
 
@@ -834,27 +838,28 @@ kranal_connect (kra_peer_t *peer)
                 MIN(peer->rap_reconnect_interval,
                     *kranal_tunables.kra_max_reconnect_interval);
 
-        peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ;
+        peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval *
+                CFS_HZ;
 
         /* Grab all blocked packets while we have the global lock */
-        list_add(&zombies, &peer->rap_tx_queue);
-        list_del_init(&peer->rap_tx_queue);
+        cfs_list_add(&zombies, &peer->rap_tx_queue);
+        cfs_list_del_init(&peer->rap_tx_queue);
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
-        if (list_empty(&zombies))
+        if (cfs_list_empty(&zombies))
                 return;
 
         CDEBUG(D_NETERROR, "Dropping packets for %s: connection failed\n",
                libcfs_nid2str(peer->rap_nid));
 
         do {
-                tx = list_entry(zombies.next, kra_tx_t, tx_list);
+                tx = cfs_list_entry(zombies.next, kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 kranal_tx_done(tx, -EHOSTUNREACH);
 
-        } while (!list_empty(&zombies));
+        } while (!cfs_list_empty(&zombies));
 }
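
kranal_connect() computes rap_reconnect_time in jiffies via CFS_HZ,
and the close path above orders rac_last_rx against the state change
with cfs_mb().  The time and barrier helpers renamed in these hunks
would presumably reduce to (a sketch, assuming Linux):

        /* Assumed Linux mappings for the time and barrier helpers. */
        #define CFS_HZ                    HZ
        #define CFS_MAX_SCHEDULE_TIMEOUT  MAX_SCHEDULE_TIMEOUT
        #define cfs_mb()                  mb()
        #define cfs_gettimeofday(tv)      do_gettimeofday(tv)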
 
 void
@@ -885,12 +890,12 @@ kranal_accept (lnet_ni_t *ni, struct socket *sock)
 
         ras->ras_sock = sock;
 
-        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
-        list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
-        wake_up(&kranal_data.kra_connd_waitq);
+        cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
+        cfs_waitq_signal(&kranal_data.kra_connd_waitq);
 
-        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
         return 0;
 }
 
@@ -909,29 +914,30 @@ kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
         memset(peer, 0, sizeof(*peer));         /* zero flags etc */
 
         peer->rap_nid = nid;
-        atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
+        cfs_atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
 
-        INIT_LIST_HEAD(&peer->rap_list);
-        INIT_LIST_HEAD(&peer->rap_connd_list);
-        INIT_LIST_HEAD(&peer->rap_conns);
-        INIT_LIST_HEAD(&peer->rap_tx_queue);
+        CFS_INIT_LIST_HEAD(&peer->rap_list);
+        CFS_INIT_LIST_HEAD(&peer->rap_connd_list);
+        CFS_INIT_LIST_HEAD(&peer->rap_conns);
+        CFS_INIT_LIST_HEAD(&peer->rap_tx_queue);
 
         peer->rap_reconnect_interval = 0;       /* OK to connect at any time */
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         if (kranal_data.kra_nonewpeers) {
                 /* shutdown has started already */
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
-                
+                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
+
                 LIBCFS_FREE(peer, sizeof(*peer));
                 CERROR("Can't create peer: network shutdown\n");
                 return -ESHUTDOWN;
         }
 
-        atomic_inc(&kranal_data.kra_npeers);
+        cfs_atomic_inc(&kranal_data.kra_npeers);
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         *peerp = peer;
         return 0;
@@ -943,13 +949,13 @@ kranal_destroy_peer (kra_peer_t *peer)
         CDEBUG(D_NET, "peer %s %p deleted\n", 
                libcfs_nid2str(peer->rap_nid), peer);
 
-        LASSERT (atomic_read(&peer->rap_refcount) == 0);
+        LASSERT (cfs_atomic_read(&peer->rap_refcount) == 0);
         LASSERT (peer->rap_persistence == 0);
         LASSERT (!kranal_peer_active(peer));
         LASSERT (!peer->rap_connecting);
-        LASSERT (list_empty(&peer->rap_conns));
-        LASSERT (list_empty(&peer->rap_tx_queue));
-        LASSERT (list_empty(&peer->rap_connd_list));
+        LASSERT (cfs_list_empty(&peer->rap_conns));
+        LASSERT (cfs_list_empty(&peer->rap_tx_queue));
+        LASSERT (cfs_list_empty(&peer->rap_connd_list));
 
         LIBCFS_FREE(peer, sizeof(*peer));
 
@@ -957,29 +963,29 @@ kranal_destroy_peer (kra_peer_t *peer)
          * they are destroyed, so we can be assured that _all_ state to do
          * with this peer has been cleaned up when its refcount drops to
          * zero. */
-        atomic_dec(&kranal_data.kra_npeers);
+        cfs_atomic_dec(&kranal_data.kra_npeers);
 }
 
 kra_peer_t *
 kranal_find_peer_locked (lnet_nid_t nid)
 {
-        struct list_head *peer_list = kranal_nid2peerlist(nid);
-        struct list_head *tmp;
+        cfs_list_t       *peer_list = kranal_nid2peerlist(nid);
+        cfs_list_t       *tmp;
         kra_peer_t       *peer;
 
-        list_for_each (tmp, peer_list) {
+        cfs_list_for_each (tmp, peer_list) {
 
-                peer = list_entry(tmp, kra_peer_t, rap_list);
+                peer = cfs_list_entry(tmp, kra_peer_t, rap_list);
 
                 LASSERT (peer->rap_persistence > 0 ||     /* persistent peer */
-                         !list_empty(&peer->rap_conns));  /* active conn */
+                         !cfs_list_empty(&peer->rap_conns));  /* active conn */
 
                 if (peer->rap_nid != nid)
                         continue;
 
                 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                        peer, libcfs_nid2str(nid), 
-                       atomic_read(&peer->rap_refcount));
+                       cfs_atomic_read(&peer->rap_refcount));
                 return peer;
         }
         return NULL;
@@ -990,11 +996,11 @@ kranal_find_peer (lnet_nid_t nid)
 {
         kra_peer_t     *peer;
 
-        read_lock(&kranal_data.kra_global_lock);
+        cfs_read_lock(&kranal_data.kra_global_lock);
         peer = kranal_find_peer_locked(nid);
         if (peer != NULL)                       /* +1 ref for caller? */
                 kranal_peer_addref(peer);
-        read_unlock(&kranal_data.kra_global_lock);
+        cfs_read_unlock(&kranal_data.kra_global_lock);
 
         return peer;
 }
@@ -1003,10 +1009,10 @@ void
 kranal_unlink_peer_locked (kra_peer_t *peer)
 {
         LASSERT (peer->rap_persistence == 0);
-        LASSERT (list_empty(&peer->rap_conns));
+        LASSERT (cfs_list_empty(&peer->rap_conns));
 
         LASSERT (kranal_peer_active(peer));
-        list_del_init(&peer->rap_list);
+        cfs_list_del_init(&peer->rap_list);
 
         /* lose peerlist's ref */
         kranal_peer_decref(peer);
@@ -1017,18 +1023,18 @@ kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
                       int *persistencep)
 {
         kra_peer_t        *peer;
-        struct list_head  *ptmp;
+        cfs_list_t        *ptmp;
         int                i;
 
-        read_lock(&kranal_data.kra_global_lock);
+        cfs_read_lock(&kranal_data.kra_global_lock);
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
 
-                list_for_each(ptmp, &kranal_data.kra_peers[i]) {
+                cfs_list_for_each(ptmp, &kranal_data.kra_peers[i]) {
 
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
                         if (index-- > 0)
                                 continue;
@@ -1038,12 +1044,12 @@ kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
                         *portp = peer->rap_port;
                         *persistencep = peer->rap_persistence;
 
-                        read_unlock(&kranal_data.kra_global_lock);
+                        cfs_read_unlock(&kranal_data.kra_global_lock);
                         return 0;
                 }
         }
 
-        read_unlock(&kranal_data.kra_global_lock);
+        cfs_read_unlock(&kranal_data.kra_global_lock);
         return -ENOENT;
 }
 
@@ -1062,7 +1068,7 @@ kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
         if (rc != 0)
                 return rc;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         peer2 = kranal_find_peer_locked(nid);
         if (peer2 != NULL) {
@@ -1070,7 +1076,7 @@ kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
                 peer = peer2;
         } else {
                 /* peer table takes existing ref on peer */
-                list_add_tail(&peer->rap_list,
+                cfs_list_add_tail(&peer->rap_list,
                               kranal_nid2peerlist(nid));
         }
 
@@ -1078,24 +1084,24 @@ kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
         peer->rap_port = port;
         peer->rap_persistence++;
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
         return 0;
 }
 
 void
 kranal_del_peer_locked (kra_peer_t *peer)
 {
-        struct list_head *ctmp;
-        struct list_head *cnxt;
+        cfs_list_t       *ctmp;
+        cfs_list_t       *cnxt;
         kra_conn_t       *conn;
 
         peer->rap_persistence = 0;
 
-        if (list_empty(&peer->rap_conns)) {
+        if (cfs_list_empty(&peer->rap_conns)) {
                 kranal_unlink_peer_locked(peer);
         } else {
-                list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
-                        conn = list_entry(ctmp, kra_conn_t, rac_list);
+                cfs_list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
+                        conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
 
                         kranal_close_conn_locked(conn, 0);
                 }
@@ -1107,15 +1113,15 @@ int
 kranal_del_peer (lnet_nid_t nid)
 {
         unsigned long      flags;
-        struct list_head  *ptmp;
-        struct list_head  *pnxt;
+        cfs_list_t        *ptmp;
+        cfs_list_t        *pnxt;
         kra_peer_t        *peer;
         int                lo;
         int                hi;
         int                i;
         int                rc = -ENOENT;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         if (nid != LNET_NID_ANY)
                 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
@@ -1125,10 +1131,10 @@ kranal_del_peer (lnet_nid_t nid)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
                         if (!(nid == LNET_NID_ANY || peer->rap_nid == nid))
                                 continue;
@@ -1138,7 +1144,7 @@ kranal_del_peer (lnet_nid_t nid)
                 }
         }
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         return rc;
 }
@@ -1147,36 +1153,37 @@ kra_conn_t *
 kranal_get_conn_by_idx (int index)
 {
         kra_peer_t        *peer;
-        struct list_head  *ptmp;
+        cfs_list_t        *ptmp;
         kra_conn_t        *conn;
-        struct list_head  *ctmp;
+        cfs_list_t        *ctmp;
         int                i;
 
-        read_lock (&kranal_data.kra_global_lock);
+        cfs_read_lock (&kranal_data.kra_global_lock);
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
-                list_for_each (ptmp, &kranal_data.kra_peers[i]) {
+                cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) {
 
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
-                        list_for_each (ctmp, &peer->rap_conns) {
+                        cfs_list_for_each (ctmp, &peer->rap_conns) {
                                 if (index-- > 0)
                                         continue;
 
-                                conn = list_entry(ctmp, kra_conn_t, rac_list);
-                                CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn, 
+                                conn = cfs_list_entry(ctmp, kra_conn_t,
+                                                      rac_list);
+                                CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
                                        libcfs_nid2str(conn->rac_peer->rap_nid),
-                                       atomic_read(&conn->rac_refcount));
-                                atomic_inc(&conn->rac_refcount);
-                                read_unlock(&kranal_data.kra_global_lock);
+                                       cfs_atomic_read(&conn->rac_refcount));
+                                cfs_atomic_inc(&conn->rac_refcount);
+                                cfs_read_unlock(&kranal_data.kra_global_lock);
                                 return conn;
                         }
                 }
         }
 
-        read_unlock(&kranal_data.kra_global_lock);
+        cfs_read_unlock(&kranal_data.kra_global_lock);
         return NULL;
 }
 
@@ -1184,12 +1191,12 @@ int
 kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
 {
         kra_conn_t         *conn;
-        struct list_head   *ctmp;
-        struct list_head   *cnxt;
+        cfs_list_t         *ctmp;
+        cfs_list_t         *cnxt;
         int                 count = 0;
 
-        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
-                conn = list_entry(ctmp, kra_conn_t, rac_list);
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
+                conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
 
                 count++;
                 kranal_close_conn_locked(conn, why);
@@ -1203,14 +1210,14 @@ kranal_close_matching_conns (lnet_nid_t nid)
 {
         unsigned long       flags;
         kra_peer_t         *peer;
-        struct list_head   *ptmp;
-        struct list_head   *pnxt;
+        cfs_list_t         *ptmp;
+        cfs_list_t         *pnxt;
         int                 lo;
         int                 hi;
         int                 i;
         int                 count = 0;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         if (nid != LNET_NID_ANY)
                 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
@@ -1220,11 +1227,11 @@ kranal_close_matching_conns (lnet_nid_t nid)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
+                cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
 
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
                         if (!(nid == LNET_NID_ANY || nid == peer->rap_nid))
                                 continue;
@@ -1233,7 +1240,7 @@ kranal_close_matching_conns (lnet_nid_t nid)
                 }
         }
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         /* wildcards always succeed */
         if (nid == LNET_NID_ANY)
@@ -1310,27 +1317,27 @@ kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 }
 
 void
-kranal_free_txdescs(struct list_head *freelist)
+kranal_free_txdescs(cfs_list_t *freelist)
 {
         kra_tx_t    *tx;
 
-        while (!list_empty(freelist)) {
-                tx = list_entry(freelist->next, kra_tx_t, tx_list);
+        while (!cfs_list_empty(freelist)) {
+                tx = cfs_list_entry(freelist->next, kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
                 LIBCFS_FREE(tx, sizeof(*tx));
         }
 }
 
 int
-kranal_alloc_txdescs(struct list_head *freelist, int n)
+kranal_alloc_txdescs(cfs_list_t *freelist, int n)
 {
         int            i;
         kra_tx_t      *tx;
 
         LASSERT (freelist == &kranal_data.kra_idle_txs);
-        LASSERT (list_empty(freelist));
+        LASSERT (cfs_list_empty(freelist));
 
         for (i = 0; i < n; i++) {
 
@@ -1354,7 +1361,7 @@ kranal_alloc_txdescs(struct list_head *freelist, int n)
                 tx->tx_buftype = RANAL_BUF_NONE;
                 tx->tx_msg.ram_type = RANAL_MSG_NONE;
 
-                list_add(&tx->tx_list, freelist);
+                cfs_list_add(&tx->tx_list, freelist);
         }
 
         return 0;
@@ -1411,13 +1418,13 @@ kranal_device_init(int id, kra_device_t *dev)
 void
 kranal_device_fini(kra_device_t *dev)
 {
-        LASSERT (list_empty(&dev->rad_ready_conns));
-        LASSERT (list_empty(&dev->rad_new_conns));
+        LASSERT (cfs_list_empty(&dev->rad_ready_conns));
+        LASSERT (cfs_list_empty(&dev->rad_new_conns));
         LASSERT (dev->rad_nphysmap == 0);
         LASSERT (dev->rad_nppphysmap == 0);
         LASSERT (dev->rad_nvirtmap == 0);
         LASSERT (dev->rad_nobvirtmap == 0);
-                
+
         LASSERT(dev->rad_scheduler == NULL);
         RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
         RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
@@ -1431,7 +1438,7 @@ kranal_shutdown (lnet_ni_t *ni)
         unsigned long flags;
 
         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-               atomic_read(&libcfs_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
         LASSERT (ni == kranal_data.kra_ni);
         LASSERT (ni->ni_data == &kranal_data);
@@ -1443,35 +1450,37 @@ kranal_shutdown (lnet_ni_t *ni)
 
         case RANAL_INIT_ALL:
                 /* Prevent new peers from being created */
-                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
                 kranal_data.kra_nonewpeers = 1;
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
-                
+                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
+
                 /* Remove all existing peers from the peer table */
                 kranal_del_peer(LNET_NID_ANY);
 
                 /* Wait for pending conn reqs to be handled */
                 i = 2;
-                spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
-                while (!list_empty(&kranal_data.kra_connd_acceptq)) {
-                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, 
-                                               flags);
+                cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+                while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+                        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+                                                   flags);
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                                "waiting for conn reqs to clean up\n");
                         cfs_pause(cfs_time_seconds(1));
 
-                        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+                        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+                                              flags);
                 }
-                spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+                cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
                 /* Wait for all peers to be freed */
                 i = 2;
-                while (atomic_read(&kranal_data.kra_npeers) != 0) {
+                while (cfs_atomic_read(&kranal_data.kra_npeers) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                                "waiting for %d peers to close down\n",
-                               atomic_read(&kranal_data.kra_npeers));
+                               cfs_atomic_read(&kranal_data.kra_npeers));
                         cfs_pause(cfs_time_seconds(1));
                 }
                 /* fall through */
@@ -1485,7 +1494,7 @@ kranal_shutdown (lnet_ni_t *ni)
          * while there are still active connds, but these will be temporary
          * since peer creation always fails after the listener has started to
          * shut down. */
-        LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
+        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
         
         /* Flag threads to terminate */
         kranal_data.kra_shutdown = 1;
@@ -1493,47 +1502,47 @@ kranal_shutdown (lnet_ni_t *ni)
         for (i = 0; i < kranal_data.kra_ndevs; i++) {
                 kra_device_t *dev = &kranal_data.kra_devices[i];
 
-                spin_lock_irqsave(&dev->rad_lock, flags);
-                wake_up(&dev->rad_waitq);
-                spin_unlock_irqrestore(&dev->rad_lock, flags);
+                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+                cfs_waitq_signal(&dev->rad_waitq);
+                cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
         }
 
-        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
-        wake_up_all(&kranal_data.kra_reaper_waitq);
-        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+        cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
+        cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
-        LASSERT (list_empty(&kranal_data.kra_connd_peers));
-        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
-        wake_up_all(&kranal_data.kra_connd_waitq);
-        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+        LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
+        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+        cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
+        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
         /* Wait for threads to exit */
         i = 2;
-        while (atomic_read(&kranal_data.kra_nthreads) != 0) {
+        while (cfs_atomic_read(&kranal_data.kra_nthreads) != 0) {
                 i++;
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                        "Waiting for %d threads to terminate\n",
-                       atomic_read(&kranal_data.kra_nthreads));
+                       cfs_atomic_read(&kranal_data.kra_nthreads));
                 cfs_pause(cfs_time_seconds(1));
         }
 
-        LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
+        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
         if (kranal_data.kra_peers != NULL) {
                 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
-                        LASSERT (list_empty(&kranal_data.kra_peers[i]));
+                        LASSERT (cfs_list_empty(&kranal_data.kra_peers[i]));
 
                 LIBCFS_FREE(kranal_data.kra_peers,
-                            sizeof (struct list_head) *
+                            sizeof (cfs_list_t) *
                             kranal_data.kra_peer_hash_size);
         }
 
-        LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
+        LASSERT (cfs_atomic_read(&kranal_data.kra_nconns) == 0);
         if (kranal_data.kra_conns != NULL) {
                 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
-                        LASSERT (list_empty(&kranal_data.kra_conns[i]));
+                        LASSERT (cfs_list_empty(&kranal_data.kra_conns[i]));
 
                 LIBCFS_FREE(kranal_data.kra_conns,
-                            sizeof (struct list_head) *
+                            sizeof (cfs_list_t) *
                             kranal_data.kra_conn_hash_size);
         }
 
@@ -1543,7 +1552,7 @@ kranal_shutdown (lnet_ni_t *ni)
         kranal_free_txdescs(&kranal_data.kra_idle_txs);
 
         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-               atomic_read(&libcfs_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
         kranal_data.kra_init = RANAL_INIT_NOTHING;
         PORTAL_MODULE_UNUSE;
@@ -1553,7 +1562,7 @@ int
 kranal_startup (lnet_ni_t *ni)
 {
         struct timeval    tv;
-        int               pkmem = atomic_read(&libcfs_kmemory);
+        int               pkmem = cfs_atomic_read(&libcfs_kmemory);
         int               rc;
         int               i;
         kra_device_t     *dev;
@@ -1592,33 +1601,33 @@ kranal_startup (lnet_ni_t *ni)
          * initialised with seconds + microseconds at startup time.  So we
          * rely on NOT creating connections more frequently on average than
          * 1MHz to ensure we don't use old connstamps when we reboot. */
-        do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
         kranal_data.kra_connstamp =
         kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
 
-        rwlock_init(&kranal_data.kra_global_lock);
+        cfs_rwlock_init(&kranal_data.kra_global_lock);
 
         for (i = 0; i < RANAL_MAXDEVS; i++ ) {
                 kra_device_t  *dev = &kranal_data.kra_devices[i];
 
                 dev->rad_idx = i;
-                INIT_LIST_HEAD(&dev->rad_ready_conns);
-                INIT_LIST_HEAD(&dev->rad_new_conns);
-                init_waitqueue_head(&dev->rad_waitq);
-                spin_lock_init(&dev->rad_lock);
+                CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
+                CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
+                cfs_waitq_init(&dev->rad_waitq);
+                cfs_spin_lock_init(&dev->rad_lock);
         }
 
-        kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
-        init_waitqueue_head(&kranal_data.kra_reaper_waitq);
-        spin_lock_init(&kranal_data.kra_reaper_lock);
+        kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
+        cfs_waitq_init(&kranal_data.kra_reaper_waitq);
+        cfs_spin_lock_init(&kranal_data.kra_reaper_lock);
 
-        INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
-        INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
-        init_waitqueue_head(&kranal_data.kra_connd_waitq);
-        spin_lock_init(&kranal_data.kra_connd_lock);
+        CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
+        CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
+        cfs_waitq_init(&kranal_data.kra_connd_waitq);
+        cfs_spin_lock_init(&kranal_data.kra_connd_lock);
 
-        INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
-        spin_lock_init(&kranal_data.kra_tx_lock);
+        CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
+        cfs_spin_lock_init(&kranal_data.kra_tx_lock);
 
         /* OK to call kranal_api_shutdown() to cleanup now */
         kranal_data.kra_init = RANAL_INIT_DATA;
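
The connstamp comment above carries the whole correctness argument: a
64-bit stamp built from seconds and microseconds only repeats across
reboots if connections are created faster than 1MHz on average. A
userspace sketch of the same construction (gettimeofday() here stands in
for the kernel's cfs_gettimeofday(); not part of this commit):

#include <stdio.h>
#include <sys/time.h>

/* Build a 64-bit stamp from wall-clock microseconds, as the startup
 * path above does.  Uniqueness across reboots holds only while stamps
 * are consumed slower than one per microsecond on average. */
static unsigned long long make_connstamp(void)
{
        struct timeval tv;

        gettimeofday(&tv, NULL);
        return (unsigned long long)tv.tv_sec * 1000000ULL + tv.tv_usec;
}

int main(void)
{
        printf("connstamp: %llu\n", make_connstamp());
        return 0;
}
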
@@ -1626,21 +1635,23 @@ kranal_startup (lnet_ni_t *ni)
 
         kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
         LIBCFS_ALLOC(kranal_data.kra_peers,
-                     sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
+                     sizeof(cfs_list_t) *
+                            kranal_data.kra_peer_hash_size);
         if (kranal_data.kra_peers == NULL)
                 goto failed;
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
-                INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
+                CFS_INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
 
         kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
         LIBCFS_ALLOC(kranal_data.kra_conns,
-                     sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
+                     sizeof(cfs_list_t) *
+                            kranal_data.kra_conn_hash_size);
         if (kranal_data.kra_conns == NULL)
                 goto failed;
 
         for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
-                INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
+                CFS_INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
 
         rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, 
                                   *kranal_tunables.kra_ntx);
index 50bbc16..b4902b1 100644 (file)
@@ -112,68 +112,68 @@ typedef struct
 
 typedef struct
 {
-        RAP_PVOID               rad_handle;     /* device handle */
-        RAP_PVOID               rad_fma_cqh;    /* FMA completion queue handle */
-        RAP_PVOID               rad_rdma_cqh;   /* rdma completion queue handle */
-        int                     rad_id;         /* device id */
-        int                     rad_idx;        /* index in kra_devices */
-        int                     rad_ready;      /* set by device callback */
-        struct list_head        rad_ready_conns;/* connections ready to tx/rx */
-        struct list_head        rad_new_conns;  /* new connections to complete */
-        wait_queue_head_t       rad_waitq;      /* scheduler waits here */
-        spinlock_t              rad_lock;       /* serialise */
-        void                   *rad_scheduler;  /* scheduling thread */
-        unsigned int            rad_nphysmap;   /* # phys mappings */
-        unsigned int            rad_nppphysmap; /* # phys pages mapped */
-        unsigned int            rad_nvirtmap;   /* # virt mappings */
-        unsigned long           rad_nobvirtmap; /* # virt bytes mapped */
+        RAP_PVOID              rad_handle;    /* device handle */
+        RAP_PVOID              rad_fma_cqh;   /* FMA completion queue handle */
+        RAP_PVOID              rad_rdma_cqh;  /* rdma completion queue handle */
+        int                    rad_id;        /* device id */
+        int                    rad_idx;       /* index in kra_devices */
+        int                    rad_ready;     /* set by device callback */
+        cfs_list_t             rad_ready_conns;/* connections ready to tx/rx */
+        cfs_list_t             rad_new_conns; /* new connections to complete */
+        cfs_waitq_t            rad_waitq;     /* scheduler waits here */
+        cfs_spinlock_t         rad_lock;      /* serialise */
+        void                  *rad_scheduler; /* scheduling thread */
+        unsigned int           rad_nphysmap;  /* # phys mappings */
+        unsigned int           rad_nppphysmap;/* # phys pages mapped */
+        unsigned int           rad_nvirtmap;  /* # virt mappings */
+        unsigned long          rad_nobvirtmap;/* # virt bytes mapped */
 } kra_device_t;
 
 typedef struct
 {
-        int               kra_init;             /* initialisation state */
-        int               kra_shutdown;         /* shut down? */
-        atomic_t          kra_nthreads;         /* # live threads */
-        lnet_ni_t        *kra_ni;               /* _the_ nal instance */
-        
-        kra_device_t      kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq etc */
-        int               kra_ndevs;            /* # devices */
-
-        rwlock_t          kra_global_lock;      /* stabilize peer/conn ops */
-
-        struct list_head *kra_peers;            /* hash table of all my known peers */
-        int               kra_peer_hash_size;   /* size of kra_peers */
-        atomic_t          kra_npeers;           /* # peers extant */
-        int               kra_nonewpeers;       /* prevent new peers */
-
-        struct list_head *kra_conns;            /* conns hashed by cqid */
-        int               kra_conn_hash_size;   /* size of kra_conns */
-        __u64             kra_peerstamp;        /* when I started up */
-        __u64             kra_connstamp;        /* conn stamp generator */
-        int               kra_next_cqid;        /* cqid generator */
-        atomic_t          kra_nconns;           /* # connections extant */
-
-        long              kra_new_min_timeout;  /* minimum timeout on any new conn */
-        wait_queue_head_t kra_reaper_waitq;     /* reaper sleeps here */
-        spinlock_t        kra_reaper_lock;      /* serialise */
-
-        struct list_head  kra_connd_peers;      /* peers waiting for a connection */
-        struct list_head  kra_connd_acceptq;    /* accepted sockets to handshake */
-        wait_queue_head_t kra_connd_waitq;      /* connection daemons sleep here */
-        spinlock_t        kra_connd_lock;       /* serialise */
-
-        struct list_head  kra_idle_txs;         /* idle tx descriptors */
-        __u64             kra_next_tx_cookie;   /* RDMA completion cookie */
-        spinlock_t        kra_tx_lock;          /* serialise */
+        int               kra_init;            /* initialisation state */
+        int               kra_shutdown;        /* shut down? */
+        cfs_atomic_t      kra_nthreads;        /* # live threads */
+        lnet_ni_t        *kra_ni;              /* _the_ nal instance */
+
+        kra_device_t      kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
+        int               kra_ndevs;           /* # devices */
+
+        cfs_rwlock_t      kra_global_lock;     /* stabilize peer/conn ops */
+
+        cfs_list_t       *kra_peers;           /* hash table of all my known peers */
+        int               kra_peer_hash_size;  /* size of kra_peers */
+        cfs_atomic_t      kra_npeers;          /* # peers extant */
+        int               kra_nonewpeers;      /* prevent new peers */
+
+        cfs_list_t       *kra_conns;           /* conns hashed by cqid */
+        int               kra_conn_hash_size;  /* size of kra_conns */
+        __u64             kra_peerstamp;       /* when I started up */
+        __u64             kra_connstamp;       /* conn stamp generator */
+        int               kra_next_cqid;       /* cqid generator */
+        cfs_atomic_t      kra_nconns;          /* # connections extant */
+
+        long              kra_new_min_timeout; /* minimum timeout on any new conn */
+        cfs_waitq_t       kra_reaper_waitq;    /* reaper sleeps here */
+        cfs_spinlock_t    kra_reaper_lock;     /* serialise */
+
+        cfs_list_t        kra_connd_peers;     /* peers waiting for a connection */
+        cfs_list_t        kra_connd_acceptq;   /* accepted sockets to handshake */
+        cfs_waitq_t       kra_connd_waitq;     /* connection daemons sleep here */
+        cfs_spinlock_t    kra_connd_lock;      /* serialise */
+
+        cfs_list_t        kra_idle_txs;        /* idle tx descriptors */
+        __u64             kra_next_tx_cookie;  /* RDMA completion cookie */
+        cfs_spinlock_t    kra_tx_lock;         /* serialise */
 } kra_data_t;
 
 #define RANAL_INIT_NOTHING         0
 #define RANAL_INIT_DATA            1
 #define RANAL_INIT_ALL             2
 
-typedef struct kra_acceptsock                   /* accepted socket queued for connd */
+typedef struct kra_acceptsock             /* accepted socket queued for connd */
 {
-        struct list_head     ras_list;          /* queue for attention */
+        cfs_list_t           ras_list;          /* queue for attention */
         struct socket       *ras_sock;          /* the accepted socket */
 } kra_acceptsock_t;
 
@@ -271,20 +271,20 @@ typedef struct                                  /* NB must fit in FMA "Prefix" *
 
 typedef struct kra_tx                           /* message descriptor */
 {
-        struct list_head          tx_list;      /* queue on idle_txs/rac_sendq/rac_waitq */
-        struct kra_conn          *tx_conn;      /* owning conn */
-        lnet_msg_t               *tx_lntmsg[2]; /* ptl msgs to finalize on completion */
-        unsigned long             tx_qtime;     /* when tx started to wait for something (jiffies) */
-        int                       tx_nob;       /* # bytes of payload */
-        int                       tx_buftype;   /* payload buffer type */
-        void                     *tx_buffer;    /* source/sink buffer */
-        int                       tx_phys_offset; /* first page offset (if phys) */
-        int                       tx_phys_npages; /* # physical pages */
-        RAP_PHYS_REGION          *tx_phys;      /* page descriptors */
-        RAP_MEM_KEY               tx_map_key;   /* mapping key */
-        RAP_RDMA_DESCRIPTOR       tx_rdma_desc; /* rdma descriptor */
-        __u64                     tx_cookie;    /* identify this tx to peer */
-        kra_msg_t                 tx_msg;       /* FMA message buffer */
+        cfs_list_t            tx_list;      /* queue on idle_txs/rac_sendq/rac_waitq */
+        struct kra_conn      *tx_conn;      /* owning conn */
+        lnet_msg_t           *tx_lntmsg[2]; /* ptl msgs to finalize on completion */
+        unsigned long         tx_qtime;     /* when tx started to wait for something (jiffies) */
+        int                   tx_nob;       /* # bytes of payload */
+        int                   tx_buftype;   /* payload buffer type */
+        void                 *tx_buffer;    /* source/sink buffer */
+        int                   tx_phys_offset; /* first page offset (if phys) */
+        int                   tx_phys_npages; /* # physical pages */
+        RAP_PHYS_REGION      *tx_phys;      /* page descriptors */
+        RAP_MEM_KEY           tx_map_key;   /* mapping key */
+        RAP_RDMA_DESCRIPTOR   tx_rdma_desc; /* rdma descriptor */
+        __u64                 tx_cookie;    /* identify this tx to peer */
+        kra_msg_t             tx_msg;       /* FMA message buffer */
 } kra_tx_t;
 
 #define RANAL_BUF_NONE           0              /* buffer type not set */
@@ -297,32 +297,32 @@ typedef struct kra_tx                           /* message descriptor */
 typedef struct kra_conn
 {
         struct kra_peer    *rac_peer;           /* owning peer */
-        struct list_head    rac_list;           /* stash on peer's conn list */
-        struct list_head    rac_hashlist;       /* stash in connection hash table */
-        struct list_head    rac_schedlist;      /* schedule (on rad_???_conns) for attention */
-        struct list_head    rac_fmaq;           /* txs queued for FMA */
-        struct list_head    rac_rdmaq;          /* txs awaiting RDMA completion */
-        struct list_head    rac_replyq;         /* txs awaiting replies */
-        __u64               rac_peerstamp;      /* peer's unique stamp */
-        __u64               rac_peer_connstamp; /* peer's unique connection stamp */
-        __u64               rac_my_connstamp;   /* my unique connection stamp */
-        unsigned long       rac_last_tx;        /* when I last sent an FMA message (jiffies) */
-        unsigned long       rac_last_rx;        /* when I last received an FMA message (jiffies) */
-        long                rac_keepalive;      /* keepalive interval (seconds) */
-        long                rac_timeout;        /* infer peer death if no rx for this many seconds */
-        __u32               rac_cqid;           /* my completion callback id (non-unique) */
-        __u32               rac_tx_seq;         /* tx msg sequence number */
-        __u32               rac_rx_seq;         /* rx msg sequence number */
-        atomic_t            rac_refcount;       /* # users */
-        unsigned int        rac_close_sent;     /* I've sent CLOSE */
-        unsigned int        rac_close_recvd;    /* I've received CLOSE */
-        unsigned int        rac_state;          /* connection state */
-        unsigned int        rac_scheduled;      /* being attended to */
-        spinlock_t          rac_lock;           /* serialise */
-        kra_device_t       *rac_device;         /* which device */
-        RAP_PVOID           rac_rihandle;       /* RA endpoint */
-        kra_msg_t          *rac_rxmsg;          /* incoming message (FMA prefix) */
-        kra_msg_t           rac_msg;            /* keepalive/CLOSE message buffer */
+        cfs_list_t          rac_list;          /* stash on peer's conn list */
+        cfs_list_t          rac_hashlist;      /* stash in connection hash table */
+        cfs_list_t          rac_schedlist;     /* schedule (on rad_???_conns) for attention */
+        cfs_list_t          rac_fmaq;          /* txs queued for FMA */
+        cfs_list_t          rac_rdmaq;         /* txs awaiting RDMA completion */
+        cfs_list_t          rac_replyq;        /* txs awaiting replies */
+        __u64               rac_peerstamp;     /* peer's unique stamp */
+        __u64               rac_peer_connstamp;/* peer's unique connection stamp */
+        __u64               rac_my_connstamp;  /* my unique connection stamp */
+        unsigned long       rac_last_tx;       /* when I last sent an FMA message (jiffies) */
+        unsigned long       rac_last_rx;       /* when I last received an FMA message (jiffies) */
+        long                rac_keepalive;     /* keepalive interval (seconds) */
+        long                rac_timeout;       /* infer peer death if no rx for this many seconds */
+        __u32               rac_cqid;          /* my completion callback id (non-unique) */
+        __u32               rac_tx_seq;        /* tx msg sequence number */
+        __u32               rac_rx_seq;        /* rx msg sequence number */
+        cfs_atomic_t        rac_refcount;      /* # users */
+        unsigned int        rac_close_sent;    /* I've sent CLOSE */
+        unsigned int        rac_close_recvd;   /* I've received CLOSE */
+        unsigned int        rac_state;         /* connection state */
+        unsigned int        rac_scheduled;     /* being attended to */
+        cfs_spinlock_t      rac_lock;          /* serialise */
+        kra_device_t       *rac_device;        /* which device */
+        RAP_PVOID           rac_rihandle;      /* RA endpoint */
+        kra_msg_t          *rac_rxmsg;         /* incoming message (FMA prefix) */
+        kra_msg_t           rac_msg;           /* keepalive/CLOSE message buffer */
 } kra_conn_t;
 
 #define RANAL_CONN_ESTABLISHED     0
@@ -331,16 +331,16 @@ typedef struct kra_conn
 
 typedef struct kra_peer
 {
-        struct list_head    rap_list;           /* stash on global peer list */
-        struct list_head    rap_connd_list;     /* schedule on kra_connd_peers */
-        struct list_head    rap_conns;          /* all active connections */
-        struct list_head    rap_tx_queue;       /* msgs waiting for a conn */
-        lnet_nid_t           rap_nid;            /* who's on the other end(s) */
-        __u32               rap_ip;             /* IP address of peer */
-        int                 rap_port;           /* port on which peer listens */
-        atomic_t            rap_refcount;       /* # users */
-        int                 rap_persistence;    /* "known" peer refs */
-        int                 rap_connecting;     /* connection forming */
+        cfs_list_t          rap_list;         /* stash on global peer list */
+        cfs_list_t          rap_connd_list;   /* schedule on kra_connd_peers */
+        cfs_list_t          rap_conns;        /* all active connections */
+        cfs_list_t          rap_tx_queue;     /* msgs waiting for a conn */
+        lnet_nid_t          rap_nid;          /* who's on the other end(s) */
+        __u32               rap_ip;           /* IP address of peer */
+        int                 rap_port;         /* port on which peer listens */
+        cfs_atomic_t        rap_refcount;     /* # users */
+        int                 rap_persistence;  /* "known" peer refs */
+        int                 rap_connecting;   /* connection forming */
         unsigned long       rap_reconnect_time; /* CURRENT_SECONDS when reconnect OK */
         unsigned long       rap_reconnect_interval; /* exponential backoff */
 } kra_peer_t;
@@ -355,20 +355,20 @@ static inline void
 kranal_peer_addref(kra_peer_t *peer)
 {
         CDEBUG(D_NET, "%p->%s\n", peer, libcfs_nid2str(peer->rap_nid));
-        LASSERT(atomic_read(&peer->rap_refcount) > 0);
-        atomic_inc(&peer->rap_refcount);
+        LASSERT(cfs_atomic_read(&peer->rap_refcount) > 0);
+        cfs_atomic_inc(&peer->rap_refcount);
 }
 
 static inline void
 kranal_peer_decref(kra_peer_t *peer)
 {
         CDEBUG(D_NET, "%p->%s\n", peer, libcfs_nid2str(peer->rap_nid));
-        LASSERT(atomic_read(&peer->rap_refcount) > 0);
-        if (atomic_dec_and_test(&peer->rap_refcount))
+        LASSERT(cfs_atomic_read(&peer->rap_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&peer->rap_refcount))
                 kranal_destroy_peer(peer);
 }
 
-static inline struct list_head *
+static inline cfs_list_t *
 kranal_nid2peerlist (lnet_nid_t nid)
 {
         unsigned int hash = ((unsigned int)nid) % kranal_data.kra_peer_hash_size;
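
kranal_peer_addref()/kranal_peer_decref() above follow the usual
intrusive refcount discipline: assert the count is already positive
before touching it (catching use-after-free early) and destroy on the
1 -> 0 transition. A self-contained analogue with C11 atomics (the
peer_t type and names are invented for illustration):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct peer {
        atomic_int refcount;
} peer_t;

static void peer_addref(peer_t *p)
{
        /* taking a ref is only legal while someone else holds one */
        assert(atomic_load(&p->refcount) > 0);
        atomic_fetch_add(&p->refcount, 1);
}

static void peer_decref(peer_t *p)
{
        assert(atomic_load(&p->refcount) > 0);
        /* fetch_sub returns the old value: 1 means we dropped the last ref */
        if (atomic_fetch_sub(&p->refcount, 1) == 1) {
                printf("destroying peer\n");
                free(p);
        }
}

int main(void)
{
        peer_t *p = malloc(sizeof(*p));

        atomic_init(&p->refcount, 1);   /* creation ref */
        peer_addref(p);
        peer_decref(p);
        peer_decref(p);                 /* last ref: frees */
        return 0;
}
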
@@ -380,7 +380,7 @@ static inline int
 kranal_peer_active(kra_peer_t *peer)
 {
         /* Am I in the peer hash table? */
-        return (!list_empty(&peer->rap_list));
+        return (!cfs_list_empty(&peer->rap_list));
 }
 
 static inline void
@@ -388,8 +388,8 @@ kranal_conn_addref(kra_conn_t *conn)
 {
         CDEBUG(D_NET, "%p->%s\n", conn, 
                libcfs_nid2str(conn->rac_peer->rap_nid));
-        LASSERT(atomic_read(&conn->rac_refcount) > 0);
-        atomic_inc(&conn->rac_refcount);
+        LASSERT(cfs_atomic_read(&conn->rac_refcount) > 0);
+        cfs_atomic_inc(&conn->rac_refcount);
 }
 
 static inline void
@@ -397,12 +397,12 @@ kranal_conn_decref(kra_conn_t *conn)
 {
         CDEBUG(D_NET, "%p->%s\n", conn,
                libcfs_nid2str(conn->rac_peer->rap_nid));
-        LASSERT(atomic_read(&conn->rac_refcount) > 0);
-        if (atomic_dec_and_test(&conn->rac_refcount))
+        LASSERT(cfs_atomic_read(&conn->rac_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&conn->rac_refcount))
                 kranal_destroy_conn(conn);
 }
 
-static inline struct list_head *
+static inline cfs_list_t *
 kranal_cqid2connlist (__u32 cqid)
 {
         unsigned int hash = cqid % kranal_data.kra_conn_hash_size;
@@ -413,12 +413,12 @@ kranal_cqid2connlist (__u32 cqid)
 static inline kra_conn_t *
 kranal_cqid2conn_locked (__u32 cqid)
 {
-        struct list_head *conns = kranal_cqid2connlist(cqid);
-        struct list_head *tmp;
+        cfs_list_t       *conns = kranal_cqid2connlist(cqid);
+        cfs_list_t       *tmp;
         kra_conn_t       *conn;
 
-        list_for_each(tmp, conns) {
-                conn = list_entry(tmp, kra_conn_t, rac_hashlist);
+        cfs_list_for_each(tmp, conns) {
+                conn = cfs_list_entry(tmp, kra_conn_t, rac_hashlist);
 
                 if (conn->rac_cqid == cqid)
                         return conn;
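
kranal_cqid2conn_locked() above narrows the search to one hash bucket,
then walks the bucket's list for an exact cqid match, since hashing is
lossy. A simplified standalone version of that bucket walk, using plain
next pointers in place of cfs_list_t (conn_t and all names invented):

#include <stddef.h>
#include <stdio.h>

#define HASH_SIZE 8

typedef struct conn {
        struct conn *next;          /* intrusive bucket chaining */
        unsigned int cqid;
} conn_t;

static conn_t *buckets[HASH_SIZE];

static conn_t **cqid2connlist(unsigned int cqid)
{
        return &buckets[cqid % HASH_SIZE];
}

static conn_t *cqid2conn(unsigned int cqid)
{
        conn_t *c;

        /* the hash narrows the search to one bucket; the walk resolves
         * collisions by comparing the full cqid */
        for (c = *cqid2connlist(cqid); c != NULL; c = c->next)
                if (c->cqid == cqid)
                        return c;
        return NULL;
}

int main(void)
{
        conn_t a = { NULL, 3 }, b = { NULL, 11 };   /* 3 and 11 collide mod 8 */

        b.next = &a;
        *cqid2connlist(11) = &b;

        printf("found cqid 3: %s\n", cqid2conn(3) ? "yes" : "no");
        printf("found cqid 5: %s\n", cqid2conn(5) ? "yes" : "no");
        return 0;
}
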
@@ -438,10 +438,10 @@ int kranal_startup (lnet_ni_t *ni);
 void kranal_shutdown (lnet_ni_t *ni);
 int kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
 int kranal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
-int kranal_eager_recv(lnet_ni_t *ni, void *private, 
-                        lnet_msg_t *lntmsg, void **new_private);
-int kranal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, 
-                int delayed, unsigned int niov, 
+int kranal_eager_recv(lnet_ni_t *ni, void *private,
+                      lnet_msg_t *lntmsg, void **new_private);
+int kranal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+                int delayed, unsigned int niov,
                 struct iovec *iov, lnet_kiov_t *kiov,
                 unsigned int offset, unsigned int mlen, unsigned int rlen);
 int kranal_accept(lnet_ni_t *ni, struct socket *sock);
index 9fa2958..82ab324 100644 (file)
@@ -55,14 +55,14 @@ kranal_device_callback(RAP_INT32 devid, RAP_PVOID arg)
                 if (dev->rad_id != devid)
                         continue;
 
-                spin_lock_irqsave(&dev->rad_lock, flags);
+                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
 
                 if (!dev->rad_ready) {
                         dev->rad_ready = 1;
-                        wake_up(&dev->rad_waitq);
+                        cfs_waitq_signal(&dev->rad_waitq);
                 }
 
-                spin_unlock_irqrestore(&dev->rad_lock, flags);
+                cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
                 return;
         }
 
@@ -75,16 +75,16 @@ kranal_schedule_conn(kra_conn_t *conn)
         kra_device_t    *dev = conn->rac_device;
         unsigned long    flags;
 
-        spin_lock_irqsave(&dev->rad_lock, flags);
+        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
 
         if (!conn->rac_scheduled) {
                 kranal_conn_addref(conn);       /* +1 ref for scheduler */
                 conn->rac_scheduled = 1;
-                list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
-                wake_up(&dev->rad_waitq);
+                cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
+                cfs_waitq_signal(&dev->rad_waitq);
         }
 
-        spin_unlock_irqrestore(&dev->rad_lock, flags);
+        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
 }
 
 kra_tx_t *
@@ -93,21 +93,21 @@ kranal_get_idle_tx (void)
         unsigned long  flags;
         kra_tx_t      *tx;
 
-        spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
+        cfs_spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
 
-        if (list_empty(&kranal_data.kra_idle_txs)) {
-                spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+        if (cfs_list_empty(&kranal_data.kra_idle_txs)) {
+                cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
                 return NULL;
         }
 
-        tx = list_entry(kranal_data.kra_idle_txs.next, kra_tx_t, tx_list);
-        list_del(&tx->tx_list);
+        tx = cfs_list_entry(kranal_data.kra_idle_txs.next, kra_tx_t, tx_list);
+        cfs_list_del(&tx->tx_list);
 
         /* Allocate a new completion cookie.  It might not be needed, but we've
          * got a lock right now... */
         tx->tx_cookie = kranal_data.kra_next_tx_cookie++;
 
-        spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+        cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
 
         LASSERT (tx->tx_buftype == RANAL_BUF_NONE);
         LASSERT (tx->tx_msg.ram_type == RANAL_MSG_NONE);
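
kranal_get_idle_tx() above is a classic locked freelist pop; the comment
notes that the completion cookie is assigned opportunistically because
the lock is already held. A pthread sketch of the same pop-and-stamp
pattern (tx_t and all names are invented; a mutex stands in for the
irq-safe spinlock):

#include <pthread.h>
#include <stdio.h>

typedef struct tx {
        struct tx          *next;      /* freelist linkage */
        unsigned long long  cookie;    /* completion cookie */
} tx_t;

static pthread_mutex_t     tx_lock = PTHREAD_MUTEX_INITIALIZER;
static tx_t               *idle_txs;
static unsigned long long  next_cookie = 1;

static tx_t *get_idle_tx(void)
{
        tx_t *tx;

        pthread_mutex_lock(&tx_lock);
        tx = idle_txs;
        if (tx != NULL) {
                idle_txs = tx->next;
                /* we hold the lock anyway, so stamping the cookie
                 * here costs nothing extra */
                tx->cookie = next_cookie++;
        }
        pthread_mutex_unlock(&tx_lock);
        return tx;
}

int main(void)
{
        tx_t pool[2] = { { &pool[1], 0 }, { NULL, 0 } };
        tx_t *tx;

        idle_txs = &pool[0];
        while ((tx = get_idle_tx()) != NULL)
                printf("got tx with cookie %llu\n", tx->cookie);
        return 0;
}
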
@@ -389,7 +389,7 @@ kranal_tx_done (kra_tx_t *tx, int completion)
         unsigned long    flags;
         int              i;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
 
         kranal_unmap_buffer(tx);
 
@@ -400,11 +400,11 @@ kranal_tx_done (kra_tx_t *tx, int completion)
         tx->tx_msg.ram_type = RANAL_MSG_NONE;
         tx->tx_conn = NULL;
 
-        spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
+        cfs_spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
 
-        list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);
+        cfs_list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);
 
-        spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+        cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
 
         /* finalize AFTER freeing lnet msgs */
         for (i = 0; i < 2; i++) {
@@ -418,11 +418,11 @@ kranal_tx_done (kra_tx_t *tx, int completion)
 kra_conn_t *
 kranal_find_conn_locked (kra_peer_t *peer)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
         /* just return the first connection */
-        list_for_each (tmp, &peer->rap_conns) {
-                return list_entry(tmp, kra_conn_t, rac_list);
+        cfs_list_for_each (tmp, &peer->rap_conns) {
+                return cfs_list_entry(tmp, kra_conn_t, rac_list);
         }
 
         return NULL;
@@ -435,10 +435,10 @@ kranal_post_fma (kra_conn_t *conn, kra_tx_t *tx)
 
         tx->tx_conn = conn;
 
-        spin_lock_irqsave(&conn->rac_lock, flags);
-        list_add_tail(&tx->tx_list, &conn->rac_fmaq);
+        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+        cfs_list_add_tail(&tx->tx_list, &conn->rac_fmaq);
         tx->tx_qtime = jiffies;
-        spin_unlock_irqrestore(&conn->rac_lock, flags);
+        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
 
         kranal_schedule_conn(conn);
 }
@@ -451,36 +451,36 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
         kra_conn_t      *conn;
         int              rc;
         int              retry;
-        rwlock_t        *g_lock = &kranal_data.kra_global_lock;
+        cfs_rwlock_t    *g_lock = &kranal_data.kra_global_lock;
 
         /* If I get here, I've committed to send, so I complete the tx with
          * failure on any problems */
 
-        LASSERT (tx->tx_conn == NULL);          /* only set when assigned a conn */
+        LASSERT (tx->tx_conn == NULL);      /* only set when assigned a conn */
 
         for (retry = 0; ; retry = 1) {
 
-                read_lock(g_lock);
+                cfs_read_lock(g_lock);
 
                 peer = kranal_find_peer_locked(nid);
                 if (peer != NULL) {
                         conn = kranal_find_conn_locked(peer);
                         if (conn != NULL) {
                                 kranal_post_fma(conn, tx);
-                                read_unlock(g_lock);
+                                cfs_read_unlock(g_lock);
                                 return;
                         }
                 }
                 
                 /* Making connections; I'll need a write lock... */
-                read_unlock(g_lock);
-                write_lock_irqsave(g_lock, flags);
+                cfs_read_unlock(g_lock);
+                cfs_write_lock_irqsave(g_lock, flags);
 
                 peer = kranal_find_peer_locked(nid);
                 if (peer != NULL)
                         break;
                 
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
                 
                 if (retry) {
                         CERROR("Can't find peer %s\n", libcfs_nid2str(nid));
@@ -502,18 +502,18 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
         if (conn != NULL) {
                 /* Connection exists; queue message on it */
                 kranal_post_fma(conn, tx);
-                write_unlock_irqrestore(g_lock, flags);
+                cfs_write_unlock_irqrestore(g_lock, flags);
                 return;
         }
                         
         LASSERT (peer->rap_persistence > 0);
 
         if (!peer->rap_connecting) {
-                LASSERT (list_empty(&peer->rap_tx_queue));
+                LASSERT (cfs_list_empty(&peer->rap_tx_queue));
 
                 if (!(peer->rap_reconnect_interval == 0 || /* first attempt */
-                      time_after_eq(jiffies, peer->rap_reconnect_time))) {
-                        write_unlock_irqrestore(g_lock, flags);
+                      cfs_time_aftereq(jiffies, peer->rap_reconnect_time))) {
+                        cfs_write_unlock_irqrestore(g_lock, flags);
                         kranal_tx_done(tx, -EHOSTUNREACH);
                         return;
                 }
@@ -521,19 +521,19 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
                 peer->rap_connecting = 1;
                 kranal_peer_addref(peer); /* extra ref for connd */
 
-                spin_lock(&kranal_data.kra_connd_lock);
+                cfs_spin_lock(&kranal_data.kra_connd_lock);
 
-                list_add_tail(&peer->rap_connd_list,
+                cfs_list_add_tail(&peer->rap_connd_list,
                               &kranal_data.kra_connd_peers);
-                wake_up(&kranal_data.kra_connd_waitq);
+                cfs_waitq_signal(&kranal_data.kra_connd_waitq);
 
-                spin_unlock(&kranal_data.kra_connd_lock);
+                cfs_spin_unlock(&kranal_data.kra_connd_lock);
         }
 
         /* A connection is being established; queue the message... */
-        list_add_tail(&tx->tx_list, &peer->rap_tx_queue);
+        cfs_list_add_tail(&tx->tx_list, &peer->rap_tx_queue);
 
-        write_unlock_irqrestore(g_lock, flags);
+        cfs_write_unlock_irqrestore(g_lock, flags);
 }
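
kranal_launch_tx() above is the standard rwlock escalation idiom: try
the common case under the shared lock, and only when the peer or conn is
missing drop it, retake the lock exclusively, and re-check the state,
because another thread may have created the peer in the unlocked window.
A pthread_rwlock sketch of the idiom (the cached_value example is
invented, not this driver's data):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;
static int cached_value = -1;               /* -1 means "not yet set" */

static int lookup_or_create(int nid)
{
        int v;

        pthread_rwlock_rdlock(&g_lock);     /* fast path: shared */
        v = cached_value;
        pthread_rwlock_unlock(&g_lock);
        if (v != -1)
                return v;

        pthread_rwlock_wrlock(&g_lock);     /* slow path: exclusive */
        /* must re-check: the state may have changed while we held
         * no lock at all between the unlock and the wrlock */
        if (cached_value == -1)
                cached_value = nid * 2;     /* "create" */
        v = cached_value;
        pthread_rwlock_unlock(&g_lock);
        return v;
}

int main(void)
{
        printf("first lookup:  %d\n", lookup_or_create(21));
        printf("second lookup: %d\n", lookup_or_create(99));
        return 0;
}
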
 
 void
@@ -573,10 +573,10 @@ kranal_rdma(kra_tx_t *tx, int type,
         rrc = RapkPostRdma(conn->rac_rihandle, &tx->tx_rdma_desc);
         LASSERT (rrc == RAP_SUCCESS);
 
-        spin_lock_irqsave(&conn->rac_lock, flags);
-        list_add_tail(&tx->tx_list, &conn->rac_rdmaq);
+        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+        cfs_list_add_tail(&tx->tx_list, &conn->rac_rdmaq);
         tx->tx_qtime = jiffies;
-        spin_unlock_irqrestore(&conn->rac_lock, flags);
+        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
 }
 
 int
@@ -628,7 +628,7 @@ kranal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         LASSERT (nob == 0 || niov > 0);
         LASSERT (niov <= LNET_MAX_IOV);
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         /* payload is either all vaddrs or all pages */
         LASSERT (!(kiov != NULL && iov != NULL));
 
@@ -802,7 +802,7 @@ kranal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
         int          rc;
 
         LASSERT (mlen <= rlen);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         /* Either all pages or all vaddrs */
         LASSERT (!(kiov != NULL && iov != NULL));
 
@@ -880,7 +880,7 @@ kranal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                         /* No match */
                         tx = kranal_new_tx_msg(RANAL_MSG_GET_NAK);
                         if (tx != NULL) {
-                                tx->tx_msg.ram_u.completion.racm_cookie = 
+                                tx->tx_msg.ram_u.completion.racm_cookie =
                                         rxmsg->ram_u.get.ragm_cookie;
                                 kranal_post_fma(conn, tx);
                         }
@@ -893,26 +893,26 @@ kranal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 int
 kranal_thread_start (int(*fn)(void *arg), void *arg)
 {
-        long    pid = kernel_thread(fn, arg, 0);
+        long    pid = cfs_kernel_thread(fn, arg, 0);
 
         if (pid < 0)
                 return(int)pid;
 
-        atomic_inc(&kranal_data.kra_nthreads);
+        cfs_atomic_inc(&kranal_data.kra_nthreads);
         return 0;
 }
 
 void
 kranal_thread_fini (void)
 {
-        atomic_dec(&kranal_data.kra_nthreads);
+        cfs_atomic_dec(&kranal_data.kra_nthreads);
 }
 
 int
 kranal_check_conn_timeouts (kra_conn_t *conn)
 {
         kra_tx_t          *tx;
-        struct list_head  *ttmp;
+        cfs_list_t        *ttmp;
         unsigned long      flags;
         long               timeout;
         unsigned long      now = jiffies;
@@ -921,22 +921,23 @@ kranal_check_conn_timeouts (kra_conn_t *conn)
                  conn->rac_state == RANAL_CONN_CLOSING);
 
         if (!conn->rac_close_sent &&
-            time_after_eq(now, conn->rac_last_tx + conn->rac_keepalive * HZ)) {
+            cfs_time_aftereq(now, conn->rac_last_tx + conn->rac_keepalive *
+                             CFS_HZ)) {
                 /* not sent in a while; schedule conn so scheduler sends a keepalive */
                 CDEBUG(D_NET, "Scheduling keepalive %p->%s\n",
                        conn, libcfs_nid2str(conn->rac_peer->rap_nid));
                 kranal_schedule_conn(conn);
         }
 
-        timeout = conn->rac_timeout * HZ;
+        timeout = conn->rac_timeout * CFS_HZ;
 
         if (!conn->rac_close_recvd &&
-            time_after_eq(now, conn->rac_last_rx + timeout)) {
+            cfs_time_aftereq(now, conn->rac_last_rx + timeout)) {
                 CERROR("%s received from %s within %lu seconds\n",
                        (conn->rac_state == RANAL_CONN_ESTABLISHED) ?
                        "Nothing" : "CLOSE not",
                        libcfs_nid2str(conn->rac_peer->rap_nid), 
-                       (now - conn->rac_last_rx)/HZ);
+                       (now - conn->rac_last_rx)/CFS_HZ);
                 return -ETIMEDOUT;
         }
 
@@ -947,53 +948,53 @@ kranal_check_conn_timeouts (kra_conn_t *conn)
          * in case of hardware/software errors that make this conn seem
          * responsive even though it isn't progressing its message queues. */
 
-        spin_lock_irqsave(&conn->rac_lock, flags);
+        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
 
-        list_for_each (ttmp, &conn->rac_fmaq) {
-                tx = list_entry(ttmp, kra_tx_t, tx_list);
+        cfs_list_for_each (ttmp, &conn->rac_fmaq) {
+                tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
 
-                if (time_after_eq(now, tx->tx_qtime + timeout)) {
-                        spin_unlock_irqrestore(&conn->rac_lock, flags);
+                if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+                        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
                         CERROR("tx on fmaq for %s blocked %lu seconds\n",
                                libcfs_nid2str(conn->rac_peer->rap_nid),
-                               (now - tx->tx_qtime)/HZ);
+                               (now - tx->tx_qtime)/CFS_HZ);
                         return -ETIMEDOUT;
                 }
         }
 
-        list_for_each (ttmp, &conn->rac_rdmaq) {
-                tx = list_entry(ttmp, kra_tx_t, tx_list);
+        cfs_list_for_each (ttmp, &conn->rac_rdmaq) {
+                tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
 
-                if (time_after_eq(now, tx->tx_qtime + timeout)) {
-                        spin_unlock_irqrestore(&conn->rac_lock, flags);
+                if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+                        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
                         CERROR("tx on rdmaq for %s blocked %lu seconds\n",
                                libcfs_nid2str(conn->rac_peer->rap_nid), 
-                               (now - tx->tx_qtime)/HZ);
+                               (now - tx->tx_qtime)/CFS_HZ);
                         return -ETIMEDOUT;
                 }
         }
 
-        list_for_each (ttmp, &conn->rac_replyq) {
-                tx = list_entry(ttmp, kra_tx_t, tx_list);
+        cfs_list_for_each (ttmp, &conn->rac_replyq) {
+                tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
 
-                if (time_after_eq(now, tx->tx_qtime + timeout)) {
-                        spin_unlock_irqrestore(&conn->rac_lock, flags);
+                if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+                        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
                         CERROR("tx on replyq for %s blocked %lu seconds\n",
                                libcfs_nid2str(conn->rac_peer->rap_nid),
-                               (now - tx->tx_qtime)/HZ);
+                               (now - tx->tx_qtime)/CFS_HZ);
                         return -ETIMEDOUT;
                 }
         }
 
-        spin_unlock_irqrestore(&conn->rac_lock, flags);
+        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
         return 0;
 }
 
 void
 kranal_reaper_check (int idx, unsigned long *min_timeoutp)
 {
-        struct list_head  *conns = &kranal_data.kra_conns[idx];
-        struct list_head  *ctmp;
+        cfs_list_t        *conns = &kranal_data.kra_conns[idx];
+        cfs_list_t        *ctmp;
         kra_conn_t        *conn;
         unsigned long      flags;
         int                rc;
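
The three scans above (rac_fmaq, rac_rdmaq, rac_replyq) all apply the
same rule: under the connection lock, any tx whose tx_qtime plus the
timeout has passed condemns the conn. A standalone sketch of one such
scan (tx_t, the queue, and check_queue_timeouts() are invented; time_t
seconds stand in for jiffies):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

typedef struct tx {
        struct tx *next;        /* queue linkage */
        time_t     qtime;       /* when the tx was queued */
        int        id;
} tx_t;

static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;

/* return -1 (timed out) if any queued tx is older than 'timeout'
 * seconds, mirroring the fmaq/rdmaq/replyq scans above */
static int check_queue_timeouts(tx_t *head, time_t now, time_t timeout)
{
        tx_t *tx;
        int   rc = 0;

        pthread_mutex_lock(&conn_lock);
        for (tx = head; tx != NULL; tx = tx->next) {
                if (now >= tx->qtime + timeout) {
                        fprintf(stderr, "tx %d blocked %ld seconds\n",
                                tx->id, (long)(now - tx->qtime));
                        rc = -1;
                        break;
                }
        }
        pthread_mutex_unlock(&conn_lock);
        return rc;
}

int main(void)
{
        time_t now = time(NULL);
        tx_t   old = { NULL, now - 100, 1 };    /* queued 100s ago */
        tx_t   fresh = { &old, now - 1, 2 };

        printf("rc = %d\n", check_queue_timeouts(&fresh, now, 30));
        return 0;
}
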
@@ -1001,10 +1002,10 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
  again:
         /* NB. We expect to check all the conns and not find any problems, so
          * we just use a shared lock while we take a look... */
-        read_lock(&kranal_data.kra_global_lock);
+        cfs_read_lock(&kranal_data.kra_global_lock);
 
-        list_for_each (ctmp, conns) {
-                conn = list_entry(ctmp, kra_conn_t, rac_hashlist);
+        cfs_list_for_each (ctmp, conns) {
+                conn = cfs_list_entry(ctmp, kra_conn_t, rac_hashlist);
 
                 if (conn->rac_timeout < *min_timeoutp )
                         *min_timeoutp = conn->rac_timeout;
@@ -1016,13 +1017,13 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
                         continue;
 
                 kranal_conn_addref(conn);
-                read_unlock(&kranal_data.kra_global_lock);
+                cfs_read_unlock(&kranal_data.kra_global_lock);
 
                 CERROR("Conn to %s, cqid %d timed out\n",
                        libcfs_nid2str(conn->rac_peer->rap_nid), 
                        conn->rac_cqid);
 
-                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 switch (conn->rac_state) {
                 default:
@@ -1037,7 +1038,8 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
                         break;
                 }
 
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
 
                 kranal_conn_decref(conn);
 
@@ -1045,7 +1047,7 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
                 goto again;
         }
 
-        read_unlock(&kranal_data.kra_global_lock);
+        cfs_read_unlock(&kranal_data.kra_global_lock);
 }
 
 int
@@ -1053,7 +1055,7 @@ kranal_connd (void *arg)
 {
         long               id = (long)arg;
         char               name[16];
-        wait_queue_t       wait;
+        cfs_waitlink_t     wait;
         unsigned long      flags;
         kra_peer_t        *peer;
         kra_acceptsock_t  *ras;
@@ -1063,19 +1065,20 @@ kranal_connd (void *arg)
         cfs_daemonize(name);
         cfs_block_allsigs();
 
-        init_waitqueue_entry(&wait, current);
+        cfs_waitlink_init(&wait);
 
-        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
         while (!kranal_data.kra_shutdown) {
                 did_something = 0;
 
-                if (!list_empty(&kranal_data.kra_connd_acceptq)) {
-                        ras = list_entry(kranal_data.kra_connd_acceptq.next,
-                                         kra_acceptsock_t, ras_list);
-                        list_del(&ras->ras_list);
+                if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+                        ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
+                                             kra_acceptsock_t, ras_list);
+                        cfs_list_del(&ras->ras_list);
 
-                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+                        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+                                                   flags);
 
                         CDEBUG(D_NET,"About to handshake someone\n");
 
@@ -1084,41 +1087,44 @@ kranal_connd (void *arg)
 
                         CDEBUG(D_NET,"Finished handshaking someone\n");
 
-                        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+                        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+                                              flags);
                         did_something = 1;
                 }
 
-                if (!list_empty(&kranal_data.kra_connd_peers)) {
-                        peer = list_entry(kranal_data.kra_connd_peers.next,
-                                          kra_peer_t, rap_connd_list);
+                if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
+                        peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
+                                              kra_peer_t, rap_connd_list);
 
-                        list_del_init(&peer->rap_connd_list);
-                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+                        cfs_list_del_init(&peer->rap_connd_list);
+                        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+                                                   flags);
 
                         kranal_connect(peer);
                         kranal_peer_decref(peer);
 
-                        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+                        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+                                              flags);
                         did_something = 1;
                 }
 
                 if (did_something)
                         continue;
 
-                set_current_state(TASK_INTERRUPTIBLE);
-                add_wait_queue_exclusive(&kranal_data.kra_connd_waitq, &wait);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add_exclusive(&kranal_data.kra_connd_waitq, &wait);
 
-                spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+                cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
-                schedule ();
+                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
 
-                set_current_state(TASK_RUNNING);
-                remove_wait_queue(&kranal_data.kra_connd_waitq, &wait);
+                cfs_set_current_state(CFS_TASK_RUNNING);
+                cfs_waitq_del(&kranal_data.kra_connd_waitq, &wait);
 
-                spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+                cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
         }
 
-        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
         kranal_thread_fini();
         return 0;
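
The connd loop above open-codes the kernel sleep sequence: mark the task
interruptible, add the waiter to the queue, drop the lock, sleep, then
remove the waiter and retake the lock before re-checking, so a wakeup
arriving between the check and the sleep cannot be lost. The nearest
userspace analogue is a condition variable, which folds those steps into
one call (the work/shutdown flags and names below are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t connd_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  connd_waitq = PTHREAD_COND_INITIALIZER;
static int work_pending;
static int shutdown_flag;

static void *connd(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&connd_lock);
        /* keep draining queued work even once shutdown is requested */
        while (!shutdown_flag || work_pending) {
                if (work_pending) {
                        work_pending = 0;
                        printf("handled one work item\n");
                        continue;       /* re-check before sleeping */
                }
                /* atomically drops connd_lock while asleep and retakes
                 * it on wakeup: the condvar equivalent of the
                 * add-waiter/unlock/wait/lock dance above */
                pthread_cond_wait(&connd_waitq, &connd_lock);
        }
        pthread_mutex_unlock(&connd_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, connd, NULL);

        pthread_mutex_lock(&connd_lock);
        work_pending = 1;               /* queue one item... */
        shutdown_flag = 1;              /* ...and ask the thread to exit */
        pthread_cond_broadcast(&connd_waitq);
        pthread_mutex_unlock(&connd_lock);

        pthread_join(t, NULL);
        return 0;
}
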
@@ -1131,18 +1137,18 @@ kranal_update_reaper_timeout(long timeout)
 
         LASSERT (timeout > 0);
 
-        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
         if (timeout < kranal_data.kra_new_min_timeout)
                 kranal_data.kra_new_min_timeout = timeout;
 
-        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+        cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 }
 
 int
 kranal_reaper (void *arg)
 {
-        wait_queue_t       wait;
+        cfs_waitlink_t     wait;
         unsigned long      flags;
         long               timeout;
         int                i;
@@ -1150,15 +1156,15 @@ kranal_reaper (void *arg)
         int                conn_index = 0;
         int                base_index = conn_entries - 1;
         unsigned long      next_check_time = jiffies;
-        long               next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+        long               next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
         long               current_min_timeout = 1;
 
         cfs_daemonize("kranal_reaper");
         cfs_block_allsigs();
 
-        init_waitqueue_entry(&wait, current);
+        cfs_waitlink_init(&wait);
 
-        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
         while (!kranal_data.kra_shutdown) {
                 /* I wake up every 'p' seconds to check for timeouts on some
@@ -1174,38 +1180,45 @@ kranal_reaper (void *arg)
                 /* careful with the jiffy wrap... */
                 timeout = (long)(next_check_time - jiffies);
                 if (timeout > 0) {
-                        set_current_state(TASK_INTERRUPTIBLE);
-                        add_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
+                        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                        cfs_waitq_add(&kranal_data.kra_reaper_waitq, &wait);
 
-                        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+                        cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
+                                                   flags);
 
-                        schedule_timeout(timeout);
+                        cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+                                            timeout);
 
-                        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+                        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock,
+                                              flags);
 
-                        set_current_state(TASK_RUNNING);
-                        remove_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
+                        cfs_set_current_state(CFS_TASK_RUNNING);
+                        cfs_waitq_del(&kranal_data.kra_reaper_waitq, &wait);
                         continue;
                 }
 
-                if (kranal_data.kra_new_min_timeout != MAX_SCHEDULE_TIMEOUT) {
+                if (kranal_data.kra_new_min_timeout !=
+                    CFS_MAX_SCHEDULE_TIMEOUT) {
                         /* new min timeout set: restart min timeout scan */
-                        next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+                        next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
                         base_index = conn_index - 1;
                         if (base_index < 0)
                                 base_index = conn_entries - 1;
 
-                        if (kranal_data.kra_new_min_timeout < current_min_timeout) {
-                                current_min_timeout = kranal_data.kra_new_min_timeout;
+                        if (kranal_data.kra_new_min_timeout <
+                            current_min_timeout) {
+                                current_min_timeout =
+                                        kranal_data.kra_new_min_timeout;
                                 CDEBUG(D_NET, "Set new min timeout %ld\n",
                                        current_min_timeout);
                         }
 
-                        kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
+                        kranal_data.kra_new_min_timeout =
+                                CFS_MAX_SCHEDULE_TIMEOUT;
                 }
                 min_timeout = current_min_timeout;
 
-                spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+                cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
                 LASSERT (min_timeout > 0);
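
The "careful with the jiffy wrap" note above is why the remaining time
is computed as (long)(next_check_time - jiffies) rather than compared
directly: unsigned subtraction followed by a signed interpretation stays
correct when the counter wraps, while next_check_time > jiffies does
not. A standalone demonstration (not part of this commit):

#include <limits.h>
#include <stdio.h>

/* wrap-safe "is 'when' still in the future?" test, the same trick as
 * (long)(next_check_time - jiffies) > 0 in the reaper loop above */
static long time_remaining(unsigned long when, unsigned long now)
{
        return (long)(when - now);
}

int main(void)
{
        /* pretend the counter is about to wrap */
        unsigned long now  = ULONG_MAX - 5;
        unsigned long when = now + 10;          /* wraps past zero */

        /* a naive comparison gets it backwards after the wrap... */
        printf("naive   'when > now': %s\n", when > now ? "future" : "past");
        /* ...while the signed difference still reads 10 ticks ahead */
        printf("wrap-safe remaining : %ld\n", time_remaining(when, now));
        return 0;
}
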
 
@@ -1224,9 +1237,9 @@ kranal_reaper (void *arg)
                         conn_index = (conn_index + 1) % conn_entries;
                 }
 
-                next_check_time += p * HZ;
+                next_check_time += p * CFS_HZ;
 
-                spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+                cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
                 if (((conn_index - chunk <= base_index &&
                       base_index < conn_index) ||
@@ -1241,7 +1254,7 @@ kranal_reaper (void *arg)
                         }
 
                         /* ...and restart min timeout scan */
-                        next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+                        next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
                         base_index = conn_index - 1;
                         if (base_index < 0)
                                 base_index = conn_entries - 1;
@@ -1273,13 +1286,13 @@ kranal_check_rdma_cq (kra_device_t *dev)
                 LASSERT (rrc == RAP_SUCCESS);
                 LASSERT ((event_type & RAPK_CQ_EVENT_OVERRUN) == 0);
 
-                read_lock(&kranal_data.kra_global_lock);
+                cfs_read_lock(&kranal_data.kra_global_lock);
 
                 conn = kranal_cqid2conn_locked(cqid);
                 if (conn == NULL) {
                         /* Conn was destroyed? */
                         CDEBUG(D_NET, "RDMA CQID lookup %d failed\n", cqid);
-                        read_unlock(&kranal_data.kra_global_lock);
+                        cfs_read_unlock(&kranal_data.kra_global_lock);
                         continue;
                 }
 
@@ -1287,28 +1300,28 @@ kranal_check_rdma_cq (kra_device_t *dev)
                 LASSERT (rrc == RAP_SUCCESS);
 
                 CDEBUG(D_NET, "Completed %p\n",
-                       list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list));
+                       cfs_list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list));
 
-                spin_lock_irqsave(&conn->rac_lock, flags);
+                cfs_spin_lock_irqsave(&conn->rac_lock, flags);
 
-                LASSERT (!list_empty(&conn->rac_rdmaq));
-                tx = list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list);
-                list_del(&tx->tx_list);
+                LASSERT (!cfs_list_empty(&conn->rac_rdmaq));
+                tx = cfs_list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);
 
                 LASSERT(desc->AppPtr == (void *)tx);
                 LASSERT(tx->tx_msg.ram_type == RANAL_MSG_PUT_DONE ||
                         tx->tx_msg.ram_type == RANAL_MSG_GET_DONE);
 
-                list_add_tail(&tx->tx_list, &conn->rac_fmaq);
+                cfs_list_add_tail(&tx->tx_list, &conn->rac_fmaq);
                 tx->tx_qtime = jiffies;
 
-                spin_unlock_irqrestore(&conn->rac_lock, flags);
+                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
 
                 /* Get conn's fmaq processed, now I've just put something
                  * there */
                 kranal_schedule_conn(conn);
 
-                read_unlock(&kranal_data.kra_global_lock);
+                cfs_read_unlock(&kranal_data.kra_global_lock);
         }
 }
 
@@ -1319,8 +1332,8 @@ kranal_check_fma_cq (kra_device_t *dev)
         RAP_RETURN          rrc;
         __u32               cqid;
         __u32               event_type;
-        struct list_head   *conns;
-        struct list_head   *tmp;
+        cfs_list_t         *conns;
+        cfs_list_t         *tmp;
         int                 i;
 
         for (;;) {
@@ -1334,7 +1347,7 @@ kranal_check_fma_cq (kra_device_t *dev)
 
                 if ((event_type & RAPK_CQ_EVENT_OVERRUN) == 0) {
 
-                        read_lock(&kranal_data.kra_global_lock);
+                        cfs_read_lock(&kranal_data.kra_global_lock);
 
                         conn = kranal_cqid2conn_locked(cqid);
                         if (conn == NULL) {
@@ -1346,7 +1359,7 @@ kranal_check_fma_cq (kra_device_t *dev)
                                 kranal_schedule_conn(conn);
                         }
 
-                        read_unlock(&kranal_data.kra_global_lock);
+                        cfs_read_unlock(&kranal_data.kra_global_lock);
                         continue;
                 }
 
@@ -1356,20 +1369,20 @@ kranal_check_fma_cq (kra_device_t *dev)
 
                 for (i = 0; i < kranal_data.kra_conn_hash_size; i++) {
 
-                        read_lock(&kranal_data.kra_global_lock);
+                        cfs_read_lock(&kranal_data.kra_global_lock);
 
                         conns = &kranal_data.kra_conns[i];
 
-                        list_for_each (tmp, conns) {
-                                conn = list_entry(tmp, kra_conn_t,
-                                                  rac_hashlist);
+                        cfs_list_for_each (tmp, conns) {
+                                conn = cfs_list_entry(tmp, kra_conn_t,
+                                                      rac_hashlist);
 
                                 if (conn->rac_device == dev)
                                         kranal_schedule_conn(conn);
                         }
 
                         /* don't block write lockers for too long... */
-                        read_unlock(&kranal_data.kra_global_lock);
+                        cfs_read_unlock(&kranal_data.kra_global_lock);
                 }
         }
 }
@@ -1412,10 +1425,12 @@ kranal_sendmsg(kra_conn_t *conn, kra_msg_t *msg,
                 return 0;
 
         case RAP_NOT_DONE:
-                if (time_after_eq(jiffies,
-                                  conn->rac_last_tx + conn->rac_keepalive*HZ))
+                if (cfs_time_aftereq(jiffies,
+                                     conn->rac_last_tx + conn->rac_keepalive *
+                                     CFS_HZ))
                         CWARN("EAGAIN sending %02x (idle %lu secs)\n",
-                               msg->ram_type, (jiffies - conn->rac_last_tx)/HZ);
+                              msg->ram_type,
+                              (jiffies - conn->rac_last_tx)/CFS_HZ);
                 return -EAGAIN;
         }
 }
@@ -1441,13 +1456,13 @@ kranal_process_fmaq (kra_conn_t *conn)
         LASSERT (current == conn->rac_device->rad_scheduler);
 
         if (conn->rac_state != RANAL_CONN_ESTABLISHED) {
-                if (!list_empty(&conn->rac_rdmaq)) {
+                if (!cfs_list_empty(&conn->rac_rdmaq)) {
                         /* RDMAs in progress */
                         LASSERT (!conn->rac_close_sent);
 
-                        if (time_after_eq(jiffies,
-                                          conn->rac_last_tx +
-                                          conn->rac_keepalive * HZ)) {
+                        if (cfs_time_aftereq(jiffies,
+                                             conn->rac_last_tx +
+                                             conn->rac_keepalive * CFS_HZ)) {
                                 CDEBUG(D_NET, "sending NOOP (rdma in progress)\n");
                                 kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
                                 kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
@@ -1469,37 +1484,40 @@ kranal_process_fmaq (kra_conn_t *conn)
                 if (!conn->rac_close_recvd)
                         return;
 
-                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 if (conn->rac_state == RANAL_CONN_CLOSING)
                         kranal_terminate_conn_locked(conn);
 
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
                 return;
         }
 
-        spin_lock_irqsave(&conn->rac_lock, flags);
+        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
 
-        if (list_empty(&conn->rac_fmaq)) {
+        if (cfs_list_empty(&conn->rac_fmaq)) {
 
-                spin_unlock_irqrestore(&conn->rac_lock, flags);
+                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
 
-                if (time_after_eq(jiffies,
-                                  conn->rac_last_tx + conn->rac_keepalive * HZ)) {
+                if (cfs_time_aftereq(jiffies,
+                                     conn->rac_last_tx + conn->rac_keepalive *
+                                     CFS_HZ)) {
                         CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%ld))\n",
                                libcfs_nid2str(conn->rac_peer->rap_nid), conn,
-                               (jiffies - conn->rac_last_tx)/HZ, conn->rac_keepalive);
+                               (jiffies - conn->rac_last_tx)/CFS_HZ,
+                               conn->rac_keepalive);
                         kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
                         kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
                 }
                 return;
         }
 
-        tx = list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
-        list_del(&tx->tx_list);
-        more_to_do = !list_empty(&conn->rac_fmaq);
+        tx = cfs_list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
+        cfs_list_del(&tx->tx_list);
+        more_to_do = !cfs_list_empty(&conn->rac_fmaq);
 
-        spin_unlock_irqrestore(&conn->rac_lock, flags);
+        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
 
         expect_reply = 0;
         CDEBUG(D_NET, "sending regular msg: %p, type %02x, cookie "LPX64"\n",
@@ -1556,9 +1574,9 @@ kranal_process_fmaq (kra_conn_t *conn)
                 /* I need credits to send this.  Replace tx at the head of the
                  * fmaq and I'll get rescheduled when credits appear */
                 CDEBUG(D_NET, "EAGAIN on %p\n", conn);
-                spin_lock_irqsave(&conn->rac_lock, flags);
-                list_add(&tx->tx_list, &conn->rac_fmaq);
-                spin_unlock_irqrestore(&conn->rac_lock, flags);
+                cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+                cfs_list_add(&tx->tx_list, &conn->rac_fmaq);
+                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
                 return;
         }
 
@@ -1567,10 +1585,10 @@ kranal_process_fmaq (kra_conn_t *conn)
         } else {
                 /* LASSERT(current) above ensures this doesn't race with reply
                  * processing */
-                spin_lock_irqsave(&conn->rac_lock, flags);
-                list_add_tail(&tx->tx_list, &conn->rac_replyq);
+                cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+                cfs_list_add_tail(&tx->tx_list, &conn->rac_replyq);
                 tx->tx_qtime = jiffies;
-                spin_unlock_irqrestore(&conn->rac_lock, flags);
+                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
         }
 
         if (more_to_do) {
@@ -1593,14 +1611,14 @@ kranal_swab_rdma_desc (kra_rdma_desc_t *d)
 kra_tx_t *
 kranal_match_reply(kra_conn_t *conn, int type, __u64 cookie)
 {
-        struct list_head *ttmp;
+        cfs_list_t       *ttmp;
         kra_tx_t         *tx;
         unsigned long     flags;
 
-        spin_lock_irqsave(&conn->rac_lock, flags);
+        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
 
-        list_for_each(ttmp, &conn->rac_replyq) {
-                tx = list_entry(ttmp, kra_tx_t, tx_list);
+        cfs_list_for_each(ttmp, &conn->rac_replyq) {
+                tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
 
                 CDEBUG(D_NET,"Checking %p %02x/"LPX64"\n",
                        tx, tx->tx_msg.ram_type, tx->tx_cookie);
@@ -1609,7 +1627,7 @@ kranal_match_reply(kra_conn_t *conn, int type, __u64 cookie)
                         continue;
 
                 if (tx->tx_msg.ram_type != type) {
-                        spin_unlock_irqrestore(&conn->rac_lock, flags);
+                        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
                         CWARN("Unexpected type %x (%x expected) "
                               "matched reply from %s\n",
                               tx->tx_msg.ram_type, type,
@@ -1617,12 +1635,12 @@ kranal_match_reply(kra_conn_t *conn, int type, __u64 cookie)
                         return NULL;
                 }
 
-                list_del(&tx->tx_list);
-                spin_unlock_irqrestore(&conn->rac_lock, flags);
+                cfs_list_del(&tx->tx_list);
+                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
                 return tx;
         }
 
-        spin_unlock_irqrestore(&conn->rac_lock, flags);
+        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
         CWARN("Unmatched reply %02x/"LPX64" from %s\n",
               type, cookie, libcfs_nid2str(conn->rac_peer->rap_nid));
         return NULL;
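
The list-walk conversion is equally mechanical: struct list_head iterators become cfs_list_t and every list_* macro gains the cfs_ prefix, as in kranal_match_reply above. A compact sketch of the walk-and-match idiom, with item_t, i_list and i_cookie as hypothetical names:

    typedef struct {
            cfs_list_t i_list;              /* linkage into the queue */
            __u64      i_cookie;            /* match key */
    } item_t;

    /* Sketch: return the first queued item carrying 'cookie', else NULL. */
    static item_t *find_item(cfs_list_t *queue, __u64 cookie)
    {
            cfs_list_t *tmp;
            item_t     *item;

            cfs_list_for_each(tmp, queue) {
                    item = cfs_list_entry(tmp, item_t, i_list);
                    if (item->i_cookie == cookie)
                            return item;
            }
            return NULL;
    }
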
@@ -1737,7 +1755,7 @@ kranal_check_fma_rx (kra_conn_t *conn)
         if (msg->ram_type == RANAL_MSG_CLOSE) {
                 CWARN("RX CLOSE from %s\n", libcfs_nid2str(conn->rac_peer->rap_nid));
                 conn->rac_close_recvd = 1;
-                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                         kranal_close_conn_locked(conn, 0);
@@ -1745,7 +1763,8 @@ kranal_check_fma_rx (kra_conn_t *conn)
                          conn->rac_close_sent)
                         kranal_terminate_conn_locked(conn);
 
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
                 goto out;
         }
 
@@ -1865,22 +1884,22 @@ kranal_complete_closed_conn (kra_conn_t *conn)
         int         nreplies;
 
         LASSERT (conn->rac_state == RANAL_CONN_CLOSED);
-        LASSERT (list_empty(&conn->rac_list));
-        LASSERT (list_empty(&conn->rac_hashlist));
+        LASSERT (cfs_list_empty(&conn->rac_list));
+        LASSERT (cfs_list_empty(&conn->rac_hashlist));
 
-        for (nfma = 0; !list_empty(&conn->rac_fmaq); nfma++) {
-                tx = list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
+        for (nfma = 0; !cfs_list_empty(&conn->rac_fmaq); nfma++) {
+                tx = cfs_list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 kranal_tx_done(tx, -ECONNABORTED);
         }
 
-        LASSERT (list_empty(&conn->rac_rdmaq));
+        LASSERT (cfs_list_empty(&conn->rac_rdmaq));
 
-        for (nreplies = 0; !list_empty(&conn->rac_replyq); nreplies++) {
-                tx = list_entry(conn->rac_replyq.next, kra_tx_t, tx_list);
+        for (nreplies = 0; !cfs_list_empty(&conn->rac_replyq); nreplies++) {
+                tx = cfs_list_entry(conn->rac_replyq.next, kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 kranal_tx_done(tx, -ECONNABORTED);
         }
 
@@ -1892,14 +1911,14 @@ int
 kranal_process_new_conn (kra_conn_t *conn)
 {
         RAP_RETURN   rrc;
-        
+
         rrc = RapkCompleteSync(conn->rac_rihandle, 1);
         if (rrc == RAP_SUCCESS)
                 return 0;
 
         LASSERT (rrc == RAP_NOT_DONE);
-        if (!time_after_eq(jiffies, conn->rac_last_tx + 
-                           conn->rac_timeout * HZ))
+        if (!cfs_time_aftereq(jiffies, conn->rac_last_tx +
+                              conn->rac_timeout * CFS_HZ))
                 return -EAGAIN;
 
         /* Too late */
@@ -1912,7 +1931,7 @@ int
 kranal_scheduler (void *arg)
 {
         kra_device_t     *dev = (kra_device_t *)arg;
-        wait_queue_t      wait;
+        cfs_waitlink_t    wait;
         char              name[16];
         kra_conn_t       *conn;
         unsigned long     flags;
@@ -1920,8 +1939,8 @@ kranal_scheduler (void *arg)
         unsigned long     soonest;
         int               nsoonest;
         long              timeout;
-        struct list_head *tmp;
-        struct list_head *nxt;
+        cfs_list_t       *tmp;
+        cfs_list_t       *nxt;
         int               rc;
         int               dropped_lock;
         int               busy_loops = 0;
@@ -1931,20 +1950,20 @@ kranal_scheduler (void *arg)
         cfs_block_allsigs();
 
         dev->rad_scheduler = current;
-        init_waitqueue_entry(&wait, current);
+        cfs_waitlink_init(&wait);
 
-        spin_lock_irqsave(&dev->rad_lock, flags);
+        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
 
         while (!kranal_data.kra_shutdown) {
                 /* Safe: kra_shutdown only set when quiescent */
 
                 if (busy_loops++ >= RANAL_RESCHED) {
-                        spin_unlock_irqrestore(&dev->rad_lock, flags);
+                        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
 
-                        our_cond_resched();
+                        cfs_cond_resched();
                         busy_loops = 0;
 
-                        spin_lock_irqsave(&dev->rad_lock, flags);
+                        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
                 }
 
                 dropped_lock = 0;
@@ -1952,22 +1971,22 @@ kranal_scheduler (void *arg)
                 if (dev->rad_ready) {
                         /* Device callback fired since I last checked it */
                         dev->rad_ready = 0;
-                        spin_unlock_irqrestore(&dev->rad_lock, flags);
+                        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
                         dropped_lock = 1;
 
                         kranal_check_rdma_cq(dev);
                         kranal_check_fma_cq(dev);
 
-                        spin_lock_irqsave(&dev->rad_lock, flags);
+                        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
                 }
 
-                list_for_each_safe(tmp, nxt, &dev->rad_ready_conns) {
-                        conn = list_entry(tmp, kra_conn_t, rac_schedlist);
+                cfs_list_for_each_safe(tmp, nxt, &dev->rad_ready_conns) {
+                        conn = cfs_list_entry(tmp, kra_conn_t, rac_schedlist);
 
-                        list_del_init(&conn->rac_schedlist);
+                        cfs_list_del_init(&conn->rac_schedlist);
                         LASSERT (conn->rac_scheduled);
                         conn->rac_scheduled = 0;
-                        spin_unlock_irqrestore(&dev->rad_lock, flags);
+                        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
                         dropped_lock = 1;
 
                         kranal_check_fma_rx(conn);
@@ -1977,75 +1996,82 @@ kranal_scheduler (void *arg)
                                 kranal_complete_closed_conn(conn);
 
                         kranal_conn_decref(conn);
-                        spin_lock_irqsave(&dev->rad_lock, flags);
+                        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
                 }
 
                 nsoonest = 0;
                 soonest = jiffies;
 
-                list_for_each_safe(tmp, nxt, &dev->rad_new_conns) {
-                        conn = list_entry(tmp, kra_conn_t, rac_schedlist);
-                        
+                cfs_list_for_each_safe(tmp, nxt, &dev->rad_new_conns) {
+                        conn = cfs_list_entry(tmp, kra_conn_t, rac_schedlist);
+
                         deadline = conn->rac_last_tx + conn->rac_keepalive;
-                        if (time_after_eq(jiffies, deadline)) {
+                        if (cfs_time_aftereq(jiffies, deadline)) {
                                 /* Time to process this new conn */
-                                spin_unlock_irqrestore(&dev->rad_lock, flags);
+                                cfs_spin_unlock_irqrestore(&dev->rad_lock,
+                                                           flags);
                                 dropped_lock = 1;
 
                                 rc = kranal_process_new_conn(conn);
                                 if (rc != -EAGAIN) {
                                         /* All done with this conn */
-                                        spin_lock_irqsave(&dev->rad_lock, flags);
-                                        list_del_init(&conn->rac_schedlist);
-                                        spin_unlock_irqrestore(&dev->rad_lock, flags);
+                                        cfs_spin_lock_irqsave(&dev->rad_lock,
+                                                              flags);
+                                        cfs_list_del_init(&conn->rac_schedlist);
+                                        cfs_spin_unlock_irqrestore(
+                                                &dev->rad_lock,
+                                                flags);
 
                                         kranal_conn_decref(conn);
-                                        spin_lock_irqsave(&dev->rad_lock, flags);
+                                        cfs_spin_lock_irqsave(&dev->rad_lock,
+                                                              flags);
                                         continue;
                                 }
 
                                 /* retry with exponential backoff until HZ */
                                 if (conn->rac_keepalive == 0)
                                         conn->rac_keepalive = 1;
-                                else if (conn->rac_keepalive <= HZ)
+                                else if (conn->rac_keepalive <= CFS_HZ)
                                         conn->rac_keepalive *= 2;
                                 else
-                                        conn->rac_keepalive += HZ;
+                                        conn->rac_keepalive += CFS_HZ;
                                 
                                 deadline = conn->rac_last_tx + conn->rac_keepalive;
-                                spin_lock_irqsave(&dev->rad_lock, flags);
+                                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
                         }
 
                         /* Does this conn need attention soonest? */
                         if (nsoonest++ == 0 ||
-                            !time_after_eq(deadline, soonest))
+                            !cfs_time_aftereq(deadline, soonest))
                                 soonest = deadline;
                 }
 
                 if (dropped_lock)               /* may sleep iff I didn't drop the lock */
                         continue;
 
-                set_current_state(TASK_INTERRUPTIBLE);
-                add_wait_queue_exclusive(&dev->rad_waitq, &wait);
-                spin_unlock_irqrestore(&dev->rad_lock, flags);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
+                cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
 
                 if (nsoonest == 0) {
                         busy_loops = 0;
-                        schedule();
+                        cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
                 } else {
                         timeout = (long)(soonest - jiffies);
                         if (timeout > 0) {
                                 busy_loops = 0;
-                                schedule_timeout(timeout);
+                                cfs_waitq_timedwait(&wait,
+                                                    CFS_TASK_INTERRUPTIBLE,
+                                                    timeout);
                         }
                 }
 
-                remove_wait_queue(&dev->rad_waitq, &wait);
-                set_current_state(TASK_RUNNING);
-                spin_lock_irqsave(&dev->rad_lock, flags);
+                cfs_waitq_del(&dev->rad_waitq, &wait);
+                cfs_set_current_state(CFS_TASK_RUNNING);
+                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
         }
 
-        spin_unlock_irqrestore(&dev->rad_lock, flags);
+        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
 
         dev->rad_scheduler = NULL;
         kranal_thread_fini();
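
The scheduler change is the most invasive in this file: wait_queue_t plus schedule()/schedule_timeout() are replaced by the portable cfs_waitlink_t/cfs_waitq_* calls, while the prepare-sleep-detach protocol itself is preserved. A condensed sketch, assuming the cfs_ primitives keep the kernel wait-queue semantics (the caller passes 0 when no deadline is pending; names are placeholders):

    /* Sketch of the scheduler's sleep protocol after conversion. */
    static void scheduler_sleep(cfs_waitq_t *waitq, long timeout)
    {
            cfs_waitlink_t wait;

            cfs_waitlink_init(&wait);
            cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
            cfs_waitq_add_exclusive(waitq, &wait);

            if (timeout <= 0)               /* nothing due: open-ended sleep */
                    cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
            else                            /* wake in time for the deadline */
                    cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
                                        timeout);

            cfs_waitq_del(waitq, &wait);
            cfs_set_current_state(CFS_TASK_RUNNING);
    }
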
index 15bb8cf..6f4a475 100644
@@ -158,10 +158,10 @@ ksocknal_destroy_peer (ksock_peer_t *peer)
 
         LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
         LASSERT (peer->ksnp_accepting == 0);
-        LASSERT (list_empty (&peer->ksnp_conns));
-        LASSERT (list_empty (&peer->ksnp_routes));
-        LASSERT (list_empty (&peer->ksnp_tx_queue));
-        LASSERT (list_empty (&peer->ksnp_zc_req_list));
+        LASSERT (cfs_list_empty (&peer->ksnp_conns));
+        LASSERT (cfs_list_empty (&peer->ksnp_routes));
+        LASSERT (cfs_list_empty (&peer->ksnp_tx_queue));
+        LASSERT (cfs_list_empty (&peer->ksnp_zc_req_list));
 
         LIBCFS_FREE (peer, sizeof (*peer));
 
@@ -177,13 +177,13 @@ ksocknal_destroy_peer (ksock_peer_t *peer)
 ksock_peer_t *
 ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
 {
-        struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
-        struct list_head *tmp;
+        cfs_list_t       *peer_list = ksocknal_nid2peerlist(id.nid);
+        cfs_list_t       *tmp;
         ksock_peer_t     *peer;
 
-        list_for_each (tmp, peer_list) {
+        cfs_list_for_each (tmp, peer_list) {
 
-                peer = list_entry (tmp, ksock_peer_t, ksnp_list);
+                peer = cfs_list_entry (tmp, ksock_peer_t, ksnp_list);
 
                 LASSERT (!peer->ksnp_closing);
 
@@ -237,24 +237,24 @@ ksocknal_unlink_peer_locked (ksock_peer_t *peer)
                 iface->ksni_npeers--;
         }
 
-        LASSERT (list_empty(&peer->ksnp_conns));
-        LASSERT (list_empty(&peer->ksnp_routes));
+        LASSERT (cfs_list_empty(&peer->ksnp_conns));
+        LASSERT (cfs_list_empty(&peer->ksnp_routes));
         LASSERT (!peer->ksnp_closing);
         peer->ksnp_closing = 1;
-        list_del (&peer->ksnp_list);
+        cfs_list_del (&peer->ksnp_list);
         /* lose peerlist's ref */
         ksocknal_peer_decref(peer);
 }
 
 int
 ksocknal_get_peer_info (lnet_ni_t *ni, int index,
-                        lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, int *port,
-                        int *conn_count, int *share_count)
+                        lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
+                        int *port, int *conn_count, int *share_count)
 {
         ksock_peer_t      *peer;
-        struct list_head  *ptmp;
+        cfs_list_t        *ptmp;
         ksock_route_t     *route;
-        struct list_head  *rtmp;
+        cfs_list_t        *rtmp;
         int                i;
         int                j;
         int                rc = -ENOENT;
@@ -263,14 +263,14 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 
-                list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
-                        peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+                cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
+                        peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
 
                         if (peer->ksnp_ni != ni)
                                 continue;
 
                         if (peer->ksnp_n_passive_ips == 0 &&
-                            list_empty(&peer->ksnp_routes)) {
+                            cfs_list_empty(&peer->ksnp_routes)) {
                                 if (index-- > 0)
                                         continue;
 
@@ -298,12 +298,12 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
                                 goto out;
                         }
 
-                        list_for_each (rtmp, &peer->ksnp_routes) {
+                        cfs_list_for_each (rtmp, &peer->ksnp_routes) {
                                 if (index-- > 0)
                                         continue;
 
-                                route = list_entry(rtmp, ksock_route_t,
-                                                   ksnr_list);
+                                route = cfs_list_entry(rtmp, ksock_route_t,
+                                                       ksnr_list);
 
                                 *id = peer->ksnp_id;
                                 *myip = route->ksnr_myipaddr;
@@ -369,7 +369,7 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
 void
 ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
 {
-        struct list_head  *tmp;
+        cfs_list_t        *tmp;
         ksock_conn_t      *conn;
         ksock_route_t     *route2;
 
@@ -380,8 +380,8 @@ ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
         LASSERT (route->ksnr_connected == 0);
 
         /* LASSERT(unique) */
-        list_for_each(tmp, &peer->ksnp_routes) {
-                route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+        cfs_list_for_each(tmp, &peer->ksnp_routes) {
+                route2 = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
 
                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
                         CERROR ("Duplicate route %s %u.%u.%u.%u\n",
@@ -394,10 +394,10 @@ ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
         route->ksnr_peer = peer;
         ksocknal_peer_addref(peer);
         /* peer's routelist takes over my ref on 'route' */
-        list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+        cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
 
-        list_for_each(tmp, &peer->ksnp_conns) {
-                conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+        cfs_list_for_each(tmp, &peer->ksnp_conns) {
+                conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
 
                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
                         continue;
@@ -413,14 +413,14 @@ ksocknal_del_route_locked (ksock_route_t *route)
         ksock_peer_t      *peer = route->ksnr_peer;
         ksock_interface_t *iface;
         ksock_conn_t      *conn;
-        struct list_head  *ctmp;
-        struct list_head  *cnxt;
+        cfs_list_t        *ctmp;
+        cfs_list_t        *cnxt;
 
         LASSERT (!route->ksnr_deleted);
 
         /* Close associated conns */
-        list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
-                conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
+                conn = cfs_list_entry(ctmp, ksock_conn_t, ksnc_list);
 
                 if (conn->ksnc_route != route)
                         continue;
@@ -436,11 +436,11 @@ ksocknal_del_route_locked (ksock_route_t *route)
         }
 
         route->ksnr_deleted = 1;
-        list_del (&route->ksnr_list);
+        cfs_list_del (&route->ksnr_list);
         ksocknal_route_decref(route);             /* drop peer's ref */
 
-        if (list_empty (&peer->ksnp_routes) &&
-            list_empty (&peer->ksnp_conns)) {
+        if (cfs_list_empty (&peer->ksnp_routes) &&
+            cfs_list_empty (&peer->ksnp_conns)) {
                 /* I've just removed the last route to a peer with no active
                  * connections */
                 ksocknal_unlink_peer_locked (peer);
@@ -450,7 +450,7 @@ ksocknal_del_route_locked (ksock_route_t *route)
 int
 ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
 {
-        struct list_head  *tmp;
+        cfs_list_t        *tmp;
         ksock_peer_t      *peer;
         ksock_peer_t      *peer2;
         ksock_route_t     *route;
@@ -483,13 +483,13 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
                 peer = peer2;
         } else {
                 /* peer table takes my ref on peer */
-                list_add_tail (&peer->ksnp_list,
-                               ksocknal_nid2peerlist (id.nid));
+                cfs_list_add_tail (&peer->ksnp_list,
+                                   ksocknal_nid2peerlist (id.nid));
         }
 
         route2 = NULL;
-        list_for_each (tmp, &peer->ksnp_routes) {
-                route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+        cfs_list_for_each (tmp, &peer->ksnp_routes) {
+                route2 = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
 
                 if (route2->ksnr_ipaddr == ipaddr)
                         break;
@@ -514,8 +514,8 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
 {
         ksock_conn_t     *conn;
         ksock_route_t    *route;
-        struct list_head *tmp;
-        struct list_head *nxt;
+        cfs_list_t       *tmp;
+        cfs_list_t       *nxt;
         int               nshared;
 
         LASSERT (!peer->ksnp_closing);
@@ -523,8 +523,8 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
         /* Extra ref prevents peer disappearing until I'm done with it */
         ksocknal_peer_addref(peer);
 
-        list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
-                route = list_entry(tmp, ksock_route_t, ksnr_list);
+        cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+                route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
 
                 /* no match */
                 if (!(ip == 0 || route->ksnr_ipaddr == ip))
@@ -536,8 +536,8 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
         }
 
         nshared = 0;
-        list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
-                route = list_entry(tmp, ksock_route_t, ksnr_list);
+        cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+                route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
                 nshared += route->ksnr_share_count;
         }
 
@@ -545,16 +545,16 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
                 /* remove everything else if there are no explicit entries
                  * left */
 
-                list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
-                        route = list_entry(tmp, ksock_route_t, ksnr_list);
+                cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+                        route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
 
                         /* we should only be removing auto-entries */
                         LASSERT(route->ksnr_share_count == 0);
                         ksocknal_del_route_locked (route);
                 }
 
-                list_for_each_safe (tmp, nxt, &peer->ksnp_conns) {
-                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+                cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_conns) {
+                        conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
 
                         ksocknal_close_conn_locked(conn, 0);
                 }
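
Deletion during traversal keeps the _safe variant: cfs_list_for_each_safe captures each successor before the loop body runs, so the current node can be unlinked mid-walk. A sketch with route_t, r_list, r_ipaddr and route_decref() as hypothetical names:

    typedef struct {
            cfs_list_t r_list;              /* linkage */
            __u32      r_ipaddr;
    } route_t;

    extern void route_decref(route_t *route);   /* hypothetical release */

    /* Sketch: unlink every route matching ip (0 matches all). */
    static void del_matching_routes(cfs_list_t *routes, __u32 ip)
    {
            cfs_list_t *tmp;
            cfs_list_t *nxt;
            route_t    *route;

            cfs_list_for_each_safe(tmp, nxt, routes) {
                    route = cfs_list_entry(tmp, route_t, r_list);
                    if (ip == 0 || route->r_ipaddr == ip) {
                            cfs_list_del(&route->r_list);
                            route_decref(route);
                    }
            }
    }
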
@@ -568,8 +568,8 @@ int
 ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 {
         CFS_LIST_HEAD     (zombies);
-        struct list_head  *ptmp;
-        struct list_head  *pnxt;
+        cfs_list_t        *ptmp;
+        cfs_list_t        *pnxt;
         ksock_peer_t      *peer;
         int                lo;
         int                hi;
@@ -586,8 +586,9 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
-                        peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+                cfs_list_for_each_safe (ptmp, pnxt,
+                                        &ksocknal_data.ksnd_peers[i]) {
+                        peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
 
                         if (peer->ksnp_ni != ni)
                                 continue;
@@ -600,11 +601,13 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 
                         ksocknal_del_peer_locked (peer, ip);
 
-                        if (peer->ksnp_closing && !list_empty(&peer->ksnp_tx_queue)) {
-                                LASSERT (list_empty(&peer->ksnp_conns));
-                                LASSERT (list_empty(&peer->ksnp_routes));
+                        if (peer->ksnp_closing &&
+                            !cfs_list_empty(&peer->ksnp_tx_queue)) {
+                                LASSERT (cfs_list_empty(&peer->ksnp_conns));
+                                LASSERT (cfs_list_empty(&peer->ksnp_routes));
 
-                                list_splice_init(&peer->ksnp_tx_queue, &zombies);
+                                cfs_list_splice_init(&peer->ksnp_tx_queue,
+                                                     &zombies);
                         }
 
                         ksocknal_peer_decref(peer);     /* ...till here */
@@ -624,29 +627,31 @@ ksock_conn_t *
 ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
 {
         ksock_peer_t      *peer;
-        struct list_head  *ptmp;
+        cfs_list_t        *ptmp;
         ksock_conn_t      *conn;
-        struct list_head  *ctmp;
+        cfs_list_t        *ctmp;
         int                i;
 
         cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-                list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
-                        peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+                cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
+                        peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
 
                         LASSERT (!peer->ksnp_closing);
 
                         if (peer->ksnp_ni != ni)
                                 continue;
 
-                        list_for_each (ctmp, &peer->ksnp_conns) {
+                        cfs_list_for_each (ctmp, &peer->ksnp_conns) {
                                 if (index-- > 0)
                                         continue;
 
-                                conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+                                conn = cfs_list_entry (ctmp, ksock_conn_t,
+                                                       ksnc_list);
                                 ksocknal_conn_addref(conn);
-                                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+                                cfs_read_unlock (
+                                        &ksocknal_data.ksnd_global_lock);
                                 return (conn);
                         }
                 }
@@ -663,7 +668,7 @@ ksocknal_choose_scheduler_locked (unsigned int irq)
         ksock_irqinfo_t  *info;
         int               i;
 
-        LASSERT (irq < NR_IRQS);
+        LASSERT (irq < CFS_NR_IRQS);
         info = &ksocknal_data.ksnd_irqinfo[irq];
 
         if (irq != 0 &&                         /* hardware NIC */
@@ -860,19 +865,19 @@ void
 ksocknal_create_routes(ksock_peer_t *peer, int port,
                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
 {
-        ksock_route_t      *newroute = NULL;
-        cfs_rwlock_t       *global_lock = &ksocknal_data.ksnd_global_lock;
-        lnet_ni_t          *ni = peer->ksnp_ni;
-        ksock_net_t        *net = ni->ni_data;
-        struct list_head   *rtmp;
-        ksock_route_t      *route;
-        ksock_interface_t  *iface;
-        ksock_interface_t  *best_iface;
-        int                 best_netmatch;
-        int                 this_netmatch;
-        int                 best_nroutes;
-        int                 i;
-        int                 j;
+        ksock_route_t       *newroute = NULL;
+        cfs_rwlock_t        *global_lock = &ksocknal_data.ksnd_global_lock;
+        lnet_ni_t           *ni = peer->ksnp_ni;
+        ksock_net_t         *net = ni->ni_data;
+        cfs_list_t          *rtmp;
+        ksock_route_t       *route;
+        ksock_interface_t   *iface;
+        ksock_interface_t   *best_iface;
+        int                  best_netmatch;
+        int                  this_netmatch;
+        int                  best_nroutes;
+        int                  i;
+        int                  j;
 
         /* CAVEAT EMPTOR: We do all our interface matching with an
          * exclusive hold of global lock at IRQ priority.  We're only
@@ -910,8 +915,8 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
 
                 /* Already got a route? */
                 route = NULL;
-                list_for_each(rtmp, &peer->ksnp_routes) {
-                        route = list_entry(rtmp, ksock_route_t, ksnr_list);
+                cfs_list_for_each(rtmp, &peer->ksnp_routes) {
+                        route = cfs_list_entry(rtmp, ksock_route_t, ksnr_list);
 
                         if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
                                 break;
@@ -932,8 +937,9 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                         iface = &net->ksnn_interfaces[j];
 
                         /* Using this interface already? */
-                        list_for_each(rtmp, &peer->ksnp_routes) {
-                                route = list_entry(rtmp, ksock_route_t, ksnr_list);
+                        cfs_list_for_each(rtmp, &peer->ksnp_routes) {
+                                route = cfs_list_entry(rtmp, ksock_route_t,
+                                                       ksnr_list);
 
                                 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
                                         break;
@@ -998,7 +1004,7 @@ ksocknal_accept (lnet_ni_t *ni, cfs_socket_t *sock)
 
         cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
-        list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
+        cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
         cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
 
         cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
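
ksocknal_accept shows the producer half of the connd handoff: enqueue under the BH-safe spinlock and signal the waitqueue before dropping the lock, so a sleeping daemon cannot miss the request. A sketch with req_t and the connd_* globals as hypothetical placeholders:

    typedef struct {
            cfs_list_t r_list;              /* linkage */
    } req_t;

    static cfs_spinlock_t connd_lock;       /* placeholder globals */
    static cfs_list_t     connd_queue;
    static cfs_waitq_t    connd_waitq;

    /* Sketch: hand a request to the consumer thread. */
    static void enqueue_request(req_t *req)
    {
            cfs_spin_lock_bh(&connd_lock);
            cfs_list_add_tail(&req->r_list, &connd_queue);
            cfs_waitq_signal(&connd_waitq); /* wake one sleeping daemon */
            cfs_spin_unlock_bh(&connd_lock);
    }
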
@@ -1026,7 +1032,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         cfs_rwlock_t      *global_lock = &ksocknal_data.ksnd_global_lock;
         CFS_LIST_HEAD     (zombies);
         lnet_process_id_t  peerid;
-        struct list_head  *tmp;
+        cfs_list_t        *tmp;
         __u64              incarnation;
         ksock_conn_t      *conn;
         ksock_conn_t      *conn2;
@@ -1149,8 +1155,8 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 if (peer2 == NULL) {
                         /* NB this puts an "empty" peer in the peer
                          * table (which takes my ref) */
-                        list_add_tail(&peer->ksnp_list,
-                                      ksocknal_nid2peerlist(peerid.nid));
+                        cfs_list_add_tail(&peer->ksnp_list,
+                                          ksocknal_nid2peerlist(peerid.nid));
                 } else {
                         ksocknal_peer_decref(peer);
                         peer = peer2;
@@ -1183,7 +1189,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                  * NB recv_hello may have returned EPROTO to signal my peer
                  * wants a different protocol than the one I asked for.
                  */
-                LASSERT (list_empty(&peer->ksnp_conns));
+                LASSERT (cfs_list_empty(&peer->ksnp_conns));
 
                 peer->ksnp_proto = conn->ksnc_proto;
                 peer->ksnp_incarnation = incarnation;
@@ -1218,8 +1224,8 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         /* Refuse to duplicate an existing connection, unless this is a
          * loopback connection */
         if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
-                list_for_each(tmp, &peer->ksnp_conns) {
-                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+                cfs_list_for_each(tmp, &peer->ksnp_conns) {
+                        conn2 = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
 
                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
@@ -1252,8 +1258,8 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
          * create an association.  This allows incoming connections created
          * by routes in my peer to match my own route entries so I don't
          * continually create duplicate routes. */
-        list_for_each (tmp, &peer->ksnp_routes) {
-                route = list_entry(tmp, ksock_route_t, ksnr_list);
+        cfs_list_for_each (tmp, &peer->ksnp_routes) {
+                route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
 
                 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
                         continue;
@@ -1277,7 +1283,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
         cfs_mb();   /* order with adding to peer's conn list */
 
-        list_add (&conn->ksnc_list, &peer->ksnp_conns);
+        cfs_list_add (&conn->ksnc_list, &peer->ksnp_conns);
         ksocknal_conn_addref(conn);
 
         ksocknal_new_packet(conn, 0);
@@ -1285,11 +1291,11 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
 
         /* Take packets blocking for this connection. */
-        list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
+        cfs_list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
                 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
                                 continue;
 
-                list_del (&tx->tx_list);
+                cfs_list_del (&tx->tx_list);
                 ksocknal_queue_tx_locked (tx, conn);
         }
 
@@ -1363,10 +1369,10 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
 
  failed_2:
         if (!peer->ksnp_closing &&
-            list_empty (&peer->ksnp_conns) &&
-            list_empty (&peer->ksnp_routes)) {
-                list_add(&zombies, &peer->ksnp_tx_queue);
-                list_del_init(&peer->ksnp_tx_queue);
+            cfs_list_empty (&peer->ksnp_conns) &&
+            cfs_list_empty (&peer->ksnp_routes)) {
+                cfs_list_add(&zombies, &peer->ksnp_tx_queue);
+                cfs_list_del_init(&peer->ksnp_tx_queue);
                 ksocknal_unlink_peer_locked(peer);
         }
 
@@ -1419,14 +1425,14 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
         ksock_peer_t      *peer = conn->ksnc_peer;
         ksock_route_t     *route;
         ksock_conn_t      *conn2;
-        struct list_head  *tmp;
+        cfs_list_t        *tmp;
 
         LASSERT (peer->ksnp_error == 0);
         LASSERT (!conn->ksnc_closing);
         conn->ksnc_closing = 1;
 
         /* ksnd_deathrow_conns takes over peer's ref */
-        list_del (&conn->ksnc_list);
+        cfs_list_del (&conn->ksnc_list);
 
         route = conn->ksnc_route;
         if (route != NULL) {
@@ -1435,8 +1441,8 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
                 LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
 
                 conn2 = NULL;
-                list_for_each(tmp, &peer->ksnp_conns) {
-                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+                cfs_list_for_each(tmp, &peer->ksnp_conns) {
+                        conn2 = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
 
                         if (conn2->ksnc_route == route &&
                             conn2->ksnc_type == conn->ksnc_type)
@@ -1450,34 +1456,37 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
                 conn->ksnc_route = NULL;
 
 #if 0           /* irrelevent with only eager routes */
-                list_del (&route->ksnr_list);   /* make route least favourite */
-                list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
+                /* make route least favourite */
+                cfs_list_del (&route->ksnr_list);
+                cfs_list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
 #endif
                 ksocknal_route_decref(route);     /* drop conn's ref on route */
         }
 
-        if (list_empty (&peer->ksnp_conns)) {
+        if (cfs_list_empty (&peer->ksnp_conns)) {
                 /* No more connections to this peer */
 
-                if (!list_empty(&peer->ksnp_tx_queue)) {
+                if (!cfs_list_empty(&peer->ksnp_tx_queue)) {
                         ksock_tx_t *tx;
 
                         LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
 
                         /* throw them to the last connection...,
                          * these TXs will be send to /dev/null by scheduler */
-                        list_for_each_entry(tx, &peer->ksnp_tx_queue, tx_list)
+                        cfs_list_for_each_entry(tx, &peer->ksnp_tx_queue,
+                                                tx_list)
                                 ksocknal_tx_prep(conn, tx);
 
-                        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
-                        list_splice_init(&peer->ksnp_tx_queue, &conn->ksnc_tx_queue);
-                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
+                        cfs_spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+                        cfs_list_splice_init(&peer->ksnp_tx_queue,
+                                             &conn->ksnc_tx_queue);
+                        cfs_spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
                 }
 
                 peer->ksnp_proto = NULL;        /* renegotiate protocol version */
                 peer->ksnp_error = error;       /* stash last conn close reason */
 
-                if (list_empty (&peer->ksnp_routes)) {
+                if (cfs_list_empty (&peer->ksnp_routes)) {
                         /* I've just closed last conn belonging to a
                          * peer with no routes to it */
                         ksocknal_unlink_peer_locked (peer);
@@ -1486,7 +1495,8 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
 
         cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
-        list_add_tail (&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
+        cfs_list_add_tail (&conn->ksnc_list,
+                           &ksocknal_data.ksnd_deathrow_conns);
         cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
 
         cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
@@ -1505,7 +1515,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
         cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
-            list_empty(&peer->ksnp_conns) &&
+            cfs_list_empty(&peer->ksnp_conns) &&
             peer->ksnp_accepting == 0 &&
             ksocknal_find_connecting_route_locked(peer) == NULL) {
                 notify = 1;
@@ -1541,16 +1551,16 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
                 LASSERT (tx->tx_msg.ksm_zc_cookies[0] != 0);
 
                 tx->tx_msg.ksm_zc_cookies[0] = 0;
-                list_del(&tx->tx_zc_list);
-                list_add(&tx->tx_zc_list, &zlist);
+                cfs_list_del(&tx->tx_zc_list);
+                cfs_list_add(&tx->tx_zc_list, &zlist);
         }
 
         cfs_spin_unlock(&peer->ksnp_lock);
 
-        while (!list_empty(&zlist)) {
-                tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+        while (!cfs_list_empty(&zlist)) {
+                tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
 
-                list_del(&tx->tx_zc_list);
+                cfs_list_del(&tx->tx_zc_list);
                 ksocknal_tx_decref(tx);
         }
 }
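
ksocknal_finalize_zcreq uses the other drain idiom seen in this file: collect the victims onto a private list while holding the lock, then complete them after dropping it, keeping the critical section short. A sketch using cfs_list_splice_init, with tx_t and tx_decref() hypothetical:

    typedef struct {
            cfs_list_t tx_list;             /* linkage */
    } tx_t;

    extern void tx_decref(tx_t *tx);        /* hypothetical release */

    /* Sketch: empty 'queue' under the lock, release entries outside it. */
    static void drain_outside_lock(cfs_spinlock_t *lock, cfs_list_t *queue)
    {
            CFS_LIST_HEAD  (zlist);
            tx_t           *tx;

            cfs_spin_lock(lock);
            cfs_list_splice_init(queue, &zlist);    /* queue is now empty */
            cfs_spin_unlock(lock);

            while (!cfs_list_empty(&zlist)) {
                    tx = cfs_list_entry(zlist.next, tx_t, tx_list);
                    cfs_list_del(&tx->tx_list);
                    tx_decref(tx);
            }
    }
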
@@ -1575,8 +1585,8 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
         conn->ksnc_tx_ready = 1;
 
         if (!conn->ksnc_tx_scheduled &&
-            !list_empty(&conn->ksnc_tx_queue)){
-                list_add_tail (&conn->ksnc_tx_list,
+            !cfs_list_empty(&conn->ksnc_tx_queue)){
+                cfs_list_add_tail (&conn->ksnc_tx_list,
                                &sched->kss_tx_conns);
                 conn->ksnc_tx_scheduled = 1;
                 /* extra ref for scheduler */
@@ -1598,7 +1608,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
 
         if (peer->ksnp_error != 0) {
                 /* peer's last conn closed in error */
-                LASSERT (list_empty (&peer->ksnp_conns));
+                LASSERT (cfs_list_empty (&peer->ksnp_conns));
                 failed = 1;
                 peer->ksnp_error = 0;     /* avoid multiple notifications */
         }
@@ -1624,7 +1634,7 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
         LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
         cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
-        list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
+        cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
         cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
 
         cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
@@ -1642,7 +1652,7 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
         LASSERT (conn->ksnc_route == NULL);
         LASSERT (!conn->ksnc_tx_scheduled);
         LASSERT (!conn->ksnc_rx_scheduled);
-        LASSERT (list_empty(&conn->ksnc_tx_queue));
+        LASSERT (cfs_list_empty(&conn->ksnc_tx_queue));
 
         /* complete current receive if any */
         switch (conn->ksnc_rx_state) {
@@ -1691,12 +1701,12 @@ int
 ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
 {
         ksock_conn_t       *conn;
-        struct list_head   *ctmp;
-        struct list_head   *cnxt;
+        cfs_list_t         *ctmp;
+        cfs_list_t         *cnxt;
         int                 count = 0;
 
-        list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
-                conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
+                conn = cfs_list_entry (ctmp, ksock_conn_t, ksnc_list);
 
                 if (ipaddr == 0 ||
                     conn->ksnc_ipaddr == ipaddr) {
@@ -1728,8 +1738,8 @@ int
 ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
 {
         ksock_peer_t       *peer;
-        struct list_head   *ptmp;
-        struct list_head   *pnxt;
+        cfs_list_t         *ptmp;
+        cfs_list_t         *pnxt;
         int                 lo;
         int                 hi;
         int                 i;
@@ -1745,9 +1755,10 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
+                cfs_list_for_each_safe (ptmp, pnxt,
+                                        &ksocknal_data.ksnd_peers[i]) {
 
-                        peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+                        peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
 
                         if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
                               (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
@@ -1795,19 +1806,19 @@ ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
         int                connect = 1;
         cfs_time_t         last_alive = 0;
         ksock_peer_t      *peer = NULL;
-        rwlock_t          *glock = &ksocknal_data.ksnd_global_lock;
+        cfs_rwlock_t      *glock = &ksocknal_data.ksnd_global_lock;
         lnet_process_id_t  id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
 
-        read_lock(glock);
+        cfs_read_lock(glock);
 
         peer = ksocknal_find_peer_locked(ni, id);
         if (peer != NULL) {
-                struct list_head *tmp;
+                cfs_list_t       *tmp;
                 ksock_conn_t     *conn;
                 int               bufnob;
 
-                list_for_each (tmp, &peer->ksnp_conns) {
-                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+                cfs_list_for_each (tmp, &peer->ksnp_conns) {
+                        conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
                         bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
 
                         if (bufnob < conn->ksnc_tx_bufnob) {
@@ -1824,7 +1835,7 @@ ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
                         connect = 0;
         }
 
-        read_unlock(glock);
+        cfs_read_unlock(glock);
 
         if (last_alive != 0)
                 *when = last_alive;
@@ -1834,13 +1845,13 @@ ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
 
         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
 
-        write_lock_bh(glock);
+        cfs_write_lock_bh(glock);
 
         peer = ksocknal_find_peer_locked(ni, id);
         if (peer != NULL)
                 ksocknal_launch_all_connections_locked(peer);
 
-        write_unlock_bh(glock);
+        cfs_write_unlock_bh(glock);
         return;
 }
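
ksocknal_query keeps the read/write split through the rename: lookups take the global rwlock as reader, and only when a (re)connect is needed is it retaken as writer with BHs disabled. A sketch of the shape, with find_peer_locked() and launch_connections_locked() as hypothetical simplified stand-ins:

    extern ksock_peer_t *find_peer_locked(lnet_process_id_t id);
    extern void          launch_connections_locked(ksock_peer_t *peer);

    /* Sketch: cheap read-side check first; write lock only to mutate. */
    static void query_then_connect(cfs_rwlock_t *glock, lnet_process_id_t id)
    {
            ksock_peer_t *peer;

            cfs_read_lock(glock);
            peer = find_peer_locked(id);
            cfs_read_unlock(glock);

            if (peer != NULL)               /* already known: nothing to do */
                    return;

            cfs_write_lock_bh(glock);
            peer = find_peer_locked(id);    /* re-check: state may have changed */
            if (peer != NULL)
                    launch_connections_locked(peer);
            cfs_write_unlock_bh(glock);
    }
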
 
@@ -1849,7 +1860,7 @@ ksocknal_push_peer (ksock_peer_t *peer)
 {
         int               index;
         int               i;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         ksock_conn_t     *conn;
 
         for (index = 0; ; index++) {
@@ -1858,9 +1869,10 @@ ksocknal_push_peer (ksock_peer_t *peer)
                 i = 0;
                 conn = NULL;
 
-                list_for_each (tmp, &peer->ksnp_conns) {
+                cfs_list_for_each (tmp, &peer->ksnp_conns) {
                         if (i++ == index) {
-                                conn = list_entry (tmp, ksock_conn_t, ksnc_list);
+                                conn = cfs_list_entry (tmp, ksock_conn_t,
+                                                       ksnc_list);
                                 ksocknal_conn_addref(conn);
                                 break;
                         }
@@ -1880,7 +1892,7 @@ int
 ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
 {
         ksock_peer_t      *peer;
-        struct list_head  *tmp;
+        cfs_list_t        *tmp;
         int                index;
         int                i;
         int                j;
@@ -1893,9 +1905,9 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
                         index = 0;
                         peer = NULL;
 
-                        list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
-                                peer = list_entry(tmp, ksock_peer_t,
-                                                  ksnp_list);
+                        cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
+                                peer = cfs_list_entry(tmp, ksock_peer_t,
+                                                      ksnp_list);
 
                                 if (!((id.nid == LNET_NID_ANY ||
                                        id.nid == peer->ksnp_id.nid) &&
@@ -1933,9 +1945,9 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
         int                rc;
         int                i;
         int                j;
-        struct list_head  *ptmp;
+        cfs_list_t        *ptmp;
         ksock_peer_t      *peer;
-        struct list_head  *rtmp;
+        cfs_list_t        *rtmp;
         ksock_route_t     *route;
 
         if (ipaddress == 0 ||
@@ -1959,15 +1971,18 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
                 iface->ksni_npeers = 0;
 
                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-                        list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                                peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                        cfs_list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
+                                peer = cfs_list_entry(ptmp, ksock_peer_t,
+                                                      ksnp_list);
 
                                 for (j = 0; j < peer->ksnp_n_passive_ips; j++)
                                         if (peer->ksnp_passive_ips[j] == ipaddress)
                                                 iface->ksni_npeers++;
 
-                                list_for_each(rtmp, &peer->ksnp_routes) {
-                                        route = list_entry(rtmp, ksock_route_t, ksnr_list);
+                                cfs_list_for_each(rtmp, &peer->ksnp_routes) {
+                                        route = cfs_list_entry(rtmp,
+                                                               ksock_route_t,
+                                                               ksnr_list);
 
                                         if (route->ksnr_myipaddr == ipaddress)
                                                 iface->ksni_nroutes++;
@@ -1987,8 +2002,8 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 void
 ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
 {
-        struct list_head   *tmp;
-        struct list_head   *nxt;
+        cfs_list_t         *tmp;
+        cfs_list_t         *nxt;
         ksock_route_t      *route;
         ksock_conn_t       *conn;
         int                 i;
@@ -2003,8 +2018,8 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
                         break;
                 }
 
-        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-                route = list_entry (tmp, ksock_route_t, ksnr_list);
+        cfs_list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
+                route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
 
                 if (route->ksnr_myipaddr != ipaddr)
                         continue;
@@ -2017,8 +2032,8 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
                 }
         }
 
-        list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
-                conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+        cfs_list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
+                conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
 
                 if (conn->ksnc_myipaddr == ipaddr)
                         ksocknal_close_conn_locked (conn, 0);
@@ -2030,8 +2045,8 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
 {
         ksock_net_t       *net = ni->ni_data;
         int                rc = -ENOENT;
-        struct list_head  *tmp;
-        struct list_head  *nxt;
+        cfs_list_t        *tmp;
+        cfs_list_t        *nxt;
         ksock_peer_t      *peer;
         __u32              this_ip;
         int                i;
@@ -2055,8 +2070,10 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
                 net->ksnn_ninterfaces--;
 
                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
-                        list_for_each_safe(tmp, nxt, &ksocknal_data.ksnd_peers[j]) {
-                                peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+                        cfs_list_for_each_safe(tmp, nxt,
+                                               &ksocknal_data.ksnd_peers[j]) {
+                                peer = cfs_list_entry(tmp, ksock_peer_t,
+                                                      ksnp_list);
 
                                 if (peer->ksnp_ni != ni)
                                         continue;
@@ -2209,22 +2226,22 @@ ksocknal_free_buffers (void)
                              sizeof (ksock_sched_t) * ksocknal_data.ksnd_nschedulers);
 
         LIBCFS_FREE (ksocknal_data.ksnd_peers,
-                     sizeof (struct list_head) *
+                     sizeof (cfs_list_t) *
                      ksocknal_data.ksnd_peer_hash_size);
 
         cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
-        if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-                struct list_head  zlist;
+        if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+                cfs_list_t        zlist;
                 ksock_tx_t       *tx;
 
-                list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
-                list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+                cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
+                cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
                 cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
 
-                while(!list_empty(&zlist)) {
-                        tx = list_entry(zlist.next, ksock_tx_t, tx_list);
-                        list_del(&tx->tx_list);
+                while(!cfs_list_empty(&zlist)) {
+                        tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
+                        cfs_list_del(&tx->tx_list);
                         LIBCFS_FREE(tx, tx->tx_desc_size);
                 }
         } else {
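
The zlist dance above deserves a note, since the same steal-then-drain idiom
recurs in ksocknal_scheduler() and ksocknal_reaper() below: cfs_list_add()
inserts the local head into the circular ring, cfs_list_del_init() then
unlinks the shared head, so every node is reachable from zlist alone and the
lock can be dropped before the potentially long free loop. Restated as a
standalone sketch (names taken from the hunk, nothing new assumed):

        cfs_list_t  zlist;
        ksock_tx_t *tx;

        cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
        cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs); /* join ring */
        cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);    /* detach head */
        cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);

        while (!cfs_list_empty(&zlist)) {                /* drain unlocked */
                tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
                cfs_list_del(&tx->tx_list);
                LIBCFS_FREE(tx, tx->tx_desc_size);
        }
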
@@ -2250,21 +2267,22 @@ ksocknal_base_shutdown (void)
         case SOCKNAL_INIT_DATA:
                 LASSERT (ksocknal_data.ksnd_peers != NULL);
                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-                        LASSERT (list_empty (&ksocknal_data.ksnd_peers[i]));
+                        LASSERT (cfs_list_empty (&ksocknal_data.ksnd_peers[i]));
                 }
-                LASSERT (list_empty (&ksocknal_data.ksnd_enomem_conns));
-                LASSERT (list_empty (&ksocknal_data.ksnd_zombie_conns));
-                LASSERT (list_empty (&ksocknal_data.ksnd_connd_connreqs));
-                LASSERT (list_empty (&ksocknal_data.ksnd_connd_routes));
+                LASSERT (cfs_list_empty (&ksocknal_data.ksnd_enomem_conns));
+                LASSERT (cfs_list_empty (&ksocknal_data.ksnd_zombie_conns));
+                LASSERT (cfs_list_empty (&ksocknal_data.ksnd_connd_connreqs));
+                LASSERT (cfs_list_empty (&ksocknal_data.ksnd_connd_routes));
 
                 if (ksocknal_data.ksnd_schedulers != NULL)
                         for (i = 0; i < ksocknal_data.ksnd_nschedulers; i++) {
                                 ksock_sched_t *kss =
                                         &ksocknal_data.ksnd_schedulers[i];
 
-                                LASSERT (list_empty (&kss->kss_tx_conns));
-                                LASSERT (list_empty (&kss->kss_rx_conns));
-                                LASSERT (list_empty (&kss->kss_zombie_noop_txs));
+                                LASSERT (cfs_list_empty (&kss->kss_tx_conns));
+                                LASSERT (cfs_list_empty (&kss->kss_rx_conns));
+                                LASSERT (cfs_list_empty(
+                                                &kss->kss_zombie_noop_txs));
                                 LASSERT (kss->kss_nconns == 0);
                         }
 
@@ -2314,7 +2332,7 @@ ksocknal_new_incarnation (void)
          * we won't be able to reboot more frequently than 1MHz for the
          * foreseeable future :) */
 
-        cfs_do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
 
         return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
 }
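
For reference, the incarnation computed above is simply a microsecond
resolution wall-clock stamp packed into 64 bits, which is why the comment
jokes about a 1MHz reboot rate: two boots collide only if they start within
the same microsecond. A sketch, assuming cfs_timeval_t mirrors struct
timeval:

        cfs_timeval_t tv;
        __u64         incarnation;

        cfs_gettimeofday(&tv);          /* wall-clock seconds + microseconds */
        incarnation = ((__u64)tv.tv_sec) * 1000000 + tv.tv_usec;
        /* e.g. tv_sec = 1263456000, tv_usec = 42 -> 1263456000000042 */
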
@@ -2332,7 +2350,8 @@ ksocknal_base_startup (void)
 
         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
         LIBCFS_ALLOC (ksocknal_data.ksnd_peers,
-                      sizeof (struct list_head) * ksocknal_data.ksnd_peer_hash_size);
+                      sizeof (cfs_list_t) *
+                      ksocknal_data.ksnd_peer_hash_size);
         if (ksocknal_data.ksnd_peers == NULL)
                 return -ENOMEM;
 
@@ -2422,14 +2441,14 @@ void
 ksocknal_debug_peerhash (lnet_ni_t *ni)
 {
         ksock_peer_t     *peer = NULL;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         int               i;
 
         cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-                list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
-                        peer = list_entry (tmp, ksock_peer_t, ksnp_list);
+                cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
+                        peer = cfs_list_entry (tmp, ksock_peer_t, ksnp_list);
 
                         if (peer->ksnp_ni == ni) break;
 
@@ -2448,19 +2467,19 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                        peer->ksnp_sharecount, peer->ksnp_closing,
                        peer->ksnp_accepting, peer->ksnp_error,
                        peer->ksnp_zc_next_cookie,
-                       !list_empty(&peer->ksnp_tx_queue),
-                       !list_empty(&peer->ksnp_zc_req_list));
+                       !cfs_list_empty(&peer->ksnp_tx_queue),
+                       !cfs_list_empty(&peer->ksnp_zc_req_list));
 
-                list_for_each (tmp, &peer->ksnp_routes) {
-                        route = list_entry(tmp, ksock_route_t, ksnr_list);
+                cfs_list_for_each (tmp, &peer->ksnp_routes) {
+                        route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
                                "del %d\n", cfs_atomic_read(&route->ksnr_refcount),
                                route->ksnr_scheduled, route->ksnr_connecting,
                                route->ksnr_connected, route->ksnr_deleted);
                 }
 
-                list_for_each (tmp, &peer->ksnp_conns) {
-                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+                cfs_list_for_each (tmp, &peer->ksnp_conns) {
+                        conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
                                cfs_atomic_read(&conn->ksnc_conn_refcount),
                                cfs_atomic_read(&conn->ksnc_sock_refcount),
index a4cec42..ed79ccb 100644
@@ -68,9 +68,9 @@
 typedef struct                                  /* per scheduler state */
 {
         cfs_spinlock_t    kss_lock;             /* serialise */
-        struct list_head  kss_rx_conns;         /* conn waiting to be read */
-        struct list_head  kss_tx_conns;         /* conn waiting to be written */
-        struct list_head  kss_zombie_noop_txs;  /* zombie noop tx list */
+        cfs_list_t        kss_rx_conns;         /* conn waiting to be read */
+        cfs_list_t        kss_tx_conns;         /* conn waiting to be written */
+        cfs_list_t        kss_zombie_noop_txs;  /* zombie noop tx list */
         cfs_waitq_t       kss_waitq;            /* where scheduler sleeps */
         int               kss_nconns;           /* # connections assigned to this scheduler */
 #if !SOCKNAL_SINGLE_FRAG_RX
@@ -150,41 +150,41 @@ typedef struct
 
 typedef struct
 {
-        int               ksnd_init;            /* initialisation state */
-        int               ksnd_nnets;           /* # networks set up */
-
-        cfs_rwlock_t      ksnd_global_lock;     /* stabilize peer/conn ops */
-        struct list_head *ksnd_peers;           /* hash table of all my known peers */
-        int               ksnd_peer_hash_size;  /* size of ksnd_peers */
-
-        int               ksnd_nthreads;        /* # live threads */
-        int               ksnd_shuttingdown;    /* tell threads to exit */
-        int               ksnd_nschedulers;     /* # schedulers */
-        ksock_sched_t    *ksnd_schedulers;      /* their state */
-
-        cfs_atomic_t      ksnd_nactive_txs;     /* #active txs */
-
-        struct list_head  ksnd_deathrow_conns;  /* conns to close: reaper_lock*/
-        struct list_head  ksnd_zombie_conns;    /* conns to free: reaper_lock */
-        struct list_head  ksnd_enomem_conns;    /* conns to retry: reaper_lock*/
-        cfs_waitq_t       ksnd_reaper_waitq;    /* reaper sleeps here */
-        cfs_time_t        ksnd_reaper_waketime; /* when reaper will wake */
-        cfs_spinlock_t    ksnd_reaper_lock;     /* serialise */
-
-        int               ksnd_enomem_tx;       /* test ENOMEM sender */
-        int               ksnd_stall_tx;        /* test sluggish sender */
-        int               ksnd_stall_rx;        /* test sluggish receiver */
-
-        struct list_head  ksnd_connd_connreqs;  /* incoming connection requests */
-        struct list_head  ksnd_connd_routes;    /* routes waiting to be connected */
-        cfs_waitq_t       ksnd_connd_waitq;     /* connds sleep here */
+        int               ksnd_init;           /* initialisation state */
+        int               ksnd_nnets;          /* # networks set up */
+
+        cfs_rwlock_t      ksnd_global_lock;    /* stabilize peer/conn ops */
+        cfs_list_t       *ksnd_peers;          /* hash table of all my known peers */
+        int               ksnd_peer_hash_size; /* size of ksnd_peers */
+
+        int               ksnd_nthreads;       /* # live threads */
+        int               ksnd_shuttingdown;   /* tell threads to exit */
+        int               ksnd_nschedulers;    /* # schedulers */
+        ksock_sched_t    *ksnd_schedulers;     /* their state */
+
+        cfs_atomic_t      ksnd_nactive_txs;    /* #active txs */
+
+        cfs_list_t        ksnd_deathrow_conns; /* conns to close: reaper_lock*/
+        cfs_list_t        ksnd_zombie_conns;   /* conns to free: reaper_lock */
+        cfs_list_t        ksnd_enomem_conns;   /* conns to retry: reaper_lock*/
+        cfs_waitq_t       ksnd_reaper_waitq;   /* reaper sleeps here */
+        cfs_time_t        ksnd_reaper_waketime;/* when reaper will wake */
+        cfs_spinlock_t    ksnd_reaper_lock;    /* serialise */
+
+        int               ksnd_enomem_tx;      /* test ENOMEM sender */
+        int               ksnd_stall_tx;       /* test sluggish sender */
+        int               ksnd_stall_rx;       /* test sluggish receiver */
+
+        cfs_list_t        ksnd_connd_connreqs; /* incoming connection requests */
+        cfs_list_t        ksnd_connd_routes;   /* routes waiting to be connected */
+        cfs_waitq_t       ksnd_connd_waitq;    /* connds sleep here */
         int               ksnd_connd_connecting;/* # connds connecting */
-        cfs_spinlock_t    ksnd_connd_lock;      /* serialise */
+        cfs_spinlock_t    ksnd_connd_lock;     /* serialise */
 
-        struct list_head  ksnd_idle_noop_txs;   /* list head for freed noop tx */
-        cfs_spinlock_t    ksnd_tx_lock;         /* serialise, NOT safe in g_lock */
+        cfs_list_t        ksnd_idle_noop_txs;  /* list head for freed noop tx */
+        cfs_spinlock_t    ksnd_tx_lock;        /* serialise, NOT safe in g_lock */
 
-        ksock_irqinfo_t   ksnd_irqinfo[NR_IRQS];/* irq->scheduler lookup */
+        ksock_irqinfo_t   ksnd_irqinfo[CFS_NR_IRQS];/* irq->scheduler lookup */
 
 } ksock_nal_data_t;
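
CFS_NR_IRQS above is taken to be the libcfs portability alias for the
kernel's NR_IRQS, so ksnd_irqinfo can still be indexed directly by interrupt
line to find the scheduler state bound to that CPU. A hypothetical accessor,
not part of this hunk, just to show the intended shape:

        /* Hypothetical helper; assumes irq is a valid interrupt number. */
        static inline ksock_irqinfo_t *
        ksocknal_irq2info(unsigned int irq)
        {
                LASSERT(irq < CFS_NR_IRQS);
                return &ksocknal_data.ksnd_irqinfo[irq];
        }
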
 
@@ -208,23 +208,23 @@ struct ksock_proto;                             /* forward ref */
 
 typedef struct                                  /* transmit packet */
 {
-        struct list_head        tx_list;        /* queue on conn for transmission etc */
-        struct list_head        tx_zc_list;     /* queue on peer for ZC request */
-        cfs_atomic_t            tx_refcount;    /* tx reference count */
-        int                     tx_nob;         /* # packet bytes */
-        int                     tx_resid;       /* residual bytes */
-        int                     tx_niov;        /* # packet iovec frags */
-        struct iovec           *tx_iov;         /* packet iovec frags */
-        int                     tx_nkiov;       /* # packet page frags */
-        unsigned int            tx_zc_capable:1; /* payload is large enough for ZC */
-        unsigned int            tx_zc_checked:1; /* Have I checked if I should ZC? */
-        unsigned int            tx_nonblk:1;    /* it's a non-blocking ACK */
-        lnet_kiov_t            *tx_kiov;        /* packet page frags */
-        struct ksock_conn      *tx_conn;        /* owning conn */
-        lnet_msg_t             *tx_lnetmsg;     /* lnet message for lnet_finalize() */
-        cfs_time_t              tx_deadline;    /* when (in jiffies) tx times out */
-        ksock_msg_t             tx_msg;         /* socklnd message buffer */
-        int                     tx_desc_size;   /* size of this descriptor */
+        cfs_list_t     tx_list;        /* queue on conn for transmission etc */
+        cfs_list_t     tx_zc_list;     /* queue on peer for ZC request */
+        cfs_atomic_t   tx_refcount;    /* tx reference count */
+        int            tx_nob;         /* # packet bytes */
+        int            tx_resid;       /* residual bytes */
+        int            tx_niov;        /* # packet iovec frags */
+        struct iovec  *tx_iov;         /* packet iovec frags */
+        int            tx_nkiov;       /* # packet page frags */
+        unsigned int   tx_zc_capable:1; /* payload is large enough for ZC */
+        unsigned int   tx_zc_checked:1; /* Have I checked if I should ZC? */
+        unsigned int   tx_nonblk:1;    /* it's a non-blocking ACK */
+        lnet_kiov_t   *tx_kiov;        /* packet page frags */
+        struct ksock_conn  *tx_conn;        /* owning conn */
+        lnet_msg_t    *tx_lnetmsg;     /* lnet message for lnet_finalize() */
+        cfs_time_t     tx_deadline;    /* when (in jiffies) tx times out */
+        ksock_msg_t    tx_msg;         /* socklnd message buffer */
+        int            tx_desc_size;   /* size of this descriptor */
         union {
                 struct {
                         struct iovec iov;       /* virt hdr */
@@ -256,115 +256,117 @@ typedef union {
 
 typedef struct ksock_conn
 {
-        struct ksock_peer  *ksnc_peer;          /* owning peer */
-        struct ksock_route *ksnc_route;         /* owning route */
-        struct list_head    ksnc_list;          /* stash on peer's conn list */
-        cfs_socket_t       *ksnc_sock;          /* actual socket */
+        struct ksock_peer  *ksnc_peer;         /* owning peer */
+        struct ksock_route *ksnc_route;        /* owning route */
+        cfs_list_t          ksnc_list;         /* stash on peer's conn list */
+        cfs_socket_t       *ksnc_sock;         /* actual socket */
         void               *ksnc_saved_data_ready; /* socket's original data_ready() callback */
         void               *ksnc_saved_write_space; /* socket's original write_space() callback */
         cfs_atomic_t        ksnc_conn_refcount; /* conn refcount */
         cfs_atomic_t        ksnc_sock_refcount; /* sock refcount */
-        ksock_sched_t      *ksnc_scheduler;     /* who schedules this connection */
-        __u32               ksnc_myipaddr;      /* my IP */
-        __u32               ksnc_ipaddr;        /* peer's IP */
-        int                 ksnc_port;          /* peer's port */
-        int                 ksnc_type:3;        /* type of connection, should be signed value */
-        int                 ksnc_closing:1;     /* being shut down */
-        int                 ksnc_flip:1;        /* flip or not, only for V2.x */
-        int                 ksnc_zc_capable:1;  /* enable to ZC */
-        struct ksock_proto *ksnc_proto;         /* protocol for the connection */
+        ksock_sched_t      *ksnc_scheduler;  /* who schedules this connection */
+        __u32               ksnc_myipaddr;   /* my IP */
+        __u32               ksnc_ipaddr;     /* peer's IP */
+        int                 ksnc_port;       /* peer's port */
+        int                 ksnc_type:3;     /* type of connection; should be a signed value */
+        int                 ksnc_closing:1;  /* being shut down */
+        int                 ksnc_flip:1;     /* flip or not, only for V2.x */
+        int                 ksnc_zc_capable:1; /* can use zero copy */
+        struct ksock_proto *ksnc_proto;      /* protocol for the connection */
 
         /* reader */
-        struct list_head    ksnc_rx_list;       /* where I enq waiting input or a forwarding descriptor */
-        cfs_time_t          ksnc_rx_deadline;   /* when (in jiffies) receive times out */
-        __u8                ksnc_rx_started;    /* started receiving a message */
-        __u8                ksnc_rx_ready;      /* data ready to read */
-        __u8                ksnc_rx_scheduled;  /* being progressed */
-        __u8                ksnc_rx_state;      /* what is being read */
-        int                 ksnc_rx_nob_left;   /* # bytes to next hdr/body  */
-        int                 ksnc_rx_nob_wanted; /* bytes actually wanted */
-        int                 ksnc_rx_niov;       /* # iovec frags */
-        struct iovec       *ksnc_rx_iov;        /* the iovec frags */
-        int                 ksnc_rx_nkiov;      /* # page frags */
-        lnet_kiov_t        *ksnc_rx_kiov;       /* the page frags */
-        ksock_rxiovspace_t  ksnc_rx_iov_space;  /* space for frag descriptors */
-        __u32               ksnc_rx_csum;       /* partial checksum for incoming data */
-        void               *ksnc_cookie;        /* rx lnet_finalize passthru arg */
-        ksock_msg_t         ksnc_msg;           /* incoming message buffer:
-                                                 * V2.x message takes the whole struct
-                                                 * V1.x message is a bare lnet_hdr_t, it's stored
-                                                 * in ksnc_msg.ksm_u.lnetmsg */
+        cfs_list_t            ksnc_rx_list;     /* where I enq waiting input or a forwarding descriptor */
+        cfs_time_t            ksnc_rx_deadline; /* when (in jiffies) receive times out */
+        __u8                  ksnc_rx_started;  /* started receiving a message */
+        __u8                  ksnc_rx_ready;    /* data ready to read */
+        __u8                  ksnc_rx_scheduled;/* being progressed */
+        __u8                  ksnc_rx_state;    /* what is being read */
+        int                   ksnc_rx_nob_left; /* # bytes to next hdr/body */
+        int                   ksnc_rx_nob_wanted; /* bytes actually wanted */
+        int                   ksnc_rx_niov;     /* # iovec frags */
+        struct iovec         *ksnc_rx_iov;      /* the iovec frags */
+        int                   ksnc_rx_nkiov;    /* # page frags */
+        lnet_kiov_t          *ksnc_rx_kiov;     /* the page frags */
+        ksock_rxiovspace_t    ksnc_rx_iov_space;/* space for frag descriptors */
+        __u32                 ksnc_rx_csum;     /* partial checksum for incoming data */
+        void                 *ksnc_cookie;      /* rx lnet_finalize passthru arg */
+        ksock_msg_t           ksnc_msg;         /* incoming message buffer:
+                                                 * V2.x message takes the
+                                                 * whole struct
+                                                 * V1.x message is a bare
+                                                 * lnet_hdr_t, it's stored in
+                                                 * ksnc_msg.ksm_u.lnetmsg */
 
         /* WRITER */
-        struct list_head    ksnc_tx_list;       /* where I enq waiting for output space */
-        struct list_head    ksnc_tx_queue;      /* packets waiting to be sent */
-        ksock_tx_t         *ksnc_tx_carrier;    /* next TX that can carry a LNet message or ZC-ACK */
-        cfs_time_t          ksnc_tx_deadline;   /* when (in jiffies) tx times out */
-        int                 ksnc_tx_bufnob;     /* send buffer marker */
-        cfs_atomic_t        ksnc_tx_nob;        /* # bytes queued */
-        int                 ksnc_tx_ready;      /* write space */
-        int                 ksnc_tx_scheduled;  /* being progressed */
-        cfs_time_t          ksnc_tx_last_post;  /* time stamp of the last posted TX */
+        cfs_list_t            ksnc_tx_list;     /* where I enq waiting for output space */
+        cfs_list_t            ksnc_tx_queue;    /* packets waiting to be sent */
+        ksock_tx_t           *ksnc_tx_carrier;  /* next TX that can carry a LNet message or ZC-ACK */
+        cfs_time_t            ksnc_tx_deadline; /* when (in jiffies) tx times out */
+        int                   ksnc_tx_bufnob;     /* send buffer marker */
+        cfs_atomic_t          ksnc_tx_nob;        /* # bytes queued */
+        int                   ksnc_tx_ready;      /* write space */
+        int                   ksnc_tx_scheduled;  /* being progressed */
+        cfs_time_t            ksnc_tx_last_post;  /* time stamp of the last posted TX */
 } ksock_conn_t;
 
 typedef struct ksock_route
 {
-        struct list_head    ksnr_list;          /* chain on peer route list */
-        struct list_head    ksnr_connd_list;    /* chain on ksnr_connd_routes */
-        struct ksock_peer  *ksnr_peer;          /* owning peer */
-        cfs_atomic_t        ksnr_refcount;      /* # users */
-        cfs_time_t          ksnr_timeout;       /* when (in jiffies) reconnection can happen next */
-        cfs_duration_t      ksnr_retry_interval; /* how long between retries */
-        __u32               ksnr_myipaddr;      /* my IP */
-        __u32               ksnr_ipaddr;        /* IP address to connect to */
-        int                 ksnr_port;          /* port to connect to */
-        unsigned int        ksnr_scheduled:1;   /* scheduled for attention */
-        unsigned int        ksnr_connecting:1;  /* connection establishment in progress */
-        unsigned int        ksnr_connected:4;   /* connections established by type */
-        unsigned int        ksnr_deleted:1;     /* been removed from peer? */
-        unsigned int        ksnr_share_count;   /* created explicitly? */
-        int                 ksnr_conn_count;    /* # conns established by this route */
+        cfs_list_t            ksnr_list;        /* chain on peer route list */
+        cfs_list_t            ksnr_connd_list;  /* chain on ksnr_connd_routes */
+        struct ksock_peer    *ksnr_peer;        /* owning peer */
+        cfs_atomic_t          ksnr_refcount;    /* # users */
+        cfs_time_t            ksnr_timeout;     /* when (in jiffies) reconnection can happen next */
+        cfs_duration_t        ksnr_retry_interval; /* how long between retries */
+        __u32                 ksnr_myipaddr;    /* my IP */
+        __u32                 ksnr_ipaddr;      /* IP address to connect to */
+        int                   ksnr_port;        /* port to connect to */
+        unsigned int          ksnr_scheduled:1; /* scheduled for attention */
+        unsigned int          ksnr_connecting:1;/* connection establishment in progress */
+        unsigned int          ksnr_connected:4; /* connections established by type */
+        unsigned int          ksnr_deleted:1;   /* been removed from peer? */
+        unsigned int          ksnr_share_count; /* created explicitly? */
+        int                   ksnr_conn_count;  /* # conns established by this route */
 } ksock_route_t;
 
 #define SOCKNAL_KEEPALIVE_PING          1       /* cookie for keepalive ping */
 
 typedef struct ksock_peer
 {
-        struct list_head    ksnp_list;          /* stash on global peer list */
-        cfs_time_t          ksnp_last_alive;    /* when (in jiffies) I was last alive */
-        lnet_process_id_t   ksnp_id;            /* who's on the other end(s) */
-        cfs_atomic_t        ksnp_refcount;      /* # users */
-        int                 ksnp_sharecount;    /* lconf usage counter */
-        int                 ksnp_closing;       /* being closed */
-        int                 ksnp_accepting;     /* # passive connections pending */
-        int                 ksnp_error;         /* errno on closing last conn */
-        __u64               ksnp_zc_next_cookie;/* ZC completion cookie */
-        __u64               ksnp_incarnation;   /* latest known peer incarnation */
-        struct ksock_proto *ksnp_proto;         /* latest known peer protocol */
-        struct list_head    ksnp_conns;         /* all active connections */
-        struct list_head    ksnp_routes;        /* routes */
-        struct list_head    ksnp_tx_queue;      /* waiting packets */
-        cfs_spinlock_t      ksnp_lock;          /* serialize, NOT safe in g_lock */
-        struct list_head    ksnp_zc_req_list;   /* zero copy requests wait for ACK  */
-        cfs_time_t          ksnp_send_keepalive; /* time to send keepalive */
-        lnet_ni_t          *ksnp_ni;            /* which network */
-        int                 ksnp_n_passive_ips; /* # of... */
-        __u32               ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
+        cfs_list_t            ksnp_list;        /* stash on global peer list */
+        cfs_time_t            ksnp_last_alive;  /* when (in jiffies) I was last alive */
+        lnet_process_id_t     ksnp_id;       /* who's on the other end(s) */
+        cfs_atomic_t          ksnp_refcount; /* # users */
+        int                   ksnp_sharecount;  /* lconf usage counter */
+        int                   ksnp_closing;  /* being closed */
+        int                   ksnp_accepting;/* # passive connections pending */
+        int                   ksnp_error;    /* errno on closing last conn */
+        __u64                 ksnp_zc_next_cookie;/* ZC completion cookie */
+        __u64                 ksnp_incarnation;   /* latest known peer incarnation */
+        struct ksock_proto   *ksnp_proto;    /* latest known peer protocol */
+        cfs_list_t            ksnp_conns;    /* all active connections */
+        cfs_list_t            ksnp_routes;   /* routes */
+        cfs_list_t            ksnp_tx_queue; /* waiting packets */
+        cfs_spinlock_t        ksnp_lock;     /* serialize, NOT safe in g_lock */
+        cfs_list_t            ksnp_zc_req_list;   /* zero copy requests wait for ACK  */
+        cfs_time_t            ksnp_send_keepalive; /* time to send keepalive */
+        lnet_ni_t            *ksnp_ni;       /* which network */
+        int                   ksnp_n_passive_ips; /* # entries in ksnp_passive_ips */
+        __u32                 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
 } ksock_peer_t;
 
 typedef struct ksock_connreq
 {
-        struct list_head    ksncr_list;         /* stash on ksnd_connd_connreqs */
-        lnet_ni_t          *ksncr_ni;           /* chosen NI */
-        cfs_socket_t       *ksncr_sock;         /* accepted socket */
+        cfs_list_t            ksncr_list;     /* stash on ksnd_connd_connreqs */
+        lnet_ni_t            *ksncr_ni;       /* chosen NI */
+        cfs_socket_t         *ksncr_sock;     /* accepted socket */
 } ksock_connreq_t;
 
 extern ksock_nal_data_t ksocknal_data;
 extern ksock_tunables_t ksocknal_tunables;
 
-#define SOCKNAL_MATCH_NO        0               /* TX can't match type of connection */
-#define SOCKNAL_MATCH_YES       1               /* TX matches type of connection */
-#define SOCKNAL_MATCH_MAY       2               /* TX can be sent on the connection, but not preferred */
+#define SOCKNAL_MATCH_NO        0        /* TX can't match type of connection */
+#define SOCKNAL_MATCH_YES       1        /* TX matches type of connection */
+#define SOCKNAL_MATCH_MAY       2        /* TX can be sent on the connection, but not preferred */
 
 typedef struct ksock_proto
 {
@@ -407,7 +409,7 @@ ksocknal_route_mask(void)
                 (1 << SOCKLND_CONN_BULK_OUT));
 }
 
-static inline struct list_head *
+static inline cfs_list_t *
 ksocknal_nid2peerlist (lnet_nid_t nid)
 {
         unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
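
The helper above hashes a NID straight into ksnd_peers, the table that
ksocknal_base_startup() sized to SOCKNAL_PEER_HASH_SIZE earlier in this
patch. The hunk truncates the body; completing it as a sketch, with the
bucket head being the cfs_list_t slot that callers walk via
cfs_list_for_each():

        static inline cfs_list_t *
        nid2peerlist_sketch(lnet_nid_t nid)
        {
                unsigned int hash =
                        ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;

                return &ksocknal_data.ksnd_peers[hash];
        }
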
@@ -546,7 +548,8 @@ extern void ksocknal_free_tx (ksock_tx_t *tx);
 extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
 extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
 extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
-extern void ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error);
+extern void ksocknal_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
+                                  int error);
 extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
 extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
 extern int ksocknal_thread_start (int (*fn)(void *arg), void *arg);
@@ -560,7 +563,7 @@ extern int ksocknal_connd (void *arg);
 extern int ksocknal_reaper (void *arg);
 extern int ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
                                 lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-extern int ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, 
+extern int ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
                                 ksock_hello_msg_t *hello, lnet_process_id_t *id,
                                 __u64 *incarnation);
 extern void ksocknal_read_callback(ksock_conn_t *conn);
index cd30882..1a2354a 100644
@@ -37,11 +37,11 @@ ksocknal_alloc_tx(int type, int size)
                 /* searching for a noop tx in free list */
                 cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
-                if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-                        tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
-                                        ksock_tx_t, tx_list);
+                if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+                        tx = cfs_list_entry(
+                                ksocknal_data.ksnd_idle_noop_txs.next,
+                                ksock_tx_t, tx_list);
                         LASSERT(tx->tx_desc_size == size);
-                        list_del(&tx->tx_list);
+                        cfs_list_del(&tx->tx_list);
                 }
 
                 cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
@@ -98,7 +98,7 @@ ksocknal_free_tx (ksock_tx_t *tx)
                 /* it's a noop tx */
                 cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
-                list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
+                cfs_list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
 
                 cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
         } else {
@@ -406,12 +406,12 @@ ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
 }
 
 void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int error)
 {
         ksock_tx_t *tx;
 
-        while (!list_empty (txlist)) {
-                tx = list_entry (txlist->next, ksock_tx_t, tx_list);
+        while (!cfs_list_empty (txlist)) {
+                tx = cfs_list_entry (txlist->next, ksock_tx_t, tx_list);
 
                 if (error && tx->tx_lnetmsg != NULL) {
                         CDEBUG (D_NETERROR, "Deleting packet type %d len %d %s->%s\n",
@@ -423,7 +423,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
                         CDEBUG (D_NETERROR, "Deleting noop packet\n");
                 }
 
-                list_del (&tx->tx_list);
+                cfs_list_del (&tx->tx_list);
 
                 LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
                 ksocknal_tx_done (ni, tx);
@@ -469,7 +469,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
         if (peer->ksnp_zc_next_cookie == 0)
                 peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
 
-        list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
+        cfs_list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
 
         cfs_spin_unlock(&peer->ksnp_lock);
 }
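
Context for the cookie wrap above: every ZC request carries a per-peer
cookie, and cookie value SOCKNAL_KEEPALIVE_PING (1) is reserved, so when the
64-bit counter wraps it restarts just past the reserved value. A hedged
sketch of the enqueue step; the cookie-assignment line is an assumption, as
it is not shown in this hunk:

        cfs_spin_lock(&peer->ksnp_lock);

        /* assumed assignment: hand the next cookie to this tx */
        tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;

        if (peer->ksnp_zc_next_cookie == 0)     /* counter wrapped */
                peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

        cfs_list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);

        cfs_spin_unlock(&peer->ksnp_lock);
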
@@ -493,7 +493,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
         }
 
         tx->tx_msg.ksm_zc_cookies[0] = 0;
-        list_del(&tx->tx_zc_list);
+        cfs_list_del(&tx->tx_zc_list);
 
         cfs_spin_unlock(&peer->ksnp_lock);
 
@@ -535,8 +535,8 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 
                 /* enomem list takes over scheduler's ref... */
                 LASSERT (conn->ksnc_tx_scheduled);
-                list_add_tail(&conn->ksnc_tx_list,
-                              &ksocknal_data.ksnd_enomem_conns);
+                cfs_list_add_tail(&conn->ksnc_tx_list,
+                                  &ksocknal_data.ksnd_enomem_conns);
                 if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
                                                    SOCKNAL_ENOMEM_RETRY),
                                    ksocknal_data.ksnd_reaper_waketime))
@@ -595,8 +595,8 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
 
         cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
-        list_add_tail (&route->ksnr_connd_list,
-                       &ksocknal_data.ksnd_connd_routes);
+        cfs_list_add_tail (&route->ksnr_connd_list,
+                           &ksocknal_data.ksnd_connd_routes);
         cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
 
         cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
@@ -621,15 +621,15 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
 ksock_conn_t *
 ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 {
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         ksock_conn_t     *conn;
         ksock_conn_t     *typed = NULL;
         ksock_conn_t     *fallback = NULL;
         int               tnob     = 0;
         int               fnob     = 0;
 
-        list_for_each (tmp, &peer->ksnp_conns) {
-                ksock_conn_t *c   = list_entry(tmp, ksock_conn_t, ksnc_list);
+        cfs_list_for_each (tmp, &peer->ksnp_conns) {
+                ksock_conn_t *c  = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
                 int           nob = cfs_atomic_read(&c->ksnc_tx_nob) +
                                     libcfs_sock_wmem_queued(c->ksnc_sock);
                 int           rc;
@@ -730,7 +730,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
         bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
         cfs_spin_lock_bh (&sched->kss_lock);
 
-        if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+        if (cfs_list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
                 /* First packet starts the timeout */
                 conn->ksnc_tx_deadline =
                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
@@ -761,15 +761,15 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 
         if (ztx != NULL) {
                 cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
-                list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
+                cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
         }
 
         if (conn->ksnc_tx_ready &&      /* able to send */
             !conn->ksnc_tx_scheduled) { /* not scheduled to send */
                 /* +1 ref for scheduler */
                 ksocknal_conn_addref(conn);
-                list_add_tail (&conn->ksnc_tx_list,
-                               &sched->kss_tx_conns);
+                cfs_list_add_tail (&conn->ksnc_tx_list,
+                                   &sched->kss_tx_conns);
                 conn->ksnc_tx_scheduled = 1;
                 cfs_waitq_signal (&sched->kss_waitq);
         }
@@ -781,11 +781,11 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 ksock_route_t *
 ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
 {
-        struct list_head  *tmp;
+        cfs_list_t        *tmp;
         ksock_route_t     *route;
 
-        list_for_each (tmp, &peer->ksnp_routes) {
-                route = list_entry (tmp, ksock_route_t, ksnr_list);
+        cfs_list_for_each (tmp, &peer->ksnp_routes) {
+                route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
 
                 LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
 
@@ -811,11 +811,11 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
 ksock_route_t *
 ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
 {
-        struct list_head  *tmp;
+        cfs_list_t        *tmp;
         ksock_route_t     *route;
 
-        list_for_each (tmp, &peer->ksnp_routes) {
-                route = list_entry (tmp, ksock_route_t, ksnr_list);
+        cfs_list_for_each (tmp, &peer->ksnp_routes) {
+                route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
 
                 LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
 
@@ -905,7 +905,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
                 /* Queue the message until a connection is established */
-                list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+                cfs_list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
                 cfs_write_unlock_bh (g_lock);
                 return 0;
         }
@@ -1340,7 +1340,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
         switch (conn->ksnc_rx_state) {
         case SOCKNAL_RX_PARSE_WAIT:
-                list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+                cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
                 cfs_waitq_signal (&sched->kss_waitq);
                 LASSERT (conn->ksnc_rx_ready);
                 break;
@@ -1365,8 +1365,8 @@ ksocknal_sched_cansleep(ksock_sched_t *sched)
         cfs_spin_lock_bh (&sched->kss_lock);
 
         rc = (!ksocknal_data.ksnd_shuttingdown &&
-              list_empty(&sched->kss_rx_conns) &&
-              list_empty(&sched->kss_tx_conns));
+              cfs_list_empty(&sched->kss_rx_conns) &&
+              cfs_list_empty(&sched->kss_tx_conns));
 
         cfs_spin_unlock_bh (&sched->kss_lock);
         return (rc);
@@ -1396,10 +1396,10 @@ int ksocknal_scheduler (void *arg)
 
                 /* Ensure I progress everything semi-fairly */
 
-                if (!list_empty (&sched->kss_rx_conns)) {
-                        conn = list_entry(sched->kss_rx_conns.next,
-                                          ksock_conn_t, ksnc_rx_list);
-                        list_del(&conn->ksnc_rx_list);
+                if (!cfs_list_empty (&sched->kss_rx_conns)) {
+                        conn = cfs_list_entry(sched->kss_rx_conns.next,
+                                              ksock_conn_t, ksnc_rx_list);
+                        cfs_list_del(&conn->ksnc_rx_list);
 
                         LASSERT(conn->ksnc_rx_scheduled);
                         LASSERT(conn->ksnc_rx_ready);
@@ -1429,8 +1429,8 @@ int ksocknal_scheduler (void *arg)
                                 conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
                         } else if (conn->ksnc_rx_ready) {
                                 /* reschedule for rx */
-                                list_add_tail (&conn->ksnc_rx_list,
-                                               &sched->kss_rx_conns);
+                                cfs_list_add_tail (&conn->ksnc_rx_list,
+                                                   &sched->kss_rx_conns);
                         } else {
                                 conn->ksnc_rx_scheduled = 0;
                                 /* drop my ref */
@@ -1440,30 +1440,31 @@ int ksocknal_scheduler (void *arg)
                         did_something = 1;
                 }
 
-                if (!list_empty (&sched->kss_tx_conns)) {
+                if (!cfs_list_empty (&sched->kss_tx_conns)) {
                         CFS_LIST_HEAD    (zlist);
 
-                        if (!list_empty(&sched->kss_zombie_noop_txs)) {
-                                list_add(&zlist, &sched->kss_zombie_noop_txs);
-                                list_del_init(&sched->kss_zombie_noop_txs);
+                        if (!cfs_list_empty(&sched->kss_zombie_noop_txs)) {
+                                cfs_list_add(&zlist,
+                                             &sched->kss_zombie_noop_txs);
+                                cfs_list_del_init(&sched->kss_zombie_noop_txs);
                         }
 
-                        conn = list_entry(sched->kss_tx_conns.next,
-                                          ksock_conn_t, ksnc_tx_list);
-                        list_del (&conn->ksnc_tx_list);
+                        conn = cfs_list_entry(sched->kss_tx_conns.next,
+                                              ksock_conn_t, ksnc_tx_list);
+                        cfs_list_del (&conn->ksnc_tx_list);
 
                         LASSERT(conn->ksnc_tx_scheduled);
                         LASSERT(conn->ksnc_tx_ready);
-                        LASSERT(!list_empty(&conn->ksnc_tx_queue));
+                        LASSERT(!cfs_list_empty(&conn->ksnc_tx_queue));
 
-                        tx = list_entry(conn->ksnc_tx_queue.next,
-                                        ksock_tx_t, tx_list);
+                        tx = cfs_list_entry(conn->ksnc_tx_queue.next,
+                                            ksock_tx_t, tx_list);
 
                         if (conn->ksnc_tx_carrier == tx)
                                 ksocknal_next_tx_carrier(conn);
 
                         /* dequeue now so empty list => more to send */
-                        list_del(&tx->tx_list);
+                        cfs_list_del(&tx->tx_list);
 
                         /* Clear tx_ready in case send isn't complete.  Do
                          * it BEFORE we call process_transmit, since
@@ -1472,7 +1473,7 @@ int ksocknal_scheduler (void *arg)
                         conn->ksnc_tx_ready = 0;
                         cfs_spin_unlock_bh (&sched->kss_lock);
 
-                        if (!list_empty(&zlist)) {
+                        if (!cfs_list_empty(&zlist)) {
                                /* free zombie noop txs; it's fast because
                                 * noop txs just go back on the freelist */
                                 ksocknal_txlist_done(NULL, &zlist, 0);
@@ -1483,7 +1484,8 @@ int ksocknal_scheduler (void *arg)
                         if (rc == -ENOMEM || rc == -EAGAIN) {
                                 /* Incomplete send: replace tx on HEAD of tx_queue */
                                 cfs_spin_lock_bh (&sched->kss_lock);
-                                list_add (&tx->tx_list, &conn->ksnc_tx_queue);
+                                cfs_list_add (&tx->tx_list,
+                                              &conn->ksnc_tx_queue);
                         } else {
                                 /* Complete send; tx -ref */
                                 ksocknal_tx_decref (tx);
@@ -1497,10 +1499,10 @@ int ksocknal_scheduler (void *arg)
                                 /* Do nothing; after a short timeout, this
                                  * conn will be reposted on kss_tx_conns. */
                         } else if (conn->ksnc_tx_ready &&
-                                   !list_empty (&conn->ksnc_tx_queue)) {
+                                   !cfs_list_empty (&conn->ksnc_tx_queue)) {
                                 /* reschedule for tx */
-                                list_add_tail (&conn->ksnc_tx_list,
-                                               &sched->kss_tx_conns);
+                                cfs_list_add_tail (&conn->ksnc_tx_list,
+                                                   &sched->kss_tx_conns);
                         } else {
                                 conn->ksnc_tx_scheduled = 0;
                                 /* drop my ref */
@@ -1521,7 +1523,7 @@ int ksocknal_scheduler (void *arg)
                                         !ksocknal_sched_cansleep(sched), rc);
                                 LASSERT (rc == 0);
                         } else {
-                                our_cond_resched();
+                                cfs_cond_resched();
                         }
 
                         cfs_spin_lock_bh (&sched->kss_lock);
@@ -1549,8 +1551,8 @@ void ksocknal_read_callback (ksock_conn_t *conn)
         conn->ksnc_rx_ready = 1;
 
         if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
-                list_add_tail(&conn->ksnc_rx_list,
-                              &sched->kss_rx_conns);
+                cfs_list_add_tail(&conn->ksnc_rx_list,
+                                  &sched->kss_rx_conns);
                 conn->ksnc_rx_scheduled = 1;
                 /* extra ref for scheduler */
                 ksocknal_conn_addref(conn);
@@ -1577,10 +1579,10 @@ void ksocknal_write_callback (ksock_conn_t *conn)
 
         conn->ksnc_tx_ready = 1;
 
-        if (!conn->ksnc_tx_scheduled && // not being progressed 
-            !list_empty(&conn->ksnc_tx_queue)){//packets to send 
-                list_add_tail (&conn->ksnc_tx_list,
-                               &sched->kss_tx_conns);
+        if (!conn->ksnc_tx_scheduled &&             /* not being progressed */
+            !cfs_list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
+                cfs_list_add_tail (&conn->ksnc_tx_list,
+                                   &sched->kss_tx_conns);
                 conn->ksnc_tx_scheduled = 1;
                 /* extra ref for scheduler */
                 ksocknal_conn_addref(conn);
@@ -1954,28 +1956,29 @@ ksocknal_connect (ksock_route_t *route)
         route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                            route->ksnr_retry_interval);
 
-        if (!list_empty(&peer->ksnp_tx_queue) &&
+        if (!cfs_list_empty(&peer->ksnp_tx_queue) &&
             peer->ksnp_accepting == 0 &&
             ksocknal_find_connecting_route_locked(peer) == NULL) {
                 ksock_conn_t *conn;
 
                 /* ksnp_tx_queue is queued on a conn on successful
                  * connection for V1.x and V2.x */
-                if (!list_empty (&peer->ksnp_conns)) {
-                        conn = list_entry(peer->ksnp_conns.next, ksock_conn_t, ksnc_list);
+                if (!cfs_list_empty (&peer->ksnp_conns)) {
+                        conn = cfs_list_entry(peer->ksnp_conns.next,
+                                              ksock_conn_t, ksnc_list);
                         LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
                 }
 
                 /* take all the blocked packets while I've got the lock and
                  * complete below... */
-                list_splice_init(&peer->ksnp_tx_queue, &zombies);
+                cfs_list_splice_init(&peer->ksnp_tx_queue, &zombies);
         }
 
 #if 0           /* irrelevant with only eager routes */
         if (!route->ksnr_deleted) {
                 /* make this route least-favourite for re-selection */
-                list_del(&route->ksnr_list);
-                list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+                cfs_list_del(&route->ksnr_list);
+                cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
         }
 #endif
         cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
@@ -2001,8 +2004,8 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
         now = cfs_time_current();
 
         /* connd_routes can contain both pending and ordinary routes */
-        list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
-                             ksnr_connd_list) {
+        cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+                                 ksnr_connd_list) {
 
                 if (route->ksnr_retry_interval == 0 ||
                     cfs_time_aftereq(now, route->ksnr_timeout))
@@ -2039,12 +2042,12 @@ ksocknal_connd (void *arg)
 
                 dropped_lock = 0;
 
-                if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+                if (!cfs_list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
                         /* Connection accepted by the listener */
-                        cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
-                                        ksock_connreq_t, ksncr_list);
+                        cr = cfs_list_entry(
+                                ksocknal_data.ksnd_connd_connreqs.next,
+                                ksock_connreq_t, ksncr_list);
 
-                        list_del(&cr->ksncr_list);
+                        cfs_list_del(&cr->ksncr_list);
                         cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
                         dropped_lock = 1;
 
@@ -2063,7 +2066,7 @@ ksocknal_connd (void *arg)
                 route = ksocknal_connd_get_route_locked(&timeout);
 
                 if (route != NULL) {
-                        list_del (&route->ksnr_connd_list);
+                        cfs_list_del (&route->ksnr_connd_list);
                         ksocknal_data.ksnd_connd_connecting++;
                         cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
                         dropped_lock = 1;
@@ -2080,7 +2083,8 @@ ksocknal_connd (void *arg)
 
                 /* Nothing to do for 'timeout'  */
                 cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive (&ksocknal_data.ksnd_connd_waitq, &wait);
+                cfs_waitq_add_exclusive (&ksocknal_data.ksnd_connd_waitq,
+                                         &wait);
                 cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
                 cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
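
The idle path above is the classic no-missed-wakeup pattern: mark the task
interruptible and join the waitqueue before dropping the lock, so a signal
arriving in the gap is still seen, then block with a timeout so that routes
whose retry interval has expired are re-polled even if nothing signals.
Condensed from the hunk, no new names:

        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
        cfs_waitq_add_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
        cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

        cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
        /* on wakeup: leave the waitqueue and retake ksnd_connd_lock */
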
@@ -2101,11 +2105,11 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 {
         /* We're called with a shared lock on ksnd_global_lock */
         ksock_conn_t      *conn;
-        struct list_head  *ctmp;
+        cfs_list_t        *ctmp;
 
-        list_for_each (ctmp, &peer->ksnp_conns) {
+        cfs_list_for_each (ctmp, &peer->ksnp_conns) {
                 int     error;
-                conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+                conn = cfs_list_entry (ctmp, ksock_conn_t, ksnc_list);
 
                 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                 LASSERT (!conn->ksnc_closing);
@@ -2162,7 +2166,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
                         return (conn);
                 }
 
-                if ((!list_empty(&conn->ksnc_tx_queue) ||
+                if ((!cfs_list_empty(&conn->ksnc_tx_queue) ||
                      libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
                     cfs_time_aftereq(cfs_time_current(),
                                      conn->ksnc_tx_deadline)) {
@@ -2187,19 +2191,19 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
 {
         ksock_tx_t        *tx;
         CFS_LIST_HEAD      (stale_txs);
-        
+
         cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
-        while (!list_empty (&peer->ksnp_tx_queue)) {
-                tx = list_entry (peer->ksnp_tx_queue.next,
-                                 ksock_tx_t, tx_list);
+        while (!cfs_list_empty (&peer->ksnp_tx_queue)) {
+                tx = cfs_list_entry (peer->ksnp_tx_queue.next,
+                                     ksock_tx_t, tx_list);
 
                 if (!cfs_time_aftereq(cfs_time_current(),
                                       tx->tx_deadline))
                         break;
-                
-                list_del (&tx->tx_list);
-                list_add_tail (&tx->tx_list, &stale_txs);
+
+                cfs_list_del (&tx->tx_list);
+                cfs_list_add_tail (&tx->tx_list, &stale_txs);
         }
 
         cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
@@ -2214,7 +2218,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
         ksock_conn_t   *conn;
         ksock_tx_t     *tx;
 
-        if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+        if (cfs_list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
                 return 0;
 
         if (peer->ksnp_proto != &ksocknal_protocol_v3x)
@@ -2238,32 +2242,32 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
         if (conn != NULL) {
                 sched = conn->ksnc_scheduler;
 
-                spin_lock_bh (&sched->kss_lock);
-                if (!list_empty(&conn->ksnc_tx_queue)) {
-                        spin_unlock_bh(&sched->kss_lock);
+                cfs_spin_lock_bh (&sched->kss_lock);
+                if (!cfs_list_empty(&conn->ksnc_tx_queue)) {
+                        cfs_spin_unlock_bh(&sched->kss_lock);
                        /* there is a queued ACK; no keepalive needed */
                         return 0;
                 }
 
-                spin_unlock_bh(&sched->kss_lock);
+                cfs_spin_unlock_bh(&sched->kss_lock);
         }
 
-        read_unlock(&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock(&ksocknal_data.ksnd_global_lock);
 
         /* cookie = 1 is reserved for keepalive PING */
         tx = ksocknal_alloc_tx_noop(1, 1);
         if (tx == NULL) {
-                read_lock(&ksocknal_data.ksnd_global_lock);
+                cfs_read_lock(&ksocknal_data.ksnd_global_lock);
                 return -ENOMEM;
         }
 
         if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
-                read_lock(&ksocknal_data.ksnd_global_lock);
+                cfs_read_lock(&ksocknal_data.ksnd_global_lock);
                 return 1;
         }
 
         ksocknal_free_tx(tx);
-        read_lock(&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock(&ksocknal_data.ksnd_global_lock);
 
         return -EIO;
 }
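
One subtlety in ksocknal_send_keepalive_locked() above: it is entered and
must be left with ksnd_global_lock read-held, yet allocating the noop tx may
sleep, so the lock is dropped around the allocation and retaken on every
return path. Reduced sketch, using only calls visible in the hunk
(1 = keepalive launched, 0 = not needed, negative = error):

        cfs_read_unlock(&ksocknal_data.ksnd_global_lock);

        tx = ksocknal_alloc_tx_noop(1, 1);      /* cookie 1: keepalive PING */
        if (tx == NULL) {
                cfs_read_lock(&ksocknal_data.ksnd_global_lock);
                return -ENOMEM;
        }

        if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
                cfs_read_lock(&ksocknal_data.ksnd_global_lock);
                return 1;
        }

        ksocknal_free_tx(tx);
        cfs_read_lock(&ksocknal_data.ksnd_global_lock);
        return -EIO;
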
@@ -2272,7 +2276,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 void
 ksocknal_check_peer_timeouts (int idx)
 {
-        struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+        cfs_list_t       *peers = &ksocknal_data.ksnd_peers[idx];
         ksock_peer_t     *peer;
         ksock_conn_t     *conn;
 
@@ -2284,7 +2288,7 @@ ksocknal_check_peer_timeouts (int idx)
 
         cfs_list_for_each_entry_typed(peer, peers, ksock_peer_t, ksnp_list) {
                 if (ksocknal_send_keepalive_locked(peer) != 0) {
-                        read_unlock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                         goto again;
                 }
 
@@ -2304,9 +2308,10 @@ ksocknal_check_peer_timeouts (int idx)
 
                 /* we can't process stale txs right here because we're
                  * holding only shared lock */
-                if (!list_empty (&peer->ksnp_tx_queue)) {
-                        ksock_tx_t *tx = list_entry (peer->ksnp_tx_queue.next,
-                                                     ksock_tx_t, tx_list);
+                if (!cfs_list_empty (&peer->ksnp_tx_queue)) {
+                        ksock_tx_t *tx =
+                                cfs_list_entry (peer->ksnp_tx_queue.next,
+                                                ksock_tx_t, tx_list);
 
                         if (cfs_time_aftereq(cfs_time_current(),
                                              tx->tx_deadline)) {
@@ -2336,8 +2341,8 @@ ksocknal_check_peer_timeouts (int idx)
                 }
 
                 if (n != 0) {
-                        tx = list_entry (peer->ksnp_zc_req_list.next,
-                                         ksock_tx_t, tx_zc_list);
+                        tx = cfs_list_entry (peer->ksnp_zc_req_list.next,
+                                             ksock_tx_t, tx_zc_list);
                         CWARN("Stale ZC_REQs for peer %s detected: %d; the "
                               "oldest (%p) timed out %ld secs ago\n",
                               libcfs_nid2str(peer->ksnp_id.nid), n, tx,
@@ -2355,7 +2360,7 @@ ksocknal_reaper (void *arg)
         cfs_waitlink_t     wait;
         ksock_conn_t      *conn;
         ksock_sched_t     *sched;
-        struct list_head   enomem_conns;
+        cfs_list_t         enomem_conns;
         int                nenomem_conns;
         cfs_duration_t     timeout;
         int                i;
@@ -2372,10 +2377,11 @@ ksocknal_reaper (void *arg)
 
         while (!ksocknal_data.ksnd_shuttingdown) {
 
-                if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
-                        conn = list_entry (ksocknal_data.ksnd_deathrow_conns.next,
-                                           ksock_conn_t, ksnc_list);
-                        list_del (&conn->ksnc_list);
+                if (!cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
+                        conn = cfs_list_entry (ksocknal_data.
+                                               ksnd_deathrow_conns.next,
+                                               ksock_conn_t, ksnc_list);
+                        cfs_list_del (&conn->ksnc_list);
 
                         cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
@@ -2386,10 +2392,10 @@ ksocknal_reaper (void *arg)
                         continue;
                 }
 
-                if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
-                        conn = list_entry (ksocknal_data.ksnd_zombie_conns.next,
-                                           ksock_conn_t, ksnc_list);
-                        list_del (&conn->ksnc_list);
+                if (!cfs_list_empty (&ksocknal_data.ksnd_zombie_conns)) {
+                        conn = cfs_list_entry (ksocknal_data.ksnd_zombie_conns.
+                                               next, ksock_conn_t, ksnc_list);
+                        cfs_list_del (&conn->ksnc_list);
 
                         cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
@@ -2399,19 +2405,20 @@ ksocknal_reaper (void *arg)
                         continue;
                 }
 
-                if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
-                        list_add(&enomem_conns, &ksocknal_data.ksnd_enomem_conns);
-                        list_del_init(&ksocknal_data.ksnd_enomem_conns);
+                if (!cfs_list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+                        cfs_list_add(&enomem_conns,
+                                     &ksocknal_data.ksnd_enomem_conns);
+                        cfs_list_del_init(&ksocknal_data.ksnd_enomem_conns);
                 }
 
                 cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                 /* reschedule all the connections that stalled with ENOMEM... */
                 nenomem_conns = 0;
-                while (!list_empty (&enomem_conns)) {
-                        conn = list_entry (enomem_conns.next,
-                                           ksock_conn_t, ksnc_tx_list);
-                        list_del (&conn->ksnc_tx_list);
+                while (!cfs_list_empty (&enomem_conns)) {
+                        conn = cfs_list_entry (enomem_conns.next,
+                                               ksock_conn_t, ksnc_tx_list);
+                        cfs_list_del (&conn->ksnc_tx_list);
 
                         sched = conn->ksnc_scheduler;
 
@@ -2419,7 +2426,8 @@ ksocknal_reaper (void *arg)
 
                         LASSERT (conn->ksnc_tx_scheduled);
                         conn->ksnc_tx_ready = 1;
-                        list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+                        cfs_list_add_tail(&conn->ksnc_tx_list,
+                                          &sched->kss_tx_conns);
                         cfs_waitq_signal (&sched->kss_waitq);
 
                         cfs_spin_unlock_bh (&sched->kss_lock);
@@ -2468,9 +2476,10 @@ ksocknal_reaper (void *arg)
                 cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
 
                 if (!ksocknal_data.ksnd_shuttingdown &&
-                    list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
-                    list_empty (&ksocknal_data.ksnd_zombie_conns))
-                        cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+                    cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
+                    cfs_list_empty (&ksocknal_data.ksnd_zombie_conns))
+                        cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE,
+                                             timeout);
 
                 cfs_set_current_state (CFS_TASK_RUNNING);
                 cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
index 73c1958..5637620 100644 (file)
@@ -558,7 +558,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
                         nob += scratchiov[i].iov_len;
                 }
 
-                if (!list_empty(&conn->ksnc_tx_queue) ||
+                if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
                     nob < tx->tx_resid)
                         msg.msg_flags |= MSG_MORE;
 
@@ -593,7 +593,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
                 CDEBUG(D_NET, "page %p + offset %x for %d\n",
                                page, offset, kiov->kiov_len);
 
-                if (!list_empty(&conn->ksnc_tx_queue) ||
+                if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
                     fragsize < tx->tx_resid)
                         msgflg |= MSG_MORE;
 
@@ -633,7 +633,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
                         nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                 }
 
-                if (!list_empty(&conn->ksnc_tx_queue) ||
+                if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
                     nob < tx->tx_resid)
                         msg.msg_flags |= MSG_MORE;
 
@@ -1191,7 +1191,7 @@ ksocknal_write_space (struct sock *sk)
                                       " ready" : " blocked"),
                (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
                                       " scheduled" : " idle"),
-               (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
+               (conn == NULL) ? "" : (cfs_list_empty (&conn->ksnc_tx_queue) ?
                                       " empty" : " queued"));
 
         if (conn == NULL) {             /* raced with ksocknal_terminate_conn */
@@ -1256,7 +1256,7 @@ ksocknal_lib_memory_pressure(ksock_conn_t *conn)
         
         sched = conn->ksnc_scheduler;
         cfs_spin_lock_bh (&sched->kss_lock);
-        
+
         if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
             !conn->ksnc_tx_ready) {
                 /* SOCK_NOSPACE is set when the socket fills
@@ -1269,7 +1269,7 @@ ksocknal_lib_memory_pressure(ksock_conn_t *conn)
                  * after a timeout */
                 rc = -ENOMEM;
         }
-        
+
         cfs_spin_unlock_bh (&sched->kss_lock);
 
         return rc;
index 6b88bac..52f4a09 100755 (executable)
@@ -328,7 +328,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
                 ksocknal_lib_csum_tx(tx);
 
         nob = ks_query_iovs_length(tx->tx_iov, tx->tx_niov);
-        flags = (!list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ? 
+        flags = (!cfs_list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
                 (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
         rc = ks_send_iovs(sock, tx->tx_iov, tx->tx_niov, flags, 0);
 
@@ -349,7 +349,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
 
         nkiov = tx->tx_nkiov;
         nob = ks_query_kiovs_length(tx->tx_kiov, nkiov);
-        flags = (!list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ? 
+        flags = (!cfs_list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
                 (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
         rc = ks_send_kiovs(sock, tx->tx_kiov, nkiov, flags, 0);
 
@@ -544,7 +544,7 @@ ksocknal_lib_push_conn (ksock_conn_t *conn)
 
         ks_get_tconn(tconn);
 
-        spin_lock(&tconn->kstc_lock);
+        cfs_spin_lock(&tconn->kstc_lock);
         if (tconn->kstc_type == kstt_sender) {
                 nagle = tconn->sender.kstc_info.nagle;
                 tconn->sender.kstc_info.nagle = 0;
@@ -554,7 +554,7 @@ ksocknal_lib_push_conn (ksock_conn_t *conn)
                 tconn->child.kstc_info.nagle = 0;
         }
 
-        spin_unlock(&tconn->kstc_lock);
+        cfs_spin_unlock(&tconn->kstc_lock);
 
         val = 1;
         rc = ks_set_tcp_option(
@@ -565,7 +565,7 @@ ksocknal_lib_push_conn (ksock_conn_t *conn)
                     );
 
         LASSERT (rc == 0);
-        spin_lock(&tconn->kstc_lock);
+        cfs_spin_lock(&tconn->kstc_lock);
 
         if (tconn->kstc_type == kstt_sender) {
                 tconn->sender.kstc_info.nagle = nagle;
@@ -573,7 +573,7 @@ ksocknal_lib_push_conn (ksock_conn_t *conn)
                 LASSERT(tconn->kstc_type == kstt_child);
                 tconn->child.kstc_info.nagle = nagle;
         }
-        spin_unlock(&tconn->kstc_lock);
+        cfs_spin_unlock(&tconn->kstc_lock);
         ks_put_tconn(tconn);
 }
 
@@ -618,25 +618,25 @@ void ksocknal_schedule_callback(struct socket*sock, int mode)
 {
         ksock_conn_t * conn = (ksock_conn_t *) sock->kstc_conn;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
         if (mode) {
                 ksocknal_write_callback(conn);
         } else {
                 ksocknal_read_callback(conn);
         }
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 }
 
 void
 ksocknal_tx_fini_callback(ksock_conn_t * conn, ksock_tx_t * tx)
 {
         /* remove tx/conn from conn's outgoing queue */
-        spin_lock_bh (&conn->ksnc_scheduler->kss_lock);
-        list_del(&tx->tx_list);
-        if (list_empty(&conn->ksnc_tx_queue)) {
-                list_del (&conn->ksnc_tx_list);
+        cfs_spin_lock_bh (&conn->ksnc_scheduler->kss_lock);
+        cfs_list_del(&tx->tx_list);
+        if (cfs_list_empty(&conn->ksnc_tx_queue)) {
+                cfs_list_del (&conn->ksnc_tx_list);
         }
-        spin_unlock_bh (&conn->ksnc_scheduler->kss_lock);
+        cfs_spin_unlock_bh (&conn->ksnc_scheduler->kss_lock);
 
         /* complete send; tx -ref */
         ksocknal_tx_decref (tx);
index 6bc9311..6c08647 100755 (executable)
@@ -57,7 +57,7 @@ int ksocknal_nsched(void)
 static inline int
 ksocknal_nsched(void)
 {
-        return num_online_cpus();
+        return cfs_num_online_cpus();
 }
 
 static inline int
index 20bdc4d..2e1e133 100644 (file)
@@ -45,7 +45,7 @@ static ksock_tx_t *
 ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
 {
         /* V1.x, just enqueue it */
-        list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+        cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
         return NULL;
 }
 
@@ -55,7 +55,7 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
         ksock_tx_t     *tx = conn->ksnc_tx_carrier;
 
         /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
-        LASSERT (!list_empty(&conn->ksnc_tx_queue));
+        LASSERT (!cfs_list_empty(&conn->ksnc_tx_queue));
         LASSERT (tx != NULL);
 
         /* Next TX that can carry ZC-ACK or LNet message */
@@ -63,7 +63,8 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
                 /* no more packets queued */
                 conn->ksnc_tx_carrier = NULL;
         } else {
-                conn->ksnc_tx_carrier = list_entry(tx->tx_list.next, ksock_tx_t, tx_list);
+                conn->ksnc_tx_carrier = cfs_list_entry(tx->tx_list.next,
+                                                       ksock_tx_t, tx_list);
                 LASSERT (conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
         }
 }
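
Most of the mechanical churn in this patch is the list_entry() to cfs_list_entry() rename; the semantics are the classic container_of idiom, mapping a pointer to an embedded cfs_list_t back to its enclosing object. A self-contained sketch of what cfs_list_entry is assumed to expand to (stand-in definitions, not the real libcfs headers):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins mirroring the (assumed) libcfs definitions. */
    typedef struct cfs_list_head {
            struct cfs_list_head *next, *prev;
    } cfs_list_t;

    #define cfs_list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ksock_tx_demo {
            int        tx_id;
            cfs_list_t tx_list;             /* embedded list linkage */
    };

    int main(void)
    {
            struct ksock_tx_demo tx = { 42, { NULL, NULL } };
            cfs_list_t *pos = &tx.tx_list;

            /* Recover the enclosing tx from its list linkage. */
            struct ksock_tx_demo *back =
                    cfs_list_entry(pos, struct ksock_tx_demo, tx_list);
            printf("tx_id = %d\n", back->tx_id);
            return 0;
    }
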
@@ -86,7 +87,8 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
          */
         if (tx == NULL) {
                 if (tx_ack != NULL) {
-                        list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+                        cfs_list_add_tail(&tx_ack->tx_list,
+                                          &conn->ksnc_tx_queue);
                         conn->ksnc_tx_carrier = tx_ack;
                 }
                 return 0;
@@ -95,7 +97,8 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
         if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
                 /* tx is noop zc-ack, can't piggyback zc-ack cookie */
                 if (tx_ack != NULL)
-                        list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+                        cfs_list_add_tail(&tx_ack->tx_list,
+                                          &conn->ksnc_tx_queue);
                 return 0;
         }
 
@@ -126,13 +129,13 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
          *   and replace the NOOP tx, and return the NOOP tx.
          */
         if (tx == NULL) { /* nothing on queue */
-                list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+                cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
                 conn->ksnc_tx_carrier = tx_msg;
                 return NULL;
         }
 
         if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
-                list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+                cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
                 return NULL;
         }
 
@@ -143,8 +146,8 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
         ksocknal_next_tx_carrier(conn);
 
         /* use new_tx to replace the noop zc-ack packet */
-        list_add(&tx_msg->tx_list, &tx->tx_list);
-        list_del(&tx->tx_list);
+        cfs_list_add(&tx_msg->tx_list, &tx->tx_list);
+        cfs_list_del(&tx->tx_list);
 
         return tx;
 }
@@ -164,7 +167,8 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
 
         if ((tx = conn->ksnc_tx_carrier) == NULL) {
                 if (tx_ack != NULL) {
-                        list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+                        cfs_list_add_tail(&tx_ack->tx_list,
+                                          &conn->ksnc_tx_queue);
                         conn->ksnc_tx_carrier = tx_ack;
                 }
                 return 0;
@@ -257,7 +261,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
 
         /* failed to piggyback ZC-ACK */
         if (tx_ack != NULL) {
-                list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+                cfs_list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
                 /* the next tx can piggyback at least 1 ACK */
                 ksocknal_next_tx_carrier(conn);
         }
@@ -376,7 +380,7 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
                 cfs_spin_unlock_bh (&sched->kss_lock);
 
                 if (rc) { /* piggybacked */
-                        read_unlock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                         return 0;
                 }
         }
@@ -418,14 +422,14 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
 
         cfs_spin_lock(&peer->ksnp_lock);
 
-        list_for_each_entry_safe(tx, tmp,
-                                 &peer->ksnp_zc_req_list, tx_zc_list) {
+        cfs_list_for_each_entry_safe(tx, tmp,
+                                     &peer->ksnp_zc_req_list, tx_zc_list) {
                 __u64 c = tx->tx_msg.ksm_zc_cookies[0];
 
                 if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
                         tx->tx_msg.ksm_zc_cookies[0] = 0;
-                        list_del(&tx->tx_zc_list);
-                        list_add(&tx->tx_zc_list, &zlist);
+                        cfs_list_del(&tx->tx_zc_list);
+                        cfs_list_add(&tx->tx_zc_list, &zlist);
 
                         if (--count == 0)
                                 break;
@@ -434,9 +438,9 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
 
         cfs_spin_unlock(&peer->ksnp_lock);
 
-        while (!list_empty(&zlist)) {
-                tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
-                list_del(&tx->tx_zc_list);
+        while (!cfs_list_empty(&zlist)) {
+                tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+                cfs_list_del(&tx->tx_zc_list);
                 ksocknal_tx_decref(tx);
         }
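
The cookie test in ksocknal_handle_zcack() above treats (cookie1, cookie2) as an inclusive range, with cookie1 == cookie2 denoting a single cookie; matching requests are moved onto the private zlist under ksnp_lock and only released after the lock is dropped. The range predicate in isolation:

    #include <stdio.h>

    /* Same predicate as the zc-ack loop above. */
    static int cookie_in_range(unsigned long long c,
                               unsigned long long cookie1,
                               unsigned long long cookie2)
    {
            return c == cookie1 || c == cookie2 ||
                   (cookie1 < c && c < cookie2);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   cookie_in_range(5, 5, 5),    /* 1: single-cookie ack */
                   cookie_in_range(7, 5, 9),    /* 1: inside the range */
                   cookie_in_range(10, 5, 9));  /* 0: outside */
            return 0;
    }
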
 
index 5b143a9..1a5652c 100644 (file)
@@ -47,9 +47,9 @@ struct {
         int                   pta_shutdown;
         cfs_socket_t         *pta_sock;
 #ifdef __KERNEL__
-        struct semaphore      pta_signal;
+        cfs_completion_t      pta_signal;
 #else
-        struct cfs_completion pta_signal;
+        cfs_mt_completion_t   pta_signal;
 #endif
 } lnet_acceptor_state;
 
@@ -68,11 +68,11 @@ lnet_accept_magic(__u32 magic, __u32 constant)
 
 #ifdef __KERNEL__
 
-#define cfs_init_completion(c)     init_mutex_locked(c)
-#define cfs_wait_for_completion(c) mutex_down(c)
-#define cfs_complete(c)            mutex_up(c)
-#define cfs_fini_completion(c)     do { } while (0)
-#define cfs_create_thread(func, a) cfs_kernel_thread(func, a, 0)
+#define cfs_mt_init_completion(c)     cfs_init_completion(c)
+#define cfs_mt_wait_for_completion(c) cfs_wait_for_completion(c)
+#define cfs_mt_complete(c)            cfs_complete(c)
+#define cfs_mt_fini_completion(c)     cfs_fini_completion(c)
+#define cfs_create_thread(func, a)    cfs_kernel_thread(func, a, 0)
 
 EXPORT_SYMBOL(lnet_acceptor_port);
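
With the mapping above, the kernel build routes the new cfs_mt_* ("multi-threaded") completion names onto the plain cfs_completion primitives, so the acceptor start-up handshake reads identically in kernel and userspace: the parent blocks on pta_signal until the acceptor thread posts its init status. A userspace analogue with POSIX primitives (illustrative only, not the libcfs implementation):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int signalled;
    static int pta_shutdown;                /* init status from the thread */

    static void *acceptor(void *arg)
    {
            pta_shutdown = 0;               /* started OK */
            pthread_mutex_lock(&lock);
            signalled = 1;                  /* ~ cfs_mt_complete() */
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, acceptor, NULL);
            pthread_mutex_lock(&lock);      /* ~ cfs_mt_wait_for_completion() */
            while (!signalled)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            printf("acceptor init status: %d\n", pta_shutdown);
            pthread_join(t, NULL);
            return 0;
    }
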
 
@@ -435,7 +435,7 @@ lnet_acceptor(void *arg)
 
         /* set init status and unblock parent */
         lnet_acceptor_state.pta_shutdown = rc;
-        cfs_complete(&lnet_acceptor_state.pta_signal);
+        cfs_mt_complete(&lnet_acceptor_state.pta_signal);
 
         if (rc != 0)
                 return rc;
@@ -494,7 +494,7 @@ lnet_acceptor(void *arg)
         LCONSOLE(0, "Acceptor stopping\n");
 
         /* unblock lnet_acceptor_stop() */
-        cfs_complete(&lnet_acceptor_state.pta_signal);
+        cfs_mt_complete(&lnet_acceptor_state.pta_signal);
         return 0;
 }
 
@@ -534,11 +534,11 @@ lnet_acceptor_start(void)
         if ((the_lnet.ln_pid & LNET_PID_USERFLAG) != 0)
                 return 0;
 #endif
-        cfs_init_completion(&lnet_acceptor_state.pta_signal);
 
+        cfs_mt_init_completion(&lnet_acceptor_state.pta_signal);
         rc = accept2secure(accept_type, &secure);
         if (rc <= 0) {
-                cfs_fini_completion(&lnet_acceptor_state.pta_signal);
+                cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
                 return rc;
         }
 
@@ -548,12 +548,13 @@ lnet_acceptor_start(void)
         rc2 = cfs_create_thread(lnet_acceptor, (void *)(ulong_ptr_t)secure);
         if (rc2 < 0) {
                 CERROR("Can't start acceptor thread: %d\n", rc);
-                cfs_fini_completion(&lnet_acceptor_state.pta_signal);
+                cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+
                 return -ESRCH;
         }
 
         /* wait for acceptor to startup */
-        cfs_wait_for_completion(&lnet_acceptor_state.pta_signal);
+        cfs_mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
 
         if (!lnet_acceptor_state.pta_shutdown) {
                 /* started OK */
@@ -562,7 +563,8 @@ lnet_acceptor_start(void)
         }
 
         LASSERT (lnet_acceptor_state.pta_sock == NULL);
-        cfs_fini_completion(&lnet_acceptor_state.pta_signal);
+        cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+
         return -ENETDOWN;
 }
 
@@ -576,9 +578,9 @@ lnet_acceptor_stop(void)
         libcfs_sock_abort_accept(lnet_acceptor_state.pta_sock);
 
         /* block until acceptor signals exit */
-        cfs_wait_for_completion(&lnet_acceptor_state.pta_signal);
+        cfs_mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
 
-        cfs_fini_completion(&lnet_acceptor_state.pta_signal);
+        cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
 }
 
 #else /* single-threaded user-space */
index 75cbbcc..4d3fd1a 100644 (file)
@@ -91,10 +91,10 @@ lnet_get_networks(void)
 void
 lnet_init_locks(void)
 {
-        spin_lock_init (&the_lnet.ln_lock);
+        cfs_spin_lock_init (&the_lnet.ln_lock);
         cfs_waitq_init (&the_lnet.ln_waitq);
-        init_mutex(&the_lnet.ln_lnd_mutex);
-        init_mutex(&the_lnet.ln_api_mutex);
+        cfs_init_mutex(&the_lnet.ln_lnd_mutex);
+        cfs_init_mutex(&the_lnet.ln_api_mutex);
 }
 
 void
@@ -123,7 +123,7 @@ lnet_get_networks (void)
         int               len;
         int               nob;
         int               rc;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
 
 #ifdef NOT_YET
         if (networks != NULL && ip2nets != NULL) {
@@ -150,8 +150,8 @@ lnet_get_networks (void)
         *str = 0;
         sep = "";
 
-        list_for_each (tmp, &the_lnet.ln_lnds) {
-                lnd_t *lnd = list_entry(tmp, lnd_t, lnd_list);
+        cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
+                lnd_t *lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
 
                 nob = snprintf(str, len, "%s%s", sep,
                                libcfs_lnd2str(lnd->lnd_type));
@@ -303,11 +303,11 @@ lnd_t *
 lnet_find_lnd_by_type (int type)
 {
         lnd_t              *lnd;
-        struct list_head   *tmp;
+        cfs_list_t         *tmp;
 
         /* holding lnd mutex */
-        list_for_each (tmp, &the_lnet.ln_lnds) {
-                lnd = list_entry(tmp, lnd_t, lnd_list);
+        cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
+                lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
 
                 if ((int)lnd->lnd_type == type)
                         return lnd;
@@ -325,7 +325,7 @@ lnet_register_lnd (lnd_t *lnd)
         LASSERT (libcfs_isknown_lnd(lnd->lnd_type));
         LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
 
-        list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
+        cfs_list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
         lnd->lnd_refcount = 0;
 
         CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
@@ -342,7 +342,7 @@ lnet_unregister_lnd (lnd_t *lnd)
         LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
         LASSERT (lnd->lnd_refcount == 0);
 
-        list_del (&lnd->lnd_list);
+        cfs_list_del (&lnd->lnd_list);
         CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
 
         LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
@@ -384,7 +384,7 @@ lnet_freelist_init (lnet_freelist_t *fl, int n, int size)
         do
         {
                 memset (space, 0, size);
-                list_add ((struct list_head *)space, &fl->fl_list);
+                cfs_list_add ((cfs_list_t *)space, &fl->fl_list);
                 space += size;
         } while (--n != 0);
 
@@ -394,7 +394,7 @@ lnet_freelist_init (lnet_freelist_t *fl, int n, int size)
 void
 lnet_freelist_fini (lnet_freelist_t *fl)
 {
-        struct list_head *el;
+        cfs_list_t       *el;
         int               count;
 
         if (fl->fl_nobjs == 0)
@@ -466,7 +466,7 @@ lnet_create_interface_cookie (void)
         int            rc = gettimeofday (&tv, NULL);
         LASSERT (rc == 0);
 #else
-        do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
 #endif
         cookie = tv.tv_sec;
         cookie *= 1000000;
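
The interface cookie above is seeded from the wall-clock time at startup, with the seconds scaled into microsecond units (the hunk is cut off before the remaining arithmetic). The same derivation in userspace form:

    #include <stdio.h>
    #include <sys/time.h>

    int main(void)
    {
            struct timeval tv;
            unsigned long long cookie;

            gettimeofday(&tv, NULL);
            cookie  = (unsigned long long)tv.tv_sec;
            cookie *= 1000000;              /* seconds -> microsecond units */
            printf("interface cookie seed: %llu\n", cookie);
            return 0;
    }
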
@@ -481,12 +481,13 @@ lnet_setup_handle_hash (void)
 
         /* Arbitrary choice of hash table size */
 #ifdef __KERNEL__
-        the_lnet.ln_lh_hash_size = CFS_PAGE_SIZE / sizeof (struct list_head);
+        the_lnet.ln_lh_hash_size =
+                CFS_PAGE_SIZE / sizeof (cfs_list_t);
 #else
         the_lnet.ln_lh_hash_size = (MAX_MES + MAX_MDS + MAX_EQS)/4;
 #endif
         LIBCFS_ALLOC(the_lnet.ln_lh_hash_table,
-                     the_lnet.ln_lh_hash_size * sizeof (struct list_head));
+                     the_lnet.ln_lh_hash_size * sizeof (cfs_list_t));
         if (the_lnet.ln_lh_hash_table == NULL)
                 return (-ENOMEM);
 
@@ -505,15 +506,15 @@ lnet_cleanup_handle_hash (void)
                 return;
 
         LIBCFS_FREE(the_lnet.ln_lh_hash_table,
-                    the_lnet.ln_lh_hash_size * sizeof (struct list_head));
+                    the_lnet.ln_lh_hash_size * sizeof (cfs_list_t));
 }
 
 lnet_libhandle_t *
 lnet_lookup_cookie (__u64 cookie, int type)
 {
         /* ALWAYS called with LNET_LOCK held */
-        struct list_head    *list;
-        struct list_head    *el;
+        cfs_list_t          *list;
+        cfs_list_t          *el;
         unsigned int         hash;
 
         if ((cookie & (LNET_COOKIE_TYPES - 1)) != type)
@@ -522,9 +523,9 @@ lnet_lookup_cookie (__u64 cookie, int type)
         hash = ((unsigned int)cookie) % the_lnet.ln_lh_hash_size;
         list = &the_lnet.ln_lh_hash_table[hash];
 
-        list_for_each (el, list) {
-                lnet_libhandle_t *lh = list_entry (el, lnet_libhandle_t,
-                                                  lh_hash_chain);
+        cfs_list_for_each (el, list) {
+                lnet_libhandle_t *lh = cfs_list_entry (el, lnet_libhandle_t,
+                                                      lh_hash_chain);
 
                 if (lh->lh_cookie == cookie)
                         return (lh);
@@ -544,14 +545,14 @@ lnet_initialise_handle (lnet_libhandle_t *lh, int type)
         the_lnet.ln_next_object_cookie += LNET_COOKIE_TYPES;
 
         hash = ((unsigned int)lh->lh_cookie) % the_lnet.ln_lh_hash_size;
-        list_add (&lh->lh_hash_chain, &the_lnet.ln_lh_hash_table[hash]);
+        cfs_list_add (&lh->lh_hash_chain, &the_lnet.ln_lh_hash_table[hash]);
 }
 
 void
 lnet_invalidate_handle (lnet_libhandle_t *lh)
 {
         /* ALWAYS called with LNET_LOCK held */
-        list_del (&lh->lh_hash_chain);
+        cfs_list_del (&lh->lh_hash_chain);
 }
 
 int
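
The handle machinery above encodes the object type in the low bits of each cookie (masked with LNET_COOKIE_TYPES - 1 in lnet_lookup_cookie()), advances ln_next_object_cookie by LNET_COOKIE_TYPES per object so the type bits never change, and buckets by the truncated cookie modulo the table size. A sketch of that scheme (LNET_COOKIE_TYPES = 4 is an assumed value for illustration):

    #include <stdio.h>

    #define LNET_COOKIE_TYPES 4U            /* assumed power of two */

    int main(void)
    {
            unsigned long long next_cookie = 2;   /* type-2 objects */
            unsigned int       hash_size   = 1024;

            for (int i = 0; i < 3; i++) {
                    unsigned long long cookie = next_cookie;
                    next_cookie += LNET_COOKIE_TYPES;

                    printf("cookie %llu: type %llu, bucket %u\n",
                           cookie,
                           cookie & (LNET_COOKIE_TYPES - 1),
                           (unsigned int)cookie % hash_size);
            }
            return 0;
    }
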
@@ -560,7 +561,7 @@ lnet_init_finalizers(void)
 #ifdef __KERNEL__
         int    i;
 
-        the_lnet.ln_nfinalizers = (int) num_online_cpus();
+        the_lnet.ln_nfinalizers = (int) cfs_num_online_cpus();
 
         LIBCFS_ALLOC(the_lnet.ln_finalizers,
                      the_lnet.ln_nfinalizers *
@@ -595,7 +596,7 @@ lnet_fini_finalizers(void)
 #else
         LASSERT (!the_lnet.ln_finalizing);
 #endif
-        LASSERT (list_empty(&the_lnet.ln_finalizeq));
+        LASSERT (cfs_list_empty(&the_lnet.ln_finalizeq));
 }
 
 #ifndef __KERNEL__
@@ -710,51 +711,51 @@ lnet_unprepare (void)
 
         lnet_fail_nid(LNET_NID_ANY, 0);
 
-        LASSERT (list_empty(&the_lnet.ln_test_peers));
+        LASSERT (cfs_list_empty(&the_lnet.ln_test_peers));
         LASSERT (the_lnet.ln_refcount == 0);
-        LASSERT (list_empty(&the_lnet.ln_nis));
-        LASSERT (list_empty(&the_lnet.ln_zombie_nis));
+        LASSERT (cfs_list_empty(&the_lnet.ln_nis));
+        LASSERT (cfs_list_empty(&the_lnet.ln_zombie_nis));
         LASSERT (the_lnet.ln_nzombie_nis == 0);
 
         for (idx = 0; idx < the_lnet.ln_nportals; idx++) {
-                LASSERT (list_empty(&the_lnet.ln_portals[idx].ptl_msgq));
+                LASSERT (cfs_list_empty(&the_lnet.ln_portals[idx].ptl_msgq));
 
-                while (!list_empty (&the_lnet.ln_portals[idx].ptl_ml)) {
-                        lnet_me_t *me = list_entry (the_lnet.ln_portals[idx].ptl_ml.next,
-                                                    lnet_me_t, me_list);
+                while (!cfs_list_empty (&the_lnet.ln_portals[idx].ptl_ml)) {
+                        lnet_me_t *me = cfs_list_entry (the_lnet.ln_portals[idx].ptl_ml.next,
+                                                        lnet_me_t, me_list);
 
                         CERROR ("Active me %p on exit\n", me);
-                        list_del (&me->me_list);
+                        cfs_list_del (&me->me_list);
                         lnet_me_free (me);
                 }
         }
 
-        while (!list_empty (&the_lnet.ln_active_mds)) {
-                lnet_libmd_t *md = list_entry (the_lnet.ln_active_mds.next,
-                                               lnet_libmd_t, md_list);
+        while (!cfs_list_empty (&the_lnet.ln_active_mds)) {
+                lnet_libmd_t *md = cfs_list_entry (the_lnet.ln_active_mds.next,
+                                                   lnet_libmd_t, md_list);
 
                 CERROR ("Active md %p on exit\n", md);
-                list_del_init (&md->md_list);
+                cfs_list_del_init (&md->md_list);
                 lnet_md_free (md);
         }
 
-        while (!list_empty (&the_lnet.ln_active_eqs)) {
-                lnet_eq_t *eq = list_entry (the_lnet.ln_active_eqs.next,
-                                            lnet_eq_t, eq_list);
+        while (!cfs_list_empty (&the_lnet.ln_active_eqs)) {
+                lnet_eq_t *eq = cfs_list_entry (the_lnet.ln_active_eqs.next,
+                                                lnet_eq_t, eq_list);
 
                 CERROR ("Active eq %p on exit\n", eq);
-                list_del (&eq->eq_list);
+                cfs_list_del (&eq->eq_list);
                 lnet_eq_free (eq);
         }
 
-        while (!list_empty (&the_lnet.ln_active_msgs)) {
-                lnet_msg_t *msg = list_entry (the_lnet.ln_active_msgs.next,
-                                              lnet_msg_t, msg_activelist);
+        while (!cfs_list_empty (&the_lnet.ln_active_msgs)) {
+                lnet_msg_t *msg = cfs_list_entry (the_lnet.ln_active_msgs.next,
+                                                  lnet_msg_t, msg_activelist);
 
                 CERROR ("Active msg %p on exit\n", msg);
                 LASSERT (msg->msg_onactivelist);
                 msg->msg_onactivelist = 0;
-                list_del (&msg->msg_activelist);
+                cfs_list_del (&msg->msg_activelist);
                 lnet_msg_free (msg);
         }
 
@@ -773,11 +774,11 @@ lnet_unprepare (void)
 lnet_ni_t  *
 lnet_net2ni_locked (__u32 net)
 {
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         lnet_ni_t        *ni;
 
-        list_for_each (tmp, &the_lnet.ln_nis) {
-                ni = list_entry(tmp, lnet_ni_t, ni_list);
+        cfs_list_for_each (tmp, &the_lnet.ln_nis) {
+                ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
 
                 if (LNET_NIDNET(ni->ni_nid) == net) {
                         lnet_ni_addref_locked(ni);
@@ -805,11 +806,11 @@ lnet_islocalnet (__u32 net)
 lnet_ni_t  *
 lnet_nid2ni_locked (lnet_nid_t nid)
 {
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         lnet_ni_t        *ni;
 
-        list_for_each (tmp, &the_lnet.ln_nis) {
-                ni = list_entry(tmp, lnet_ni_t, ni_list);
+        cfs_list_for_each (tmp, &the_lnet.ln_nis) {
+                ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
 
                 if (ni->ni_nid == nid) {
                         lnet_ni_addref_locked(ni);
@@ -838,14 +839,14 @@ int
 lnet_count_acceptor_nis (void)
 {
         /* Return the # of NIs that need the acceptor. */
-        int                count = 0;
+        int            count = 0;
 #if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
-        struct list_head  *tmp;
-        lnet_ni_t         *ni;
+        cfs_list_t    *tmp;
+        lnet_ni_t     *ni;
 
         LNET_LOCK();
-        list_for_each (tmp, &the_lnet.ln_nis) {
-                ni = list_entry(tmp, lnet_ni_t, ni_list);
+        cfs_list_for_each (tmp, &the_lnet.ln_nis) {
+                ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
 
                 if (ni->ni_lnd->lnd_accept != NULL)
                         count++;
@@ -869,18 +870,18 @@ lnet_shutdown_lndnis (void)
         /* All quiet on the API front */
         LASSERT (!the_lnet.ln_shutdown);
         LASSERT (the_lnet.ln_refcount == 0);
-        LASSERT (list_empty(&the_lnet.ln_zombie_nis));
+        LASSERT (cfs_list_empty(&the_lnet.ln_zombie_nis));
         LASSERT (the_lnet.ln_nzombie_nis == 0);
-        LASSERT (list_empty(&the_lnet.ln_remote_nets));
+        LASSERT (cfs_list_empty(&the_lnet.ln_remote_nets));
 
         LNET_LOCK();
         the_lnet.ln_shutdown = 1;               /* flag shutdown */
 
         /* Unlink NIs from the global table */
-        while (!list_empty(&the_lnet.ln_nis)) {
-                ni = list_entry(the_lnet.ln_nis.next,
-                                lnet_ni_t, ni_list);
-                list_del (&ni->ni_list);
+        while (!cfs_list_empty(&the_lnet.ln_nis)) {
+                ni = cfs_list_entry(the_lnet.ln_nis.next,
+                                    lnet_ni_t, ni_list);
+                cfs_list_del (&ni->ni_list);
 
                 the_lnet.ln_nzombie_nis++;
                 lnet_ni_decref_locked(ni); /* drop apini's ref */
@@ -915,7 +916,7 @@ lnet_shutdown_lndnis (void)
         i = 2;
         while (the_lnet.ln_nzombie_nis != 0) {
 
-                while (list_empty(&the_lnet.ln_zombie_nis)) {
+                while (cfs_list_empty(&the_lnet.ln_zombie_nis)) {
                         LNET_UNLOCK();
                         ++i;
                         if ((i & (-i)) == i)
@@ -925,16 +926,16 @@ lnet_shutdown_lndnis (void)
                         LNET_LOCK();
                 }
 
-                ni = list_entry(the_lnet.ln_zombie_nis.next,
-                                lnet_ni_t, ni_list);
-                list_del(&ni->ni_list);
+                ni = cfs_list_entry(the_lnet.ln_zombie_nis.next,
+                                    lnet_ni_t, ni_list);
+                cfs_list_del(&ni->ni_list);
                 ni->ni_lnd->lnd_refcount--;
 
                 LNET_UNLOCK();
 
                 islo = ni->ni_lnd->lnd_type == LOLND;
 
-                LASSERT (!in_interrupt ());
+                LASSERT (!cfs_in_interrupt ());
                 (ni->ni_lnd->lnd_shutdown)(ni);
 
                 /* can't deref lnd anymore now; it might have unregistered
@@ -965,7 +966,7 @@ lnet_startup_lndnis (void)
 {
         lnd_t             *lnd;
         lnet_ni_t         *ni;
-        struct list_head   nilist;
+        cfs_list_t         nilist;
         int                rc = 0;
         int                lnd_type;
         int                nicount = 0;
@@ -980,8 +981,8 @@ lnet_startup_lndnis (void)
         if (rc != 0)
                 goto failed;
 
-        while (!list_empty(&nilist)) {
-                ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+        while (!cfs_list_empty(&nilist)) {
+                ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
                 lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
 
                 LASSERT (libcfs_isknown_lnd(lnd_type));
@@ -992,7 +993,8 @@ lnet_startup_lndnis (void)
 #ifdef __KERNEL__
                 if (lnd == NULL) {
                         LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
-                        rc = request_module("%s", libcfs_lnd2modname(lnd_type));
+                        rc = cfs_request_module("%s",
+                                                libcfs_lnd2modname(lnd_type));
                         LNET_MUTEX_DOWN(&the_lnet.ln_lnd_mutex);
 
                         lnd = lnet_find_lnd_by_type(lnd_type);
@@ -1042,10 +1044,10 @@ lnet_startup_lndnis (void)
 
                 LASSERT (ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
 
-                list_del(&ni->ni_list);
+                cfs_list_del(&ni->ni_list);
 
                 LNET_LOCK();
-                list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
+                cfs_list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
                 LNET_UNLOCK();
 
                 if (lnd->lnd_type == LOLND) {
@@ -1102,9 +1104,9 @@ lnet_startup_lndnis (void)
  failed:
         lnet_shutdown_lndnis();
 
-        while (!list_empty(&nilist)) {
-                ni = list_entry(nilist.next, lnet_ni_t, ni_list);
-                list_del(&ni->ni_list);
+        while (!cfs_list_empty(&nilist)) {
+                ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
+                cfs_list_del(&ni->ni_list);
                 LIBCFS_FREE(ni, sizeof(*ni));
         }
 
@@ -1150,9 +1152,9 @@ LNetFini(void)
         LASSERT (the_lnet.ln_init);
         LASSERT (the_lnet.ln_refcount == 0);
 
-        while (!list_empty(&the_lnet.ln_lnds))
-                lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
-                                               lnd_t, lnd_list));
+        while (!cfs_list_empty(&the_lnet.ln_lnds))
+                lnet_unregister_lnd(cfs_list_entry(the_lnet.ln_lnds.next,
+                                                   lnd_t, lnd_list));
         lnet_fini_locks();
 
         the_lnet.ln_init = 0;
@@ -1379,7 +1381,7 @@ int
 LNetGetId(unsigned int index, lnet_process_id_t *id)
 {
         lnet_ni_t        *ni;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         int               rc = -ENOENT;
 
         LASSERT (the_lnet.ln_init);
@@ -1387,11 +1389,11 @@ LNetGetId(unsigned int index, lnet_process_id_t *id)
 
         LNET_LOCK();
 
-        list_for_each(tmp, &the_lnet.ln_nis) {
+        cfs_list_for_each(tmp, &the_lnet.ln_nis) {
                 if (index-- != 0)
                         continue;
 
-                ni = list_entry(tmp, lnet_ni_t, ni_list);
+                ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
 
                 id->nid = ni->ni_nid;
                 id->pid = the_lnet.ln_pid;
@@ -1472,7 +1474,7 @@ lnet_destroy_ping_info(void)
 
         LNET_LOCK();
 
-        list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
+        cfs_list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
                 ni->ni_status = NULL;
         }
 
@@ -1757,7 +1759,7 @@ lnet_ping (lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_i
                 tmpid.pid = info->pi_pid;
                 tmpid.nid = info->pi_ni[i].ns_nid;
 #ifdef __KERNEL__
-                if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
+                if (cfs_copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
                         goto out_1;
 #else
                 ids[i] = tmpid;
index f8d6dea..7107751 100644 (file)
 #define DEBUG_SUBSYSTEM S_LNET
 #include <lnet/lib-lnet.h>
 
-typedef struct {                                /* tmp struct for parsing routes */
-       struct list_head   ltb_list;            /* stash on lists */
-       int                ltb_size;            /* allocated size */
-       char               ltb_text[0];         /* text buffer */
+typedef struct {                            /* tmp struct for parsing routes */
+       cfs_list_t         ltb_list;        /* stash on lists */
+       int                ltb_size;        /* allocated size */
+       char               ltb_text[0];     /* text buffer */
 } lnet_text_buf_t;
 
 static int lnet_tbnob = 0;                     /* track text buf allocation */
@@ -48,7 +48,7 @@ static int lnet_tbnob = 0;                    /* track text buf allocation */
 #define LNET_SINGLE_TEXTBUF_NOB  (4<<10)
 
 typedef struct {
-        struct list_head   lre_list;            /* stash in a list */
+        cfs_list_t         lre_list;            /* stash in a list */
         int                lre_min;             /* min value */
         int                lre_max;             /* max value */
         int                lre_stride;          /* stride */
@@ -56,7 +56,7 @@ typedef struct {
 
 static int lnet_re_alloc = 0;                   /* track expr allocation */
 
-void 
+void
 lnet_syntax(char *name, char *str, int offset, int width)
 {
         static char dots[LNET_SINGLE_TEXTBUF_NOB];
@@ -90,10 +90,10 @@ char *
 lnet_trimwhite(char *str)
 {
        char *end;
-       
+
         while (cfs_iswhite(*str))
                str++;
-       
+
        end = str + strlen(str);
        while (end > str) {
                 if (!cfs_iswhite(end[-1]))
@@ -106,13 +106,13 @@ lnet_trimwhite(char *str)
 }
 
 int
-lnet_net_unique(__u32 net, struct list_head *nilist)
+lnet_net_unique(__u32 net, cfs_list_t *nilist)
 {
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         lnet_ni_t        *ni;
 
-        list_for_each (tmp, nilist) {
-                ni = list_entry(tmp, lnet_ni_t, ni_list);
+        cfs_list_for_each (tmp, nilist) {
+                ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
 
                 if (LNET_NIDNET(ni->ni_nid) == net)
                         return 0;
@@ -122,7 +122,7 @@ lnet_net_unique(__u32 net, struct list_head *nilist)
 }
 
 lnet_ni_t *
-lnet_new_ni(__u32 net, struct list_head *nilist)
+lnet_new_ni(__u32 net, cfs_list_t *nilist)
 {
         lnet_ni_t *ni;
 
@@ -147,12 +147,12 @@ lnet_new_ni(__u32 net, struct list_head *nilist)
         CFS_INIT_LIST_HEAD(&ni->ni_txq);
         ni->ni_last_alive = cfs_time_current();
 
-        list_add_tail(&ni->ni_list, nilist);
+        cfs_list_add_tail(&ni->ni_list, nilist);
         return ni;
 }
 
 int
-lnet_parse_networks(struct list_head *nilist, char *networks)
+lnet_parse_networks(cfs_list_t *nilist, char *networks)
 {
        int        tokensize = strlen(networks) + 1;
         char      *tokens;
@@ -287,14 +287,14 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
                 }
        }
 
-        LASSERT (!list_empty(nilist));
+        LASSERT (!cfs_list_empty(nilist));
         return 0;
 
  failed:
-        while (!list_empty(nilist)) {
-                ni = list_entry(nilist->next, lnet_ni_t, ni_list);
+        while (!cfs_list_empty(nilist)) {
+                ni = cfs_list_entry(nilist->next, lnet_ni_t, ni_list);
                 
-                list_del(&ni->ni_list);
+                cfs_list_del(&ni->ni_list);
                 LIBCFS_FREE(ni, sizeof(*ni));
         }
        LIBCFS_FREE(tokens, tokensize);
@@ -340,26 +340,26 @@ lnet_free_text_buf (lnet_text_buf_t *ltb)
 }
 
 void
-lnet_free_text_bufs(struct list_head *tbs)
+lnet_free_text_bufs(cfs_list_t *tbs)
 {
        lnet_text_buf_t  *ltb;
-       
-       while (!list_empty(tbs)) {
-               ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
-               
-               list_del(&ltb->ltb_list);
+
+       while (!cfs_list_empty(tbs)) {
+               ltb = cfs_list_entry(tbs->next, lnet_text_buf_t, ltb_list);
+
+               cfs_list_del(&ltb->ltb_list);
                lnet_free_text_buf(ltb);
        }
 }
 
 void
-lnet_print_text_bufs(struct list_head *tbs)
+lnet_print_text_bufs(cfs_list_t *tbs)
 {
-       struct list_head  *tmp;
+       cfs_list_t        *tmp;
        lnet_text_buf_t   *ltb;
 
-       list_for_each (tmp, tbs) {
-               ltb = list_entry(tmp, lnet_text_buf_t, ltb_list);
+       cfs_list_for_each (tmp, tbs) {
+               ltb = cfs_list_entry(tmp, lnet_text_buf_t, ltb_list);
 
                CDEBUG(D_WARNING, "%s\n", ltb->ltb_text);
        }
@@ -368,9 +368,9 @@ lnet_print_text_bufs(struct list_head *tbs)
 }
 
 int
-lnet_str2tbs_sep (struct list_head *tbs, char *str) 
+lnet_str2tbs_sep (cfs_list_t *tbs, char *str)
 {
-       struct list_head  pending;
+       cfs_list_t        pending;
        char             *sep;
        int               nob;
         int               i;
@@ -405,7 +405,7 @@ lnet_str2tbs_sep (struct list_head *tbs, char *str)
 
                        ltb->ltb_text[nob] = 0;
 
-                       list_add_tail(&ltb->ltb_list, &pending);
+                       cfs_list_add_tail(&ltb->ltb_list, &pending);
                }
 
                if (*sep == '#') {
@@ -421,12 +421,12 @@ lnet_str2tbs_sep (struct list_head *tbs, char *str)
                str = sep + 1;
        }
 
-       list_splice(&pending, tbs->prev);
+       cfs_list_splice(&pending, tbs->prev);
        return 0;
 }
 
 int
-lnet_expand1tb (struct list_head *list, 
+lnet_expand1tb (cfs_list_t *list,
               char *str, char *sep1, char *sep2, 
               char *item, int itemlen)
 {
@@ -440,21 +440,21 @@ lnet_expand1tb (struct list_head *list,
        ltb = lnet_new_text_buf(len1 + itemlen + len2);
        if (ltb == NULL)
                return -ENOMEM;
-       
+
        memcpy(ltb->ltb_text, str, len1);
        memcpy(&ltb->ltb_text[len1], item, itemlen);
        memcpy(&ltb->ltb_text[len1+itemlen], sep2 + 1, len2);
        ltb->ltb_text[len1 + itemlen + len2] = 0;
-       
-       list_add_tail(&ltb->ltb_list, list);
+
+       cfs_list_add_tail(&ltb->ltb_list, list);
        return 0;
 }
 
 int
-lnet_str2tbs_expand (struct list_head *tbs, char *str)
+lnet_str2tbs_expand (cfs_list_t *tbs, char *str)
 {
        char              num[16];
-       struct list_head  pending;
+       cfs_list_t        pending;
        char             *sep;
        char             *sep2;
        char             *parsed;
@@ -467,7 +467,7 @@ lnet_str2tbs_expand (struct list_head *tbs, char *str)
        int               scanned;
 
        CFS_INIT_LIST_HEAD(&pending);
-       
+
        sep = strchr(str, '[');
        if (sep == NULL)                        /* nothing to expand */
                return 0;
@@ -522,7 +522,7 @@ lnet_str2tbs_expand (struct list_head *tbs, char *str)
                }
        }
                
-       list_splice(&pending, tbs->prev);
+       cfs_list_splice(&pending, tbs->prev);
        return 1;
        
  failed:
@@ -535,7 +535,7 @@ lnet_parse_hops (char *str, unsigned int *hops)
 {
         int     len = strlen(str);
         int     nob = len;
-        
+       
         return (sscanf(str, "%u%n", hops, &nob) >= 1 &&
                 nob == len &&
                 *hops > 0 && *hops < 256);
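
lnet_parse_hops() above relies on sscanf's %n conversion to reject trailing junk: the parse only counts if sscanf consumed the whole token and the value fits in 1..255. The same check, standalone:

    #include <stdio.h>
    #include <string.h>

    static int parse_hops(const char *str, unsigned int *hops)
    {
            int len = (int)strlen(str);
            int nob = len;

            /* %n records how many bytes were consumed, so "3x" is rejected. */
            return sscanf(str, "%u%n", hops, &nob) >= 1 &&
                   nob == len && *hops > 0 && *hops < 256;
    }

    int main(void)
    {
            unsigned int hops;

            printf("\"3\"   -> %d\n", parse_hops("3", &hops));    /* 1 */
            printf("\"3x\"  -> %d\n", parse_hops("3x", &hops));   /* 0 */
            printf("\"300\" -> %d\n", parse_hops("300", &hops));  /* 0 */
            return 0;
    }
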
@@ -548,10 +548,10 @@ lnet_parse_route (char *str, int *im_a_router)
        /* static scratch buffer OK (single threaded) */
        static char       cmd[LNET_SINGLE_TEXTBUF_NOB];
 
-       struct list_head  nets;
-       struct list_head  gateways;
-       struct list_head *tmp1;
-       struct list_head *tmp2;
+       cfs_list_t        nets;
+       cfs_list_t        gateways;
+       cfs_list_t       *tmp1;
+       cfs_list_t       *tmp2;
        __u32             net;
        lnet_nid_t        nid;
        lnet_text_buf_t  *ltb;
@@ -606,10 +606,10 @@ lnet_parse_route (char *str, int *im_a_router)
 
                strcpy(ltb->ltb_text, token);
                tmp1 = &ltb->ltb_list;
-               list_add_tail(tmp1, tmp2);
+               cfs_list_add_tail(tmp1, tmp2);
                
                while (tmp1 != tmp2) {
-                       ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
+                       ltb = cfs_list_entry(tmp1, lnet_text_buf_t, ltb_list);
 
                        rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
                        if (rc < 0)
@@ -618,7 +618,7 @@ lnet_parse_route (char *str, int *im_a_router)
                        tmp1 = tmp1->next;
                        
                        if (rc > 0) {           /* expanded! */
-                               list_del(&ltb->ltb_list);
+                               cfs_list_del(&ltb->ltb_list);
                                lnet_free_text_buf(ltb);
                                continue;
                        }
@@ -640,16 +640,16 @@ lnet_parse_route (char *str, int *im_a_router)
         if (!got_hops)
                 hops = 1;
 
-       LASSERT (!list_empty(&nets));
-       LASSERT (!list_empty(&gateways));
+       LASSERT (!cfs_list_empty(&nets));
+       LASSERT (!cfs_list_empty(&gateways));
 
-       list_for_each (tmp1, &nets) {
-               ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
+       cfs_list_for_each (tmp1, &nets) {
+               ltb = cfs_list_entry(tmp1, lnet_text_buf_t, ltb_list);
                net = libcfs_str2net(ltb->ltb_text);
                LASSERT (net != LNET_NIDNET(LNET_NID_ANY));
 
-               list_for_each (tmp2, &gateways) {
-                       ltb = list_entry(tmp2, lnet_text_buf_t, ltb_list);
+               cfs_list_for_each (tmp2, &gateways) {
+                       ltb = cfs_list_entry(tmp2, lnet_text_buf_t, ltb_list);
                        nid = libcfs_str2nid(ltb->ltb_text);
                        LASSERT (nid != LNET_NID_ANY);
 
@@ -681,19 +681,19 @@ lnet_parse_route (char *str, int *im_a_router)
 }
 
 int
-lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
+lnet_parse_route_tbs(cfs_list_t *tbs, int *im_a_router)
 {
        lnet_text_buf_t   *ltb;
 
-       while (!list_empty(tbs)) {
-               ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
+       while (!cfs_list_empty(tbs)) {
+               ltb = cfs_list_entry(tbs->next, lnet_text_buf_t, ltb_list);
 
                if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
                        lnet_free_text_bufs(tbs);
                        return -EINVAL;
                }
 
-               list_del(&ltb->ltb_list);
+               cfs_list_del(&ltb->ltb_list);
                lnet_free_text_buf(ltb);
        }
 
@@ -703,7 +703,7 @@ lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
 int
 lnet_parse_routes (char *routes, int *im_a_router)
 {
-       struct list_head  tbs;
+       cfs_list_t        tbs;
        int               rc = 0;
 
         *im_a_router = 0;
@@ -722,13 +722,13 @@ lnet_parse_routes (char *routes, int *im_a_router)
 }
 
 void
-lnet_print_range_exprs(struct list_head *exprs)
+lnet_print_range_exprs(cfs_list_t *exprs)
 {
-        struct list_head   *e;
+        cfs_list_t        *e;
         lnet_range_expr_t *lre;
-        
-        list_for_each(e, exprs) {
-                lre = list_entry(exprs->next, lnet_range_expr_t, lre_list);
+
+        cfs_list_for_each(e, exprs) {
+                lre = cfs_list_entry(exprs->next, lnet_range_expr_t, lre_list);
                 
                 CDEBUG(D_WARNING, "%d-%d/%d\n", 
                        lre->lre_min, lre->lre_max, lre->lre_stride);
@@ -738,7 +738,7 @@ lnet_print_range_exprs(struct list_head *exprs)
 }
 
 int
-lnet_new_range_expr(struct list_head *exprs, int min, int max, int stride)
+lnet_new_range_expr(cfs_list_t *exprs, int min, int max, int stride)
 {
         lnet_range_expr_t *lre;
 
@@ -757,26 +757,26 @@ lnet_new_range_expr(struct list_head *exprs, int min, int max, int stride)
         lre->lre_max = max;
         lre->lre_stride = stride;
         
-        list_add(&lre->lre_list, exprs);
+        cfs_list_add(&lre->lre_list, exprs);
         return 0;
 }
 
 void
-lnet_destroy_range_exprs(struct list_head *exprs)
+lnet_destroy_range_exprs(cfs_list_t *exprs)
 {
         lnet_range_expr_t *lre;
         
-        while (!list_empty(exprs)) {
-                lre = list_entry(exprs->next, lnet_range_expr_t, lre_list);
+        while (!cfs_list_empty(exprs)) {
+                lre = cfs_list_entry(exprs->next, lnet_range_expr_t, lre_list);
                 
-                list_del(&lre->lre_list);
+                cfs_list_del(&lre->lre_list);
                 LIBCFS_FREE(lre, sizeof(*lre));
                 lnet_re_alloc--;
         }
 }
 
 int
-lnet_parse_range_expr(struct list_head *exprs, char *str)
+lnet_parse_range_expr(cfs_list_t *exprs, char *str)
 {
         int                nob = strlen(str);
         char              *sep;
@@ -851,8 +851,8 @@ lnet_parse_range_expr(struct list_head *exprs, char *str)
 int
 lnet_match_network_token(char *token, __u32 *ipaddrs, int nip)
 {
-        struct list_head   exprs[4];
-        struct list_head  *e;
+        cfs_list_t         exprs[4];
+        cfs_list_t        *e;
         lnet_range_expr_t *re;
         char              *str;
         int                i;
@@ -885,13 +885,14 @@ lnet_match_network_token(char *token, __u32 *ipaddrs, int nip)
 
         for (match = i = 0; !match && i < nip; i++) {
                 ip = ipaddrs[i];
-                
+
                 for (match = 1, j = 0; match && j < 4; j++) {
                         n = (ip >> (8 * (3 - j))) & 0xff;
                         match = 0;
 
-                        list_for_each(e, &exprs[j]) {
-                                re = list_entry(e, lnet_range_expr_t, lre_list);
+                        cfs_list_for_each(e, &exprs[j]) {
+                                re = cfs_list_entry(e, lnet_range_expr_t,
+                                                    lre_list);
 
                                 if (re->lre_min <= n &&
                                     re->lre_max >= n &&
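
lnet_match_network_token() above matches an IPv4 address by testing each octet, most significant first, against the parsed range expressions; the hunk ends before the stride clause, which presumably requires n - lre_min to be a multiple of lre_stride. A sketch under that assumption:

    #include <stdio.h>

    struct range_expr { int min, max, stride; };

    /* Assumed full predicate: the hunk shows only the min/max clauses. */
    static int expr_match(const struct range_expr *re, int n)
    {
            return re->min <= n && re->max >= n &&
                   (n - re->min) % re->stride == 0;
    }

    int main(void)
    {
            struct range_expr even = { 0, 254, 2 };       /* "[0-254/2]" */
            unsigned int ip = (192u << 24) | (168 << 16) | (1 << 8) | 10;
            int match = 1;

            for (int j = 0; match && j < 4; j++) {
                    int n = (ip >> (8 * (3 - j))) & 0xff;
                    match = expr_match(&even, n);
            }
            printf("match: %d\n", match);   /* 0: octet 1 is odd */
            return 0;
    }
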
@@ -970,7 +971,7 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
         return 1;
 }
 
-__u32 
+__u32
 lnet_netspec2net(char *netspec)
 {
         char   *bracket = strchr(netspec, '(');
@@ -983,27 +984,27 @@ lnet_netspec2net(char *netspec)
 
         if (bracket != NULL)
                 *bracket = '(';
-                
+
         return net;
 }
 
 int
-lnet_splitnets(char *source, struct list_head *nets)
+lnet_splitnets(char *source, cfs_list_t *nets)
 {
         int               offset = 0;
         int               offset2;
         int               len;
         lnet_text_buf_t  *tb;
         lnet_text_buf_t  *tb2;
-        struct list_head *t;
+        cfs_list_t       *t;
         char             *sep;
         char             *bracket;
         __u32             net;
 
-        LASSERT (!list_empty(nets));
+        LASSERT (!cfs_list_empty(nets));
         LASSERT (nets->next == nets->prev);     /* single entry */
-        
-        tb = list_entry(nets->next, lnet_text_buf_t, ltb_list);
+
+        tb = cfs_list_entry(nets->next, lnet_text_buf_t, ltb_list);
 
         for (;;) {
                 sep = strchr(tb->ltb_text, ',');
@@ -1038,8 +1039,8 @@ lnet_splitnets(char *source, struct list_head *nets)
                         return -EINVAL;
                 }
 
-                list_for_each(t, nets) {
-                        tb2 = list_entry(t, lnet_text_buf_t, ltb_list);
+                cfs_list_for_each(t, nets) {
+                        tb2 = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
 
                         if (tb2 == tb)
                                 continue;
@@ -1051,7 +1052,7 @@ lnet_splitnets(char *source, struct list_head *nets)
                                 return -EINVAL;
                         }
                 }
-                
+
                 if (sep == NULL)
                         return 0;
 
@@ -1059,9 +1060,9 @@ lnet_splitnets(char *source, struct list_head *nets)
                 tb2 = lnet_new_text_buf(strlen(sep));
                 if (tb2 == NULL)
                         return -ENOMEM;
-                        
+
                 strcpy(tb2->ltb_text, sep);
-                list_add_tail(&tb2->ltb_list, nets);
+                cfs_list_add_tail(&tb2->ltb_list, nets);
 
                 tb = tb2;
         }
@@ -1070,14 +1071,14 @@ lnet_splitnets(char *source, struct list_head *nets)
 int
 lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
 {
-        static char  networks[LNET_SINGLE_TEXTBUF_NOB];
-        static char  source[LNET_SINGLE_TEXTBUF_NOB];
-
-        struct list_head    raw_entries;
-        struct list_head    matched_nets;
-        struct list_head    current_nets;
-        struct list_head   *t;
-        struct list_head   *t2;
+        static char        networks[LNET_SINGLE_TEXTBUF_NOB];
+        static char        source[LNET_SINGLE_TEXTBUF_NOB];
+
+        cfs_list_t          raw_entries;
+        cfs_list_t          matched_nets;
+        cfs_list_t          current_nets;
+        cfs_list_t         *t;
+        cfs_list_t         *t2;
         lnet_text_buf_t    *tb;
         lnet_text_buf_t    *tb2;
         __u32               net1;
@@ -1101,8 +1102,9 @@ lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
         len = 0;
         rc = 0;
 
-        while (!list_empty(&raw_entries)) {
-                tb = list_entry(raw_entries.next, lnet_text_buf_t, ltb_list);
+        while (!cfs_list_empty(&raw_entries)) {
+                tb = cfs_list_entry(raw_entries.next, lnet_text_buf_t,
+                                    ltb_list);
 
                 strncpy(source, tb->ltb_text, sizeof(source)-1);
                 source[sizeof(source)-1] = 0;
@@ -1112,7 +1114,7 @@ lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
                 if (rc < 0)
                         break;
 
-                list_del(&tb->ltb_list);
+                cfs_list_del(&tb->ltb_list);
 
                 if (rc == 0) {                  /* no match */
                         lnet_free_text_buf(tb);
@@ -1121,19 +1123,20 @@ lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
 
                 /* split into separate networks */
                 CFS_INIT_LIST_HEAD(&current_nets);
-                list_add(&tb->ltb_list, &current_nets);
+                cfs_list_add(&tb->ltb_list, &current_nets);
                 rc = lnet_splitnets(source, &current_nets);
                 if (rc < 0)
                         break;
 
                 dup = 0;
-                list_for_each (t, &current_nets) {
-                        tb = list_entry(t, lnet_text_buf_t, ltb_list);
+                cfs_list_for_each (t, &current_nets) {
+                        tb = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
                         net1 = lnet_netspec2net(tb->ltb_text);
                         LASSERT (net1 != LNET_NIDNET(LNET_NID_ANY));
 
-                        list_for_each(t2, &matched_nets) {
-                                tb2 = list_entry(t2, lnet_text_buf_t, ltb_list);
+                        cfs_list_for_each(t2, &matched_nets) {
+                                tb2 = cfs_list_entry(t2, lnet_text_buf_t,
+                                                     ltb_list);
                                 net2 = lnet_netspec2net(tb2->ltb_text);
                                 LASSERT (net2 != LNET_NIDNET(LNET_NID_ANY));
 
@@ -1152,11 +1155,11 @@ lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
                         continue;
                 }
 
-                list_for_each_safe(t, t2, &current_nets) {
-                        tb = list_entry(t, lnet_text_buf_t, ltb_list);
+                cfs_list_for_each_safe(t, t2, &current_nets) {
+                        tb = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
                         
-                        list_del(&tb->ltb_list);
-                        list_add_tail(&tb->ltb_list, &matched_nets);
+                        cfs_list_del(&tb->ltb_list);
+                        cfs_list_add_tail(&tb->ltb_list, &matched_nets);
 
                         len += snprintf(networks + len, sizeof(networks) - len,
                                         "%s%s", (len == 0) ? "" : ",", 
@@ -1180,7 +1183,7 @@ lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
 
         if (rc < 0)
                 return rc;
-        
+
         *networksp = networks;
         return count;
 }
@@ -1207,7 +1210,7 @@ lnet_ipaddr_enumerate (__u32 **ipaddrsp)
 
         if (nif <= 0)
                 return nif;
-        
+
         LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
         if (ipaddrs == NULL) {
                 CERROR("Can't allocate ipaddrs[%d]\n", nif);
@@ -1218,8 +1221,8 @@ lnet_ipaddr_enumerate (__u32 **ipaddrsp)
         for (i = nip = 0; i < nif; i++) {
                 if (!strcmp(ifnames[i], "lo"))
                         continue;
-                
-                rc = libcfs_ipif_query(ifnames[i], &up, 
+
+                rc = libcfs_ipif_query(ifnames[i], &up,
                                        &ipaddrs[nip], &netmask);
                 if (rc != 0) {
                         CWARN("Can't query interface %s: %d\n",
@@ -1295,7 +1298,7 @@ lnet_parse_ip2nets (char **networksp, char *ip2nets)
 }
 
 int
-lnet_set_ip_niaddr (lnet_ni_t *ni) 
+lnet_set_ip_niaddr (lnet_ni_t *ni)
 {
         __u32  net = LNET_NIDNET(ni->ni_nid);
         char **names;
@@ -1318,7 +1321,7 @@ lnet_set_ip_niaddr (lnet_ni_t *ni)
                                libcfs_net2str(net));
                         return -EPERM;
                 }
-                
+
                 rc = libcfs_ipif_query(ni->ni_interfaces[0],
                                        &up, &ip, &netmask);
                 if (rc != 0) {
@@ -1332,14 +1335,14 @@ lnet_set_ip_niaddr (lnet_ni_t *ni)
                                libcfs_net2str(net), ni->ni_interfaces[0]);
                         return -ENETDOWN;
                 }
-                
+
                 ni->ni_nid = LNET_MKNID(net, ip);
                 return 0;
         }
 
         n = libcfs_ipif_enumerate(&names);
         if (n <= 0) {
-                CERROR("Net %s can't enumerate interfaces: %d\n", 
+                CERROR("Net %s can't enumerate interfaces: %d\n",
                        libcfs_net2str(net), n);
                 return 0;
         }
@@ -1347,15 +1350,15 @@ lnet_set_ip_niaddr (lnet_ni_t *ni)
         for (i = 0; i < n; i++) {
                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
                         continue;
-                
+
                 rc = libcfs_ipif_query(names[i], &up, &ip, &netmask);
-                
+
                 if (rc != 0) {
                         CWARN("Net %s can't query interface %s: %d\n",
                               libcfs_net2str(net), names[i], rc);
                         continue;
                 }
-                        
+
                 if (!up) {
                         CWARN("Net %s ignoring interface %s (down)\n",
                               libcfs_net2str(net), names[i]);
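
The config.c hunks above are representative of the whole patch: each Linux list primitive is swapped for its cfs_-prefixed libcfs twin with identical semantics. As a minimal standalone sketch (not part of the patch; the helper name is invented and the lnet headers are assumed), the post-rename move-between-lists idiom from lnet_match_networks() looks like this:

/* Illustrative only: drain 'src' into 'dst' with the renamed primitives.
 * cfs_list_for_each_safe() tolerates cfs_list_del() on the current node. */
static void
demo_splice_text_bufs(cfs_list_t *src, cfs_list_t *dst)
{
        cfs_list_t *pos;
        cfs_list_t *tmp;

        cfs_list_for_each_safe(pos, tmp, src) {
                lnet_text_buf_t *tb = cfs_list_entry(pos, lnet_text_buf_t,
                                                     ltb_list);

                cfs_list_del(&tb->ltb_list);
                cfs_list_add_tail(&tb->ltb_list, dst);
        }
}
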
index 701352c..61af852 100644
@@ -91,7 +91,7 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
         LNET_LOCK();
 
         lnet_initialise_handle (&eq->eq_lh, LNET_COOKIE_TYPE_EQ);
-        list_add (&eq->eq_list, &the_lnet.ln_active_eqs);
+        cfs_list_add (&eq->eq_list, &the_lnet.ln_active_eqs);
 
         LNET_UNLOCK();
 
@@ -129,7 +129,7 @@ LNetEQFree(lnet_handle_eq_t eqh)
         size    = eq->eq_size;
 
         lnet_invalidate_handle (&eq->eq_lh);
-        list_del (&eq->eq_list);
+        cfs_list_del (&eq->eq_list);
         lnet_eq_free (eq);
 
         LNET_UNLOCK();
@@ -252,7 +252,7 @@ LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
                 }
 
                 cfs_waitlink_init(&wl);
-                set_current_state(TASK_INTERRUPTIBLE);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                 cfs_waitq_add(&the_lnet.ln_waitq, &wl);
 
                 LNET_UNLOCK();
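
The LNetEQPoll() hunk also renames the scheduler hooks: set_current_state(TASK_INTERRUPTIBLE) becomes cfs_set_current_state(CFS_TASK_INTERRUPTIBLE). A hedged sketch of the full sleep sequence follows; only the first three calls appear verbatim above, while cfs_waitlink_t, cfs_waitq_wait() and cfs_waitq_del() are assumed from the same libcfs API family:

static void
demo_wait_on_lnet_waitq(void)
{
        cfs_waitlink_t wl;                            /* assumed libcfs type */

        cfs_waitlink_init(&wl);
        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
        cfs_waitq_add(&the_lnet.ln_waitq, &wl);

        LNET_UNLOCK();
        cfs_waitq_wait(&wl, CFS_TASK_INTERRUPTIBLE);  /* sleep until woken */
        LNET_LOCK();

        cfs_waitq_del(&the_lnet.ln_waitq, &wl);
}
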
index 1d5eb7a..1210cf2 100644
@@ -77,8 +77,8 @@ lnet_md_unlink(lnet_libmd_t *md)
                 LASSERT (md->md_eq->eq_refcount >= 0);
         }
 
-        LASSERT (!list_empty(&md->md_list));
-        list_del_init (&md->md_list);
+        LASSERT (!cfs_list_empty(&md->md_list));
+        cfs_list_del_init (&md->md_list);
         lnet_md_free(md);
 }
 
@@ -187,8 +187,8 @@ lib_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 
         /* It's good; let handle2md succeed and add to active mds */
         lnet_initialise_handle (&lmd->md_lh, LNET_COOKIE_TYPE_MD);
-        LASSERT (list_empty(&lmd->md_list));
-        list_add (&lmd->md_list, &the_lnet.ln_active_mds);
+        LASSERT (cfs_list_empty(&lmd->md_list));
+        cfs_list_add (&lmd->md_list, &the_lnet.ln_active_mds);
 
         return 0;
 }
index 9ede0d2..8900ec7 100644
@@ -73,9 +73,11 @@ LNetMEAttach(unsigned int portal,
         lnet_initialise_handle (&me->me_lh, LNET_COOKIE_TYPE_ME);
 
         if (pos == LNET_INS_AFTER)
-                list_add_tail(&me->me_list, &(the_lnet.ln_portals[portal].ptl_ml));
+                cfs_list_add_tail(&me->me_list,
+                                  &(the_lnet.ln_portals[portal].ptl_ml));
         else
-                list_add(&me->me_list, &(the_lnet.ln_portals[portal].ptl_ml));
+                cfs_list_add(&me->me_list,
+                             &(the_lnet.ln_portals[portal].ptl_ml));
 
         lnet_me2handle(handle, me);
 
@@ -121,9 +123,9 @@ LNetMEInsert(lnet_handle_me_t current_meh,
         lnet_initialise_handle (&new_me->me_lh, LNET_COOKIE_TYPE_ME);
 
         if (pos == LNET_INS_AFTER)
-                list_add(&new_me->me_list, &current_me->me_list);
+                cfs_list_add(&new_me->me_list, &current_me->me_list);
         else
-                list_add_tail(&new_me->me_list, &current_me->me_list);
+                cfs_list_add_tail(&new_me->me_list, &current_me->me_list);
 
         lnet_me2handle(handle, new_me);
 
@@ -168,7 +170,7 @@ LNetMEUnlink(lnet_handle_me_t meh)
 void
 lnet_me_unlink(lnet_me_t *me)
 {
-        list_del (&me->me_list);
+        cfs_list_del (&me->me_list);
 
         if (me->me_md != NULL) {
                 me->me_md->md_me = NULL;
@@ -191,8 +193,8 @@ lib_me_dump(lnet_me_t *me)
 
         CWARN("\tMD\t= %p\n", me->md);
         CWARN("\tprev\t= %p\n",
-              list_entry(me->me_list.prev, lnet_me_t, me_list));
+              cfs_list_entry(me->me_list.prev, lnet_me_t, me_list));
         CWARN("\tnext\t= %p\n",
-              list_entry(me->me_list.next, lnet_me_t, me_list));
+              cfs_list_entry(me->me_list.next, lnet_me_t, me_list));
 }
 #endif
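
In LNetMEAttach()/LNetMEInsert() above, the insertion position maps one-to-one onto the two renamed add primitives: cfs_list_add() links the new node immediately after the given node, cfs_list_add_tail() immediately before it. A minimal sketch (hypothetical wrapper, same calls as the hunk):

/* Illustrative: LNET_INS_AFTER/BEFORE select between the two primitives. */
static void
demo_insert_me(lnet_me_t *new_me, lnet_me_t *current_me, int pos)
{
        if (pos == LNET_INS_AFTER)
                cfs_list_add(&new_me->me_list, &current_me->me_list);
        else
                cfs_list_add_tail(&new_me->me_list, &current_me->me_list);
}
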
index 40195ce..3ae5fc1 100644
@@ -209,10 +209,10 @@ lnet_match_md(int index, int op_mask, lnet_process_id_t src,
 int
 lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
 {
-        lnet_test_peer_t   *tp;
-        struct list_head  *el;
-        struct list_head  *next;
-        struct list_head   cull;
+        lnet_test_peer_t  *tp;
+        cfs_list_t        *el;
+        cfs_list_t        *next;
+        cfs_list_t         cull;
 
         LASSERT (the_lnet.ln_init);
 
@@ -226,7 +226,7 @@ lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
                 tp->tp_threshold = threshold;
 
                 LNET_LOCK();
-                list_add_tail (&tp->tp_list, &the_lnet.ln_test_peers);
+                cfs_list_add_tail (&tp->tp_list, &the_lnet.ln_test_peers);
                 LNET_UNLOCK();
                 return 0;
         }
@@ -236,24 +236,24 @@ lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
 
         LNET_LOCK();
 
-        list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
-                tp = list_entry (el, lnet_test_peer_t, tp_list);
+        cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
+                tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);
 
                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                     nid == LNET_NID_ANY ||       /* removing all entries */
                     tp->tp_nid == nid)          /* matched this one */
                 {
-                        list_del (&tp->tp_list);
-                        list_add (&tp->tp_list, &cull);
+                        cfs_list_del (&tp->tp_list);
+                        cfs_list_add (&tp->tp_list, &cull);
                 }
         }
 
         LNET_UNLOCK();
 
-        while (!list_empty (&cull)) {
-                tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
+        while (!cfs_list_empty (&cull)) {
+                tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
 
-                list_del (&tp->tp_list);
+                cfs_list_del (&tp->tp_list);
                 LIBCFS_FREE(tp, sizeof (*tp));
         }
         return 0;
@@ -262,18 +262,18 @@ lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
 static int
 fail_peer (lnet_nid_t nid, int outgoing)
 {
-        lnet_test_peer_t  *tp;
-        struct list_head *el;
-        struct list_head *next;
-        struct list_head  cull;
+        lnet_test_peer_t *tp;
+        cfs_list_t       *el;
+        cfs_list_t       *next;
+        cfs_list_t        cull;
         int               fail = 0;
 
         CFS_INIT_LIST_HEAD (&cull);
 
         LNET_LOCK();
 
-        list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
-                tp = list_entry (el, lnet_test_peer_t, tp_list);
+        cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
+                tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);
 
                 if (tp->tp_threshold == 0) {
                         /* zombie entry */
@@ -281,8 +281,8 @@ fail_peer (lnet_nid_t nid, int outgoing)
                                 /* only cull zombies on outgoing tests,
                                  * since we may be at interrupt priority on
                                  * incoming messages. */
-                                list_del (&tp->tp_list);
-                                list_add (&tp->tp_list, &cull);
+                                cfs_list_del (&tp->tp_list);
+                                cfs_list_add (&tp->tp_list, &cull);
                         }
                         continue;
                 }
@@ -296,8 +296,8 @@ fail_peer (lnet_nid_t nid, int outgoing)
                                 if (outgoing &&
                                     tp->tp_threshold == 0) {
                                         /* see above */
-                                        list_del (&tp->tp_list);
-                                        list_add (&tp->tp_list, &cull);
+                                        cfs_list_del (&tp->tp_list);
+                                        cfs_list_add (&tp->tp_list, &cull);
                                 }
                         }
                         break;
@@ -306,9 +306,9 @@ fail_peer (lnet_nid_t nid, int outgoing)
 
         LNET_UNLOCK ();
 
-        while (!list_empty (&cull)) {
-                tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
-                list_del (&tp->tp_list);
+        while (!cfs_list_empty (&cull)) {
+                tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
+                cfs_list_del (&tp->tp_list);
 
                 LIBCFS_FREE(tp, sizeof (*tp));
         }
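
lnet_fail_nid() and fail_peer() above share a two-phase idiom that the rename leaves intact: doomed entries are moved onto a private cull list while LNET_LOCK is held, then freed only after the lock is dropped. A condensed sketch with the renamed calls (helper name invented):

static void
demo_cull_zombie_test_peers(void)
{
        cfs_list_t        cull;
        cfs_list_t       *el;
        cfs_list_t       *next;
        lnet_test_peer_t *tp;

        CFS_INIT_LIST_HEAD(&cull);

        LNET_LOCK();
        cfs_list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = cfs_list_entry(el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0) {            /* zombie entry */
                        cfs_list_del(&tp->tp_list);
                        cfs_list_add(&tp->tp_list, &cull);
                }
        }
        LNET_UNLOCK();

        while (!cfs_list_empty(&cull)) {
                tp = cfs_list_entry(cull.next, lnet_test_peer_t, tp_list);
                cfs_list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));           /* safe: lock dropped */
        }
}
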
@@ -497,7 +497,7 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
         if (nob == 0)
                 return;
 
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
 
         LASSERT (ndiov > 0);
         while (doffset >= diov->kiov_len) {
@@ -577,7 +577,7 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
         if (nob == 0)
                 return;
 
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
 
         LASSERT (niov > 0);
         while (iovoffset >= iov->iov_len) {
@@ -646,7 +646,7 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
         if (nob == 0)
                 return;
 
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
 
         LASSERT (nkiov > 0);
         while (kiovoffset >= kiov->kiov_len) {
@@ -761,7 +761,7 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
         lnet_kiov_t  *kiov = NULL;
         int           rc;
 
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
         LASSERT (mlen == 0 || msg != NULL);
 
         if (msg != NULL) {
@@ -864,7 +864,7 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
         void   *priv = msg->msg_private;
         int     rc;
 
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
         LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
                  (msg->msg_txcredit && msg->msg_peertxcredit));
 
@@ -1034,7 +1034,8 @@ lnet_post_send_locked (lnet_msg_t *msg, int do_send)
         }
 
         if (!msg->msg_peertxcredit) {
-                LASSERT ((lp->lp_txcredits < 0) == !list_empty(&lp->lp_txq));
+                LASSERT ((lp->lp_txcredits < 0) ==
+                         !cfs_list_empty(&lp->lp_txq));
 
                 msg->msg_peertxcredit = 1;
                 lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
@@ -1045,13 +1046,14 @@ lnet_post_send_locked (lnet_msg_t *msg, int do_send)
 
                 if (lp->lp_txcredits < 0) {
                         msg->msg_delayed = 1;
-                        list_add_tail (&msg->msg_list, &lp->lp_txq);
+                        cfs_list_add_tail (&msg->msg_list, &lp->lp_txq);
                         return EAGAIN;
                 }
         }
 
         if (!msg->msg_txcredit) {
-                LASSERT ((ni->ni_txcredits < 0) == !list_empty(&ni->ni_txq));
+                LASSERT ((ni->ni_txcredits < 0) ==
+                         !cfs_list_empty(&ni->ni_txq));
 
                 msg->msg_txcredit = 1;
                 ni->ni_txcredits--;
@@ -1061,7 +1063,7 @@ lnet_post_send_locked (lnet_msg_t *msg, int do_send)
 
                 if (ni->ni_txcredits < 0) {
                         msg->msg_delayed = 1;
-                        list_add_tail (&msg->msg_list, &ni->ni_txq);
+                        cfs_list_add_tail (&msg->msg_list, &ni->ni_txq);
                         return EAGAIN;
                 }
         }
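
The reindented LASSERTs in lnet_post_send_locked() encode the queue invariant that a credit counter is negative exactly when messages are parked on the matching queue. A sketch of one decrement-and-maybe-queue step (simplified from the hunk above; the real code also sets msg_txcredit):

/* Illustrative: take one NI tx credit, parking the message if we went
 * negative, so that
 *     (ni->ni_txcredits < 0) == !cfs_list_empty(&ni->ni_txq)
 * keeps holding.  Returns EAGAIN when the message was queued. */
static int
demo_take_tx_credit(lnet_ni_t *ni, lnet_msg_t *msg)
{
        LASSERT((ni->ni_txcredits < 0) == !cfs_list_empty(&ni->ni_txq));

        ni->ni_txcredits--;
        if (ni->ni_txcredits < 0) {
                msg->msg_delayed = 1;
                cfs_list_add_tail(&msg->msg_list, &ni->ni_txq);
                return EAGAIN;
        }
        return 0;
}
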
@@ -1092,7 +1094,7 @@ lnet_commit_routedmsg (lnet_msg_t *msg)
 
         LASSERT (!msg->msg_onactivelist);
         msg->msg_onactivelist = 1;
-        list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
+        cfs_list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
 }
 
 lnet_rtrbufpool_t *
@@ -1130,7 +1132,8 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
         LASSERT (!do_recv || msg->msg_delayed);
 
         if (!msg->msg_peerrtrcredit) {
-                LASSERT ((lp->lp_rtrcredits < 0) == !list_empty(&lp->lp_rtrq));
+                LASSERT ((lp->lp_rtrcredits < 0) ==
+                         !cfs_list_empty(&lp->lp_rtrq));
 
                 msg->msg_peerrtrcredit = 1;
                 lp->lp_rtrcredits--;
@@ -1140,7 +1143,7 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
                 if (lp->lp_rtrcredits < 0) {
                         /* must have checked eager_recv before here */
                         LASSERT (msg->msg_delayed);
-                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
+                        cfs_list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                         return EAGAIN;
                 }
         }
@@ -1148,7 +1151,8 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
         rbp = lnet_msg2bufpool(msg);
 
         if (!msg->msg_rtrcredit) {
-                LASSERT ((rbp->rbp_credits < 0) == !list_empty(&rbp->rbp_msgs));
+                LASSERT ((rbp->rbp_credits < 0) ==
+                         !cfs_list_empty(&rbp->rbp_msgs));
 
                 msg->msg_rtrcredit = 1;
                 rbp->rbp_credits--;
@@ -1158,14 +1162,14 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
                 if (rbp->rbp_credits < 0) {
                         /* must have checked eager_recv before here */
                         LASSERT (msg->msg_delayed);
-                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
+                        cfs_list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                         return EAGAIN;
                 }
         }
 
-        LASSERT (!list_empty(&rbp->rbp_bufs));
-        rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
-        list_del(&rb->rb_list);
+        LASSERT (!cfs_list_empty(&rbp->rbp_bufs));
+        rb = cfs_list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
+        cfs_list_del(&rb->rb_list);
 
         msg->msg_niov = rbp->rbp_npages;
         msg->msg_kiov = &rb->rb_kiov[0];
@@ -1193,12 +1197,13 @@ lnet_return_credits_locked (lnet_msg_t *msg)
                 msg->msg_txcredit = 0;
                 ni = txpeer->lp_ni;
 
-                LASSERT((ni->ni_txcredits < 0) == !list_empty(&ni->ni_txq));
+                LASSERT((ni->ni_txcredits < 0) == !cfs_list_empty(&ni->ni_txq));
 
                 ni->ni_txcredits++;
                 if (ni->ni_txcredits <= 0) {
-                        msg2 = list_entry(ni->ni_txq.next, lnet_msg_t, msg_list);
-                        list_del(&msg2->msg_list);
+                        msg2 = cfs_list_entry(ni->ni_txq.next, lnet_msg_t,
+                                              msg_list);
+                        cfs_list_del(&msg2->msg_list);
 
                         LASSERT(msg2->msg_txpeer->lp_ni == ni);
                         LASSERT(msg2->msg_delayed);
@@ -1211,16 +1216,17 @@ lnet_return_credits_locked (lnet_msg_t *msg)
                 /* give back peer txcredits */
                 msg->msg_peertxcredit = 0;
 
-                LASSERT((txpeer->lp_txcredits < 0) == !list_empty(&txpeer->lp_txq));
+                LASSERT((txpeer->lp_txcredits < 0) ==
+                        !cfs_list_empty(&txpeer->lp_txq));
 
                 txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
                 LASSERT (txpeer->lp_txqnob >= 0);
 
                 txpeer->lp_txcredits++;
                 if (txpeer->lp_txcredits <= 0) {
-                        msg2 = list_entry(txpeer->lp_txq.next,
-                                          lnet_msg_t, msg_list);
-                        list_del(&msg2->msg_list);
+                        msg2 = cfs_list_entry(txpeer->lp_txq.next,
+                                              lnet_msg_t, msg_list);
+                        cfs_list_del(&msg2->msg_list);
 
                         LASSERT (msg2->msg_txpeer == txpeer);
                         LASSERT (msg2->msg_delayed);
@@ -1245,22 +1251,24 @@ lnet_return_credits_locked (lnet_msg_t *msg)
                  * itself */
                 LASSERT (msg->msg_kiov != NULL);
 
-                rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
+                rb = cfs_list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
                 rbp = rb->rb_pool;
                 LASSERT (rbp == lnet_msg2bufpool(msg));
 
                 msg->msg_kiov = NULL;
                 msg->msg_rtrcredit = 0;
 
-                LASSERT((rbp->rbp_credits < 0) == !list_empty(&rbp->rbp_msgs));
-                LASSERT((rbp->rbp_credits > 0) == !list_empty(&rbp->rbp_bufs));
+                LASSERT((rbp->rbp_credits < 0) ==
+                        !cfs_list_empty(&rbp->rbp_msgs));
+                LASSERT((rbp->rbp_credits > 0) ==
+                        !cfs_list_empty(&rbp->rbp_bufs));
 
-                list_add(&rb->rb_list, &rbp->rbp_bufs);
+                cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
                 rbp->rbp_credits++;
                 if (rbp->rbp_credits <= 0) {
-                        msg2 = list_entry(rbp->rbp_msgs.next,
-                                          lnet_msg_t, msg_list);
-                        list_del(&msg2->msg_list);
+                        msg2 = cfs_list_entry(rbp->rbp_msgs.next,
+                                              lnet_msg_t, msg_list);
+                        cfs_list_del(&msg2->msg_list);
 
                         (void) lnet_post_routed_recv_locked(msg2, 1);
                 }
@@ -1270,13 +1278,14 @@ lnet_return_credits_locked (lnet_msg_t *msg)
                 /* give back peer router credits */
                 msg->msg_peerrtrcredit = 0;
 
-                LASSERT((rxpeer->lp_rtrcredits < 0) == !list_empty(&rxpeer->lp_rtrq));
+                LASSERT((rxpeer->lp_rtrcredits < 0) ==
+                        !cfs_list_empty(&rxpeer->lp_rtrq));
 
                 rxpeer->lp_rtrcredits++;
                 if (rxpeer->lp_rtrcredits <= 0) {
-                        msg2 = list_entry(rxpeer->lp_rtrq.next,
-                                          lnet_msg_t, msg_list);
-                        list_del(&msg2->msg_list);
+                        msg2 = cfs_list_entry(rxpeer->lp_rtrq.next,
+                                              lnet_msg_t, msg_list);
+                        cfs_list_del(&msg2->msg_list);
 
                         (void) lnet_post_routed_recv_locked(msg2, 1);
                 }
@@ -1300,7 +1309,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg)
         lnet_remotenet_t *rnet;
         lnet_route_t     *route;
         lnet_route_t     *best_route;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         lnet_peer_t      *lp;
         lnet_peer_t      *lp2;
         int               rc;
@@ -1402,8 +1411,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg)
                 /* Find the best gateway I can use */
                 lp = NULL;
                 best_route = NULL;
-                list_for_each(tmp, &rnet->lrn_routes) {
-                        route = list_entry(tmp, lnet_route_t, lr_list);
+                cfs_list_for_each(tmp, &rnet->lrn_routes) {
+                        route = cfs_list_entry(tmp, lnet_route_t, lr_list);
                         lp2 = route->lr_gateway;
 
                         if (lp2->lp_alive &&
@@ -1429,8 +1438,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg)
 
                 /* Place selected route at the end of the route list to ensure
                  * fairness; everything else being equal... */
-                list_del(&best_route->lr_list);
-                list_add_tail(&best_route->lr_list, &rnet->lrn_routes);
+                cfs_list_del(&best_route->lr_list);
+                cfs_list_add_tail(&best_route->lr_list, &rnet->lrn_routes);
 
                 if (src_ni == NULL) {
                         src_ni = lp->lp_ni;
@@ -1499,7 +1508,7 @@ lnet_commit_md (lnet_libmd_t *md, lnet_msg_t *msg)
 
         LASSERT (!msg->msg_onactivelist);
         msg->msg_onactivelist = 1;
-        list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
+        cfs_list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
 }
 
 static void
@@ -1574,7 +1583,7 @@ LNetSetLazyPortal(int portal)
 int
 LNetClearLazyPortal(int portal)
 {
-        struct list_head  zombies;
+        cfs_list_t        zombies;
         lnet_portal_t    *ptl = &the_lnet.ln_portals[portal];
         lnet_msg_t       *msg;
 
@@ -1594,17 +1603,17 @@ LNetClearLazyPortal(int portal)
                 CDEBUG (D_NET, "clearing portal %d lazy\n", portal);
 
         /* grab all the blocked messages atomically */
-        list_add(&zombies, &ptl->ptl_msgq);
-        list_del_init(&ptl->ptl_msgq);
+        cfs_list_add(&zombies, &ptl->ptl_msgq);
+        cfs_list_del_init(&ptl->ptl_msgq);
 
         ptl->ptl_msgq_version++;
         ptl->ptl_options &= ~LNET_PTL_LAZY;
 
         LNET_UNLOCK();
 
-        while (!list_empty(&zombies)) {
-                msg = list_entry(zombies.next, lnet_msg_t, msg_list);
-                list_del(&msg->msg_list);
+        while (!cfs_list_empty(&zombies)) {
+                msg = cfs_list_entry(zombies.next, lnet_msg_t, msg_list);
+                cfs_list_del(&msg->msg_list);
 
                 lnet_drop_delayed_put(msg, "Clearing lazy portal attr");
         }
@@ -1650,8 +1659,8 @@ lnet_match_blocked_msg(lnet_libmd_t *md)
 {
         CFS_LIST_HEAD    (drops);
         CFS_LIST_HEAD    (matches);
-        struct list_head *tmp;
-        struct list_head *entry;
+        cfs_list_t       *tmp;
+        cfs_list_t       *entry;
         lnet_msg_t       *msg;
         lnet_me_t        *me  = md->md_me;
         lnet_portal_t    *ptl = &the_lnet.ln_portals[me->me_portal];
@@ -1659,13 +1668,13 @@ lnet_match_blocked_msg(lnet_libmd_t *md)
         LASSERT (me->me_portal < (unsigned int)the_lnet.ln_nportals);
 
         if ((ptl->ptl_options & LNET_PTL_LAZY) == 0) {
-                LASSERT (list_empty(&ptl->ptl_msgq));
+                LASSERT (cfs_list_empty(&ptl->ptl_msgq));
                 return;
         }
 
         LASSERT (md->md_refcount == 0); /* a brand new MD */
 
-        list_for_each_safe (entry, tmp, &ptl->ptl_msgq) {
+        cfs_list_for_each_safe (entry, tmp, &ptl->ptl_msgq) {
                 int               rc;
                 int               index;
                 unsigned int      mlength;
@@ -1673,7 +1682,7 @@ lnet_match_blocked_msg(lnet_libmd_t *md)
                 lnet_hdr_t       *hdr;
                 lnet_process_id_t src;
 
-                msg = list_entry(entry, lnet_msg_t, msg_list);
+                msg = cfs_list_entry(entry, lnet_msg_t, msg_list);
 
                 LASSERT (msg->msg_delayed);
 
@@ -1693,11 +1702,11 @@ lnet_match_blocked_msg(lnet_libmd_t *md)
                         continue;
 
                 /* Hurrah! This _is_ a match */
-                list_del(&msg->msg_list);
+                cfs_list_del(&msg->msg_list);
                 ptl->ptl_msgq_version++;
 
                 if (rc == LNET_MATCHMD_OK) {
-                        list_add_tail(&msg->msg_list, &matches);
+                        cfs_list_add_tail(&msg->msg_list, &matches);
 
                         CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
                                "match "LPU64" offset %d length %d.\n",
@@ -1709,7 +1718,7 @@ lnet_match_blocked_msg(lnet_libmd_t *md)
                 } else {
                         LASSERT (rc == LNET_MATCHMD_DROP);
 
-                        list_add_tail(&msg->msg_list, &drops);
+                        cfs_list_add_tail(&msg->msg_list, &drops);
                 }
 
                 if (lnet_md_exhausted(md))
@@ -1718,18 +1727,18 @@ lnet_match_blocked_msg(lnet_libmd_t *md)
 
         LNET_UNLOCK();
 
-        list_for_each_safe (entry, tmp, &drops) {
-                msg = list_entry(entry, lnet_msg_t, msg_list);
+        cfs_list_for_each_safe (entry, tmp, &drops) {
+                msg = cfs_list_entry(entry, lnet_msg_t, msg_list);
 
-                list_del(&msg->msg_list);
+                cfs_list_del(&msg->msg_list);
 
                 lnet_drop_delayed_put(msg, "Bad match");
         }
 
-        list_for_each_safe (entry, tmp, &matches) {
-                msg = list_entry(entry, lnet_msg_t, msg_list);
+        cfs_list_for_each_safe (entry, tmp, &matches) {
+                msg = cfs_list_entry(entry, lnet_msg_t, msg_list);
 
-                list_del(&msg->msg_list);
+                cfs_list_del(&msg->msg_list);
 
                 /* md won't disappear under me, since each msg
                  * holds a ref on it */
@@ -1795,7 +1804,7 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
                         if (version != ptl->ptl_ml_version)
                                 goto again;
 
-                        list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
+                        cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
                         ptl->ptl_msgq_version++;
                         LNET_UNLOCK();
 
@@ -2129,7 +2138,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
         __u32          payload_length;
         __u32          type;
 
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
 
         type = le32_to_cpu(hdr->type);
         src_nid = le64_to_cpu(hdr->src_nid);
@@ -2233,7 +2242,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
         /* Message looks OK; we're not going to return an error, so we MUST
          * call back lnd_recv() come what may... */
 
-        if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
             fail_peer (src_nid, 0))             /* shall we now? */
         {
                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
@@ -2360,7 +2369,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
         LASSERT (the_lnet.ln_init);
         LASSERT (the_lnet.ln_refcount > 0);
 
-        if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
             fail_peer (target.nid, 1))          /* shall we now? */
         {
                 CERROR("Dropping PUT to %s: simulated failure\n",
@@ -2545,7 +2554,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
         LASSERT (the_lnet.ln_init);
         LASSERT (the_lnet.ln_refcount > 0);
 
-        if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
             fail_peer (target.nid, 1))          /* shall we now? */
         {
                 CERROR("Dropping GET to %s: simulated failure\n",
@@ -2627,7 +2636,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
 int
 LNetDist (lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
 {
-        struct list_head *e;
+        cfs_list_t       *e;
         lnet_ni_t        *ni;
         lnet_remotenet_t *rnet;
         __u32             dstnet = LNET_NIDNET(dstnid);
@@ -2644,8 +2653,8 @@ LNetDist (lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
 
         LNET_LOCK();
 
-        list_for_each (e, &the_lnet.ln_nis) {
-                ni = list_entry(e, lnet_ni_t, ni_list);
+        cfs_list_for_each (e, &the_lnet.ln_nis) {
+                ni = cfs_list_entry(e, lnet_ni_t, ni_list);
 
                 if (ni->ni_nid == dstnid) {
                         if (srcnidp != NULL)
@@ -2673,16 +2682,17 @@ LNetDist (lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
                 order++;
         }
 
-        list_for_each (e, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(e, lnet_remotenet_t, lrn_list);
+        cfs_list_for_each (e, &the_lnet.ln_remote_nets) {
+                rnet = cfs_list_entry(e, lnet_remotenet_t, lrn_list);
 
                 if (rnet->lrn_net == dstnet) {
                         lnet_route_t *route;
                         lnet_route_t *shortest = NULL;
 
-                        LASSERT (!list_empty(&rnet->lrn_routes));
+                        LASSERT (!cfs_list_empty(&rnet->lrn_routes));
 
-                        list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
+                        cfs_list_for_each_entry(route, &rnet->lrn_routes,
+                                                lr_list) {
                                 if (shortest == NULL ||
                                     route->lr_hops < shortest->lr_hops)
                                         shortest = route;
@@ -2712,7 +2722,7 @@ LNetSetAsync(lnet_process_id_t id, int nasync)
 #else
         lnet_ni_t        *ni;
         lnet_remotenet_t *rnet;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
         lnet_route_t     *route;
         lnet_nid_t       *nids;
         int               nnids;
@@ -2720,7 +2730,7 @@ LNetSetAsync(lnet_process_id_t id, int nasync)
         int               rc = 0;
         int               rc2;
 
-        /* Target on a local network? */ 
+        /* Target on a local network? */
 
         ni = lnet_net2ni(LNET_NIDNET(id.nid));
         if (ni != NULL) {
@@ -2741,7 +2751,7 @@ LNetSetAsync(lnet_process_id_t id, int nasync)
         LNET_LOCK();
         rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
         if (rnet != NULL) {
-                list_for_each(tmp, &rnet->lrn_routes) {
+                cfs_list_for_each(tmp, &rnet->lrn_routes) {
                         if (nnids == maxnids) {
                                 LNET_UNLOCK();
                                 LIBCFS_FREE(nids, maxnids * sizeof(*nids));
@@ -2749,7 +2759,7 @@ LNetSetAsync(lnet_process_id_t id, int nasync)
                                 goto again;
                         }
 
-                        route = list_entry(tmp, lnet_route_t, lr_list);
+                        route = cfs_list_entry(tmp, lnet_route_t, lr_list);
                         nids[nnids++] = route->lr_gateway->lp_nid;
                 }
         }
index 68286b3..089d788 100644
@@ -147,7 +147,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg)
 
         LASSERT (msg->msg_onactivelist);
         msg->msg_onactivelist = 0;
-        list_del (&msg->msg_activelist);
+        cfs_list_del (&msg->msg_activelist);
         the_lnet.ln_counters.msgs_alloc--;
         lnet_msg_free(msg);
 }
@@ -162,7 +162,7 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
 #endif
         lnet_libmd_t      *md;
 
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
 
         if (msg == NULL)
                 return;
@@ -210,7 +210,7 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
                 msg->msg_md = NULL;
         }
 
-        list_add_tail (&msg->msg_list, &the_lnet.ln_finalizeq);
+        cfs_list_add_tail (&msg->msg_list, &the_lnet.ln_finalizeq);
 
         /* Recursion breaker.  Don't complete the message here if I am (or
          * enough other threads are) already completing messages */
@@ -234,11 +234,11 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
         the_lnet.ln_finalizing = 1;
 #endif
 
-        while (!list_empty(&the_lnet.ln_finalizeq)) {
-                msg = list_entry(the_lnet.ln_finalizeq.next,
-                                 lnet_msg_t, msg_list);
-                
-                list_del(&msg->msg_list);
+        while (!cfs_list_empty(&the_lnet.ln_finalizeq)) {
+                msg = cfs_list_entry(the_lnet.ln_finalizeq.next,
+                                     lnet_msg_t, msg_list);
+
+                cfs_list_del(&msg->msg_list);
 
                 /* NB drops and regains the lnet lock if it actually does
                  * anything, so my finalizing friends can chomp along too */
index 575d176..4ca69a9 100644
@@ -44,7 +44,7 @@ static int config_on_load = 0;
 CFS_MODULE_PARM(config_on_load, "i", int, 0444,
                 "configure network at module load");
 
-static struct semaphore lnet_config_mutex;
+static cfs_semaphore_t lnet_config_mutex;
 
 int
 lnet_configure (void *arg)
@@ -119,7 +119,7 @@ init_lnet(void)
         int                  rc;
         ENTRY;
 
-        init_mutex(&lnet_config_mutex);
+        cfs_init_mutex(&lnet_config_mutex);
 
         rc = LNetInit();
         if (rc != 0) {
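
module.c moves the configuration mutex onto the portable type: struct semaphore becomes cfs_semaphore_t and init_mutex() becomes cfs_init_mutex(). Under this API a mutex is a semaphore initialized to a count of one, taken with cfs_mutex_down() and released with cfs_mutex_up() (both visible in the router.c hunks further down). A minimal sketch (hypothetical helpers):

static cfs_semaphore_t demo_config_mutex;

static void
demo_module_init(void)
{
        cfs_init_mutex(&demo_config_mutex);     /* count 1: acts as a mutex */
}

static void
demo_configure_serialized(void)
{
        cfs_mutex_down(&demo_config_mutex);     /* enter critical section */
        /* ... network configuration would run here ... */
        cfs_mutex_up(&demo_config_mutex);       /* leave critical section */
}
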
index 8baf199..1565b51 100644
 int
 lnet_create_peer_table(void)
 {
-       struct list_head *hash;
-       int               i;
+       cfs_list_t  *hash;
+       int          i;
 
        LASSERT (the_lnet.ln_peer_hash == NULL);
-       LIBCFS_ALLOC(hash, LNET_PEER_HASHSIZE * sizeof(struct list_head));
+       LIBCFS_ALLOC(hash, LNET_PEER_HASHSIZE * sizeof(cfs_list_t));
        
        if (hash == NULL) {
                CERROR("Can't allocate peer hash table\n");
@@ -70,10 +70,10 @@ lnet_destroy_peer_table(void)
                 return;
 
        for (i = 0; i < LNET_PEER_HASHSIZE; i++)
-               LASSERT (list_empty(&the_lnet.ln_peer_hash[i]));
-       
+               LASSERT (cfs_list_empty(&the_lnet.ln_peer_hash[i]));
+
        LIBCFS_FREE(the_lnet.ln_peer_hash,
-                   LNET_PEER_HASHSIZE * sizeof (struct list_head));
+                   LNET_PEER_HASHSIZE * sizeof (cfs_list_t));
         the_lnet.ln_peer_hash = NULL;
 }
 
@@ -83,16 +83,17 @@ lnet_clear_peer_table(void)
        int         i;
 
         LASSERT (the_lnet.ln_shutdown);         /* i.e. no new peers */
-       
+
        for (i = 0; i < LNET_PEER_HASHSIZE; i++) {
-               struct list_head *peers = &the_lnet.ln_peer_hash[i];
+               cfs_list_t *peers = &the_lnet.ln_peer_hash[i];
 
                LNET_LOCK();
-               while (!list_empty(peers)) {
-                       lnet_peer_t *lp = list_entry(peers->next,
-                                                    lnet_peer_t, lp_hashlist);
-                       
-                       list_del(&lp->lp_hashlist);
+               while (!cfs_list_empty(peers)) {
+                       lnet_peer_t *lp = cfs_list_entry(peers->next,
+                                                         lnet_peer_t,
+                                                         lp_hashlist);
+
+                       cfs_list_del(&lp->lp_hashlist);
                         lnet_peer_decref_locked(lp);   /* lose hash table's ref */
                }
                LNET_UNLOCK();
@@ -120,7 +121,7 @@ lnet_destroy_peer_locked (lnet_peer_t *lp)
 
         LASSERT (lp->lp_refcount == 0);
         LASSERT (lp->lp_rtr_refcount == 0);
-       LASSERT (list_empty(&lp->lp_txq));
+       LASSERT (cfs_list_empty(&lp->lp_txq));
         LASSERT (lp->lp_txqnob == 0);
         LASSERT (lp->lp_rcd == NULL);
 
@@ -136,22 +137,22 @@ lnet_peer_t *
 lnet_find_peer_locked (lnet_nid_t nid)
 {
        unsigned int      idx = LNET_NIDADDR(nid) % LNET_PEER_HASHSIZE;
-       struct list_head *peers = &the_lnet.ln_peer_hash[idx];
-       struct list_head *tmp;
+       cfs_list_t       *peers = &the_lnet.ln_peer_hash[idx];
+       cfs_list_t       *tmp;
         lnet_peer_t      *lp;
 
        if (the_lnet.ln_shutdown)
                 return NULL;
 
-       list_for_each (tmp, peers) {
-               lp = list_entry(tmp, lnet_peer_t, lp_hashlist);
-               
+       cfs_list_for_each (tmp, peers) {
+               lp = cfs_list_entry(tmp, lnet_peer_t, lp_hashlist);
+
                if (lp->lp_nid == nid) {
                         lnet_peer_addref_locked(lp);
                        return lp;
                 }
        }
-        
+
        return NULL;
 }
 
@@ -230,7 +231,7 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid)
         /* can't add peers after shutdown starts */
         LASSERT (!the_lnet.ln_shutdown);
 
-        list_add_tail(&lp->lp_hashlist, lnet_nid2peerhash(nid));
+        cfs_list_add_tail(&lp->lp_hashlist, lnet_nid2peerhash(nid));
         the_lnet.ln_npeers++;
         the_lnet.ln_peertable_version++;
         *lpp = lp;
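
lnet_create_peer_table() now sizes its allocation in units of cfs_list_t rather than struct list_head. The hunk above is truncated before the bucket initialization; a sketch of the whole allocate-and-init sequence, assuming each bucket is set up with CFS_INIT_LIST_HEAD() in keeping with the rest of the patch:

static cfs_list_t *
demo_alloc_peer_hash(void)
{
        cfs_list_t *hash;
        int         i;

        LIBCFS_ALLOC(hash, LNET_PEER_HASHSIZE * sizeof(cfs_list_t));
        if (hash == NULL) {
                CERROR("Can't allocate peer hash table\n");
                return NULL;
        }

        for (i = 0; i < LNET_PEER_HASHSIZE; i++)
                CFS_INIT_LIST_HEAD(&hash[i]);   /* assumed init step */

        return hash;
}
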
index 9a231e4..a566c31 100644
@@ -179,18 +179,18 @@ lnet_rtr_addref_locked(lnet_peer_t *lp)
 
         lp->lp_rtr_refcount++;
         if (lp->lp_rtr_refcount == 1) {
-                struct list_head *pos;
+                cfs_list_t *pos;
 
                 /* a simple insertion sort */
-                list_for_each_prev(pos, &the_lnet.ln_routers) {
-                        lnet_peer_t *rtr = list_entry(pos, lnet_peer_t, 
-                                                      lp_rtr_list);
+                cfs_list_for_each_prev(pos, &the_lnet.ln_routers) {
+                        lnet_peer_t *rtr = cfs_list_entry(pos, lnet_peer_t,
+                                                          lp_rtr_list);
 
                         if (rtr->lp_nid < lp->lp_nid)
                                 break;
                 }
 
-                list_add(&lp->lp_rtr_list, pos);
+                cfs_list_add(&lp->lp_rtr_list, pos);
                 /* addref for the_lnet.ln_routers */
                 lnet_peer_addref_locked(lp);
                 the_lnet.ln_routers_version++;
@@ -206,12 +206,12 @@ lnet_rtr_decref_locked(lnet_peer_t *lp)
         lp->lp_rtr_refcount--;
         if (lp->lp_rtr_refcount == 0) {
                 if (lp->lp_rcd != NULL) {
-                        list_add(&lp->lp_rcd->rcd_list,
-                                 &the_lnet.ln_zombie_rcd);
+                        cfs_list_add(&lp->lp_rcd->rcd_list,
+                                     &the_lnet.ln_zombie_rcd);
                         lp->lp_rcd = NULL;
                 }
 
-                list_del(&lp->lp_rtr_list);
+                cfs_list_del(&lp->lp_rtr_list);
                 /* decref for the_lnet.ln_routers */
                 lnet_peer_decref_locked(lp);
                 the_lnet.ln_routers_version++;
@@ -222,12 +222,12 @@ lnet_remotenet_t *
 lnet_find_net_locked (__u32 net)
 {
         lnet_remotenet_t *rnet;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
 
         LASSERT (!the_lnet.ln_shutdown);
 
-        list_for_each (tmp, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
+        cfs_list_for_each (tmp, &the_lnet.ln_remote_nets) {
+                rnet = cfs_list_entry(tmp, lnet_remotenet_t, lrn_list);
 
                 if (rnet->lrn_net == net)
                         return rnet;
@@ -241,10 +241,10 @@ lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
 {
         unsigned int      len = 0;
         unsigned int      offset = 0;
-        struct list_head *e;
+        cfs_list_t       *e;
         extern __u64 lnet_create_interface_cookie(void);
 
-        list_for_each (e, &rnet->lrn_routes) {
+        cfs_list_for_each (e, &rnet->lrn_routes) {
                 len++;
         }
 
@@ -252,12 +252,12 @@ lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
          * See bug 18751 */
         /* len+1 positions to add a new entry, also prevents division by 0 */
         offset = ((unsigned int) lnet_create_interface_cookie()) % (len + 1);
-        list_for_each (e, &rnet->lrn_routes) {
+        cfs_list_for_each (e, &rnet->lrn_routes) {
                 if (offset == 0)
                         break;
                 offset--;
         }
-        list_add(&route->lr_list, e);
+        cfs_list_add(&route->lr_list, e);
 
         the_lnet.ln_remote_nets_version++;
         lnet_rtr_addref_locked(route->lr_gateway);
@@ -266,7 +266,7 @@ lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
 int
 lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
 {
-        struct list_head    *e;
+        cfs_list_t          *e;
         lnet_remotenet_t    *rnet;
         lnet_remotenet_t    *rnet2;
         lnet_route_t        *route;
@@ -327,14 +327,14 @@ lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
         rnet2 = lnet_find_net_locked(net);
         if (rnet2 == NULL) {
                 /* new network */
-                list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
+                cfs_list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
                 rnet2 = rnet;
         }
 
         /* Search for a duplicate route (it's a NOOP if it is) */
         add_route = 1;
-        list_for_each (e, &rnet2->lrn_routes) {
-                lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
+        cfs_list_for_each (e, &rnet2->lrn_routes) {
+                lnet_route_t *route2 = cfs_list_entry(e, lnet_route_t, lr_list);
 
                 if (route2->lr_gateway == route->lr_gateway) {
                         add_route = 0;
@@ -375,17 +375,17 @@ lnet_check_routes (void)
         lnet_remotenet_t    *rnet;
         lnet_route_t        *route;
         lnet_route_t        *route2;
-        struct list_head    *e1;
-        struct list_head    *e2;
+        cfs_list_t          *e1;
+        cfs_list_t          *e2;
 
         LNET_LOCK();
 
-        list_for_each (e1, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+        cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+                rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
 
                 route2 = NULL;
-                list_for_each (e2, &rnet->lrn_routes) {
-                        route = list_entry(e2, lnet_route_t, lr_list);
+                cfs_list_for_each (e2, &rnet->lrn_routes) {
+                        route = cfs_list_entry(e2, lnet_route_t, lr_list);
 
                         if (route2 == NULL)
                                 route2 = route;
@@ -411,8 +411,8 @@ lnet_del_route (__u32 net, lnet_nid_t gw_nid)
 {
         lnet_remotenet_t    *rnet;
         lnet_route_t        *route;
-        struct list_head    *e1;
-        struct list_head    *e2;
+        cfs_list_t          *e1;
+        cfs_list_t          *e2;
         int                  rc = -ENOENT;
 
         CDEBUG(D_NET, "Del route: net %s : gw %s\n",
@@ -424,25 +424,25 @@ lnet_del_route (__u32 net, lnet_nid_t gw_nid)
  again:
         LNET_LOCK();
 
-        list_for_each (e1, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+        cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+                rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
 
                 if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
                       net == rnet->lrn_net))
                         continue;
 
-                list_for_each (e2, &rnet->lrn_routes) {
-                        route = list_entry(e2, lnet_route_t, lr_list);
+                cfs_list_for_each (e2, &rnet->lrn_routes) {
+                        route = cfs_list_entry(e2, lnet_route_t, lr_list);
 
                         if (!(gw_nid == LNET_NID_ANY ||
                               gw_nid == route->lr_gateway->lp_nid))
                                 continue;
 
-                        list_del(&route->lr_list);
+                        cfs_list_del(&route->lr_list);
                         the_lnet.ln_remote_nets_version++;
 
-                        if (list_empty(&rnet->lrn_routes))
-                                list_del(&rnet->lrn_list);
+                        if (cfs_list_empty(&rnet->lrn_routes))
+                                cfs_list_del(&rnet->lrn_list);
                         else
                                 rnet = NULL;
 
@@ -474,18 +474,18 @@ int
 lnet_get_route (int idx, __u32 *net, __u32 *hops,
                lnet_nid_t *gateway, __u32 *alive)
 {
-        struct list_head    *e1;
-        struct list_head    *e2;
+        cfs_list_t          *e1;
+        cfs_list_t          *e2;
         lnet_remotenet_t    *rnet;
         lnet_route_t        *route;
 
         LNET_LOCK();
 
-        list_for_each (e1, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+        cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+                rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
 
-                list_for_each (e2, &rnet->lrn_routes) {
-                        route = list_entry(e2, lnet_route_t, lr_list);
+                cfs_list_for_each (e2, &rnet->lrn_routes) {
+                        route = cfs_list_entry(e2, lnet_route_t, lr_list);
 
                         if (idx-- == 0) {
                                 *net     = rnet->lrn_net;
@@ -603,7 +603,7 @@ void
 lnet_wait_known_routerstate(void)
 {
         lnet_peer_t         *rtr;
-        struct list_head    *entry;
+        cfs_list_t          *entry;
         int                  all_known;
 
         LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
@@ -612,8 +612,8 @@ lnet_wait_known_routerstate(void)
                 LNET_LOCK();
 
                 all_known = 1;
-                list_for_each (entry, &the_lnet.ln_routers) {
-                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+                cfs_list_for_each (entry, &the_lnet.ln_routers) {
+                        rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
 
                         if (rtr->lp_alive_count == 0) {
                                 all_known = 0;
@@ -654,7 +654,7 @@ lnet_router_checker_event (lnet_event_t *event)
                 LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
                 the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
 #ifdef __KERNEL__
-                mutex_up(&the_lnet.ln_rc_signal);
+                cfs_mutex_up(&the_lnet.ln_rc_signal);
 #endif
                 return;
         }
@@ -716,7 +716,7 @@ lnet_update_ni_status(void)
 
         LNET_LOCK();
 
-        list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
+        cfs_list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
                 lnet_ni_status_t *ns = ni->ni_status;
 
                 LASSERT (ns != NULL);
@@ -741,7 +741,7 @@ lnet_update_ni_status(void)
 void
 lnet_destroy_rc_data (lnet_rc_data_t *rcd)
 {
-        LASSERT (list_empty(&rcd->rcd_list));
+        LASSERT (cfs_list_empty(&rcd->rcd_list));
         /* detached from network */
         LASSERT (LNetHandleIsInvalid(rcd->rcd_mdh));
 
@@ -838,7 +838,7 @@ lnet_ping_router_locked (lnet_peer_t *rtr)
         if (!lnet_isrouter(rtr)) {
                 lnet_peer_decref_locked(rtr);
                 if (rcd != NULL)
-                        list_add(&rcd->rcd_list, &the_lnet.ln_zombie_rcd);
+                        cfs_list_add(&rcd->rcd_list, &the_lnet.ln_zombie_rcd);
                 return; /* router table changed! */
         }
 
@@ -912,7 +912,7 @@ lnet_router_checker_start(void)
          * outstanding events as it is allowed outstanding sends */
         eqsz = 0;
         version = the_lnet.ln_routers_version;
-        list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
+        cfs_list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
                 lnet_ni_t         *ni = rtr->lp_ni;
                 lnet_process_id_t  id;
 
@@ -970,7 +970,7 @@ lnet_router_checker_start(void)
                 return 0;
 
 #ifdef __KERNEL__
-        init_mutex_locked(&the_lnet.ln_rc_signal);
+        cfs_init_mutex_locked(&the_lnet.ln_rc_signal);
         /* EQ size doesn't matter; the callback is guaranteed to get every
          * event */
         eqsz = 1;
@@ -1010,7 +1010,7 @@ lnet_router_checker_start(void)
                 rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
                 LASSERT (rc == 0);
                 /* block until event callback signals exit */
-                mutex_down(&the_lnet.ln_rc_signal);
+                cfs_mutex_down(&the_lnet.ln_rc_signal);
                 rc = LNetEQFree(the_lnet.ln_rc_eqh);
                 LASSERT (rc == 0);
                 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
@@ -1041,7 +1041,7 @@ lnet_router_checker_stop (void)
 
 #ifdef __KERNEL__
         /* block until event callback signals exit */
-        mutex_down(&the_lnet.ln_rc_signal);
+        cfs_mutex_down(&the_lnet.ln_rc_signal);
 #else
         while (the_lnet.ln_rc_state != LNET_RC_STATE_UNLINKED) {
                 lnet_router_checker();
@@ -1063,7 +1063,7 @@ lnet_prune_zombie_rcd (int wait_unlink)
 {
         lnet_rc_data_t   *rcd;
         lnet_rc_data_t   *tmp;
-        struct list_head  free_rcd;
+        cfs_list_t        free_rcd;
         int               i;
         __u64             version;
 
@@ -1072,10 +1072,11 @@ lnet_prune_zombie_rcd (int wait_unlink)
         LNET_LOCK();
 rescan:
         version = the_lnet.ln_routers_version;
-        list_for_each_entry_safe (rcd, tmp, &the_lnet.ln_zombie_rcd, rcd_list) {
+        cfs_list_for_each_entry_safe (rcd, tmp, &the_lnet.ln_zombie_rcd,
+                                      rcd_list) {
                 if (LNetHandleIsInvalid(rcd->rcd_mdh)) {
-                        list_del(&rcd->rcd_list);
-                        list_add(&rcd->rcd_list, &free_rcd);
+                        cfs_list_del(&rcd->rcd_list);
+                        cfs_list_add(&rcd->rcd_list, &free_rcd);
                         continue;
                 }
 
@@ -1089,12 +1090,12 @@ rescan:
         }
 
         i = 2;
-        while (wait_unlink && !list_empty(&the_lnet.ln_zombie_rcd)) {
-                rcd = list_entry(the_lnet.ln_zombie_rcd.next,
-                                 lnet_rc_data_t, rcd_list);
+        while (wait_unlink && !cfs_list_empty(&the_lnet.ln_zombie_rcd)) {
+                rcd = cfs_list_entry(the_lnet.ln_zombie_rcd.next,
+                                     lnet_rc_data_t, rcd_list);
                 if (LNetHandleIsInvalid(rcd->rcd_mdh)) {
-                        list_del(&rcd->rcd_list);
-                        list_add(&rcd->rcd_list, &free_rcd);
+                        cfs_list_del(&rcd->rcd_list);
+                        cfs_list_add(&rcd->rcd_list, &free_rcd);
                         continue;
                 }
 
@@ -1112,9 +1113,9 @@ rescan:
 
         LNET_UNLOCK();
 
-        while (!list_empty(&free_rcd)) {
-                rcd = list_entry(free_rcd.next, lnet_rc_data_t, rcd_list);
-                list_del_init(&rcd->rcd_list);
+        while (!cfs_list_empty(&free_rcd)) {
+                rcd = cfs_list_entry(free_rcd.next, lnet_rc_data_t, rcd_list);
+                cfs_list_del_init(&rcd->rcd_list);
                 lnet_destroy_rc_data(rcd);
         }
         return;
@@ -1125,7 +1126,7 @@ lnet_router_checker(void *arg)
 {
         int                rc;
         lnet_peer_t       *rtr;
-        struct list_head  *entry;
+        cfs_list_t        *entry;
         lnet_process_id_t  rtr_id;
 
         cfs_daemonize("router_checker");
@@ -1142,8 +1143,8 @@ lnet_router_checker(void *arg)
 rescan:
                 version = the_lnet.ln_routers_version;
 
-                list_for_each (entry, &the_lnet.ln_routers) {
-                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+                cfs_list_for_each (entry, &the_lnet.ln_routers) {
+                        rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
                         lnet_ping_router_locked(rtr);
 
                         /* NB dropped lock */
@@ -1163,20 +1164,20 @@ rescan:
                 /* Call cfs_pause() here always adds 1 to load average 
                  * because kernel counts # active tasks as nr_running 
                  * + nr_uninterruptible. */
-                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
-                                     cfs_time_seconds(1));
+                cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+                                                   cfs_time_seconds(1));
         }
 
         LNET_LOCK();
 
-        list_for_each (entry, &the_lnet.ln_routers) {
-                rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+        cfs_list_for_each (entry, &the_lnet.ln_routers) {
+                rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
 
                 if (rtr->lp_rcd == NULL)
                         continue;
 
-                LASSERT (list_empty(&rtr->lp_rcd->rcd_list));
-                list_add(&rtr->lp_rcd->rcd_list, &the_lnet.ln_zombie_rcd);
+                LASSERT (cfs_list_empty(&rtr->lp_rcd->rcd_list));
+                cfs_list_add(&rtr->lp_rcd->rcd_list, &the_lnet.ln_zombie_rcd);
                 rtr->lp_rcd = NULL;
         }
 
@@ -1245,15 +1246,15 @@ lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
         int            nbuffers = 0;
         lnet_rtrbuf_t *rb;
 
-        LASSERT (list_empty(&rbp->rbp_msgs));
+        LASSERT (cfs_list_empty(&rbp->rbp_msgs));
         LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
 
-        while (!list_empty(&rbp->rbp_bufs)) {
+        while (!cfs_list_empty(&rbp->rbp_bufs)) {
                 LASSERT (rbp->rbp_credits > 0);
 
-                rb = list_entry(rbp->rbp_bufs.next,
-                                lnet_rtrbuf_t, rb_list);
-                list_del(&rb->rb_list);
+                rb = cfs_list_entry(rbp->rbp_bufs.next,
+                                    lnet_rtrbuf_t, rb_list);
+                cfs_list_del(&rb->rb_list);
                 lnet_destroy_rtrbuf(rb, npages);
                 nbuffers++;
         }
@@ -1287,7 +1288,7 @@ lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs)
                 rbp->rbp_nbuffers++;
                 rbp->rbp_credits++;
                 rbp->rbp_mincredits++;
-                list_add(&rb->rb_list, &rbp->rbp_bufs);
+                cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
 
                 /* No allocation "under fire" */
                 /* Otherwise we'd need code to schedule blocked msgs etc */
@@ -1402,7 +1403,7 @@ lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
         lnet_peer_t *lp = NULL;
         cfs_time_t   now = cfs_time_current();
 
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
 
         CDEBUG (D_NET, "%s notifying %s: %s\n",
                 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
@@ -1551,7 +1552,7 @@ lnet_router_checker (void)
         LNET_LOCK();
 
         version = the_lnet.ln_routers_version;
-        list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
+        cfs_list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
                 lnet_ping_router_locked(rtr);
                 LASSERT (version == the_lnet.ln_routers_version);
         }
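
The hunks above are mechanical: every list, scheduling, and interrupt-context primitive the router uses gains a cfs_ prefix, which is the whole point of this patch. On Linux such a layer can be little more than thin aliases over the native kernel API. The sketch below illustrates that idea for the list calls seen in this file; the macro bodies are an assumption for illustration, not a copy of the libcfs headers.

/* Illustrative only: a cfs_ list shim as thin aliases over the native
 * Linux list API.  Names match the calls in this diff; the bodies are
 * an assumption about how such a shim can be layered. */
#include <linux/list.h>

typedef struct list_head cfs_list_t;

#define cfs_list_empty(head)               list_empty(head)
#define cfs_list_add(entry, head)          list_add(entry, head)
#define cfs_list_add_tail(entry, head)     list_add_tail(entry, head)
#define cfs_list_del(entry)                list_del(entry)
#define cfs_list_del_init(entry)           list_del_init(entry)
#define cfs_list_entry(ptr, type, member)  list_entry(ptr, type, member)
#define cfs_list_for_each(pos, head)       list_for_each(pos, head)
#define cfs_list_for_each_entry(pos, head, member) \
        list_for_each_entry(pos, head, member)
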
index f04f7ba..c053f4c 100644 (file)
@@ -96,8 +96,8 @@ static int __proc_lnet_stats(void *data, int write,
         if (pos >= min_t(int, len, strlen(tmpstr)))
                 rc = 0;
         else
-                rc = trace_copyout_string(buffer, nob,
-                                          tmpstr + pos, "\n");
+                rc = cfs_trace_copyout_string(buffer, nob,
+                                              tmpstr + pos, "\n");
 
         LIBCFS_FREE(tmpstr, tmpsiz);
         LIBCFS_FREE(ctrs, sizeof(*ctrs));
@@ -141,8 +141,8 @@ int LL_PROC_PROTO(proc_lnet_routes)
                 *ver_p = (unsigned int)the_lnet.ln_remote_nets_version;
                 LNET_UNLOCK();
         } else {
-                struct list_head  *n;
-                struct list_head  *r;
+                cfs_list_t        *n;
+                cfs_list_t        *r;
                 lnet_route_t      *route = NULL;
                 lnet_remotenet_t  *rnet  = NULL;
                 int                skip  = *ppos - 1;
@@ -158,13 +158,14 @@ int LL_PROC_PROTO(proc_lnet_routes)
                 n = the_lnet.ln_remote_nets.next;
 
                 while (n != &the_lnet.ln_remote_nets && route == NULL) {
-                        rnet = list_entry(n, lnet_remotenet_t, lrn_list);
+                        rnet = cfs_list_entry(n, lnet_remotenet_t, lrn_list);
 
                         r = rnet->lrn_routes.next;
 
                         while (r != &rnet->lrn_routes) {
-                                lnet_route_t *re = list_entry(r, lnet_route_t,
-                                                              lr_list);
+                                lnet_route_t *re =
+                                        cfs_list_entry(r, lnet_route_t,
+                                                       lr_list);
                                 if (skip == 0) {
                                         route = re;
                                         break;
@@ -197,7 +198,7 @@ int LL_PROC_PROTO(proc_lnet_routes)
         if (len > *lenp) {    /* linux-supplied buffer is too small */
                 rc = -EINVAL;
         } else if (len > 0) { /* wrote something */
-                if (copy_to_user(buffer, tmpstr, len))
+                if (cfs_copy_to_user(buffer, tmpstr, len))
                         rc = -EFAULT;
                 else
                         *ppos += 1;
@@ -244,7 +245,7 @@ int LL_PROC_PROTO(proc_lnet_routers)
                 *ver_p = (unsigned int)the_lnet.ln_routers_version;
                 LNET_UNLOCK();
         } else {
-                struct list_head  *r;
+                cfs_list_t        *r;
                 lnet_peer_t       *peer = NULL;
                 int                skip = *ppos - 1;
 
@@ -259,8 +260,8 @@ int LL_PROC_PROTO(proc_lnet_routers)
                 r = the_lnet.ln_routers.next;
 
                 while (r != &the_lnet.ln_routers) {
-                        lnet_peer_t *lp = list_entry(r, lnet_peer_t,
-                                                     lp_rtr_list);
+                        lnet_peer_t *lp = cfs_list_entry(r, lnet_peer_t,
+                                                         lp_rtr_list);
 
                         if (skip == 0) {
                                 peer = lp;
@@ -311,7 +312,7 @@ int LL_PROC_PROTO(proc_lnet_routers)
         if (len > *lenp) {    /* linux-supplied buffer is too small */
                 rc = -EINVAL;
         } else if (len > 0) { /* wrote something */
-                if (copy_to_user(buffer, tmpstr, len))
+                if (cfs_copy_to_user(buffer, tmpstr, len))
                         rc = -EFAULT;
                 else
                         *ppos += 1;
@@ -384,7 +385,7 @@ int LL_PROC_PROTO(proc_lnet_peers)
 
                 num++;
         } else {
-                struct list_head  *p    = NULL;
+                cfs_list_t        *p    = NULL;
                 lnet_peer_t       *peer = NULL;
                 int                skip = num - 1;
 
@@ -401,8 +402,8 @@ int LL_PROC_PROTO(proc_lnet_peers)
                                 p = the_lnet.ln_peer_hash[idx].next;
 
                         while (p != &the_lnet.ln_peer_hash[idx]) {
-                                lnet_peer_t *lp = list_entry(p, lnet_peer_t,
-                                                             lp_hashlist);
+                                lnet_peer_t *lp = cfs_list_entry(p, lnet_peer_t,
+                                                                 lp_hashlist);
                                 if (skip == 0) {
                                         peer = lp;
 
@@ -463,7 +464,7 @@ int LL_PROC_PROTO(proc_lnet_peers)
         if (len > *lenp) {    /* linux-supplied buffer is too small */
                 rc = -EINVAL;
         } else if (len > 0) { /* wrote something */
-                if (copy_to_user(buffer, tmpstr, len))
+                if (cfs_copy_to_user(buffer, tmpstr, len))
                         rc = -EFAULT;
                 else
                         *ppos = LNET_PHASH_POS_MAKE(idx, num);
@@ -524,8 +525,8 @@ static int __proc_lnet_buffers(void *data, int write,
         if (pos >= min_t(int, len, strlen(tmpstr)))
                 rc = 0;
         else
-                rc = trace_copyout_string(buffer, nob,
-                                          tmpstr + pos, NULL);
+                rc = cfs_trace_copyout_string(buffer, nob,
+                                              tmpstr + pos, NULL);
 
         LIBCFS_FREE(tmpstr, tmpsiz);
         return rc;
@@ -561,7 +562,7 @@ int LL_PROC_PROTO(proc_lnet_nis)
                               "rtr", "max", "tx", "min");
                 LASSERT (tmpstr + tmpsiz - s > 0);
         } else {
-                struct list_head  *n;
+                cfs_list_t        *n;
                 lnet_ni_t         *ni   = NULL;
                 int                skip = *ppos - 1;
 
@@ -570,7 +571,7 @@ int LL_PROC_PROTO(proc_lnet_nis)
                 n = the_lnet.ln_nis.next;
 
                 while (n != &the_lnet.ln_nis) {
-                        lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
+                        lnet_ni_t *a_ni = cfs_list_entry(n, lnet_ni_t, ni_list);
 
                         if (skip == 0) {
                                 ni = a_ni;
@@ -619,7 +620,7 @@ int LL_PROC_PROTO(proc_lnet_nis)
         if (len > *lenp) {    /* linux-supplied buffer is too small */
                 rc = -EINVAL;
         } else if (len > 0) { /* wrote something */
-                if (copy_to_user(buffer, tmpstr, len))
+                if (cfs_copy_to_user(buffer, tmpstr, len))
                         rc = -EFAULT;
                 else
                         *ppos += 1;
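
Each proc_lnet_* handler above follows one read protocol: find the record selected by *ppos under LNET_LOCK, format it into a temporary kernel buffer, copy the result out with cfs_copy_to_user, and advance *ppos by one so the next read returns the next record. A condensed sketch of that skeleton, with format_record() as a hypothetical stand-in for the real list walkers and a simplified signature in place of LL_PROC_PROTO:

/* Sketch of the read skeleton shared by the proc_lnet_* handlers;
 * format_record() is hypothetical, the rest mirrors the hunks above. */
static int sketch_proc_read(char *buffer, int *lenp, loff_t *ppos)
{
        const int  tmpsiz = 256;
        char      *tmpstr;
        int        rc = 0;
        int        len;

        LIBCFS_ALLOC(tmpstr, tmpsiz);
        if (tmpstr == NULL)
                return -ENOMEM;

        /* hypothetical: format the record *ppos selects,
         * returning 0 when the walk is exhausted */
        len = format_record(tmpstr, tmpsiz, *ppos);

        if (len > *lenp) {            /* linux-supplied buffer is too small */
                rc = -EINVAL;
        } else if (len > 0) {         /* wrote something */
                if (cfs_copy_to_user(buffer, tmpstr, len))
                        rc = -EFAULT;
                else
                        *ppos += 1;   /* next read returns the next record */
        }

        LIBCFS_FREE(tmpstr, tmpsiz);

        if (rc == 0)
                *lenp = len;
        return rc;
}
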
index a64d4ca..a36c120 100644 (file)
@@ -110,7 +110,7 @@ brw_inject_one_error (void)
 #ifndef __KERNEL__
         gettimeofday(&tv, NULL);
 #else
-        do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
 #endif
 
         if ((tv.tv_usec & 1) == 0) return 0;
@@ -278,7 +278,7 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
                 CERROR ("BRW RPC to %s failed with %d\n",
                         libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
                 if (!tsi->tsi_stopping) /* rpc could have been aborted */
-                        atomic_inc(&sn->sn_brw_errors);
+                        cfs_atomic_inc(&sn->sn_brw_errors);
                 goto out;
         }
 
@@ -292,7 +292,7 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
                 libcfs_id2str(rpc->crpc_dest), reply->brw_status);
 
         if (reply->brw_status != 0) {
-                atomic_inc(&sn->sn_brw_errors);
+                cfs_atomic_inc(&sn->sn_brw_errors);
                 rpc->crpc_status = -(int)reply->brw_status;
                 goto out;
         }
@@ -302,7 +302,7 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
         if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
                 CERROR ("Bulk data from %s is corrupted!\n",
                         libcfs_id2str(rpc->crpc_dest));
-                atomic_inc(&sn->sn_brw_errors);
+                cfs_atomic_inc(&sn->sn_brw_errors);
                 rpc->crpc_status = -EBADMSG;
         }
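
brw_test.c exercises the other renamed family: the atomics that count per-session errors in sn_brw_errors. The same aliasing approach covers them; as before, the bodies below are an assumption for illustration rather than the shipped header.

/* Illustrative cfs_ atomic shim in the same spirit as the list one. */
#include <asm/atomic.h>          /* <linux/atomic.h> on newer kernels */

typedef atomic_t cfs_atomic_t;

#define cfs_atomic_read(a)           atomic_read(a)
#define cfs_atomic_set(a, v)         atomic_set(a, v)
#define cfs_atomic_inc(a)            atomic_inc(a)
#define cfs_atomic_dec(a)            atomic_dec(a)
#define cfs_atomic_dec_and_test(a)   atomic_dec_and_test(a)
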
 
index 8b0a1e3..15c23db 100644 (file)
@@ -47,7 +47,7 @@
 #include "console.h"
 
 int
-lst_session_new_ioctl(lstio_session_new_args_t *args) 
+lst_session_new_ioctl(lstio_session_new_args_t *args)
 {
         char      *name;
         int        rc;
@@ -58,26 +58,26 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
             args->lstio_ses_nmlen <= 0 ||
             args->lstio_ses_nmlen > LST_NAME_SIZE)
                 return -EINVAL;
-       
+
         LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1);
         if (name == NULL)
                 return -ENOMEM;
-        
-        if (copy_from_user(name,
-                           args->lstio_ses_namep,
-                           args->lstio_ses_nmlen)) {
+
+        if (cfs_copy_from_user(name,
+                               args->lstio_ses_namep,
+                               args->lstio_ses_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
                 return -EFAULT;
         }
-        
+
         name[args->lstio_ses_nmlen] = 0;
-        
+
         rc = lstcon_session_new(name,
                              args->lstio_ses_key,
                              args->lstio_ses_timeout,
                              args->lstio_ses_force,
                              args->lstio_ses_idp);
-        
+
         LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
 
         return rc;
@@ -135,8 +135,8 @@ lst_debug_ioctl(lstio_debug_args_t *args)
                 if (name == NULL)
                         return -ENOMEM;
 
-                if (copy_from_user(name, args->lstio_dbg_namep,
-                                   args->lstio_dbg_nmlen)) {
+                if (cfs_copy_from_user(name, args->lstio_dbg_namep,
+                                       args->lstio_dbg_nmlen)) {
                         LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
 
                         return -EFAULT;
@@ -211,9 +211,9 @@ lst_group_add_ioctl(lstio_group_add_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_grp_namep,
-                           args->lstio_grp_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_grp_namep,
+                               args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen);
                 return -EFAULT;
         }
@@ -235,9 +235,9 @@ lst_group_del_ioctl(lstio_group_del_args_t *args)
 
         if (args->lstio_grp_key != console_session.ses_key)
                 return -EACCES;
-        
+
         if (args->lstio_grp_namep == NULL ||
-            args->lstio_grp_nmlen <= 0 || 
+            args->lstio_grp_nmlen <= 0 ||
             args->lstio_grp_nmlen > LST_NAME_SIZE)
                 return -EINVAL;
 
@@ -245,9 +245,9 @@ lst_group_del_ioctl(lstio_group_del_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_grp_namep,
-                           args->lstio_grp_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_grp_namep,
+                               args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
                 return -EFAULT;
         }
@@ -280,7 +280,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_grp_namep,
-                           args->lstio_grp_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_grp_namep,
+                               args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
@@ -315,7 +315,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
         }
 
         LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-        
+
         return rc;
 }
 
@@ -340,8 +340,8 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name, args->lstio_grp_namep,
-                           args->lstio_grp_nmlen)) {
+        if (cfs_copy_from_user(name, args->lstio_grp_namep,
+                               args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
 
                 return -EFAULT;
@@ -399,10 +399,11 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
                 if (args->lstio_grp_idxp == NULL || /* node index */
                    args->lstio_grp_ndentp == NULL) /* # of node entries */
                         return -EINVAL;
-                                
-                if (copy_from_user(&ndent,
-                                   args->lstio_grp_ndentp, sizeof(ndent)) ||
-                    copy_from_user(&index, args->lstio_grp_idxp, sizeof(index)))
+
+                if (cfs_copy_from_user(&ndent, args->lstio_grp_ndentp,
+                                       sizeof(ndent)) ||
+                    cfs_copy_from_user(&index, args->lstio_grp_idxp,
+                                       sizeof(index)))
                         return -EFAULT;
 
                 if (ndent <= 0 || index < 0)
@@ -413,9 +414,9 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_grp_namep,
-                           args->lstio_grp_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_grp_namep,
+                               args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
                 return -EFAULT;
         }
@@ -431,8 +432,8 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
                 return rc;
 
         if (args->lstio_grp_dentsp != NULL && 
-            (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
-             copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
+            (cfs_copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
+             cfs_copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
                 rc = -EFAULT;
 
-        return 0;
+        return rc;
@@ -456,9 +457,9 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_bat_namep,
-                           args->lstio_bat_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_bat_namep,
+                               args->lstio_bat_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
                 return -EFAULT;
         }
@@ -490,9 +491,9 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_bat_namep,
-                           args->lstio_bat_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_bat_namep,
+                               args->lstio_bat_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
                 return -EFAULT;
         }
@@ -526,9 +527,9 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_bat_namep,
-                           args->lstio_bat_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_bat_namep,
+                               args->lstio_bat_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
                 return -EFAULT;
         }
@@ -565,9 +566,9 @@ lst_batch_query_ioctl(lstio_batch_query_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_bat_namep,
-                           args->lstio_bat_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_bat_namep,
+                               args->lstio_bat_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
                 return -EFAULT;
         }
@@ -626,9 +627,11 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
                 if (args->lstio_bat_idxp == NULL || /* node index */
                    args->lstio_bat_ndentp == NULL) /* # of node entries */
                         return -EINVAL;
-                                
-                if (copy_from_user(&index, args->lstio_bat_idxp, sizeof(index)) ||
-                    copy_from_user(&ndent, args->lstio_bat_ndentp, sizeof(ndent)))
+
+                if (cfs_copy_from_user(&index, args->lstio_bat_idxp,
+                                       sizeof(index)) ||
+                    cfs_copy_from_user(&ndent, args->lstio_bat_ndentp,
+                                       sizeof(ndent)))
                         return -EFAULT;
 
                 if (ndent <= 0 || index < 0)
@@ -639,8 +642,8 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name,
-                           args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+        if (cfs_copy_from_user(name,
+                               args->lstio_bat_namep, args->lstio_bat_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
                 return -EFAULT;
         }
@@ -658,8 +661,8 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
                 return rc;
 
         if (args->lstio_bat_dentsp != NULL && 
-            (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
-             copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
+            (cfs_copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
+             cfs_copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
                 rc = -EFAULT;
 
         return rc;
@@ -690,8 +693,8 @@ lst_stat_query_ioctl(lstio_stat_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (copy_from_user(name, args->lstio_sta_namep,
-                           args->lstio_sta_nmlen)) {
+        if (cfs_copy_from_user(name, args->lstio_sta_namep,
+                               args->lstio_sta_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
                 return -EFAULT;
         }
@@ -758,17 +761,17 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
         }
 
         rc = -EFAULT;
-        if (copy_from_user(name,
-                           args->lstio_tes_bat_name,
-                           args->lstio_tes_bat_nmlen) ||
-            copy_from_user(srcgrp,
-                           args->lstio_tes_sgrp_name,
-                           args->lstio_tes_sgrp_nmlen) ||
-            copy_from_user(dstgrp,
-                           args->lstio_tes_dgrp_name,
-                           args->lstio_tes_dgrp_nmlen) ||
-            copy_from_user(param, args->lstio_tes_param,
-                           args->lstio_tes_param_len))
+        if (cfs_copy_from_user(name,
+                               args->lstio_tes_bat_name,
+                               args->lstio_tes_bat_nmlen) ||
+            cfs_copy_from_user(srcgrp,
+                               args->lstio_tes_sgrp_name,
+                               args->lstio_tes_sgrp_nmlen) ||
+            cfs_copy_from_user(dstgrp,
+                               args->lstio_tes_dgrp_name,
+                               args->lstio_tes_dgrp_nmlen) ||
+            cfs_copy_from_user(param, args->lstio_tes_param,
+                               args->lstio_tes_param_len))
                 goto out;
 
         rc = lstcon_test_add(name,
@@ -780,8 +783,8 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
                             &ret, args->lstio_tes_resultp);
 
         if (ret != 0)
-                rc = (copy_to_user(args->lstio_tes_retp, &ret, sizeof(ret))) ?
-                     -EFAULT : 0;
+                rc = (cfs_copy_to_user(args->lstio_tes_retp, &ret,
+                                       sizeof(ret))) ? -EFAULT : 0;
 out:
         if (name != NULL)
                 LIBCFS_FREE(name, args->lstio_tes_bat_nmlen + 1);
@@ -816,12 +819,12 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
                 return -ENOMEM;
 
         /* copy in parameter */
-        if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
+        if (cfs_copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
                 LIBCFS_FREE(buf, data->ioc_plen1);
                 return -EFAULT;
         }
 
-        mutex_down(&console_session.ses_mutex);
+        cfs_mutex_down(&console_session.ses_mutex);
 
         console_session.ses_laststamp = cfs_time_current_sec();
 
@@ -841,7 +844,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
         }
 
         memset(&console_session.ses_trans_stat, 0, sizeof(lstcon_trans_stat_t));
-        
+
         switch (opc) {
                 case LSTIO_SESSION_NEW:
                         rc = lst_session_new_ioctl((lstio_session_new_args_t *)buf);
@@ -901,11 +904,11 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
                         rc = -EINVAL;
         }
 
-        if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
-                         sizeof(lstcon_trans_stat_t)))
+        if (cfs_copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
+                             sizeof(lstcon_trans_stat_t)))
                 rc = -EFAULT;
 out:
-        mutex_up(&console_session.ses_mutex);
+        cfs_mutex_up(&console_session.ses_mutex);
 
         LIBCFS_FREE(buf, data->ioc_plen1);
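
Nearly every handler in conctl.c repeats the same four steps before doing real work: validate the user-supplied name length against LST_NAME_SIZE, LIBCFS_ALLOC a buffer one byte larger, cfs_copy_from_user the name, and NUL-terminate it, freeing the buffer on any failure. A hypothetical helper capturing that pattern (it does not exist in the tree; it only reuses calls visible above):

/* Hypothetical helper: copy a user-supplied name into a fresh,
 * NUL-terminated kernel buffer.  On success the caller owns *namep
 * and must LIBCFS_FREE(*namep, nmlen + 1) when done. */
static int lst_name_copy_sketch(char **namep, char *name_up, int nmlen)
{
        char *name;

        if (name_up == NULL || nmlen <= 0 || nmlen > LST_NAME_SIZE)
                return -EINVAL;

        LIBCFS_ALLOC(name, nmlen + 1);
        if (name == NULL)
                return -ENOMEM;

        if (cfs_copy_from_user(name, name_up, nmlen)) {
                LIBCFS_FREE(name, nmlen + 1);
                return -EFAULT;
        }

        name[nmlen] = 0;
        *namep = name;
        return 0;
}

With such a helper, each ioctl body above would shrink to its lstcon_* call plus a single LIBCFS_FREE.
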
 
index d9e0732..313ca9e 100644 (file)
@@ -59,12 +59,12 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
         LASSERT (crpc != NULL && rpc == crpc->crp_rpc);
         LASSERT (crpc->crp_posted && !crpc->crp_finished);
 
-        spin_lock(&rpc->crpc_lock);
+        cfs_spin_lock(&rpc->crpc_lock);
 
         if (crpc->crp_trans == NULL) {
                 /* Orphan RPC is not in any transaction, 
                  * I'm just a poor body and nobody loves me */
-                spin_unlock(&rpc->crpc_lock);
+                cfs_spin_unlock(&rpc->crpc_lock);
 
                 /* release it */
                 lstcon_rpc_put(crpc);
@@ -83,10 +83,10 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
         }
 
         /* wakeup (transaction) thread if I'm the last RPC in the transaction */
-        if (atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+        if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
                 cfs_waitq_signal(&crpc->crp_trans->tas_waitq);
 
-        spin_unlock(&rpc->crpc_lock);
+        cfs_spin_unlock(&rpc->crpc_lock);
 }
 
 int
@@ -110,7 +110,7 @@ lstcon_rpc_init(lstcon_node_t *nd, int service,
         crpc->crp_static   = !cached;
         CFS_INIT_LIST_HEAD(&crpc->crp_link);
 
-        atomic_inc(&console_session.ses_rpc_counter);
+        cfs_atomic_inc(&console_session.ses_rpc_counter);
 
         return 0;
 }
@@ -122,15 +122,15 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service,
         lstcon_rpc_t  *crpc = NULL;
         int            rc;
 
-        spin_lock(&console_session.ses_rpc_lock);
+        cfs_spin_lock(&console_session.ses_rpc_lock);
 
-        if (!list_empty(&console_session.ses_rpc_freelist)) {
-                crpc = list_entry(console_session.ses_rpc_freelist.next,
-                                  lstcon_rpc_t, crp_link);
-                list_del_init(&crpc->crp_link);
+        if (!cfs_list_empty(&console_session.ses_rpc_freelist)) {
+                crpc = cfs_list_entry(console_session.ses_rpc_freelist.next,
+                                      lstcon_rpc_t, crp_link);
+                cfs_list_del_init(&crpc->crp_link);
         }
 
-        spin_unlock(&console_session.ses_rpc_lock);
+        cfs_spin_unlock(&console_session.ses_rpc_lock);
 
         if (crpc == NULL) {
                 LIBCFS_ALLOC(crpc, sizeof(*crpc));
@@ -155,7 +155,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
         srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
         int          i;
 
-        LASSERT (list_empty(&crpc->crp_link));
+        LASSERT (cfs_list_empty(&crpc->crp_link));
 
         for (i = 0; i < bulk->bk_niov; i++) {
                 if (bulk->bk_iovs[i].kiov_page == NULL)
@@ -172,15 +172,16 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
                 crpc->crp_static = 1;
 
         } else {
-                spin_lock(&console_session.ses_rpc_lock);
+                cfs_spin_lock(&console_session.ses_rpc_lock);
 
-                list_add(&crpc->crp_link, &console_session.ses_rpc_freelist);
+                cfs_list_add(&crpc->crp_link,
+                             &console_session.ses_rpc_freelist);
 
-                spin_unlock(&console_session.ses_rpc_lock);
+                cfs_spin_unlock(&console_session.ses_rpc_lock);
         }
 
         /* RPC is not alive now */
-        atomic_dec(&console_session.ses_rpc_counter);
+        cfs_atomic_dec(&console_session.ses_rpc_counter);
 }
 
 void
@@ -190,7 +191,7 @@ lstcon_rpc_post(lstcon_rpc_t *crpc)
 
         LASSERT (trans != NULL);
 
-        atomic_inc(&trans->tas_remaining);
+        cfs_atomic_inc(&trans->tas_remaining);
         crpc->crp_posted = 1;
 
         sfw_post_rpc(crpc->crp_rpc);
@@ -236,7 +237,7 @@ lstcon_rpc_trans_name(int transop)
 }
 
 int
-lstcon_rpc_trans_prep(struct list_head *translist,
+lstcon_rpc_trans_prep(cfs_list_t *translist,
                       int transop, lstcon_rpc_trans_t **transpp)
 {
         lstcon_rpc_trans_t *trans;
@@ -261,12 +262,12 @@ lstcon_rpc_trans_prep(struct list_head *translist,
         if (translist == NULL)       
                 CFS_INIT_LIST_HEAD(&trans->tas_olink);
         else
-                list_add_tail(&trans->tas_olink, translist);
+                cfs_list_add_tail(&trans->tas_olink, translist);
 
-        list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
+        cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
 
         CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
-        atomic_set(&trans->tas_remaining, 0);
+        cfs_atomic_set(&trans->tas_remaining, 0);
         cfs_waitq_init(&trans->tas_waitq);
 
         *transpp = trans;
@@ -277,7 +278,7 @@ lstcon_rpc_trans_prep(struct list_head *translist,
 void
 lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc)
 {
-        list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
+        cfs_list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
         crpc->crp_trans = trans;
 }
 
@@ -292,18 +293,18 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
                                        lstcon_rpc_t, crp_link) {
                 rpc = crpc->crp_rpc;
 
-                spin_lock(&rpc->crpc_lock);
+                cfs_spin_lock(&rpc->crpc_lock);
 
                 if (!crpc->crp_posted || crpc->crp_stamp != 0) {
                         /* rpc done or aborted already */
-                        spin_unlock(&rpc->crpc_lock);
+                        cfs_spin_unlock(&rpc->crpc_lock);
                         continue;
                 }
 
                 crpc->crp_stamp  = cfs_time_current();
                 crpc->crp_status = error;
 
-                spin_unlock(&rpc->crpc_lock);
+                cfs_spin_unlock(&rpc->crpc_lock);
 
                 sfw_abort_rpc(rpc);
 
@@ -323,10 +324,10 @@ static int
 lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
 {
         if (console_session.ses_shutdown &&
-            !list_empty(&trans->tas_olink)) /* It's not an end session RPC */
+            !cfs_list_empty(&trans->tas_olink)) /* Not an end session RPC */
                 return 1;
 
-        return (atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
+        return (cfs_atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
 }
 
 int
@@ -335,7 +336,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
         lstcon_rpc_t  *crpc;
         int            rc;
 
-        if (list_empty(&trans->tas_rpcs_list))
+        if (cfs_list_empty(&trans->tas_rpcs_list))
                 return 0;
 
         if (timeout < LST_TRANS_MIN_TIMEOUT)
@@ -352,7 +353,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
                 lstcon_rpc_post(crpc);
         }
 
-        mutex_up(&console_session.ses_mutex);
+        cfs_mutex_up(&console_session.ses_mutex);
 
         cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
                                               lstcon_rpc_trans_check(trans),
@@ -360,7 +361,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
 
         rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
 
-        mutex_down(&console_session.ses_mutex);
+        cfs_mutex_down(&console_session.ses_mutex);
 
         if (console_session.ses_shutdown)
                 rc = -ESHUTDOWN;
@@ -466,11 +467,11 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
 
 int
 lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
-                             struct list_head *head_up,
+                             cfs_list_t *head_up,
                              lstcon_rpc_readent_func_t readent)
 {
-        struct list_head      tmp;
-        struct list_head     *next;
+        cfs_list_t            tmp;
+        cfs_list_t           *next;
         lstcon_rpc_ent_t     *ent;
         srpc_generic_reply_t *rep;
         srpc_client_rpc_t    *rpc;
@@ -487,7 +488,8 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
 
         cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
                                       lstcon_rpc_t, crp_link) {
-                if (copy_from_user(&tmp, next, sizeof(struct list_head)))
+                if (cfs_copy_from_user(&tmp, next,
+                                       sizeof(cfs_list_t)))
                         return -EFAULT;
 
                 if (tmp.next == head_up)
@@ -495,7 +497,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
 
                 next = tmp.next;
 
-                ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
+                ent = cfs_list_entry(next, lstcon_rpc_ent_t, rpe_link);
 
                 rpc = crpc->crp_rpc;
 
@@ -509,12 +511,13 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
                       (cfs_time_t)console_session.ses_id.ses_stamp);
                 cfs_duration_usec(dur, &tv);
 
-                if (copy_to_user(&ent->rpe_peer,
-                                 &nd->nd_id, sizeof(lnet_process_id_t)) ||
-                    copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
-                    copy_to_user(&ent->rpe_state,
-                                 &nd->nd_state, sizeof(nd->nd_state)) ||
-                    copy_to_user(&ent->rpe_rpc_errno, &error, sizeof(error)))
+                if (cfs_copy_to_user(&ent->rpe_peer,
+                                     &nd->nd_id, sizeof(lnet_process_id_t)) ||
+                    cfs_copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
+                    cfs_copy_to_user(&ent->rpe_state,
+                                     &nd->nd_state, sizeof(nd->nd_state)) ||
+                    cfs_copy_to_user(&ent->rpe_rpc_errno, &error,
+                                     sizeof(error)))
                         return -EFAULT;
 
                 if (error != 0)
@@ -523,10 +526,10 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
                 /* RPC is done */
                 rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
 
-                if (copy_to_user(&ent->rpe_sid,
-                                 &rep->sid, sizeof(lst_sid_t)) ||
-                    copy_to_user(&ent->rpe_fwk_errno,
-                                 &rep->status, sizeof(rep->status)))
+                if (cfs_copy_to_user(&ent->rpe_sid,
+                                     &rep->sid, sizeof(lst_sid_t)) ||
+                    cfs_copy_to_user(&ent->rpe_fwk_errno,
+                                     &rep->status, sizeof(rep->status)))
                         return -EFAULT;
 
                 if (readent == NULL)
@@ -546,19 +549,19 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
         lstcon_rpc_t      *crpc;
         lstcon_rpc_t      *tmp;
         int                count = 0;
-        
+
         cfs_list_for_each_entry_safe_typed(crpc, tmp,
                                            &trans->tas_rpcs_list,
                                            lstcon_rpc_t, crp_link) {
                 rpc = crpc->crp_rpc;
 
-                spin_lock(&rpc->crpc_lock);
+                cfs_spin_lock(&rpc->crpc_lock);
 
                 /* free it if not posted or finished already */
                 if (!crpc->crp_posted || crpc->crp_finished) {
-                        spin_unlock(&rpc->crpc_lock);
+                        cfs_spin_unlock(&rpc->crpc_lock);
 
-                        list_del_init(&crpc->crp_link);
+                        cfs_list_del_init(&crpc->crp_link);
                         lstcon_rpc_put(crpc);
 
                         continue;
@@ -566,26 +569,26 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 
-                /* rpcs can be still not callbacked (even LNetMDUnlink is called)
-                 * because huge timeout for inaccessible network, don't make
-                 * user wait for them, just abandon them, they will be recycled 
-                 * in callback */
+                /* RPCs may still not have been called back (even though
+                 * LNetMDUnlink was called) because of the huge timeout on an
+                 * inaccessible network; don't make the user wait for them,
+                 * just abandon them, they will be recycled in the callback */
 
                 LASSERT (crpc->crp_status != 0);
 
                 crpc->crp_node  = NULL;
                 crpc->crp_trans = NULL;
-                list_del_init(&crpc->crp_link);
+                cfs_list_del_init(&crpc->crp_link);
                 count ++;
 
-                spin_unlock(&rpc->crpc_lock);
+                cfs_spin_unlock(&rpc->crpc_lock);
 
-                atomic_dec(&trans->tas_remaining);
+                cfs_atomic_dec(&trans->tas_remaining);
         }
 
-        LASSERT (atomic_read(&trans->tas_remaining) == 0);
+        LASSERT (cfs_atomic_read(&trans->tas_remaining) == 0);
 
-        list_del(&trans->tas_link);
-        if (!list_empty(&trans->tas_olink))
-                list_del(&trans->tas_olink);
+        cfs_list_del(&trans->tas_link);
+        if (!cfs_list_empty(&trans->tas_olink))
+                cfs_list_del(&trans->tas_olink);
 
         CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n",
                lstcon_rpc_trans_name(trans->tas_opc), count);
@@ -1009,8 +1012,8 @@ lstcon_rpc_stat_reply(int transop, srpc_msg_t *msg,
 }
 
 int
-lstcon_rpc_trans_ndlist(struct list_head *ndlist,
-                        struct list_head *translist, int transop,
+lstcon_rpc_trans_ndlist(cfs_list_t *ndlist,
+                        cfs_list_t *translist, int transop,
                         void *arg, lstcon_rpc_cond_func_t condition,
                         lstcon_rpc_trans_t **transpp)
 {
@@ -1108,10 +1111,10 @@ lstcon_rpc_pinger(void *arg)
         /* The RPC pinger is a special case of transaction;
          * it is called by a timer at 8-second intervals.
          */
-        mutex_down(&console_session.ses_mutex);
+        cfs_mutex_down(&console_session.ses_mutex);
 
         if (console_session.ses_shutdown || console_session.ses_expired) {
-                mutex_up(&console_session.ses_mutex);
+                cfs_mutex_up(&console_session.ses_mutex);
                 return;
         }
 
@@ -1149,23 +1152,23 @@ lstcon_rpc_pinger(void *arg)
 
                 if (crpc->crp_rpc != NULL) {
                         LASSERT (crpc->crp_trans == trans);
-                        LASSERT (!list_empty(&crpc->crp_link));
+                        LASSERT (!cfs_list_empty(&crpc->crp_link));
 
-                        spin_lock(&crpc->crp_rpc->crpc_lock);
+                        cfs_spin_lock(&crpc->crp_rpc->crpc_lock);
 
                         LASSERT (crpc->crp_posted);
 
                         if (!crpc->crp_finished) {
                                 /* in flight */
-                                spin_unlock(&crpc->crp_rpc->crpc_lock);
+                                cfs_spin_unlock(&crpc->crp_rpc->crpc_lock);
                                 continue;
                         }
 
-                        spin_unlock(&crpc->crp_rpc->crpc_lock);
+                        cfs_spin_unlock(&crpc->crp_rpc->crpc_lock);
 
                         lstcon_rpc_get_reply(crpc, &rep);
 
-                        list_del_init(&crpc->crp_link);
+                        cfs_list_del_init(&crpc->crp_link);
                 
                         lstcon_rpc_put(crpc);
                 }
@@ -1196,7 +1199,7 @@ lstcon_rpc_pinger(void *arg)
         }
 
         if (console_session.ses_expired) {
-                mutex_up(&console_session.ses_mutex);
+                cfs_mutex_up(&console_session.ses_mutex);
                 return;
         }
 
@@ -1205,7 +1208,7 @@ lstcon_rpc_pinger(void *arg)
         ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
         stt_add_timer(ptimer);
 
-        mutex_up(&console_session.ses_mutex);
+        cfs_mutex_up(&console_session.ses_mutex);
 }
 
 int
@@ -1214,8 +1217,8 @@ lstcon_rpc_pinger_start(void)
         stt_timer_t    *ptimer;
         int             rc;
 
-        LASSERT (list_empty(&console_session.ses_rpc_freelist));
-        LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+        LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
+        LASSERT (cfs_atomic_read(&console_session.ses_rpc_counter) == 0);
 
         rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
                                    &console_session.ses_ping);
@@ -1253,16 +1256,17 @@ lstcon_rpc_cleanup_wait(void)
 {
         lstcon_rpc_trans_t *trans;
         lstcon_rpc_t       *crpc;
-        struct list_head   *pacer;
-        struct list_head    zlist;
+        cfs_list_t         *pacer;
+        cfs_list_t          zlist;
 
        /* Called with the global mutex held */
 
         LASSERT (console_session.ses_shutdown);
 
-        while (!list_empty(&console_session.ses_trans_list)) { 
-                list_for_each(pacer, &console_session.ses_trans_list) {
-                        trans = list_entry(pacer, lstcon_rpc_trans_t, tas_link);
+        while (!cfs_list_empty(&console_session.ses_trans_list)) { 
+                cfs_list_for_each(pacer, &console_session.ses_trans_list) {
+                        trans = cfs_list_entry(pacer, lstcon_rpc_trans_t,
+                                               tas_link);
 
                         CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
                                lstcon_rpc_trans_name(trans->tas_opc));
@@ -1270,32 +1274,32 @@ lstcon_rpc_cleanup_wait(void)
                         cfs_waitq_signal(&trans->tas_waitq);
                 }
 
-                mutex_up(&console_session.ses_mutex);
+                cfs_mutex_up(&console_session.ses_mutex);
 
                 CWARN("Session is shutting down, "
                       "waiting for termination of transactions\n");
                 cfs_pause(cfs_time_seconds(1));
 
-                mutex_down(&console_session.ses_mutex);
+                cfs_mutex_down(&console_session.ses_mutex);
         }
 
-        spin_lock(&console_session.ses_rpc_lock);
+        cfs_spin_lock(&console_session.ses_rpc_lock);
 
-        lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
+        lst_wait_until((cfs_atomic_read(&console_session.ses_rpc_counter) == 0),
                        console_session.ses_rpc_lock,
                        "Network is not accessable or target is down, "
                        "waiting for %d console RPCs to being recycled\n",
-                       atomic_read(&console_session.ses_rpc_counter));
+                       cfs_atomic_read(&console_session.ses_rpc_counter));
 
-        list_add(&zlist, &console_session.ses_rpc_freelist);
-        list_del_init(&console_session.ses_rpc_freelist);
+        cfs_list_add(&zlist, &console_session.ses_rpc_freelist);
+        cfs_list_del_init(&console_session.ses_rpc_freelist);
 
-        spin_unlock(&console_session.ses_rpc_lock);
+        cfs_spin_unlock(&console_session.ses_rpc_lock);
 
-        while (!list_empty(&zlist)) {
-                crpc = list_entry(zlist.next, lstcon_rpc_t, crp_link);
+        while (!cfs_list_empty(&zlist)) {
+                crpc = cfs_list_entry(zlist.next, lstcon_rpc_t, crp_link);
 
-                list_del(&crpc->crp_link);
+                cfs_list_del(&crpc->crp_link);
                 LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
         }
 }
@@ -1309,8 +1313,8 @@ lstcon_rpc_module_init(void)
 
         console_session.ses_ping = NULL;
 
-        spin_lock_init(&console_session.ses_rpc_lock);
-        atomic_set(&console_session.ses_rpc_counter, 0);
+        cfs_spin_lock_init(&console_session.ses_rpc_lock);
+        cfs_atomic_set(&console_session.ses_rpc_counter, 0);
         CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
 
         return 0;
@@ -1319,8 +1323,8 @@ lstcon_rpc_module_init(void)
 void
 lstcon_rpc_module_fini(void)
 {
-        LASSERT (list_empty(&console_session.ses_rpc_freelist));
-        LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+        LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
+        LASSERT (cfs_atomic_read(&console_session.ses_rpc_counter) == 0);
 }
 
 #endif
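
One idiom in lstcon_rpc_cleanup_wait above is worth spelling out: the freelist is drained with a two-step splice. cfs_list_add(&zlist, head) inserts the local head into the circular list, and cfs_list_del_init(head) then unlinks the global head, leaving every entry chained under zlist and the global list empty, so the entries can be freed after the spinlock is dropped. A standalone illustration, with sketch_item_t invented for the example:

/* Standalone sketch of the splice-and-drain idiom used above. */
typedef struct {
        cfs_list_t it_link;
} sketch_item_t;                         /* hypothetical entry type */

static void drain_sketch(cfs_list_t *global)
{
        cfs_list_t     zlist;
        sketch_item_t *it;

        cfs_list_add(&zlist, global);    /* local head joins the ring   */
        cfs_list_del_init(global);       /* global head drops out empty */

        /* entries can now be freed without touching the global head */
        while (!cfs_list_empty(&zlist)) {
                it = cfs_list_entry(zlist.next, sketch_item_t, it_link);
                cfs_list_del(&it->it_link);
                LIBCFS_FREE(it, sizeof(*it));
        }
}
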
index 1bb4963..1fc0f46 100644 (file)
@@ -65,26 +65,26 @@ struct lstcon_test;
 struct lstcon_node;
 
 typedef struct lstcon_rpc {
-        struct list_head        crp_link;       /* chain on rpc transaction */
-        srpc_client_rpc_t      *crp_rpc;        /* client rpc */
-        struct lstcon_node     *crp_node;       /* destination node */
+        cfs_list_t               crp_link;       /* chain on rpc transaction */
+        srpc_client_rpc_t       *crp_rpc;        /* client rpc */
+        struct lstcon_node      *crp_node;       /* destination node */
-        struct lstcon_rpc_trans *crp_trans;     /* conrpc transaction */
+        struct lstcon_rpc_trans *crp_trans;      /* conrpc transaction */
 
-        int                     crp_posted:1;   /* rpc is posted */
-        int                     crp_finished:1; /* rpc is finished */
-        int                     crp_unpacked:1; /* reply is unpacked */
-        int                     crp_static:1;   /* not from RPC buffer */
-        int                     crp_status;     /* console rpc errors */
-        cfs_time_t              crp_stamp;      /* replied time stamp */
+        int                      crp_posted:1;   /* rpc is posted */
+        int                      crp_finished:1; /* rpc is finished */
+        int                      crp_unpacked:1; /* reply is unpacked */
+        int                      crp_static:1;   /* not from RPC buffer */
+        int                      crp_status;     /* console rpc errors */
+        cfs_time_t               crp_stamp;      /* replied time stamp */
 } lstcon_rpc_t;
 
 typedef struct lstcon_rpc_trans {
-        struct list_head        tas_olink;      /* link chain on owner list */
-        struct list_head        tas_link;       /* link chain on global list */
-        int                     tas_opc;        /* operation code of transaction */
-        cfs_waitq_t             tas_waitq;      /* wait queue head */
-        atomic_t                tas_remaining;  /* # of un-scheduled rpcs */
-        struct list_head        tas_rpcs_list;  /* queued requests */
+        cfs_list_t            tas_olink;     /* link chain on owner list */
+        cfs_list_t            tas_link;      /* link chain on global list */
+        int                   tas_opc;       /* operation code of transaction */
+        cfs_waitq_t           tas_waitq;     /* wait queue head */
+        cfs_atomic_t          tas_remaining; /* # of un-scheduled rpcs */
+        cfs_list_t            tas_rpcs_list; /* queued requests */
 } lstcon_rpc_trans_t;
 
 #define LST_TRANS_PRIVATE       0x1000
@@ -115,16 +115,16 @@ int  lstcon_testrpc_prep(struct lstcon_node *nd, int transop,
                          struct lstcon_test *test, lstcon_rpc_t **crpc);
 int  lstcon_statrpc_prep(struct lstcon_node *nd, lstcon_rpc_t **crpc);
 void lstcon_rpc_put(lstcon_rpc_t *crpc);
-int  lstcon_rpc_trans_prep(struct list_head *translist,
+int  lstcon_rpc_trans_prep(cfs_list_t *translist,
                            int transop, lstcon_rpc_trans_t **transpp);
-int  lstcon_rpc_trans_ndlist(struct list_head *ndlist,
-                             struct list_head *translist, int transop,
+int  lstcon_rpc_trans_ndlist(cfs_list_t *ndlist,
+                             cfs_list_t *translist, int transop,
                              void *arg, lstcon_rpc_cond_func_t condition,
                              lstcon_rpc_trans_t **transpp);
 void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
                            lstcon_trans_stat_t *stat);
 int  lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
-                                  struct list_head *head_up,
+                                  cfs_list_t *head_up,
                                   lstcon_rpc_readent_func_t readent);
 void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
 void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
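
The tas_remaining/tas_waitq pair in lstcon_rpc_trans_t is a small completion counter: each posted RPC increments it, each completed RPC decrements it, and the last completion signals the wait queue that lstcon_rpc_trans_postwait sleeps on. A hedged sketch of just that protocol, using the cfs_ primitives from this header (trans_sketch_t abbreviates the two relevant fields and is not the console code itself):

typedef struct {
        cfs_waitq_t  tas_waitq;          /* waiter parks here           */
        cfs_atomic_t tas_remaining;      /* # of RPCs still outstanding */
} trans_sketch_t;

static void sketch_post_one(trans_sketch_t *trans)
{
        cfs_atomic_inc(&trans->tas_remaining);   /* one more in flight */
}

static void sketch_rpc_done(trans_sketch_t *trans)
{
        /* the last RPC to complete wakes the transaction thread */
        if (cfs_atomic_dec_and_test(&trans->tas_remaining))
                cfs_waitq_signal(&trans->tas_waitq);
}
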
index c482748..3b09de3 100644 (file)
@@ -110,8 +110,8 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
         /* queued in the global hash & list; no refcount is taken by the
          * global hash & list, so if the caller releases its refcount,
          * the node will be released */
-        list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
-        list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
+        cfs_list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
+        cfs_list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
 
         return 0;
 }
@@ -128,18 +128,18 @@ lstcon_node_put(lstcon_node_t *nd)
 
         ndl = (lstcon_ndlink_t *)(nd + 1);
 
-        LASSERT (!list_empty(&ndl->ndl_link));
-        LASSERT (!list_empty(&ndl->ndl_hlink));
+        LASSERT (!cfs_list_empty(&ndl->ndl_link));
+        LASSERT (!cfs_list_empty(&ndl->ndl_hlink));
 
         /* remove from session */
-        list_del(&ndl->ndl_link);
-        list_del(&ndl->ndl_hlink);
+        cfs_list_del(&ndl->ndl_link);
+        cfs_list_del(&ndl->ndl_hlink);
 
         LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
 }
 
 static int
-lstcon_ndlink_find(struct list_head *hash,
+lstcon_ndlink_find(cfs_list_t *hash,
                    lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create)
 {
         unsigned int     idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
@@ -179,7 +179,7 @@ lstcon_ndlink_find(struct list_head *hash,
 
         ndl->ndl_node = nd;
         CFS_INIT_LIST_HEAD(&ndl->ndl_link);
-        list_add_tail(&ndl->ndl_hlink, &hash[idx]);
+        cfs_list_add_tail(&ndl->ndl_hlink, &hash[idx]);
 
         return  0;
 }
@@ -187,10 +187,10 @@ lstcon_ndlink_find(struct list_head *hash,
 static void
 lstcon_ndlink_release(lstcon_ndlink_t *ndl)
 {
-        LASSERT (list_empty(&ndl->ndl_link));
-        LASSERT (!list_empty(&ndl->ndl_hlink));
+        LASSERT (cfs_list_empty(&ndl->ndl_link));
+        LASSERT (!cfs_list_empty(&ndl->ndl_hlink));
 
-        list_del(&ndl->ndl_hlink); /* delete from hash */
+        cfs_list_del(&ndl->ndl_hlink); /* delete from hash */
         lstcon_node_put(ndl->ndl_node);
 
         LIBCFS_FREE(ndl, sizeof(*ndl));
@@ -255,13 +255,13 @@ lstcon_group_decref(lstcon_group_t *grp)
         if (--grp->grp_ref > 0)
                 return;
 
-        if (!list_empty(&grp->grp_link))
-                list_del(&grp->grp_link);
+        if (!cfs_list_empty(&grp->grp_link))
+                cfs_list_del(&grp->grp_link);
 
         lstcon_group_drain(grp, 0);
 
         for (i = 0; i < LST_NODE_HASHSIZE; i++) {
-                LASSERT (list_empty(&grp->grp_ndl_hash[i]));
+                LASSERT (cfs_list_empty(&grp->grp_ndl_hash[i]));
         }
 
         LIBCFS_FREE(grp, offsetof(lstcon_group_t,
@@ -302,10 +302,10 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
         if (rc != 0)
                 return rc;
 
-        if (!list_empty(&(*ndlpp)->ndl_link))
+        if (!cfs_list_empty(&(*ndlpp)->ndl_link))
                 return 0;
 
-        list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
+        cfs_list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
         grp->grp_nnode ++;
 
         return 0;
@@ -314,7 +314,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
 static void
 lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
 {
-        list_del_init(&ndl->ndl_link);
+        cfs_list_del_init(&ndl->ndl_link);
         lstcon_ndlink_release(ndl);
         grp->grp_nnode --;
 }
@@ -326,12 +326,12 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
         unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
                            LST_NODE_HASHSIZE;
 
-        list_del(&ndl->ndl_hlink);
-        list_del(&ndl->ndl_link);
+        cfs_list_del(&ndl->ndl_hlink);
+        cfs_list_del(&ndl->ndl_link);
         old->grp_nnode --;
 
-        list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
-        list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
+        cfs_list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
+        cfs_list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
         new->grp_nnode ++;
 
         return;
@@ -342,9 +342,9 @@ lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
 {
         lstcon_ndlink_t *ndl;
 
-        while (!list_empty(&old->grp_ndl_list)) {
-                ndl = list_entry(old->grp_ndl_list.next,
-                                 lstcon_ndlink_t, ndl_link);
+        while (!cfs_list_empty(&old->grp_ndl_list)) {
+                ndl = cfs_list_entry(old->grp_ndl_list.next,
+                                     lstcon_ndlink_t, ndl_link);
                 lstcon_group_ndlink_move(old, new, ndl);
         }
 }
@@ -392,10 +392,10 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
         case LST_TRANS_SESQRY:
                 rep = &msg->msg_body.dbg_reply;
 
-                if (copy_to_user(&ent_up->rpe_priv[0],
-                                 &rep->dbg_timeout, sizeof(int)) ||
-                    copy_to_user(&ent_up->rpe_payload[0],
-                                 &rep->dbg_name, LST_NAME_SIZE))
+                if (cfs_copy_to_user(&ent_up->rpe_priv[0],
+                                     &rep->dbg_timeout, sizeof(int)) ||
+                    cfs_copy_to_user(&ent_up->rpe_payload[0],
+                                     &rep->dbg_name, LST_NAME_SIZE))
                         return -EFAULT;
 
                 return 0;
@@ -409,7 +409,8 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
 
 static int
 lstcon_group_nodes_add(lstcon_group_t *grp, int count,
-                       lnet_process_id_t *ids_up, struct list_head *result_up)
+                       lnet_process_id_t *ids_up,
+                       cfs_list_t *result_up)
 {
         lstcon_rpc_trans_t      *trans;
         lstcon_ndlink_t         *ndl;
@@ -425,7 +426,7 @@ lstcon_group_nodes_add(lstcon_group_t *grp, int count,
         }
 
         for (i = 0 ; i < count; i++) {
-                if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+                if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
                         rc = -EFAULT;
                         break;
                 }
@@ -459,7 +460,7 @@ lstcon_group_nodes_add(lstcon_group_t *grp, int count,
 
         /* post all RPCs */
         lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-        
+
         rc = lstcon_rpc_trans_interpreter(trans, result_up,
                                           lstcon_sesrpc_readent);
         /* destroy all RPGs */
@@ -474,7 +475,7 @@ lstcon_group_nodes_add(lstcon_group_t *grp, int count,
 static int
 lstcon_group_nodes_remove(lstcon_group_t *grp,
                           int count, lnet_process_id_t *ids_up,
-                          struct list_head *result_up)
+                          cfs_list_t *result_up)
 {
         lstcon_rpc_trans_t     *trans;
         lstcon_ndlink_t        *ndl;
@@ -492,7 +493,7 @@ lstcon_group_nodes_remove(lstcon_group_t *grp,
         }
 
         for (i = 0; i < count; i++) {
-                if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+                if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
                         rc = -EFAULT;
                         goto error;
                 }
@@ -545,14 +546,14 @@ lstcon_group_add(char *name)
                 return -ENOMEM;
         }
 
-        list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
+        cfs_list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
 
         return rc;
 }
 
 int
 lstcon_nodes_add(char *name, int count,
-                 lnet_process_id_t *ids_up, struct list_head *result_up)
+                 lnet_process_id_t *ids_up, cfs_list_t *result_up)
 {
         lstcon_group_t         *grp;
         int                     rc;
@@ -648,7 +649,7 @@ lstcon_group_clean(char *name, int args)
 
         lstcon_group_put(grp);
         /* release empty group */
-        if (list_empty(&grp->grp_ndl_list))
+        if (cfs_list_empty(&grp->grp_ndl_list))
                 lstcon_group_put(grp);
 
         return 0;
@@ -656,7 +657,7 @@ lstcon_group_clean(char *name, int args)
 
 int
 lstcon_nodes_remove(char *name, int count,
-                    lnet_process_id_t *ids_up, struct list_head *result_up)
+                    lnet_process_id_t *ids_up, cfs_list_t *result_up)
 {
         lstcon_group_t *grp = NULL;
         int             rc;
@@ -678,14 +679,14 @@ lstcon_nodes_remove(char *name, int count,
 
         lstcon_group_put(grp);
         /* release empty group */
-        if (list_empty(&grp->grp_ndl_list))
+        if (cfs_list_empty(&grp->grp_ndl_list))
                 lstcon_group_put(grp);
 
         return rc;
 }
 
 int
-lstcon_group_refresh(char *name, struct list_head *result_up)
+lstcon_group_refresh(char *name, cfs_list_t *result_up)
 {
         lstcon_rpc_trans_t      *trans;
         lstcon_group_t          *grp;
@@ -737,7 +738,7 @@ lstcon_group_list(int index, int len, char *name_up)
         cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
                                       lstcon_group_t, grp_link) {
                 if (index-- == 0) {
-                        return copy_to_user(name_up, grp->grp_name, len) ?
+                        return cfs_copy_to_user(name_up, grp->grp_name, len) ?
                                -EFAULT : 0;
                 }
         }
@@ -746,7 +747,7 @@ lstcon_group_list(int index, int len, char *name_up)
 }
 
 static int
-lstcon_nodes_getent(struct list_head *head, int *index_p,
+lstcon_nodes_getent(cfs_list_t *head, int *index_p,
                     int *count_p, lstcon_node_ent_t *dents_up)
 {
         lstcon_ndlink_t  *ndl;
@@ -767,10 +768,10 @@ lstcon_nodes_getent(struct list_head *head, int *index_p,
                         break;
 
                 nd = ndl->ndl_node;
-                if (copy_to_user(&dents_up[count].nde_id,
-                                 &nd->nd_id, sizeof(nd->nd_id)) ||
-                    copy_to_user(&dents_up[count].nde_state,
-                                 &nd->nd_state, sizeof(nd->nd_state)))
+                if (cfs_copy_to_user(&dents_up[count].nde_id,
+                                     &nd->nd_id, sizeof(nd->nd_id)) ||
+                    cfs_copy_to_user(&dents_up[count].nde_state,
+                                     &nd->nd_state, sizeof(nd->nd_state)))
                         return -EFAULT;
 
                 count ++;
@@ -824,11 +825,11 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
                                       lstcon_ndlink_t, ndl_link)
                 LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
 
-        rc = copy_to_user(gents_p, gentp,
-                          sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
+        rc = cfs_copy_to_user(gents_p, gentp,
+                              sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
 
         LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
-                
+
         lstcon_group_put(grp);
 
         return 0;
@@ -870,7 +871,7 @@ lstcon_batch_add(char *name)
         }
 
         LIBCFS_ALLOC(bat->bat_cli_hash,
-                     sizeof(struct list_head) * LST_NODE_HASHSIZE);
+                     sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
         if (bat->bat_cli_hash == NULL) {
                 CERROR("Can't allocate hash for batch %s\n", name);
                 LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
@@ -879,7 +880,7 @@ lstcon_batch_add(char *name)
         }
 
         LIBCFS_ALLOC(bat->bat_srv_hash,
-                     sizeof(struct list_head) * LST_NODE_HASHSIZE);
+                     sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
         if (bat->bat_srv_hash == NULL) {
                 CERROR("Can't allocate hash for batch %s\n", name);
                 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
@@ -905,7 +906,7 @@ lstcon_batch_add(char *name)
                 CFS_INIT_LIST_HEAD(&bat->bat_srv_hash[i]);
         }
 
-        list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
+        cfs_list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
 
         return rc;
 }
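
Hunks like the one above swap sizeof(struct list_head) for sizeof(cfs_list_t) in allocation sizes, which is only safe if the alias has the same size on every port. A hypothetical compile-time guard for the Linux case (the typedef is assumed, not quoted from the patch):

    #include <linux/list.h>

    typedef struct list_head cfs_list_t;    /* assumed Linux-side alias */

    /* negative array size => compile error if the sizes ever diverge */
    typedef char cfs_list_size_check[(sizeof(cfs_list_t) ==
                                      sizeof(struct list_head)) ? 1 : -1];
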
@@ -921,7 +922,7 @@ lstcon_batch_list(int index, int len, char *name_up)
         cfs_list_for_each_entry_typed(bat, &console_session.ses_bat_list,
                                       lstcon_batch_t, bat_link) {
                 if (index-- == 0) {
-                        return copy_to_user(name_up,bat->bat_name, len) ?
+                        return cfs_copy_to_user(name_up, bat->bat_name, len) ?
                                -EFAULT: 0;
                 }
         }
@@ -935,8 +936,8 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
                   lstcon_node_ent_t *dents_up)
 {
         lstcon_test_batch_ent_t *entp;
-        struct list_head        *clilst;
-        struct list_head        *srvlst;
+        cfs_list_t              *clilst;
+        cfs_list_t              *srvlst;
         lstcon_test_t           *test = NULL;
         lstcon_batch_t          *bat;
         lstcon_ndlink_t         *ndl;
@@ -997,8 +998,8 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
         cfs_list_for_each_entry_typed(ndl, srvlst, lstcon_ndlink_t, ndl_link)
                 LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
 
-        rc = copy_to_user(ent_up, entp,
-                          sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
+        rc = cfs_copy_to_user(ent_up, entp,
+                              sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
 
         LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
 
@@ -1028,7 +1029,8 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
 }
 
 static int
-lstcon_batch_op(lstcon_batch_t *bat, int transop, struct list_head *result_up)
+lstcon_batch_op(lstcon_batch_t *bat, int transop,
+                cfs_list_t *result_up)
 {
         lstcon_rpc_trans_t *trans;
         int                 rc;
@@ -1051,7 +1053,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop, struct list_head *result_up)
 }
 
 int
-lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
+lstcon_batch_run(char *name, int timeout, cfs_list_t *result_up)
 {
         lstcon_batch_t *bat;
         int             rc;
@@ -1073,7 +1075,7 @@ lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
 }
 
 int
-lstcon_batch_stop(char *name, int force, struct list_head *result_up)
+lstcon_batch_stop(char *name, int force, cfs_list_t *result_up)
 {
         lstcon_batch_t *bat;
         int             rc;
@@ -1101,14 +1103,14 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
         lstcon_test_t      *test;
         int                 i;
 
-        list_del(&bat->bat_link);
+        cfs_list_del(&bat->bat_link);
 
-        while (!list_empty(&bat->bat_test_list)) {
-                test = list_entry(bat->bat_test_list.next,
-                                  lstcon_test_t, tes_link);
-                LASSERT (list_empty(&test->tes_trans_list));
+        while (!cfs_list_empty(&bat->bat_test_list)) {
+                test = cfs_list_entry(bat->bat_test_list.next,
+                                      lstcon_test_t, tes_link);
+                LASSERT (cfs_list_empty(&test->tes_trans_list));
 
-                list_del(&test->tes_link);
+                cfs_list_del(&test->tes_link);
 
                 lstcon_group_put(test->tes_src_grp);
                 lstcon_group_put(test->tes_dst_grp);
@@ -1117,33 +1119,33 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
                                            tes_param[test->tes_paramlen]));
         }
 
-        LASSERT (list_empty(&bat->bat_trans_list));
+        LASSERT (cfs_list_empty(&bat->bat_trans_list));
 
-        while (!list_empty(&bat->bat_cli_list)) {
-                ndl = list_entry(bat->bat_cli_list.next,
-                                 lstcon_ndlink_t, ndl_link);
-                list_del_init(&ndl->ndl_link);
+        while (!cfs_list_empty(&bat->bat_cli_list)) {
+                ndl = cfs_list_entry(bat->bat_cli_list.next,
+                                     lstcon_ndlink_t, ndl_link);
+                cfs_list_del_init(&ndl->ndl_link);
 
                 lstcon_ndlink_release(ndl);
         }
 
-        while (!list_empty(&bat->bat_srv_list)) {
-                ndl = list_entry(bat->bat_srv_list.next,
-                                 lstcon_ndlink_t, ndl_link);
-                list_del_init(&ndl->ndl_link);
+        while (!cfs_list_empty(&bat->bat_srv_list)) {
+                ndl = cfs_list_entry(bat->bat_srv_list.next,
+                                     lstcon_ndlink_t, ndl_link);
+                cfs_list_del_init(&ndl->ndl_link);
 
                 lstcon_ndlink_release(ndl);
         }
 
         for (i = 0; i < LST_NODE_HASHSIZE; i++) {
-                LASSERT (list_empty(&bat->bat_cli_hash[i]));
-                LASSERT (list_empty(&bat->bat_srv_hash[i]));
+                LASSERT (cfs_list_empty(&bat->bat_cli_hash[i]));
+                LASSERT (cfs_list_empty(&bat->bat_srv_hash[i]));
         }
 
         LIBCFS_FREE(bat->bat_cli_hash,
-                    sizeof(struct list_head) * LST_NODE_HASHSIZE);
+                    sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
         LIBCFS_FREE(bat->bat_srv_hash,
-                    sizeof(struct list_head) * LST_NODE_HASHSIZE);
+                    sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
         LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
 }
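
The list-walking conversions in lstcon_batch_destroy() above are mechanical: each list_* helper gains the cfs_ prefix with unchanged arguments. A sketch of the forwarding layer this implies on Linux (assumed one-to-one mapping; the real libcfs list header may define more than shown here):

    #include <linux/list.h>

    #define cfs_list_empty(head)             list_empty(head)
    #define cfs_list_entry(ptr, type, mbr)   list_entry(ptr, type, mbr)
    #define cfs_list_add(entry, head)        list_add(entry, head)
    #define cfs_list_add_tail(entry, head)   list_add_tail(entry, head)
    #define cfs_list_del(entry)              list_del(entry)
    #define cfs_list_del_init(entry)         list_del_init(entry)
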
 
@@ -1153,8 +1155,8 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
         lstcon_test_t    *test;
         lstcon_batch_t   *batch;
         lstcon_ndlink_t  *ndl;
-        struct list_head *hash;
-        struct list_head *head;
+        cfs_list_t       *hash;
+        cfs_list_t       *head;
 
         test = (lstcon_test_t *)arg;
         LASSERT (test != NULL);
@@ -1185,14 +1187,14 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
         if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
                 return -ENOMEM;
 
-        if (list_empty(&ndl->ndl_link))
-                list_add_tail(&ndl->ndl_link, head);
+        if (cfs_list_empty(&ndl->ndl_link))
+                cfs_list_add_tail(&ndl->ndl_link, head);
 
         return 1;
 }
 
 static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up)
+lstcon_test_nodes_add(lstcon_test_t *test, cfs_list_t *result_up)
 {
         lstcon_rpc_trans_t     *trans;
         lstcon_group_t         *grp;
@@ -1246,8 +1248,8 @@ again:
 int
 lstcon_test_add(char *name, int type, int loop, int concur,
                 int dist, int span, char *src_name, char * dst_name,
-                void *param, int paramlen, int *retp, struct list_head *result_up)
-                
+                void *param, int paramlen, int *retp,
+                cfs_list_t *result_up)
 {
         lstcon_group_t  *src_grp = NULL;
         lstcon_group_t  *dst_grp = NULL;
@@ -1319,7 +1321,7 @@ lstcon_test_add(char *name, int type, int loop, int concur,
                 CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type, name);
 
         /* add to test list anyway, so user can check what's going on */
-        list_add_tail(&test->tes_link, &batch->bat_test_list);
+        cfs_list_add_tail(&test->tes_link, &batch->bat_test_list);
 
         batch->bat_ntest ++;
         test->tes_hdr.tsb_index = batch->bat_ntest;
@@ -1365,8 +1367,8 @@ lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
                  transop == LST_TRANS_TSBSRVQRY);
 
         /* positive errno, framework error code */
-        if (copy_to_user(&ent_up->rpe_priv[0],
-                         &rep->bar_active, sizeof(rep->bar_active)))
+        if (cfs_copy_to_user(&ent_up->rpe_priv[0],
+                             &rep->bar_active, sizeof(rep->bar_active)))
                 return -EFAULT;
 
         return 0;
@@ -1374,11 +1376,11 @@ lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
 
 int
 lstcon_test_batch_query(char *name, int testidx, int client,
-                        int timeout, struct list_head *result_up)
+                        int timeout, cfs_list_t *result_up)
 {
         lstcon_rpc_trans_t *trans;
-        struct list_head   *translist;
-        struct list_head   *ndlist;
+        cfs_list_t         *translist;
+        cfs_list_t         *ndlist;
         lstcon_tsb_hdr_t   *hdr;
         lstcon_batch_t     *batch;
         lstcon_test_t      *test = NULL;
@@ -1450,19 +1452,19 @@ lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
         srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat));
         lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat));
 
-        if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
-            copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
-            copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
+        if (cfs_copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
+            cfs_copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
+            cfs_copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
                 return -EFAULT;
 
         return 0;
 }
 
 int
-lstcon_ndlist_stat(struct list_head *ndlist,
-                   int timeout, struct list_head *result_up)
+lstcon_ndlist_stat(cfs_list_t *ndlist,
+                   int timeout, cfs_list_t *result_up)
 {
-        struct list_head    head;
+        cfs_list_t          head;
         lstcon_rpc_trans_t *trans;
         int                 rc;
 
@@ -1485,7 +1487,7 @@ lstcon_ndlist_stat(struct list_head *ndlist,
 }
 
 int
-lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
+lstcon_group_stat(char *grp_name, int timeout, cfs_list_t *result_up)
 {
         lstcon_group_t     *grp;
         int                 rc;
@@ -1505,7 +1507,7 @@ lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
 
 int
 lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
-                  int timeout, struct list_head *result_up)
+                  int timeout, cfs_list_t *result_up)
 {
         lstcon_ndlink_t         *ndl;
         lstcon_group_t          *tmp;
@@ -1520,7 +1522,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
         }
 
         for (i = 0 ; i < count; i++) {
-                if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+                if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
                         rc = -EFAULT;
                         break;
                 }
@@ -1548,9 +1550,9 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
 }
 
 int
-lstcon_debug_ndlist(struct list_head *ndlist,
-                    struct list_head *translist,
-                    int timeout, struct list_head *result_up)
+lstcon_debug_ndlist(cfs_list_t *ndlist,
+                    cfs_list_t *translist,
+                    int timeout, cfs_list_t *result_up)
 {
         lstcon_rpc_trans_t *trans;
         int                 rc;
@@ -1572,7 +1574,7 @@ lstcon_debug_ndlist(struct list_head *ndlist,
 }
 
 int
-lstcon_session_debug(int timeout, struct list_head *result_up)
+lstcon_session_debug(int timeout, cfs_list_t *result_up)
 {
         return lstcon_debug_ndlist(&console_session.ses_ndl_list,
                                    NULL, timeout, result_up);
@@ -1580,7 +1582,7 @@ lstcon_session_debug(int timeout, struct list_head *result_up)
 
 int
 lstcon_batch_debug(int timeout, char *name,
-                   int client, struct list_head *result_up)
+                   int client, cfs_list_t *result_up)
 {
         lstcon_batch_t *bat;
         int             rc;
@@ -1598,7 +1600,7 @@ lstcon_batch_debug(int timeout, char *name,
 
 int
 lstcon_group_debug(int timeout, char *name,
-                   struct list_head *result_up)
+                   cfs_list_t *result_up)
 {
         lstcon_group_t *grp;
         int             rc;
@@ -1617,7 +1619,7 @@ lstcon_group_debug(int timeout, char *name,
 int
 lstcon_nodes_debug(int timeout,
                    int count, lnet_process_id_t *ids_up, 
-                   struct list_head *result_up)
+                   cfs_list_t *result_up)
 {
         lnet_process_id_t  id;
         lstcon_ndlink_t   *ndl;
@@ -1632,7 +1634,7 @@ lstcon_nodes_debug(int timeout,
         }
 
         for (i = 0; i < count; i++) {
-                if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+                if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
                         rc = -EFAULT;
                         break;
                 }
@@ -1702,7 +1704,7 @@ lstcon_session_new(char *name, int key,
         }
 
         for (i = 0; i < LST_GLOBAL_HASHSIZE; i++) {
-                LASSERT (list_empty(&console_session.ses_ndl_hash[i]));
+                LASSERT (cfs_list_empty(&console_session.ses_ndl_hash[i]));
         }
 
         rc = lstcon_batch_add(LST_DEFAULT_BATCH);
@@ -1728,8 +1730,8 @@ lstcon_session_new(char *name, int key,
                                                       timeout;
         strcpy(console_session.ses_name, name);
 
-        if (copy_to_user(sid_up, &console_session.ses_id,
-                         sizeof(lst_sid_t)) == 0)
+        if (cfs_copy_to_user(sid_up, &console_session.ses_id,
+                             sizeof(lst_sid_t)) == 0)
                 return rc;
 
         lstcon_session_end();
@@ -1744,7 +1746,7 @@ lstcon_session_info(lst_sid_t *sid_up, int *key_up,
         lstcon_ndlist_ent_t *entp;
         lstcon_ndlink_t     *ndl;
         int                  rc = 0;
-        
+
         if (console_session.ses_state != LST_SESSION_ACTIVE)
                 return -ESRCH;
 
@@ -1758,10 +1760,11 @@ lstcon_session_info(lst_sid_t *sid_up, int *key_up,
                                       lstcon_ndlink_t, ndl_link)
                 LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
 
-        if (copy_to_user(sid_up, &console_session.ses_id, sizeof(lst_sid_t)) ||
-            copy_to_user(key_up, &console_session.ses_key, sizeof(int)) ||
-            copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
-            copy_to_user(name_up, console_session.ses_name, len))
+        if (cfs_copy_to_user(sid_up, &console_session.ses_id,
+                             sizeof(lst_sid_t)) ||
+            cfs_copy_to_user(key_up, &console_session.ses_key, sizeof(int)) ||
+            cfs_copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
+            cfs_copy_to_user(name_up, console_session.ses_name, len))
                 rc = -EFAULT;
 
         LIBCFS_FREE(entp, sizeof(*entp));
@@ -1805,24 +1808,24 @@ lstcon_session_end()
         console_session.ses_force = 0;
 
         /* destroy all batches */
-        while (!list_empty(&console_session.ses_bat_list)) {
-                bat = list_entry(console_session.ses_bat_list.next,
-                                 lstcon_batch_t, bat_link);
+        while (!cfs_list_empty(&console_session.ses_bat_list)) {
+                bat = cfs_list_entry(console_session.ses_bat_list.next,
+                                     lstcon_batch_t, bat_link);
 
                 lstcon_batch_destroy(bat);
         }
 
         /* destroy all groups */
-        while (!list_empty(&console_session.ses_grp_list)) {
-                grp = list_entry(console_session.ses_grp_list.next,
-                                 lstcon_group_t, grp_link);
+        while (!cfs_list_empty(&console_session.ses_grp_list)) {
+                grp = cfs_list_entry(console_session.ses_grp_list.next,
+                                     lstcon_group_t, grp_link);
                 LASSERT (grp->grp_ref == 1);
 
                 lstcon_group_put(grp);
         }
 
         /* all nodes should be released */
-        LASSERT (list_empty(&console_session.ses_ndl_list));
+        LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
 
         console_session.ses_shutdown = 0;
         console_session.ses_expired  = 0;
@@ -1843,7 +1846,7 @@ lstcon_acceptor_handle (srpc_server_rpc_t *rpc)
 
         sfw_unpack_message(req);
 
-        mutex_down(&console_session.ses_mutex);
+        cfs_mutex_down(&console_session.ses_mutex);
 
         jrep->join_sid = console_session.ses_id;
 
@@ -1865,8 +1868,8 @@ lstcon_acceptor_handle (srpc_server_rpc_t *rpc)
                         goto out;
                 }
 
-                list_add_tail(&grp->grp_link,
-                              &console_session.ses_grp_list);
+                cfs_list_add_tail(&grp->grp_link,
+                                  &console_session.ses_grp_list);
                 lstcon_group_addref(grp);
         }
 
@@ -1902,7 +1905,7 @@ out:
         if (grp != NULL)
                 lstcon_group_put(grp);
 
-        mutex_up(&console_session.ses_mutex);
+        cfs_mutex_up(&console_session.ses_mutex);
 
         return rc;
 }
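
lstcon_acceptor_handle() serialises on the session semaphore through cfs_mutex_down()/cfs_mutex_up(). On a 2.6-era Linux kernel those names could plausibly forward to the classic semaphore API the old code called directly; a sketch under that assumption (header and bodies are guesses, only the cfs_ names come from the diff):

    #include <linux/semaphore.h>   /* <asm/semaphore.h> on older kernels */

    typedef struct semaphore cfs_semaphore_t;

    #define cfs_init_mutex(sem)  init_MUTEX(sem)  /* i.e. sema_init(sem, 1) */
    #define cfs_mutex_down(sem)  down(sem)
    #define cfs_mutex_up(sem)    up(sem)
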
@@ -1938,7 +1941,7 @@ lstcon_console_init(void)
         console_session.ses_expired = 0;
         console_session.ses_laststamp = cfs_time_current_sec();   
 
-        init_mutex(&console_session.ses_mutex);
+        cfs_init_mutex(&console_session.ses_mutex);
 
         CFS_INIT_LIST_HEAD(&console_session.ses_ndl_list);
         CFS_INIT_LIST_HEAD(&console_session.ses_grp_list);
@@ -1946,7 +1949,7 @@ lstcon_console_init(void)
         CFS_INIT_LIST_HEAD(&console_session.ses_trans_list);
 
         LIBCFS_ALLOC(console_session.ses_ndl_hash,
-                     sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+                     sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
         if (console_session.ses_ndl_hash == NULL)
                 return -ENOMEM;
 
@@ -1961,7 +1964,7 @@ lstcon_console_init(void)
         LASSERT (rc != -EBUSY);
         if (rc != 0) {
                 LIBCFS_FREE(console_session.ses_ndl_hash,
-                            sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+                            sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
                 return rc;
         }
 
@@ -1983,7 +1986,7 @@ out:
         srpc_remove_service(&lstcon_acceptor_service);
 
         LIBCFS_FREE(console_session.ses_ndl_hash,
-                    sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+                    sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
 
         srpc_wait_service_shutdown(&lstcon_acceptor_service);
 
@@ -1997,29 +2000,29 @@ lstcon_console_fini(void)
 
         libcfs_deregister_ioctl(&lstcon_ioctl_handler);
 
-        mutex_down(&console_session.ses_mutex);
+        cfs_mutex_down(&console_session.ses_mutex);
 
         srpc_shutdown_service(&lstcon_acceptor_service);
         srpc_remove_service(&lstcon_acceptor_service);
 
-        if (console_session.ses_state != LST_SESSION_NONE) 
+        if (console_session.ses_state != LST_SESSION_NONE)
                 lstcon_session_end();
 
         lstcon_rpc_module_fini();
 
-        mutex_up(&console_session.ses_mutex);
+        cfs_mutex_up(&console_session.ses_mutex);
 
-        LASSERT (list_empty(&console_session.ses_ndl_list));
-        LASSERT (list_empty(&console_session.ses_grp_list));
-        LASSERT (list_empty(&console_session.ses_bat_list));
-        LASSERT (list_empty(&console_session.ses_trans_list));
+        LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
+        LASSERT (cfs_list_empty(&console_session.ses_grp_list));
+        LASSERT (cfs_list_empty(&console_session.ses_bat_list));
+        LASSERT (cfs_list_empty(&console_session.ses_trans_list));
 
         for (i = 0; i < LST_NODE_HASHSIZE; i++) {
-                LASSERT (list_empty(&console_session.ses_ndl_hash[i]));
+                LASSERT (cfs_list_empty(&console_session.ses_ndl_hash[i]));
         }
 
         LIBCFS_FREE(console_session.ses_ndl_hash,
-                    sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+                    sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
 
         srpc_wait_service_shutdown(&lstcon_acceptor_service);
 
index 685bad2..38c8289 100644
 #include "conrpc.h"
 
 typedef struct lstcon_node {
-        lnet_process_id_t       nd_id;          /* id of the node */
-        int                     nd_ref;         /* reference count */
-        int                     nd_state;       /* state of the node */
-        int                     nd_timeout;     /* session timeout */
-        cfs_time_t              nd_stamp;       /* timestamp of last replied RPC */
-        struct lstcon_rpc       nd_ping;        /* ping rpc */
+        lnet_process_id_t    nd_id;          /* id of the node */
+        int                  nd_ref;         /* reference count */
+        int                  nd_state;       /* state of the node */
+        int                  nd_timeout;     /* session timeout */
+        cfs_time_t           nd_stamp;       /* timestamp of last replied RPC */
+        struct lstcon_rpc    nd_ping;        /* ping rpc */
 } lstcon_node_t;                                /*** node descriptor */
 
 typedef struct {
-        struct list_head        ndl_link;       /* chain on list */
-        struct list_head        ndl_hlink;      /* chain on hash */
-        lstcon_node_t          *ndl_node;       /* pointer to node */
+        cfs_list_t           ndl_link;       /* chain on list */
+        cfs_list_t           ndl_hlink;      /* chain on hash */
+        lstcon_node_t       *ndl_node;       /* pointer to node */
 } lstcon_ndlink_t;                              /*** node link descriptor */
 
 typedef struct {
-        struct list_head        grp_link;       /* chain on global group list */
-        int                     grp_ref;        /* reference count */
-        int                     grp_userland;   /* has userland nodes */
-        int                     grp_nnode;      /* # of nodes */
-        char                    grp_name[LST_NAME_SIZE]; /* group name */
+        cfs_list_t           grp_link;       /* chain on global group list */
+        int                  grp_ref;        /* reference count */
+        int                  grp_userland;   /* has userland nodes */
+        int                  grp_nnode;      /* # of nodes */
+        char                 grp_name[LST_NAME_SIZE]; /* group name */
 
-        struct list_head        grp_trans_list; /* transaction list */
-        struct list_head        grp_ndl_list;   /* nodes list */
-        struct list_head        grp_ndl_hash[0];/* hash table for nodes */
-} lstcon_group_t;                               /*** (alias of nodes) group descriptor */
+        cfs_list_t           grp_trans_list; /* transaction list */
+        cfs_list_t           grp_ndl_list;   /* nodes list */
+        cfs_list_t           grp_ndl_hash[0];/* hash table for nodes */
+} lstcon_group_t;                    /*** (alias of nodes) group descriptor */
 
 #define LST_BATCH_IDLE          0xB0            /* idle batch */
 #define LST_BATCH_RUNNING       0xB1            /* running batch */
@@ -89,41 +89,41 @@ typedef struct lstcon_tsb_hdr {
 
 typedef struct {
         lstcon_tsb_hdr_t        bat_hdr;        /* test_batch header */
-        struct list_head        bat_link;       /* chain on session's batches list */
+        cfs_list_t              bat_link;       /* chain on session's batches list */
         int                     bat_ntest;      /* # of test */
         int                     bat_state;      /* state of the batch */
         int                     bat_arg;        /* parameter for run|stop, timeout for run, force for stop */
         char                    bat_name[LST_NAME_SIZE]; /* name of batch */
 
-        struct list_head        bat_test_list;  /* list head of tests (lstcon_test_t) */
-        struct list_head        bat_trans_list; /* list head of transaction */
-        struct list_head        bat_cli_list;   /* list head of client nodes (lstcon_node_t) */
-        struct list_head       *bat_cli_hash;   /* hash table of client nodes */ 
-        struct list_head        bat_srv_list;   /* list head of server nodes */
-        struct list_head       *bat_srv_hash;   /* hash table of server nodes */
-} lstcon_batch_t;                                  /*** (tests ) batch descritptor */
+        cfs_list_t              bat_test_list;  /* list head of tests (lstcon_test_t) */
+        cfs_list_t              bat_trans_list; /* list head of transaction */
+        cfs_list_t              bat_cli_list;   /* list head of client nodes (lstcon_node_t) */
+        cfs_list_t             *bat_cli_hash;   /* hash table of client nodes */
+        cfs_list_t              bat_srv_list;   /* list head of server nodes */
+        cfs_list_t             *bat_srv_hash;   /* hash table of server nodes */
+} lstcon_batch_t;                             /*** (tests) batch descriptor */
 
 typedef struct lstcon_test {
-        lstcon_tsb_hdr_t        tes_hdr;        /* test batch header */
-        struct list_head        tes_link;       /* chain on batch's tests list */
-        lstcon_batch_t         *tes_batch;      /* pointer to batch */
-
-        int                     tes_type;       /* type of the test, i.e: bulk, ping */
-        int                     tes_stop_onerr; /* stop on error */
-        int                     tes_oneside;    /* one-sided test */
-        int                     tes_concur;     /* concurrency */
-        int                     tes_loop;       /* loop count */
-        int                     tes_dist;       /* nodes distribution of target group */
-        int                     tes_span;       /* nodes span of target group */
-        int                     tes_cliidx;     /* client index, used for RPC creating */
-
-        struct list_head        tes_trans_list; /* transaction list */
-        lstcon_group_t         *tes_src_grp;    /* group run the test */
-        lstcon_group_t         *tes_dst_grp;    /* target group */
-
-        int                     tes_paramlen;   /* test parameter length */
-        char                    tes_param[0];   /* test parameter */
-} lstcon_test_t;                                   /*** a single test descriptor */
+        lstcon_tsb_hdr_t      tes_hdr;        /* test batch header */
+        cfs_list_t            tes_link;       /* chain on batch's tests list */
+        lstcon_batch_t       *tes_batch;      /* pointer to batch */
+
+        int                   tes_type;       /* type of the test, e.g. bulk, ping */
+        int                   tes_stop_onerr; /* stop on error */
+        int                   tes_oneside;    /* one-sided test */
+        int                   tes_concur;     /* concurrency */
+        int                   tes_loop;       /* loop count */
+        int                   tes_dist;       /* nodes distribution of target group */
+        int                   tes_span;       /* nodes span of target group */
+        int                   tes_cliidx;     /* client index, used for RPC creation */
+
+        cfs_list_t            tes_trans_list; /* transaction list */
+        lstcon_group_t       *tes_src_grp;    /* group run the test */
+        lstcon_group_t       *tes_dst_grp;    /* target group */
+
+        int                   tes_paramlen;   /* test parameter length */
+        char                  tes_param[0];   /* test parameter */
+} lstcon_test_t;                                /*** a single test descriptor */
 
 #define LST_GLOBAL_HASHSIZE     503             /* global nodes hash table size */
 #define LST_NODE_HASHSIZE       239             /* node hash table (for batch or group) */
@@ -134,7 +134,7 @@ typedef struct lstcon_test {
 #define LST_CONSOLE_TIMEOUT     300             /* default console timeout */
 
 typedef struct {
-        struct semaphore        ses_mutex;      /* lock for session, only one thread can enter session */
+        cfs_semaphore_t         ses_mutex;      /* lock for session, only one thread can enter session */
         lst_sid_t               ses_id;         /* global session id */
         int                     ses_key;        /* local session key */
         int                     ses_state;      /* state of session */
@@ -149,15 +149,15 @@ typedef struct {
         stt_timer_t             ses_ping_timer; /* timer for pinger */
         lstcon_trans_stat_t     ses_trans_stat; /* transaction stats */
 
-        struct list_head        ses_trans_list; /* global list of transaction */
-        struct list_head        ses_grp_list;   /* global list of groups */
-        struct list_head        ses_bat_list;   /* global list of batches */
-        struct list_head        ses_ndl_list;   /* global list of nodes */
-        struct list_head       *ses_ndl_hash;   /* hash table of nodes */
+        cfs_list_t              ses_trans_list; /* global list of transaction */
+        cfs_list_t              ses_grp_list;   /* global list of groups */
+        cfs_list_t              ses_bat_list;   /* global list of batches */
+        cfs_list_t              ses_ndl_list;   /* global list of nodes */
+        cfs_list_t             *ses_ndl_hash;   /* hash table of nodes */
 
-        spinlock_t              ses_rpc_lock;   /* serialize */
-        atomic_t                ses_rpc_counter;/* # of initialized RPCs */
-        struct list_head        ses_rpc_freelist; /* idle console rpc */
+        cfs_spinlock_t          ses_rpc_lock;   /* serialize */
+        cfs_atomic_t            ses_rpc_counter;/* # of initialized RPCs */
+        cfs_list_t              ses_rpc_freelist; /* idle console rpc */
 } lstcon_session_t;                             /*** session descriptor */
 
 extern lstcon_session_t         console_session;
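
The lstcon_session_t members above show the type-level half of the rename: struct semaphore, spinlock_t, atomic_t and struct list_head all become cfs_ typedefs. On Linux the aliases are presumably direct (a sketch, not the libcfs headers verbatim):

    #include <linux/spinlock.h>
    #include <asm/atomic.h>
    #include <linux/list.h>

    typedef spinlock_t       cfs_spinlock_t;
    typedef atomic_t         cfs_atomic_t;
    typedef struct list_head cfs_list_t;
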
@@ -167,8 +167,8 @@ lstcon_trans_stat(void)
         return &console_session.ses_trans_stat;
 }
 
-static inline struct list_head *
-lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
+static inline cfs_list_t *
+lstcon_id2hash (lnet_process_id_t id, cfs_list_t *hash)
 {
         unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
 
@@ -181,42 +181,45 @@ extern int lstcon_session_new(char *name, int key,
 extern int lstcon_session_info(lst_sid_t *sid_up, int *key,
                                lstcon_ndlist_ent_t *entp, char *name_up, int len);
 extern int lstcon_session_end(void);
-extern int lstcon_session_debug(int timeout, struct list_head *result_up);
+extern int lstcon_session_debug(int timeout, cfs_list_t *result_up);
 extern int lstcon_batch_debug(int timeout, char *name, 
-                              int client, struct list_head *result_up);
+                              int client, cfs_list_t *result_up);
 extern int lstcon_group_debug(int timeout, char *name,
-                              struct list_head *result_up);
+                              cfs_list_t *result_up);
 extern int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
-                              struct list_head *result_up);
+                              cfs_list_t *result_up);
 extern int lstcon_group_add(char *name);
 extern int lstcon_group_del(char *name);
 extern int lstcon_group_clean(char *name, int args);
-extern int lstcon_group_refresh(char *name, struct list_head *result_up);
+extern int lstcon_group_refresh(char *name, cfs_list_t *result_up);
 extern int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
-                            struct list_head *result_up);
+                            cfs_list_t *result_up);
 extern int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
-                               struct list_head *result_up);
+                               cfs_list_t *result_up);
 extern int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up, 
                              int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
 extern int lstcon_group_list(int idx, int len, char *name_up);
 extern int lstcon_batch_add(char *name);
-extern int lstcon_batch_run(char *name, int timeout, struct list_head *result_up);
-extern int lstcon_batch_stop(char *name, int force, struct list_head *result_up);
+extern int lstcon_batch_run(char *name, int timeout,
+                            cfs_list_t *result_up);
+extern int lstcon_batch_stop(char *name, int force,
+                             cfs_list_t *result_up);
 extern int lstcon_test_batch_query(char *name, int testidx,
                                    int client, int timeout,
-                                   struct list_head *result_up);
+                                   cfs_list_t *result_up);
 extern int lstcon_batch_del(char *name);
 extern int lstcon_batch_list(int idx, int namelen, char *name_up);
 extern int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
                              int server, int testidx, int *index_p,
                              int *ndent_p, lstcon_node_ent_t *dents_up);
 extern int lstcon_group_stat(char *grp_name, int timeout,
-                             struct list_head *result_up);
+                             cfs_list_t *result_up);
 extern int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
-                             int timeout, struct list_head *result_up);
+                             int timeout, cfs_list_t *result_up);
 extern int lstcon_test_add(char *name, int type, int loop, int concur,
                            int dist, int span, char *src_name, char * dst_name,
-                           void *param, int paramlen, int *retp, struct list_head *result_up);
+                           void *param, int paramlen, int *retp,
+                           cfs_list_t *result_up);
 #endif
 
-#endif  
+#endif
index 8758c37..da63616 100644
@@ -109,15 +109,15 @@ do {                                    \
         __swab64s(&(lc).route_length);  \
 } while (0)
 
-#define sfw_test_active(t)      (atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b)     (atomic_read(&(b)->bat_nactive) != 0)
+#define sfw_test_active(t)      (cfs_atomic_read(&(t)->tsi_nactive) != 0)
+#define sfw_batch_active(b)     (cfs_atomic_read(&(b)->bat_nactive) != 0)
 
 struct smoketest_framework {
-        struct list_head   fw_zombie_rpcs;     /* RPCs to be recycled */
-        struct list_head   fw_zombie_sessions; /* stopping sessions */
-        struct list_head   fw_tests;           /* registered test cases */
-        atomic_t           fw_nzombies;        /* # zombie sessions */
-        spinlock_t         fw_lock;            /* serialise */
+        cfs_list_t         fw_zombie_rpcs;     /* RPCs to be recycled */
+        cfs_list_t         fw_zombie_sessions; /* stopping sessions */
+        cfs_list_t         fw_tests;           /* registered test cases */
+        cfs_atomic_t       fw_nzombies;        /* # zombie sessions */
+        cfs_spinlock_t     fw_lock;            /* serialise */
         sfw_session_t     *fw_session;         /* _the_ session */
         int                fw_shuttingdown;    /* shutdown in progress */
         srpc_server_rpc_t *fw_active_srpc;     /* running RPC */
@@ -163,7 +163,7 @@ sfw_register_test (srpc_service_t *service, sfw_test_client_ops_t *cliops)
         tsc->tsc_cli_ops     = cliops;
         tsc->tsc_srv_service = service;
 
-        list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
+        cfs_list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
         return 0;
 }
 
@@ -223,17 +223,17 @@ sfw_deactivate_session (void)
         LASSERT (!sn->sn_timer_active);
 
         sfw_data.fw_session = NULL;
-        atomic_inc(&sfw_data.fw_nzombies);
-        list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
+        cfs_atomic_inc(&sfw_data.fw_nzombies);
+        cfs_list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
 
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
                                        sfw_test_case_t, tsc_list) {
                 srpc_abort_service(tsc->tsc_srv_service);
         }
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
         cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
                                        sfw_batch_t, bat_list) {
@@ -246,12 +246,12 @@ sfw_deactivate_session (void)
         if (nactive != 0)
                 return;   /* wait for active batches to stop */
 
-        list_del_init(&sn->sn_list);
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_list_del_init(&sn->sn_list);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         sfw_destroy_session(sn);
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
         return;
 }
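
sfw_deactivate_session() drops fw_lock around srpc_abort_service() and retakes it afterwards, now through the prefixed spinlock calls. The Linux mapping these hunks assume is presumably a straight forward (sketch under that assumption):

    #include <linux/spinlock.h>

    #define cfs_spin_lock_init(lock)  spin_lock_init(lock)
    #define cfs_spin_lock(lock)       spin_lock(lock)
    #define cfs_spin_unlock(lock)     spin_unlock(lock)
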
 
@@ -270,7 +270,7 @@ sfw_session_expired (void *data)
 {
         sfw_session_t *sn = data;
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
         LASSERT (sn->sn_timer_active);
         LASSERT (sn == sfw_data.fw_session);
@@ -282,7 +282,7 @@ sfw_session_expired (void *data)
         sn->sn_timer_active = 0;
         sfw_deactivate_session();
 
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
         return;
 }
 
@@ -294,9 +294,9 @@ sfw_init_session (sfw_session_t *sn, lst_sid_t sid, const char *name)
         memset(sn, 0, sizeof(sfw_session_t));
         CFS_INIT_LIST_HEAD(&sn->sn_list);
         CFS_INIT_LIST_HEAD(&sn->sn_batches);
-        atomic_set(&sn->sn_refcount, 1);        /* +1 for caller */
-        atomic_set(&sn->sn_brw_errors, 0);
-        atomic_set(&sn->sn_ping_errors, 0);
+        cfs_atomic_set(&sn->sn_refcount, 1);        /* +1 for caller */
+        cfs_atomic_set(&sn->sn_brw_errors, 0);
+        cfs_atomic_set(&sn->sn_ping_errors, 0);
         strncpy(&sn->sn_name[0], name, LST_NAME_SIZE);
 
         sn->sn_timer_active = 0;
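
sfw_init_session() seeds its error counters through the prefixed atomic helpers, and the later hunks read and decrement them the same way. Completing the assumed Linux forwarding layer (names from the diff, bodies hypothetical):

    #include <asm/atomic.h>

    #define cfs_atomic_set(a, v)         atomic_set(a, v)
    #define cfs_atomic_read(a)           atomic_read(a)
    #define cfs_atomic_inc(a)            atomic_inc(a)
    #define cfs_atomic_dec(a)            atomic_dec(a)
    #define cfs_atomic_dec_and_test(a)   atomic_dec_and_test(a)
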
@@ -331,8 +331,8 @@ void
 sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
 {
         LASSERT (rpc->crpc_bulk.bk_niov == 0);
-        LASSERT (list_empty(&rpc->crpc_list));
-        LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
+        LASSERT (cfs_list_empty(&rpc->crpc_list));
+        LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
 #ifndef __KERNEL__
         LASSERT (rpc->crpc_bulk.bk_pages == NULL);
 #endif
@@ -344,13 +344,13 @@ sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
                 swi_state2str(rpc->crpc_wi.wi_state),
                 rpc->crpc_aborted, rpc->crpc_status);
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
         /* my callers must finish all RPCs before shutting me down */
         LASSERT (!sfw_data.fw_shuttingdown);
-        list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
+        cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
 
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
         return;
 }
 
@@ -390,10 +390,10 @@ sfw_bid2batch (lst_bid_t bid)
         bat->bat_error    = 0;
         bat->bat_session  = sn;
         bat->bat_id       = bid;
-        atomic_set(&bat->bat_nactive, 0);
+        cfs_atomic_set(&bat->bat_nactive, 0);
         CFS_INIT_LIST_HEAD(&bat->bat_tests);
 
-        list_add_tail(&bat->bat_list, &sn->sn_batches);
+        cfs_list_add_tail(&bat->bat_list, &sn->sn_batches);
         return bat;
 }
 
@@ -422,14 +422,14 @@ sfw_get_stats (srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
 
         srpc_get_counters(&reply->str_rpc);
 
-        cnt->brw_errors      = atomic_read(&sn->sn_brw_errors);
-        cnt->ping_errors     = atomic_read(&sn->sn_ping_errors);
-        cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
+        cnt->brw_errors      = cfs_atomic_read(&sn->sn_brw_errors);
+        cnt->ping_errors     = cfs_atomic_read(&sn->sn_ping_errors);
+        cnt->zombie_sessions = cfs_atomic_read(&sfw_data.fw_nzombies);
 
         cnt->active_tests = cnt->active_batches = 0;
         cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
                                        sfw_batch_t, bat_list) {
-                int n = atomic_read(&bat->bat_nactive);
+                int n = cfs_atomic_read(&bat->bat_nactive);
 
                 if (n > 0) {
                         cnt->active_batches++;
@@ -458,7 +458,7 @@ sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
                 reply->mksn_timeout = sn->sn_timeout;
 
                 if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
-                        atomic_inc(&sn->sn_refcount);
+                        cfs_atomic_inc(&sn->sn_refcount);
                         return 0;
                 }
 
@@ -478,13 +478,13 @@ sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
 
         sfw_init_session(sn, request->mksn_sid, &request->mksn_name[0]);
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
         sfw_deactivate_session();
         LASSERT (sfw_data.fw_session == NULL);
         sfw_data.fw_session = sn;
 
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         reply->mksn_status  = 0;
         reply->mksn_sid     = sn->sn_id;
@@ -509,14 +509,14 @@ sfw_remove_session (srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
                 return 0;
         }
 
-        if (!atomic_dec_and_test(&sn->sn_refcount)) {
+        if (!cfs_atomic_dec_and_test(&sn->sn_refcount)) {
                 reply->rmsn_status = 0;
                 return 0;
         }
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
         sfw_deactivate_session();
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         reply->rmsn_status = 0;
         reply->rmsn_sid    = LST_INVALID_SID;
@@ -550,8 +550,8 @@ sfw_test_rpc_fini (srpc_client_rpc_t *rpc)
         sfw_test_instance_t *tsi = tsu->tsu_instance;
 
         /* Called with hold of tsi->tsi_lock */
-        LASSERT (list_empty(&rpc->crpc_list));
-        list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+        LASSERT (cfs_list_empty(&rpc->crpc_list));
+        cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
 }
 
 int
@@ -606,20 +606,20 @@ sfw_destroy_test_instance (sfw_test_instance_t *tsi)
         tsi->tsi_ops->tso_fini(tsi);
 
         LASSERT (!tsi->tsi_stopping);
-        LASSERT (list_empty(&tsi->tsi_active_rpcs));
+        LASSERT (cfs_list_empty(&tsi->tsi_active_rpcs));
         LASSERT (!sfw_test_active(tsi));
 
-        while (!list_empty(&tsi->tsi_units)) {
-                tsu = list_entry(tsi->tsi_units.next,
-                                 sfw_test_unit_t, tsu_list);
-                list_del(&tsu->tsu_list);
+        while (!cfs_list_empty(&tsi->tsi_units)) {
+                tsu = cfs_list_entry(tsi->tsi_units.next,
+                                     sfw_test_unit_t, tsu_list);
+                cfs_list_del(&tsu->tsu_list);
                 LIBCFS_FREE(tsu, sizeof(*tsu));
         }
 
-        while (!list_empty(&tsi->tsi_free_rpcs)) {
-                rpc = list_entry(tsi->tsi_free_rpcs.next,
-                                 srpc_client_rpc_t, crpc_list);
-                list_del(&rpc->crpc_list);
+        while (!cfs_list_empty(&tsi->tsi_free_rpcs)) {
+                rpc = cfs_list_entry(tsi->tsi_free_rpcs.next,
+                                     srpc_client_rpc_t, crpc_list);
+                cfs_list_del(&rpc->crpc_list);
                 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
         }
 
@@ -635,12 +635,12 @@ sfw_destroy_batch (sfw_batch_t *tsb)
         sfw_test_instance_t *tsi;
 
         LASSERT (!sfw_batch_active(tsb));
-        LASSERT (list_empty(&tsb->bat_list));
+        LASSERT (cfs_list_empty(&tsb->bat_list));
 
-        while (!list_empty(&tsb->bat_tests)) {
-                tsi = list_entry(tsb->bat_tests.next,
-                                 sfw_test_instance_t, tsi_list);
-                list_del_init(&tsi->tsi_list);
+        while (!cfs_list_empty(&tsb->bat_tests)) {
+                tsi = cfs_list_entry(tsb->bat_tests.next,
+                                     sfw_test_instance_t, tsi_list);
+                cfs_list_del_init(&tsi->tsi_list);
                 sfw_destroy_test_instance(tsi);
         }
 
@@ -653,18 +653,18 @@ sfw_destroy_session (sfw_session_t *sn)
 {
         sfw_batch_t *batch;
 
-        LASSERT (list_empty(&sn->sn_list));
+        LASSERT (cfs_list_empty(&sn->sn_list));
         LASSERT (sn != sfw_data.fw_session);
 
-        while (!list_empty(&sn->sn_batches)) {
-                batch = list_entry(sn->sn_batches.next,
-                                   sfw_batch_t, bat_list);
-                list_del_init(&batch->bat_list);
+        while (!cfs_list_empty(&sn->sn_batches)) {
+                batch = cfs_list_entry(sn->sn_batches.next,
+                                       sfw_batch_t, bat_list);
+                cfs_list_del_init(&batch->bat_list);
                 sfw_destroy_batch(batch);
         }
 
         LIBCFS_FREE(sn, sizeof(*sn));
-        atomic_dec(&sfw_data.fw_nzombies);
+        cfs_atomic_dec(&sfw_data.fw_nzombies);
         return;
 }
 
@@ -722,8 +722,8 @@ sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
         }
 
         memset(tsi, 0, sizeof(*tsi));
-        spin_lock_init(&tsi->tsi_lock);
-        atomic_set(&tsi->tsi_nactive, 0);
+        cfs_spin_lock_init(&tsi->tsi_lock);
+        cfs_atomic_set(&tsi->tsi_nactive, 0);
         CFS_INIT_LIST_HEAD(&tsi->tsi_units);
         CFS_INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
         CFS_INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
@@ -746,7 +746,7 @@ sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
 
         if (!tsi->tsi_is_client) {
                 /* it's test server, just add it to tsb */
-                list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
+                cfs_list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
                 return 0;
         }
 
@@ -787,13 +787,13 @@ sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
                         tsu->tsu_dest     = id;
                         tsu->tsu_instance = tsi;
                         tsu->tsu_private  = NULL;
-                        list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
+                        cfs_list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
                 }
         }
 
         rc = tsi->tsi_ops->tso_init(tsi);
         if (rc == 0) {
-                list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
+                cfs_list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
                 return 0;
         }
 
@@ -812,36 +812,36 @@ sfw_test_unit_done (sfw_test_unit_t *tsu)
 
         LASSERT (sfw_test_active(tsi));
 
-        if (!atomic_dec_and_test(&tsi->tsi_nactive))
+        if (!cfs_atomic_dec_and_test(&tsi->tsi_nactive))
                 return;
-        
+
         /* the test instance is done */
-        spin_lock(&tsi->tsi_lock);
+        cfs_spin_lock(&tsi->tsi_lock);
 
         tsi->tsi_stopping = 0;
 
-        spin_unlock(&tsi->tsi_lock);
+        cfs_spin_unlock(&tsi->tsi_lock);
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
-        if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
+        if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
             sn == sfw_data.fw_session) {               /* sn also active */
-                spin_unlock(&sfw_data.fw_lock);
+                cfs_spin_unlock(&sfw_data.fw_lock);
                 return;
         }
-        
-        LASSERT (!list_empty(&sn->sn_list)); /* I'm a zombie! */
+
+        LASSERT (!cfs_list_empty(&sn->sn_list)); /* I'm a zombie! */
 
         cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
                                        sfw_batch_t, bat_list) {
                 if (sfw_batch_active(tsb)) {
-                        spin_unlock(&sfw_data.fw_lock);
+                        cfs_spin_unlock(&sfw_data.fw_lock);
                         return;
                 }
         }
 
-        list_del_init(&sn->sn_list);
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_list_del_init(&sn->sn_list);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         sfw_destroy_session(sn);
         return;
@@ -856,12 +856,12 @@ sfw_test_rpc_done (srpc_client_rpc_t *rpc)
 
         tsi->tsi_ops->tso_done_rpc(tsu, rpc);
                       
-        spin_lock(&tsi->tsi_lock);
+        cfs_spin_lock(&tsi->tsi_lock);
 
         LASSERT (sfw_test_active(tsi));
-        LASSERT (!list_empty(&rpc->crpc_list));
+        LASSERT (!cfs_list_empty(&rpc->crpc_list));
 
-        list_del_init(&rpc->crpc_list);
+        cfs_list_del_init(&rpc->crpc_list);
 
         /* batch is stopping or loop is done or get error */
         if (tsi->tsi_stopping ||
@@ -872,7 +872,7 @@ sfw_test_rpc_done (srpc_client_rpc_t *rpc)
         /* dec ref for poster */
         srpc_client_rpc_decref(rpc);
 
-        spin_unlock(&tsi->tsi_lock);
+        cfs_spin_unlock(&tsi->tsi_lock);
 
         if (!done) {
                 swi_schedule_workitem(&tsu->tsu_worker);
@@ -889,24 +889,24 @@ sfw_create_test_rpc (sfw_test_unit_t *tsu, lnet_process_id_t peer,
 {
         srpc_client_rpc_t   *rpc = NULL;
         sfw_test_instance_t *tsi = tsu->tsu_instance;
-        
-        spin_lock(&tsi->tsi_lock);
+
+        cfs_spin_lock(&tsi->tsi_lock);
 
         LASSERT (sfw_test_active(tsi));
 
-        if (!list_empty(&tsi->tsi_free_rpcs)) {
+        if (!cfs_list_empty(&tsi->tsi_free_rpcs)) {
                 /* pick request from buffer */
-                rpc = list_entry(tsi->tsi_free_rpcs.next,
-                                 srpc_client_rpc_t, crpc_list);
+                rpc = cfs_list_entry(tsi->tsi_free_rpcs.next,
+                                     srpc_client_rpc_t, crpc_list);
                 LASSERT (nblk == rpc->crpc_bulk.bk_niov);
-                list_del_init(&rpc->crpc_list);
+                cfs_list_del_init(&rpc->crpc_list);
 
                 srpc_init_client_rpc(rpc, peer, tsi->tsi_service, nblk,
                                      blklen, sfw_test_rpc_done,
                                      sfw_test_rpc_fini, tsu);
         }
 
-        spin_unlock(&tsi->tsi_lock);
+        cfs_spin_unlock(&tsi->tsi_lock);
         
         if (rpc == NULL)
                 rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
@@ -937,25 +937,25 @@ sfw_run_test (swi_workitem_t *wi)
 
         LASSERT (rpc != NULL);
 
-        spin_lock(&tsi->tsi_lock);
+        cfs_spin_lock(&tsi->tsi_lock);
 
         if (tsi->tsi_stopping) {
-                list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
-                spin_unlock(&tsi->tsi_lock);
+                cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+                cfs_spin_unlock(&tsi->tsi_lock);
                 goto test_done;
         }
 
         if (tsu->tsu_loop > 0)
                 tsu->tsu_loop--;
 
-        list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
-        spin_unlock(&tsi->tsi_lock);
+        cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
+        cfs_spin_unlock(&tsi->tsi_lock);
 
         rpc->crpc_timeout = rpc_timeout;
 
-        spin_lock(&rpc->crpc_lock);
+        cfs_spin_lock(&rpc->crpc_lock);
         srpc_post_rpc(rpc);
-        spin_unlock(&rpc->crpc_lock);
+        cfs_spin_unlock(&rpc->crpc_lock);
         return 0;
 
 test_done:
@@ -980,7 +980,7 @@ sfw_run_batch (sfw_batch_t *tsb)
 
         if (sfw_batch_active(tsb)) {
                 CDEBUG(D_NET, "Batch already active: "LPU64" (%d)\n",
-                       tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
+                       tsb->bat_id.bat_id, cfs_atomic_read(&tsb->bat_nactive));
                 return 0;
         }
 
@@ -992,11 +992,11 @@ sfw_run_batch (sfw_batch_t *tsb)
                 LASSERT (!tsi->tsi_stopping);
                 LASSERT (!sfw_test_active(tsi));
 
-                atomic_inc(&tsb->bat_nactive);
+                cfs_atomic_inc(&tsb->bat_nactive);
 
                 cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
                                                sfw_test_unit_t, tsu_list) {
-                        atomic_inc(&tsi->tsi_nactive);
+                        cfs_atomic_inc(&tsi->tsi_nactive);
                         tsu->tsu_loop = tsi->tsi_loop;
                         wi = &tsu->tsu_worker;
                         swi_init_workitem(wi, tsu, sfw_run_test);
@@ -1020,32 +1020,32 @@ sfw_stop_batch (sfw_batch_t *tsb, int force)
 
         cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
                                        sfw_test_instance_t, tsi_list) {
-                spin_lock(&tsi->tsi_lock);
+                cfs_spin_lock(&tsi->tsi_lock);
 
                 if (!tsi->tsi_is_client ||
                     !sfw_test_active(tsi) || tsi->tsi_stopping) {
-                        spin_unlock(&tsi->tsi_lock);
+                        cfs_spin_unlock(&tsi->tsi_lock);
                         continue;
                 }
 
                 tsi->tsi_stopping = 1;
 
                 if (!force) {
-                        spin_unlock(&tsi->tsi_lock);
+                        cfs_spin_unlock(&tsi->tsi_lock);
                         continue;
                 }
 
                 /* abort launched rpcs in the test */
                 cfs_list_for_each_entry_typed (rpc, &tsi->tsi_active_rpcs,
                                                srpc_client_rpc_t, crpc_list) {
-                        spin_lock(&rpc->crpc_lock);
+                        cfs_spin_lock(&rpc->crpc_lock);
 
                         srpc_abort_rpc(rpc, -EINTR);
 
-                        spin_unlock(&rpc->crpc_lock);
+                        cfs_spin_unlock(&rpc->crpc_lock);
                 }
 
-                spin_unlock(&tsi->tsi_lock);
+                cfs_spin_unlock(&tsi->tsi_lock);
         }
 
         return 0;
@@ -1060,7 +1060,7 @@ sfw_query_batch (sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
                 return -EINVAL;
 
         if (testidx == 0) {
-                reply->bar_active = atomic_read(&tsb->bat_nactive);
+                reply->bar_active = cfs_atomic_read(&tsb->bat_nactive);
                 return 0;
         }
 
@@ -1069,7 +1069,7 @@ sfw_query_batch (sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
                 if (testidx-- > 1)
                         continue;
 
-                reply->bar_active = atomic_read(&tsi->tsi_nactive);
+                reply->bar_active = cfs_atomic_read(&tsi->tsi_nactive);
                 return 0;
         }
 
@@ -1207,10 +1207,10 @@ sfw_handle_server_rpc (srpc_server_rpc_t *rpc)
         LASSERT (sfw_data.fw_active_srpc == NULL);
         LASSERT (sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
         if (sfw_data.fw_shuttingdown) {
-                spin_unlock(&sfw_data.fw_lock);
+                cfs_spin_unlock(&sfw_data.fw_lock);
                 return -ESHUTDOWN;
         }
 
@@ -1218,12 +1218,12 @@ sfw_handle_server_rpc (srpc_server_rpc_t *rpc)
         if (sfw_del_session_timer() != 0) {
                CERROR ("Dropping RPC (%s) from %s: racing with expiry timer\n",
                         sv->sv_name, libcfs_id2str(rpc->srpc_peer));
-                spin_unlock(&sfw_data.fw_lock);
+                cfs_spin_unlock(&sfw_data.fw_lock);
                 return -EAGAIN;
         }
 
         sfw_data.fw_active_srpc = rpc;
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         sfw_unpack_message(request);
         LASSERT (request->msg_type == srpc_service2request(sv->sv_id));
@@ -1262,7 +1262,7 @@ sfw_handle_server_rpc (srpc_server_rpc_t *rpc)
         }
 
         rpc->srpc_done = sfw_server_rpc_done;
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
 #ifdef __KERNEL__
         if (!sfw_data.fw_shuttingdown)
@@ -1273,7 +1273,7 @@ sfw_handle_server_rpc (srpc_server_rpc_t *rpc)
 #endif
 
         sfw_data.fw_active_srpc = NULL;
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
         return rc;
 }
 
@@ -1288,34 +1288,34 @@ sfw_bulk_ready (srpc_server_rpc_t *rpc, int status)
         LASSERT (sfw_data.fw_active_srpc == NULL);
         LASSERT (rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
         if (status != 0) {
                 CERROR ("Bulk transfer failed for RPC: "
                         "service %s, peer %s, status %d\n",
                         sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
-                spin_unlock(&sfw_data.fw_lock);
+                cfs_spin_unlock(&sfw_data.fw_lock);
                 return -EIO;
         }
 
         if (sfw_data.fw_shuttingdown) {
-                spin_unlock(&sfw_data.fw_lock);
+                cfs_spin_unlock(&sfw_data.fw_lock);
                 return -ESHUTDOWN;
         }
 
         if (sfw_del_session_timer() != 0) {
                CERROR ("Dropping RPC (%s) from %s: racing with expiry timer\n",
                         sv->sv_name, libcfs_id2str(rpc->srpc_peer));
-                spin_unlock(&sfw_data.fw_lock);
+                cfs_spin_unlock(&sfw_data.fw_lock);
                 return -EAGAIN;
         }
 
         sfw_data.fw_active_srpc = rpc;
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         rc = sfw_add_test(rpc);
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
 #ifdef __KERNEL__
         if (!sfw_data.fw_shuttingdown)
@@ -1326,7 +1326,7 @@ sfw_bulk_ready (srpc_server_rpc_t *rpc, int status)
 #endif
 
         sfw_data.fw_active_srpc = NULL;
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
         return rc;
 }
 
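Both sfw_handle_server_rpc() and sfw_bulk_ready() above follow the same
fw_lock discipline: publish the RPC being served so shutdown can see it,
drop the lock for the long-running body, then clear the field under the
lock again. Distilled into an illustrative sketch (do_work() is a
hypothetical stand-in for the handler body, not a function in this patch):

        int rc;

        cfs_spin_lock(&sfw_data.fw_lock);
        if (sfw_data.fw_shuttingdown) {
                cfs_spin_unlock(&sfw_data.fw_lock);
                return -ESHUTDOWN;
        }
        sfw_data.fw_active_srpc = rpc;          /* visible to shutdown */
        cfs_spin_unlock(&sfw_data.fw_lock);

        rc = do_work(rpc);                      /* may block; lock dropped */

        cfs_spin_lock(&sfw_data.fw_lock);
        sfw_data.fw_active_srpc = NULL;
        cfs_spin_unlock(&sfw_data.fw_lock);
        return rc;
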
@@ -1337,23 +1337,23 @@ sfw_create_rpc (lnet_process_id_t peer, int service,
 {
         srpc_client_rpc_t *rpc;
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
         LASSERT (!sfw_data.fw_shuttingdown);
         LASSERT (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
-        if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
-                rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
-                                 srpc_client_rpc_t, crpc_list);
-                list_del(&rpc->crpc_list);
-                spin_unlock(&sfw_data.fw_lock);
+        if (nbulkiov == 0 && !cfs_list_empty(&sfw_data.fw_zombie_rpcs)) {
+                rpc = cfs_list_entry(sfw_data.fw_zombie_rpcs.next,
+                                     srpc_client_rpc_t, crpc_list);
+                cfs_list_del(&rpc->crpc_list);
+                cfs_spin_unlock(&sfw_data.fw_lock);
 
                 srpc_init_client_rpc(rpc, peer, service, 0, 0,
                                      done, sfw_client_rpc_fini, priv);
                 return rpc;
         }
 
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         rpc = srpc_create_client_rpc(peer, service, nbulkiov, bulklen, done,
                                      nbulkiov != 0 ? NULL : sfw_client_rpc_fini,
@@ -1509,29 +1509,29 @@ sfw_unpack_message (srpc_msg_t *msg)
 void
 sfw_abort_rpc (srpc_client_rpc_t *rpc)
 {
-        LASSERT (atomic_read(&rpc->crpc_refcount) > 0);
+        LASSERT (cfs_atomic_read(&rpc->crpc_refcount) > 0);
         LASSERT (rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
-        spin_lock(&rpc->crpc_lock);
+        cfs_spin_lock(&rpc->crpc_lock);
         srpc_abort_rpc(rpc, -EINTR);
-        spin_unlock(&rpc->crpc_lock);
+        cfs_spin_unlock(&rpc->crpc_lock);
         return;
 }
 
 void
 sfw_post_rpc (srpc_client_rpc_t *rpc)
 {
-        spin_lock(&rpc->crpc_lock);
+        cfs_spin_lock(&rpc->crpc_lock);
 
         LASSERT (!rpc->crpc_closed);
         LASSERT (!rpc->crpc_aborted);
-        LASSERT (list_empty(&rpc->crpc_list));
+        LASSERT (cfs_list_empty(&rpc->crpc_list));
         LASSERT (!sfw_data.fw_shuttingdown);
 
         rpc->crpc_timeout = rpc_timeout;
         srpc_post_rpc(rpc);
 
-        spin_unlock(&rpc->crpc_lock);
+        cfs_spin_unlock(&rpc->crpc_lock);
         return;
 }
 
@@ -1631,8 +1631,8 @@ sfw_startup (void)
 
         sfw_data.fw_session     = NULL;
         sfw_data.fw_active_srpc = NULL;
-        spin_lock_init(&sfw_data.fw_lock);
-        atomic_set(&sfw_data.fw_nzombies, 0);
+        cfs_spin_lock_init(&sfw_data.fw_lock);
+        cfs_atomic_set(&sfw_data.fw_nzombies, 0);
         CFS_INIT_LIST_HEAD(&sfw_data.fw_tests);
         CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
         CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
@@ -1681,7 +1681,7 @@ sfw_startup (void)
                 }
 
                 /* about to sfw_shutdown, no need to add buffer */
-                if (error) continue; 
+                if (error) continue;
 
                 rc = srpc_service_add_buffers(sv, SFW_POST_BUFFERS);
                 if (rc != SFW_POST_BUFFERS) {
@@ -1704,7 +1704,7 @@ sfw_shutdown (void)
         sfw_test_case_t *tsc;
         int              i;
 
-        spin_lock(&sfw_data.fw_lock);
+        cfs_spin_lock(&sfw_data.fw_lock);
 
         sfw_data.fw_shuttingdown = 1;
 #ifdef __KERNEL__
@@ -1719,12 +1719,12 @@ sfw_shutdown (void)
                                "waiting for session timer to explode.\n");
 
         sfw_deactivate_session();
-        lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
+        lst_wait_until(cfs_atomic_read(&sfw_data.fw_nzombies) == 0,
                        sfw_data.fw_lock,
                        "waiting for %d zombie sessions to die.\n",
-                       atomic_read(&sfw_data.fw_nzombies));
+                       cfs_atomic_read(&sfw_data.fw_nzombies));
 
-        spin_unlock(&sfw_data.fw_lock);
+        cfs_spin_unlock(&sfw_data.fw_lock);
 
         for (i = 0; ; i++) {
                 sv = &sfw_services[i];
@@ -1742,12 +1742,12 @@ sfw_shutdown (void)
                 srpc_remove_service(sv);
         }
 
-        while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
+        while (!cfs_list_empty(&sfw_data.fw_zombie_rpcs)) {
                 srpc_client_rpc_t *rpc;
 
-                rpc = list_entry(sfw_data.fw_zombie_rpcs.next, 
-                                 srpc_client_rpc_t, crpc_list);
-                list_del(&rpc->crpc_list);
+                rpc = cfs_list_entry(sfw_data.fw_zombie_rpcs.next,
+                                     srpc_client_rpc_t, crpc_list);
+                cfs_list_del(&rpc->crpc_list);
 
                 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
         }
@@ -1760,13 +1760,13 @@ sfw_shutdown (void)
                 srpc_wait_service_shutdown(sv);
         }
 
-        while (!list_empty(&sfw_data.fw_tests)) {
-                tsc = list_entry(sfw_data.fw_tests.next,
-                                 sfw_test_case_t, tsc_list);
-                
+        while (!cfs_list_empty(&sfw_data.fw_tests)) {
+                tsc = cfs_list_entry(sfw_data.fw_tests.next,
+                                     sfw_test_case_t, tsc_list);
+
                 srpc_wait_service_shutdown(tsc->tsc_srv_service);
 
-                list_del(&tsc->tsc_list);
+                cfs_list_del(&tsc->tsc_list);
                 LIBCFS_FREE(tsc, sizeof(*tsc));
         }
 
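The framework.c hunks above are a purely mechanical rename; no locking or
list logic changes. On the Linux kernel port the cfs_-prefixed primitives
are expected to be thin aliases over the native API (a sketch only,
assuming the Linux build -- not the verbatim libcfs headers, which also
cover the Darwin, WinNT and userspace ports):

        #include <linux/spinlock.h>
        #include <linux/list.h>
        #include <linux/hardirq.h>
        #include <asm/atomic.h>

        typedef spinlock_t       cfs_spinlock_t;
        typedef atomic_t         cfs_atomic_t;
        typedef struct list_head cfs_list_t;

        #define cfs_spin_lock_init(l)      spin_lock_init(l)
        #define cfs_spin_lock(l)           spin_lock(l)
        #define cfs_spin_unlock(l)         spin_unlock(l)
        #define cfs_atomic_set(a, v)       atomic_set(a, v)
        #define cfs_atomic_read(a)         atomic_read(a)
        #define cfs_atomic_inc(a)          atomic_inc(a)
        #define cfs_atomic_dec_and_test(a) atomic_dec_and_test(a)
        #define cfs_in_interrupt()         in_interrupt()
        #define cfs_list_empty(h)          list_empty(h)
        #define cfs_list_add(e, h)         list_add(e, h)
        #define cfs_list_add_tail(e, h)    list_add_tail(e, h)
        #define cfs_list_del(e)            list_del(e)
        #define cfs_list_del_init(e)       list_del_init(e)
        #define cfs_list_entry(p, t, m)    list_entry(p, t, m)
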
diff --git a/lnet/selftest/ping_test.c b/lnet/selftest/ping_test.c
index 2dad588..1a668d8 100644 (file)
@@ -45,7 +45,7 @@
 #define LST_PING_TEST_MAGIC     0xbabeface
 
 typedef struct {
-        spinlock_t      pnd_lock;       /* serialize */
+        cfs_spinlock_t  pnd_lock;       /* serialize */
         int             pnd_counter;    /* sequence counter */
 } lst_ping_data_t;
 
@@ -56,7 +56,7 @@ ping_client_init(sfw_test_instance_t *tsi)
 {
         LASSERT (tsi->tsi_is_client);
 
-        spin_lock_init(&lst_ping_data.pnd_lock);
+        cfs_spin_lock_init(&lst_ping_data.pnd_lock);
         lst_ping_data.pnd_counter = 0;
 
         return 0;
@@ -71,7 +71,7 @@ ping_client_fini (sfw_test_instance_t *tsi)
         LASSERT (sn != NULL);
         LASSERT (tsi->tsi_is_client);
 
-        errors = atomic_read(&sn->sn_ping_errors);
+        errors = cfs_atomic_read(&sn->sn_ping_errors);
         if (errors)
                 CWARN ("%d pings have failed.\n", errors);
         else
@@ -94,9 +94,9 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu,
 
         req->pnr_magic = LST_PING_TEST_MAGIC;
 
-        spin_lock(&lst_ping_data.pnd_lock);
+        cfs_spin_lock(&lst_ping_data.pnd_lock);
         req->pnr_seq = lst_ping_data.pnd_counter ++;
-        spin_unlock(&lst_ping_data.pnd_lock);
+        cfs_spin_unlock(&lst_ping_data.pnd_lock);
 
         cfs_fs_timeval(&tv);
         req->pnr_time_sec  = tv.tv_sec;
@@ -118,7 +118,7 @@ ping_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
 
         if (rpc->crpc_status != 0) {
                 if (!tsi->tsi_stopping) /* rpc could have been aborted */
-                        atomic_inc(&sn->sn_ping_errors);
+                        cfs_atomic_inc(&sn->sn_ping_errors);
                 CERROR ("Unable to ping %s (%d): %d\n",
                         libcfs_id2str(rpc->crpc_dest),
                         reqst->pnr_seq, rpc->crpc_status);
@@ -133,7 +133,7 @@ ping_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
         
         if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
                 rpc->crpc_status = -EBADMSG;
-                atomic_inc(&sn->sn_ping_errors);
+                cfs_atomic_inc(&sn->sn_ping_errors);
                 CERROR ("Bad magic %u from %s, %u expected.\n",
                         reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
                         LST_PING_TEST_MAGIC);
@@ -142,7 +142,7 @@ ping_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
         
         if (reply->pnr_seq != reqst->pnr_seq) {
                 rpc->crpc_status = -EBADMSG;
-                atomic_inc(&sn->sn_ping_errors);
+                cfs_atomic_inc(&sn->sn_ping_errors);
                 CERROR ("Bad seq %u from %s, %u expected.\n",
                         reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
                         reqst->pnr_seq);
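The ping client above stamps every request with a sequence number taken
under pnd_lock and validates both the magic and the echoed sequence on
completion. The same idiom in a self-contained userspace analog (all
names hypothetical; a pthread mutex stands in for cfs_spinlock_t):

        #include <pthread.h>
        #include <stdint.h>

        #define PING_MAGIC 0xbabeface           /* cf. LST_PING_TEST_MAGIC */

        static pthread_mutex_t seq_lock = PTHREAD_MUTEX_INITIALIZER;
        static int             seq_counter;     /* cf. pnd_counter */

        struct ping_msg { uint32_t magic; int seq; };

        static void ping_prep(struct ping_msg *req)
        {
                req->magic = PING_MAGIC;
                pthread_mutex_lock(&seq_lock);
                req->seq = seq_counter++;       /* serialized assignment */
                pthread_mutex_unlock(&seq_lock);
        }

        /* 0 if the reply matches the request, -1 otherwise
         * (cf. the two -EBADMSG paths in ping_client_done_rpc) */
        static int ping_check(const struct ping_msg *req,
                              const struct ping_msg *rep)
        {
                if (rep->magic != PING_MAGIC)
                        return -1;
                return rep->seq == req->seq ? 0 : -1;
        }
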
diff --git a/lnet/selftest/rpc.c b/lnet/selftest/rpc.c
index 49f483f..abaed8a 100644 (file)
@@ -53,7 +53,7 @@ typedef enum {
 } srpc_state_t;
 
 struct smoketest_rpc {
-        spinlock_t        rpc_glock;     /* global lock */
+        cfs_spinlock_t    rpc_glock;     /* global lock */
         srpc_service_t   *rpc_services[SRPC_SERVICE_MAX_ID + 1];
         lnet_handle_eq_t  rpc_lnet_eq;   /* _the_ LNet event queue */
         srpc_state_t      rpc_state;
@@ -66,16 +66,16 @@ int srpc_handle_rpc (swi_workitem_t *wi);
 
 void srpc_get_counters (srpc_counters_t *cnt)
 {
-        spin_lock(&srpc_data.rpc_glock);
+        cfs_spin_lock(&srpc_data.rpc_glock);
         *cnt = srpc_data.rpc_counters;
-        spin_unlock(&srpc_data.rpc_glock);
+        cfs_spin_unlock(&srpc_data.rpc_glock);
 }
 
 void srpc_set_counters (const srpc_counters_t *cnt)
 {
-        spin_lock(&srpc_data.rpc_glock);
+        cfs_spin_lock(&srpc_data.rpc_glock);
         srpc_data.rpc_counters = *cnt;
-        spin_unlock(&srpc_data.rpc_glock);
+        cfs_spin_unlock(&srpc_data.rpc_glock);
 }
 
 void
@@ -179,9 +179,9 @@ srpc_next_id (void)
 {
         __u64 id;
 
-        spin_lock(&srpc_data.rpc_glock);
+        cfs_spin_lock(&srpc_data.rpc_glock);
         id = srpc_data.rpc_matchbits++;
-        spin_unlock(&srpc_data.rpc_glock);
+        cfs_spin_unlock(&srpc_data.rpc_glock);
         return id;
 }
 
@@ -211,22 +211,22 @@ srpc_add_service (srpc_service_t *sv)
         LASSERT (sv->sv_concur > 0);
         LASSERT (0 <= id && id <= SRPC_SERVICE_MAX_ID);
 
-        spin_lock(&srpc_data.rpc_glock);
+        cfs_spin_lock(&srpc_data.rpc_glock);
 
         LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
 
         if (srpc_data.rpc_services[id] != NULL) {
-                spin_unlock(&srpc_data.rpc_glock);
+                cfs_spin_unlock(&srpc_data.rpc_glock);
                 return -EBUSY;
         }
 
         srpc_data.rpc_services[id] = sv;
-        spin_unlock(&srpc_data.rpc_glock);
+        cfs_spin_unlock(&srpc_data.rpc_glock);
 
         sv->sv_nprune       = 0;
         sv->sv_nposted_msg  = 0;
         sv->sv_shuttingdown = 0;
-        spin_lock_init(&sv->sv_lock);
+        cfs_spin_lock_init(&sv->sv_lock);
         CFS_INIT_LIST_HEAD(&sv->sv_free_rpcq);
         CFS_INIT_LIST_HEAD(&sv->sv_active_rpcq);
         CFS_INIT_LIST_HEAD(&sv->sv_posted_msgq);
@@ -239,7 +239,7 @@ srpc_add_service (srpc_service_t *sv)
                 LIBCFS_ALLOC(rpc, sizeof(*rpc));
                 if (rpc == NULL) goto enomem;
 
-                list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
+                cfs_list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
         }
 
         CDEBUG (D_NET, "Adding service: id %d, name %s, concurrency %d\n",
@@ -247,16 +247,16 @@ srpc_add_service (srpc_service_t *sv)
         return 0;
 
 enomem:
-        while (!list_empty(&sv->sv_free_rpcq)) {
-                rpc = list_entry(sv->sv_free_rpcq.next,
-                                 srpc_server_rpc_t, srpc_list);
-                list_del(&rpc->srpc_list);
+        while (!cfs_list_empty(&sv->sv_free_rpcq)) {
+                rpc = cfs_list_entry(sv->sv_free_rpcq.next,
+                                     srpc_server_rpc_t, srpc_list);
+                cfs_list_del(&rpc->srpc_list);
                 LIBCFS_FREE(rpc, sizeof(*rpc));
         }
 
-        spin_lock(&srpc_data.rpc_glock);
+        cfs_spin_lock(&srpc_data.rpc_glock);
         srpc_data.rpc_services[id] = NULL;
-        spin_unlock(&srpc_data.rpc_glock);
+        cfs_spin_unlock(&srpc_data.rpc_glock);
         return -ENOMEM;
 }
 
@@ -265,15 +265,15 @@ srpc_remove_service (srpc_service_t *sv)
 {
         int id = sv->sv_id;
 
-        spin_lock(&srpc_data.rpc_glock);
+        cfs_spin_lock(&srpc_data.rpc_glock);
 
         if (srpc_data.rpc_services[id] != sv) {
-                spin_unlock(&srpc_data.rpc_glock);
+                cfs_spin_unlock(&srpc_data.rpc_glock);
                 return -ENOENT;
         }
 
         srpc_data.rpc_services[id] = NULL;
-        spin_unlock(&srpc_data.rpc_glock);
+        cfs_spin_unlock(&srpc_data.rpc_glock);
         return 0;
 }
 
@@ -417,9 +417,9 @@ srpc_service_post_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
         LASSERT (!sv->sv_shuttingdown);
 
         LNetInvalidateHandle(&buf->buf_mdh);
-        list_add(&buf->buf_list, &sv->sv_posted_msgq);
+        cfs_list_add(&buf->buf_list, &sv->sv_posted_msgq);
         sv->sv_nposted_msg++;
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
 
         rc = srpc_post_passive_rqtbuf(sv->sv_id, msg, sizeof(*msg),
                                       &buf->buf_mdh, &sv->sv_ev);
@@ -428,17 +428,17 @@ srpc_service_post_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
          * msg and its event handler has been called. So we must add
          * buf to sv_posted_msgq _before_ dropping sv_lock */
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 
         if (rc == 0) {
                 if (sv->sv_shuttingdown) {
-                        spin_unlock(&sv->sv_lock);
+                        cfs_spin_unlock(&sv->sv_lock);
 
                         /* srpc_shutdown_service might have tried to unlink me
                          * when my buf_mdh was still invalid */
                         LNetMDUnlink(buf->buf_mdh);
 
-                        spin_lock(&sv->sv_lock);
+                        cfs_spin_lock(&sv->sv_lock);
                 }
                 return 0;
         }
@@ -446,11 +446,11 @@ srpc_service_post_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
         sv->sv_nposted_msg--;
         if (sv->sv_shuttingdown) return rc;
 
-        list_del(&buf->buf_list);
+        cfs_list_del(&buf->buf_list);
 
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
         LIBCFS_FREE(buf, sizeof(*buf));
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
         return rc;
 }
 
@@ -468,9 +468,9 @@ srpc_service_add_buffers (srpc_service_t *sv, int nbuffer)
                 LIBCFS_ALLOC(buf, sizeof(*buf));
                 if (buf == NULL) break;
 
-                spin_lock(&sv->sv_lock);
+                cfs_spin_lock(&sv->sv_lock);
                 rc = srpc_service_post_buffer(sv, buf);
-                spin_unlock(&sv->sv_lock);
+                cfs_spin_unlock(&sv->sv_lock);
 
                 if (rc != 0) break;
         }
@@ -484,14 +484,14 @@ srpc_service_remove_buffers (srpc_service_t *sv, int nbuffer)
         LASSERTF (nbuffer > 0,
                   "nbuffer must be positive: %d\n", nbuffer);
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 
         LASSERT (sv->sv_nprune >= 0);
         LASSERT (!sv->sv_shuttingdown);
 
         sv->sv_nprune += nbuffer;
 
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
         return;
 }
 
@@ -502,19 +502,19 @@ srpc_finish_service (srpc_service_t *sv)
         srpc_server_rpc_t *rpc;
         srpc_buffer_t     *buf;
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 
         LASSERT (sv->sv_shuttingdown); /* srpc_shutdown_service called */
 
-        if (sv->sv_nposted_msg != 0 || !list_empty(&sv->sv_active_rpcq)) {
+        if (sv->sv_nposted_msg != 0 || !cfs_list_empty(&sv->sv_active_rpcq)) {
                 CDEBUG (D_NET,
                         "waiting for %d posted buffers to unlink and "
                         "in-flight RPCs to die.\n",
                         sv->sv_nposted_msg);
 
-                if (!list_empty(&sv->sv_active_rpcq)) {
-                        rpc = list_entry(sv->sv_active_rpcq.next,
-                                         srpc_server_rpc_t, srpc_list);
+                if (!cfs_list_empty(&sv->sv_active_rpcq)) {
+                        rpc = cfs_list_entry(sv->sv_active_rpcq.next,
+                                             srpc_server_rpc_t, srpc_list);
                         CDEBUG (D_NETERROR,
                                 "Active RPC %p on shutdown: sv %s, peer %s, "
                                 "wi %s scheduled %d running %d, "
@@ -529,32 +529,32 @@ srpc_finish_service (srpc_service_t *sv)
                                 rpc->srpc_ev.ev_lnet);
                 }
 
-                spin_unlock(&sv->sv_lock);
+                cfs_spin_unlock(&sv->sv_lock);
                 return 0;
         }
 
-        spin_unlock(&sv->sv_lock); /* no lock needed from now on */
+        cfs_spin_unlock(&sv->sv_lock); /* no lock needed from now on */
 
         for (;;) {
-                struct list_head *q;
+                cfs_list_t *q;
 
-                if (!list_empty(&sv->sv_posted_msgq))
+                if (!cfs_list_empty(&sv->sv_posted_msgq))
                         q = &sv->sv_posted_msgq;
-                else if (!list_empty(&sv->sv_blocked_msgq))
+                else if (!cfs_list_empty(&sv->sv_blocked_msgq))
                         q = &sv->sv_blocked_msgq;
                 else
                         break;
 
-                buf = list_entry(q->next, srpc_buffer_t, buf_list);
-                list_del(&buf->buf_list);
+                buf = cfs_list_entry(q->next, srpc_buffer_t, buf_list);
+                cfs_list_del(&buf->buf_list);
 
                 LIBCFS_FREE(buf, sizeof(*buf));
         }
 
-        while (!list_empty(&sv->sv_free_rpcq)) {
-                rpc = list_entry(sv->sv_free_rpcq.next,
-                                 srpc_server_rpc_t, srpc_list);
-                list_del(&rpc->srpc_list);
+        while (!cfs_list_empty(&sv->sv_free_rpcq)) {
+                rpc = cfs_list_entry(sv->sv_free_rpcq.next,
+                                     srpc_server_rpc_t, srpc_list);
+                cfs_list_del(&rpc->srpc_list);
                 LIBCFS_FREE(rpc, sizeof(*rpc));
         }
 
@@ -575,9 +575,9 @@ srpc_service_recycle_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
 
         sv->sv_nprune--;
 free:
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
         LIBCFS_FREE(buf, sizeof(*buf));
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 }
 
 /* called with srpc_service_t::sv_lock held */
@@ -599,7 +599,7 @@ srpc_abort_service (srpc_service_t *sv)
 {
         srpc_server_rpc_t *rpc;
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 
         CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
                sv->sv_id, sv->sv_name);
@@ -607,12 +607,12 @@ srpc_abort_service (srpc_service_t *sv)
         /* schedule in-flight RPCs to notice the abort, NB:
          * racing with incoming RPCs; complete fix should make test
          * RPCs carry session ID in their headers */
-        list_for_each_entry (rpc, &sv->sv_active_rpcq, srpc_list) {
+        cfs_list_for_each_entry (rpc, &sv->sv_active_rpcq, srpc_list) {
                 rpc->srpc_aborted = 1;
                 srpc_schedule_server_rpc(rpc);
         }
 
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
         return;
 }
 
@@ -622,7 +622,7 @@ srpc_shutdown_service (srpc_service_t *sv)
         srpc_server_rpc_t *rpc;
         srpc_buffer_t     *buf;
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 
         CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
                sv->sv_id, sv->sv_name);
@@ -635,7 +635,7 @@ srpc_shutdown_service (srpc_service_t *sv)
                 srpc_schedule_server_rpc(rpc);
         }
 
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
 
         /* OK to traverse sv_posted_msgq without lock, since no one
          * touches sv_posted_msgq now */
@@ -774,16 +774,16 @@ srpc_server_rpc_done (srpc_server_rpc_t *rpc, int status)
                 swi_state2str(rpc->srpc_wi.wi_state), status);
 
         if (status != 0) {
-                spin_lock(&srpc_data.rpc_glock);
+                cfs_spin_lock(&srpc_data.rpc_glock);
                 srpc_data.rpc_counters.rpcs_dropped++;
-                spin_unlock(&srpc_data.rpc_glock);
+                cfs_spin_unlock(&srpc_data.rpc_glock);
         }
 
         if (rpc->srpc_done != NULL)
                 (*rpc->srpc_done) (rpc);
         LASSERT (rpc->srpc_bulk == NULL);
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 
         if (rpc->srpc_reqstbuf != NULL) {
                 /* NB might drop sv_lock in srpc_service_recycle_buffer, but
@@ -792,7 +792,7 @@ srpc_server_rpc_done (srpc_server_rpc_t *rpc, int status)
                 rpc->srpc_reqstbuf = NULL;
         }
 
-        list_del(&rpc->srpc_list); /* from sv->sv_active_rpcq */
+        cfs_list_del(&rpc->srpc_list); /* from sv->sv_active_rpcq */
 
         /*
          * No one can schedule me now since:
@@ -803,19 +803,19 @@ srpc_server_rpc_done (srpc_server_rpc_t *rpc, int status)
         LASSERT (rpc->srpc_ev.ev_fired);
         swi_kill_workitem(&rpc->srpc_wi);
 
-        if (!sv->sv_shuttingdown && !list_empty(&sv->sv_blocked_msgq)) {
-                buffer = list_entry(sv->sv_blocked_msgq.next,
-                                    srpc_buffer_t, buf_list);
+        if (!sv->sv_shuttingdown && !cfs_list_empty(&sv->sv_blocked_msgq)) {
+                buffer = cfs_list_entry(sv->sv_blocked_msgq.next,
+                                        srpc_buffer_t, buf_list);
-                list_del(&buffer->buf_list);
+                cfs_list_del(&buffer->buf_list);
 
                 srpc_init_server_rpc(rpc, sv, buffer);
-                list_add_tail(&rpc->srpc_list, &sv->sv_active_rpcq);
+                cfs_list_add_tail(&rpc->srpc_list, &sv->sv_active_rpcq);
                 srpc_schedule_server_rpc(rpc);
         } else {
-                list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
+                cfs_list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
         }
 
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
         return;
 }
 
@@ -830,10 +830,10 @@ srpc_handle_rpc (swi_workitem_t *wi)
 
         LASSERT (wi == &rpc->srpc_wi);
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 
         if (sv->sv_shuttingdown || rpc->srpc_aborted) {
-                spin_unlock(&sv->sv_lock);
+                cfs_spin_unlock(&sv->sv_lock);
 
                 if (rpc->srpc_bulk != NULL)
                         LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
@@ -846,7 +846,7 @@ srpc_handle_rpc (swi_workitem_t *wi)
                 return 0;
         }
 
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
 
         switch (wi->wi_state) {
         default:
@@ -937,16 +937,16 @@ srpc_client_rpc_expired (void *data)
                rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
                rpc->crpc_timeout);
 
-        spin_lock(&rpc->crpc_lock);
+        cfs_spin_lock(&rpc->crpc_lock);
 
         rpc->crpc_timeout = 0;
         srpc_abort_rpc(rpc, -ETIMEDOUT);
 
-        spin_unlock(&rpc->crpc_lock);
+        cfs_spin_unlock(&rpc->crpc_lock);
 
-        spin_lock(&srpc_data.rpc_glock);
+        cfs_spin_lock(&srpc_data.rpc_glock);
         srpc_data.rpc_counters.rpcs_expired++;
-        spin_unlock(&srpc_data.rpc_glock);
+        cfs_spin_unlock(&srpc_data.rpc_glock);
         return;
 }
 
@@ -983,11 +983,11 @@ srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
 #ifdef __KERNEL__
         /* timer detonated, wait for it to explode */
         while (rpc->crpc_timeout != 0) {
-                spin_unlock(&rpc->crpc_lock);
+                cfs_spin_unlock(&rpc->crpc_lock);
 
                 cfs_schedule();
 
-                spin_lock(&rpc->crpc_lock);
+                cfs_spin_lock(&rpc->crpc_lock);
         }
 #else
         LBUG(); /* impossible in single-threaded runtime */
@@ -1002,7 +1002,7 @@ srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
 
         LASSERT (status != 0 || wi->wi_state == SWI_STATE_DONE);
 
-        spin_lock(&rpc->crpc_lock);
+        cfs_spin_lock(&rpc->crpc_lock);
 
         rpc->crpc_closed = 1;
         if (rpc->crpc_status == 0)
@@ -1026,7 +1026,7 @@ srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
         LASSERT (!srpc_event_pending(rpc));
         swi_kill_workitem(wi);
 
-        spin_unlock(&rpc->crpc_lock);
+        cfs_spin_unlock(&rpc->crpc_lock);
 
         (*rpc->crpc_done) (rpc);
         return;
@@ -1044,14 +1044,14 @@ srpc_send_rpc (swi_workitem_t *wi)
         LASSERT (rpc != NULL);
         LASSERT (wi == &rpc->crpc_wi);
 
-        spin_lock(&rpc->crpc_lock);
+        cfs_spin_lock(&rpc->crpc_lock);
 
         if (rpc->crpc_aborted) {
-                spin_unlock(&rpc->crpc_lock);
+                cfs_spin_unlock(&rpc->crpc_lock);
                 goto abort;
         }
 
-        spin_unlock(&rpc->crpc_lock);
+        cfs_spin_unlock(&rpc->crpc_lock);
 
         switch (wi->wi_state) {
         default:
@@ -1133,9 +1133,9 @@ srpc_send_rpc (swi_workitem_t *wi)
         }
 
         if (rc != 0) {
-                spin_lock(&rpc->crpc_lock);
+                cfs_spin_lock(&rpc->crpc_lock);
                 srpc_abort_rpc(rpc, rc);
-                spin_unlock(&rpc->crpc_lock);
+                cfs_spin_unlock(&rpc->crpc_lock);
         }
 
 abort:
@@ -1222,7 +1222,7 @@ srpc_send_reply (srpc_server_rpc_t *rpc)
         LASSERT (buffer != NULL);
         rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
 
         if (!sv->sv_shuttingdown &&
             sv->sv_id > SRPC_FRAMEWORK_SERVICE_MAX_ID) {
@@ -1233,7 +1233,7 @@ srpc_send_reply (srpc_server_rpc_t *rpc)
                 rpc->srpc_reqstbuf = NULL;
         }
 
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
 
         ev->ev_fired = 0;
         ev->ev_data  = rpc;
@@ -1264,12 +1264,12 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
         srpc_msg_t        *msg;
         srpc_msg_type_t    type;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
 
         if (ev->status != 0) {
-                spin_lock(&srpc_data.rpc_glock);
+                cfs_spin_lock(&srpc_data.rpc_glock);
                 srpc_data.rpc_counters.errors++;
-                spin_unlock(&srpc_data.rpc_glock);
+                cfs_spin_unlock(&srpc_data.rpc_glock);
         }
 
         rpcev->ev_lnet = ev->type;
@@ -1281,9 +1281,9 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
                 LBUG ();
         case SRPC_REQUEST_SENT:
                 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
-                        spin_lock(&srpc_data.rpc_glock);
+                        cfs_spin_lock(&srpc_data.rpc_glock);
                         srpc_data.rpc_counters.rpcs_sent++;
-                        spin_unlock(&srpc_data.rpc_glock);
+                        cfs_spin_unlock(&srpc_data.rpc_glock);
                 }
         case SRPC_REPLY_RCVD:
         case SRPC_BULK_REQ_RCVD:
@@ -1300,7 +1300,7 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
                         LBUG ();
                 }
 
-                spin_lock(&crpc->crpc_lock);
+                cfs_spin_lock(&crpc->crpc_lock);
 
                 LASSERT (rpcev->ev_fired == 0);
                 rpcev->ev_fired  = 1;
@@ -1308,7 +1308,7 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
                                                 -EINTR : ev->status;
                 swi_schedule_workitem(&crpc->crpc_wi);
 
-                spin_unlock(&crpc->crpc_lock);
+                cfs_spin_unlock(&crpc->crpc_lock);
                 break;
 
         case SRPC_REQUEST_RCVD:
@@ -1316,7 +1316,7 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
 
                 LASSERT (rpcev == &sv->sv_ev);
 
-                spin_lock(&sv->sv_lock);
+                cfs_spin_lock(&sv->sv_lock);
 
                 LASSERT (ev->unlinked);
                 LASSERT (ev->type == LNET_EVENT_PUT ||
@@ -1334,11 +1334,11 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
                 if (sv->sv_shuttingdown) {
                         /* Leave buffer on sv->sv_posted_msgq since
                          * srpc_finish_service needs to traverse it. */
-                        spin_unlock(&sv->sv_lock);
+                        cfs_spin_unlock(&sv->sv_lock);
                         break;
                 }
 
-                list_del(&buffer->buf_list); /* from sv->sv_posted_msgq */
+                cfs_list_del(&buffer->buf_list); /* from sv->sv_posted_msgq */
                 msg = &buffer->buf_msg;
                 type = srpc_service2request(sv->sv_id);
 
@@ -1359,23 +1359,25 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
                         msg->msg_magic = 0;
                 }
 
-                if (!list_empty(&sv->sv_free_rpcq)) {
-                        srpc = list_entry(sv->sv_free_rpcq.next,
-                                          srpc_server_rpc_t, srpc_list);
-                        list_del(&srpc->srpc_list);
+                if (!cfs_list_empty(&sv->sv_free_rpcq)) {
+                        srpc = cfs_list_entry(sv->sv_free_rpcq.next,
+                                              srpc_server_rpc_t, srpc_list);
+                        cfs_list_del(&srpc->srpc_list);
 
                         srpc_init_server_rpc(srpc, sv, buffer);
-                        list_add_tail(&srpc->srpc_list, &sv->sv_active_rpcq);
+                        cfs_list_add_tail(&srpc->srpc_list,
+                                          &sv->sv_active_rpcq);
                         srpc_schedule_server_rpc(srpc);
                 } else {
-                        list_add_tail(&buffer->buf_list, &sv->sv_blocked_msgq);
+                        cfs_list_add_tail(&buffer->buf_list,
+                                          &sv->sv_blocked_msgq);
                 }
 
-                spin_unlock(&sv->sv_lock);
+                cfs_spin_unlock(&sv->sv_lock);
 
-                spin_lock(&srpc_data.rpc_glock);
+                cfs_spin_lock(&srpc_data.rpc_glock);
                 srpc_data.rpc_counters.rpcs_rcvd++;
-                spin_unlock(&srpc_data.rpc_glock);
+                cfs_spin_unlock(&srpc_data.rpc_glock);
                 break;
 
         case SRPC_BULK_GET_RPLD:
@@ -1388,14 +1390,14 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
 
         case SRPC_BULK_PUT_SENT:
                 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
-                        spin_lock(&srpc_data.rpc_glock);
+                        cfs_spin_lock(&srpc_data.rpc_glock);
 
                         if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
                                 srpc_data.rpc_counters.bulk_get += ev->mlength;
                         else
                                 srpc_data.rpc_counters.bulk_put += ev->mlength;
 
-                        spin_unlock(&srpc_data.rpc_glock);
+                        cfs_spin_unlock(&srpc_data.rpc_glock);
                 }
         case SRPC_REPLY_SENT:
                 srpc = rpcev->ev_data;
@@ -1403,14 +1405,14 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
 
                 LASSERT (rpcev == &srpc->srpc_ev);
 
-                spin_lock(&sv->sv_lock);
+                cfs_spin_lock(&sv->sv_lock);
 
                 rpcev->ev_fired  = 1;
                 rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
                                                 -EINTR : ev->status;
                 srpc_schedule_server_rpc(srpc);
 
-                spin_unlock(&sv->sv_lock);
+                cfs_spin_unlock(&sv->sv_lock);
                 break;
         }
 
@@ -1450,7 +1452,7 @@ srpc_startup (void)
         int rc;
 
         memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
-        spin_lock_init(&srpc_data.rpc_glock);
+        cfs_spin_lock_init(&srpc_data.rpc_glock);
 
         /* 1 second pause to avoid timestamp reuse */
         cfs_pause(cfs_time_seconds(1));
@@ -1520,7 +1522,7 @@ srpc_shutdown (void)
         default:
                 LBUG ();
         case SRPC_STATE_RUNNING:
-                spin_lock(&srpc_data.rpc_glock);
+                cfs_spin_lock(&srpc_data.rpc_glock);
 
                 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
                         srpc_service_t *sv = srpc_data.rpc_services[i];
@@ -1530,7 +1532,7 @@ srpc_shutdown (void)
                                   i, sv->sv_name);
                 }
 
-                spin_unlock(&srpc_data.rpc_glock);
+                cfs_spin_unlock(&srpc_data.rpc_glock);
 
                 stt_shutdown();
 
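Everything global in rpc.c -- the statistics and the matchbits generator --
funnels through the single rpc_glock, and readers copy whole snapshots out
under the same lock. A userspace rendering of the two idioms (hypothetical
illustration; a pthread mutex stands in for the cfs_ spinlock):

        #include <pthread.h>
        #include <stdint.h>

        struct counters { unsigned sent, rcvd, dropped, errors; };

        static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;
        static struct counters cnt;
        static uint64_t        matchbits = 1;

        static uint64_t next_id(void)           /* cf. srpc_next_id() */
        {
                uint64_t id;

                pthread_mutex_lock(&glock);
                id = matchbits++;               /* unique while serialized */
                pthread_mutex_unlock(&glock);
                return id;
        }

        static void get_counters(struct counters *out)
        {                                       /* cf. srpc_get_counters() */
                pthread_mutex_lock(&glock);
                *out = cnt;                     /* consistent snapshot */
                pthread_mutex_unlock(&glock);
        }
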
diff --git a/lnet/selftest/selftest.h b/lnet/selftest/selftest.h
index 5dc9481..26d31b2 100644 (file)
@@ -105,7 +105,7 @@ struct sfw_test_instance;
  */
 typedef int (*swi_action_t) (struct swi_workitem *);
 typedef struct swi_workitem {
-        struct list_head wi_list;        /* chain on runq */
+        cfs_list_t       wi_list;        /* chain on runq */
         int              wi_state;
         swi_action_t     wi_action;
         void            *wi_data;
@@ -224,7 +224,7 @@ typedef struct {
 
 /* message buffer descriptor */
 typedef struct {
-        struct list_head     buf_list; /* chain on srpc_service::*_msgq */
+        cfs_list_t           buf_list; /* chain on srpc_service::*_msgq */
         srpc_msg_t           buf_msg;
         lnet_handle_md_t     buf_mdh;
         lnet_nid_t           buf_self;
@@ -233,7 +233,7 @@ typedef struct {
 
 /* server-side state of a RPC */
 typedef struct srpc_server_rpc {
-        struct list_head     srpc_list;    /* chain on srpc_service::*_rpcq */
+        cfs_list_t           srpc_list;    /* chain on srpc_service::*_rpcq */
         struct srpc_service *srpc_service;
         swi_workitem_t       srpc_wi;
         srpc_event_t         srpc_ev;      /* bulk/reply event */
@@ -251,10 +251,10 @@ typedef struct srpc_server_rpc {
 
 /* client-side state of a RPC */
 typedef struct srpc_client_rpc {
-        struct list_head     crpc_list;   /* chain on user's lists */
-        spinlock_t           crpc_lock;   /* serialize */
+        cfs_list_t           crpc_list;   /* chain on user's lists */
+        cfs_spinlock_t       crpc_lock;   /* serialize */
         int                  crpc_service;
-        atomic_t             crpc_refcount;
+        cfs_atomic_t         crpc_refcount;
         int                  crpc_timeout; /* # seconds to wait for reply */
         stt_timer_t          crpc_timer;
         swi_workitem_t       crpc_wi;
@@ -289,18 +289,18 @@ offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
 do {                                                                    \
         CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n",                         \
                (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
-               atomic_read(&(rpc)->crpc_refcount));                     \
-        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
-        atomic_inc(&(rpc)->crpc_refcount);                              \
+               cfs_atomic_read(&(rpc)->crpc_refcount));                 \
+        LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0);            \
+        cfs_atomic_inc(&(rpc)->crpc_refcount);                          \
 } while (0)
 
 #define srpc_client_rpc_decref(rpc)                                     \
 do {                                                                    \
         CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n",                         \
                (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
-               atomic_read(&(rpc)->crpc_refcount));                     \
-        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
-        if (atomic_dec_and_test(&(rpc)->crpc_refcount))                 \
+               cfs_atomic_read(&(rpc)->crpc_refcount));                 \
+        LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0);            \
+        if (cfs_atomic_dec_and_test(&(rpc)->crpc_refcount))             \
                 srpc_destroy_client_rpc(rpc);                           \
 } while (0)
 
@@ -314,49 +314,49 @@ typedef struct srpc_service {
         int                sv_nprune;        /* # posted RPC to be pruned */
         int                sv_concur;        /* max # concurrent RPCs */
 
-        spinlock_t         sv_lock;
+        cfs_spinlock_t     sv_lock;
         int                sv_shuttingdown;
         srpc_event_t       sv_ev;            /* LNet event */
         int                sv_nposted_msg;   /* # posted message buffers */
-        struct list_head   sv_free_rpcq;     /* free RPC descriptors */
-        struct list_head   sv_active_rpcq;   /* in-flight RPCs */
-        struct list_head   sv_posted_msgq;   /* posted message buffers */
-        struct list_head   sv_blocked_msgq;  /* blocked for RPC descriptor */
+        cfs_list_t         sv_free_rpcq;     /* free RPC descriptors */
+        cfs_list_t         sv_active_rpcq;   /* in-flight RPCs */
+        cfs_list_t         sv_posted_msgq;   /* posted message buffers */
+        cfs_list_t         sv_blocked_msgq;  /* blocked for RPC descriptor */
 
         /* Service callbacks:
          * - sv_handler: process incoming RPC request
          * - sv_bulk_ready: notify bulk data
          */
-        int                (*sv_handler) (srpc_server_rpc_t *);
-        int                (*sv_bulk_ready) (srpc_server_rpc_t *, int);
+        int              (*sv_handler) (srpc_server_rpc_t *);
+        int              (*sv_bulk_ready) (srpc_server_rpc_t *, int);
 } srpc_service_t;
 
 #define SFW_POST_BUFFERS         256
 #define SFW_SERVICE_CONCURRENCY  (SFW_POST_BUFFERS/2)
 
 typedef struct {
-        struct list_head  sn_list;    /* chain on fw_zombie_sessions */
+        cfs_list_t        sn_list;    /* chain on fw_zombie_sessions */
         lst_sid_t         sn_id;      /* unique identifier */
         unsigned int      sn_timeout; /* # seconds' inactivity to expire */
         int               sn_timer_active;
         stt_timer_t       sn_timer;
-        struct list_head  sn_batches; /* list of batches */
+        cfs_list_t        sn_batches; /* list of batches */
         char              sn_name[LST_NAME_SIZE];
-        atomic_t          sn_refcount;
-        atomic_t          sn_brw_errors;
-        atomic_t          sn_ping_errors;
+        cfs_atomic_t      sn_refcount;
+        cfs_atomic_t      sn_brw_errors;
+        cfs_atomic_t      sn_ping_errors;
 } sfw_session_t;
 
 #define sfw_sid_equal(sid0, sid1)     ((sid0).ses_nid == (sid1).ses_nid && \
                                        (sid0).ses_stamp == (sid1).ses_stamp)
 
 typedef struct {
-        struct list_head  bat_list;      /* chain on sn_batches */
+        cfs_list_t        bat_list;      /* chain on sn_batches */
         lst_bid_t         bat_id;        /* batch id */
         int               bat_error;     /* error code of batch */
         sfw_session_t    *bat_session;   /* batch's session */
-        atomic_t          bat_nactive;   /* # of active tests */
-        struct list_head  bat_tests;     /* test instances */
+        cfs_atomic_t      bat_nactive;   /* # of active tests */
+        cfs_list_t        bat_tests;     /* test instances */
 } sfw_batch_t;
 
 typedef struct {
@@ -370,7 +370,7 @@ typedef struct {
 } sfw_test_client_ops_t;
 
 typedef struct sfw_test_instance {
-        struct list_head        tsi_list;         /* chain on batch */
+        cfs_list_t              tsi_list;         /* chain on batch */
         int                     tsi_service;      /* test type */
         sfw_batch_t            *tsi_batch;        /* batch */
         sfw_test_client_ops_t  *tsi_ops;          /* test client operations */
@@ -382,12 +382,12 @@ typedef struct sfw_test_instance {
         int                     tsi_loop;            /* loop count */
 
         /* status of test instance */
-        spinlock_t              tsi_lock;         /* serialize */
+        cfs_spinlock_t          tsi_lock;         /* serialize */
         int                     tsi_stopping:1;   /* test is stopping */
-        atomic_t                tsi_nactive;      /* # of active test unit */
-        struct list_head        tsi_units;        /* test units */
-        struct list_head        tsi_free_rpcs;    /* free rpcs */
-        struct list_head        tsi_active_rpcs;  /* active rpcs */
+        cfs_atomic_t            tsi_nactive;      /* # of active test unit */
+        cfs_list_t              tsi_units;        /* test units */
+        cfs_list_t              tsi_free_rpcs;    /* free rpcs */
+        cfs_list_t              tsi_active_rpcs;  /* active rpcs */
 
         union {
                 test_bulk_req_t bulk;             /* bulk parameter */
@@ -403,16 +403,16 @@ typedef struct sfw_test_instance {
 #define sfw_id_pages(n)    (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
 
 typedef struct sfw_test_unit {
-        struct list_head        tsu_list;         /* chain on lst_test_instance */
-        lnet_process_id_t       tsu_dest;         /* id of dest node */
-        int                     tsu_loop;         /* loop count of the test */
-        sfw_test_instance_t    *tsu_instance;     /* pointer to test instance */
-        void                   *tsu_private;      /* private data */
-        swi_workitem_t          tsu_worker;       /* workitem of the test unit */
+        cfs_list_t            tsu_list;         /* chain on lst_test_instance */
+        lnet_process_id_t     tsu_dest;         /* id of dest node */
+        int                   tsu_loop;         /* loop count of the test */
+        sfw_test_instance_t  *tsu_instance;     /* pointer to test instance */
+        void                 *tsu_private;      /* private data */
+        swi_workitem_t        tsu_worker;       /* workitem of the test unit */
 } sfw_test_unit_t;
 
 typedef struct {
-        struct list_head        tsc_list;         /* chain on fw_tests */
+        cfs_list_t              tsc_list;         /* chain on fw_tests */
         srpc_service_t         *tsc_srv_service;  /* test service */
         sfw_test_client_ops_t  *tsc_cli_ops;      /* ops of test client */
 } sfw_test_case_t;
@@ -468,7 +468,7 @@ srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
 {
         LASSERT (rpc != NULL);
         LASSERT (!srpc_event_pending(rpc));
-        LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
+        LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
 #ifndef __KERNEL__
         LASSERT (rpc->crpc_bulk.bk_pages == NULL);
 #endif
@@ -495,8 +495,8 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
 
         CFS_INIT_LIST_HEAD(&rpc->crpc_list);
         swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc);
-        spin_lock_init(&rpc->crpc_lock);
-        atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
+        cfs_spin_lock_init(&rpc->crpc_lock);
+        cfs_atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
 
         rpc->crpc_dest         = peer;
         rpc->crpc_priv         = priv;
@@ -566,11 +566,11 @@ do {                                                                    \
         while (!(cond)) {                                               \
                 CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET,               \
                        fmt, ## __VA_ARGS__);                            \
-                spin_unlock(&(lock));                                   \
+                cfs_spin_unlock(&(lock));                               \
                                                                         \
                 selftest_wait_events();                                 \
                                                                         \
-                spin_lock(&(lock));                                     \
+                cfs_spin_lock(&(lock));                                 \
         }                                                               \
 } while (0)
 
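lst_wait_until() must be entered with the named lock held: every iteration
logs, drops the lock around selftest_wait_events(), and re-takes it before
re-testing cond. This is exactly how sfw_shutdown() in framework.c above
drains its zombie sessions:

        cfs_spin_lock(&sfw_data.fw_lock);
        lst_wait_until(cfs_atomic_read(&sfw_data.fw_nzombies) == 0,
                       sfw_data.fw_lock,
                       "waiting for %d zombie sessions to die.\n",
                       cfs_atomic_read(&sfw_data.fw_nzombies));
        cfs_spin_unlock(&sfw_data.fw_lock);
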
@@ -579,9 +579,9 @@ srpc_wait_service_shutdown (srpc_service_t *sv)
 {
         int i = 2;
 
-        spin_lock(&sv->sv_lock);
+        cfs_spin_lock(&sv->sv_lock);
         LASSERT (sv->sv_shuttingdown);
-        spin_unlock(&sv->sv_lock);
+        cfs_spin_unlock(&sv->sv_lock);
 
         while (srpc_finish_service(sv) == 0) {
                 i++;
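
The addref/decref macros above pair with srpc_init_client_rpc(), which
starts crpc_refcount at 1 for the creator; when cfs_atomic_dec_and_test()
sees the count hit zero, srpc_destroy_client_rpc() frees the RPC. A usage
sketch (example_done and the surrounding function are hypothetical):

        static void example_done(srpc_client_rpc_t *rpc)
        {
                /* completion callback, invoked via crpc_done */
        }

        static void example_lifetime(lnet_process_id_t peer, int service)
        {
                srpc_client_rpc_t *rpc;

                rpc = srpc_create_client_rpc(peer, service, 0, 0,
                                             example_done, NULL, NULL);
                /* creator holds the single reference set at init time */
                srpc_client_rpc_addref(rpc);  /* +1 before sharing it */
                srpc_client_rpc_decref(rpc);  /* sharing path finished */
                srpc_client_rpc_decref(rpc);  /* creator's ref: count hits
                                               * zero and rpc is freed */
        }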
diff --git a/lnet/selftest/timer.c b/lnet/selftest/timer.c
index c5b7764..4d7d4a5 100644 (file)
                                                     (STTIMER_NSLOTS - 1))])
 
 struct st_timer_data {
-        spinlock_t       stt_lock;
+        cfs_spinlock_t   stt_lock;
         /* start time of the slot processed previously */
-        cfs_time_t       stt_prev_slot; 
-        struct list_head stt_hash[STTIMER_NSLOTS];
+        cfs_time_t       stt_prev_slot;
+        cfs_list_t       stt_hash[STTIMER_NSLOTS];
         int              stt_shuttingdown;
 #ifdef __KERNEL__
         cfs_waitq_t      stt_waitq;
@@ -72,28 +72,28 @@ struct st_timer_data {
 void
 stt_add_timer (stt_timer_t *timer)
 {
-        struct list_head *pos;
+        cfs_list_t *pos;
 
-        spin_lock(&stt_data.stt_lock);
+        cfs_spin_lock(&stt_data.stt_lock);
 
 #ifdef __KERNEL__
         LASSERT (stt_data.stt_nthreads > 0);
 #endif
         LASSERT (!stt_data.stt_shuttingdown);
         LASSERT (timer->stt_func != NULL);
-        LASSERT (list_empty(&timer->stt_list));
+        LASSERT (cfs_list_empty(&timer->stt_list));
         LASSERT (cfs_time_after(timer->stt_expires, cfs_time_current_sec()));
 
         /* a simple insertion sort */
-        list_for_each_prev (pos, STTIMER_SLOT(timer->stt_expires)) {
-                stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list);
+        cfs_list_for_each_prev (pos, STTIMER_SLOT(timer->stt_expires)) {
+                stt_timer_t *old = cfs_list_entry(pos, stt_timer_t, stt_list);
 
                 if (cfs_time_aftereq(timer->stt_expires, old->stt_expires))
                         break;
         }
-        list_add(&timer->stt_list, pos);
+        cfs_list_add(&timer->stt_list, pos);
 
-        spin_unlock(&stt_data.stt_lock);
+        cfs_spin_unlock(&stt_data.stt_lock);
 }
 
 /*
@@ -110,42 +110,42 @@ stt_del_timer (stt_timer_t *timer)
 {
         int ret = 0;
 
-        spin_lock(&stt_data.stt_lock);
+        cfs_spin_lock(&stt_data.stt_lock);
 
 #ifdef __KERNEL__
         LASSERT (stt_data.stt_nthreads > 0);
 #endif
         LASSERT (!stt_data.stt_shuttingdown);
 
-        if (!list_empty(&timer->stt_list)) {
+        if (!cfs_list_empty(&timer->stt_list)) {
                 ret = 1;
-                list_del_init(&timer->stt_list);
+                cfs_list_del_init(&timer->stt_list);
         }
 
-        spin_unlock(&stt_data.stt_lock);
+        cfs_spin_unlock(&stt_data.stt_lock);
         return ret;
 }
 
 /* called with stt_data.stt_lock held */
 int
-stt_expire_list (struct list_head *slot, cfs_time_t now)
+stt_expire_list (cfs_list_t *slot, cfs_time_t now)
 {
         int          expired = 0;
         stt_timer_t *timer;
 
-        while (!list_empty(slot)) {
-                timer = list_entry(slot->next, stt_timer_t, stt_list);
+        while (!cfs_list_empty(slot)) {
+                timer = cfs_list_entry(slot->next, stt_timer_t, stt_list);
 
                 if (cfs_time_after(timer->stt_expires, now))
                         break;
 
-                list_del_init(&timer->stt_list);
-                spin_unlock(&stt_data.stt_lock);
+                cfs_list_del_init(&timer->stt_list);
+                cfs_spin_unlock(&stt_data.stt_lock);
 
                 expired++;
                 (*timer->stt_func) (timer->stt_data);
                 
-                spin_lock(&stt_data.stt_lock);
+                cfs_spin_lock(&stt_data.stt_lock);
         }
 
         return expired;
@@ -161,7 +161,7 @@ stt_check_timers (cfs_time_t *last)
         now = cfs_time_current_sec();
         this_slot = now & STTIMER_SLOTTIMEMASK;
 
-        spin_lock(&stt_data.stt_lock);
+        cfs_spin_lock(&stt_data.stt_lock);
 
         while (cfs_time_aftereq(this_slot, *last)) {
                 expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
@@ -169,7 +169,7 @@ stt_check_timers (cfs_time_t *last)
         }
 
         *last = now & STTIMER_SLOTTIMEMASK;
-        spin_unlock(&stt_data.stt_lock);
+        cfs_spin_unlock(&stt_data.stt_lock);
         return expired;
 }
 
@@ -193,9 +193,9 @@ stt_timer_main (void *arg)
                                    rc);
         }
 
-        spin_lock(&stt_data.stt_lock);
+        cfs_spin_lock(&stt_data.stt_lock);
         stt_data.stt_nthreads--;
-        spin_unlock(&stt_data.stt_lock);
+        cfs_spin_unlock(&stt_data.stt_lock);
         return 0;
 }
 
@@ -210,9 +210,9 @@ stt_start_timer_thread (void)
         if (pid < 0)
                 return (int)pid;
 
-        spin_lock(&stt_data.stt_lock);
+        cfs_spin_lock(&stt_data.stt_lock);
         stt_data.stt_nthreads++;
-        spin_unlock(&stt_data.stt_lock);
+        cfs_spin_unlock(&stt_data.stt_lock);
         return 0;
 }
 
@@ -241,7 +241,7 @@ stt_startup (void)
         stt_data.stt_shuttingdown = 0;
         stt_data.stt_prev_slot = cfs_time_current_sec() & STTIMER_SLOTTIMEMASK;
 
-        spin_lock_init(&stt_data.stt_lock);
+        cfs_spin_lock_init(&stt_data.stt_lock);
         for (i = 0; i < STTIMER_NSLOTS; i++)
                 CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
 
@@ -261,10 +261,10 @@ stt_shutdown (void)
 {
         int i;
 
-        spin_lock(&stt_data.stt_lock);
+        cfs_spin_lock(&stt_data.stt_lock);
 
         for (i = 0; i < STTIMER_NSLOTS; i++)
-                LASSERT (list_empty(&stt_data.stt_hash[i]));
+                LASSERT (cfs_list_empty(&stt_data.stt_hash[i]));
 
         stt_data.stt_shuttingdown = 1;
 
@@ -275,6 +275,6 @@ stt_shutdown (void)
                        stt_data.stt_nthreads);
 #endif
 
-        spin_unlock(&stt_data.stt_lock);
+        cfs_spin_unlock(&stt_data.stt_lock);
         return;
 }
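
Note that stt_expire_list() drops stt_lock around each (*stt_func)()
invocation, so a callback may safely call back into stt_add_timer() to
re-arm itself. An illustrative periodic timer built on that guarantee
(all names hypothetical; expiries are in seconds, matching
cfs_time_current_sec()):

        static stt_timer_t heartbeat;

        static void heartbeat_fn(void *data)
        {
                /* runs with stt_lock dropped; re-arming cannot deadlock */
                heartbeat.stt_expires = cfs_time_add(cfs_time_current_sec(),
                                                     10);
                stt_add_timer(&heartbeat);      /* sorted re-insert */
        }

        static void heartbeat_start(void)
        {
                CFS_INIT_LIST_HEAD(&heartbeat.stt_list);
                heartbeat.stt_func    = heartbeat_fn;
                heartbeat.stt_data    = NULL;
                heartbeat.stt_expires = cfs_time_add(cfs_time_current_sec(),
                                                     10);
                stt_add_timer(&heartbeat);
        }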
diff --git a/lnet/selftest/timer.h b/lnet/selftest/timer.h
index bba302a..bb559c5 100644 (file)
@@ -41,7 +41,7 @@
 #define __SELFTEST_TIMER_H__
 
 typedef struct {
-        struct list_head  stt_list;
+        cfs_list_t        stt_list;
         cfs_time_t        stt_expires;
         void            (*stt_func) (void *);
         void             *stt_data;
diff --git a/lnet/selftest/winnt/selftest-winnt.c b/lnet/selftest/winnt/selftest-winnt.c
index 5b3dee2..cd51b19 100755 (executable)
@@ -67,7 +67,7 @@ DECLARE_EXIT(lnet_selftest_fini);
  * module info
  */
 
-struct module libcfs_global_module =  {"selftest"};
+cfs_module_t libcfs_global_module =  {"selftest"};
 
 /*
  * structure definitions
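
The rename works because, on ports without a native kernel module type,
cfs_module_t needs little more than a name; given the {"selftest"}
initializer above, a plausible minimal definition (an assumption, not the
verbatim winnt header) is:

        typedef struct cfs_module {
                char *name;                     /* the one member the
                                                 * initializer fills */
        } cfs_module_t;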
diff --git a/lnet/selftest/workitem.c b/lnet/selftest/workitem.c
index c638188..cfcf5e8 100644 (file)
 
 
 struct smoketest_workitem {
-        struct list_head wi_runq;         /* concurrent workitems */
-        struct list_head wi_serial_runq;  /* serialised workitems */
+        cfs_list_t       wi_runq;         /* concurrent workitems */
+        cfs_list_t       wi_serial_runq;  /* serialised workitems */
         cfs_waitq_t      wi_waitq;        /* where schedulers sleep */
        cfs_waitq_t      wi_serial_waitq; /* where the serial scheduler sleeps */
-        spinlock_t       wi_lock;         /* serialize */
+        cfs_spinlock_t   wi_lock;         /* serialize */
         int              wi_shuttingdown;
         int              wi_nthreads;
 } swi_data;
 
 static inline int
-swi_sched_cansleep (struct list_head *q)
+swi_sched_cansleep (cfs_list_t *q)
 {
         int rc;
 
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
 
-        rc = !swi_data.wi_shuttingdown && list_empty(q);
+        rc = !swi_data.wi_shuttingdown && cfs_list_empty(q);
 
-        spin_unlock(&swi_data.wi_lock);
+        cfs_spin_unlock(&swi_data.wi_lock);
         return rc;
 }
 
@@ -72,45 +72,45 @@ swi_sched_cansleep (struct list_head *q)
 void
 swi_kill_workitem (swi_workitem_t *wi)
 {
-        LASSERT (!in_interrupt()); /* because we use plain spinlock */
+        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
         LASSERT (!swi_data.wi_shuttingdown);
 
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
 
 #ifdef __KERNEL__
         LASSERT (wi->wi_running);
 #endif
 
         if (wi->wi_scheduled) { /* cancel pending schedules */
-                LASSERT (!list_empty(&wi->wi_list));
-                list_del_init(&wi->wi_list);
+                LASSERT (!cfs_list_empty(&wi->wi_list));
+                cfs_list_del_init(&wi->wi_list);
         }
 
-        LASSERT (list_empty(&wi->wi_list));
+        LASSERT (cfs_list_empty(&wi->wi_list));
         wi->wi_scheduled = 1; /* LBUG future schedule attempts */
 
-        spin_unlock(&swi_data.wi_lock);
+        cfs_spin_unlock(&swi_data.wi_lock);
         return;
 }
 
 void
 swi_schedule_workitem (swi_workitem_t *wi)
 {
-        LASSERT (!in_interrupt()); /* because we use plain spinlock */
+        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
         LASSERT (!swi_data.wi_shuttingdown);
 
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
 
         if (!wi->wi_scheduled) {
-                LASSERT (list_empty(&wi->wi_list));
+                LASSERT (cfs_list_empty(&wi->wi_list));
 
                 wi->wi_scheduled = 1;
-                list_add_tail(&wi->wi_list, &swi_data.wi_runq);
+                cfs_list_add_tail(&wi->wi_list, &swi_data.wi_runq);
                 cfs_waitq_signal(&swi_data.wi_waitq);
         }
 
-        LASSERT (!list_empty(&wi->wi_list));
-        spin_unlock(&swi_data.wi_lock);
+        LASSERT (!cfs_list_empty(&wi->wi_list));
+        cfs_spin_unlock(&swi_data.wi_lock);
         return;
 }
 
@@ -124,21 +124,21 @@ swi_schedule_workitem (swi_workitem_t *wi)
 void
 swi_schedule_serial_workitem (swi_workitem_t *wi)
 {
-        LASSERT (!in_interrupt()); /* because we use plain spinlock */
+        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
         LASSERT (!swi_data.wi_shuttingdown);
 
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
 
         if (!wi->wi_scheduled) {
-                LASSERT (list_empty(&wi->wi_list));
+                LASSERT (cfs_list_empty(&wi->wi_list));
 
                 wi->wi_scheduled = 1;
-                list_add_tail(&wi->wi_list, &swi_data.wi_serial_runq);
+                cfs_list_add_tail(&wi->wi_list, &swi_data.wi_serial_runq);
                 cfs_waitq_signal(&swi_data.wi_serial_waitq);
         }
 
-        LASSERT (!list_empty(&wi->wi_list));
-        spin_unlock(&swi_data.wi_lock);
+        LASSERT (!cfs_list_empty(&wi->wi_list));
+        cfs_spin_unlock(&swi_data.wi_lock);
         return;
 }
 
@@ -154,52 +154,53 @@ swi_scheduler_main (void *arg)
         cfs_daemonize(name);
         cfs_block_allsigs();
 
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
 
         while (!swi_data.wi_shuttingdown) {
                 int             nloops = 0;
                 int             rc;
                 swi_workitem_t *wi;
 
-                while (!list_empty(&swi_data.wi_runq) && 
+                while (!cfs_list_empty(&swi_data.wi_runq) &&
                        nloops < SWI_RESCHED) {
-                        wi = list_entry(swi_data.wi_runq.next,
-                                        swi_workitem_t, wi_list);
-                        list_del_init(&wi->wi_list);
+                        wi = cfs_list_entry(swi_data.wi_runq.next,
+                                            swi_workitem_t, wi_list);
+                        cfs_list_del_init(&wi->wi_list);
 
                         LASSERT (wi->wi_scheduled);
 
                         nloops++;
                         if (wi->wi_running) {
-                                list_add_tail(&wi->wi_list, &swi_data.wi_runq);
+                                cfs_list_add_tail(&wi->wi_list,
+                                                  &swi_data.wi_runq);
                                 continue;
                         }
 
                         wi->wi_running   = 1;
                         wi->wi_scheduled = 0;
-                        spin_unlock(&swi_data.wi_lock);
+                        cfs_spin_unlock(&swi_data.wi_lock);
 
                         rc = (*wi->wi_action) (wi);
 
-                        spin_lock(&swi_data.wi_lock);
+                        cfs_spin_lock(&swi_data.wi_lock);
                         if (rc == 0) /* wi still active */
                                 wi->wi_running = 0;
                 }
 
-                spin_unlock(&swi_data.wi_lock);
+                cfs_spin_unlock(&swi_data.wi_lock);
 
                 if (nloops < SWI_RESCHED)
                         cfs_wait_event_interruptible_exclusive(
-                                   swi_data.wi_waitq,
-                                   !swi_sched_cansleep(&swi_data.wi_runq), rc);
+                                swi_data.wi_waitq,
+                                !swi_sched_cansleep(&swi_data.wi_runq), rc);
                 else
-                        our_cond_resched();
+                        cfs_cond_resched();
 
-                spin_lock(&swi_data.wi_lock);
+                cfs_spin_lock(&swi_data.wi_lock);
         }
 
         swi_data.wi_nthreads--;
-        spin_unlock(&swi_data.wi_lock);
+        cfs_spin_unlock(&swi_data.wi_lock);
         return 0;
 }
 
@@ -211,18 +212,18 @@ swi_serial_scheduler_main (void *arg)
         cfs_daemonize("swi_serial_sd");
         cfs_block_allsigs();
 
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
 
         while (!swi_data.wi_shuttingdown) {
                 int             nloops = 0;
                 int             rc;
                 swi_workitem_t *wi;
 
-                while (!list_empty(&swi_data.wi_serial_runq) &&
+                while (!cfs_list_empty(&swi_data.wi_serial_runq) &&
                        nloops < SWI_RESCHED) {
-                        wi = list_entry(swi_data.wi_serial_runq.next,
-                                        swi_workitem_t, wi_list);
-                        list_del_init(&wi->wi_list);
+                        wi = cfs_list_entry(swi_data.wi_serial_runq.next,
+                                            swi_workitem_t, wi_list);
+                        cfs_list_del_init(&wi->wi_list);
 
                         LASSERTF (!wi->wi_running && wi->wi_scheduled,
                                   "wi %p running %d scheduled %d\n",
@@ -231,29 +232,30 @@ swi_serial_scheduler_main (void *arg)
                         nloops++;
                         wi->wi_running   = 1;
                         wi->wi_scheduled = 0;
-                        spin_unlock(&swi_data.wi_lock);
+                        cfs_spin_unlock(&swi_data.wi_lock);
 
                         rc = (*wi->wi_action) (wi);
 
-                        spin_lock(&swi_data.wi_lock);
+                        cfs_spin_lock(&swi_data.wi_lock);
                         if (rc == 0) /* wi still active */
                                 wi->wi_running = 0;
                 }
 
-                spin_unlock(&swi_data.wi_lock);
+                cfs_spin_unlock(&swi_data.wi_lock);
 
                 if (nloops < SWI_RESCHED)
                         cfs_wait_event_interruptible_exclusive(
-                             swi_data.wi_serial_waitq,
-                             !swi_sched_cansleep(&swi_data.wi_serial_runq), rc);
+                                swi_data.wi_serial_waitq,
+                                !swi_sched_cansleep(&swi_data.wi_serial_runq),
+                                rc);
                 else
-                        our_cond_resched();
+                        cfs_cond_resched();
 
-                spin_lock(&swi_data.wi_lock);
+                cfs_spin_lock(&swi_data.wi_lock);
         }
 
         swi_data.wi_nthreads--;
-        spin_unlock(&swi_data.wi_lock);
+        cfs_spin_unlock(&swi_data.wi_lock);
         return 0;
 }
 
@@ -268,9 +270,9 @@ swi_start_thread (int (*func) (void*), void *arg)
         if (pid < 0)
                 return (int)pid;
 
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
         swi_data.wi_nthreads++;
-        spin_unlock(&swi_data.wi_lock);
+        cfs_spin_unlock(&swi_data.wi_lock);
         return 0;
 }
 
@@ -281,32 +283,32 @@ swi_check_events (void)
 {
         int               n = 0;
         swi_workitem_t   *wi;
-        struct list_head *q;
+        cfs_list_t       *q;
 
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
 
         for (;;) {
-                if (!list_empty(&swi_data.wi_serial_runq))
+                if (!cfs_list_empty(&swi_data.wi_serial_runq))
                         q = &swi_data.wi_serial_runq;
-                else if (!list_empty(&swi_data.wi_runq))
+                else if (!cfs_list_empty(&swi_data.wi_runq))
                         q = &swi_data.wi_runq;
                 else
                         break;
 
-                wi = list_entry(q->next, swi_workitem_t, wi_list);
-                list_del_init(&wi->wi_list);
+                wi = cfs_list_entry(q->next, swi_workitem_t, wi_list);
+                cfs_list_del_init(&wi->wi_list);
 
                 LASSERT (wi->wi_scheduled);
                 wi->wi_scheduled = 0;
-                spin_unlock(&swi_data.wi_lock);
+                cfs_spin_unlock(&swi_data.wi_lock);
 
                 n++;
                 (*wi->wi_action) (wi);
 
-                spin_lock(&swi_data.wi_lock);
+                cfs_spin_lock(&swi_data.wi_lock);
         }
 
-        spin_unlock(&swi_data.wi_lock);
+        cfs_spin_unlock(&swi_data.wi_lock);
         return n;
 }
 
@@ -320,7 +322,7 @@ swi_startup (void)
 
         swi_data.wi_nthreads = 0;
         swi_data.wi_shuttingdown = 0;
-        spin_lock_init(&swi_data.wi_lock);
+        cfs_spin_lock_init(&swi_data.wi_lock);
         cfs_waitq_init(&swi_data.wi_waitq);
         cfs_waitq_init(&swi_data.wi_serial_waitq);
         CFS_INIT_LIST_HEAD(&swi_data.wi_runq);
@@ -334,7 +336,7 @@ swi_startup (void)
                 return rc;
         }
 
-        for (i = 0; i < num_online_cpus(); i++) {
+        for (i = 0; i < cfs_num_online_cpus(); i++) {
                 rc = swi_start_thread(swi_scheduler_main,
                                       (void *) (long_ptr_t) i);
                 if (rc != 0) {
@@ -354,10 +356,10 @@ swi_startup (void)
 void
 swi_shutdown (void)
 {
-        spin_lock(&swi_data.wi_lock);
+        cfs_spin_lock(&swi_data.wi_lock);
 
-        LASSERT (list_empty(&swi_data.wi_runq));
-        LASSERT (list_empty(&swi_data.wi_serial_runq));
+        LASSERT (cfs_list_empty(&swi_data.wi_runq));
+        LASSERT (cfs_list_empty(&swi_data.wi_serial_runq));
 
         swi_data.wi_shuttingdown = 1;
 
@@ -369,6 +371,6 @@ swi_shutdown (void)
                        swi_data.wi_nthreads);
 #endif
 
-        spin_unlock(&swi_data.wi_lock);
+        cfs_spin_unlock(&swi_data.wi_lock);
         return;
 }
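
Both scheduler loops above keep the same locking discipline across the rename: dequeue a work item under wi_lock, then invoke its callback with the lock dropped so the action can block or reschedule itself. A condensed sketch of that step (hypothetical helper, not part of the patch; it uses only names from the hunks above):

/* Sketch of the drain step the schedulers repeat: pop under the lock,
 * run the action unlocked. */
static int
swi_drain_one(cfs_list_t *runq)
{
        swi_workitem_t *wi;

        cfs_spin_lock(&swi_data.wi_lock);
        if (cfs_list_empty(runq)) {
                cfs_spin_unlock(&swi_data.wi_lock);
                return 0;
        }

        wi = cfs_list_entry(runq->next, swi_workitem_t, wi_list);
        cfs_list_del_init(&wi->wi_list);
        wi->wi_running   = 1;
        wi->wi_scheduled = 0;
        cfs_spin_unlock(&swi_data.wi_lock);

        (*wi->wi_action)(wi);   /* may call swi_schedule_workitem() again */
        return 1;
}
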
index 0b70023..0e378b5 100644 (file)
@@ -54,27 +54,27 @@ lnd_t               the_ptllnd = {
 
 static int ptllnd_ni_count = 0;
 
-static struct list_head ptllnd_idle_history;
-static struct list_head ptllnd_history_list;
+static cfs_list_t ptllnd_idle_history;
+static cfs_list_t ptllnd_history_list;
 
 void
 ptllnd_history_fini(void)
 {
         ptllnd_he_t *he;
 
-        while (!list_empty(&ptllnd_idle_history)) {
-                he = list_entry(ptllnd_idle_history.next,
-                                ptllnd_he_t, he_list);
+        while (!cfs_list_empty(&ptllnd_idle_history)) {
+                he = cfs_list_entry(ptllnd_idle_history.next,
+                                    ptllnd_he_t, he_list);
 
-                list_del(&he->he_list);
+                cfs_list_del(&he->he_list);
                 LIBCFS_FREE(he, sizeof(*he));
         }
 
-        while (!list_empty(&ptllnd_history_list)) {
-                he = list_entry(ptllnd_history_list.next,
-                                ptllnd_he_t, he_list);
+        while (!cfs_list_empty(&ptllnd_history_list)) {
+                he = cfs_list_entry(ptllnd_history_list.next,
+                                    ptllnd_he_t, he_list);
 
-                list_del(&he->he_list);
+                cfs_list_del(&he->he_list);
                 LIBCFS_FREE(he, sizeof(*he));
         }
 }
@@ -101,7 +101,7 @@ ptllnd_history_init(void)
                         return -ENOMEM;
                 }
 
-                list_add(&he->he_list, &ptllnd_idle_history);
+                cfs_list_add(&he->he_list, &ptllnd_idle_history);
         }
 
         PTLLND_HISTORY("Init");
@@ -118,18 +118,18 @@ ptllnd_history(const char *fn, const char *file, const int line,
         va_list        ap;
         ptllnd_he_t   *he;
 
-        if (!list_empty(&ptllnd_idle_history)) {
-                he = list_entry(ptllnd_idle_history.next,
-                                ptllnd_he_t, he_list);
-        } else if (!list_empty(&ptllnd_history_list)) {
-                he = list_entry(ptllnd_history_list.next,
-                                ptllnd_he_t, he_list);
+        if (!cfs_list_empty(&ptllnd_idle_history)) {
+                he = cfs_list_entry(ptllnd_idle_history.next,
+                                    ptllnd_he_t, he_list);
+        } else if (!cfs_list_empty(&ptllnd_history_list)) {
+                he = cfs_list_entry(ptllnd_history_list.next,
+                                    ptllnd_he_t, he_list);
         } else {
                 return;
         }
 
-        list_del(&he->he_list);
-        list_add_tail(&he->he_list, &ptllnd_history_list);
+        cfs_list_del(&he->he_list);
+        cfs_list_add_tail(&he->he_list, &ptllnd_history_list);
 
         he->he_seq = seq++;
         he->he_fn = fn;
@@ -149,17 +149,17 @@ ptllnd_dump_history(void)
 
         PTLLND_HISTORY("dumping...");
 
-        while (!list_empty(&ptllnd_history_list)) {
-                he = list_entry(ptllnd_history_list.next,
+        while (!cfs_list_empty(&ptllnd_history_list)) {
+                he = cfs_list_entry(ptllnd_history_list.next,
-                                ptllnd_he_t, he_list);
+                                    ptllnd_he_t, he_list);
 
-                list_del(&he->he_list);
+                cfs_list_del(&he->he_list);
 
                 CDEBUG(D_WARNING, "%d %d.%06d (%s:%d:%s()) %s\n", he->he_seq,
                        (int)he->he_time.tv_sec, (int)he->he_time.tv_usec,
                        he->he_file, he->he_line, he->he_fn, he->he_msg);
 
-                list_add_tail(&he->he_list, &ptllnd_idle_history);
+                cfs_list_add_tail(&he->he_list, &ptllnd_idle_history);
         }
 
         PTLLND_HISTORY("complete");
@@ -413,7 +413,7 @@ ptllnd_create_buffer (lnet_ni_t *ni)
                 return NULL;
         }
 
-        list_add(&buf->plb_list, &plni->plni_buffers);
+        cfs_list_add(&buf->plb_list, &plni->plni_buffers);
         plni->plni_nbuffers++;
 
         return buf;
@@ -427,7 +427,7 @@ ptllnd_destroy_buffer (ptllnd_buffer_t *buf)
         LASSERT (!buf->plb_posted);
 
         plni->plni_nbuffers--;
-        list_del(&buf->plb_list);
+        cfs_list_del(&buf->plb_list);
         LIBCFS_FREE(buf->plb_buffer, plni->plni_buffer_size);
         LIBCFS_FREE(buf, sizeof(*buf));
 }
@@ -480,14 +480,14 @@ ptllnd_destroy_buffers (lnet_ni_t *ni)
 {
         ptllnd_ni_t       *plni = ni->ni_data;
         ptllnd_buffer_t   *buf;
-        struct list_head  *tmp;
-        struct list_head  *nxt;
+        cfs_list_t        *tmp;
+        cfs_list_t        *nxt;
 
         CDEBUG(D_NET, "nposted_buffers = %d (before)\n",plni->plni_nposted_buffers);
         CDEBUG(D_NET, "nbuffers = %d (before)\n",plni->plni_nbuffers);
 
-        list_for_each_safe(tmp, nxt, &plni->plni_buffers) {
-                buf = list_entry(tmp, ptllnd_buffer_t, plb_list);
+        cfs_list_for_each_safe(tmp, nxt, &plni->plni_buffers) {
+                buf = cfs_list_entry(tmp, ptllnd_buffer_t, plb_list);
 
                 //CDEBUG(D_NET, "buf=%p posted=%d\n",buf,buf->plb_posted);
 
@@ -568,7 +568,7 @@ ptllnd_destroy_peer_hash (lnet_ni_t *ni)
         LASSERT( plni->plni_npeers == 0);
 
         for (i = 0; i < plni->plni_peer_hash_size; i++)
-                LASSERT (list_empty(&plni->plni_peer_hash[i]));
+                LASSERT (cfs_list_empty(&plni->plni_peer_hash[i]));
 
         LIBCFS_FREE(plni->plni_peer_hash,
                     plni->plni_peer_hash_size * sizeof(*plni->plni_peer_hash));
@@ -582,9 +582,9 @@ ptllnd_close_peers (lnet_ni_t *ni)
         int             i;
 
         for (i = 0; i < plni->plni_peer_hash_size; i++)
-                while (!list_empty(&plni->plni_peer_hash[i])) {
-                        plp = list_entry(plni->plni_peer_hash[i].next,
-                                         ptllnd_peer_t, plp_list);
+                while (!cfs_list_empty(&plni->plni_peer_hash[i])) {
+                        plp = cfs_list_entry(plni->plni_peer_hash[i].next,
+                                             ptllnd_peer_t, plp_list);
 
                         ptllnd_close_peer(plp, 0);
                 }
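
One detail worth noting in ptllnd_destroy_buffers() above: it iterates with cfs_list_for_each_safe because the loop body unlinks the current node, which would invalidate a plain iterator's next pointer. A sketch of the idiom (hypothetical free-all loop of the same shape; a real teardown would also free buf->plb_buffer):

/* Sketch: the _safe iterator caches ->next before the body runs, so the
 * current node may be unlinked and freed. */
static void
ptllnd_free_buffers(cfs_list_t *head)
{
        cfs_list_t *tmp;
        cfs_list_t *nxt;

        cfs_list_for_each_safe(tmp, nxt, head) {
                ptllnd_buffer_t *buf =
                        cfs_list_entry(tmp, ptllnd_buffer_t, plb_list);

                cfs_list_del(&buf->plb_list);    /* unlink first... */
                LIBCFS_FREE(buf, sizeof(*buf));  /* ...then free */
        }
}
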
index 2ad730e..2088b63 100644 (file)
 #include <lnet/ptllnd.h>           /* Depends on portals/p30.h */
 #include <stdarg.h>
 
-/* Hack to record history 
+/* Hack to record history
  * This should really be done by CDEBUG(D_NETTRACE...  */
 
 typedef struct {
-        struct list_head          he_list;
+        cfs_list_t                he_list;
         struct timeval            he_time;
         const char               *he_fn;
         const char               *he_file;
@@ -67,7 +67,7 @@ void ptllnd_history(const char *fn, const char *file, const int line,
 #define PTLLND_HISTORY(fmt, a...) \
         ptllnd_history(__FUNCTION__, __FILE__, __LINE__, fmt, ## a)
 
-        
+
 #define PTLLND_MD_OPTIONS        (PTL_MD_LUSTRE_COMPLETION_SEMANTICS |\
                                   PTL_MD_EVENT_START_DISABLE)
 typedef struct
@@ -91,8 +91,8 @@ typedef struct
         int                        plni_timeout;
 
         __u64                      plni_stamp;
-        struct list_head           plni_active_txs;
-        struct list_head           plni_zombie_txs;
+        cfs_list_t                 plni_active_txs;
+        cfs_list_t                 plni_zombie_txs;
         int                        plni_ntxs;
         int                        plni_nrxs;
 
@@ -100,16 +100,16 @@ typedef struct
         ptl_handle_eq_t            plni_eqh;
         ptl_process_id_t           plni_portals_id;   /* Portals ID of interface */
 
-        struct list_head          *plni_peer_hash;
+        cfs_list_t                *plni_peer_hash;
         int                        plni_npeers;
 
         int                        plni_watchdog_nextt;
         int                        plni_watchdog_peeridx;
 
-        struct list_head           plni_tx_history;
+        cfs_list_t                 plni_tx_history;
         int                        plni_ntx_history;
 
-        struct list_head           plni_buffers;
+        cfs_list_t                 plni_buffers;
         int                        plni_nbuffers;
         int                        plni_nposted_buffers;
         int                        plni_nmsgs;
@@ -119,7 +119,7 @@ typedef struct
 
 typedef struct
 {
-        struct list_head           plp_list;
+        cfs_list_t                 plp_list;
         lnet_ni_t                 *plp_ni;
         lnet_process_id_t          plp_id;
         ptl_process_id_t           plp_ptlid;
@@ -145,14 +145,14 @@ typedef struct
         int                        plp_closing:1;
         __u64                      plp_match;
         __u64                      plp_stamp;
-        struct list_head           plp_txq;
-        struct list_head           plp_noopq;
-        struct list_head           plp_activeq;
+        cfs_list_t                 plp_txq;
+        cfs_list_t                 plp_noopq;
+        cfs_list_t                 plp_activeq;
 } ptllnd_peer_t;
 
 typedef struct
 {
-        struct list_head           plb_list;
+        cfs_list_t                 plb_list;
         lnet_ni_t                 *plb_ni;
         int                        plb_posted;
         ptl_handle_md_t            plb_md;
@@ -168,7 +168,7 @@ typedef struct
 
 typedef struct
 {
-        struct list_head           tx_list;
+        cfs_list_t                 tx_list;
         int                        tx_type;
         int                        tx_status;
         ptllnd_peer_t             *tx_peer;
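
All of the queue heads retyped above (plp_txq, plp_noopq, plp_activeq, plni_buffers, ...) are intrusive cfs_list_t heads and must point at themselves before first use; the patch keeps CFS_INIT_LIST_HEAD for that, as the peer-creation path in the next file shows. A mini-sketch (hypothetical helper mirroring that setup code):

/* Sketch: an empty cfs_list_t head is self-linked; every queue declared
 * in the structs above gets this treatment at creation time. */
static void
ptllnd_init_peer_queues(ptllnd_peer_t *plp)
{
        CFS_INIT_LIST_HEAD(&plp->plp_txq);
        CFS_INIT_LIST_HEAD(&plp->plp_noopq);
        CFS_INIT_LIST_HEAD(&plp->plp_activeq);
}
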
index 6a41072..6930f9c 100644 (file)
@@ -58,7 +58,7 @@ ptllnd_post_tx(ptllnd_tx_t *tx)
         LASSERT (tx->tx_type != PTLLND_MSG_TYPE_NOOP);
 
         ptllnd_set_tx_deadline(tx);
-        list_add_tail(&tx->tx_list, &peer->plp_txq);
+        cfs_list_add_tail(&tx->tx_list, &peer->plp_txq);
         ptllnd_check_sends(peer);
 }
 
@@ -89,22 +89,22 @@ ptllnd_destroy_peer(ptllnd_peer_t *peer)
 
         LASSERT (peer->plp_closing);
         LASSERT (plni->plni_npeers > 0);
-        LASSERT (list_empty(&peer->plp_txq));
-        LASSERT (list_empty(&peer->plp_noopq));
-        LASSERT (list_empty(&peer->plp_activeq));
+        LASSERT (cfs_list_empty(&peer->plp_txq));
+        LASSERT (cfs_list_empty(&peer->plp_noopq));
+        LASSERT (cfs_list_empty(&peer->plp_activeq));
         plni->plni_npeers--;
         LIBCFS_FREE(peer, sizeof(*peer));
 }
 
 void
-ptllnd_abort_txs(ptllnd_ni_t *plni, struct list_head *q)
+ptllnd_abort_txs(ptllnd_ni_t *plni, cfs_list_t *q)
 {
-        while (!list_empty(q)) {
-                ptllnd_tx_t *tx = list_entry(q->next, ptllnd_tx_t, tx_list);
+        while (!cfs_list_empty(q)) {
+                ptllnd_tx_t *tx = cfs_list_entry(q->next, ptllnd_tx_t, tx_list);
 
                 tx->tx_status = -ESHUTDOWN;
-                list_del(&tx->tx_list);
-                list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
+                cfs_list_del(&tx->tx_list);
+                cfs_list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
         }
 }
 
@@ -119,9 +119,9 @@ ptllnd_close_peer(ptllnd_peer_t *peer, int error)
 
         peer->plp_closing = 1;
 
-        if (!list_empty(&peer->plp_txq) ||
-            !list_empty(&peer->plp_noopq) ||
-            !list_empty(&peer->plp_activeq) ||
+        if (!cfs_list_empty(&peer->plp_txq) ||
+            !cfs_list_empty(&peer->plp_noopq) ||
+            !cfs_list_empty(&peer->plp_activeq) ||
             error != 0) {
                 CWARN("Closing %s: %d\n", libcfs_id2str(peer->plp_id), error);
                 if (plni->plni_debug)
@@ -132,7 +132,7 @@ ptllnd_close_peer(ptllnd_peer_t *peer, int error)
         ptllnd_abort_txs(plni, &peer->plp_noopq);
         ptllnd_abort_txs(plni, &peer->plp_activeq);
 
-        list_del(&peer->plp_list);
+        cfs_list_del(&peer->plp_list);
         ptllnd_peer_decref(peer);
 }
 
@@ -147,7 +147,7 @@ ptllnd_find_peer(lnet_ni_t *ni, lnet_process_id_t id, int create)
 
         LASSERT (LNET_NIDNET(id.nid) == LNET_NIDNET(ni->ni_nid));
 
-        list_for_each_entry (plp, &plni->plni_peer_hash[hash], plp_list) {
+        cfs_list_for_each_entry (plp, &plni->plni_peer_hash[hash], plp_list) {
                 if (plp->plp_id.nid == id.nid &&
                     plp->plp_id.pid == id.pid) {
                         ptllnd_peer_addref(plp);
@@ -196,7 +196,7 @@ ptllnd_find_peer(lnet_ni_t *ni, lnet_process_id_t id, int create)
         CFS_INIT_LIST_HEAD(&plp->plp_activeq);
 
         ptllnd_peer_addref(plp);
-        list_add_tail(&plp->plp_list, &plni->plni_peer_hash[hash]);
+        cfs_list_add_tail(&plp->plp_list, &plni->plni_peer_hash[hash]);
 
         tx = ptllnd_new_tx(plp, PTLLND_MSG_TYPE_HELLO, 0);
         if (tx == NULL) {
@@ -221,12 +221,12 @@ ptllnd_find_peer(lnet_ni_t *ni, lnet_process_id_t id, int create)
 }
 
 int
-ptllnd_count_q(struct list_head *q)
+ptllnd_count_q(cfs_list_t *q)
 {
-        struct list_head *e;
-        int               n = 0;
+        cfs_list_t *e;
+        int         n = 0;
 
-        list_for_each(e, q) {
+        cfs_list_for_each(e, q) {
                 n++;
         }
 
@@ -303,29 +303,29 @@ ptllnd_debug_peer(lnet_ni_t *ni, lnet_process_id_t id)
               plni->plni_peer_credits + plp->plp_lazy_credits);
 
         CDEBUG(D_WARNING, "txq:\n");
-        list_for_each_entry (tx, &plp->plp_txq, tx_list) {
+        cfs_list_for_each_entry (tx, &plp->plp_txq, tx_list) {
                 ptllnd_debug_tx(tx);
         }
 
         CDEBUG(D_WARNING, "noopq:\n");
-        list_for_each_entry (tx, &plp->plp_noopq, tx_list) {
+        cfs_list_for_each_entry (tx, &plp->plp_noopq, tx_list) {
                 ptllnd_debug_tx(tx);
         }
 
         CDEBUG(D_WARNING, "activeq:\n");
-        list_for_each_entry (tx, &plp->plp_activeq, tx_list) {
+        cfs_list_for_each_entry (tx, &plp->plp_activeq, tx_list) {
                 ptllnd_debug_tx(tx);
         }
 
         CDEBUG(D_WARNING, "zombies:\n");
-        list_for_each_entry (tx, &plni->plni_zombie_txs, tx_list) {
+        cfs_list_for_each_entry (tx, &plni->plni_zombie_txs, tx_list) {
                 if (tx->tx_peer->plp_id.nid == id.nid &&
                     tx->tx_peer->plp_id.pid == id.pid)
                         ptllnd_debug_tx(tx);
         }
 
         CDEBUG(D_WARNING, "history:\n");
-        list_for_each_entry (tx, &plni->plni_tx_history, tx_list) {
+        cfs_list_for_each_entry (tx, &plni->plni_tx_history, tx_list) {
                 if (tx->tx_peer->plp_id.nid == id.nid &&
                     tx->tx_peer->plp_id.pid == id.pid)
                         ptllnd_debug_tx(tx);
@@ -523,9 +523,9 @@ ptllnd_cull_tx_history(ptllnd_ni_t *plni)
         int max = plni->plni_max_tx_history;
 
         while (plni->plni_ntx_history > max) {
-                ptllnd_tx_t *tx = list_entry(plni->plni_tx_history.next, 
-                                             ptllnd_tx_t, tx_list);
-                list_del(&tx->tx_list);
+                ptllnd_tx_t *tx = cfs_list_entry(plni->plni_tx_history.next,
+                                                 ptllnd_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);
 
                 ptllnd_peer_decref(tx->tx_peer);
 
@@ -553,8 +553,8 @@ ptllnd_tx_done(ptllnd_tx_t *tx)
 
         tx->tx_completing = 1;
 
-        if (!list_empty(&tx->tx_list))
-                list_del_init(&tx->tx_list);
+        if (!cfs_list_empty(&tx->tx_list))
+                cfs_list_del_init(&tx->tx_list);
 
         if (tx->tx_status != 0) {
                 if (plni->plni_debug) {
@@ -585,7 +585,7 @@ ptllnd_tx_done(ptllnd_tx_t *tx)
         }
 
         plni->plni_ntx_history++;
-        list_add_tail(&tx->tx_list, &plni->plni_tx_history);
+        cfs_list_add_tail(&tx->tx_list, &plni->plni_tx_history);
 
         ptllnd_cull_tx_history(plni);
 }
@@ -733,12 +733,12 @@ ptllnd_peer_send_noop (ptllnd_peer_t *peer)
 
         if (!peer->plp_sent_hello ||
             peer->plp_credits == 0 ||
-            !list_empty(&peer->plp_noopq) ||
+            !cfs_list_empty(&peer->plp_noopq) ||
             peer->plp_outstanding_credits < PTLLND_CREDIT_HIGHWATER(plni))
                 return 0;
 
         /* No tx to piggyback NOOP onto or no credit to send a tx */
-        return (list_empty(&peer->plp_txq) || peer->plp_credits == 1);
+        return (cfs_list_empty(&peer->plp_txq) || peer->plp_credits == 1);
 }
 
 void
@@ -763,18 +763,18 @@ ptllnd_check_sends(ptllnd_peer_t *peer)
                                libcfs_id2str(peer->plp_id));
                 } else {
                         ptllnd_set_tx_deadline(tx);
-                        list_add_tail(&tx->tx_list, &peer->plp_noopq);
+                        cfs_list_add_tail(&tx->tx_list, &peer->plp_noopq);
                 }
         }
 
         for (;;) {
-                if (!list_empty(&peer->plp_noopq)) {
+                if (!cfs_list_empty(&peer->plp_noopq)) {
                         LASSERT (peer->plp_sent_hello);
-                        tx = list_entry(peer->plp_noopq.next,
-                                        ptllnd_tx_t, tx_list);
-                } else if (!list_empty(&peer->plp_txq)) {
-                        tx = list_entry(peer->plp_txq.next,
-                                        ptllnd_tx_t, tx_list);
+                        tx = cfs_list_entry(peer->plp_noopq.next,
+                                            ptllnd_tx_t, tx_list);
+                } else if (!cfs_list_empty(&peer->plp_txq)) {
+                        tx = cfs_list_entry(peer->plp_txq.next,
+                                            ptllnd_tx_t, tx_list);
                 } else {
                         /* nothing to send right now */
                         break;
@@ -790,7 +790,7 @@ ptllnd_check_sends(ptllnd_peer_t *peer)
 
                 /* say HELLO first */
                 if (!peer->plp_sent_hello) {
-                        LASSERT (list_empty(&peer->plp_noopq));
+                        LASSERT (cfs_list_empty(&peer->plp_noopq));
                         LASSERT (tx->tx_type == PTLLND_MSG_TYPE_HELLO);
 
                         peer->plp_sent_hello = 1;
@@ -821,8 +821,8 @@ ptllnd_check_sends(ptllnd_peer_t *peer)
                         break;
                 }
 
-                list_del(&tx->tx_list);
-                list_add_tail(&tx->tx_list, &peer->plp_activeq);
+                cfs_list_del(&tx->tx_list);
+                cfs_list_add_tail(&tx->tx_list, &peer->plp_activeq);
 
                 CDEBUG(D_NET, "Sending at TX=%p type=%s (%d)\n",tx,
                        ptllnd_msgtype2str(tx->tx_type),tx->tx_type);
@@ -1100,7 +1100,7 @@ ptllnd_active_rdma(ptllnd_peer_t *peer, int type,
         tx->tx_lnetmsg = msg;
 
         ptllnd_set_tx_deadline(tx);
-        list_add_tail(&tx->tx_list, &peer->plp_activeq);
+        cfs_list_add_tail(&tx->tx_list, &peer->plp_activeq);
         gettimeofday(&tx->tx_bulk_posted, NULL);
 
         if (type == PTLLND_RDMA_READ)
@@ -1734,8 +1734,8 @@ ptllnd_tx_event (lnet_ni_t *ni, ptl_event_t *event)
              PtlHandleIsEqual(tx->tx_reqmdh, PTL_INVALID_HANDLE))) {
                 if (error)
                         tx->tx_status = -EIO;
-                list_del(&tx->tx_list);
-                list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
+                cfs_list_del(&tx->tx_list);
+                cfs_list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
         }
 }
 
@@ -1745,17 +1745,17 @@ ptllnd_find_timed_out_tx(ptllnd_peer_t *peer)
         time_t            now = cfs_time_current_sec();
         ptllnd_tx_t *tx;
 
-        list_for_each_entry (tx, &peer->plp_txq, tx_list) {
+        cfs_list_for_each_entry (tx, &peer->plp_txq, tx_list) {
                 if (tx->tx_deadline < now)
                         return tx;
         }
 
-        list_for_each_entry (tx, &peer->plp_noopq, tx_list) {
+        cfs_list_for_each_entry (tx, &peer->plp_noopq, tx_list) {
                 if (tx->tx_deadline < now)
                         return tx;
         }
 
-        list_for_each_entry (tx, &peer->plp_activeq, tx_list) {
+        cfs_list_for_each_entry (tx, &peer->plp_activeq, tx_list) {
                 if (tx->tx_deadline < now)
                         return tx;
         }
@@ -1789,11 +1789,11 @@ ptllnd_watchdog (lnet_ni_t *ni, time_t now)
         int               chunk = plni->plni_peer_hash_size;
         int               interval = now - (plni->plni_watchdog_nextt - p);
         int               i;
-        struct list_head *hashlist;
-        struct list_head *tmp;
-        struct list_head *nxt;
+        cfs_list_t       *hashlist;
+        cfs_list_t       *tmp;
+        cfs_list_t       *nxt;
 
-        /* Time to check for RDMA timeouts on a few more peers: 
+        /* Time to check for RDMA timeouts on a few more peers:
          * I try to do checks every 'p' seconds on a proportion of the peer
          * table and I need to check every connection 'n' times within a
          * timeout interval, to ensure I detect a timeout on any connection
@@ -1810,8 +1810,9 @@ ptllnd_watchdog (lnet_ni_t *ni, time_t now)
         for (i = 0; i < chunk; i++) {
                 hashlist = &plni->plni_peer_hash[plni->plni_watchdog_peeridx];
 
-                list_for_each_safe(tmp, nxt, hashlist) {
-                        ptllnd_check_peer(list_entry(tmp, ptllnd_peer_t, plp_list));
+                cfs_list_for_each_safe(tmp, nxt, hashlist) {
+                        ptllnd_check_peer(cfs_list_entry(tmp, ptllnd_peer_t,
+                                          plp_list));
                 }
 
                 plni->plni_watchdog_peeridx = (plni->plni_watchdog_peeridx + 1) %
@@ -1923,10 +1924,10 @@ ptllnd_wait (lnet_ni_t *ni, int milliseconds)
                 }
         }
 
-        while (!list_empty(&plni->plni_zombie_txs)) {
-                tx = list_entry(plni->plni_zombie_txs.next,
+        while (!cfs_list_empty(&plni->plni_zombie_txs)) {
+                tx = cfs_list_entry(plni->plni_zombie_txs.next,
-                                ptllnd_tx_t, tx_list);
+                                    ptllnd_tx_t, tx_list);
-                list_del_init(&tx->tx_list);
+                cfs_list_del_init(&tx->tx_list);
                 ptllnd_tx_done(tx);
         }
 
index 48c9a1e..b63615a 100644 (file)
@@ -149,7 +149,7 @@ usocklnd_tear_peer_conn(usock_conn_t *conn)
                 }
 
                 /* we cannot finalize txs right now (bug #18844) */
-                list_splice_init(&conn->uc_tx_list, &zombie_txs);
+                cfs_list_splice_init(&conn->uc_tx_list, &zombie_txs);
 
                 peer->up_conns[idx] = NULL;
                 conn->uc_peer = NULL;
@@ -198,12 +198,12 @@ usocklnd_check_peer_stale(lnet_ni_t *ni, lnet_process_id_t id)
                 return;
         }
 
-        if (cfs_atomic_read(&peer->up_refcount) == 2) {
+        if (cfs_mt_atomic_read(&peer->up_refcount) == 2) {
                 int i;
                 for (i = 0; i < N_CONN_TYPES; i++)
                         LASSERT (peer->up_conns[i] == NULL);
 
-                list_del(&peer->up_list);
+                cfs_list_del(&peer->up_list);
 
                 if (peer->up_errored &&
                     (peer->up_peerid.pid & LNET_PID_USERFLAG) == 0)
@@ -252,7 +252,7 @@ usocklnd_create_passive_conn(lnet_ni_t *ni,
         CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
         CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
         pthread_mutex_init(&conn->uc_lock, NULL);
-        cfs_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+        cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
 
         *connp = conn;
         return 0;
@@ -309,7 +309,7 @@ usocklnd_create_active_conn(usock_peer_t *peer, int type,
         CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
         CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
         pthread_mutex_init(&conn->uc_lock, NULL);
-        cfs_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+        cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
 
         *connp = conn;
         return 0;
@@ -564,26 +564,27 @@ usocklnd_destroy_tx(lnet_ni_t *ni, usock_tx_t *tx)
 }
 
 void
-usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist)
+usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist)
 {
         usock_tx_t *tx;
 
-        while (!list_empty(txlist)) {
-                tx = list_entry(txlist->next, usock_tx_t, tx_list);
-                list_del(&tx->tx_list);
+        while (!cfs_list_empty(txlist)) {
+                tx = cfs_list_entry(txlist->next, usock_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);
 
                 usocklnd_destroy_tx(ni, tx);
         }
 }
 
 void
-usocklnd_destroy_zcack_list(struct list_head *zcack_list)
+usocklnd_destroy_zcack_list(cfs_list_t *zcack_list)
 {
         usock_zc_ack_t *zcack;
 
-        while (!list_empty(zcack_list)) {
-                zcack = list_entry(zcack_list->next, usock_zc_ack_t, zc_list);
-                list_del(&zcack->zc_list);
+        while (!cfs_list_empty(zcack_list)) {
+                zcack = cfs_list_entry(zcack_list->next, usock_zc_ack_t,
+                                       zc_list);
+                cfs_list_del(&zcack->zc_list);
 
                 LIBCFS_FREE (zcack, sizeof(*zcack));
         }
@@ -616,7 +617,7 @@ usocklnd_destroy_conn(usock_conn_t *conn)
                 lnet_finalize(conn->uc_peer->up_ni, conn->uc_rx_lnetmsg, -EIO);
         }
 
-        if (!list_empty(&conn->uc_tx_list)) {
+        if (!cfs_list_empty(&conn->uc_tx_list)) {
                 LASSERT (conn->uc_peer != NULL);
                 usocklnd_destroy_txlist(conn->uc_peer->up_ni, &conn->uc_tx_list);
         }
@@ -669,13 +670,13 @@ int usocklnd_type2idx(int type)
 usock_peer_t *
 usocklnd_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
 {
-        struct list_head *peer_list = usocklnd_nid2peerlist(id.nid);
-        struct list_head *tmp;
+        cfs_list_t       *peer_list = usocklnd_nid2peerlist(id.nid);
+        cfs_list_t       *tmp;
         usock_peer_t     *peer;
 
-        list_for_each (tmp, peer_list) {
+        cfs_list_for_each (tmp, peer_list) {
 
-                peer = list_entry (tmp, usock_peer_t, up_list);
+                peer = cfs_list_entry (tmp, usock_peer_t, up_list);
 
                 if (peer->up_ni != ni)
                         continue;
@@ -710,7 +711,7 @@ usocklnd_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
         peer->up_incrn_is_set = 0;
         peer->up_errored      = 0;
         peer->up_last_alive   = 0;
-        cfs_atomic_set (&peer->up_refcount, 1); /* 1 ref for caller */
+        cfs_mt_atomic_set (&peer->up_refcount, 1); /* 1 ref for caller */
         pthread_mutex_init(&peer->up_lock, NULL);
 
         pthread_mutex_lock(&net->un_lock);
@@ -755,8 +756,8 @@ usocklnd_find_or_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
 
                 /* peer table will take 1 of my refs on peer */
                 usocklnd_peer_addref(peer);
-                list_add_tail (&peer->up_list,
-                               usocklnd_nid2peerlist(id.nid));
+                cfs_list_add_tail (&peer->up_list,
+                                   usocklnd_nid2peerlist(id.nid));
         } else {
                 usocklnd_peer_decref(peer); /* should destroy peer */
                 peer = peer2;
@@ -773,8 +774,8 @@ static int
 usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
 {
         if (conn->uc_state == UC_READY &&
-            list_empty(&conn->uc_tx_list) &&
-            list_empty(&conn->uc_zcack_list) &&
+            cfs_list_empty(&conn->uc_tx_list) &&
+            cfs_list_empty(&conn->uc_zcack_list) &&
             !conn->uc_sending) {
                 int rc = usocklnd_add_pollrequest(conn, POLL_TX_SET_REQUEST,
                                                   POLLOUT);
@@ -782,7 +783,7 @@ usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
                         return rc;
         }
 
-        list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
+        cfs_list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
         return 0;
 }
 
@@ -794,8 +795,8 @@ usocklnd_enqueue_tx(usock_conn_t *conn, usock_tx_t *tx,
                     int *send_immediately)
 {
         if (conn->uc_state == UC_READY &&
-            list_empty(&conn->uc_tx_list) &&
-            list_empty(&conn->uc_zcack_list) &&
+            cfs_list_empty(&conn->uc_tx_list) &&
+            cfs_list_empty(&conn->uc_zcack_list) &&
             !conn->uc_sending) {
                 conn->uc_sending = 1;
                 *send_immediately = 1;
@@ -803,7 +804,7 @@ usocklnd_enqueue_tx(usock_conn_t *conn, usock_tx_t *tx,
         }
 
         *send_immediately = 0;
-        list_add_tail(&tx->tx_list, &conn->uc_tx_list);
+        cfs_list_add_tail(&tx->tx_list, &conn->uc_tx_list);
 }
 
 /* Safely create new conn if needed. Save result in *connp.
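
The cfs_mt_atomic_* renames above are the multi-threaded userspace flavour of the atomic API (usocklnd runs under pthreads, not in the kernel). The "== 2" test in usocklnd_check_peer_stale() encodes a refcount invariant: once no connection pins the peer, the only remaining references are the peer hash list's and the caller's. The check in isolation (hypothetical helper):

/* Sketch: one reference is owned by the peer hash list and one by the
 * caller, so exactly two means the peer is idle. */
static int
usocklnd_peer_is_idle(usock_peer_t *peer)
{
        return cfs_mt_atomic_read(&peer->up_refcount) == 2;
}
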
index 521e5b2..e05d63e 100644 (file)
@@ -390,8 +390,8 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
          * Don't try to link it to the peer because the conn
          * has already had a chance to proceed at the beginning */
         if (peer == NULL) {
-                LASSERT(list_empty(&conn->uc_tx_list) &&
-                        list_empty(&conn->uc_zcack_list));
+                LASSERT(cfs_list_empty(&conn->uc_tx_list) &&
+                        cfs_list_empty(&conn->uc_zcack_list));
 
                 usocklnd_conn_kill(conn);
                 return 0;
@@ -407,7 +407,7 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                  * make us zombie soon and take care of our txs and
                  * zc_acks */
 
-                struct list_head tx_list, zcack_list;
+                cfs_list_t tx_list, zcack_list;
                 usock_conn_t *conn2;
                 int idx = usocklnd_type2idx(conn->uc_type);
 
@@ -441,16 +441,16 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                 conn2->uc_peer = peer;
 
                 /* unlink txs and zcack from the conn */
-                list_add(&tx_list, &conn->uc_tx_list);
-                list_del_init(&conn->uc_tx_list);
-                list_add(&zcack_list, &conn->uc_zcack_list);
-                list_del_init(&conn->uc_zcack_list);
+                cfs_list_add(&tx_list, &conn->uc_tx_list);
+                cfs_list_del_init(&conn->uc_tx_list);
+                cfs_list_add(&zcack_list, &conn->uc_zcack_list);
+                cfs_list_del_init(&conn->uc_zcack_list);
 
                 /* link them to conn2 */
-                list_add(&conn2->uc_tx_list, &tx_list);
-                list_del_init(&tx_list);
-                list_add(&conn2->uc_zcack_list, &zcack_list);
-                list_del_init(&zcack_list);
+                cfs_list_add(&conn2->uc_tx_list, &tx_list);
+                cfs_list_del_init(&tx_list);
+                cfs_list_add(&conn2->uc_zcack_list, &zcack_list);
+                cfs_list_del_init(&zcack_list);
 
                 /* make conn zombie */
                 conn->uc_peer = NULL;
@@ -488,8 +488,8 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                          * received hello, but maybe we've smth. to
                          * send? */
                         LASSERT (conn->uc_sending == 0);
-                        if ( !list_empty(&conn->uc_tx_list) ||
-                             !list_empty(&conn->uc_zcack_list) ) {
+                        if ( !cfs_list_empty(&conn->uc_tx_list) ||
+                             !cfs_list_empty(&conn->uc_zcack_list) ) {
 
                                 conn->uc_tx_deadline =
                                         cfs_time_shift(usock_tuns.ut_timeout);
@@ -660,8 +660,8 @@ usocklnd_write_handler(usock_conn_t *conn)
                 LASSERT (peer != NULL);
                 ni = peer->up_ni;
 
-                if (list_empty(&conn->uc_tx_list) &&
-                    list_empty(&conn->uc_zcack_list)) {
+                if (cfs_list_empty(&conn->uc_tx_list) &&
+                    cfs_list_empty(&conn->uc_zcack_list)) {
                         LASSERT(usock_tuns.ut_fair_limit > 1);
                         pthread_mutex_unlock(&conn->uc_lock);
                         return 0;
@@ -682,7 +682,7 @@ usocklnd_write_handler(usock_conn_t *conn)
                 rc = usocklnd_send_tx(conn, tx);
                 if (rc == 0) { /* partial send or connection closed */
                         pthread_mutex_lock(&conn->uc_lock);
-                        list_add(&tx->tx_list, &conn->uc_tx_list);
+                        cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
                         conn->uc_sending = 0;
                         pthread_mutex_unlock(&conn->uc_lock);
                         break;
@@ -698,8 +698,8 @@ usocklnd_write_handler(usock_conn_t *conn)
                 pthread_mutex_lock(&conn->uc_lock);
                 conn->uc_sending = 0;
                 if (conn->uc_state != UC_DEAD &&
-                    list_empty(&conn->uc_tx_list) &&
-                    list_empty(&conn->uc_zcack_list)) {
+                    cfs_list_empty(&conn->uc_tx_list) &&
+                    cfs_list_empty(&conn->uc_zcack_list)) {
                         conn->uc_tx_flag = 0;
                         ret = usocklnd_add_pollrequest(conn,
                                                       POLL_TX_SET_REQUEST, 0);
@@ -728,18 +728,18 @@ usocklnd_write_handler(usock_conn_t *conn)
  * brand new noop tx for zc_ack from zcack_list. Return NULL
  * if an error happened */
 usock_tx_t *
-usocklnd_try_piggyback(struct list_head *tx_list_p,
-                       struct list_head *zcack_list_p)
+usocklnd_try_piggyback(cfs_list_t *tx_list_p,
+                       cfs_list_t *zcack_list_p)
 {
         usock_tx_t     *tx;
         usock_zc_ack_t *zc_ack;
 
         /* assign tx and zc_ack */
-        if (list_empty(tx_list_p))
+        if (cfs_list_empty(tx_list_p))
                 tx = NULL;
         else {
-                tx = list_entry(tx_list_p->next, usock_tx_t, tx_list);
-                list_del(&tx->tx_list);
+                tx = cfs_list_entry(tx_list_p->next, usock_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);
 
                 /* already piggybacked or partially sent */
                 if (tx->tx_msg.ksm_zc_cookies[1] != 0 ||
@@ -747,13 +747,13 @@ usocklnd_try_piggyback(struct list_head *tx_list_p,
                         return tx;
         }
 
-        if (list_empty(zcack_list_p)) {
+        if (cfs_list_empty(zcack_list_p)) {
                 /* nothing to piggyback */
                 return tx;
         } else {
-                zc_ack = list_entry(zcack_list_p->next,
-                                    usock_zc_ack_t, zc_list);
-                list_del(&zc_ack->zc_list);
+                zc_ack = cfs_list_entry(zcack_list_p->next,
+                                        usock_zc_ack_t, zc_list);
+                cfs_list_del(&zc_ack->zc_list);
         }
 
         if (tx != NULL)
@@ -806,8 +806,8 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)
 {
         usock_conn_t    *conn2;
         usock_peer_t    *peer;
-        struct list_head tx_list;
-        struct list_head zcack_list;
+        cfs_list_t       tx_list;
+        cfs_list_t       zcack_list;
         int              idx;
         int              rc = 0;
 
@@ -828,8 +828,8 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)
 
         /* conn is passive and isn't linked to any peer,
            so its tx and zc_ack lists have to be empty */
-        LASSERT (list_empty(&conn->uc_tx_list) &&
-                 list_empty(&conn->uc_zcack_list) &&
+        LASSERT (cfs_list_empty(&conn->uc_tx_list) &&
+                 cfs_list_empty(&conn->uc_zcack_list) &&
                  conn->uc_sending == 0);
 
         rc = usocklnd_find_or_create_peer(conn->uc_ni, conn->uc_peerid, &peer);
@@ -862,16 +862,16 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)
                  * We're sure that nobody but us can access the conn;
                  * nevertheless we take the mutex: if we're wrong, a
                  * deadlock is easier to spot than a corrupted list */
-                list_add(&tx_list, &conn2->uc_tx_list);
-                list_del_init(&conn2->uc_tx_list);
-                list_add(&zcack_list, &conn2->uc_zcack_list);
-                list_del_init(&conn2->uc_zcack_list);
+                cfs_list_add(&tx_list, &conn2->uc_tx_list);
+                cfs_list_del_init(&conn2->uc_tx_list);
+                cfs_list_add(&zcack_list, &conn2->uc_zcack_list);
+                cfs_list_del_init(&conn2->uc_zcack_list);
 
                 pthread_mutex_lock(&conn->uc_lock);
-                list_add_tail(&conn->uc_tx_list, &tx_list);
-                list_del_init(&tx_list);
-                list_add_tail(&conn->uc_zcack_list, &zcack_list);
-                list_del_init(&zcack_list);
+                cfs_list_add_tail(&conn->uc_tx_list, &tx_list);
+                cfs_list_del_init(&tx_list);
+                cfs_list_add_tail(&conn->uc_zcack_list, &zcack_list);
+                cfs_list_del_init(&zcack_list);
                 conn->uc_peer = peer;
                 pthread_mutex_unlock(&conn->uc_lock);
 
@@ -899,8 +899,8 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)
                 /* we're ready to receive incoming packets and maybe
                    already have something to transmit */
                 LASSERT (conn->uc_sending == 0);
-                if ( list_empty(&conn->uc_tx_list) &&
-                     list_empty(&conn->uc_zcack_list) ) {
+                if ( cfs_list_empty(&conn->uc_tx_list) &&
+                     cfs_list_empty(&conn->uc_zcack_list) ) {
                         conn->uc_tx_flag = 0;
                         rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
                                                  POLLIN);
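
The cfs_list_add()/cfs_list_del_init() pairs in the hello paths above are manual splices: they move a whole queue onto a new head and leave the source empty. Earlier in this diff, usocklnd_tear_peer_conn() does the same thing with the dedicated helper; a sketch of the equivalent one-call form (hypothetical function name):

/* Sketch: equivalent splice using cfs_list_splice_init() instead of the
 * add/del_init pair.  The destination head must be initialized first. */
static void
usocklnd_steal_txs(usock_conn_t *from, cfs_list_t *tx_list)
{
        CFS_INIT_LIST_HEAD(tx_list);
        /* move every pending tx onto tx_list, leaving uc_tx_list empty */
        cfs_list_splice_init(&from->uc_tx_list, tx_list);
}
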
index 01ea259..b297d92 100644 (file)
 void
 usocklnd_process_stale_list(usock_pollthread_t *pt_data)
 {
-        while (!list_empty(&pt_data->upt_stale_list)) {
+        while (!cfs_list_empty(&pt_data->upt_stale_list)) {
                 usock_conn_t *conn;
-                conn = list_entry(pt_data->upt_stale_list.next,
-                                  usock_conn_t, uc_stale_list);
+                conn = cfs_list_entry(pt_data->upt_stale_list.next,
+                                      usock_conn_t, uc_stale_list);
 
-                list_del(&conn->uc_stale_list);
+                cfs_list_del(&conn->uc_stale_list);
 
                 usocklnd_tear_peer_conn(conn);
                 usocklnd_conn_decref(conn); /* -1 for idx2conn[idx] or pr */
@@ -90,12 +90,12 @@ usocklnd_poll_thread(void *arg)
 
                 /* Process all enqueued poll requests */
                 pthread_mutex_lock(&pt_data->upt_pollrequests_lock);
-                while (!list_empty(&pt_data->upt_pollrequests)) {
+                while (!cfs_list_empty(&pt_data->upt_pollrequests)) {
                         usock_pollrequest_t *pr;
-                        pr = list_entry(pt_data->upt_pollrequests.next,
-                                        usock_pollrequest_t, upr_list);
+                        pr = cfs_list_entry(pt_data->upt_pollrequests.next,
+                                            usock_pollrequest_t, upr_list);
 
-                        list_del(&pr->upr_list);
+                        cfs_list_del(&pr->upr_list);
                         rc = usocklnd_process_pollrequest(pr, pt_data);
                         if (rc)
                                 break;
@@ -171,17 +171,17 @@ usocklnd_poll_thread(void *arg)
                 /* Block new poll requests to be enqueued */
                 pt_data->upt_errno = rc;
 
-                while (!list_empty(&pt_data->upt_pollrequests)) {
+                while (!cfs_list_empty(&pt_data->upt_pollrequests)) {
                         usock_pollrequest_t *pr;
-                        pr = list_entry(pt_data->upt_pollrequests.next,
+                        pr = cfs_list_entry(pt_data->upt_pollrequests.next,
-                                        usock_pollrequest_t, upr_list);
+                                            usock_pollrequest_t, upr_list);
 
-                        list_del(&pr->upr_list);
+                        cfs_list_del(&pr->upr_list);
 
                         if (pr->upr_type == POLL_ADD_REQUEST) {
                                 libcfs_sock_release(pr->upr_conn->uc_sock);
-                                list_add_tail(&pr->upr_conn->uc_stale_list,
-                                              &pt_data->upt_stale_list);
+                                cfs_list_add_tail(&pr->upr_conn->uc_stale_list,
+                                                  &pt_data->upt_stale_list);
                         } else {
                                 usocklnd_conn_decref(pr->upr_conn);
                         }
@@ -202,7 +202,7 @@ usocklnd_poll_thread(void *arg)
         }
 
         /* unblock usocklnd_shutdown() */
-        cfs_complete(&pt_data->upt_completion);
+        cfs_mt_complete(&pt_data->upt_completion);
 
         return 0;
 }
@@ -237,7 +237,7 @@ usocklnd_add_pollrequest(usock_conn_t *conn, int type, short value)
                 return rc;
         }
 
-        list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
+        cfs_list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
         pthread_mutex_unlock(&pt->upt_pollrequests_lock);
         return 0;
 }
@@ -266,7 +266,7 @@ usocklnd_add_killrequest(usock_conn_t *conn)
                         return; /* conn will be killed in poll thread anyway */
                 }
 
-                list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
+                cfs_list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
                 pthread_mutex_unlock(&pt->upt_pollrequests_lock);
 
                 conn->uc_preq = NULL;
@@ -387,7 +387,8 @@ usocklnd_process_pollrequest(usock_pollrequest_t *pr,
                 }
 
                 libcfs_sock_release(conn->uc_sock);
-                list_add_tail(&conn->uc_stale_list, &pt_data->upt_stale_list);
+                cfs_list_add_tail(&conn->uc_stale_list,
+                                  &pt_data->upt_stale_list);
                 break;
         case POLL_RX_SET_REQUEST:
                 pollfd[idx].events = (pollfd[idx].events & ~POLLIN) | value;
index 0b887c3..19e311a 100644 (file)
@@ -143,7 +143,7 @@ usocklnd_release_poll_states(int n)
                 libcfs_sock_release(pt->upt_notifier[1]);
 
                 pthread_mutex_destroy(&pt->upt_pollrequests_lock);
-                cfs_fini_completion(&pt->upt_completion);
+                cfs_mt_fini_completion(&pt->upt_completion);
 
                 LIBCFS_FREE (pt->upt_pollfd,
                              sizeof(struct pollfd) * pt->upt_npollfd);
@@ -286,7 +286,7 @@ usocklnd_base_startup()
                 CFS_INIT_LIST_HEAD (&pt->upt_pollrequests);
                 CFS_INIT_LIST_HEAD (&pt->upt_stale_list);
                 pthread_mutex_init(&pt->upt_pollrequests_lock, NULL);
-                cfs_init_completion(&pt->upt_completion);
+                cfs_mt_init_completion(&pt->upt_completion);
         }
 
         /* Initialize peer hash list */
@@ -335,7 +335,7 @@ usocklnd_base_shutdown(int n)
         for (i = 0; i < n; i++) {
                 usock_pollthread_t *pt = &usock_data.ud_pollthreads[i];
                 usocklnd_wakeup_pollthread(i);
-                cfs_wait_for_completion(&pt->upt_completion);
+                cfs_mt_wait_for_completion(&pt->upt_completion);
         }
 
         pthread_rwlock_destroy(&usock_data.ud_peers_lock);
@@ -516,16 +516,16 @@ usocklnd_shutdown(lnet_ni_t *ni)
 void
 usocklnd_del_all_peers(lnet_ni_t *ni)
 {
-        struct list_head  *ptmp;
-        struct list_head  *pnxt;
+        cfs_list_t        *ptmp;
+        cfs_list_t        *pnxt;
         usock_peer_t      *peer;
         int                i;
 
         pthread_rwlock_wrlock(&usock_data.ud_peers_lock);
 
         for (i = 0; i < UD_PEER_HASH_SIZE; i++) {
-                list_for_each_safe (ptmp, pnxt, &usock_data.ud_peers[i]) {
-                        peer = list_entry (ptmp, usock_peer_t, up_list);
+                cfs_list_for_each_safe (ptmp, pnxt, &usock_data.ud_peers[i]) {
+                        peer = cfs_list_entry (ptmp, usock_peer_t, up_list);
 
                         if (peer->up_ni != ni)
                                 continue;
@@ -552,7 +552,7 @@ usocklnd_del_peer_and_conns(usock_peer_t *peer)
         pthread_mutex_unlock(&peer->up_lock);
 
         /* peer hash list is still protected by the caller */
-        list_del(&peer->up_list);
+        cfs_list_del(&peer->up_list);
 
         usocklnd_peer_decref(peer); /* peer isn't in hash list anymore */
 }
index 4a91918..e019d16 100644 (file)
@@ -46,7 +46,7 @@
 #include <lnet/socklnd.h>
 
 typedef struct {
-        struct list_head tx_list;    /* neccessary to form tx list */
+        cfs_list_t       tx_list;    /* necessary to form tx list */
         lnet_msg_t      *tx_lnetmsg; /* lnet message for lnet_finalize() */
         ksock_msg_t      tx_msg;     /* buffer for wire header of ksock msg */
         int              tx_resid;   /* # of residual bytes */
@@ -73,7 +73,7 @@ typedef struct {
         struct usock_preq_s  *uc_preq;       /* preallocated request */
         __u32                 uc_peer_ip;    /* IP address of the peer */
         __u16                 uc_peer_port;  /* port of the peer */
-        struct list_head      uc_stale_list; /* orphaned connections */
+        cfs_list_t            uc_stale_list; /* orphaned connections */
 
         /* Receive state */
         int                uc_rx_state;      /* message or hello state */
@@ -89,14 +89,14 @@ typedef struct {
         ksock_msg_t        uc_rx_msg;        /* message buffer */
 
         /* Send state */
-        struct list_head   uc_tx_list;       /* pending txs */
-        struct list_head   uc_zcack_list;    /* pending zc_acks */
+        cfs_list_t         uc_tx_list;       /* pending txs */
+        cfs_list_t         uc_zcack_list;    /* pending zc_acks */
         cfs_time_t         uc_tx_deadline;   /* when to time out */
         int                uc_tx_flag;       /* deadline valid? */
         int                uc_sending;       /* send op is in progress */
         usock_tx_t        *uc_tx_hello;      /* fake tx with hello */
 
-        cfs_atomic_t       uc_refcount;      /* # of users */
+        cfs_mt_atomic_t    uc_refcount;      /* # of users */
         pthread_mutex_t    uc_lock;          /* serialize */
         int                uc_errored;       /* a flag for lnet_notify() */
 } usock_conn_t;
@@ -123,39 +123,39 @@ typedef struct {
 #define N_CONN_TYPES 3 /* CONTROL, BULK_IN and BULK_OUT */
 
 typedef struct usock_peer_s {
-        struct list_head  up_list;         /* neccessary to form peer list */
-        lnet_process_id_t up_peerid;       /* id of remote peer */
+        cfs_list_t        up_list;        /* necessary to form peer list */
+        lnet_process_id_t up_peerid;      /* id of remote peer */
         usock_conn_t     *up_conns[N_CONN_TYPES]; /* conns that connect us
-                                                   * us with the peer */
-        lnet_ni_t        *up_ni;           /* pointer to parent NI */
-        __u64             up_incarnation;  /* peer's incarnation */
-        int               up_incrn_is_set; /* 0 if peer's incarnation
-                                            * hasn't been set so far */
-        cfs_atomic_t      up_refcount;     /* # of users */
-        pthread_mutex_t   up_lock;         /* serialize */
-        int               up_errored;      /* a flag for lnet_notify() */
-        cfs_time_t        up_last_alive;   /* when the peer was last alive */
+                                                       * with the peer */
+        lnet_ni_t        *up_ni;          /* pointer to parent NI */
+        __u64             up_incarnation; /* peer's incarnation */
+        int               up_incrn_is_set;/* 0 if peer's incarnation
+                                               * hasn't been set so far */
+        cfs_mt_atomic_t   up_refcount;    /* # of users */
+        pthread_mutex_t   up_lock;        /* serialize */
+        int               up_errored;     /* a flag for lnet_notify() */
+        cfs_time_t        up_last_alive;  /* when the peer was last alive */
 } usock_peer_t;
 
 typedef struct {
-        cfs_socket_t     *upt_notifier[2];       /* notifier sockets: 1st for
-                                                  writing, 2nd for reading */
-        struct pollfd    *upt_pollfd;            /* poll fds */
-        int               upt_nfds;              /* active poll fds */
-        int               upt_npollfd;           /* allocated poll fds */
-        usock_conn_t    **upt_idx2conn;          /* conns corresponding to
-                                                  * upt_pollfd[idx] */
-        int              *upt_skip;              /* skip chain */
-        int              *upt_fd2idx;            /* index into upt_pollfd[]
-                                                  * by fd */
-        int               upt_nfd2idx;           /* # of allocated elements
-                                                  * of upt_fd2idx[] */
-        struct list_head  upt_stale_list;        /* list of orphaned conns */
-        struct list_head  upt_pollrequests;      /* list of poll requests */
-        pthread_mutex_t   upt_pollrequests_lock; /* serialize */
-        int               upt_errno;             /* non-zero if errored */
-        struct cfs_completion upt_completion;    /* wait/signal facility for
-                                                  * syncronizing shutdown */
+        cfs_socket_t       *upt_notifier[2];    /* notifier sockets: 1st for
+                                                 * writing, 2nd for reading */
+        struct pollfd      *upt_pollfd;         /* poll fds */
+        int                 upt_nfds;           /* active poll fds */
+        int                 upt_npollfd;        /* allocated poll fds */
+        usock_conn_t      **upt_idx2conn;       /* conns corresponding to
+                                                 * upt_pollfd[idx] */
+        int                *upt_skip;           /* skip chain */
+        int                *upt_fd2idx;         /* index into upt_pollfd[]
+                                                 * by fd */
+        int                 upt_nfd2idx;        /* # of allocated elements
+                                                 * of upt_fd2idx[] */
+        cfs_list_t          upt_stale_list;     /* list of orphaned conns */
+        cfs_list_t          upt_pollrequests;   /* list of poll requests */
+        pthread_mutex_t     upt_pollrequests_lock; /* serialize */
+        int                 upt_errno;         /* non-zero if errored */
+        cfs_mt_completion_t upt_completion;    /* wait/signal facility for
+                                                * synchronizing shutdown */
 } usock_pollthread_t;
 
 /* Number of elements in upt_pollfd[], upt_idx2conn[] and upt_fd2idx[]
@@ -171,7 +171,7 @@ typedef struct {
         usock_pollthread_t *ud_pollthreads;    /* their state */
         int                 ud_shutdown;       /* shutdown flag */
         int                 ud_nets_count;     /* # of instances */
-        struct list_head    ud_peers[UD_PEER_HASH_SIZE]; /* peer hash table */
+        cfs_list_t          ud_peers[UD_PEER_HASH_SIZE]; /* peer hash table */
         pthread_rwlock_t    ud_peers_lock;     /* serialize */
 } usock_data_t;
 
@@ -205,11 +205,11 @@ typedef struct {
 extern usock_tunables_t usock_tuns;
 
 typedef struct usock_preq_s {
-        int              upr_type;  /* type of requested action */
+        int              upr_type;   /* type of requested action */
         short            upr_value; /* bitmask of POLLIN and POLLOUT bits */
         usock_conn_t *   upr_conn;  /* a conn for the sake of which
                                      * action will be performed */
-        struct list_head upr_list;  /* neccessary to form list */
+        cfs_list_t       upr_list;  /* necessary to form list */
 } usock_pollrequest_t;
 
 /* Allowable poll request types are: */
@@ -220,15 +220,15 @@ typedef struct usock_preq_s {
 #define POLL_SET_REQUEST 5
 
 typedef struct {
-        struct list_head zc_list;   /* neccessary to form zc_ack list */
+        cfs_list_t       zc_list;   /* necessary to form zc_ack list */
         __u64            zc_cookie; /* zero-copy cookie */
 } usock_zc_ack_t;
 
 static inline void
 usocklnd_conn_addref(usock_conn_t *conn)
 {
-        LASSERT (cfs_atomic_read(&conn->uc_refcount) > 0);
-        cfs_atomic_inc(&conn->uc_refcount);
+        LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
+        cfs_mt_atomic_inc(&conn->uc_refcount);
 }
 
 void usocklnd_destroy_conn(usock_conn_t *conn);
@@ -236,16 +236,16 @@ void usocklnd_destroy_conn(usock_conn_t *conn);
 static inline void
 usocklnd_conn_decref(usock_conn_t *conn)
 {
-        LASSERT (cfs_atomic_read(&conn->uc_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&conn->uc_refcount))
+        LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
+        if (cfs_mt_atomic_dec_and_test(&conn->uc_refcount))
                 usocklnd_destroy_conn(conn);
 }
 
 static inline void
 usocklnd_peer_addref(usock_peer_t *peer)
 {
-        LASSERT (cfs_atomic_read(&peer->up_refcount) > 0);
-        cfs_atomic_inc(&peer->up_refcount);
+        LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
+        cfs_mt_atomic_inc(&peer->up_refcount);
 }
 
 void usocklnd_destroy_peer(usock_peer_t *peer);
@@ -253,8 +253,8 @@ void usocklnd_destroy_peer(usock_peer_t *peer);
 static inline void
 usocklnd_peer_decref(usock_peer_t *peer)
 {
-        LASSERT (cfs_atomic_read(&peer->up_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&peer->up_refcount))
+        LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
+        if (cfs_mt_atomic_dec_and_test(&peer->up_refcount))
                 usocklnd_destroy_peer(peer);
 }
 
@@ -263,7 +263,7 @@ usocklnd_ip2pt_idx(__u32 ip) {
         return ip % usock_data.ud_npollthreads;
 }
 
-static inline struct list_head *
+static inline cfs_list_t *
 usocklnd_nid2peerlist(lnet_nid_t nid)
 {
         unsigned int hash = ((unsigned int)nid) % UD_PEER_HASH_SIZE;
@@ -297,8 +297,8 @@ int usocklnd_read_hello(usock_conn_t *conn, int *cont_flag);
 int usocklnd_activeconn_hellorecv(usock_conn_t *conn);
 int usocklnd_passiveconn_hellorecv(usock_conn_t *conn);
 int usocklnd_write_handler(usock_conn_t *conn);
-usock_tx_t * usocklnd_try_piggyback(struct list_head *tx_list_p,
-                                    struct list_head *zcack_list_p);
+usock_tx_t * usocklnd_try_piggyback(cfs_list_t *tx_list_p,
+                                    cfs_list_t *zcack_list_p);
 int usocklnd_activeconn_hellosent(usock_conn_t *conn);
 int usocklnd_passiveconn_hellosent(usock_conn_t *conn);
 int usocklnd_send_tx(usock_conn_t *conn, usock_tx_t *tx);
@@ -337,8 +337,8 @@ usock_tx_t *usocklnd_create_hello_tx(lnet_ni_t *ni,
 usock_tx_t *usocklnd_create_cr_hello_tx(lnet_ni_t *ni,
                                         int type, lnet_nid_t peer_nid);
 void usocklnd_destroy_tx(lnet_ni_t *ni, usock_tx_t *tx);
-void usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist);
-void usocklnd_destroy_zcack_list(struct list_head *zcack_list);
+void usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist);
+void usocklnd_destroy_zcack_list(cfs_list_t *zcack_list);
 void usocklnd_destroy_peer (usock_peer_t *peer);
 int usocklnd_get_conn_type(lnet_msg_t *lntmsg);
 int usocklnd_type2idx(int type);
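
Most of this header's churn is the struct list_head to cfs_list_t rename. A minimal sketch of the intrusive-list idiom behind those fields, using only cfs_list_* names that appear in the hunks above; struct demo_node is a hypothetical container added for illustration.

/* Sketch only: the link field lives inside the payload struct, and
 * cfs_list_entry() recovers the payload from the embedded link. */
#include <libcfs/libcfs.h>

struct demo_node {
        cfs_list_t dn_link;          /* necessary to form the list */
        int        dn_value;
};

static void demo_prune(struct demo_node *a, struct demo_node *b)
{
        cfs_list_t  head;
        cfs_list_t *pos;
        cfs_list_t *nxt;

        CFS_INIT_LIST_HEAD(&head);
        cfs_list_add(&a->dn_link, &head);        /* push at head */
        cfs_list_add_tail(&b->dn_link, &head);   /* append at tail */

        /* the _safe variant tolerates unlinking the current node,
         * exactly as usocklnd_del_all_peers() does above */
        cfs_list_for_each_safe(pos, nxt, &head) {
                struct demo_node *n =
                        cfs_list_entry(pos, struct demo_node, dn_link);

                if (n->dn_value < 0)
                        cfs_list_del(&n->dn_link);
        }
}
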
index 1195f42..6923d8a 100644 (file)
@@ -57,7 +57,7 @@ usocklnd_send_tx_immediately(usock_conn_t *conn, usock_tx_t *tx)
         rc = usocklnd_send_tx(conn, tx);
         if (rc == 0) { /* partial send or connection closed */
                 pthread_mutex_lock(&conn->uc_lock);
-                list_add(&tx->tx_list, &conn->uc_tx_list);
+                cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
                 conn->uc_sending = 0;
                 pthread_mutex_unlock(&conn->uc_lock);
                 partial_send = 1;
@@ -81,8 +81,8 @@ usocklnd_send_tx_immediately(usock_conn_t *conn, usock_tx_t *tx)
         /* schedule write handler */
         if (partial_send ||
             (conn->uc_state == UC_READY &&
-             (!list_empty(&conn->uc_tx_list) ||
-              !list_empty(&conn->uc_zcack_list)))) {
+             (!cfs_list_empty(&conn->uc_tx_list) ||
+              !cfs_list_empty(&conn->uc_zcack_list)))) {
                 conn->uc_tx_deadline =
                         cfs_time_shift(usock_tuns.ut_timeout);
                 conn->uc_tx_flag = 1;
index 8a28b4f..91200c6 100644 (file)
@@ -601,7 +601,7 @@ int jt_dbg_debug_kernel(int argc, char **argv)
                 strcpy(filename, argv[1]);
         else
                 sprintf(filename, "%s"CFS_TIME_T".%u",
-                       DEBUG_FILE_PATH_DEFAULT, time(NULL), getpid());
+                       LIBCFS_DEBUG_FILE_PATH_DEFAULT, time(NULL), getpid());
 
         if (stat(filename, &st) == 0 && S_ISREG(st.st_mode))
                 unlink(filename);
index 46f5d6b..37a7580 100644 (file)
@@ -391,20 +391,20 @@ lst_print_error(char *sub, const char *def_format, ...)
 }
 
 void
-lst_free_rpcent(struct list_head *head)
+lst_free_rpcent(cfs_list_t *head)
 {
         lstcon_rpc_ent_t *ent;
 
-        while (!list_empty(head)) {
-                ent = list_entry(head->next, lstcon_rpc_ent_t, rpe_link);
+        while (!cfs_list_empty(head)) {
+                ent = cfs_list_entry(head->next, lstcon_rpc_ent_t, rpe_link);
 
-                list_del(&ent->rpe_link);
+                cfs_list_del(&ent->rpe_link);
                 free(ent);
         }
 }
 
 void
-lst_reset_rpcent(struct list_head *head)
+lst_reset_rpcent(cfs_list_t *head)
 {
         lstcon_rpc_ent_t *ent;
 
@@ -417,7 +417,7 @@ lst_reset_rpcent(struct list_head *head)
 }
 
 int
-lst_alloc_rpcent(struct list_head *head, int count, int offset)
+lst_alloc_rpcent(cfs_list_t *head, int count, int offset)
 {
         lstcon_rpc_ent_t *ent;
         int               i;
@@ -434,14 +434,14 @@ lst_alloc_rpcent(struct list_head *head, int count, int offset)
                 ent->rpe_sid      = LST_INVALID_SID;
                 ent->rpe_peer.nid = LNET_NID_ANY;
                 ent->rpe_peer.pid = LNET_PID_ANY;
-                list_add(&ent->rpe_link, head);
+                cfs_list_add(&ent->rpe_link, head);
         }
 
         return 0;
 }
 
 void
-lst_print_transerr(struct list_head *head, char *optstr)
+lst_print_transerr(cfs_list_t *head, char *optstr)
 {
         lstcon_rpc_ent_t  *ent;
 
@@ -470,7 +470,7 @@ int lst_info_group_ioctl(char *name, lstcon_ndlist_ent_t *gent,
                          int *idx, int *count, lstcon_node_ent_t *dents);
 
 int lst_query_batch_ioctl(char *batch, int test, int server,
-                          int timeout, struct list_head *head);
+                          int timeout, cfs_list_t *head);
 
 int
 lst_ioctl(unsigned int opc, void *buf, int len)
@@ -704,7 +704,7 @@ jt_lst_end_session(int argc, char **argv)
 
 int
 lst_ping_ioctl(char *str, int type, int timeout,
-               int count, lnet_process_id_t *ids, struct list_head *head)
+               int count, lnet_process_id_t *ids, cfs_list_t *head)
 {
         lstio_debug_args_t args = {0};
 
@@ -765,7 +765,7 @@ lst_get_node_count(int type, char *str, int *countp, lnet_process_id_t **idspp)
 int
 jt_lst_ping(int argc,  char **argv)
 {
-        struct list_head   head;
+        cfs_list_t         head;
         lnet_process_id_t *ids = NULL;
         lstcon_rpc_ent_t  *ent = NULL;
         char              *str = NULL;
@@ -898,7 +898,7 @@ out:
 
 int
 lst_add_nodes_ioctl (char *name, int count, lnet_process_id_t *ids,
-                     struct list_head *resultp)
+                     cfs_list_t *resultp)
 {
         lstio_group_nodes_args_t args = {0};
 
@@ -927,7 +927,7 @@ lst_add_group_ioctl (char *name)
 int
 jt_lst_add_group(int argc, char **argv)
 {
-        struct list_head   head;
+        cfs_list_t         head;
         lnet_process_id_t *ids;
         char              *name;
         int                count;
@@ -1063,7 +1063,7 @@ jt_lst_del_group(int argc, char **argv)
 
 int
 lst_update_group_ioctl(int opc, char *name, int clean, int count,
-                       lnet_process_id_t *ids, struct list_head *resultp)
+                       lnet_process_id_t *ids, cfs_list_t *resultp)
 {
         lstio_group_update_args_t args = {0};
 
@@ -1082,7 +1082,7 @@ lst_update_group_ioctl(int opc, char *name, int clean, int count,
 int
 jt_lst_update_group(int argc, char **argv)
 {
-        struct list_head   head;
+        cfs_list_t         head;
         lnet_process_id_t *ids = NULL;
         char              *str = NULL;
         char              *grp = NULL;
@@ -1413,7 +1413,7 @@ jt_lst_list_group(int argc, char **argv)
 
 int
 lst_stat_ioctl (char *name, int count, lnet_process_id_t *idsp,
-                int timeout, struct list_head *resultp)
+                int timeout, cfs_list_t *resultp)
 {
         lstio_stat_args_t args = {0};
 
@@ -1429,11 +1429,11 @@ lst_stat_ioctl (char *name, int count, lnet_process_id_t *idsp,
 }
 
 typedef struct {
-        struct list_head        srp_link;
+        cfs_list_t              srp_link;
         int                     srp_count;
         char                   *srp_name;
         lnet_process_id_t      *srp_ids;
-        struct list_head        srp_result[2];
+        cfs_list_t              srp_result[2];
 } lst_stat_req_param_t;
 
 static void
@@ -1691,10 +1691,10 @@ lst_print_lnet_stat(char *name, int bwrt, int rdwr, int type)
 }
 
 void
-lst_print_stat(char *name, struct list_head *resultp,
+lst_print_stat(char *name, cfs_list_t *resultp,
                int idx, int lnet, int bwrt, int rdwr, int type)
 {
-        struct list_head  tmp[2];
+        cfs_list_t        tmp[2];
         lstcon_rpc_ent_t *new;
         lstcon_rpc_ent_t *old;
         sfw_counters_t   *sfwk_new;
@@ -1712,14 +1712,16 @@ lst_print_stat(char *name, struct list_head *resultp,
 
         memset(&lnet_stat_result, 0, sizeof(lnet_stat_result));
 
-        while (!list_empty(&resultp[idx])) {
-                if (list_empty(&resultp[1 - idx])) {
+        while (!cfs_list_empty(&resultp[idx])) {
+                if (cfs_list_empty(&resultp[1 - idx])) {
                         fprintf(stderr, "Group is changed, re-run stat\n");
                         break;
                 }
 
-                new = list_entry(resultp[idx].next, lstcon_rpc_ent_t, rpe_link);
-                old = list_entry(resultp[1 - idx].next, lstcon_rpc_ent_t, rpe_link);
+                new = cfs_list_entry(resultp[idx].next, lstcon_rpc_ent_t,
+                                     rpe_link);
+                old = cfs_list_entry(resultp[1 - idx].next, lstcon_rpc_ent_t,
+                                     rpe_link);
 
                 /* first time get stats result, can't calculate diff */
                 if (new->rpe_peer.nid == LNET_NID_ANY)
@@ -1731,11 +1733,11 @@ lst_print_stat(char *name, struct list_head *resultp,
                         break;
                 }
 
-                list_del(&new->rpe_link);
-                list_add_tail(&new->rpe_link, &tmp[idx]);
+                cfs_list_del(&new->rpe_link);
+                cfs_list_add_tail(&new->rpe_link, &tmp[idx]);
 
-                list_del(&old->rpe_link);
-                list_add_tail(&old->rpe_link, &tmp[1 - idx]);
+                cfs_list_del(&old->rpe_link);
+                cfs_list_add_tail(&old->rpe_link, &tmp[1 - idx]);
 
                 if (new->rpe_rpc_errno != 0 || new->rpe_fwk_errno != 0 ||
                     old->rpe_rpc_errno != 0 || old->rpe_fwk_errno != 0) {
@@ -1762,8 +1764,8 @@ lst_print_stat(char *name, struct list_head *resultp,
                 lst_cal_lnet_stat(delta, lnet_new, lnet_old);
         }
 
-        list_splice(&tmp[idx], &resultp[idx]);
-        list_splice(&tmp[1 - idx], &resultp[1 - idx]);
+        cfs_list_splice(&tmp[idx], &resultp[idx]);
+        cfs_list_splice(&tmp[1 - idx], &resultp[1 - idx]);
 
         if (errcount > 0)
                 fprintf(stdout, "Failed to stat on %d nodes\n", errcount);
@@ -1777,7 +1779,7 @@ lst_print_stat(char *name, struct list_head *resultp,
 int
 jt_lst_stat(int argc, char **argv)
 {
-        struct list_head      head;
+        cfs_list_t            head;
         lst_stat_req_param_t *srp;
         time_t                last    = 0;
         int                   optidx  = 0;
@@ -1888,7 +1890,7 @@ jt_lst_stat(int argc, char **argv)
                 if (rc != 0)
                         goto out;
 
-                list_add_tail(&srp->srp_link, &head);
+                cfs_list_add_tail(&srp->srp_link, &head);
         }
 
         while (1) {
@@ -1922,10 +1924,10 @@ jt_lst_stat(int argc, char **argv)
         }
 
 out:
-        while (!list_empty(&head)) {
-                srp = list_entry(head.next, lst_stat_req_param_t, srp_link);
+        while (!cfs_list_empty(&head)) {
+                srp = cfs_list_entry(head.next, lst_stat_req_param_t, srp_link);
 
-                list_del(&srp->srp_link);
+                cfs_list_del(&srp->srp_link);
                 lst_stat_req_param_free(srp);
         }
 
@@ -1935,7 +1937,7 @@ out:
 int
 jt_lst_show_error(int argc, char **argv)
 {
-        struct list_head      head;
+        cfs_list_t            head;
         lst_stat_req_param_t *srp;
         lstcon_rpc_ent_t     *ent;
         sfw_counters_t       *sfwk;
@@ -1988,7 +1990,7 @@ jt_lst_show_error(int argc, char **argv)
                 if (rc != 0)
                         goto out;
 
-                list_add_tail(&srp->srp_link, &head);
+                cfs_list_add_tail(&srp->srp_link, &head);
         }
 
         cfs_list_for_each_entry_typed(srp, &head, lst_stat_req_param_t,
@@ -2051,10 +2053,10 @@ jt_lst_show_error(int argc, char **argv)
                 fprintf(stdout, "Total %d error nodes in %s\n", ecount, srp->srp_name);
         }
 out:
-        while (!list_empty(&head)) {
-                srp = list_entry(head.next, lst_stat_req_param_t, srp_link);
+        while (!cfs_list_empty(&head)) {
+                srp = cfs_list_entry(head.next, lst_stat_req_param_t, srp_link);
 
-                list_del(&srp->srp_link);
+                cfs_list_del(&srp->srp_link);
                 lst_stat_req_param_free(srp);
         }
 
@@ -2108,7 +2110,7 @@ jt_lst_add_batch(int argc, char **argv)
 }
 
 int
-lst_start_batch_ioctl (char *name, int timeout, struct list_head *resultp)
+lst_start_batch_ioctl (char *name, int timeout, cfs_list_t *resultp)
 {
         lstio_batch_run_args_t args = {0};
 
@@ -2124,7 +2126,7 @@ lst_start_batch_ioctl (char *name, int timeout, struct list_head *resultp)
 int
 jt_lst_start_batch(int argc, char **argv)
 {
-        struct list_head  head;
+        cfs_list_t        head;
         char             *batch;
         int               optidx  = 0;
         int               timeout = 0;
@@ -2211,7 +2213,7 @@ jt_lst_start_batch(int argc, char **argv)
 }
 
 int
-lst_stop_batch_ioctl(char *name, int force, struct list_head *resultp)
+lst_stop_batch_ioctl(char *name, int force, cfs_list_t *resultp)
 {
         lstio_batch_stop_args_t args = {0};
 
@@ -2227,7 +2229,7 @@ lst_stop_batch_ioctl(char *name, int force, struct list_head *resultp)
 int
 jt_lst_stop_batch(int argc, char **argv)
 {
-        struct list_head  head;
+        cfs_list_t        head;
         char             *batch;
         int               force = 0;
         int               optidx;
@@ -2580,7 +2582,7 @@ loop:
 
 int
 lst_query_batch_ioctl(char *batch, int test, int server,
-                      int timeout, struct list_head *head)
+                      int timeout, cfs_list_t *head)
 {
         lstio_batch_query_args_t args = {0};
 
@@ -2596,7 +2598,7 @@ lst_query_batch_ioctl(char *batch, int test, int server,
 }
 
 void
-lst_print_tsb_verbose(struct list_head *head,
+lst_print_tsb_verbose(cfs_list_t *head,
                       int active, int idle, int error)
 {
         lstcon_rpc_ent_t *ent;
@@ -2624,23 +2626,23 @@ int
 jt_lst_query_batch(int argc, char **argv)
 {
         lstcon_test_batch_ent_t ent;
-        struct list_head     head;
-        char                *batch   = NULL;
-        time_t               last    = 0;
-        int                  optidx  = 0;
-        int                  verbose = 0;
-        int                  server  = 0;
-        int                  timeout = 5; /* default 5 seconds */
-        int                  delay   = 5; /* default 5 seconds */
-        int                  loop    = 1; /* default 1 loop */
-        int                  active  = 0;
-        int                  error   = 0;
-        int                  idle    = 0;
-        int                  count   = 0;
-        int                  test    = 0;
-        int                  rc      = 0;
-        int                  c       = 0;
-        int                  i;
+        cfs_list_t              head;
+        char                   *batch   = NULL;
+        time_t                  last    = 0;
+        int                     optidx  = 0;
+        int                     verbose = 0;
+        int                     server  = 0;
+        int                     timeout = 5; /* default 5 seconds */
+        int                     delay   = 5; /* default 5 seconds */
+        int                     loop    = 1; /* default 1 loop */
+        int                     active  = 0;
+        int                     error   = 0;
+        int                     idle    = 0;
+        int                     count   = 0;
+        int                     test    = 0;
+        int                     rc      = 0;
+        int                     c       = 0;
+        int                     i;
 
         static struct option query_batch_opts[] =
         {
@@ -2936,7 +2938,7 @@ lst_get_test_param(char *test, int argc, char **argv, void **param, int *plen)
 int
 lst_add_test_ioctl(char *batch, int type, int loop, int concur,
                    int dist, int span, char *sgrp, char *dgrp,
-                   void *param, int plen, int *retp, struct list_head *resultp)
+                   void *param, int plen, int *retp, cfs_list_t *resultp)
 {
         lstio_test_args_t args = {0};
 
@@ -2964,25 +2966,25 @@ lst_add_test_ioctl(char *batch, int type, int loop, int concur,
 int
 jt_lst_add_test(int argc, char **argv)
 {
-        struct list_head  head;
-        char             *batch  = NULL;
-        char             *test   = NULL;
-        char             *dstr   = NULL;
-        char             *from   = NULL;
-        char             *to     = NULL;
-        void             *param  = NULL;
-        int               optidx = 0;
-        int               concur = 1;
-        int               loop   = -1;
-        int               dist   = 1;
-        int               span   = 1;
-        int               plen   = 0;
-        int               fcount = 0;
-        int               tcount = 0;
-        int               ret    = 0;
-        int               type;
-        int               rc;
-        int               c;
+        cfs_list_t    head;
+        char         *batch  = NULL;
+        char         *test   = NULL;
+        char         *dstr   = NULL;
+        char         *from   = NULL;
+        char         *to     = NULL;
+        void         *param  = NULL;
+        int           optidx = 0;
+        int           concur = 1;
+        int           loop   = -1;
+        int           dist   = 1;
+        int           span   = 1;
+        int           plen   = 0;
+        int           fcount = 0;
+        int           tcount = 0;
+        int           ret    = 0;
+        int           type;
+        int           rc;
+        int           c;
 
         static struct option add_test_opts[] =
         {
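
Throughout this file the utility allocates a result list, hands it to the kernel through an ioctl, reports any per-node errors, and frees the list. A condensed sketch of that round trip, built only from the prototypes rewritten above; the node count and batch name are illustrative, and error handling is trimmed.

/* Sketch only: allocate rpcent slots, run the ioctl, report, free. */
#include <libcfs/libcfs.h>

extern int  lst_alloc_rpcent(cfs_list_t *head, int count, int offset);
extern void lst_free_rpcent(cfs_list_t *head);
extern int  lst_start_batch_ioctl(char *name, int timeout,
                                  cfs_list_t *resultp);
extern void lst_print_transerr(cfs_list_t *head, char *optstr);

static int demo_run_batch(void)
{
        cfs_list_t head;
        int        rc;

        CFS_INIT_LIST_HEAD(&head);

        rc = lst_alloc_rpcent(&head, 16 /* illustrative count */, 0);
        if (rc != 0)
                return rc;

        rc = lst_start_batch_ioctl("demo_batch" /* illustrative */,
                                   0, &head);
        if (rc != 0)
                lst_print_transerr(&head, "run batch");

        lst_free_rpcent(&head);
        return rc;
}
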
index e50037f..2a42437 100644 (file)
@@ -1265,7 +1265,7 @@ lwt_control(int enable, int clear)
 }
 
 static int
-lwt_snapshot(cycles_t *now, int *ncpu, int *totalsize,
+lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *totalsize,
              lwt_event_t *events, int size)
 {
         struct libcfs_ioctl_data data;
@@ -1362,7 +1362,8 @@ lwt_put_string(char *ustr)
 }
 
 static int
-lwt_print(FILE *f, cycles_t t0, cycles_t tlast, double mhz, int cpu, lwt_event_t *e)
+lwt_print(FILE *f, cfs_cycles_t t0, cfs_cycles_t tlast, double mhz, int cpu,
+          lwt_event_t *e)
 {
 #ifndef __WORDSIZE
 # error "__WORDSIZE not defined"
@@ -1427,9 +1428,9 @@ jt_ptl_lwt(int argc, char **argv)
         int             rc;
         int             i;
         double          mhz;
-        cycles_t        t0;
-        cycles_t        tlast;
-        cycles_t        tnow;
+        cfs_cycles_t    t0;
+        cfs_cycles_t    tlast;
+        cfs_cycles_t    tnow;
         struct timeval  tvnow;
         int             printed_date = 0;
         int             nlines = 0;
@@ -1569,7 +1570,7 @@ jt_ptl_lwt(int argc, char **argv)
                 if (t0 <= next_event[cpu]->lwte_when) {
                         /* on or after the first event */
                         if (!printed_date) {
-                                cycles_t du = (tnow - t0) / mhz;
+                                cfs_cycles_t du = (tnow - t0) / mhz;
                                 time_t   then = tvnow.tv_sec - du/1000000;
 
                                 if (du % 1000000 > tvnow.tv_usec)
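
The lwt tracer timestamps events in raw cycle counts and converts them to wall-clock time by dividing by the CPU frequency in MHz, as the printed_date hunk above does. A self-contained sketch of that arithmetic; the integer width of cfs_cycles_t is assumed here, and the final borrow is implied by the truncated hunk rather than shown in it.

/* Sketch only: cycles / MHz == microseconds since the first event. */
#include <sys/time.h>
#include <time.h>

typedef unsigned long long demo_cycles_t;  /* stand-in for cfs_cycles_t */

static time_t demo_start_time(demo_cycles_t t0, demo_cycles_t tnow,
                              const struct timeval *tvnow, double mhz)
{
        demo_cycles_t du   = (demo_cycles_t)((tnow - t0) / mhz);
        time_t        then = tvnow->tv_sec - du / 1000000;

        /* borrow a second when the sub-second part underflows */
        if (du % 1000000 > (demo_cycles_t)tvnow->tv_usec)
                then--;
        return then;
}
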
index 8867fd8..5c6737f 100644 (file)
@@ -81,7 +81,7 @@ int cmm_root_get(const struct lu_env *env, struct md_device *md,
 }
 
 static int cmm_statfs(const struct lu_env *env, struct md_device *md,
-                      struct kstatfs *sfs)
+                      cfs_kstatfs_t *sfs)
 {
         struct cmm_device *cmm_dev = md2cmm_dev(md);
         int rc;
@@ -428,16 +428,16 @@ static int cmm_post_init_mdc(const struct lu_env *env,
 
         /* get the max mdsize and cookiesize from lower layer */
         rc = cmm_maxsize_get(env, &cmm->cmm_md_dev, &max_mdsize,
-                                                &max_cookiesize);
+                             &max_cookiesize);
         if (rc)
                 RETURN(rc);
 
-        spin_lock(&cmm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
-                                 mc_linkage) {
+        cfs_spin_lock(&cmm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
+                                     mc_linkage) {
                 cmm_mdc_init_ea_size(env, mc, max_mdsize, max_cookiesize);
         }
-        spin_unlock(&cmm->cmm_tgt_guard);
+        cfs_spin_unlock(&cmm->cmm_tgt_guard);
         RETURN(rc);
 }
 
@@ -468,15 +468,15 @@ static int cmm_add_mdc(const struct lu_env *env,
                 RETURN(-EINVAL);
         }
 
-        spin_lock(&cm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
-                                 mc_linkage) {
+        cfs_spin_lock(&cm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
+                                     mc_linkage) {
                 if (mc->mc_num == mdc_num) {
-                        spin_unlock(&cm->cmm_tgt_guard);
+                        cfs_spin_unlock(&cm->cmm_tgt_guard);
                         RETURN(-EEXIST);
                 }
         }
-        spin_unlock(&cm->cmm_tgt_guard);
+        cfs_spin_unlock(&cm->cmm_tgt_guard);
         ld = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
         if (IS_ERR(ld))
                 RETURN(PTR_ERR(ld));
@@ -496,23 +496,23 @@ static int cmm_add_mdc(const struct lu_env *env,
                 RETURN(rc);
         }
 
-        spin_lock(&cm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
-                                 mc_linkage) {
+        cfs_spin_lock(&cm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
+                                     mc_linkage) {
                 if (mc->mc_num == mdc_num) {
-                        spin_unlock(&cm->cmm_tgt_guard);
+                        cfs_spin_unlock(&cm->cmm_tgt_guard);
                         ldt->ldt_ops->ldto_device_fini(env, ld);
                         ldt->ldt_ops->ldto_device_free(env, ld);
                         RETURN(-EEXIST);
                 }
         }
         mc = lu2mdc_dev(ld);
-        list_add_tail(&mc->mc_linkage, &cm->cmm_targets);
+        cfs_list_add_tail(&mc->mc_linkage, &cm->cmm_targets);
         cm->cmm_tgt_count++;
 #ifdef HAVE_QUOTA_SUPPORT
         first = cm->cmm_tgt_count;
 #endif
-        spin_unlock(&cm->cmm_tgt_guard);
+        cfs_spin_unlock(&cm->cmm_tgt_guard);
 
         lu_device_get(cmm_lu);
         lu_ref_add(&cmm_lu->ld_reference, "mdc-child", ld);
@@ -552,13 +552,13 @@ static void cmm_device_shutdown(const struct lu_env *env,
         fld_client_del_target(cm->cmm_fld, cm->cmm_local_num);
 
         /* Finish all mdc devices. */
-        spin_lock(&cm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
+        cfs_spin_lock(&cm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
                 struct lu_device *ld_m = mdc2lu_dev(mc);
                 fld_client_del_target(cm->cmm_fld, mc->mc_num);
                 ld_m->ld_ops->ldo_process_config(env, ld_m, cfg);
         }
-        spin_unlock(&cm->cmm_tgt_guard);
+        cfs_spin_unlock(&cm->cmm_tgt_guard);
 
         /* remove upcall device*/
         md_upcall_fini(&cm->cmm_md_dev);
@@ -687,7 +687,7 @@ static struct lu_device *cmm_device_free(const struct lu_env *env,
         ENTRY;
 
         LASSERT(m->cmm_tgt_count == 0);
-        LASSERT(list_empty(&m->cmm_targets));
+        LASSERT(cfs_list_empty(&m->cmm_targets));
         if (m->cmm_fld != NULL) {
                 OBD_FREE_PTR(m->cmm_fld);
                 m->cmm_fld = NULL;
@@ -784,7 +784,7 @@ static int cmm_device_init(const struct lu_env *env, struct lu_device *d,
         int err = 0;
         ENTRY;
 
-        spin_lock_init(&m->cmm_tgt_guard);
+        cfs_spin_lock_init(&m->cmm_tgt_guard);
         CFS_INIT_LIST_HEAD(&m->cmm_targets);
         m->cmm_tgt_count = 0;
         m->cmm_child = lu2md_dev(next);
@@ -813,19 +813,19 @@ static struct lu_device *cmm_device_fini(const struct lu_env *env,
         ENTRY;
 
         /* Finish all mdc devices */
-        spin_lock(&cm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
+        cfs_spin_lock(&cm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
                 struct lu_device *ld_m = mdc2lu_dev(mc);
                 struct lu_device *ld_c = cmm2lu_dev(cm);
 
-                list_del_init(&mc->mc_linkage);
+                cfs_list_del_init(&mc->mc_linkage);
                 lu_ref_del(&ld_c->ld_reference, "mdc-child", ld_m);
                 lu_device_put(ld_c);
                 ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m);
                 ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m);
                 cm->cmm_tgt_count--;
         }
-        spin_unlock(&cm->cmm_tgt_guard);
+        cfs_spin_unlock(&cm->cmm_tgt_guard);
 
         fld_client_fini(cm->cmm_fld);
         ls = cmm2lu_dev(cm)->ld_site;
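
cmm_add_mdc() above shows a pattern worth noting with the renamed primitives: scan the target list under the spinlock, drop the lock to allocate, then rescan before inserting, since a racing thread may have attached the same target in between. A condensed sketch; struct demo_tgt and the globals are hypothetical stand-ins.

/* Sketch only: duplicate check and insert under one spinlock. */
#include <libcfs/libcfs.h>
#include <errno.h>

struct demo_tgt {
        cfs_list_t t_linkage;
        int        t_num;
};

static cfs_spinlock_t demo_guard;
static cfs_list_t     demo_targets;

static void demo_init(void)
{
        cfs_spin_lock_init(&demo_guard);
        CFS_INIT_LIST_HEAD(&demo_targets);
}

static int demo_add_tgt(struct demo_tgt *nt)
{
        struct demo_tgt *t, *tmp;

        cfs_spin_lock(&demo_guard);
        cfs_list_for_each_entry_safe(t, tmp, &demo_targets, t_linkage) {
                if (t->t_num == nt->t_num) {
                        cfs_spin_unlock(&demo_guard);
                        return -EEXIST;          /* duplicate target */
                }
        }
        /* cmm_add_mdc() drops the lock at this point to allocate the
         * child device, then repeats the scan before the insert */
        cfs_list_add_tail(&nt->t_linkage, &demo_targets);
        cfs_spin_unlock(&demo_guard);
        return 0;
}
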
index 36e9b16..00bfcce 100644 (file)
@@ -62,8 +62,8 @@ struct cmm_device {
         /* other MD servers in cluster */
         mdsno_t                 cmm_local_num;
         __u32                   cmm_tgt_count;
-        struct list_head        cmm_targets;
-        spinlock_t              cmm_tgt_guard;
+        cfs_list_t              cmm_targets;
+        cfs_spinlock_t          cmm_tgt_guard;
         cfs_proc_dir_entry_t   *cmm_proc_entry;
         struct lprocfs_stats   *cmm_stats;
 };
index 2893511..c9301cf 100644 (file)
@@ -801,14 +801,14 @@ static struct lu_device *cmr_child_dev(struct cmm_device *d, __u32 num)
         struct lu_device *next = NULL;
         struct mdc_device *mdc;
 
-        spin_lock(&d->cmm_tgt_guard);
-        list_for_each_entry(mdc, &d->cmm_targets, mc_linkage) {
+        cfs_spin_lock(&d->cmm_tgt_guard);
+        cfs_list_for_each_entry(mdc, &d->cmm_targets, mc_linkage) {
                 if (mdc->mc_num == num) {
                         next = mdc2lu_dev(mdc);
                         break;
                 }
         }
-        spin_unlock(&d->cmm_tgt_guard);
+        cfs_spin_unlock(&d->cmm_tgt_guard);
         return next;
 }
 
index 4487876..eb91f3f 100644 (file)
@@ -264,13 +264,13 @@ static int cmm_split_fid_alloc(const struct lu_env *env,
 
         LASSERT(cmm != NULL && mc != NULL && fid != NULL);
 
-        down(&mc->mc_fid_sem);
+        cfs_down(&mc->mc_fid_sem);
 
         /* Alloc new fid on @mc. */
         rc = obd_fid_alloc(mc->mc_desc.cl_exp, fid, NULL);
         if (rc > 0)
                 rc = 0;
-        up(&mc->mc_fid_sem);
+        cfs_up(&mc->mc_fid_sem);
 
         RETURN(rc);
 }
@@ -346,7 +346,7 @@ static int cmm_split_slaves_create(const struct lu_env *env,
         slave_lmv->mea_magic = MEA_MAGIC_HASH_SEGMENT;
         slave_lmv->mea_count = 0;
 
-        list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets, mc_linkage) {
+        cfs_list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets, mc_linkage) {
                 rc = cmm_split_slave_create(env, cmm, mc, &lmv->mea_ids[i],
                                             ma, slave_lmv, sizeof(*slave_lmv));
                 if (rc)
index 51386de..fc77a67 100644 (file)
@@ -286,7 +286,7 @@ static struct lu_device *mdc_device_alloc(const struct lu_env *env,
                 mc->mc_md_dev.md_ops = &mdc_md_ops;
                 ld = mdc2lu_dev(mc);
                 ld->ld_ops = &mdc_lu_ops;
-                sema_init(&mc->mc_fid_sem, 1);
+                cfs_sema_init(&mc->mc_fid_sem, 1);
         }
 
         RETURN (ld);
@@ -297,9 +297,9 @@ static struct lu_device *mdc_device_free(const struct lu_env *env,
 {
         struct mdc_device *mc = lu2mdc_dev(ld);
 
-        LASSERTF(atomic_read(&ld->ld_ref) == 0,
-                 "Refcount = %i\n", atomic_read(&ld->ld_ref));
-        LASSERT(list_empty(&mc->mc_linkage));
+        LASSERTF(cfs_atomic_read(&ld->ld_ref) == 0,
+                 "Refcount = %i\n", cfs_atomic_read(&ld->ld_ref));
+        LASSERT(cfs_list_empty(&mc->mc_linkage));
         md_device_fini(&mc->mc_md_dev);
         OBD_FREE_PTR(mc);
         return NULL;
index bcd5f3f..d0795bf 100644 (file)
@@ -61,10 +61,10 @@ struct mdc_cli_desc {
 struct mdc_device {
         struct md_device        mc_md_dev;
         /* other MD servers in cluster */
-        struct list_head        mc_linkage;
+        cfs_list_t              mc_linkage;
         mdsno_t                 mc_num;
         struct mdc_cli_desc     mc_desc;
-        struct semaphore        mc_fid_sem;
+        cfs_semaphore_t         mc_fid_sem;
 };
 
 struct mdc_thread_info {
index 7039302..e85d0ef 100644 (file)
@@ -74,7 +74,7 @@ int seq_server_set_cli(struct lu_server_seq *seq,
          * Ask client for new range, assign that range to ->seq_space and write
          * seq state to backing store should be atomic.
          */
-        down(&seq->lss_sem);
+        cfs_down(&seq->lss_sem);
 
         if (cli == NULL) {
                 CDEBUG(D_INFO, "%s: Detached sequence client %s\n",
@@ -96,7 +96,7 @@ int seq_server_set_cli(struct lu_server_seq *seq,
         cli->lcs_space.lsr_mdt = seq->lss_site->ms_node_id;
         EXIT;
 out_up:
-        up(&seq->lss_sem);
+        cfs_up(&seq->lss_sem);
         return rc;
 }
 EXPORT_SYMBOL(seq_server_set_cli);
@@ -185,9 +185,9 @@ int seq_server_alloc_super(struct lu_server_seq *seq,
         int rc;
         ENTRY;
 
-        down(&seq->lss_sem);
+        cfs_down(&seq->lss_sem);
         rc = __seq_server_alloc_super(seq, in, out, env);
-        up(&seq->lss_sem);
+        cfs_up(&seq->lss_sem);
 
         RETURN(rc);
 }
@@ -331,9 +331,9 @@ int seq_server_alloc_meta(struct lu_server_seq *seq,
         int rc;
         ENTRY;
 
-        down(&seq->lss_sem);
+        cfs_down(&seq->lss_sem);
         rc = __seq_server_alloc_meta(seq, in, out, env);
-        up(&seq->lss_sem);
+        cfs_up(&seq->lss_sem);
 
         RETURN(rc);
 }
@@ -540,7 +540,7 @@ int seq_server_init(struct lu_server_seq *seq,
         seq->lss_type = type;
         seq->lss_site = ms;
         range_init(&seq->lss_space);
-        sema_init(&seq->lss_sem, 1);
+        cfs_sema_init(&seq->lss_sem, 1);
 
         seq->lss_width = is_srv ?
                 LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
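
The sequence server guards its allocation paths with a semaphore initialized to a count of one, i.e. a mutex built from cfs_sema_init(), cfs_down() and cfs_up(). A minimal sketch of the pattern these hunks rename; the shared counter is a hypothetical stand-in for the range state guarded by lss_sem.

/* Sketch only: a counting semaphore with count 1 acts as a mutex. */
#include <libcfs/libcfs.h>

static cfs_semaphore_t    demo_sem;
static unsigned long long demo_next_seq;

static void demo_init(void)
{
        cfs_sema_init(&demo_sem, 1);    /* count of 1 == mutex */
}

static unsigned long long demo_alloc_seq(void)
{
        unsigned long long v;

        cfs_down(&demo_sem);            /* enter critical section */
        v = demo_next_seq++;
        cfs_up(&demo_sem);              /* leave critical section */
        return v;
}
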
index 9939c82..62d7349 100644 (file)
@@ -145,7 +145,7 @@ int seq_client_replay_super(struct lu_client_seq *seq,
         int rc;
         ENTRY;
 
-        down(&seq->lcs_sem);
+        cfs_down(&seq->lcs_sem);
 
 #ifdef __KERNEL__
         if (seq->lcs_srv) {
@@ -159,7 +159,7 @@ int seq_client_replay_super(struct lu_client_seq *seq,
 #ifdef __KERNEL__
         }
 #endif
-        up(&seq->lcs_sem);
+        cfs_up(&seq->lcs_sem);
         RETURN(rc);
 }
 
@@ -234,7 +234,7 @@ int seq_client_alloc_fid(struct lu_client_seq *seq, struct lu_fid *fid)
         LASSERT(seq != NULL);
         LASSERT(fid != NULL);
 
-        down(&seq->lcs_sem);
+        cfs_down(&seq->lcs_sem);
 
         if (fid_is_zero(&seq->lcs_fid) ||
             fid_oid(&seq->lcs_fid) >= seq->lcs_width)
@@ -245,7 +245,7 @@ int seq_client_alloc_fid(struct lu_client_seq *seq, struct lu_fid *fid)
                 if (rc) {
                         CERROR("%s: Can't allocate new sequence, "
                                "rc %d\n", seq->lcs_name, rc);
-                        up(&seq->lcs_sem);
+                        cfs_up(&seq->lcs_sem);
                         RETURN(rc);
                 }
 
@@ -268,7 +268,7 @@ int seq_client_alloc_fid(struct lu_client_seq *seq, struct lu_fid *fid)
         }
 
         *fid = seq->lcs_fid;
-        up(&seq->lcs_sem);
+        cfs_up(&seq->lcs_sem);
 
         CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name,  PFID(fid));
         RETURN(rc);
@@ -282,7 +282,7 @@ EXPORT_SYMBOL(seq_client_alloc_fid);
 void seq_client_flush(struct lu_client_seq *seq)
 {
         LASSERT(seq != NULL);
-        down(&seq->lcs_sem);
+        cfs_down(&seq->lcs_sem);
         fid_zero(&seq->lcs_fid);
         /**
          * this id should not be used for seq range allocation.
@@ -292,7 +292,7 @@ void seq_client_flush(struct lu_client_seq *seq)
         seq->lcs_space.lsr_mdt = -1;
 
         range_init(&seq->lcs_space);
-        up(&seq->lcs_sem);
+        cfs_up(&seq->lcs_sem);
 }
 EXPORT_SYMBOL(seq_client_flush);
 
@@ -367,7 +367,7 @@ int seq_client_init(struct lu_client_seq *seq,
         seq->lcs_exp = exp;
         seq->lcs_srv = srv;
         seq->lcs_type = type;
-        sema_init(&seq->lcs_sem, 1);
+        cfs_sema_init(&seq->lcs_sem, 1);
         seq->lcs_width = LUSTRE_SEQ_MAX_WIDTH;
 
         /* Make sure that things are clear before work is started. */
index e9976f9..b8fdca3 100644 (file)
@@ -112,16 +112,16 @@ seq_server_proc_write_space(struct file *file, const char *buffer,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lss_sem);
+        cfs_down(&seq->lss_sem);
        rc = seq_proc_write_common(file, buffer, count,
                                    data, &seq->lss_space);
        if (rc == 0) {
                CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
                        seq->lss_name, PRANGE(&seq->lss_space));
        }
-       
-       up(&seq->lss_sem);
-       
+
+        cfs_up(&seq->lss_sem);
+
         RETURN(count);
 }
 
@@ -135,11 +135,11 @@ seq_server_proc_read_space(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lss_sem);
+        cfs_down(&seq->lss_sem);
        rc = seq_proc_read_common(page, start, off, count, eof,
                                   data, &seq->lss_space);
-       up(&seq->lss_sem);
-       
+        cfs_up(&seq->lss_sem);
+
        RETURN(rc);
 }
 
@@ -167,7 +167,7 @@ seq_server_proc_read_server(char *page, char **start, off_t off,
        } else {
                rc = snprintf(page, count, "<none>\n");
        }
-       
+
        RETURN(rc);
 }
 
@@ -181,7 +181,7 @@ seq_server_proc_write_width(struct file *file, const char *buffer,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lss_sem);
+        cfs_down(&seq->lss_sem);
 
         rc = lprocfs_write_helper(buffer, count, &val);
         if (rc)
@@ -193,9 +193,9 @@ seq_server_proc_write_width(struct file *file, const char *buffer,
                CDEBUG(D_INFO, "%s: Width: "LPU64"\n",
                        seq->lss_name, seq->lss_width);
        }
-       
-       up(&seq->lss_sem);
-       
+
+        cfs_up(&seq->lss_sem);
+
         RETURN(count);
 }
 
@@ -209,10 +209,10 @@ seq_server_proc_read_width(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lss_sem);
+        cfs_down(&seq->lss_sem);
         rc = snprintf(page, count, LPU64"\n", seq->lss_width);
-       up(&seq->lss_sem);
-       
+        cfs_up(&seq->lss_sem);
+
        RETURN(rc);
 }
 
@@ -227,7 +227,7 @@ seq_client_proc_write_space(struct file *file, const char *buffer,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lcs_sem);
+        cfs_down(&seq->lcs_sem);
        rc = seq_proc_write_common(file, buffer, count,
                                    data, &seq->lcs_space);
 
@@ -235,9 +235,9 @@ seq_client_proc_write_space(struct file *file, const char *buffer,
                CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
                        seq->lcs_name, PRANGE(&seq->lcs_space));
        }
-       
-       up(&seq->lcs_sem);
-       
+
+        cfs_up(&seq->lcs_sem);
+
         RETURN(count);
 }
 
@@ -251,11 +251,11 @@ seq_client_proc_read_space(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lcs_sem);
+        cfs_down(&seq->lcs_sem);
        rc = seq_proc_read_common(page, start, off, count, eof,
                                   data, &seq->lcs_space);
-       up(&seq->lcs_sem);
-       
+        cfs_up(&seq->lcs_sem);
+
        RETURN(rc);
 }
 
@@ -269,7 +269,7 @@ seq_client_proc_write_width(struct file *file, const char *buffer,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lcs_sem);
+        cfs_down(&seq->lcs_sem);
 
         rc = lprocfs_write_helper(buffer, count, &val);
         if (rc)
@@ -283,9 +283,9 @@ seq_client_proc_write_width(struct file *file, const char *buffer,
                                seq->lcs_name, seq->lcs_width);
                 }
         }
-       
-       up(&seq->lcs_sem);
-       
+
+        cfs_up(&seq->lcs_sem);
+
         RETURN(count);
 }
 
@@ -299,10 +299,10 @@ seq_client_proc_read_width(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lcs_sem);
+        cfs_down(&seq->lcs_sem);
         rc = snprintf(page, count, LPU64"\n", seq->lcs_width);
-       up(&seq->lcs_sem);
-       
+        cfs_up(&seq->lcs_sem);
+
        RETURN(rc);
 }
 
@@ -316,10 +316,10 @@ seq_client_proc_read_fid(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-       down(&seq->lcs_sem);
+        cfs_down(&seq->lcs_sem);
         rc = snprintf(page, count, DFID"\n", PFID(&seq->lcs_fid));
-       up(&seq->lcs_sem);
-       
+        cfs_up(&seq->lcs_sem);
+
        RETURN(rc);
 }
 
index 359f5f2..2337953 100644 (file)
@@ -88,7 +88,7 @@ struct fld_cache *fld_cache_init(const char *name,
         CFS_INIT_LIST_HEAD(&cache->fci_lru);
 
         cache->fci_cache_count = 0;
-        spin_lock_init(&cache->fci_lock);
+        cfs_spin_lock_init(&cache->fci_lock);
 
         strncpy(cache->fci_name, name,
                 sizeof(cache->fci_name));
@@ -139,8 +139,8 @@ void fld_cache_fini(struct fld_cache *cache)
 static inline void fld_cache_entry_delete(struct fld_cache *cache,
                                           struct fld_cache_entry *node)
 {
-        list_del(&node->fce_list);
-        list_del(&node->fce_lru);
+        cfs_list_del(&node->fce_list);
+        cfs_list_del(&node->fce_lru);
         cache->fci_cache_count--;
         OBD_FREE_PTR(node);
 }
@@ -154,12 +154,12 @@ static void fld_fix_new_list(struct fld_cache *cache)
         struct fld_cache_entry *f_next;
         struct lu_seq_range *c_range;
         struct lu_seq_range *n_range;
-        struct list_head *head = &cache->fci_entries_head;
+        cfs_list_t *head = &cache->fci_entries_head;
         ENTRY;
 
 restart_fixup:
 
-        list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+        cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
                 c_range = &f_curr->fce_range;
                 n_range = &f_next->fce_range;
 
@@ -214,10 +214,10 @@ restart_fixup:
  */
 static inline void fld_cache_entry_add(struct fld_cache *cache,
                                        struct fld_cache_entry *f_new,
-                                       struct list_head *pos)
+                                       cfs_list_t *pos)
 {
-        list_add(&f_new->fce_list, pos);
-        list_add(&f_new->fce_lru, &cache->fci_lru);
+        cfs_list_add(&f_new->fce_list, pos);
+        cfs_list_add(&f_new->fce_lru, &cache->fci_lru);
 
         cache->fci_cache_count++;
         fld_fix_new_list(cache);
@@ -230,7 +230,7 @@ static inline void fld_cache_entry_add(struct fld_cache *cache,
 static int fld_cache_shrink(struct fld_cache *cache)
 {
         struct fld_cache_entry *flde;
-        struct list_head *curr;
+        cfs_list_t *curr;
         int num = 0;
         ENTRY;
 
@@ -244,7 +244,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
         while (cache->fci_cache_count + cache->fci_threshold >
                cache->fci_cache_size && curr != &cache->fci_lru) {
 
-                flde = list_entry(curr, struct fld_cache_entry, fce_lru);
+                flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru);
                 curr = curr->prev;
                 fld_cache_entry_delete(cache, flde);
                 num++;
@@ -263,10 +263,10 @@ void fld_cache_flush(struct fld_cache *cache)
 {
         ENTRY;
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         cache->fci_cache_size = 0;
         fld_cache_shrink(cache);
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
 
         EXIT;
 }
@@ -391,8 +391,8 @@ void fld_cache_insert(struct fld_cache *cache,
         struct fld_cache_entry *f_new;
         struct fld_cache_entry *f_curr;
         struct fld_cache_entry *n;
-        struct list_head *head;
-        struct list_head *prev = NULL;
+        cfs_list_t *head;
+        cfs_list_t *prev = NULL;
         const seqno_t new_start  = range->lsr_start;
         const seqno_t new_end  = range->lsr_end;
         ENTRY;
@@ -413,12 +413,12 @@ void fld_cache_insert(struct fld_cache *cache,
          * So we don't need to search new entry before starting insertion loop.
          */
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         fld_cache_shrink(cache);
 
         head = &cache->fci_entries_head;
 
-        list_for_each_entry_safe(f_curr, n, head, fce_list) {
+        cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
                 /* add list if next is end of list */
                 if (new_end < f_curr->fce_range.lsr_start)
                         break;
@@ -437,7 +437,7 @@ void fld_cache_insert(struct fld_cache *cache,
         /* Add new entry to cache and lru list. */
         fld_cache_entry_add(cache, f_new, prev);
 out:
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
         EXIT;
 }
 
@@ -448,15 +448,15 @@ int fld_cache_lookup(struct fld_cache *cache,
                      const seqno_t seq, struct lu_seq_range *range)
 {
         struct fld_cache_entry *flde;
-        struct list_head *head;
+        cfs_list_t *head;
         ENTRY;
 
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         head = &cache->fci_entries_head;
 
         cache->fci_stat.fst_count++;
-        list_for_each_entry(flde, head, fce_list) {
+        cfs_list_for_each_entry(flde, head, fce_list) {
                 if (flde->fce_range.lsr_start > seq)
                         break;
 
@@ -464,12 +464,12 @@ int fld_cache_lookup(struct fld_cache *cache,
                         *range = flde->fce_range;
 
                         /* update position of this entry in lru list. */
-                        list_move(&flde->fce_lru, &cache->fci_lru);
+                        cfs_list_move(&flde->fce_lru, &cache->fci_lru);
                         cache->fci_stat.fst_cache++;
-                        spin_unlock(&cache->fci_lock);
+                        cfs_spin_unlock(&cache->fci_lock);
                         RETURN(0);
                 }
         }
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
         RETURN(-ENOENT);
 }
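
fld_cache_lookup() above keeps the LRU list ordered by recency: a hit promotes the entry to the head with cfs_list_move(), so fld_cache_shrink() can evict from the cold tail. A compact sketch of that discipline; struct demo_ent and the globals are hypothetical stand-ins for the cache types.

/* Sketch only: lookup promotes a hit to the MRU end of the list. */
#include <libcfs/libcfs.h>

struct demo_ent {
        cfs_list_t de_lru;
        int        de_key;
};

static cfs_spinlock_t demo_lock;
static cfs_list_t     demo_lru;         /* head == most recently used */

static struct demo_ent *demo_lookup(int key)
{
        struct demo_ent *e;

        cfs_spin_lock(&demo_lock);
        cfs_list_for_each_entry(e, &demo_lru, de_lru) {
                if (e->de_key == key) {
                        /* hit: move to the head so shrinking walks
                         * from the tail, as fld_cache_shrink() does */
                        cfs_list_move(&e->de_lru, &demo_lru);
                        cfs_spin_unlock(&demo_lock);
                        return e;
                }
        }
        cfs_spin_unlock(&demo_lock);
        return NULL;
}
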
index 5092ac1..a799088 100644 (file)
@@ -135,7 +135,7 @@ int fld_server_create(struct lu_server_fld *fld,
         ENTRY;
 
         info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
-        mutex_lock(&fld->lsf_lock);
+        cfs_mutex_lock(&fld->lsf_lock);
 
         erange = &info->fti_lrange;
         new = &info->fti_irange;
@@ -229,7 +229,7 @@ out:
         if (rc == 0)
                 fld_cache_insert(fld->lsf_cache, new);
 
-        mutex_unlock(&fld->lsf_lock);
+        cfs_mutex_unlock(&fld->lsf_lock);
 
         CDEBUG((rc != 0 ? D_ERROR : D_INFO),
                "%s: FLD create: given range : "DRANGE
@@ -480,7 +480,7 @@ int fld_server_init(struct lu_server_fld *fld, struct dt_device *dt,
         cache_threshold = cache_size *
                 FLD_SERVER_CACHE_THRESHOLD / 100;
 
-        mutex_init(&fld->lsf_lock);
+        cfs_mutex_init(&fld->lsf_lock);
         fld->lsf_cache = fld_cache_init(fld->lsf_name,
                                         cache_size, cache_threshold);
         if (IS_ERR(fld->lsf_cache)) {
index 2c93de3..39682f4 100644 (file)
@@ -71,8 +71,8 @@ struct lu_fld_hash {
 };
 
 struct fld_cache_entry {
-        struct list_head         fce_lru;
-        struct list_head         fce_list;
+        cfs_list_t               fce_lru;
+        cfs_list_t               fce_list;
         /**
          * fld cache entries are sorted on range->lsr_start field. */
         struct lu_seq_range      fce_range;
@@ -83,7 +83,7 @@ struct fld_cache {
          * Cache guard, protects fci_hash mostly because others immutable after
          * init is finished.
          */
-        spinlock_t               fci_lock;
+        cfs_spinlock_t           fci_lock;
 
         /**
          * Cache shrink threshold */
@@ -99,11 +99,11 @@ struct fld_cache {
 
         /**
          * LRU list fld entries. */
-        struct list_head         fci_lru;
+        cfs_list_t               fci_lru;
 
         /**
          * sorted fld entries. */
-        struct list_head         fci_entries_head;
+        cfs_list_t               fci_entries_head;
 
         /**
          * Cache statistics. */
index eb02a43..951f589 100644 (file)
@@ -75,7 +75,7 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
         int rc;
         ENTRY;
         client_obd_list_lock(&cli->cl_loi_list_lock);
-        rc = list_empty(&mcw->mcw_entry);
+        rc = cfs_list_empty(&mcw->mcw_entry);
         client_obd_list_unlock(&cli->cl_loi_list_lock);
         RETURN(rc);
 };
@@ -87,7 +87,7 @@ static void fld_enter_request(struct client_obd *cli)
 
         client_obd_list_lock(&cli->cl_loi_list_lock);
         if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
-                list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+                cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
                 cfs_waitq_init(&mcw.mcw_waitq);
                 client_obd_list_unlock(&cli->cl_loi_list_lock);
                 l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
@@ -99,20 +99,20 @@ static void fld_enter_request(struct client_obd *cli)
 
 static void fld_exit_request(struct client_obd *cli)
 {
-        struct list_head *l, *tmp;
+        cfs_list_t *l, *tmp;
         struct mdc_cache_waiter *mcw;
 
         client_obd_list_lock(&cli->cl_loi_list_lock);
         cli->cl_r_in_flight--;
-        list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
 
                 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
                         /* No free request slots anymore */
                         break;
                 }
 
-                mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
-                list_del_init(&mcw->mcw_entry);
+                mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+                cfs_list_del_init(&mcw->mcw_entry);
                 cli->cl_r_in_flight++;
                 cfs_waitq_signal(&mcw->mcw_waitq);
         }
@@ -135,7 +135,7 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
 
         hash = fld_rrb_hash(fld, seq);
 
-        list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+        cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
                 if (target->ft_idx == hash)
                         RETURN(target);
         }
@@ -144,7 +144,7 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
                "Targets (%d):\n", fld->lcf_name, hash, seq,
                fld->lcf_count);
 
-        list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+        cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
                 const char *srv_name = target->ft_srv != NULL  ?
                         target->ft_srv->lsf_name : "<null>";
                 const char *exp_name = target->ft_exp != NULL ?
@@ -184,9 +184,9 @@ fld_client_get_target(struct lu_client_fld *fld,
 
         LASSERT(fld->lcf_hash != NULL);
 
-        spin_lock(&fld->lcf_lock);
+        cfs_spin_lock(&fld->lcf_lock);
         target = fld->lcf_hash->fh_scan_func(fld, seq);
-        spin_unlock(&fld->lcf_lock);
+        cfs_spin_unlock(&fld->lcf_lock);
 
         if (target != NULL) {
                 CDEBUG(D_INFO, "%s: Found target (idx "LPU64
@@ -226,10 +226,10 @@ int fld_client_add_target(struct lu_client_fld *fld,
         if (target == NULL)
                 RETURN(-ENOMEM);
 
-        spin_lock(&fld->lcf_lock);
-        list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
+        cfs_spin_lock(&fld->lcf_lock);
+        cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
                 if (tmp->ft_idx == tar->ft_idx) {
-                        spin_unlock(&fld->lcf_lock);
+                        cfs_spin_unlock(&fld->lcf_lock);
                         OBD_FREE_PTR(target);
                         CERROR("Target %s exists in FLD and known as %s:#"LPU64"\n",
                                name, fld_target_name(tmp), tmp->ft_idx);
@@ -243,11 +243,11 @@ int fld_client_add_target(struct lu_client_fld *fld,
         target->ft_srv = tar->ft_srv;
         target->ft_idx = tar->ft_idx;
 
-        list_add_tail(&target->ft_chain,
-                      &fld->lcf_targets);
+        cfs_list_add_tail(&target->ft_chain,
+                          &fld->lcf_targets);
 
         fld->lcf_count++;
-        spin_unlock(&fld->lcf_lock);
+        cfs_spin_unlock(&fld->lcf_lock);
 
         RETURN(0);
 }
@@ -260,13 +260,13 @@ int fld_client_del_target(struct lu_client_fld *fld,
         struct lu_fld_target *target, *tmp;
         ENTRY;
 
-        spin_lock(&fld->lcf_lock);
-        list_for_each_entry_safe(target, tmp,
-                                 &fld->lcf_targets, ft_chain) {
+        cfs_spin_lock(&fld->lcf_lock);
+        cfs_list_for_each_entry_safe(target, tmp,
+                                     &fld->lcf_targets, ft_chain) {
                 if (target->ft_idx == idx) {
                         fld->lcf_count--;
-                        list_del(&target->ft_chain);
-                        spin_unlock(&fld->lcf_lock);
+                        cfs_list_del(&target->ft_chain);
+                        cfs_spin_unlock(&fld->lcf_lock);
 
                         if (target->ft_exp != NULL)
                                 class_export_put(target->ft_exp);
@@ -275,7 +275,7 @@ int fld_client_del_target(struct lu_client_fld *fld,
                         RETURN(0);
                 }
         }
-        spin_unlock(&fld->lcf_lock);
+        cfs_spin_unlock(&fld->lcf_lock);
         RETURN(-ENOENT);
 }
 EXPORT_SYMBOL(fld_client_del_target);
@@ -360,7 +360,7 @@ int fld_client_init(struct lu_client_fld *fld,
         }
 
         fld->lcf_count = 0;
-        spin_lock_init(&fld->lcf_lock);
+        cfs_spin_lock_init(&fld->lcf_lock);
         fld->lcf_hash = &fld_hash[hash];
         fld->lcf_flags = LUSTRE_FLD_INIT;
         CFS_INIT_LIST_HEAD(&fld->lcf_targets);
@@ -400,16 +400,16 @@ void fld_client_fini(struct lu_client_fld *fld)
 
         fld_client_proc_fini(fld);
 
-        spin_lock(&fld->lcf_lock);
-        list_for_each_entry_safe(target, tmp,
-                                 &fld->lcf_targets, ft_chain) {
+        cfs_spin_lock(&fld->lcf_lock);
+        cfs_list_for_each_entry_safe(target, tmp,
+                                     &fld->lcf_targets, ft_chain) {
                 fld->lcf_count--;
-                list_del(&target->ft_chain);
+                cfs_list_del(&target->ft_chain);
                 if (target->ft_exp != NULL)
                         class_export_put(target->ft_exp);
                 OBD_FREE_PTR(target);
         }
-        spin_unlock(&fld->lcf_lock);
+        cfs_spin_unlock(&fld->lcf_lock);
 
         if (fld->lcf_cache != NULL) {
                 if (!IS_ERR(fld->lcf_cache))
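
The locking renames follow the same pattern: cfs_spin_lock() and
cfs_spin_unlock() are assumed to wrap the kernel spinlock API
one-for-one, leaving every critical section above semantically
unchanged. An illustrative walk mirroring these hunks (function name is
hypothetical):

        static void targets_walk_example(struct lu_client_fld *fld)
        {
                struct lu_fld_target *target;

                cfs_spin_lock(&fld->lcf_lock);        /* was: spin_lock() */
                cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
                        /* ... inspect target under the lock ... */
                }
                cfs_spin_unlock(&fld->lcf_lock);      /* was: spin_unlock() */
        }
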
index b2e37fd..f2dd369 100644 (file)
@@ -73,9 +73,9 @@ fld_proc_read_targets(char *page, char **start, off_t off,
 
         LASSERT(fld != NULL);
 
-        spin_lock(&fld->lcf_lock);
-        list_for_each_entry(target,
-                            &fld->lcf_targets, ft_chain)
+        cfs_spin_lock(&fld->lcf_lock);
+        cfs_list_for_each_entry(target,
+                                &fld->lcf_targets, ft_chain)
         {
                 rc = snprintf(page, count, "%s\n",
                               fld_target_name(target));
@@ -85,7 +85,7 @@ fld_proc_read_targets(char *page, char **start, off_t off,
                 if (count == 0)
                         break;
         }
-        spin_unlock(&fld->lcf_lock);
+        cfs_spin_unlock(&fld->lcf_lock);
        RETURN(total);
 }
 
@@ -99,10 +99,10 @@ fld_proc_read_hash(char *page, char **start, off_t off,
 
         LASSERT(fld != NULL);
 
-        spin_lock(&fld->lcf_lock);
+        cfs_spin_lock(&fld->lcf_lock);
         rc = snprintf(page, count, "%s\n",
                       fld->lcf_hash->fh_name);
-        spin_unlock(&fld->lcf_lock);
+        cfs_spin_unlock(&fld->lcf_lock);
 
        RETURN(rc);
 }
@@ -129,9 +129,9 @@ fld_proc_write_hash(struct file *file, const char *buffer,
         }
 
         if (hash != NULL) {
-                spin_lock(&fld->lcf_lock);
+                cfs_spin_lock(&fld->lcf_lock);
                 fld->lcf_hash = hash;
-                spin_unlock(&fld->lcf_lock);
+                cfs_spin_unlock(&fld->lcf_lock);
 
                 CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
                        fld->lcf_name, hash->fh_name);
index 47db8b2..26e6553 100644 (file)
@@ -383,16 +383,16 @@ struct cl_object_header {
          */
         /** @{ */
         /** Lock protecting page tree. */
-        spinlock_t               coh_page_guard;
+        cfs_spinlock_t           coh_page_guard;
         /** Lock protecting lock list. */
-        spinlock_t               coh_lock_guard;
+        cfs_spinlock_t           coh_lock_guard;
         /** @} locks */
         /** Radix tree of cl_page's, cached for this object. */
         struct radix_tree_root   coh_tree;
         /** # of pages in radix tree. */
         unsigned long            coh_pages;
         /** List of cl_lock's granted for this object. */
-        struct list_head         coh_locks;
+        cfs_list_t               coh_locks;
 
         /**
          * Parent object. It is assumed that an object has a well-defined
@@ -409,7 +409,7 @@ struct cl_object_header {
          *
          * \todo XXX this can be read/write lock if needed.
          */
-        spinlock_t               coh_attr_guard;
+        cfs_spinlock_t           coh_attr_guard;
         /**
          * Number of objects above this one: 0 for a top-object, 1 for its
          * sub-object, etc.
@@ -421,18 +421,18 @@ struct cl_object_header {
  * Helper macro: iterate over all layers of the object \a obj, assigning every
  * layer top-to-bottom to \a slice.
  */
-#define cl_object_for_each(slice, obj)                                  \
-        list_for_each_entry((slice),                                    \
-                            &(obj)->co_lu.lo_header->loh_layers,        \
-                            co_lu.lo_linkage)
+#define cl_object_for_each(slice, obj)                                      \
+        cfs_list_for_each_entry((slice),                                    \
+                                &(obj)->co_lu.lo_header->loh_layers,        \
+                                co_lu.lo_linkage)
 /**
  * Helper macro: iterate over all layers of the object \a obj, assigning every
  * layer bottom-to-top to \a slice.
  */
-#define cl_object_for_each_reverse(slice, obj)                          \
-        list_for_each_entry_reverse((slice),                            \
-                                    &(obj)->co_lu.lo_header->loh_layers, \
-                                    co_lu.lo_linkage)
+#define cl_object_for_each_reverse(slice, obj)                               \
+        cfs_list_for_each_entry_reverse((slice),                             \
+                                        &(obj)->co_lu.lo_header->loh_layers, \
+                                        co_lu.lo_linkage)
 /** @} cl_object */
 
 #ifndef pgoff_t
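
A hypothetical layer walk using the renamed iterator, sketched against
the assumed cl_object_operations layout (the function name and the
coo_attr_get dispatch are illustrative, not quoted from this patch):

        static int attrs_from_layers(const struct lu_env *env,
                                     struct cl_object *obj,
                                     struct cl_attr *attr)
        {
                struct cl_object *slice;
                int result = 0;

                cl_object_for_each(slice, obj) {
                        /* visit layers top to bottom, stop on first error */
                        if (slice->co_ops->coo_attr_get != NULL)
                                result = slice->co_ops->coo_attr_get(env,
                                                                     slice,
                                                                     attr);
                        if (result != 0)
                                break;
                }
                return result;
        }
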
@@ -696,13 +696,13 @@ enum cl_page_flags {
  */
 struct cl_page {
         /** Reference counter. */
-        atomic_t                 cp_ref;
+        cfs_atomic_t             cp_ref;
         /** An object this page is a part of. Immutable after creation. */
         struct cl_object        *cp_obj;
         /** Logical page index within the object. Immutable after creation. */
         pgoff_t                  cp_index;
         /** List of slices. Immutable after creation. */
-        struct list_head         cp_layers;
+        cfs_list_t               cp_layers;
         /** Parent page, NULL for top-level page. Immutable after creation. */
         struct cl_page          *cp_parent;
         /** Lower-layer page. NULL for bottommost page. Immutable after
@@ -716,11 +716,11 @@ struct cl_page {
         /**
          * Linkage of pages within some group. Protected by
          * cl_page::cp_mutex. */
-        struct list_head         cp_batch;
+        cfs_list_t               cp_batch;
         /** Mutex serializing membership of a page in a batch. */
-        struct mutex             cp_mutex;
+        cfs_mutex_t              cp_mutex;
         /** Linkage of pages within cl_req. */
-        struct list_head         cp_flight;
+        cfs_list_t               cp_flight;
         /** Transfer error. */
         int                      cp_error;
 
@@ -771,7 +771,7 @@ struct cl_page_slice {
         struct cl_object                *cpl_obj;
         const struct cl_page_operations *cpl_ops;
         /** Linkage into cl_page::cp_layers. Immutable after creation. */
-        struct list_head                 cpl_linkage;
+        cfs_list_t                       cpl_linkage;
 };
 
 /**
@@ -1064,7 +1064,7 @@ struct cl_page_operations {
 do {                                                                    \
         static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask);              \
                                                                         \
-        if (cdebug_show(mask, DEBUG_SUBSYSTEM)) {                       \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                   \
                 cl_page_print(env, &__info, lu_cdebug_printer, page);   \
                 CDEBUG(mask, format , ## __VA_ARGS__);                  \
         }                                                               \
@@ -1077,7 +1077,7 @@ do {                                                                    \
 do {                                                                    \
         static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask);              \
                                                                         \
-        if (cdebug_show(mask, DEBUG_SUBSYSTEM)) {                       \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                   \
                 cl_page_header_print(env, &__info, lu_cdebug_printer, page); \
                 CDEBUG(mask, format , ## __VA_ARGS__);                  \
         }                                                               \
@@ -1476,7 +1476,7 @@ struct cl_lock_closure {
          * List of enclosed locks, so far. Locks are linked here through
          * cl_lock::cll_inclosure.
          */
-        struct list_head  clc_list;
+        cfs_list_t        clc_list;
         /**
          * True iff closure is in a `wait' mode. This determines what
          * cl_lock_enclosure() does when a lock L to be added to the closure
@@ -1502,14 +1502,14 @@ struct cl_lock_closure {
  */
 struct cl_lock {
         /** Reference counter. */
-        atomic_t              cll_ref;
+        cfs_atomic_t          cll_ref;
         /** List of slices. Immutable after creation. */
-        struct list_head      cll_layers;
+        cfs_list_t            cll_layers;
         /**
          * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
          * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
          */
-        struct list_head      cll_linkage;
+        cfs_list_t            cll_linkage;
         /**
          * Parameters of this lock. Protected by
          * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
@@ -1534,7 +1534,7 @@ struct cl_lock {
          *
          * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
          */
-        struct mutex          cll_guard;
+        cfs_mutex_t           cll_guard;
         cfs_task_t           *cll_guarder;
         int                   cll_depth;
 
@@ -1568,7 +1568,7 @@ struct cl_lock {
          *
          * \see cl_lock_closure
          */
-        struct list_head      cll_inclosure;
+        cfs_list_t            cll_inclosure;
         /**
          * Conflict lock at queuing time.
          */
@@ -1603,7 +1603,7 @@ struct cl_lock_slice {
         struct cl_object                *cls_obj;
         const struct cl_lock_operations *cls_ops;
         /** Linkage into cl_lock::cll_layers. Immutable after creation. */
-        struct list_head                 cls_linkage;
+        cfs_list_t                       cls_linkage;
 };
 
 /**
@@ -1788,7 +1788,7 @@ struct cl_lock_operations {
 do {                                                                    \
         static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask);              \
                                                                         \
-        if (cdebug_show(mask, DEBUG_SUBSYSTEM)) {                       \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                   \
                 cl_lock_print(env, &__info, lu_cdebug_printer, lock);   \
                 CDEBUG(mask, format , ## __VA_ARGS__);                  \
         }                                                               \
@@ -1816,9 +1816,9 @@ do {                                                                    \
  * @{
  */
 struct cl_page_list {
-        unsigned         pl_nr;
-        struct list_head pl_pages;
-        cfs_task_t      *pl_owner;
+        unsigned             pl_nr;
+        cfs_list_t           pl_pages;
+        cfs_task_t          *pl_owner;
 };
 
 /** 
@@ -1966,7 +1966,7 @@ struct cl_io_slice {
          * linkage into a list of all slices for a given cl_io, hanging off
          * cl_io::ci_layers. Immutable after creation.
          */
-        struct list_head               cis_linkage;
+        cfs_list_t                     cis_linkage;
 };
 
 
@@ -2163,7 +2163,7 @@ enum cl_enq_flags {
  */
 struct cl_io_lock_link {
         /** linkage into one of cl_lockset lists. */
-        struct list_head     cill_linkage;
+        cfs_list_t           cill_linkage;
         struct cl_lock_descr cill_descr;
         struct cl_lock      *cill_lock;
         /** optional destructor */
@@ -2202,11 +2202,11 @@ struct cl_io_lock_link {
  */
 struct cl_lockset {
         /** locks to be acquired. */
-        struct list_head cls_todo;
+        cfs_list_t  cls_todo;
         /** locks currently being processed. */
-        struct list_head cls_curr;
+        cfs_list_t  cls_curr;
         /** locks acquired. */
-        struct list_head cls_done;
+        cfs_list_t  cls_done;
 };
 
 /**
@@ -2251,7 +2251,7 @@ struct cl_io {
          */
         struct cl_io                  *ci_parent;
         /** List of slices. Immutable after creation. */
-        struct list_head               ci_layers;
+        cfs_list_t                     ci_layers;
         /** list of locks (to be) acquired by this io. */
         struct cl_lockset              ci_lockset;
         /** lock requirements, this is just a help info for sublayers. */
@@ -2448,16 +2448,16 @@ struct cl_req_obj {
  * req's pages.
  */
 struct cl_req {
-        enum cl_req_type    crq_type;
+        enum cl_req_type      crq_type;
         /** A list of pages being transferred */
-        struct list_head    crq_pages;
+        cfs_list_t            crq_pages;
         /** Number of pages in cl_req::crq_pages */
-        unsigned            crq_nrpages;
+        unsigned              crq_nrpages;
         /** An array of objects which pages are in ->crq_pages */
-        struct cl_req_obj  *crq_o;
+        struct cl_req_obj    *crq_o;
         /** Number of elements in cl_req::crq_objs[] */
-        unsigned            crq_nrobjs;
-        struct list_head    crq_layers;
+        unsigned              crq_nrobjs;
+        cfs_list_t            crq_layers;
 };
 
 /**
@@ -2466,7 +2466,7 @@ struct cl_req {
 struct cl_req_slice {
         struct cl_req    *crs_req;
         struct cl_device *crs_dev;
-        struct list_head  crs_linkage;
+        cfs_list_t        crs_linkage;
         const struct cl_req_operations *crs_ops;
 };
 
@@ -2478,16 +2478,16 @@ struct cl_req_slice {
 struct cache_stats {
         const char    *cs_name;
         /** how many entities were created at all */
-        atomic_t       cs_created;
+        cfs_atomic_t   cs_created;
         /** how many cache lookups were performed */
-        atomic_t       cs_lookup;
+        cfs_atomic_t   cs_lookup;
         /** how many times cache lookup resulted in a hit */
-        atomic_t       cs_hit;
+        cfs_atomic_t   cs_hit;
         /** how many entities are in the cache right now */
-        atomic_t       cs_total;
+        cfs_atomic_t   cs_total;
         /** how many entities in the cache are actively used (and cannot be
          * evicted) right now */
-        atomic_t       cs_busy;
+        cfs_atomic_t   cs_busy;
 };
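
As with the list wrappers, cfs_atomic_t and its accessors are assumed
to alias the kernel atomic API directly on Linux:

        typedef atomic_t cfs_atomic_t;                  /* assumed alias */
        #define cfs_atomic_read(a)           atomic_read(a)
        #define cfs_atomic_inc(a)            atomic_inc(a)
        #define cfs_atomic_dec_and_test(a)   atomic_dec_and_test(a)
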
 
 /** These are not exported so far */
@@ -2513,8 +2513,8 @@ struct cl_site {
          */
         struct cache_stats    cs_pages;
         struct cache_stats    cs_locks;
-        atomic_t              cs_pages_state[CPS_NR];
-        atomic_t              cs_locks_state[CLS_NR];
+        cfs_atomic_t          cs_pages_state[CPS_NR];
+        cfs_atomic_t          cs_locks_state[CLS_NR];
 };
 
 int  cl_site_init (struct cl_site *s, struct cl_device *top);
@@ -2847,7 +2847,7 @@ void cl_lock_signal      (const struct lu_env *env, struct cl_lock *lock);
 int  cl_lock_state_wait  (const struct lu_env *env, struct cl_lock *lock);
 void cl_lock_state_set   (const struct lu_env *env, struct cl_lock *lock,
                           enum cl_lock_state state);
-int  cl_queue_match      (const struct list_head *queue,
+int  cl_queue_match      (const cfs_list_t *queue,
                           const struct cl_lock_descr *need);
 
 void cl_lock_mutex_get  (const struct lu_env *env, struct cl_lock *lock);
@@ -2957,13 +2957,13 @@ do {                                                                    \
  * Iterate over pages in a page list.
  */
 #define cl_page_list_for_each(page, list)                               \
-        list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+        cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
 
 /**
  * Iterate over pages in a page list, taking possible removals into account.
  */
 #define cl_page_list_for_each_safe(page, temp, list)                    \
-        list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
+        cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
 
 void cl_page_list_init   (struct cl_page_list *plist);
 void cl_page_list_add    (struct cl_page_list *plist, struct cl_page *page);
@@ -3022,11 +3022,11 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
  */
 struct cl_sync_io {
         /** number of pages yet to be transferred. */
-        atomic_t             csi_sync_nr;
+        cfs_atomic_t          csi_sync_nr;
         /** completion to be signaled when transfer is complete. */
         cfs_waitq_t          csi_waitq;
         /** error code. */
-        int                  csi_sync_rc;
+        int                   csi_sync_rc;
 };
 
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
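
The cl_sync_io fields imply the usual completion-counting idiom; a
hedged sketch of how a page completion might drive it (the helper name
is hypothetical, and cfs_waitq_broadcast() is assumed to be the renamed
wake-up primitive):

        static void my_sync_io_note(struct cl_sync_io *anchor, int ioret)
        {
                if (anchor->csi_sync_rc == 0 && ioret < 0)
                        anchor->csi_sync_rc = ioret;  /* keep first error */
                /* wake the waiter once the last page completes */
                if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
                        cfs_waitq_broadcast(&anchor->csi_waitq);
        }
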
index a8ec09c..5978551 100644 (file)
@@ -101,7 +101,7 @@ struct dt_device_operations {
          * Return device-wide statistics.
          */
         int   (*dt_statfs)(const struct lu_env *env,
-                           struct dt_device *dev, struct kstatfs *sfs);
+                           struct dt_device *dev, cfs_kstatfs_t *sfs);
         /**
          * Start transaction, described by \a param.
          */
@@ -334,7 +334,7 @@ struct dt_object_operations {
         void  (*do_ah_init)(const struct lu_env *env,
                             struct dt_allocation_hint *ah,
                             struct dt_object *parent,
-                            umode_t child_mode);
+                            cfs_umode_t child_mode);
         /**
          * Create new object on this device.
          *
@@ -491,7 +491,7 @@ struct dt_device {
          * way, because callbacks are supposed to be added/deleted only during
          * single-threaded start-up shut-down procedures.
          */
-        struct list_head                   dd_txn_callbacks;
+        cfs_list_t                         dd_txn_callbacks;
 };
 
 int  dt_device_init(struct dt_device *dev, struct lu_device_type *t);
@@ -593,9 +593,9 @@ struct dt_txn_callback {
                             struct thandle *txn, void *cookie);
         int (*dtc_txn_commit)(const struct lu_env *env,
                               struct thandle *txn, void *cookie);
-        void            *dtc_cookie;
-        __u32            dtc_tag;
-        struct list_head dtc_linkage;
+        void                *dtc_cookie;
+        __u32                dtc_tag;
+        cfs_list_t           dtc_linkage;
 };
 
 void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb);
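
Per the comment above, dd_txn_callbacks needs no locking because
callbacks are only added or removed during single-threaded start-up and
shutdown. A hypothetical registration (callback body and names are
illustrative):

        static int my_commit_cb(const struct lu_env *env,
                                struct thandle *txn, void *cookie)
        {
                /* invoked when the transaction commits */
                return 0;
        }

        static struct dt_txn_callback my_cb = {
                .dtc_txn_commit = my_commit_cb,
        };

        /* during start-up: dt_txn_callback_add(dev, &my_cb); */
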
index 4886479..529d198 100644 (file)
@@ -167,7 +167,7 @@ struct ccc_object {
          *
          * \see ccc_page::cpg_pending_linkage
          */
-        struct list_head        cob_pending_list;
+        cfs_list_t             cob_pending_list;
 
         /**
          * Access this counter is protected by inode->i_sem. Now that
@@ -180,7 +180,7 @@ struct ccc_object {
          *
          * \see ll_vm_open(), ll_vm_close().
          */
-        atomic_t                cob_mmap_cnt;
+        cfs_atomic_t            cob_mmap_cnt;
 };
 
 /**
@@ -198,7 +198,7 @@ struct ccc_page {
          * that is, never iterated through, only checked for list_empty(), but
          * having a list is useful for debugging.
          */
-        struct list_head     cpg_pending_linkage;
+        cfs_list_t           cpg_pending_linkage;
         /** VM page */
         cfs_page_t          *cpg_page;
 };
index e72fdbf..6d3bddd 100644 (file)
@@ -54,7 +54,7 @@
 
 #define loff_t long long
 #define ERESTART 2001
-typedef unsigned short umode_t;
+typedef unsigned short cfs_umode_t;
 
 #endif
 
@@ -87,18 +87,18 @@ void *inter_module_get(char *arg);
 static __inline__ int ext2_set_bit(int nr, void *addr)
 {
 #ifdef __BIG_ENDIAN
-        return set_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
+        return cfs_set_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
 #else
-        return set_bit(nr, addr);
+        return cfs_set_bit(nr, addr);
 #endif
 }
 
 static __inline__ int ext2_clear_bit(int nr, void *addr)
 {
 #ifdef __BIG_ENDIAN
-        return clear_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
+        return cfs_clear_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
 #else
-        return clear_bit(nr, addr);
+        return cfs_clear_bit(nr, addr);
 #endif
 }
 
@@ -108,7 +108,7 @@ static __inline__ int ext2_test_bit(int nr, void *addr)
         __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
         return (tmp[nr >> 3] >> (nr & 7)) & 1;
 #else
-        return test_bit(nr, addr);
+        return cfs_test_bit(nr, addr);
 #endif
 }
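
The nr ^ ((BITS_PER_LONG-1) & ~0x7) expression above is worth
unpacking: ext2 bitmaps are little-endian byte streams, while
cfs_set_bit() numbers bits within a native long. On a 64-bit big-endian
host the mask evaluates to 56, and the XOR rewrites bit 8k+b as
8(7-k)+b, mirroring the byte index inside the long so the operation
lands on the same on-disk byte. A small illustration (not part of the
patch):

        /* BITS_PER_LONG == 64, big-endian: ext2 bit -> native long bit */
        static unsigned ext2_to_native(unsigned nr)
        {
                return nr ^ ((64 - 1) & ~0x7u);  /* 0 -> 56, 7 -> 63, 8 -> 48 */
        }
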
 
@@ -130,7 +130,7 @@ extern int echo_client_init(void);
 
 #define EXPORT_SYMBOL(S)
 
-struct rcu_head { };
+typedef struct cfs_rcu_head { } cfs_rcu_head_t;
 
 typedef __u64 kdev_t;
 
@@ -157,17 +157,17 @@ typedef __u64 kdev_t;
 #ifndef ERESTARTSYS
 #define ERESTARTSYS ERESTART
 #endif
-#define HZ 1
+#define CFS_HZ 1
 
 /* random */
 
-void get_random_bytes(void *ptr, int size);
+void cfs_get_random_bytes(void *ptr, int size);
 
 /* memory */
 
 /* memory size: used for some client tunables */
-#define num_physpages      (256 * 1024) /* 1GB */
-#define CFS_NUM_CACHEPAGES num_physpages
+#define cfs_num_physpages  (256 * 1024) /* 1GB */
+#define CFS_NUM_CACHEPAGES cfs_num_physpages
 
 
 /* VFS stuff */
@@ -191,7 +191,7 @@ void get_random_bytes(void *ptr, int size);
 
 struct iattr {
         unsigned int    ia_valid;
-        umode_t         ia_mode;
+        cfs_umode_t     ia_mode;
         uid_t           ia_uid;
         gid_t           ia_gid;
         loff_t          ia_size;
@@ -279,12 +279,12 @@ typedef struct task_struct cfs_task_t;
 #define cfs_curproc_comm()      (current->comm)
 
 extern struct task_struct *current;
-int in_group_p(gid_t gid);
+int cfs_curproc_is_in_groups(gid_t gid);
 
-#define set_current_state(foo) do { current->state = foo; } while (0)
+#define cfs_set_current_state(foo) do { current->state = foo; } while (0)
 
-#define wait_event_interruptible(wq, condition)                         \
-({                                                                      \
+#define cfs_wait_event_interruptible(wq, condition, ret)                \
+{                                                                      \
         struct l_wait_info lwi;                                         \
         int timeout = 100000000;/* for ever */                          \
         int ret;                                                        \
@@ -293,20 +293,20 @@ int in_group_p(gid_t gid);
         ret = l_wait_event(NULL, condition, &lwi);                      \
                                                                         \
         ret;                                                            \
-})
+}
 
-#define lock_kernel() do {} while (0)
-#define unlock_kernel() do {} while (0)
+#define cfs_lock_kernel() do {} while (0)
+#define cfs_unlock_kernel() do {} while (0)
 #define daemonize(l) do {} while (0)
 #define sigfillset(l) do {} while (0)
 #define recalc_sigpending(l) do {} while (0)
-#define kernel_thread(l,m,n) LBUG()
+#define cfs_kernel_thread(l,m,n) LBUG()
 
 #define USERMODEHELPER(path, argv, envp) (0)
 #define SIGNAL_MASK_ASSERT()
-#define KERN_INFO
+#define CFS_KERN_INFO
 
-#if HZ != 1
+#if CFS_HZ != 1
 #error "liblustre's jiffies currently expects HZ to be 1"
 #endif
 #define jiffies                                 \
@@ -326,12 +326,12 @@ int in_group_p(gid_t gid);
 #define unlikely(exp) (exp)
 #endif
 
-#define might_sleep()
+#define cfs_might_sleep()
 #define might_sleep_if(c)
 #define smp_mb()
 
-#define libcfs_memory_pressure_get() (0) 
-#define libcfs_memory_pressure_put() do {} while (0) 
+#define libcfs_memory_pressure_get() (0)
+#define libcfs_memory_pressure_put() do {} while (0)
 #define libcfs_memory_pressure_clr() do {} while (0)
 
 /* FIXME sys/capability will finally included linux/fs.h thus
@@ -360,10 +360,10 @@ static inline void libcfs_run_lbug_upcall(char *file, const char *fn,
 
 
 struct liblustre_wait_callback {
-        struct list_head    llwc_list;
-        const char         *llwc_name;
-        int               (*llwc_fn)(void *arg);
-        void               *llwc_arg;
+        cfs_list_t              llwc_list;
+        const char             *llwc_name;
+        int                   (*llwc_fn)(void *arg);
+        void                   *llwc_arg;
 };
 
 void *liblustre_register_wait_callback(const char *name,
@@ -384,9 +384,9 @@ struct nfs_lock_info {
 };
 
 typedef struct file_lock {
-        struct file_lock *fl_next;      /* singly linked list for this inode  */
-        struct list_head fl_link;       /* doubly linked list of all locks */
-        struct list_head fl_block;      /* circular list of blocked processes */
+        struct file_lock *fl_next;  /* singly linked list for this inode  */
+        cfs_list_t fl_link;   /* doubly linked list of all locks */
+        cfs_list_t fl_block;  /* circular list of blocked processes */
         void *fl_owner;
         unsigned int fl_pid;
         cfs_waitq_t fl_wait;
@@ -438,7 +438,7 @@ struct posix_acl_entry {
 };
 
 struct posix_acl {
-        atomic_t                a_refcount;
+        cfs_atomic_t            a_refcount;
         unsigned int            a_count;
         struct posix_acl_entry  a_entries[0];
 };
index 1c0234a..f1c22c9 100644 (file)
@@ -59,8 +59,8 @@
 #include <libcfs/libcfs.h>
 #include <linux/statfs.h>
 
-#else 
-#  define kstatfs statfs
+#else
+typedef struct statfs cfs_kstatfs_t;
 #endif
 
 #endif /* LPROCFS_SNMP_H */
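
On the kernel side of the #ifdef above, cfs_kstatfs_t is presumably
defined against the kernel statfs structure, mirroring the userspace
alias introduced here:

        typedef struct kstatfs cfs_kstatfs_t;   /* assumed Linux-side typedef */
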
index 46b72a7..81f8cc1 100644 (file)
@@ -83,12 +83,12 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
         struct dentry *old_pwd;
         struct vfsmount *old_pwdmnt;
 
-        write_lock(&fs->lock);
+        cfs_write_lock(&fs->lock);
         old_pwd = fs->pwd;
         old_pwdmnt = fs->pwdmnt;
         fs->pwdmnt = mntget(mnt);
         fs->pwd = dget(dentry);
-        write_unlock(&fs->lock);
+        cfs_write_unlock(&fs->lock);
 
         if (old_pwd) {
                 dput(old_pwd);
@@ -104,24 +104,26 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
 #define ATTR_BLOCKS    (1 << 27)
 
 #if HAVE_INODE_I_MUTEX
-#define UNLOCK_INODE_MUTEX(inode) do {mutex_unlock(&(inode)->i_mutex); } while(0)
-#define LOCK_INODE_MUTEX(inode) do {mutex_lock(&(inode)->i_mutex); } while(0)
+#define UNLOCK_INODE_MUTEX(inode) \
+do {cfs_mutex_unlock(&(inode)->i_mutex); } while(0)
+#define LOCK_INODE_MUTEX(inode) \
+do {cfs_mutex_lock(&(inode)->i_mutex); } while(0)
 #define LOCK_INODE_MUTEX_PARENT(inode) \
-do {mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
-#define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
+do {cfs_mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
+#define TRYLOCK_INODE_MUTEX(inode) cfs_mutex_trylock(&(inode)->i_mutex)
 #else
-#define UNLOCK_INODE_MUTEX(inode) do {up(&(inode)->i_sem); } while(0)
-#define LOCK_INODE_MUTEX(inode) do {down(&(inode)->i_sem); } while(0)
+#define UNLOCK_INODE_MUTEX(inode) do {cfs_up(&(inode)->i_sem); } while(0)
+#define LOCK_INODE_MUTEX(inode) do {cfs_down(&(inode)->i_sem); } while(0)
 #define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
 #define LOCK_INODE_MUTEX_PARENT(inode) LOCK_INODE_MUTEX(inode)
 #endif /* HAVE_INODE_I_MUTEX */
 
 #ifdef HAVE_SEQ_LOCK
-#define LL_SEQ_LOCK(seq) mutex_lock(&(seq)->lock)
-#define LL_SEQ_UNLOCK(seq) mutex_unlock(&(seq)->lock)
+#define LL_SEQ_LOCK(seq) cfs_mutex_lock(&(seq)->lock)
+#define LL_SEQ_UNLOCK(seq) cfs_mutex_unlock(&(seq)->lock)
 #else
-#define LL_SEQ_LOCK(seq) down(&(seq)->sem)
-#define LL_SEQ_UNLOCK(seq) up(&(seq)->sem)
+#define LL_SEQ_LOCK(seq) cfs_down(&(seq)->sem)
+#define LL_SEQ_UNLOCK(seq) cfs_up(&(seq)->sem)
 #endif
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
@@ -130,11 +132,11 @@ do {mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
 #endif
 
 #ifdef HAVE_DQUOTOFF_MUTEX
-#define UNLOCK_DQONOFF_MUTEX(dqopt) do {mutex_unlock(&(dqopt)->dqonoff_mutex); } while(0)
-#define LOCK_DQONOFF_MUTEX(dqopt) do {mutex_lock(&(dqopt)->dqonoff_mutex); } while(0)
+#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_unlock(&(dqopt)->dqonoff_mutex)
+#define LOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_lock(&(dqopt)->dqonoff_mutex)
 #else
-#define UNLOCK_DQONOFF_MUTEX(dqopt) do {up(&(dqopt)->dqonoff_sem); } while(0)
-#define LOCK_DQONOFF_MUTEX(dqopt) do {down(&(dqopt)->dqonoff_sem); } while(0)
+#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_up(&(dqopt)->dqonoff_sem)
+#define LOCK_DQONOFF_MUTEX(dqopt) cfs_down(&(dqopt)->dqonoff_sem)
 #endif /* HAVE_DQUOTOFF_MUTEX */
 
 #define current_ngroups current->group_info->ngroups
@@ -149,8 +151,8 @@ do {mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
 #define gfp_t int
 #endif
 
-#define lock_dentry(___dentry)          spin_lock(&(___dentry)->d_lock)
-#define unlock_dentry(___dentry)        spin_unlock(&(___dentry)->d_lock)
+#define lock_dentry(___dentry)          cfs_spin_lock(&(___dentry)->d_lock)
+#define unlock_dentry(___dentry)        cfs_spin_unlock(&(___dentry)->d_lock)
 
 #define ll_kernel_locked()      kernel_locked()
 
@@ -178,8 +180,8 @@ do {mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
 #define ll_path_lookup                  path_lookup
 #define ll_permission(inode,mask,nd)    permission(inode,mask,nd)
 
-#define ll_pgcache_lock(mapping)          spin_lock(&mapping->page_lock)
-#define ll_pgcache_unlock(mapping)        spin_unlock(&mapping->page_lock)
+#define ll_pgcache_lock(mapping)          cfs_spin_lock(&mapping->page_lock)
+#define ll_pgcache_unlock(mapping)        cfs_spin_unlock(&mapping->page_lock)
 #define ll_call_writepage(inode, page)  \
                                 (inode)->i_mapping->a_ops->writepage(page, NULL)
 #define ll_invalidate_inode_pages(inode) \
@@ -197,7 +199,7 @@ do {mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
 
 #include <linux/writeback.h>
 
-static inline int cleanup_group_info(void)
+static inline int cfs_cleanup_group_info(void)
 {
         struct group_info *ginfo;
 
@@ -256,9 +258,9 @@ static inline int mapping_has_pages(struct address_space *mapping)
         int rc = 1;
 
         ll_pgcache_lock(mapping);
-        if (list_empty(&mapping->dirty_pages) &&
-            list_empty(&mapping->clean_pages) &&
-            list_empty(&mapping->locked_pages)) {
+        if (cfs_list_empty(&mapping->dirty_pages) &&
+            cfs_list_empty(&mapping->clean_pages) &&
+            cfs_list_empty(&mapping->locked_pages)) {
                 rc = 0;
         }
         ll_pgcache_unlock(mapping);
@@ -295,9 +297,9 @@ static inline int mapping_has_pages(struct address_space *mapping)
 #define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
 #else
 #define ll_set_dflags(dentry, flags) do { \
-                spin_lock(&dentry->d_lock); \
+                cfs_spin_lock(&dentry->d_lock); \
                 dentry->d_flags |= flags; \
-                spin_unlock(&dentry->d_lock); \
+                cfs_spin_unlock(&dentry->d_lock); \
         } while(0)
 #endif
 
@@ -367,7 +369,7 @@ ll_kern_mount(const char *fstype, int flags, const char *name, void *data)
         if (!type)
                 return ERR_PTR(-ENODEV);
         mnt = vfs_kern_mount(type, flags, name, data);
-        module_put(type->owner);
+        cfs_module_put(type->owner);
         return mnt;
 }
 #else
@@ -394,8 +396,8 @@ static inline u32 get_sb_time_gran(struct super_block *sb)
 #define TREE_READ_LOCK_IRQ(mapping)     read_lock_irq(&(mapping)->tree_lock)
 #define TREE_READ_UNLOCK_IRQ(mapping) read_unlock_irq(&(mapping)->tree_lock)
 #else
-#define TREE_READ_LOCK_IRQ(mapping) spin_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) spin_unlock_irq(&(mapping)->tree_lock)
+#define TREE_READ_LOCK_IRQ(mapping) cfs_spin_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping) cfs_spin_unlock_irq(&(mapping)->tree_lock)
 #endif
 
 #ifdef HAVE_UNREGISTER_BLKDEV_RETURN_INT
@@ -648,8 +650,10 @@ static inline int ll_crypto_hmac(struct crypto_tfm *tfm,
                 vfs_rename(old,old_dir,new,new_dir)
 #endif /* HAVE_SECURITY_PLUG */
 
-#ifndef for_each_possible_cpu
-#define for_each_possible_cpu(i) for_each_cpu(i)
+#ifdef for_each_possible_cpu
+#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#elif defined(for_each_cpu)
+#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
 #endif
 
 #ifndef cpu_to_node
@@ -657,10 +661,10 @@ static inline int ll_crypto_hmac(struct crypto_tfm *tfm,
 #endif
 
 #ifdef HAVE_REGISTER_SHRINKER
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
+typedef int (*cfs_shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
 
 static inline
-struct shrinker *set_shrinker(int seek, shrinker_t func)
+struct shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
 {
         struct shrinker *s;
 
@@ -677,7 +681,7 @@ struct shrinker *set_shrinker(int seek, shrinker_t func)
 }
 
 static inline
-void remove_shrinker(struct shrinker *shrinker) 
+void cfs_remove_shrinker(struct shrinker *shrinker)
 {
         if (shrinker == NULL)
                 return;
@@ -719,11 +723,6 @@ static inline long labs(long x)
 }
 #endif /* HAVE_REGISTER_SHRINKER */
 
-/* Using kernel fls(). Userspace will use one defined in user-bitops.h. */
-#ifndef __fls
-#define __fls fls
-#endif
-
 #ifdef HAVE_INVALIDATE_INODE_PAGES
 #define invalidate_mapping_pages(mapping,s,e) invalidate_inode_pages(mapping)
 #endif
@@ -735,7 +734,9 @@ static inline long labs(long x)
 #endif
 
 #ifndef SLAB_DESTROY_BY_RCU
-#define SLAB_DESTROY_BY_RCU 0
+#define CFS_SLAB_DESTROY_BY_RCU 0
+#else
+#define CFS_SLAB_DESTROY_BY_RCU SLAB_DESTROY_BY_RCU
 #endif
 
 #ifdef HAVE_SB_HAS_QUOTA_ACTIVE
index f7ef0ca..4910f62 100644 (file)
@@ -60,8 +60,8 @@ struct fsfilt_objinfo {
 
 struct lustre_dquot;
 struct fsfilt_operations {
-        struct list_head fs_list;
-        struct module *fs_owner;
+        cfs_list_t fs_list;
+        cfs_module_t *fs_owner;
         char   *fs_type;
         char   *(* fs_getlabel)(struct super_block *sb);
         int     (* fs_setlabel)(struct super_block *sb, char *label);
@@ -105,7 +105,7 @@ struct fsfilt_operations {
         int     (* fs_map_inode_pages)(struct inode *inode, struct page **page,
                                        int pages, unsigned long *blocks,
                                        int *created, int create,
-                                       struct semaphore *sem);
+                                       cfs_semaphore_t *sem);
         int     (* fs_write_record)(struct file *, void *, int size, loff_t *,
                                     int force_sync);
         int     (* fs_read_record)(struct file *, void *, int size, loff_t *);
@@ -120,7 +120,7 @@ struct fsfilt_operations {
         int     (* fs_quotainfo)(struct lustre_quota_info *lqi, int type,
                                  int cmd);
         int     (* fs_qids)(struct file *file, struct inode *inode, int type,
-                            struct list_head *list);
+                            cfs_list_t *list);
         int     (* fs_get_mblk)(struct super_block *sb, int *count,
                                 struct inode *inode, int frags);
         int     (* fs_dquot)(struct lustre_dquot *dquot, int cmd);
@@ -182,19 +182,19 @@ static inline lvfs_sbdev_type fsfilt_journal_sbdev(struct obd_device *obd,
 #define FSFILT_OP_UNLINK_PARTIAL_PARENT 22
 #define FSFILT_OP_CREATE_PARTIAL_CHILD  23
 
-#define __fsfilt_check_slow(obd, start, msg)                            \
-do {                                                                    \
-        if (time_before(jiffies, start + 15 * HZ))                      \
-                break;                                                  \
-        else if (time_before(jiffies, start + 30 * HZ))                 \
-                CDEBUG(D_VFSTRACE, "%s: slow %s %lus\n", obd->obd_name, \
-                       msg, (jiffies-start) / HZ);                      \
-        else if (time_before(jiffies, start + DISK_TIMEOUT * HZ))       \
-                CWARN("%s: slow %s %lus\n", obd->obd_name, msg,         \
-                      (jiffies - start) / HZ);                          \
-        else                                                            \
-                CERROR("%s: slow %s %lus\n", obd->obd_name, msg,        \
-                       (jiffies - start) / HZ);                         \
+#define __fsfilt_check_slow(obd, start, msg)                              \
+do {                                                                      \
+        if (cfs_time_before(jiffies, start + 15 * CFS_HZ))                \
+                break;                                                    \
+        else if (cfs_time_before(jiffies, start + 30 * CFS_HZ))           \
+                CDEBUG(D_VFSTRACE, "%s: slow %s %lus\n", obd->obd_name,   \
+                       msg, (jiffies-start) / CFS_HZ);                    \
+        else if (cfs_time_before(jiffies, start + DISK_TIMEOUT * CFS_HZ)) \
+                CWARN("%s: slow %s %lus\n", obd->obd_name, msg,           \
+                      (jiffies - start) / CFS_HZ);                        \
+        else                                                              \
+                CERROR("%s: slow %s %lus\n", obd->obd_name, msg,          \
+                       (jiffies - start) / CFS_HZ);                       \
 } while (0)
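
Two details of the macro above: the log severity escalates with elapsed
time (silent under 15s, CDEBUG under 30s, CWARN under DISK_TIMEOUT
seconds, CERROR beyond), and the break legally exits the enclosing
do { } while (0). cfs_time_before() is assumed to keep the kernel's
wrap-safe jiffies comparison:

        /* assumed to match the kernel's time_before(): true iff a is
         * earlier than b, correct across jiffies wrap-around */
        #define cfs_time_before(a, b)   ((long)(a) - (long)(b) < 0)
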
 
 #define fsfilt_check_slow(obd, start, msg)              \
@@ -436,7 +436,7 @@ static inline int fsfilt_quotainfo(struct obd_device *obd,
 
 static inline int fsfilt_qids(struct obd_device *obd, struct file *file,
                               struct inode *inode, int type,
-                              struct list_head *list)
+                              cfs_list_t *list)
 {
         if (obd->obd_fsops->fs_qids)
                 return obd->obd_fsops->fs_qids(file, inode, type, list);
@@ -464,7 +464,7 @@ static inline int fsfilt_map_inode_pages(struct obd_device *obd,
                                          struct inode *inode,
                                          struct page **page, int pages,
                                          unsigned long *blocks, int *created,
-                                         int create, struct semaphore *sem)
+                                         int create, cfs_semaphore_t *sem)
 {
         return obd->obd_fsops->fs_map_inode_pages(inode, page, pages, blocks,
                                                   created, create, sem);
index afbe6e5..2aed070 100644 (file)
@@ -52,8 +52,9 @@
 
 # ifdef HAVE_RCU
 #  include <linux/rcupdate.h> /* for rcu_head{} */
+typedef struct rcu_head cfs_rcu_head_t;
 # else
-struct rcu_head { };
+typedef struct cfs_rcu_head { } cfs_rcu_head_t;
 # endif
 
 #endif /* ifdef __KERNEL__ */
index 85a3ab1..7c21350 100644 (file)
@@ -77,12 +77,12 @@ typedef struct percpu_counter lcounter_t;
 #define lcounter_destroy(counter)       percpu_counter_destroy(counter)
 
 #else
-typedef struct { atomic_t count; } lcounter_t;
+typedef struct { cfs_atomic_t count; } lcounter_t;
 
-#define lcounter_read(counter)          atomic_read(&counter->count)
-#define lcounter_inc(counter)           atomic_inc(&counter->count)
-#define lcounter_dec(counter)           atomic_dec(&counter->count)
-#define lcounter_init(counter)          atomic_set(&counter->count, 0)
+#define lcounter_read(counter)          cfs_atomic_read(&counter->count)
+#define lcounter_inc(counter)           cfs_atomic_inc(&counter->count)
+#define lcounter_dec(counter)           cfs_atomic_dec(&counter->count)
+#define lcounter_init(counter)          cfs_atomic_set(&counter->count, 0)
 #define lcounter_destroy(counter)       
 
 #endif /* if defined HAVE_PERCPU_COUNTER */
index ce53ac1..da2dfc4 100644 (file)
@@ -55,13 +55,13 @@ static inline void ll_remove_from_page_cache(struct page *page)
 #ifdef HAVE_RW_TREE_LOCK
         write_lock_irq(&mapping->tree_lock);
 #else
-       spin_lock_irq(&mapping->tree_lock);
+       cfs_spin_lock_irq(&mapping->tree_lock);
 #endif
         radix_tree_delete(&mapping->page_tree, page->index);
         page->mapping = NULL;
         mapping->nrpages--;
 #ifdef HAVE_NR_PAGECACHE
-        atomic_add(-1, &nr_pagecache); // XXX pagecache_acct(-1);
+        cfs_atomic_add(-1, &nr_pagecache); // XXX pagecache_acct(-1);
 #else
         __dec_zone_page_state(page, NR_FILE_PAGES);
 #endif
@@ -69,7 +69,7 @@ static inline void ll_remove_from_page_cache(struct page *page)
 #ifdef HAVE_RW_TREE_LOCK
         write_unlock_irq(&mapping->tree_lock);
 #else
-       spin_unlock_irq(&mapping->tree_lock);
+       cfs_spin_unlock_irq(&mapping->tree_lock);
 #endif
 }
 
index e90b155..3d5aa2d 100644 (file)
@@ -49,7 +49,6 @@
 #include <linux/lustre_compat25.h>
 #include <linux/lvfs_linux.h>
 #else
-struct group_info { /* unused */ };
 #include <liblustre.h>
 #endif
 
@@ -103,7 +102,7 @@ int lustre_rename(struct dentry *dir, struct vfsmount *mnt, char *oldname,
 int lustre_fread(struct file *file, void *buf, int len, loff_t *off);
 int lustre_fwrite(struct file *file, const void *buf, int len, loff_t *off);
 int lustre_fsync(struct file *file);
-long l_readdir(struct file * file, struct list_head *dentry_list);
+long l_readdir(struct file * file, cfs_list_t *dentry_list);
 int l_notify_change(struct vfsmount *mnt, struct dentry *dchild,
                     struct iattr *newattrs);
 int simple_truncate(struct dentry *dir, struct vfsmount *mnt,
@@ -114,7 +113,7 @@ static inline void l_dput(struct dentry *de)
         if (!de || IS_ERR(de))
                 return;
         //shrink_dcache_parent(de);
-        LASSERT(atomic_read(&de->d_count) > 0);
+        LASSERT(cfs_atomic_read(&de->d_count) > 0);
         dput(de);
 }
 
@@ -147,9 +146,9 @@ static inline struct dentry *ll_lookup_one_len(const char *fid_name,
 
 static inline void ll_sleep(int t)
 {
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(t * HZ);
-        set_current_state(TASK_RUNNING);
+        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+        cfs_schedule_timeout(t * CFS_HZ);
+        cfs_set_current_state(CFS_TASK_RUNNING);
 }
 #endif
 
index 204524e..38e2c2a 100644 (file)
@@ -56,14 +56,14 @@ struct l_file *l_dentry_open(struct lvfs_run_ctxt *, struct l_dentry *,
                              int flags);
 
 struct l_linux_dirent {
-        struct list_head lld_list;
+        cfs_list_t      lld_list;
         ino_t           lld_ino;
         unsigned long   lld_off;
         char            lld_name[LL_FID_NAMELEN];
 };
 struct l_readdir_callback {
         struct l_linux_dirent *lrc_dirent;
-        struct list_head      *lrc_list;
+        cfs_list_t            *lrc_list;
 };
 
 #define LVFS_DENTRY_PARAM_MAGIC         20070216UL
index 1ce61a9..d041140 100644 (file)
@@ -57,7 +57,7 @@
 #endif
 
 typedef struct {
-        spinlock_t          lock;
+        cfs_spinlock_t          lock;
 
 #ifdef CLIENT_OBD_LIST_LOCK_DEBUG
         unsigned long       time;
@@ -75,7 +75,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
 {
         unsigned long cur = jiffies;
         while (1) {
-                if (spin_trylock(&lock->lock)) {
+                if (cfs_spin_trylock(&lock->lock)) {
                         LASSERT(lock->task == NULL);
                         lock->task = current;
                         lock->func = func;
@@ -84,8 +84,8 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
                         break;
                 }
 
-                if ((jiffies - cur > 5 * HZ) &&
-                    (jiffies - lock->time > 5 * HZ)) {
+                if ((jiffies - cur > 5 * CFS_HZ) &&
+                    (jiffies - lock->time > 5 * CFS_HZ)) {
                         LCONSOLE_WARN("LOCK UP! the lock %p was acquired"
                                       " by <%s:%d:%s:%d> %lu time, I'm %s:%d\n",
                                       lock, lock->task->comm, lock->task->pid,
@@ -98,7 +98,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
                         LCONSOLE_WARN("====== for current process =====\n");
                         libcfs_debug_dumpstack(NULL);
                         LCONSOLE_WARN("====== end =======\n");
-                        cfs_pause(1000 * HZ);
+                        cfs_pause(1000 * CFS_HZ);
                 }
         }
 }
@@ -111,25 +111,25 @@ static inline void client_obd_list_unlock(client_obd_lock_t *lock)
         LASSERT(lock->task != NULL);
         lock->task = NULL;
         lock->time = jiffies;
-        spin_unlock(&lock->lock);
+        cfs_spin_unlock(&lock->lock);
 }
 
 #else /* ifdef CLIENT_OBD_LIST_LOCK_DEBUG */
 static inline void client_obd_list_lock(client_obd_lock_t *lock)
 {
-       spin_lock(&lock->lock);
+       cfs_spin_lock(&lock->lock);
 }
 
 static inline void client_obd_list_unlock(client_obd_lock_t *lock)
 {
-        spin_unlock(&lock->lock);
+        cfs_spin_unlock(&lock->lock);
 }
 
 #endif /* ifdef CLIENT_OBD_LIST_LOCK_DEBUG */
 
 static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
 {
-        spin_lock_init(&lock->lock);
+        cfs_spin_lock_init(&lock->lock);
 }
 
 static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
index cad6c92..8670666 100644 (file)
@@ -78,7 +78,7 @@ struct lprocfs_static_vars {
 /* if we find more consumers this could be generalized */
 #define OBD_HIST_MAX 32
 struct obd_histogram {
-        spinlock_t      oh_lock;
+        cfs_spinlock_t      oh_lock;
         unsigned long   oh_buckets[OBD_HIST_MAX];
 };
 
@@ -139,8 +139,8 @@ enum {
 };
 
 struct lprocfs_atomic {
-        atomic_t               la_entry;
-        atomic_t               la_exit;
+        cfs_atomic_t               la_entry;
+        cfs_atomic_t               la_exit;
 };
 
 #define LC_MIN_INIT ((~(__u64)0) >> 1)
@@ -186,7 +186,7 @@ enum lprocfs_fields_flags {
 struct lprocfs_stats {
         unsigned int           ls_num;     /* # of counters */
         int                    ls_flags; /* See LPROCFS_STATS_FLAG_* */
-        spinlock_t             ls_lock;  /* Lock used only when there are
+        cfs_spinlock_t         ls_lock;  /* Lock used only when there are
                                           * no percpu stats areas */
         struct lprocfs_percpu *ls_percpu[0];
 };
@@ -356,10 +356,10 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int type)
                         rc = 1;
                 if (type & LPROCFS_GET_SMP_ID)
                         rc = 0;
-                spin_lock(&stats->ls_lock);
+                cfs_spin_lock(&stats->ls_lock);
         } else {
                 if (type & LPROCFS_GET_NUM_CPU)
-                        rc = num_possible_cpus();
+                        rc = cfs_num_possible_cpus();
                 if (type & LPROCFS_GET_SMP_ID) {
                         stats->ls_flags |= LPROCFS_STATS_GET_SMP_ID;
                         rc = cfs_get_cpu();
@@ -371,7 +371,7 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int type)
 static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats)
 {
         if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
-                spin_unlock(&stats->ls_lock);
+                cfs_spin_unlock(&stats->ls_lock);
         else if (stats->ls_flags & LPROCFS_STATS_GET_SMP_ID)
                 cfs_put_cpu();
 }
@@ -403,7 +403,7 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
         int i;
 
         LASSERT(stats != NULL);
-        for (i = 0; i < num_possible_cpus(); i++)
+        for (i = 0; i < cfs_num_possible_cpus(); i++)
                 ret += lprocfs_read_helper(&(stats->ls_percpu[i]->lp_cntr[idx]),
                                            field);
         return ret;
@@ -573,12 +573,12 @@ int lprocfs_obd_rd_hash(char *page, char **start, off_t off,
 extern int lprocfs_seq_release(struct inode *, struct file *);
 
 /* in lprocfs_stat.c, to protect the private data for proc entries */
-extern struct rw_semaphore _lprocfs_lock;
+extern cfs_rw_semaphore_t _lprocfs_lock;
 #define LPROCFS_ENTRY()           do {  \
-        down_read(&_lprocfs_lock);      \
+        cfs_down_read(&_lprocfs_lock);  \
 } while(0)
 #define LPROCFS_EXIT()            do {  \
-        up_read(&_lprocfs_lock);        \
+        cfs_up_read(&_lprocfs_lock);    \
 } while(0)
 
 #ifdef HAVE_PROCFS_DELETED
@@ -602,10 +602,10 @@ int LPROCFS_ENTRY_AND_CHECK(struct proc_dir_entry *dp)
 #endif
 
 #define LPROCFS_WRITE_ENTRY()     do {  \
-        down_write(&_lprocfs_lock);     \
+        cfs_down_write(&_lprocfs_lock); \
 } while(0)
 #define LPROCFS_WRITE_EXIT()      do {  \
-        up_write(&_lprocfs_lock);       \
+        cfs_up_write(&_lprocfs_lock);   \
 } while(0)
 
 
@@ -613,14 +613,14 @@ int LPROCFS_ENTRY_AND_CHECK(struct proc_dir_entry *dp)
  * the import in a client obd_device for a lprocfs entry */
 #define LPROCFS_CLIMP_CHECK(obd) do {           \
         typecheck(struct obd_device *, obd);    \
-        down_read(&(obd)->u.cli.cl_sem);        \
+        cfs_down_read(&(obd)->u.cli.cl_sem);    \
         if ((obd)->u.cli.cl_import == NULL) {   \
-             up_read(&(obd)->u.cli.cl_sem);     \
+             cfs_up_read(&(obd)->u.cli.cl_sem); \
              return -ENODEV;                    \
         }                                       \
 } while(0)
 #define LPROCFS_CLIMP_EXIT(obd)                 \
-        up_read(&(obd)->u.cli.cl_sem);
+        cfs_up_read(&(obd)->u.cli.cl_sem);
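
A hedged sketch of the paired macros in use; the reader function is
hypothetical, with the old procfs read-handler signature used elsewhere
in this patch:

        static int foo_rd_import_name(char *page, char **start, off_t off,
                                      int count, int *eof, void *data)
        {
                struct obd_device *obd = data;
                int rc;

                LPROCFS_CLIMP_CHECK(obd); /* takes cl_sem; -ENODEV if no import */
                rc = snprintf(page, count, "%s\n",
                              obd->u.cli.cl_import->imp_obd->obd_name);
                LPROCFS_CLIMP_EXIT(obd);  /* drops cl_sem */
                return rc;
        }
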
 
 
 /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
index 7767097..c1bf4d8 100644 (file)
@@ -257,7 +257,7 @@ struct lu_device {
          *
          * \todo XXX which means that atomic_t is probably too small.
          */
-        atomic_t                           ld_ref;
+        cfs_atomic_t                       ld_ref;
         /**
          * Pointer to device type. Never modified once set.
          */
@@ -328,7 +328,7 @@ struct lu_device_type {
          *
          * \see lu_device_types.
          */
-        struct list_head                        ldt_linkage;
+        cfs_list_t                              ldt_linkage;
 };
 
 /**
@@ -466,7 +466,7 @@ struct lu_object {
         /**
          * Linkage into list of all layers.
          */
-        struct list_head                   lo_linkage;
+        cfs_list_t                         lo_linkage;
         /**
          * Depth. Top level layer depth is 0.
          */
@@ -516,37 +516,37 @@ struct lu_object_header {
          * Object flags from enum lu_object_header_flags. Set and checked
          * atomically.
          */
-        unsigned long       loh_flags;
+        unsigned long          loh_flags;
         /**
          * Object reference count. Protected by lu_site::ls_guard.
          */
-        atomic_t            loh_ref;
+        cfs_atomic_t           loh_ref;
         /**
          * Fid, uniquely identifying this object.
          */
-        struct lu_fid       loh_fid;
+        struct lu_fid          loh_fid;
         /**
          * Common object attributes, cached for efficiency. From enum
          * lu_object_header_attr.
          */
-        __u32               loh_attr;
+        __u32                  loh_attr;
         /**
          * Linkage into per-site hash table. Protected by lu_site::ls_guard.
          */
-        struct hlist_node   loh_hash;
+        cfs_hlist_node_t       loh_hash;
         /**
          * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
          */
-        struct list_head    loh_lru;
+        cfs_list_t             loh_lru;
         /**
          * Linkage into list of layers. Never modified once set (except lately
          * during object destruction). No locking is necessary.
          */
-        struct list_head    loh_layers;
+        cfs_list_t             loh_layers;
         /**
          * A list of references to this object, for debugging.
          */
-        struct lu_ref       loh_reference;
+        struct lu_ref          loh_reference;
 };
 
 struct fld;
@@ -576,23 +576,23 @@ struct lu_site {
          *
          * yes, it's heavy.
          */
-        rwlock_t              ls_guard;
+        cfs_rwlock_t              ls_guard;
         /**
          * Hash-table where objects are indexed by fid.
          */
-        struct hlist_head    *ls_hash;
+        cfs_hlist_head_t         *ls_hash;
         /**
          * Bit-mask for hash-table size.
          */
-        int                   ls_hash_mask;
+        int                       ls_hash_mask;
         /**
          * Order of hash-table.
          */
-        int                   ls_hash_bits;
+        int                       ls_hash_bits;
         /**
          * Number of buckets in the hash-table.
          */
-        int                   ls_hash_size;
+        int                       ls_hash_size;
 
         /**
          * LRU list, updated on each access to object. Protected by
@@ -602,22 +602,22 @@ struct lu_site {
          * moved to the lu_site::ls_lru.prev (this is due to the non-existence
          * of list_for_each_entry_safe_reverse()).
          */
-        struct list_head      ls_lru;
+        cfs_list_t                ls_lru;
         /**
          * Total number of objects in this site. Protected by
          * lu_site::ls_guard.
          */
-        unsigned              ls_total;
+        unsigned                  ls_total;
         /**
          * Total number of objects in this site with reference counter greater
          * than 0. Protected by lu_site::ls_guard.
          */
-        unsigned              ls_busy;
+        unsigned                  ls_busy;
 
         /**
          * Top-level device for this stack.
          */
-        struct lu_device     *ls_top_dev;
+        struct lu_device         *ls_top_dev;
 
         /**
          * Wait-queue signaled when an object in this site is ultimately
@@ -630,7 +630,7 @@ struct lu_site {
          *
          * \see htable_lookup().
          */
-        cfs_waitq_t           ls_marche_funebre;
+        cfs_waitq_t               ls_marche_funebre;
 
         /** statistical counters. Protected by nothing, races are accepted. */
         struct {
@@ -660,8 +660,8 @@ struct lu_site {
         /**
          * Linkage into global list of sites.
          */
-        struct list_head      ls_linkage;
-        struct lprocfs_stats *ls_time_stats;
+        cfs_list_t                ls_linkage;
+        struct lprocfs_stats     *ls_time_stats;
 };
 
 /** \name ctors
@@ -707,8 +707,8 @@ void lu_types_stop(void);
  */
 static inline void lu_object_get(struct lu_object *o)
 {
-        LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
-        atomic_inc(&o->lo_header->loh_ref);
+        LASSERT(cfs_atomic_read(&o->lo_header->loh_ref) > 0);
+        cfs_atomic_inc(&o->lo_header->loh_ref);
 }
 
 /**
@@ -717,7 +717,7 @@ static inline void lu_object_get(struct lu_object *o)
  */
 static inline int lu_object_is_dying(const struct lu_object_header *h)
 {
-        return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
+        return cfs_test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
 }
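
lu_object_get() and lu_object_is_dying() illustrate the same mechanical
substitution for atomics and bit operations. A minimal sketch of the presumed
Linux wrappers (the names match the calls above; the definitions are assumed,
not quoted from the patch):

        typedef atomic_t cfs_atomic_t;

        #define cfs_atomic_read(a)         atomic_read(a)
        #define cfs_atomic_inc(a)          atomic_inc(a)
        #define cfs_atomic_dec_and_test(a) atomic_dec_and_test(a)
        #define cfs_test_bit(nr, addr)     test_bit(nr, addr)

Because each wrapper is a 1:1 alias on Linux, the refcount and flag
semantics of the converted code are unchanged.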
 
 void lu_object_put(const struct lu_env *env, struct lu_object *o);
@@ -749,7 +749,7 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env,
  */
 static inline struct lu_object *lu_object_top(struct lu_object_header *h)
 {
-        LASSERT(!list_empty(&h->loh_layers));
+        LASSERT(!cfs_list_empty(&h->loh_layers));
         return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
 }
 
@@ -815,7 +815,7 @@ int lu_cdebug_printer(const struct lu_env *env,
 do {                                                                    \
         static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask);              \
                                                                         \
-        if (cdebug_show(mask, DEBUG_SUBSYSTEM)) {                       \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                   \
                 lu_object_print(env, &__info, lu_cdebug_printer, object); \
                 CDEBUG(mask, format , ## __VA_ARGS__);                  \
         }                                                               \
@@ -828,7 +828,7 @@ do {                                                                    \
 do {                                                                    \
         static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask);              \
                                                                         \
-        if (cdebug_show(mask, DEBUG_SUBSYSTEM)) {                       \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                   \
                 lu_object_header_print(env, &__info, lu_cdebug_printer, \
                                        (object)->lo_header);            \
                 lu_cdebug_printer(env, &__info, "\n");                  \
@@ -985,7 +985,7 @@ struct lu_context {
          * `non-transient' contexts, i.e., ones created for service threads
          * are placed here.
          */
-        struct list_head       lc_remember;
+        cfs_list_t             lc_remember;
         /**
          * Version counter used to skip calls to lu_context_refill() when no
          * keys were registered.
@@ -1119,11 +1119,11 @@ struct lu_context_key {
          * Internal implementation detail: number of values created for this
          * key.
          */
-        atomic_t lct_used;
+        cfs_atomic_t lct_used;
         /**
          * Internal implementation detail: module for this key.
          */
-        struct module *lct_owner;
+        cfs_module_t *lct_owner;
         /**
          * References to this key. For debugging.
          */
index fca9945..4652ac3 100644 (file)
@@ -123,34 +123,34 @@ struct lu_ref {
         /**
          * Spin-lock protecting lu_ref::lf_list.
          */
-        spinlock_t       lf_guard;
+        cfs_spinlock_t       lf_guard;
         /**
          * List of all outstanding references (each represented by struct
          * lu_ref_link), pointing to this object.
          */
-        struct list_head lf_list;
+        cfs_list_t           lf_list;
         /**
          * # of links.
          */
-        short            lf_refs;
+        short                lf_refs;
         /**
          * Flag set when lu_ref_add() failed to allocate lu_ref_link. It is
          * used to mask spurious failure of the following lu_ref_del().
          */
-        short            lf_failed;
+        short                lf_failed;
         /**
          * flags - attribute for the lu_ref, for pad and future use.
          */
-        short            lf_flags;
+        short                lf_flags;
         /**
          * Where was I initialized?
          */
-        short            lf_line;
-        const char      *lf_func;
+        short                lf_line;
+        const char          *lf_func;
         /**
          * Linkage into a global list of all lu_ref's (lu_ref_refs).
          */
-        struct list_head lf_linkage;
+        cfs_list_t           lf_linkage;
 };
 
 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line);
index aae74e9..811f47e 100644 (file)
@@ -52,15 +52,15 @@ struct lu_target {
         /** Server last transaction number */
         __u64                    lut_last_transno;
         /** Lock protecting last transaction number */
-        spinlock_t               lut_translock;
+        cfs_spinlock_t           lut_translock;
         /** Lock protecting client bitmap */
-        spinlock_t               lut_client_bitmap_lock;
+        cfs_spinlock_t           lut_client_bitmap_lock;
         /** Bitmap of known clients */
         unsigned long            lut_client_bitmap[LR_CLIENT_BITMAP_SIZE];
         /** Number of mounts */
         __u64                    lut_mount_count;
         __u32                    lut_stale_export_age;
-        spinlock_t               lut_trans_table_lock;
+        cfs_spinlock_t           lut_trans_table_lock;
 };
 
 typedef void (*lut_cb_t)(struct lu_target *lut, __u64 transno,
index 4f3695b..03a4423 100644 (file)
@@ -577,13 +577,14 @@ struct hsm_action_list {
 static __inline__ struct hsm_action_item * hai_zero(struct hsm_action_list *hal)
 {
         return (struct hsm_action_item *)(hal->hal_fsname +
-                                          size_round(strlen(hal->hal_fsname)));
+                                          cfs_size_round(strlen(hal-> \
+                                                                hal_fsname)));
 }
 /* Return pointer to next hai */
 static __inline__ struct hsm_action_item * hai_next(struct hsm_action_item *hai)
 {
         return (struct hsm_action_item *)((char *)hai +
-                                          size_round(hai->hai_len));
+                                          cfs_size_round(hai->hai_len));
 }
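
Both accessors walk a packed buffer of variable-length records, so they rely
on the rounding helper keeping every record 8-byte aligned. A sketch of
cfs_size_round(), under the assumption that it matches the old size_round()
exactly:

        /* Round val up to the next multiple of 8 (assumed behaviour). */
        static inline int cfs_size_round(int val)
        {
                return (val + 7) & (~0x7);
        }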
 
 
index 44a06b7..3561f8e 100644 (file)
@@ -66,21 +66,21 @@ struct capa_hmac_alg {
 }
 
 struct client_capa {
-        struct inode             *inode;      
-        struct list_head          lli_list;     /* link to lli_oss_capas */
+        struct inode             *inode;
+        cfs_list_t                lli_list;     /* link to lli_oss_capas */
 };
 
 struct target_capa {
-        struct hlist_node         c_hash;       /* link to capa hash */
+        cfs_hlist_node_t          c_hash;       /* link to capa hash */
 };
 
 struct obd_capa {
-        struct list_head          c_list;       /* link to capa_list */
+        cfs_list_t                c_list;       /* link to capa_list */
 
         struct lustre_capa        c_capa;       /* capa */
-        atomic_t                  c_refc;       /* ref count */
+        cfs_atomic_t              c_refc;       /* ref count */
         cfs_time_t                c_expiry;     /* jiffies */
-        spinlock_t                c_lock;       /* protect capa content */
+        cfs_spinlock_t            c_lock;       /* protect capa content */
         int                       c_site;
 
         union {
@@ -168,17 +168,18 @@ CDEBUG(level, fmt " capability key@%p mdsid "LPU64" keyid %u\n",               \
 typedef int (* renew_capa_cb_t)(struct obd_capa *, struct lustre_capa *);
 
 /* obdclass/capa.c */
-extern struct list_head capa_list[];
-extern spinlock_t capa_lock;
+extern cfs_list_t capa_list[];
+extern cfs_spinlock_t capa_lock;
 extern int capa_count[];
 extern cfs_mem_cache_t *capa_cachep;
 
-struct hlist_head *init_capa_hash(void);
-void cleanup_capa_hash(struct hlist_head *hash);
+cfs_hlist_head_t *init_capa_hash(void);
+void cleanup_capa_hash(cfs_hlist_head_t *hash);
 
-struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa);
-struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
-                             int alive);
+struct obd_capa *capa_add(cfs_hlist_head_t *hash,
+                          struct lustre_capa *capa);
+struct obd_capa *capa_lookup(cfs_hlist_head_t *hash,
+                             struct lustre_capa *capa, int alive);
 
 int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key);
 int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen);
@@ -197,8 +198,8 @@ static inline struct obd_capa *alloc_capa(int site)
                 return ERR_PTR(-ENOMEM);
 
         CFS_INIT_LIST_HEAD(&ocapa->c_list);
-        atomic_set(&ocapa->c_refc, 1);
-        spin_lock_init(&ocapa->c_lock);
+        cfs_atomic_set(&ocapa->c_refc, 1);
+        cfs_spin_lock_init(&ocapa->c_lock);
         ocapa->c_site = site;
         if (ocapa->c_site == CAPA_SITE_CLIENT)
                 CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
@@ -216,7 +217,7 @@ static inline struct obd_capa *capa_get(struct obd_capa *ocapa)
         if (!ocapa)
                 return NULL;
 
-        atomic_inc(&ocapa->c_refc);
+        cfs_atomic_inc(&ocapa->c_refc);
         return ocapa;
 }
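
capa_get()/capa_put() form the usual get/put refcount pair over the new
cfs_atomic_t counter. A hypothetical caller, as a sketch only (not from the
patch):

        struct obd_capa *oc = capa_get(ocapa);
        if (oc != NULL) {
                /* ... read oc->c_capa, taking oc->c_lock if needed ... */
                capa_put(oc);
        }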
 
@@ -225,17 +226,17 @@ static inline void capa_put(struct obd_capa *ocapa)
         if (!ocapa)
                 return;
 
-        if (atomic_read(&ocapa->c_refc) == 0) {
+        if (cfs_atomic_read(&ocapa->c_refc) == 0) {
                 DEBUG_CAPA(D_ERROR, &ocapa->c_capa, "refc is 0 for");
                 LBUG();
         }
 
-        if (atomic_dec_and_test(&ocapa->c_refc)) {
-                LASSERT(list_empty(&ocapa->c_list));
+        if (cfs_atomic_dec_and_test(&ocapa->c_refc)) {
+                LASSERT(cfs_list_empty(&ocapa->c_list));
                 if (ocapa->c_site == CAPA_SITE_CLIENT) {
-                        LASSERT(list_empty(&ocapa->u.cli.lli_list));
+                        LASSERT(cfs_list_empty(&ocapa->u.cli.lli_list));
                 } else {
-                        struct hlist_node *hnode;
+                        cfs_hlist_node_t *hnode;
 
                         hnode = &ocapa->u.tgt.c_hash;
                         LASSERT(!hnode->next && !hnode->pprev);
@@ -285,7 +286,7 @@ static inline int capa_opc_supported(struct lustre_capa *capa, __u64 opc)
 }
 
 struct filter_capa_key {
-        struct list_head        k_list;
+        cfs_list_t              k_list;
         struct lustre_capa_key  k_key;
 };
 
index e1a03e7..df002e0 100644 (file)
@@ -45,7 +45,7 @@
 #define LUSTRE_CFG_MAX_BUFCOUNT 8
 
 #define LCFG_HDR_SIZE(count) \
-    size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)]))
+    cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)]))
 
 /* If the LCFG_REQUIRED bit is set in a configuration command,
  * then the client is required to understand this parameter
@@ -160,7 +160,7 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
 
         offset = LCFG_HDR_SIZE(lcfg->lcfg_bufcount);
         for (i = 0; i < index; i++)
-                offset += size_round(lcfg->lcfg_buflens[i]);
+                offset += cfs_size_round(lcfg->lcfg_buflens[i]);
         return (char *)lcfg + offset;
 }
 
@@ -191,7 +191,7 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
          */
         if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
                 int last = min((int)lcfg->lcfg_buflens[index], 
-                               size_round(lcfg->lcfg_buflens[index]) - 1);
+                               cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
                 char lost = s[last];
                 s[last] = '\0';
                 if (lost != '\0') {
@@ -210,9 +210,9 @@ static inline int lustre_cfg_len(__u32 bufcount, __u32 *buflens)
 
         len = LCFG_HDR_SIZE(bufcount);
         for (i = 0; i < bufcount; i++)
-                len += size_round(buflens[i]);
+                len += cfs_size_round(buflens[i]);
 
-        RETURN(size_round(len));
+        RETURN(cfs_size_round(len));
 }
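
A quick worked example of the arithmetic (hypothetical values): with
bufcount = 2 and buflens = {5, 13}, the loop adds cfs_size_round(5) +
cfs_size_round(13) = 8 + 16 = 24 bytes on top of the rounded header, and the
total is rounded once more on return.

        __u32 lens[2] = { 5, 13 };
        int   total   = lustre_cfg_len(2, lens);  /* rounded header + 24 */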
 
 
index 336045b..8141fce 100644 (file)
@@ -428,7 +428,7 @@ struct lustre_sb_info {
         struct lustre_disk_data  *lsi_ldd;     /* mount info on-disk */
         struct ll_sb_info        *lsi_llsbi;   /* add'l client sbi info */
         struct vfsmount          *lsi_srv_mnt; /* the one server mount */
-        atomic_t                  lsi_mounts;  /* references to the srv_mnt */
+        cfs_atomic_t              lsi_mounts;  /* references to the srv_mnt */
 };
 
 #define LSI_SERVER                       0x00000001
@@ -445,10 +445,10 @@ struct lustre_sb_info {
 /****************** mount lookup info *********************/
 
 struct lustre_mount_info {
-        char               *lmi_name;
-        struct super_block *lmi_sb;
-        struct vfsmount    *lmi_mnt;
-        struct list_head    lmi_list_chain;
+        char                 *lmi_name;
+        struct super_block   *lmi_sb;
+        struct vfsmount      *lmi_mnt;
+        cfs_list_t            lmi_list_chain;
 };
 
 /****************** prototypes *********************/
index 509756d..4ac220d 100644 (file)
@@ -63,7 +63,7 @@ struct obd_device;
 /* 1.5 times the maximum 128 tasks available in VN mode */
 #define LDLM_DEFAULT_LRU_SIZE 196
 #else
-#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
+#define LDLM_DEFAULT_LRU_SIZE (100 * cfs_num_online_cpus())
 #endif
 #define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
 #define LDLM_CTIME_AGE_LIMIT (10)
@@ -304,27 +304,27 @@ struct ldlm_pool {
         /**
          * Lock for protecting slv/clv updates.
          */
-        spinlock_t             pl_lock;
+        cfs_spinlock_t         pl_lock;
         /**
          * Number of allowed locks in the pool, both client and server side.
          */
-        atomic_t               pl_limit;
+        cfs_atomic_t           pl_limit;
         /**
          * Number of granted locks in the pool.
          */
-        atomic_t               pl_granted;
+        cfs_atomic_t           pl_granted;
         /**
          * Grant rate per T.
          */
-        atomic_t               pl_grant_rate;
+        cfs_atomic_t           pl_grant_rate;
         /**
          * Cancel rate per T.
          */
-        atomic_t               pl_cancel_rate;
+        cfs_atomic_t           pl_cancel_rate;
         /**
          * Grant speed (GR-CR) per T.
          */
-        atomic_t               pl_grant_speed;
+        cfs_atomic_t           pl_grant_speed;
         /**
          * Server lock volume. Protected by pl_lock.
          */
@@ -337,7 +337,7 @@ struct ldlm_pool {
          * Lock volume factor. SLV on client is calculated as following:
          * server_slv * lock_volume_factor.
          */
-        atomic_t               pl_lock_volume_factor;
+        cfs_atomic_t           pl_lock_volume_factor;
         /**
          * Time when last slv from server was obtained.
          */
@@ -409,8 +409,8 @@ struct ldlm_namespace {
         /**
          * Hash table for namespace.
          */
-        struct list_head      *ns_hash;
-        spinlock_t             ns_hash_lock;
+        cfs_list_t            *ns_hash;
+        cfs_spinlock_t         ns_hash_lock;
 
          /**
           * Count of resources in the hash.
@@ -420,19 +420,19 @@ struct ldlm_namespace {
          /**
           * All root resources in namespace.
           */
-        struct list_head       ns_root_list;
+        cfs_list_t             ns_root_list;
 
         /**
          * Position in global namespace list.
          */
-        struct list_head       ns_list_chain;
+        cfs_list_t             ns_list_chain;
 
         /**
          * List of unused locks in namespace (LRU list).
          */
-        struct list_head       ns_unused_list;
+        cfs_list_t             ns_unused_list;
         int                    ns_nr_unused;
-        spinlock_t             ns_unused_lock;
+        cfs_spinlock_t         ns_unused_lock;
 
         unsigned int           ns_max_unused;
         unsigned int           ns_max_age;
@@ -447,7 +447,7 @@ struct ldlm_namespace {
          */
         cfs_time_t             ns_next_dump;
 
-        atomic_t               ns_locks;
+        cfs_atomic_t           ns_locks;
         __u64                  ns_resources;
         ldlm_res_policy        ns_policy;
         struct ldlm_valblock_ops *ns_lvbo;
@@ -530,7 +530,7 @@ typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
 /* Interval node data for each LDLM_EXTENT lock */
 struct ldlm_interval {
         struct interval_node li_node;   /* node for tree mgmt */
-        struct list_head     li_group;  /* the locks which have the same
+        cfs_list_t           li_group;  /* the locks which have the same
                                          * policy - group of the policy */
 };
 #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
@@ -554,12 +554,12 @@ struct ldlm_lock {
         /**
          * Lock reference count.
          */
-        atomic_t                 l_refc;
+        cfs_atomic_t             l_refc;
         /**
          * Internal spinlock protects l_resource.  we should hold this lock
          * first before grabbing res_lock.
          */
-        spinlock_t               l_lock;
+        cfs_spinlock_t           l_lock;
         /**
          * ldlm_lock_change_resource() can change this.
          */
@@ -567,11 +567,11 @@ struct ldlm_lock {
         /**
          * Protected by ns_hash_lock. List item for client side lru list.
          */
-        struct list_head         l_lru;
+        cfs_list_t               l_lru;
         /**
          * Protected by lr_lock, linkage to resource's lock queues.
          */
-        struct list_head         l_res_link;
+        cfs_list_t               l_res_link;
         /**
          * Tree node for ldlm_extent.
          */
@@ -580,7 +580,7 @@ struct ldlm_lock {
          * Protected by per-bucket exp->exp_lock_hash locks. Per export hash
          * of locks.
          */
-        struct hlist_node        l_exp_hash;
+        cfs_hlist_node_t         l_exp_hash;
         /**
          * Protected by lr_lock. Requested mode.
          */
@@ -665,10 +665,10 @@ struct ldlm_lock {
         void                 *l_lvb_data;
 
         void                 *l_ast_data;
-        spinlock_t            l_extents_list_lock;
-        struct list_head      l_extents_list;
+        cfs_spinlock_t        l_extents_list_lock;
+        cfs_list_t            l_extents_list;
 
-        struct list_head      l_cache_locks_list;
+        cfs_list_t            l_cache_locks_list;
 
         /*
          * Server-side-only members.
@@ -680,7 +680,7 @@ struct ldlm_lock {
         /**
          * Protected by elt_lock. Callbacks pending.
          */
-        struct list_head      l_pending_chain;
+        cfs_list_t            l_pending_chain;
 
         cfs_time_t            l_callback_timeout;
 
@@ -692,15 +692,15 @@ struct ldlm_lock {
         /**
          * For ldlm_add_ast_work_item().
          */
-        struct list_head      l_bl_ast;
+        cfs_list_t            l_bl_ast;
         /**
          * For ldlm_add_ast_work_item().
          */
-        struct list_head      l_cp_ast;
+        cfs_list_t            l_cp_ast;
         /**
          * For ldlm_add_ast_work_item().
          */
-        struct list_head      l_rk_ast;
+        cfs_list_t            l_rk_ast;
 
         struct ldlm_lock     *l_blocking_lock;
         int                   l_bl_ast_run;
@@ -708,8 +708,8 @@ struct ldlm_lock {
         /**
          * Protected by lr_lock, linkages to "skip lists".
          */
-        struct list_head      l_sl_mode;
-        struct list_head      l_sl_policy;
+        cfs_list_t            l_sl_mode;
+        cfs_list_t            l_sl_policy;
         struct lu_ref         l_reference;
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
         /* Debugging stuff for bug 20498, for tracking export
@@ -717,7 +717,7 @@ struct ldlm_lock {
         /** number of export references taken */
         int                   l_exp_refs_nr;
         /** link all locks referencing one export */
-        struct list_head      l_exp_refs_link;
+        cfs_list_t            l_exp_refs_link;
         /** referenced export object */
         struct obd_export    *l_exp_refs_target;
 #endif
@@ -727,26 +727,26 @@ struct ldlm_resource {
         struct ldlm_namespace *lr_namespace;
 
         /* protected by ns_hash_lock */
-        struct list_head       lr_hash;
+        cfs_list_t             lr_hash;
         struct ldlm_resource  *lr_parent;   /* 0 for a root resource */
-        struct list_head       lr_children; /* list head for child resources */
-        struct list_head       lr_childof;  /* part of ns_root_list if root res,
+        cfs_list_t             lr_children; /* list head for child resources */
+        cfs_list_t             lr_childof;  /* part of ns_root_list if root res,
                                              * part of lr_children if child */
-        spinlock_t             lr_lock;
+        cfs_spinlock_t         lr_lock;
 
         /* protected by lr_lock */
-        struct list_head       lr_granted;
-        struct list_head       lr_converting;
-        struct list_head       lr_waiting;
+        cfs_list_t             lr_granted;
+        cfs_list_t             lr_converting;
+        cfs_list_t             lr_waiting;
         ldlm_mode_t            lr_most_restr;
         ldlm_type_t            lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK} */
         struct ldlm_res_id     lr_name;
-        atomic_t               lr_refcount;
+        cfs_atomic_t           lr_refcount;
 
         struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];  /* interval trees*/
 
         /* Server-side-only lock value block elements */
-        struct semaphore       lr_lvb_sem;
+        cfs_semaphore_t        lr_lvb_sem;
         __u32                  lr_lvb_len;
         void                  *lr_lvb_data;
 
@@ -759,13 +759,13 @@ struct ldlm_resource {
 };
 
 struct ldlm_ast_work {
-        struct ldlm_lock *w_lock;
-        int               w_blocking;
-        struct ldlm_lock_desc w_desc;
-        struct list_head   w_list;
-        int w_flags;
-        void *w_data;
-        int w_datalen;
+        struct ldlm_lock      *w_lock;
+        int                    w_blocking;
+        struct ldlm_lock_desc  w_desc;
+        cfs_list_t             w_list;
+        int                    w_flags;
+        void                  *w_data;
+        int                    w_datalen;
 };
 
 /* ldlm_enqueue parameters common */
@@ -787,7 +787,7 @@ extern char *ldlm_typename[];
 extern char *ldlm_it2str(int it);
 #ifdef LIBCFS_DEBUG
 #define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
-        CHECK_STACK();                                                  \
+        CFS_CHECK_STACK();                                              \
                                                                         \
         if (((level) & D_CANTMASK) != 0 ||                              \
             ((libcfs_debug & (level)) != 0 &&                           \
@@ -832,7 +832,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
 
 typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                       int first_enq, ldlm_error_t *err,
-                                      struct list_head *work_list);
+                                      cfs_list_t *work_list);
 
 /*
  * Iterators.
@@ -955,17 +955,17 @@ do {                                            \
         lock;                                   \
 })
 
-#define ldlm_lock_list_put(head, member, count)                 \
-({                                                              \
-        struct ldlm_lock *_lock, *_next;                        \
-        int c = count;                                          \
-        list_for_each_entry_safe(_lock, _next, head, member) {  \
-                if (c-- == 0)                                   \
-                        break;                                  \
-                list_del_init(&_lock->member);                  \
-                LDLM_LOCK_RELEASE(_lock);                       \
-        }                                                       \
-        LASSERT(c <= 0);                                        \
+#define ldlm_lock_list_put(head, member, count)                     \
+({                                                                  \
+        struct ldlm_lock *_lock, *_next;                            \
+        int c = count;                                              \
+        cfs_list_for_each_entry_safe(_lock, _next, head, member) {  \
+                if (c-- == 0)                                       \
+                        break;                                      \
+                cfs_list_del_init(&_lock->member);                  \
+                LDLM_LOCK_RELEASE(_lock);                           \
+        }                                                           \
+        LASSERT(c <= 0);                                            \
 })
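
A hypothetical use of the reworked macro, draining at most ten locks queued
on a local cancel list through their l_bl_ast linkage (CFS_LIST_HEAD is
assumed to be the libcfs on-stack list declarator):

        CFS_LIST_HEAD(cancels);

        /* ... locks are linked onto &cancels via their l_bl_ast member ... */

        ldlm_lock_list_put(&cancels, l_bl_ast, 10);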
 
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
@@ -1021,7 +1021,8 @@ struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                         ldlm_type_t type, int create);
 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
 int ldlm_resource_putref(struct ldlm_resource *res);
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res,
+                            cfs_list_t *head,
                             struct ldlm_lock *lock);
 void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
@@ -1062,12 +1063,12 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                      int async);
 int ldlm_prep_enqueue_req(struct obd_export *exp,
                           struct ptlrpc_request *req,
-                          struct list_head *cancels,
+                          cfs_list_t *cancels,
                           int count);
 int ldlm_prep_elc_req(struct obd_export *exp,
                       struct ptlrpc_request *req,
                       int version, int opc, int canceloff,
-                      struct list_head *cancels, int count);
+                      cfs_list_t *cancels, int count);
 int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
                          const struct ldlm_request *dlm_req,
                          const struct ldlm_callback_suite *cbs);
@@ -1098,14 +1099,14 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                     const struct ldlm_res_id *res_id,
                                     ldlm_policy_data_t *policy,
                                     ldlm_mode_t mode, int flags, void *opaque);
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
+int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
                         int count, int flags);
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
-                               struct list_head *cancels,
+                               cfs_list_t *cancels,
                                ldlm_policy_data_t *policy,
                                ldlm_mode_t mode, int lock_flags,
                                int cancel_flags, void *opaque);
-int ldlm_cli_cancel_list(struct list_head *head, int count,
+int ldlm_cli_cancel_list(cfs_list_t *head, int count,
                          struct ptlrpc_request *req, int flags);
 
 /* mds/handler.c */
@@ -1135,19 +1136,19 @@ enum lock_res_type {
 
 static inline void lock_res(struct ldlm_resource *res)
 {
-        spin_lock(&res->lr_lock);
+        cfs_spin_lock(&res->lr_lock);
 }
 
 static inline void lock_res_nested(struct ldlm_resource *res,
                                    enum lock_res_type mode)
 {
-        spin_lock_nested(&res->lr_lock, mode);
+        cfs_spin_lock_nested(&res->lr_lock, mode);
 }
 
 
 static inline void unlock_res(struct ldlm_resource *res)
 {
-        spin_unlock(&res->lr_lock);
+        cfs_spin_unlock(&res->lr_lock);
 }
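
Since lock_res()/unlock_res() now expand to the cfs_ spinlock wrappers,
callers keep their shape. A hypothetical critical section over a resource's
lock queues (sketch only; ldlm_resource_add_lock() is declared earlier in
this patch):

        lock_res(res);
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
        unlock_res(res);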
 
 static inline void check_res_locked(struct ldlm_resource *res)
index 75457a2..b319457 100644 (file)
@@ -48,7 +48,7 @@ struct mdt_idmap_table;
 
 struct lu_export_data {
         /** Protects led_lcd below */
-        struct semaphore        led_lcd_lock;
+        cfs_semaphore_t         led_lcd_lock;
         /** Per-client data for each export */
         struct lsd_client_data *led_lcd;
         /** Offset of record in last_rcvd file */
@@ -59,10 +59,10 @@ struct lu_export_data {
 
 struct mdt_export_data {
         struct lu_export_data   med_led;
-        struct list_head        med_open_head;
-        spinlock_t              med_open_lock; /* lock med_open_head, mfd_list*/
+        cfs_list_t              med_open_head;
+        cfs_spinlock_t          med_open_lock; /* lock med_open_head, mfd_list*/
         __u64                   med_ibits_known;
-        struct semaphore           med_idmap_sem;
+        cfs_semaphore_t         med_idmap_sem;
         struct lustre_idmap_table *med_idmap;
 };
 
@@ -72,9 +72,9 @@ struct mdt_export_data {
 #define med_lr_idx      med_led.led_lr_idx
 
 struct osc_creator {
-        spinlock_t              oscc_lock;
-        struct list_head        oscc_wait_create_list;
-        struct obd_device       *oscc_obd;
+        cfs_spinlock_t          oscc_lock;
+        cfs_list_t              oscc_wait_create_list;
+        struct obd_device      *oscc_obd;
         obd_id                  oscc_last_id;//last available pre-created object
         obd_id                  oscc_next_id;// what object id to give out next
         int                     oscc_grow_count;
@@ -88,16 +88,16 @@ struct osc_creator {
 };
 
 struct ec_export_data { /* echo client */
-        struct list_head eced_locks;
+        cfs_list_t eced_locks;
 };
 
 /* In-memory access to client data from OST struct */
 struct filter_export_data {
         struct lu_export_data      fed_led;
-        spinlock_t                 fed_lock;     /**< protects fed_mod_list */
+        cfs_spinlock_t             fed_lock;     /**< protects fed_mod_list */
         long                       fed_dirty;    /* in bytes */
         long                       fed_grant;    /* in bytes */
-        struct list_head           fed_mod_list; /* files being modified */
+        cfs_list_t                 fed_mod_list; /* files being modified */
         int                        fed_mod_count;/* items in fed_writing list */
         long                       fed_pending;  /* bytes just being written */
         __u32                      fed_group;
@@ -110,26 +110,26 @@ struct filter_export_data {
 
 typedef struct nid_stat {
         lnet_nid_t               nid;
-        struct hlist_node        nid_hash;
-        struct list_head         nid_list;
+        cfs_hlist_node_t         nid_hash;
+        cfs_list_t               nid_list;
         struct obd_device       *nid_obd;
         struct proc_dir_entry   *nid_proc;
         struct lprocfs_stats    *nid_stats;
         struct lprocfs_stats    *nid_ldlm_stats;
         struct brw_stats        *nid_brw_stats;
-        atomic_t                 nid_exp_ref_count; /* for obd_nid_stats_hash
+        cfs_atomic_t             nid_exp_ref_count; /* for obd_nid_stats_hash
                                                            exp_nid_stats */
 }nid_stat_t;
 
 #define nidstat_getref(nidstat)                                                \
 do {                                                                           \
-        atomic_inc(&(nidstat)->nid_exp_ref_count);                             \
+        cfs_atomic_inc(&(nidstat)->nid_exp_ref_count);                         \
 } while(0)
 
 #define nidstat_putref(nidstat)                                                \
 do {                                                                           \
-        atomic_dec(&(nidstat)->nid_exp_ref_count);                             \
-        LASSERTF(atomic_read(&(nidstat)->nid_exp_ref_count) >= 0,              \
+        cfs_atomic_dec(&(nidstat)->nid_exp_ref_count);                         \
+        LASSERTF(cfs_atomic_read(&(nidstat)->nid_exp_ref_count) >= 0,          \
                  "stat %p nid_exp_ref_count < 0\n", nidstat);                  \
 } while(0)
 
@@ -141,27 +141,27 @@ enum obd_option {
 
 struct obd_export {
         struct portals_handle     exp_handle;
-        atomic_t                  exp_refcount;
+        cfs_atomic_t              exp_refcount;
         /**
          * Set of counters below is to track where export references are
          * kept. The exp_rpc_count is used for reconnect handling also,
          * the cb_count and locks_count are for debug purposes only for now.
          * The sum of them should be less than exp_refcount by 3
          */
-        atomic_t                  exp_rpc_count; /** RPC references */
-        atomic_t                  exp_cb_count; /** Commit callback references */
-        atomic_t                  exp_locks_count; /** Lock references */
+        cfs_atomic_t              exp_rpc_count; /* RPC references */
+        cfs_atomic_t              exp_cb_count; /* Commit callback references */
+        cfs_atomic_t              exp_locks_count; /** Lock references */
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
-        struct list_head          exp_locks_list;
-        spinlock_t                exp_locks_list_guard;
+        cfs_list_t                exp_locks_list;
+        cfs_spinlock_t            exp_locks_list_guard;
 #endif
-        atomic_t                  exp_replay_count;
+        cfs_atomic_t              exp_replay_count;
         struct obd_uuid           exp_client_uuid;
-        struct list_head          exp_obd_chain;
-        struct hlist_node         exp_uuid_hash; /* uuid-export hash*/
-        struct hlist_node         exp_nid_hash; /* nid-export hash */
+        cfs_list_t                exp_obd_chain;
+        cfs_hlist_node_t          exp_uuid_hash; /* uuid-export hash*/
+        cfs_hlist_node_t          exp_nid_hash; /* nid-export hash */
         /* exp_obd_chain_timed for ping evictor, protected by obd_dev_lock */
-        struct list_head          exp_obd_chain_timed;
+        cfs_list_t                exp_obd_chain_timed;
         struct obd_device        *exp_obd;
         struct obd_import        *exp_imp_reverse; /* to make RPCs backwards */
         struct nid_stat          *exp_nid_stats;
@@ -169,14 +169,14 @@ struct obd_export {
         struct ptlrpc_connection *exp_connection;
         __u32                     exp_conn_cnt;
         cfs_hash_t               *exp_lock_hash; /* existing lock hash */
-        spinlock_t                exp_lock_hash_lock;
-        struct list_head          exp_outstanding_replies;
-        struct list_head          exp_uncommitted_replies;
-        spinlock_t                exp_uncommitted_replies_lock;
+        cfs_spinlock_t            exp_lock_hash_lock;
+        cfs_list_t                exp_outstanding_replies;
+        cfs_list_t                exp_uncommitted_replies;
+        cfs_spinlock_t            exp_uncommitted_replies_lock;
         __u64                     exp_last_committed;
         cfs_time_t                exp_last_request_time;
-        struct list_head          exp_req_replay_queue;
-        spinlock_t                exp_lock; /* protects flags int below */
+        cfs_list_t                exp_req_replay_queue;
+        cfs_spinlock_t            exp_lock; /* protects flags int below */
         /* ^ protects exp_outstanding_replies too */
         __u64                     exp_connect_flags;
         enum obd_option           exp_flags;
@@ -197,7 +197,7 @@ struct obd_export {
                                   /* client timed out and tried to reconnect,
                                    * but couldn't because of active rpcs */
                                   exp_abort_active_req:1;
-        struct list_head          exp_queued_rpc;  /* RPC to be handled */
+        cfs_list_t                exp_queued_rpc;  /* RPC to be handled */
         /* also protected by exp_lock */
         enum lustre_sec_part      exp_sp_peer;
         struct sptlrpc_flavor     exp_flvr;             /* current */
index 6f3a29d..926e3e6 100644 (file)
@@ -136,7 +136,7 @@ struct lu_server_seq;
 struct lu_client_seq {
         /* Sequence-controller export. */
         struct obd_export      *lcs_exp;
-        struct semaphore        lcs_sem;
+        cfs_semaphore_t         lcs_sem;
 
         /*
          * Range of sequences allowed for allocation. When using lu_client_seq on
@@ -194,7 +194,7 @@ struct lu_server_seq {
         struct lu_client_seq   *lss_cli;
 
         /* Semaphore for protecting allocation */
-        struct semaphore        lss_sem;
+        cfs_semaphore_t         lss_sem;
 
         /*
          * Service uuid, passed from MDT + seq name to form unique seq name to
index ec65b99..9d46541 100644 (file)
@@ -61,7 +61,7 @@ enum {
 
 
 struct lu_fld_target {
-        struct list_head         ft_chain;
+        cfs_list_t               ft_chain;
         struct obd_export       *ft_exp;
         struct lu_server_fld    *ft_srv;
         __u64                    ft_idx;
@@ -87,7 +87,7 @@ struct lu_server_fld {
 
         /**
          * Protect index modifications */
-        struct mutex            lsf_lock;
+        cfs_mutex_t              lsf_lock;
 
         /**
          * Fld service name in form "fld-srv-lustre-MDTXXX" */
@@ -101,7 +101,7 @@ struct lu_client_fld {
 
         /**
          * List of exports client FLD knows about. */
-        struct list_head         lcf_targets;
+        cfs_list_t               lcf_targets;
 
         /**
          * Current hash to be used to choose an export. */
@@ -113,7 +113,7 @@ struct lu_client_fld {
 
         /**
          * Lock protecting exports list and fld_hash. */
-        spinlock_t               lcf_lock;
+        cfs_spinlock_t           lcf_lock;
 
         /**
          * Client FLD cache. */
index 5bc386d..aa05d4a 100644 (file)
@@ -47,6 +47,8 @@
 #error Unsupported operating system.
 #endif
 
+#include <libcfs/libcfs.h>
+
 typedef void (*portals_handle_addref_cb)(void *object);
 
 /* These handles are most easily used by having them appear at the very top of
@@ -62,15 +64,15 @@ typedef void (*portals_handle_addref_cb)(void *object);
  * uses some offsetof() magic. */
 
 struct portals_handle {
-        struct list_head h_link;
+        cfs_list_t h_link;
         __u64 h_cookie;
         portals_handle_addref_cb h_addref;
 
         /* newly added fields to handle the RCU issue. -jxiong */
-        spinlock_t h_lock;
+        cfs_spinlock_t h_lock;
         void *h_ptr;
         void (*h_free_cb)(void *, size_t);
-        struct rcu_head h_rcu;
+        cfs_rcu_head_t h_rcu;
         unsigned int h_size;
         __u8 h_in:1;
         __u8 h_unused[3];
@@ -84,7 +86,7 @@ void class_handle_hash(struct portals_handle *, portals_handle_addref_cb);
 void class_handle_unhash(struct portals_handle *);
 void class_handle_hash_back(struct portals_handle *);
 void *class_handle2object(__u64 cookie);
-void class_handle_free_cb(struct rcu_head *);
+void class_handle_free_cb(cfs_rcu_head_t *);
 int class_handle_init(void);
 void class_handle_cleanup(void);
 
index a155791..364a651 100644 (file)
@@ -69,12 +69,12 @@ enum lustre_idmap_idx {
 };
 
 struct lustre_idmap_table {
-        spinlock_t       lit_lock;
-        struct list_head lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
+        cfs_spinlock_t   lit_lock;
+        cfs_list_t       lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
 };
 
-extern void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist);
-extern void lustre_groups_sort(struct group_info *group_info);
+extern void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist);
+extern void lustre_groups_sort(cfs_group_info_t *group_info);
 extern int lustre_in_group_p(struct md_ucred *mu, gid_t grp);
 
 extern int lustre_idmap_add(struct lustre_idmap_table *t,
index 7cc9718..98124db 100644 (file)
 #define AT_FLG_NOHIST 0x1          /* use last reported value only */
 
 struct adaptive_timeout {
-        time_t       at_binstart;         /* bin start time */
-        unsigned int at_hist[AT_BINS];    /* timeout history bins */
-        unsigned int at_flags;
-        unsigned int at_current;          /* current timeout value */
-        unsigned int at_worst_ever;       /* worst-ever timeout value */
-        time_t       at_worst_time;       /* worst-ever timeout timestamp */
-        spinlock_t   at_lock;
+        time_t           at_binstart;         /* bin start time */
+        unsigned int     at_hist[AT_BINS];    /* timeout history bins */
+        unsigned int     at_flags;
+        unsigned int     at_current;          /* current timeout value */
+        unsigned int     at_worst_ever;       /* worst-ever timeout value */
+        time_t           at_worst_time;       /* worst-ever timeout timestamp */
+        cfs_spinlock_t   at_lock;
 };
 
 enum lustre_imp_state {
@@ -70,11 +70,11 @@ enum lustre_imp_state {
 };
 
 struct ptlrpc_at_array {
-        struct list_head *paa_reqs_array; /* array to hold requests */
+        cfs_list_t       *paa_reqs_array; /* array to hold requests */
         __u32             paa_size;       /* the size of array */
         __u32             paa_count;      /* the total count of reqs */
-        time_t            paa_deadline;   /* the earliest deadline of reqs */
-        __u32            *paa_reqs_count; /* the count of reqs in each entry */
+        time_t            paa_deadline;   /* earliest deadline of reqs */
+        __u32            *paa_reqs_count; /* count of reqs in each entry */
 };
 
 static inline char * ptlrpc_import_state_name(enum lustre_imp_state state)
@@ -98,7 +98,7 @@ enum obd_import_event {
 };
 
 struct obd_import_conn {
-        struct list_head          oic_item;
+        cfs_list_t                oic_item;
         struct ptlrpc_connection *oic_conn;
         struct obd_uuid           oic_uuid;
         __u64                     oic_last_attempt; /* jiffies, 64-bit */
@@ -120,31 +120,31 @@ struct import_state_hist {
 
 struct obd_import {
         struct portals_handle     imp_handle;
-        atomic_t                  imp_refcount;
+        cfs_atomic_t              imp_refcount;
         struct lustre_handle      imp_dlm_handle; /* client's ldlm export */
         struct ptlrpc_connection *imp_connection;
         struct ptlrpc_client     *imp_client;
-        struct list_head          imp_pinger_chain;
-        struct list_head          imp_zombie_chain; /* queue for destruction */
+        cfs_list_t                imp_pinger_chain;
+        cfs_list_t                imp_zombie_chain; /* queue for destruction */
 
         /* Lists of requests that are retained for replay, waiting for a reply,
          * or waiting for recovery to complete, respectively.
          */
-        struct list_head          imp_replay_list;
-        struct list_head          imp_sending_list;
-        struct list_head          imp_delayed_list;
+        cfs_list_t                imp_replay_list;
+        cfs_list_t                imp_sending_list;
+        cfs_list_t                imp_delayed_list;
 
         struct obd_device        *imp_obd;
         struct ptlrpc_sec        *imp_sec;
-        struct semaphore          imp_sec_mutex;
+        cfs_semaphore_t           imp_sec_mutex;
         cfs_time_t                imp_sec_expire;
         cfs_waitq_t               imp_recovery_waitq;
 
-        atomic_t                  imp_inflight;
-        atomic_t                  imp_unregistering;
-        atomic_t                  imp_replay_inflight;
-        atomic_t                  imp_inval_count;  /* in-progress invalidations */
-        atomic_t                  imp_timeouts;
+        cfs_atomic_t              imp_inflight;
+        cfs_atomic_t              imp_unregistering;
+        cfs_atomic_t              imp_replay_inflight;
+        cfs_atomic_t              imp_inval_count;  /* in-progress invalidations */
+        cfs_atomic_t              imp_timeouts;
         enum lustre_imp_state     imp_state;
         struct import_state_hist  imp_state_hist[IMP_STATE_HIST_LEN];
         int                       imp_state_hist_idx;
@@ -159,11 +159,11 @@ struct obd_import {
         __u64                     imp_last_success_conn;   /* jiffies, 64-bit */
 
         /* all available obd_import_conn linked here */
-        struct list_head          imp_conn_list;
+        cfs_list_t                imp_conn_list;
         struct obd_import_conn   *imp_conn_current;
 
         /* Protects flags, level, generation, conn_cnt, *_list */
-        spinlock_t                imp_lock;
+        cfs_spinlock_t            imp_lock;
 
         /* flags */
         unsigned long             imp_no_timeout:1,       /* timeouts are disabled */
@@ -201,7 +201,7 @@ typedef void (*obd_import_callback)(struct obd_import *imp, void *closure,
                                     int event, void *event_arg, void *cb_data);
 
 struct obd_import_observer {
-        struct list_head     oio_chain;
+        cfs_list_t           oio_chain;
         obd_import_callback  oio_cb;
         void                *oio_cb_data;
 };
@@ -233,7 +233,7 @@ static inline void at_init(struct adaptive_timeout *at, int val, int flags) {
         at->at_worst_ever = val;
         at->at_worst_time = cfs_time_current_sec();
         at->at_flags = flags;
-        spin_lock_init(&at->at_lock);
+        cfs_spin_lock_init(&at->at_lock);
 }
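
With at_lock converted to cfs_spinlock_t, any histogram update goes through
the cfs_ wrappers. A hypothetical, simplified measurement path:

        cfs_spin_lock(&at->at_lock);
        if (val > at->at_current)
                at->at_current = val;   /* sketch; the real update also
                                         * maintains the history bins */
        cfs_spin_unlock(&at->at_lock);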
 static inline int at_get(struct adaptive_timeout *at) {
         return at->at_current;
index e0e26fa..21cb37d 100644 (file)
@@ -114,15 +114,15 @@ struct obd_client_handle {
 #define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
 
 /* statfs_pack.c */
-void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
+void statfs_pack(struct obd_statfs *osfs, cfs_kstatfs_t *sfs);
+void statfs_unpack(cfs_kstatfs_t *sfs, struct obd_statfs *osfs);
 
 /* l_lock.c */
 struct lustre_lock {
         int l_depth;
         cfs_task_t *l_owner;
-        struct semaphore l_sem;
-        spinlock_t l_spin;
+        cfs_semaphore_t l_sem;
+        cfs_spinlock_t l_spin;
 };
 
 void l_lock_init(struct lustre_lock *);
@@ -190,11 +190,11 @@ struct obd_ioctl_hdr {
 
 static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
 {
-        int len = size_round(sizeof(struct obd_ioctl_data));
-        len += size_round(data->ioc_inllen1);
-        len += size_round(data->ioc_inllen2);
-        len += size_round(data->ioc_inllen3);
-        len += size_round(data->ioc_inllen4);
+        int len = cfs_size_round(sizeof(struct obd_ioctl_data));
+        len += cfs_size_round(data->ioc_inllen1);
+        len += cfs_size_round(data->ioc_inllen2);
+        len += cfs_size_round(data->ioc_inllen3);
+        len += cfs_size_round(data->ioc_inllen4);
         return len;
 }
 
@@ -343,7 +343,7 @@ static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
         int offset = 0;
         ENTRY;
 
-        err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
+        err = cfs_copy_from_user(&hdr, (void *)arg, sizeof(hdr));
         if (err)
                 RETURN(err);
 
@@ -374,7 +374,7 @@ static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
         *len = hdr.ioc_len;
         data = (struct obd_ioctl_data *)*buf;
 
-        err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
+        err = cfs_copy_from_user(*buf, (void *)arg, hdr.ioc_len);
         if (err) {
                 OBD_VFREE(*buf, hdr.ioc_len);
                 RETURN(err);
@@ -388,17 +388,17 @@ static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
 
         if (data->ioc_inllen1) {
                 data->ioc_inlbuf1 = &data->ioc_bulk[0];
-                offset += size_round(data->ioc_inllen1);
+                offset += cfs_size_round(data->ioc_inllen1);
         }
 
         if (data->ioc_inllen2) {
                 data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
-                offset += size_round(data->ioc_inllen2);
+                offset += cfs_size_round(data->ioc_inllen2);
         }
 
         if (data->ioc_inllen3) {
                 data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
-                offset += size_round(data->ioc_inllen3);
+                offset += cfs_size_round(data->ioc_inllen3);
         }
 
         if (data->ioc_inllen4) {
@@ -410,7 +410,7 @@ static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
 
 static inline int obd_ioctl_popdata(void *arg, void *data, int len)
 {
-        int err = copy_to_user(arg, data, len);
+        int err = cfs_copy_to_user(arg, data, len);
         if (err)
                 err = -EFAULT;
         return err;
@@ -683,7 +683,7 @@ do {                                                                           \
                 __blocked = l_w_e_set_sigs(0);                                 \
                                                                                \
         for (;;) {                                                             \
-                set_current_state(TASK_INTERRUPTIBLE);                         \
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);                 \
                                                                                \
                 if (condition)                                                 \
                         break;                                                 \
@@ -735,7 +735,7 @@ do {                                                                           \
                                                                                \
         cfs_block_sigs(__blocked);                                             \
                                                                                \
-        set_current_state(TASK_RUNNING);                                       \
+        cfs_set_current_state(CFS_TASK_RUNNING);                               \
         cfs_waitq_del(&wq, &__wait);                                           \
 } while (0)
 
@@ -805,7 +805,7 @@ do {                                                                    \
         __ret;                                                  \
 })
 
-#define cfs_wait_event(wq, condition)                          \
+#define l_cfs_wait_event(wq, condition)                         \
 ({                                                              \
         struct l_wait_info lwi = { 0 };                         \
         l_wait_event(wq, condition, &lwi);                      \
index 86e92f1..c9c1129 100644 (file)
@@ -124,8 +124,8 @@ struct lustre_client_ocd {
          * mount is connected to. This field is updated by ll_ocd_update()
          * under ->lco_lock.
          */
-        __u64      lco_flags;
-        struct semaphore   lco_lock;
+        __u64              lco_flags;
+        cfs_semaphore_t    lco_lock;
         struct obd_export *lco_md_exp;
         struct obd_export *lco_dt_exp;
 };
index 05d85ab..d1d9d59 100644 (file)
 #define LLOG_EEMPTY 4711
 
 struct plain_handle_data {
-        struct list_head    phd_entry;
+        cfs_list_t          phd_entry;
         struct llog_handle *phd_cat_handle;
         struct llog_cookie  phd_cookie; /* cookie of this log in its cat */
         int                 phd_last_idx;
 };
 
 struct cat_handle_data {
-        struct list_head        chd_head;
+        cfs_list_t              chd_head;
         struct llog_handle     *chd_current_log; /* currently open log */
 };
 
 /* In-memory descriptor for a log object or log catalog */
 struct llog_handle {
-        struct rw_semaphore     lgh_lock;
+        cfs_rw_semaphore_t      lgh_lock;
         struct llog_logid       lgh_id;              /* id of this log */
         struct llog_log_hdr    *lgh_hdr;
         struct file            *lgh_file;
@@ -274,8 +274,8 @@ struct llog_ctxt {
         struct llog_handle      *loc_handle;
         struct llog_commit_master *loc_lcm;
         struct llog_canceld_ctxt *loc_llcd;
-        struct semaphore         loc_sem; /* protects loc_llcd and loc_imp */
-        atomic_t                 loc_refcount;
+        cfs_semaphore_t          loc_sem; /* protects loc_llcd and loc_imp */
+        cfs_atomic_t             loc_refcount;
         void                    *llog_proc_cb;
         long                     loc_flags; /* flags, see above defines */
 };
@@ -290,11 +290,11 @@ struct llog_commit_master {
         /**
          * Number of llcds on this lcm.
          */
-        atomic_t                   lcm_count;
+        cfs_atomic_t               lcm_count;
         /**
          * The refcount for lcm
          */
-         atomic_t                  lcm_refcount;
+         cfs_atomic_t              lcm_refcount;
         /**
          * Thread control structure. Used for control commit thread.
          */
@@ -302,11 +302,11 @@ struct llog_commit_master {
         /**
          * Lock protecting list of llcds.
          */
-        spinlock_t                 lcm_lock;
+        cfs_spinlock_t             lcm_lock;
         /**
          * Llcds in flight for debugging purposes.
          */
-        struct list_head           lcm_llcds;
+        cfs_list_t                 lcm_llcds;
         /**
          * Commit thread name buffer. Only used for thread start.
          */
@@ -316,15 +316,15 @@ struct llog_commit_master {
 static inline struct llog_commit_master
 *lcm_get(struct llog_commit_master *lcm)
 {
-        LASSERT(atomic_read(&lcm->lcm_refcount) > 0);
-        atomic_inc(&lcm->lcm_refcount);
+        LASSERT(cfs_atomic_read(&lcm->lcm_refcount) > 0);
+        cfs_atomic_inc(&lcm->lcm_refcount);
         return lcm;
 }
 
 static inline void
 lcm_put(struct llog_commit_master *lcm)
 {
-        if (!atomic_dec_and_test(&lcm->lcm_refcount)) {
+        if (!cfs_atomic_dec_and_test(&lcm->lcm_refcount)) {
                 return ;
         }
         OBD_FREE_PTR(lcm);
@@ -350,7 +350,7 @@ struct llog_canceld_ctxt {
         /**
          * Link to lcm llcds list.
          */
-        struct list_head           llcd_list;
+        cfs_list_t                 llcd_list;
         /**
          * Current llcd size while gathering cookies. This should not be
          * more than ->llcd_size. Used for determining if we need to
@@ -421,15 +421,15 @@ static inline int llog_handle2ops(struct llog_handle *loghandle,
 
 static inline int llog_data_len(int len)
 {
-        return size_round(len);
+        return cfs_size_round(len);
 }
 
 static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
 {
-        LASSERT(atomic_read(&ctxt->loc_refcount) > 0);
-        atomic_inc(&ctxt->loc_refcount);
+        LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 0);
+        cfs_atomic_inc(&ctxt->loc_refcount);
         CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt,
-               atomic_read(&ctxt->loc_refcount));
+               cfs_atomic_read(&ctxt->loc_refcount));
         return ctxt;
 }
 
@@ -437,18 +437,18 @@ static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
 {
         if (ctxt == NULL)
                 return;
-        LASSERT(atomic_read(&ctxt->loc_refcount) > 0);
-        LASSERT(atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
+        LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
         CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
-               atomic_read(&ctxt->loc_refcount) - 1);
+               cfs_atomic_read(&ctxt->loc_refcount) - 1);
         __llog_ctxt_put(ctxt);
 }
 
 static inline void llog_group_init(struct obd_llog_group *olg, int group)
 {
         cfs_waitq_init(&olg->olg_waitq);
-        spin_lock_init(&olg->olg_lock);
-        sema_init(&olg->olg_cat_processing, 1);
+        cfs_spin_lock_init(&olg->olg_lock);
+        cfs_sema_init(&olg->olg_cat_processing, 1);
         olg->olg_group = group;
 }
 
@@ -457,13 +457,13 @@ static inline void llog_group_set_export(struct obd_llog_group *olg,
 {
         LASSERT(exp != NULL);
 
-        spin_lock(&olg->olg_lock);
+        cfs_spin_lock(&olg->olg_lock);
         if (olg->olg_exp != NULL && olg->olg_exp != exp)
                 CWARN("%s: export for group %d is changed: 0x%p -> 0x%p\n",
                       exp->exp_obd->obd_name, olg->olg_group,
                       olg->olg_exp, exp);
         olg->olg_exp = exp;
-        spin_unlock(&olg->olg_lock);
+        cfs_spin_unlock(&olg->olg_lock);
 }
 
 static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
@@ -471,13 +471,13 @@ static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
 {
         LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
 
-        spin_lock(&olg->olg_lock);
+        cfs_spin_lock(&olg->olg_lock);
         if (olg->olg_ctxts[index] != NULL) {
-                spin_unlock(&olg->olg_lock);
+                cfs_spin_unlock(&olg->olg_lock);
                 return -EEXIST;
         }
         olg->olg_ctxts[index] = ctxt;
-        spin_unlock(&olg->olg_lock);
+        cfs_spin_unlock(&olg->olg_lock);
         return 0;
 }
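
llog_group_set_ctxt() is a set-once slot: the NULL check and the store must sit under the same olg_lock, or two racing callers could both observe an empty slot and both report success. The same idiom sketched with a POSIX spinlock in place of cfs_spinlock_t (group_like and friends are illustrative names only):

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    #define MAX_CTXTS 32

    struct group_like {
            pthread_spinlock_t lock;        /* olg_lock analogue */
            void *ctxts[MAX_CTXTS];         /* olg_ctxts analogue */
    };

    static int group_set_ctxt(struct group_like *g, int index, void *ctxt)
    {
            pthread_spin_lock(&g->lock);    /* cfs_spin_lock() */
            if (g->ctxts[index] != NULL) {
                    pthread_spin_unlock(&g->lock);
                    return -EEXIST;         /* slot already claimed */
            }
            g->ctxts[index] = ctxt;
            pthread_spin_unlock(&g->lock);  /* cfs_spin_unlock() */
            return 0;
    }
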
 
@@ -488,13 +488,13 @@ static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg,
 
         LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
 
-        spin_lock(&olg->olg_lock);
+        cfs_spin_lock(&olg->olg_lock);
         if (olg->olg_ctxts[index] == NULL) {
                 ctxt = NULL;
         } else {
                 ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
         }
-        spin_unlock(&olg->olg_lock);
+        cfs_spin_unlock(&olg->olg_lock);
         return ctxt;
 }
 
@@ -536,7 +536,7 @@ static inline int llog_write_rec(struct llog_handle *handle,
                                 + sizeof(struct llog_rec_tail);
         else
                 buflen = rec->lrh_len;
-        LASSERT(size_round(buflen) == buflen);
+        LASSERT(cfs_size_round(buflen) == buflen);
 
         raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
         if (!raised)
index 81abda6..a30c2ec 100644 (file)
@@ -71,13 +71,13 @@ struct ptlrpc_request;
 struct obd_device;
 
 struct mdc_rpc_lock {
-        struct semaphore rpcl_sem;
+        cfs_semaphore_t  rpcl_sem;
         struct lookup_intent *rpcl_it;
 };
 
 static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
 {
-        sema_init(&lck->rpcl_sem, 1);
+        cfs_sema_init(&lck->rpcl_sem, 1);
         lck->rpcl_it = NULL;
 }
 
@@ -86,7 +86,7 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
 {
         ENTRY;
         if (!it || (it->it_op != IT_GETATTR && it->it_op != IT_LOOKUP)) {
-                down(&lck->rpcl_sem);
+                cfs_down(&lck->rpcl_sem);
                 LASSERT(lck->rpcl_it == NULL);
                 lck->rpcl_it = it;
         }
@@ -98,7 +98,7 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
         if (!it || (it->it_op != IT_GETATTR && it->it_op != IT_LOOKUP)) {
                 LASSERT(it == lck->rpcl_it);
                 lck->rpcl_it = NULL;
-                up(&lck->rpcl_sem);
+                cfs_up(&lck->rpcl_sem);
         }
         EXIT;
 }
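
mdc_rpc_lock serializes metadata RPCs with a semaphore initialized to a count of 1, i.e. used as a mutex, and records the lookup intent of the current holder; cfs_down()/cfs_up() are simply the renamed down()/up(). A userspace sketch of the same protocol with POSIX semaphores (all names here are illustrative, not the Lustre API):

    #include <assert.h>
    #include <semaphore.h>
    #include <stddef.h>

    struct rpc_lock_like {
            sem_t  sem;                     /* rpcl_sem analogue */
            void  *it;                      /* rpcl_it analogue */
    };

    static void rpc_lock_init(struct rpc_lock_like *lck)
    {
            sem_init(&lck->sem, 0, 1);      /* cfs_sema_init(sem, 1) */
            lck->it = NULL;
    }

    static void rpc_lock_get(struct rpc_lock_like *lck, void *it)
    {
            sem_wait(&lck->sem);            /* cfs_down() */
            lck->it = it;                   /* record the holder */
    }

    static void rpc_lock_put(struct rpc_lock_like *lck, void *it)
    {
            assert(it == lck->it);          /* only the holder may unlock */
            lck->it = NULL;
            sem_post(&lck->sem);            /* cfs_up() */
    }
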
@@ -119,7 +119,7 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
 
 
 struct mdc_cache_waiter {
-        struct list_head        mcw_entry;
+        cfs_list_t              mcw_entry;
         cfs_waitq_t             mcw_waitq;
 };
 
index 2a6dc54..9fd2e72 100644 (file)
  */
 
 #define LDLM_THREADS_AUTO_MIN (2)
-#define LDLM_THREADS_AUTO_MAX min(num_online_cpus()*num_online_cpus()*32, 128)
+#define LDLM_THREADS_AUTO_MAX min(cfs_num_online_cpus() * \
+                                  cfs_num_online_cpus() * 32, 128)
 #define LDLM_BL_THREADS  LDLM_THREADS_AUTO_MIN
-#define LDLM_NBUFS      (64 * num_online_cpus())
+#define LDLM_NBUFS      (64 * cfs_num_online_cpus())
 #define LDLM_BUFSIZE    (8 * 1024)
 #define LDLM_MAXREQSIZE (5 * 1024)
 #define LDLM_MAXREPSIZE (1024)
 #define MDT_MIN_THREADS 2UL
 #define MDT_MAX_THREADS 512UL
 #define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
-                                  num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
+                                  cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
+                                  2UL)
 #define FLD_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
-                                  num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
+                                  cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
+                                  2UL)
 #define SEQ_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
-                                  num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
+                                  cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
+                                  2UL)
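
In these sizing macros, cfs_num_physpages >> (25 - CFS_PAGE_SHIFT) is total RAM expressed in 32MB (2^25 byte) units, so each service gets roughly one thread per 32MB of memory, clamped to [2, MDT_MAX_THREADS]. A worked example, assuming 4KB pages (CFS_PAGE_SHIFT == 12) and 2GB of RAM:

    #include <stdio.h>

    int main(void)
    {
            unsigned long num_physpages = (2UL << 30) >> 12;  /* 2GB / 4KB */
            unsigned long threads = num_physpages >> (25 - 12);

            if (threads > 512UL) threads = 512UL;   /* MDT_MAX_THREADS */
            if (threads < 2UL)   threads = 2UL;     /* lower bound */
            printf("%lu\n", threads);               /* prints 64 */
            return 0;
    }

So a 2GB node runs 64 metadata service threads, a 16GB node hits the 512-thread ceiling, and a tiny VM still gets the minimum of 2.
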
 
 /* Absolute limits */
 #define MDS_THREADS_MIN 2
 #define MDS_THREADS_MAX 512
 #define MDS_THREADS_MIN_READPAGE 2
-#define MDS_NBUFS       (64 * num_online_cpus())
+#define MDS_NBUFS       (64 * cfs_num_online_cpus())
 #define MDS_BUFSIZE     (8 * 1024)
 /* Assume file name length = FNAME_MAX = 256 (true for ext3).
  *        path name length = PATH_MAX = 4096
 
 #define MGS_THREADS_AUTO_MIN 2
 #define MGS_THREADS_AUTO_MAX 32
-#define MGS_NBUFS       (64 * num_online_cpus())
+#define MGS_NBUFS       (64 * cfs_num_online_cpus())
 #define MGS_BUFSIZE     (8 * 1024)
 #define MGS_MAXREQSIZE  (7 * 1024)
 #define MGS_MAXREPSIZE  (9 * 1024)
 /* Absolute limits */
 #define OSS_THREADS_MIN 3       /* difficult replies, HPQ, others */
 #define OSS_THREADS_MAX 512
-#define OST_NBUFS       (64 * num_online_cpus())
+#define OST_NBUFS       (64 * cfs_num_online_cpus())
 #define OST_BUFSIZE     (8 * 1024)
 /* OST_MAXREQSIZE ~= 4768 bytes =
  * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
 
 struct ptlrpc_connection {
-        struct hlist_node       c_hash;
+        cfs_hlist_node_t        c_hash;
         lnet_nid_t              c_self;
         lnet_process_id_t       c_peer;
         struct obd_uuid         c_remote_uuid;
-        atomic_t                c_refcount;
+        cfs_atomic_t            c_refcount;
 };
 
 struct ptlrpc_client {
@@ -219,21 +223,21 @@ struct ptlrpc_request_set;
 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
 
 struct ptlrpc_request_set {
-        int               set_remaining; /* # uncompleted requests */
-        cfs_waitq_t       set_waitq;
-        cfs_waitq_t      *set_wakeup_ptr;
-        struct list_head  set_requests;
-        struct list_head  set_cblist; /* list of completion callbacks */
-        set_interpreter_func    set_interpret; /* completion callback */
-        void              *set_arg; /* completion context */
+        int                   set_remaining; /* # uncompleted requests */
+        cfs_waitq_t           set_waitq;
+        cfs_waitq_t          *set_wakeup_ptr;
+        cfs_list_t            set_requests;
+        cfs_list_t            set_cblist; /* list of completion callbacks */
+        set_interpreter_func  set_interpret; /* completion callback */
+        void                 *set_arg; /* completion context */
         /* locked so that any old caller can communicate requests to
          * the set holder who can then fold them into the lock-free set */
-        spinlock_t        set_new_req_lock;
-        struct list_head  set_new_requests;
+        cfs_spinlock_t        set_new_req_lock;
+        cfs_list_t            set_new_requests;
 };
 
 struct ptlrpc_set_cbdata {
-        struct list_head        psc_item;
+        cfs_list_t              psc_item;
         set_interpreter_func    psc_interpret;
         void                   *psc_data;
 };
@@ -253,14 +257,14 @@ struct ptlrpc_cb_id {
 
 struct ptlrpc_reply_state {
         struct ptlrpc_cb_id    rs_cb_id;
-        struct list_head       rs_list;
-        struct list_head       rs_exp_list;
-        struct list_head       rs_obd_list;
+        cfs_list_t             rs_list;
+        cfs_list_t             rs_exp_list;
+        cfs_list_t             rs_obd_list;
 #if RS_DEBUG
-        struct list_head       rs_debug_list;
+        cfs_list_t             rs_debug_list;
 #endif
         /* A spinlock to protect the reply state flags */
-        spinlock_t             rs_lock;
+        cfs_spinlock_t         rs_lock;
         /* Reply state flags */
         unsigned long          rs_difficult:1;     /* ACK/commit stuff */
         unsigned long          rs_no_ack:1;    /* no ACK, even for
@@ -280,7 +284,7 @@ struct ptlrpc_reply_state {
         struct obd_export     *rs_export;
         struct ptlrpc_service *rs_service;
         lnet_handle_md_t       rs_md_h;
-        atomic_t               rs_refcount;
+        cfs_atomic_t           rs_refcount;
 
         struct ptlrpc_svc_ctx *rs_svc_ctx;
         struct lustre_msg     *rs_repbuf;       /* wrapper */
@@ -312,8 +316,8 @@ typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
                                     void *arg, int rc);
 
 struct ptlrpc_request_pool {
-        spinlock_t prp_lock;
-        struct list_head prp_req_list;    /* list of ptlrpc_request structs */
+        cfs_spinlock_t prp_lock;
+        cfs_list_t prp_req_list;    /* list of ptlrpc_request structs */
         int prp_rq_size;
         void (*prp_populate)(struct ptlrpc_request_pool *, int);
 };
@@ -340,16 +344,16 @@ struct ptlrpc_hpreq_ops {
  */
 struct ptlrpc_request {
         int rq_type; /* one of PTL_RPC_MSG_* */
-        struct list_head rq_list;
-        struct list_head rq_timed_list;         /* server-side early replies */
-        struct list_head rq_history_list;       /* server-side history */
-        struct list_head rq_exp_list;           /* server-side per-export list */
-        struct ptlrpc_hpreq_ops *rq_ops;        /* server-side hp handlers */
-        __u64            rq_history_seq;        /* history sequence # */
+        cfs_list_t rq_list;
+        cfs_list_t rq_timed_list;    /* server-side early replies */
+        cfs_list_t rq_history_list;  /* server-side history */
+        cfs_list_t rq_exp_list;      /* server-side per-export list */
+        struct ptlrpc_hpreq_ops *rq_ops;       /* server-side hp handlers */
+        __u64 rq_history_seq;   /* history sequence # */
         /* the index of service's srv_at_array into which request is linked */
         time_t rq_at_index;
         int rq_status;
-        spinlock_t rq_lock;
+        cfs_spinlock_t rq_lock;
         /* client-side flags are serialized by rq_lock */
         unsigned long rq_intr:1, rq_replied:1, rq_err:1,
                 rq_timedout:1, rq_resend:1, rq_restart:1,
@@ -376,8 +380,8 @@ struct ptlrpc_request {
 
         enum rq_phase rq_phase; /* one of RQ_PHASE_* */
         enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
-        atomic_t rq_refcount;   /* client-side refcount for SENT race,
-                                   server-side refcount for multiple replies */
+        cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
+                                    server-side refcount for multiple replies */
 
         struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */
 
@@ -393,11 +397,11 @@ struct ptlrpc_request {
         struct lustre_msg *rq_repmsg;
         __u64 rq_transno;
         __u64 rq_xid;
-        struct list_head rq_replay_list;
+        cfs_list_t rq_replay_list;
 
         struct ptlrpc_cli_ctx   *rq_cli_ctx;     /* client's half ctx */
         struct ptlrpc_svc_ctx   *rq_svc_ctx;     /* server's half ctx */
-        struct list_head         rq_ctx_chain;   /* link to waited ctx */
+        cfs_list_t               rq_ctx_chain;   /* link to waited ctx */
 
         struct sptlrpc_flavor    rq_flvr;        /* client & server */
         enum lustre_sec_part     rq_sp_from;
@@ -488,7 +492,7 @@ struct ptlrpc_request {
         int    rq_timeout;               /* service time estimate (secs) */
 
         /* Multi-rpc bits */
-        struct list_head rq_set_chain;
+        cfs_list_t rq_set_chain;
         struct ptlrpc_request_set *rq_set;
         /** Async completion handler */
         ptlrpc_interpterer_t rq_interpret_reply;
@@ -598,7 +602,7 @@ void _debug_req(struct ptlrpc_request *req, __u32 mask,
 
 #define debug_req(cdls, level, req, file, func, line, fmt, a...)              \
 do {                                                                          \
-        CHECK_STACK();                                                        \
+        CFS_CHECK_STACK();                                                    \
                                                                               \
         if (((level) & D_CANTMASK) != 0 ||                                    \
             ((libcfs_debug & (level)) != 0 &&                                 \
@@ -622,7 +626,7 @@ do {                                                                          \
 } while (0)
 
 struct ptlrpc_bulk_page {
-        struct list_head bp_link;
+        cfs_list_t       bp_link;
         int              bp_buflen;
         int              bp_pageoffset; /* offset within a page */
         struct page     *bp_page;
@@ -638,7 +642,7 @@ struct ptlrpc_bulk_desc {
         unsigned long bd_network_rw:1;           /* accessible to the network */
         unsigned long bd_type:2;                 /* {put,get}{source,sink} */
         unsigned long bd_registered:1;           /* client side */
-        spinlock_t   bd_lock;                   /* serialise with callback */
+        cfs_spinlock_t bd_lock;                   /* serialise with callback */
         int bd_import_generation;
         struct obd_export *bd_export;
         struct obd_import *bd_import;
@@ -672,7 +676,7 @@ struct ptlrpc_thread {
         /**
          * active threads in svc->srv_threads
          */
-        struct list_head t_link;
+        cfs_list_t t_link;
         /**
          * thread-private data (preallocated memory)
          */
@@ -699,8 +703,8 @@ struct ptlrpc_thread {
 };
 
 struct ptlrpc_request_buffer_desc {
-        struct list_head       rqbd_list;
-        struct list_head       rqbd_reqs;
+        cfs_list_t             rqbd_list;
+        cfs_list_t             rqbd_reqs;
         struct ptlrpc_service *rqbd_service;
         lnet_handle_md_t       rqbd_md_h;
         int                    rqbd_refcount;
@@ -716,7 +720,7 @@ typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
 #define PTLRPC_SVC_HP_RATIO 10
 
 struct ptlrpc_service {
-        struct list_head srv_list;              /* chain thru all services */
+        cfs_list_t       srv_list;              /* chain thru all services */
         int              srv_max_req_size;      /* biggest request to receive */
         int              srv_max_reply_size;    /* biggest reply to send */
         int              srv_buf_size;          /* size of individual buffers */
@@ -726,7 +730,7 @@ struct ptlrpc_service {
         int              srv_threads_max;       /* thread upper limit */
         int              srv_threads_started;   /* index of last started thread */
         int              srv_threads_running;   /* # running threads */
-        atomic_t         srv_n_difficult_replies; /* # 'difficult' replies */
+        cfs_atomic_t     srv_n_difficult_replies; /* # 'difficult' replies */
         int              srv_n_active_reqs;     /* # reqs being served */
         int              srv_n_hpreq;           /* # HPreqs being served */
         cfs_duration_t   srv_rqbd_timeout;      /* timeout before re-posting reqs, in ticks */
@@ -741,61 +745,61 @@ struct ptlrpc_service {
 
         /* AT stuff */
         struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
-        spinlock_t        srv_at_lock;
+        cfs_spinlock_t   srv_at_lock;
         struct ptlrpc_at_array  srv_at_array;   /* reqs waiting for replies */
-        cfs_timer_t       srv_at_timer;         /* early reply timer */
-
-        int               srv_n_queued_reqs;    /* # reqs in either of the queues below */
-        int               srv_hpreq_count;      /* # hp requests handled */
-        int               srv_hpreq_ratio;      /* # hp per lp reqs to handle */
-        struct list_head  srv_req_in_queue;     /* incoming reqs */
-        struct list_head  srv_request_queue;    /* reqs waiting for service */
-        struct list_head  srv_request_hpq;      /* high priority queue */
-
-        struct list_head  srv_request_history;  /* request history */
-        __u64             srv_request_seq;      /* next request sequence # */
-        __u64             srv_request_max_cull_seq; /* highest seq culled from history */
-        svcreq_printfn_t  srv_request_history_print_fn; /* service-specific print fn */
-
-        struct list_head  srv_idle_rqbds;       /* request buffers to be reposted */
-        struct list_head  srv_active_rqbds;     /* req buffers receiving */
-        struct list_head  srv_history_rqbds;    /* request buffer history */
-        int               srv_nrqbd_receiving;  /* # posted request buffers */
-        int               srv_n_history_rqbds;  /* # request buffers in history */
-        int               srv_max_history_rqbds;/* max # request buffers in history */
-
-        atomic_t          srv_outstanding_replies;
-        struct list_head  srv_active_replies;   /* all the active replies */
+        cfs_timer_t      srv_at_timer;      /* early reply timer */
+
+        int              srv_n_queued_reqs; /* # reqs in either of the queues below */
+        int              srv_hpreq_count;   /* # hp requests handled */
+        int              srv_hpreq_ratio;   /* # hp per lp reqs to handle */
+        cfs_list_t       srv_req_in_queue;  /* incoming reqs */
+        cfs_list_t       srv_request_queue; /* reqs waiting for service */
+        cfs_list_t       srv_request_hpq;   /* high priority queue */
+
+        cfs_list_t       srv_request_history;  /* request history */
+        __u64            srv_request_seq;      /* next request sequence # */
+        __u64            srv_request_max_cull_seq; /* highest seq culled from history */
+        svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
+
+        cfs_list_t       srv_idle_rqbds;    /* request buffers to be reposted */
+        cfs_list_t       srv_active_rqbds;  /* req buffers receiving */
+        cfs_list_t       srv_history_rqbds; /* request buffer history */
+        int              srv_nrqbd_receiving; /* # posted request buffers */
+        int              srv_n_history_rqbds;  /* # request buffers in history */
+        int              srv_max_history_rqbds;/* max # request buffers in history */
+
+        cfs_atomic_t     srv_outstanding_replies;
+        cfs_list_t       srv_active_replies;   /* all the active replies */
 #ifndef __KERNEL__
-        struct list_head  srv_reply_queue;      /* replies waiting for service */
+        cfs_list_t       srv_reply_queue;  /* replies waiting for service */
 #endif
-        cfs_waitq_t       srv_waitq; /* all threads sleep on this. This
-                                      * wait-queue is signalled when new
-                                      * incoming request arrives and when
-                                      * difficult reply has to be handled. */
+        cfs_waitq_t      srv_waitq; /* all threads sleep on this. This
+                                     * wait-queue is signalled when new
+                                     * incoming request arrives and when
+                                     * difficult reply has to be handled. */
 
-        struct list_head   srv_threads;         /* service thread list */
-        svc_handler_t      srv_handler;
-        svc_hpreq_handler_t srv_hpreq_handler;  /* hp request handler */
+        cfs_list_t       srv_threads;       /* service thread list */
+        svc_handler_t    srv_handler;
+        svc_hpreq_handler_t  srv_hpreq_handler; /* hp request handler */
 
         char *srv_name; /* only statically allocated strings here; we don't clean them */
         char *srv_thread_name; /* only statically allocated strings here; we don't clean them */
 
-        spinlock_t               srv_lock;
+        cfs_spinlock_t        srv_lock;
 
-        cfs_proc_dir_entry_t    *srv_procroot;
-        struct lprocfs_stats    *srv_stats;
+        cfs_proc_dir_entry_t *srv_procroot;
+        struct lprocfs_stats *srv_stats;
 
         /* List of free reply_states */
-        struct list_head         srv_free_rs_list;
+        cfs_list_t           srv_free_rs_list;
         /* waitq to run, when adding stuff to srv_free_rs_list */
-        cfs_waitq_t              srv_free_rs_waitq;
+        cfs_waitq_t          srv_free_rs_waitq;
 
         /*
          * Tags for lu_context associated with this thread, see struct
          * lu_context.
          */
-        __u32                    srv_ctx_tags;
+        __u32                srv_ctx_tags;
         /*
          * if non-NULL called during thread creation (ptlrpc_start_thread())
          * to initialize service specific per-thread state.
@@ -818,15 +822,15 @@ struct ptlrpcd_ctl {
         /**
          * Thread lock protecting structure fields.
          */
-        spinlock_t                  pc_lock;
+        cfs_spinlock_t              pc_lock;
         /**
          * Start completion.
          */
-        struct completion           pc_starting;
+        cfs_completion_t            pc_starting;
         /**
          * Stop completion.
          */
-        struct completion           pc_finishing;
+        cfs_completion_t            pc_finishing;
         /**
          * Thread requests set.
          */
@@ -915,9 +919,9 @@ static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
 
         LASSERT(desc != NULL);
 
-        spin_lock(&desc->bd_lock);
+        cfs_spin_lock(&desc->bd_lock);
         rc = desc->bd_network_rw;
-        spin_unlock(&desc->bd_lock);
+        cfs_spin_unlock(&desc->bd_lock);
         return rc;
 }
 
@@ -935,9 +939,9 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
         if (!desc)
                 return 0;
 
-        spin_lock(&desc->bd_lock);
+        cfs_spin_lock(&desc->bd_lock);
         rc = desc->bd_network_rw;
-        spin_unlock(&desc->bd_lock);
+        cfs_spin_unlock(&desc->bd_lock);
         return rc;
 }
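
Both bulk-active helpers take bd_lock only to sample bd_network_rw: the flag is flipped from network event callbacks, so even a single-word read is bracketed by the now-prefixed cfs_spin_lock()/cfs_spin_unlock() rather than performed racily. The equivalent userspace shape (bulk_like is an illustrative name):

    #include <pthread.h>

    struct bulk_like {
            pthread_spinlock_t lock;        /* bd_lock analogue */
            int network_rw;                 /* bd_network_rw analogue */
    };

    static int bulk_active(struct bulk_like *desc)
    {
            int rc;

            pthread_spin_lock(&desc->lock);   /* cfs_spin_lock() */
            rc = desc->network_rw;
            pthread_spin_unlock(&desc->lock); /* cfs_spin_unlock() */
            return rc;
    }
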
 
@@ -1199,12 +1203,12 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
         if (new_phase == RQ_PHASE_UNREGISTERING) {
                 req->rq_next_phase = req->rq_phase;
                 if (req->rq_import)
-                        atomic_inc(&req->rq_import->imp_unregistering);
+                        cfs_atomic_inc(&req->rq_import->imp_unregistering);
         }
 
         if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
                 if (req->rq_import)
-                        atomic_dec(&req->rq_import->imp_unregistering);
+                        cfs_atomic_dec(&req->rq_import->imp_unregistering);
         }
 
         DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
@@ -1245,14 +1249,14 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
 {
         int rc;
 
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
             req->rq_reply_deadline > cfs_time_current_sec()) {
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
                 return 1;
         }
         rc = req->rq_receiving_reply || req->rq_must_unlink;
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
         return rc;
 }
 
@@ -1268,15 +1272,15 @@ ptlrpc_client_wake_req(struct ptlrpc_request *req)
 static inline void
 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
 {
-        LASSERT(atomic_read(&rs->rs_refcount) > 0);
-        atomic_inc(&rs->rs_refcount);
+        LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
+        cfs_atomic_inc(&rs->rs_refcount);
 }
 
 static inline void
 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
 {
-        LASSERT(atomic_read(&rs->rs_refcount) > 0);
-        if (atomic_dec_and_test(&rs->rs_refcount))
+        LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&rs->rs_refcount))
                 lustre_free_reply_state(rs);
 }
 
@@ -1333,8 +1337,8 @@ int ptlrpc_pinger_add_import(struct obd_import *imp);
 int ptlrpc_pinger_del_import(struct obd_import *imp);
 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
                               timeout_cb_t cb, void *data,
-                              struct list_head *obd_list);
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
+                              cfs_list_t *obd_list);
+int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
                               enum timeout_event event);
 struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
 int ptlrpc_obd_ping(struct obd_device *obd);
index 5582637..a271ebf 100644 (file)
@@ -174,9 +174,9 @@ struct lustre_mem_dqblk {
 
 struct lustre_dquot {
         /** Hash list in memory, protect by dquot_hash_lock */
-        struct list_head dq_hash;
+        cfs_list_t dq_hash;
         /** Protect the data in lustre_dquot */
-        struct semaphore dq_sem;
+        cfs_semaphore_t dq_sem;
         /** Use count */
         int dq_refcnt;
         /** Pointer of quota info it belongs to */
@@ -196,7 +196,7 @@ struct lustre_dquot {
 };
 
 struct dquot_id {
-        struct list_head        di_link;
+        cfs_list_t              di_link;
         __u32                   di_id;
         __u32                   di_flag;
 };
@@ -221,7 +221,7 @@ int lustre_read_dquot(struct lustre_dquot *dquot);
 int lustre_commit_dquot(struct lustre_dquot *dquot);
 int lustre_init_quota_info(struct lustre_quota_info *lqi, int type);
 int lustre_get_qids(struct file *file, struct inode *inode, int type,
-                    struct list_head *list);
+                    cfs_list_t *list);
 int lustre_quota_convert(struct lustre_quota_info *lqi, int type);
 
 typedef int (*dqacq_handler_t) (struct obd_device * obd, struct qunit_data * qd,
@@ -330,7 +330,7 @@ struct lustre_quota_ctxt {
          */
         int           lqc_sync_blk;
         /** guard lqc_imp_valid now */
-        spinlock_t    lqc_lock;
+        cfs_spinlock_t lqc_lock;
         /**
          * when mds isn't connected, threads
          * on osts that send the quota reqs
@@ -341,7 +341,7 @@ struct lustre_quota_ctxt {
         /** lquota statistics */
         struct lprocfs_stats  *lqc_stats;
         /** the number of used hashed lqs */
-        atomic_t      lqc_lqs;
+        cfs_atomic_t  lqc_lqs;
         /** no lqs are in use */
         cfs_waitq_t   lqc_lqs_waitq;
 };
@@ -350,7 +350,7 @@ struct lustre_quota_ctxt {
 #define QUOTA_MASTER_UNREADY(qctxt) (qctxt)->lqc_setup = 0
 
 struct lustre_qunit_size {
-        struct hlist_node lqs_hash; /** the hash entry */
+        cfs_hlist_node_t lqs_hash; /** the hash entry */
         unsigned int lqs_id;        /** id of user/group */
         unsigned long lqs_flags;    /** 31st bit is QB_SET, 30th bit is QI_SET,
                                      * other bits are same as LQUOTA_FLAGS_*
@@ -372,10 +372,10 @@ struct lustre_qunit_size {
         long long lqs_ino_rec;
         /** when blocks are allocated/released, this value will record it */
         long long lqs_blk_rec;
-        atomic_t lqs_refcount;
+        cfs_atomic_t lqs_refcount;
         cfs_time_t lqs_last_bshrink;   /** time of last block shrink */
         cfs_time_t lqs_last_ishrink;   /** time of last inode shrink */
-        spinlock_t lqs_lock;
+        cfs_spinlock_t lqs_lock;
         unsigned long long lqs_key;    /** hash key */
         struct lustre_quota_ctxt *lqs_ctxt; /** quota ctxt */
 };
@@ -397,10 +397,10 @@ struct lustre_qunit_size {
 
 static inline void __lqs_getref(struct lustre_qunit_size *lqs)
 {
-        int count = atomic_inc_return(&lqs->lqs_refcount);
+        int count = cfs_atomic_inc_return(&lqs->lqs_refcount);
 
         if (count == 2) /* quota_create_lqs */
-                atomic_inc(&lqs->lqs_ctxt->lqc_lqs);
+                cfs_atomic_inc(&lqs->lqs_ctxt->lqc_lqs);
         CDEBUG(D_INFO, "lqs=%p refcount %d\n", lqs, count);
 }
 
@@ -411,13 +411,13 @@ static inline void lqs_getref(struct lustre_qunit_size *lqs)
 
 static inline void __lqs_putref(struct lustre_qunit_size *lqs)
 {
-        LASSERT(atomic_read(&lqs->lqs_refcount) > 0);
+        LASSERT(cfs_atomic_read(&lqs->lqs_refcount) > 0);
 
-        if (atomic_dec_return(&lqs->lqs_refcount) == 1)
-                if (atomic_dec_and_test(&lqs->lqs_ctxt->lqc_lqs))
+        if (cfs_atomic_dec_return(&lqs->lqs_refcount) == 1)
+                if (cfs_atomic_dec_and_test(&lqs->lqs_ctxt->lqc_lqs))
                         cfs_waitq_signal(&lqs->lqs_ctxt->lqc_lqs_waitq);
         CDEBUG(D_INFO, "lqs=%p refcount %d\n",
-               lqs, atomic_read(&lqs->lqs_refcount));
+               lqs, cfs_atomic_read(&lqs->lqs_refcount));
 }
 
 static inline void lqs_putref(struct lustre_qunit_size *lqs)
@@ -427,7 +427,7 @@ static inline void lqs_putref(struct lustre_qunit_size *lqs)
 
 static inline void lqs_initref(struct lustre_qunit_size *lqs)
 {
-        atomic_set(&lqs->lqs_refcount, 0);
+        cfs_atomic_set(&lqs->lqs_refcount, 0);
 }
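
__lqs_putref() encodes a two-level count: every lustre_qunit_size holds one cached reference, so a drop to exactly 1 means no outside user remains, and only then is the per-context lqc_lqs counter decremented, signalling lqc_lqs_waitq once every lqs is idle. A sketch of that cascade with C11 atomics and a condition variable standing in for the waitqueue (hypothetical names throughout):

    #include <pthread.h>
    #include <stdatomic.h>

    struct ctxt_like {
            atomic_int      nr_active;      /* lqc_lqs analogue */
            pthread_mutex_t mtx;
            pthread_cond_t  idle;           /* lqc_lqs_waitq analogue */
    };

    struct lqs_like {
            atomic_int        refcount;     /* lqs_refcount analogue */
            struct ctxt_like *ctxt;
    };

    static void lqs_like_put(struct lqs_like *lqs)
    {
            /* previous value 2 => new value 1: only the cached ref is left */
            if (atomic_fetch_sub(&lqs->refcount, 1) == 2 &&
                /* previous value 1 => new value 0: last active lqs */
                atomic_fetch_sub(&lqs->ctxt->nr_active, 1) == 1) {
                    pthread_mutex_lock(&lqs->ctxt->mtx);
                    pthread_cond_broadcast(&lqs->ctxt->idle);
                    pthread_mutex_unlock(&lqs->ctxt->mtx);
            }
    }
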
 
 #else
@@ -464,7 +464,7 @@ struct quotacheck_thread_args {
         struct obd_device   *qta_obd;   /** obd device */
         struct obd_quotactl  qta_oqctl; /** obd_quotactl args */
         struct super_block  *qta_sb;    /** obd super block */
-        struct semaphore    *qta_sem;   /** obt_quotachecking */
+        cfs_semaphore_t     *qta_sem;   /** obt_quotachecking */
 };
 
 struct obd_trans_info;
index 1866a46..691c0c0 100644 (file)
@@ -375,17 +375,17 @@ struct ptlrpc_ctx_ops {
                                         PTLRPC_CTX_ERROR)
 
 struct ptlrpc_cli_ctx {
-        struct hlist_node       cc_cache;      /* linked into ctx cache */
-        atomic_t                cc_refcount;
+        cfs_hlist_node_t        cc_cache;      /* linked into ctx cache */
+        cfs_atomic_t            cc_refcount;
         struct ptlrpc_sec      *cc_sec;
         struct ptlrpc_ctx_ops  *cc_ops;
         cfs_time_t              cc_expire;     /* in seconds */
         unsigned int            cc_early_expire:1;
         unsigned long           cc_flags;
         struct vfs_cred         cc_vcred;
-        spinlock_t              cc_lock;
-        struct list_head        cc_req_list;   /* waiting reqs linked here */
-        struct list_head        cc_gc_chain;   /* linked to gc chain */
+        cfs_spinlock_t          cc_lock;
+        cfs_list_t              cc_req_list;   /* waiting reqs linked here */
+        cfs_list_t              cc_gc_chain;   /* linked to gc chain */
 };
 
 struct ptlrpc_sec_cops {
@@ -473,7 +473,7 @@ struct ptlrpc_sec_sops {
 };
 
 struct ptlrpc_sec_policy {
-        struct module                  *sp_owner;
+        cfs_module_t                   *sp_owner;
         char                           *sp_name;
         __u16                           sp_policy; /* policy number */
         struct ptlrpc_sec_cops         *sp_cops;   /* client ops */
@@ -488,18 +488,18 @@ struct ptlrpc_sec_policy {
 
 struct ptlrpc_sec {
         struct ptlrpc_sec_policy       *ps_policy;
-        atomic_t                        ps_refcount;
-        atomic_t                        ps_nctx;        /* statistic only */
+        cfs_atomic_t                    ps_refcount;
+        cfs_atomic_t                    ps_nctx;        /* statistic only */
         int                             ps_id;          /* unique identifier */
         struct sptlrpc_flavor           ps_flvr;        /* flavor */
         enum lustre_sec_part            ps_part;
         unsigned int                    ps_dying:1;
         struct obd_import              *ps_import;      /* owning import */
-        spinlock_t                      ps_lock;        /* protect ccache */
+        cfs_spinlock_t                  ps_lock;        /* protect ccache */
         /*
          * garbage collection
          */
-        struct list_head                ps_gc_list;
+        cfs_list_t                      ps_gc_list;
         cfs_time_t                      ps_gc_interval; /* in seconds */
         cfs_time_t                      ps_gc_next;     /* in seconds */
 };
@@ -516,7 +516,7 @@ static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
 
 
 struct ptlrpc_svc_ctx {
-        atomic_t                        sc_refcount;
+        cfs_atomic_t                    sc_refcount;
         struct ptlrpc_sec_policy       *sc_policy;
 };
 
@@ -618,14 +618,14 @@ char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
 static inline
 struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
 {
-        __module_get(policy->sp_owner);
+        __cfs_module_get(policy->sp_owner);
         return policy;
 }
 
 static inline
 void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
 {
-        module_put(policy->sp_owner);
+        cfs_module_put(policy->sp_owner);
 }
 
 /*
index 1eca99d..f9c7ded 100644 (file)
@@ -70,15 +70,15 @@ struct md_identity {
         struct upcall_cache_entry *mi_uc_entry;
         uid_t                      mi_uid;
         gid_t                      mi_gid;
-        struct group_info         *mi_ginfo;
+        cfs_group_info_t          *mi_ginfo;
         int                        mi_nperms;
         struct md_perm            *mi_perms;
 };
 
 struct upcall_cache_entry {
-        struct list_head        ue_hash;
+        cfs_list_t              ue_hash;
         __u64                   ue_key;
-        atomic_t                ue_refcount;
+        cfs_atomic_t            ue_refcount;
         int                     ue_flags;
         cfs_waitq_t             ue_waitq;
         cfs_time_t              ue_acquire_expire;
@@ -111,9 +111,9 @@ struct upcall_cache_ops {
 };
 
 struct upcall_cache {
-        struct list_head        uc_hashtable[UC_CACHE_HASH_SIZE];
-        spinlock_t              uc_lock;
-        rwlock_t                uc_upcall_rwlock;
+        cfs_list_t              uc_hashtable[UC_CACHE_HASH_SIZE];
+        cfs_spinlock_t          uc_lock;
+        cfs_rwlock_t            uc_upcall_rwlock;
 
         char                    uc_name[40];            /* for upcall */
         char                    uc_upcall[UC_CACHE_UPCALL_MAXPATH];
index 5afe871..df6d505 100644 (file)
@@ -84,7 +84,7 @@ struct md_ucred {
         __u32               mu_suppgids[2];
         cfs_cap_t           mu_cap;
         __u32               mu_umask;
-        struct group_info  *mu_ginfo;
+        cfs_group_info_t   *mu_ginfo;
         struct md_identity *mu_identity;
 };
 
@@ -344,7 +344,7 @@ struct md_device_operations {
                                int *md_size, int *cookie_size);
 
         int (*mdo_statfs)(const struct lu_env *env, struct md_device *m,
-                          struct kstatfs *sfs);
+                          cfs_kstatfs_t *sfs);
 
         int (*mdo_init_capa_ctxt)(const struct lu_env *env, struct md_device *m,
                                   int mode, unsigned long timeout, __u32 alg,
@@ -447,7 +447,7 @@ enum md_upcall_event {
 struct md_upcall {
         /** this lock protects the upcall against removal while in use:
          * read lock for using the upcall, write lock for init/fini */
-        struct rw_semaphore     mu_upcall_sem;
+        cfs_rw_semaphore_t      mu_upcall_sem;
         /** device to call, upper layer normally */
         struct md_device       *mu_upcall_dev;
         /** upcall function */
@@ -463,38 +463,38 @@ struct md_device {
 
 static inline void md_upcall_init(struct md_device *m, void *upcl)
 {
-        init_rwsem(&m->md_upcall.mu_upcall_sem);
+        cfs_init_rwsem(&m->md_upcall.mu_upcall_sem);
         m->md_upcall.mu_upcall_dev = NULL;
         m->md_upcall.mu_upcall = upcl;
 }
 
 static inline void md_upcall_dev_set(struct md_device *m, struct md_device *up)
 {
-        down_write(&m->md_upcall.mu_upcall_sem);
+        cfs_down_write(&m->md_upcall.mu_upcall_sem);
         m->md_upcall.mu_upcall_dev = up;
-        up_write(&m->md_upcall.mu_upcall_sem);
+        cfs_up_write(&m->md_upcall.mu_upcall_sem);
 }
 
 static inline void md_upcall_fini(struct md_device *m)
 {
-        down_write(&m->md_upcall.mu_upcall_sem);
+        cfs_down_write(&m->md_upcall.mu_upcall_sem);
         m->md_upcall.mu_upcall_dev = NULL;
         m->md_upcall.mu_upcall = NULL;
-        up_write(&m->md_upcall.mu_upcall_sem);
+        cfs_up_write(&m->md_upcall.mu_upcall_sem);
 }
 
 static inline int md_do_upcall(const struct lu_env *env, struct md_device *m,
                                enum md_upcall_event ev, void *data)
 {
         int rc = 0;
-        down_read(&m->md_upcall.mu_upcall_sem);
+        cfs_down_read(&m->md_upcall.mu_upcall_sem);
         if (m->md_upcall.mu_upcall_dev != NULL &&
             m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall != NULL) {
                 rc = m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall(env,
                                               m->md_upcall.mu_upcall_dev,
                                               ev, data);
         }
-        up_read(&m->md_upcall.mu_upcall_sem);
+        cfs_up_read(&m->md_upcall.mu_upcall_sem);
         return rc;
 }
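
md_upcall follows a plain reader/writer protocol: md_upcall_dev_set() and md_upcall_fini() take mu_upcall_sem for write to swap the pointers, while the hot path md_do_upcall() takes it only for read, so concurrent upcalls never serialize against each other. The same shape with pthread_rwlock_t standing in for cfs_rw_semaphore_t (illustrative names, not the Lustre API):

    #include <pthread.h>
    #include <stddef.h>

    struct upcall_like {
            pthread_rwlock_t sem;           /* mu_upcall_sem analogue */
            int (*upcall)(void *data);      /* mu_upcall analogue */
    };

    static void upcall_fini(struct upcall_like *u)
    {
            pthread_rwlock_wrlock(&u->sem); /* cfs_down_write() */
            u->upcall = NULL;
            pthread_rwlock_unlock(&u->sem); /* cfs_up_write() */
    }

    static int do_upcall(struct upcall_like *u, void *data)
    {
            int rc = 0;

            pthread_rwlock_rdlock(&u->sem); /* cfs_down_read() */
            if (u->upcall != NULL)
                    rc = u->upcall(data);
            pthread_rwlock_unlock(&u->sem); /* cfs_up_read() */
            return rc;
    }
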
 
@@ -876,8 +876,8 @@ struct lu_local_obj_desc {
         const char                      *llod_name;
         __u32                            llod_oid;
         int                              llod_is_index;
-        const struct dt_index_features llod_feat;
-        struct list_head                 llod_linkage;
+        const struct dt_index_features  *llod_feat;
+        cfs_list_t                       llod_linkage;
 };
 
 struct md_object *llo_store_resolve(const struct lu_env *env,
index cdee0f3..05db5f8 100644 (file)
@@ -75,9 +75,9 @@
 
 /* this is really local to the OSC */
 struct loi_oap_pages {
-        struct list_head        lop_pending;
-        struct list_head        lop_urgent;
-        struct list_head        lop_pending_group;
+        cfs_list_t              lop_pending;
+        cfs_list_t              lop_urgent;
+        cfs_list_t              lop_pending_group;
         int                     lop_num_pending;
 };
 
@@ -96,10 +96,10 @@ struct lov_oinfo {                 /* per-stripe data structure */
         /* used by the osc to keep track of what objects to build into rpcs */
         struct loi_oap_pages loi_read_lop;
         struct loi_oap_pages loi_write_lop;
-        struct list_head loi_ready_item;
-        struct list_head loi_hp_ready_item;
-        struct list_head loi_write_item;
-        struct list_head loi_read_item;
+        cfs_list_t loi_ready_item;
+        cfs_list_t loi_hp_ready_item;
+        cfs_list_t loi_write_item;
+        cfs_list_t loi_read_item;
 
         unsigned long loi_kms_valid:1;
         __u64 loi_kms;             /* known minimum size */
@@ -128,7 +128,7 @@ static inline void loi_init(struct lov_oinfo *loi)
 }
 
 struct lov_stripe_md {
-        spinlock_t       lsm_lock;
+        cfs_spinlock_t   lsm_lock;
         pid_t            lsm_lock_owner; /* debugging */
 
         struct {
@@ -206,14 +206,14 @@ void lov_stripe_lock(struct lov_stripe_md *md);
 void lov_stripe_unlock(struct lov_stripe_md *md);
 
 struct obd_type {
-        struct list_head typ_chain;
+        cfs_list_t typ_chain;
         struct obd_ops *typ_dt_ops;
         struct md_ops *typ_md_ops;
         cfs_proc_dir_entry_t *typ_procroot;
         char *typ_name;
         int  typ_refcnt;
         struct lu_device_type *typ_lu;
-        spinlock_t obd_type_lock;
+        cfs_spinlock_t obd_type_lock;
 };
 
 struct brw_page {
@@ -235,19 +235,19 @@ struct obd_device_target {
         /** server data in last_rcvd file */
         struct lr_server_data    *obt_lsd;
         /** Lock protecting client bitmap */
-        spinlock_t                obt_client_bitmap_lock;
+        cfs_spinlock_t            obt_client_bitmap_lock;
         /** Bitmap of known clients */
         unsigned long            *obt_client_bitmap;
         /** Server last transaction number */
         __u64                     obt_last_transno;
         /** Lock protecting last transaction number */
-        spinlock_t                obt_translock;
+        cfs_spinlock_t            obt_translock;
         /** Number of mounts */
         __u64                     obt_mount_count;
-        struct semaphore          obt_quotachecking;
+        cfs_semaphore_t           obt_quotachecking;
         struct lustre_quota_ctxt  obt_qctxt;
         lustre_quota_version_t    obt_qfmt;
-        struct rw_semaphore       obt_rwsem;
+        cfs_rw_semaphore_t        obt_rwsem;
         struct vfsmount          *obt_vfsmnt;
         struct file              *obt_health_check_filp;
 };
@@ -294,17 +294,17 @@ struct filter_obd {
         cfs_dentry_t        *fo_dentry_O;
         cfs_dentry_t       **fo_dentry_O_groups;
         struct filter_subdirs   *fo_dentry_O_sub;
-        struct semaphore     fo_init_lock;      /* group initialization lock */
+        cfs_semaphore_t      fo_init_lock;      /* group initialization lock */
         int                  fo_committed_group;
 
 #define CLIENT_QUOTA_DEFAULT_RESENDS 10
 
-        spinlock_t           fo_objidlock;      /* protect fo_lastobjid */
+        cfs_spinlock_t       fo_objidlock;      /* protect fo_lastobjid */
 
         unsigned long        fo_destroys_in_progress;
-        struct semaphore     fo_create_locks[FILTER_SUBDIR_COUNT];
+        cfs_semaphore_t      fo_create_locks[FILTER_SUBDIR_COUNT];
 
-        struct list_head     fo_export_list;
+        cfs_list_t fo_export_list;
         int                  fo_subdir_count;
 
         obd_size             fo_tot_dirty;      /* protected by obd_osfs_lock */
@@ -325,10 +325,10 @@ struct filter_obd {
         __u64               *fo_last_objids; /* last created objid for groups,
                                               * protected by fo_objidlock */
 
-        struct semaphore     fo_alloc_lock;
+        cfs_semaphore_t      fo_alloc_lock;
 
-        atomic_t             fo_r_in_flight;
-        atomic_t             fo_w_in_flight;
+        cfs_atomic_t         fo_r_in_flight;
+        cfs_atomic_t         fo_w_in_flight;
 
         /*
          * per-filter pool of kiobuf's allocated by filter_common_setup() and
@@ -345,25 +345,25 @@ struct filter_obd {
         struct filter_iobuf    **fo_iobuf_pool;
         int                      fo_iobuf_count;
 
-        struct list_head         fo_llog_list;
-        spinlock_t               fo_llog_list_lock;
+        cfs_list_t               fo_llog_list;
+        cfs_spinlock_t           fo_llog_list_lock;
 
         struct brw_stats         fo_filter_stats;
         struct lustre_quota_ctxt fo_quota_ctxt;
-        spinlock_t               fo_quotacheck_lock;
-        atomic_t                 fo_quotachecking;
+        cfs_spinlock_t           fo_quotacheck_lock;
+        cfs_atomic_t             fo_quotachecking;
 
         int                      fo_fmd_max_num; /* per exp filter_mod_data */
         int                      fo_fmd_max_age; /* jiffies to fmd expiry */
 
         /* sptlrpc stuff */
-        rwlock_t                 fo_sptlrpc_lock;
+        cfs_rwlock_t             fo_sptlrpc_lock;
         struct sptlrpc_rule_set  fo_sptlrpc_rset;
 
         /* capability related */
         unsigned int             fo_fl_oss_capa;
-        struct list_head         fo_capa_keys;
-        struct hlist_head       *fo_capa_hash;
+        cfs_list_t               fo_capa_keys;
+        cfs_hlist_head_t        *fo_capa_hash;
         struct llog_commit_master *fo_lcm;
         int                      fo_sec_level;
 };
@@ -380,8 +380,8 @@ struct timeout_item {
         cfs_time_t         ti_timeout;
         timeout_cb_t       ti_cb;
         void              *ti_cb_data;
-        struct list_head   ti_obd_list;
-        struct list_head   ti_chain;
+        cfs_list_t         ti_obd_list;
+        cfs_list_t         ti_chain;
 };
 
 #define OSC_MAX_RIF_DEFAULT       8
@@ -396,7 +396,7 @@ struct timeout_item {
 struct mdc_rpc_lock;
 struct obd_import;
 struct client_obd {
-        struct rw_semaphore      cl_sem;
+        cfs_rw_semaphore_t       cl_sem;
         struct obd_uuid          cl_target_uuid;
         struct obd_import       *cl_import; /* ptlrpc connection state */
         int                      cl_conn_count;
@@ -419,10 +419,10 @@ struct client_obd {
         long                     cl_dirty_transit; /* dirty synchronous */
         long                     cl_avail_grant;   /* bytes of credit for ost */
         long                     cl_lost_grant;    /* lost credits (trunc) */
-        struct list_head         cl_cache_waiters; /* waiting for cache/grant */
+        cfs_list_t               cl_cache_waiters; /* waiting for cache/grant */
         cfs_time_t               cl_next_shrink_grant;   /* jiffies */
-        struct list_head         cl_grant_shrink_list;  /* Timeout event list */
-        struct semaphore         cl_grant_sem;   /*grant shrink list semaphore*/
+        cfs_list_t               cl_grant_shrink_list;  /* Timeout event list */
+        cfs_semaphore_t          cl_grant_sem;   /*grant shrink list cfs_semaphore*/
         int                      cl_grant_shrink_interval; /* seconds */
 
         /* keep track of objects that have lois that contain pages which
@@ -444,10 +444,10 @@ struct client_obd {
          * client_obd_list_lock_{init,done}() functions.
          */
         client_obd_lock_t        cl_loi_list_lock;
-        struct list_head         cl_loi_ready_list;
-        struct list_head         cl_loi_hp_ready_list;
-        struct list_head         cl_loi_write_list;
-        struct list_head         cl_loi_read_list;
+        cfs_list_t               cl_loi_ready_list;
+        cfs_list_t               cl_loi_hp_ready_list;
+        cfs_list_t               cl_loi_write_list;
+        cfs_list_t               cl_loi_read_list;
         int                      cl_r_in_flight;
         int                      cl_w_in_flight;
         /* just a sum of the loi/lop pending numbers to be exported by /proc */
@@ -463,7 +463,7 @@ struct client_obd {
         struct obd_histogram     cl_write_offset_hist;
 
         /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
-        atomic_t                 cl_destroy_in_flight;
+        cfs_atomic_t             cl_destroy_in_flight;
         cfs_waitq_t              cl_destroy_waitq;
 
         struct mdc_rpc_lock     *cl_rpc_lock;
@@ -472,10 +472,10 @@ struct client_obd {
         struct osc_creator       cl_oscc;
 
         /* mgc datastruct */
-        struct semaphore         cl_mgc_sem;
+        cfs_semaphore_t          cl_mgc_sem;
         struct vfsmount         *cl_mgc_vfsmnt;
         struct dentry           *cl_mgc_configs_dir;
-        atomic_t                 cl_mgc_refcount;
+        cfs_atomic_t             cl_mgc_refcount;
         struct obd_export       *cl_mgc_mgsexp;
 
         /* checksumming for data sent over the network */
@@ -494,8 +494,8 @@ struct client_obd {
         /* sequence manager */
         struct lu_client_seq    *cl_seq;
 
-        atomic_t                 cl_resends; /* resend count */
-        atomic_t                 cl_quota_resends; /* quota related resend count */
+        cfs_atomic_t             cl_resends; /* resend count */
+        cfs_atomic_t             cl_quota_resends; /* quota related resend count */
 };
 #define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
 
@@ -507,8 +507,8 @@ struct mgs_obd {
         struct super_block              *mgs_sb;
         struct dentry                   *mgs_configs_dir;
         struct dentry                   *mgs_fid_de;
-        struct list_head                 mgs_fs_db_list;
-        struct semaphore                 mgs_sem;
+        cfs_list_t                       mgs_fs_db_list;
+        cfs_semaphore_t                  mgs_sem;
         cfs_proc_dir_entry_t            *mgs_proc_live;
 };
 
@@ -523,7 +523,7 @@ struct mds_obd {
         int                              mds_max_cookiesize;
         __u64                            mds_io_epoch;
         unsigned long                    mds_atime_diff;
-        struct semaphore                 mds_epoch_sem;
+        cfs_semaphore_t                  mds_epoch_sem;
         struct ll_fid                    mds_rootfid;
         cfs_dentry_t                    *mds_pending_dir;
         cfs_dentry_t                    *mds_logs_dir;
@@ -537,7 +537,7 @@ struct mds_obd {
         __u32                            mds_id;
 
         /* mark pages dirty for write. */
-        bitmap_t                        *mds_lov_page_dirty;
+        cfs_bitmap_t                    *mds_lov_page_dirty;
         /* array for store pages with obd_id */
         void                           **mds_lov_page_array;
         /* file for store objid */
@@ -549,8 +549,8 @@ struct mds_obd {
 
 
         struct lustre_quota_info         mds_quota_info;
-        struct semaphore                 mds_qonoff_sem;
-        struct semaphore                 mds_health_sem;
+        cfs_semaphore_t                  mds_qonoff_sem;
+        cfs_semaphore_t                  mds_health_sem;
         unsigned long                    mds_fl_user_xattr:1,
                                          mds_fl_acl:1,
                                          mds_evict_ost_nids:1,
@@ -564,7 +564,7 @@ struct mds_obd {
 
         /* for capability keys update */
         struct lustre_capa_key          *mds_capa_keys;
-        struct rw_semaphore              mds_notify_lock;
+        cfs_rw_semaphore_t               mds_notify_lock;
 };
 
 #define mds_transno_lock         mds_obt.obt_translock
@@ -596,34 +596,34 @@ struct obd_id_info {
 
 struct echo_obd {
         struct obdo          eo_oa;
-        spinlock_t           eo_lock;
+        cfs_spinlock_t       eo_lock;
         __u64                eo_lastino;
         struct lustre_handle eo_nl_lock;
-        atomic_t             eo_prep;
+        cfs_atomic_t         eo_prep;
 };
 
 struct ost_obd {
         struct ptlrpc_service *ost_service;
         struct ptlrpc_service *ost_create_service;
         struct ptlrpc_service *ost_io_service;
-        struct semaphore       ost_health_sem;
+        cfs_semaphore_t        ost_health_sem;
 };
 
 struct echo_client_obd {
         struct obd_export   *ec_exp;   /* the local connection to osc/lov */
-        spinlock_t           ec_lock;
-        struct list_head     ec_objects;
-        struct list_head     ec_locks;
+        cfs_spinlock_t       ec_lock;
+        cfs_list_t           ec_objects;
+        cfs_list_t           ec_locks;
         int                  ec_nstripes;
         __u64                ec_unique;
 };
 
 struct lov_qos_oss {
         struct obd_uuid     lqo_uuid;       /* ptlrpc's c_remote_uuid */
-        struct list_head    lqo_oss_list;   /* link to lov_qos */
+        cfs_list_t          lqo_oss_list;   /* link to lov_qos */
         __u64               lqo_bavail;     /* total bytes avail on OSS */
         __u64               lqo_penalty;    /* current penalty */
-        __u64               lqo_penalty_per_obj; /* penalty decrease every obj*/
+        __u64               lqo_penalty_per_obj;/* penalty decrease every obj*/
         time_t              lqo_used;       /* last used time, seconds */
         __u32               lqo_ost_count;  /* number of osts on this oss */
 };
@@ -639,11 +639,11 @@ struct ltd_qos {
 
 /* Generic subset of OSTs */
 struct ost_pool {
-        __u32              *op_array;        /* array of index of
-                                                lov_obd->lov_tgts */
-        unsigned int        op_count;        /* number of OSTs in the array */
-        unsigned int        op_size;         /* allocated size of lp_array */
-        struct rw_semaphore op_rw_sem;       /* to protect ost_pool use */
+        __u32              *op_array;      /* array of indices into
+                                              lov_obd->lov_tgts */
+        unsigned int        op_count;      /* number of OSTs in the array */
+        unsigned int        op_size;       /* allocated size of op_array */
+        cfs_rw_semaphore_t  op_rw_sem;     /* to protect ost_pool use */
 };
 
 /* Round-robin allocator data */
@@ -661,8 +661,8 @@ struct lov_statfs_data {
 };
 /* Stripe placement optimization */
 struct lov_qos {
-        struct list_head    lq_oss_list;    /* list of OSSs that targets use */
-        struct rw_semaphore lq_rw_sem;
+        cfs_list_t          lq_oss_list; /* list of OSSs that targets use */
+        cfs_rw_semaphore_t  lq_rw_sem;
         __u32               lq_active_oss_count;
         unsigned int        lq_prio_free;   /* priority for free space */
         unsigned int        lq_threshold_rr;/* priority for rr */
@@ -671,7 +671,8 @@ struct lov_qos {
                             lq_same_space:1,/* the ost's all have approx.
                                                the same space avail */
                             lq_reset:1,     /* zero current penalties */
-                            lq_statfs_in_progress:1; /* statfs op in progress */
+                            lq_statfs_in_progress:1; /* statfs op in
+                                                        progress */
         /* qos statfs data */
         struct lov_statfs_data *lq_statfs_data;
         cfs_waitq_t         lq_statfs_waitq; /* waitqueue to notify statfs
@@ -679,7 +680,7 @@ struct lov_qos {
 };
 
 struct lov_tgt_desc {
-        struct list_head    ltd_kill;
+        cfs_list_t          ltd_kill;
         struct obd_uuid     ltd_uuid;
         struct obd_device  *ltd_obd;
         struct obd_export  *ltd_exp;
@@ -687,7 +688,7 @@ struct lov_tgt_desc {
         __u32               ltd_gen;
         __u32               ltd_index;   /* index in lov_obd->tgts */
         unsigned long       ltd_active:1,/* is this target up for requests */
-                            ltd_activate:1,/* should this target be activated */
+                            ltd_activate:1,/* should  target be activated */
                             ltd_reap:1;  /* should this target be deleted */
 };
 
@@ -701,10 +702,10 @@ struct lov_tgt_desc {
 struct pool_desc {
         char                  pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
         struct ost_pool       pool_obds;              /* pool members */
-        atomic_t              pool_refcount;          /* pool ref. counter */
+        cfs_atomic_t          pool_refcount;          /* pool ref. counter */
         struct lov_qos_rr     pool_rr;                /* round robin qos */
-        struct hlist_node     pool_hash;              /* access by poolname */
-        struct list_head      pool_list;              /* serial access */
+        cfs_hlist_node_t      pool_hash;              /* access by poolname */
+        cfs_list_t            pool_list;              /* serial access */
         cfs_proc_dir_entry_t *pool_proc_entry;        /* file in /proc */
         struct lov_obd       *pool_lov;               /* lov obd to which this
                                                          pool belong */
@@ -715,10 +716,10 @@ struct lov_obd {
         struct lov_tgt_desc   **lov_tgts;              /* sparse array */
         struct ost_pool         lov_packed;            /* all OSTs in a packed
                                                           array */
-        struct semaphore        lov_lock;
+        cfs_semaphore_t         lov_lock;
         struct obd_connect_data lov_ocd;
         struct lov_qos          lov_qos;               /* qos info per lov */
-        atomic_t                lov_refcount;
+        cfs_atomic_t            lov_refcount;
         __u32                   lov_tgt_count;         /* how many OBD's */
         __u32                   lov_active_tgt_count;  /* how many active */
         __u32                   lov_death_row;/* tgts scheduled to be deleted */
@@ -726,7 +727,7 @@ struct lov_obd {
         int                     lov_connects;
         int                     lov_pool_count;
         cfs_hash_t             *lov_pools_hash_body; /* used for key access */
-        struct list_head        lov_pool_list; /* used for sequential access */
+        cfs_list_t              lov_pool_list; /* used for sequential access */
         cfs_proc_dir_entry_t   *lov_pool_proc_entry;
         enum lustre_sec_part    lov_sp_me;
 };
@@ -734,9 +735,9 @@ struct lov_obd {
 struct lmv_tgt_desc {
         struct obd_uuid         ltd_uuid;
         struct obd_export      *ltd_exp;
-        int                     ltd_active;   /* is this target up for requests */
+        int                     ltd_active; /* is this target up for requests */
         int                     ltd_idx;
-        struct semaphore        ltd_fid_sem;
+        cfs_semaphore_t         ltd_fid_sem;
 };
 
 enum placement_policy {
@@ -751,7 +752,7 @@ typedef enum placement_policy placement_policy_t;
 struct lmv_obd {
         int                     refcount;
         struct lu_client_fld    lmv_fld;
-        spinlock_t              lmv_lock;
+        cfs_spinlock_t          lmv_lock;
         placement_policy_t      lmv_placement;
         struct lmv_desc         desc;
         struct obd_uuid         cluuid;
@@ -762,7 +763,7 @@ struct lmv_obd {
         int                     max_def_easize;
         int                     max_cookiesize;
         int                     server_timeout;
-        struct semaphore        init_sem;
+        cfs_semaphore_t         init_sem;
 
         struct lmv_tgt_desc     *tgts;
         int                     tgts_size;
@@ -940,8 +941,8 @@ struct obd_notify_upcall {
 struct target_recovery_data {
         svc_handler_t     trd_recovery_handler;
         pid_t             trd_processing_task;
-        struct completion trd_starting;
-        struct completion trd_finishing;
+        cfs_completion_t  trd_starting;
+        cfs_completion_t  trd_finishing;
 };
 
 enum filter_groups {
@@ -1001,14 +1002,14 @@ static inline int obdo_is_mds(struct obdo *oa)
 }
 
 struct obd_llog_group {
-        struct list_head   olg_list;
+        cfs_list_t         olg_list;
         int                olg_group;
         struct llog_ctxt  *olg_ctxts[LLOG_MAX_CTXTS];
         cfs_waitq_t        olg_waitq;
-        spinlock_t         olg_lock;
+        cfs_spinlock_t     olg_lock;
         struct obd_export *olg_exp;
         int                olg_initializing;
-        struct semaphore   olg_cat_processing;
+        cfs_semaphore_t    olg_cat_processing;
 };
 
 /* corresponds to one of the obd's */
@@ -1051,39 +1052,39 @@ struct obd_device {
         cfs_hash_t             *obd_nid_hash;
         /* nid stats body */
         cfs_hash_t             *obd_nid_stats_hash;
-        struct list_head        obd_nid_stats;
-        atomic_t                obd_refcount;
+        cfs_list_t              obd_nid_stats;
+        cfs_atomic_t            obd_refcount;
         cfs_waitq_t             obd_refcount_waitq;
-        struct list_head        obd_exports;
-        struct list_head        obd_unlinked_exports;
-        struct list_head        obd_delayed_exports;
+        cfs_list_t              obd_exports;
+        cfs_list_t              obd_unlinked_exports;
+        cfs_list_t              obd_delayed_exports;
         int                     obd_num_exports;
-        spinlock_t              obd_nid_lock;
+        cfs_spinlock_t          obd_nid_lock;
         struct ldlm_namespace  *obd_namespace;
         struct ptlrpc_client    obd_ldlm_client; /* XXX OST/MDS only */
         /* a spinlock is OK for what we do now, may need a semaphore later */
-        spinlock_t              obd_dev_lock;
-        struct semaphore        obd_dev_sem;
+        cfs_spinlock_t          obd_dev_lock;
+        cfs_semaphore_t         obd_dev_sem;
         __u64                   obd_last_committed;
         struct fsfilt_operations *obd_fsops;
-        spinlock_t              obd_osfs_lock;
+        cfs_spinlock_t          obd_osfs_lock;
         struct obd_statfs       obd_osfs;       /* locked by obd_osfs_lock */
         __u64                   obd_osfs_age;
         struct lvfs_run_ctxt    obd_lvfs_ctxt;
         struct obd_llog_group   obd_olg; /* default llog group */
-        struct obd_device       *obd_observer;
-        struct rw_semaphore     obd_observer_link_sem;
+        struct obd_device      *obd_observer;
+        cfs_rw_semaphore_t      obd_observer_link_sem;
         struct obd_notify_upcall obd_upcall;
         struct obd_export       *obd_self_export;
         /* list of exports in LRU order, for ping evictor, with obd_dev_lock */
-        struct list_head        obd_exports_timed;
+        cfs_list_t              obd_exports_timed;
         time_t                  obd_eviction_timer; /* for ping evictor */
 
         int                              obd_max_recoverable_clients;
         int                              obd_connected_clients;
         int                              obd_stale_clients;
         int                              obd_delayed_clients;
-        spinlock_t                       obd_processing_task_lock; /* BH lock (timer) */
+        cfs_spinlock_t                   obd_processing_task_lock; /* BH lock (timer) */
         __u64                            obd_next_recovery_transno;
         int                              obd_replayed_requests;
         int                              obd_requests_queued_for_recovery;
@@ -1097,11 +1098,11 @@ struct obd_device {
         /* new recovery stuff from CMD2 */
         struct target_recovery_data      obd_recovery_data;
         int                              obd_replayed_locks;
-        atomic_t                         obd_req_replay_clients;
-        atomic_t                         obd_lock_replay_clients;
-        struct list_head                 obd_req_replay_queue;
-        struct list_head                 obd_lock_replay_queue;
-        struct list_head                 obd_final_req_queue;
+        cfs_atomic_t                     obd_req_replay_clients;
+        cfs_atomic_t                     obd_lock_replay_clients;
+        cfs_list_t                       obd_req_replay_queue;
+        cfs_list_t                       obd_lock_replay_queue;
+        cfs_list_t                       obd_final_req_queue;
         int                              obd_recovery_stage;
 
         union {
@@ -1127,14 +1128,14 @@ struct obd_device {
         cfs_proc_dir_entry_t  *obd_proc_exports_entry;
         cfs_proc_dir_entry_t  *obd_svc_procroot;
         struct lprocfs_stats  *obd_svc_stats;
-        atomic_t               obd_evict_inprogress;
+        cfs_atomic_t           obd_evict_inprogress;
         cfs_waitq_t            obd_evict_inprogress_waitq;
-        struct list_head       obd_evict_list; /* protected with pet_lock */
+        cfs_list_t             obd_evict_list; /* protected with pet_lock */
 
         /**
          * Ldlm pool part. Save last calculated SLV and Limit.
          */
-        rwlock_t               obd_pool_lock;
+        cfs_rwlock_t           obd_pool_lock;
         int                    obd_pool_limit;
         __u64                  obd_pool_slv;
 
@@ -1263,7 +1264,7 @@ struct md_enqueue_info {
 };
 
 struct obd_ops {
-        struct module *o_owner;
+        cfs_module_t *o_owner;
         int (*o_iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
                            void *karg, void *uarg);
         int (*o_get_info)(struct obd_export *, __u32 keylen, void *key,
@@ -1467,7 +1468,7 @@ struct md_open_data {
         struct obd_client_handle *mod_och;
         struct ptlrpc_request    *mod_open_req;
         struct ptlrpc_request    *mod_close_req;
-        atomic_t                  mod_refcount;
+        cfs_atomic_t              mod_refcount;
 };
 
 struct lookup_intent;
@@ -1654,14 +1655,14 @@ static inline struct md_open_data *obd_mod_alloc(void)
         OBD_ALLOC_PTR(mod);
         if (mod == NULL)
                 return NULL;
-        atomic_set(&mod->mod_refcount, 1);
+        cfs_atomic_set(&mod->mod_refcount, 1);
         return mod;
 }
 
-#define obd_mod_get(mod) atomic_inc(&(mod)->mod_refcount)
+#define obd_mod_get(mod) cfs_atomic_inc(&(mod)->mod_refcount)
 #define obd_mod_put(mod)                                        \
 ({                                                              \
-        if (atomic_dec_and_test(&(mod)->mod_refcount)) {          \
+        if (cfs_atomic_dec_and_test(&(mod)->mod_refcount)) {          \
                 if ((mod)->mod_open_req)                          \
                         ptlrpc_req_finished((mod)->mod_open_req);   \
                 OBD_FREE_PTR(mod);                              \
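The obd_mod_get()/obd_mod_put() pair above is a plain reference count: the
allocator starts the counter at 1, getters bump it, and the last putter
finishes the open request and frees the object. A minimal user-space
analogue of the same shape, assuming C11 <stdatomic.h>; struct mod_demo and
its fields are illustrative stand-ins, not the Lustre API:

#include <stdatomic.h>
#include <stdlib.h>

struct mod_demo {
        atomic_int mod_refcount;
        void      *mod_open_req;        /* placeholder payload */
};

static struct mod_demo *mod_alloc(void)
{
        struct mod_demo *mod = calloc(1, sizeof(*mod));

        if (mod == NULL)
                return NULL;
        atomic_store(&mod->mod_refcount, 1);
        return mod;
}

static void mod_get(struct mod_demo *mod)
{
        atomic_fetch_add(&mod->mod_refcount, 1);
}

static void mod_put(struct mod_demo *mod)
{
        /* fetch_sub returns the old value; 1 means we were the last
         * holder, mirroring cfs_atomic_dec_and_test() above */
        if (atomic_fetch_sub(&mod->mod_refcount, 1) == 1) {
                free(mod->mod_open_req);
                free(mod);
        }
}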
index d280454..4dc266f 100644
@@ -60,7 +60,7 @@
 
 /* OBD Device Declarations */
 extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
-extern spinlock_t obd_dev_lock;
+extern cfs_spinlock_t obd_dev_lock;
 
 /* OBD Operations Declarations */
 extern struct obd_device *class_conn2obd(struct lustre_handle *);
@@ -149,20 +149,21 @@ int class_config_dump_llog(struct llog_ctxt *ctxt, char *name,
 
 /* list of active configuration logs  */
 struct config_llog_data {
-        char               *cld_logname;
-        struct ldlm_res_id  cld_resid;
+        char                       *cld_logname;
+        struct ldlm_res_id          cld_resid;
         struct config_llog_instance cld_cfg;
-        struct list_head    cld_list_chain;
-        atomic_t            cld_refcount;
-        struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */
-        struct obd_export  *cld_mgcexp;
-        unsigned int        cld_stopping:1, /* we were told to stop watching */
-                            cld_lostlock:1, /* lock not requeued */
-                            cld_is_sptlrpc:1;
+        cfs_list_t                  cld_list_chain;
+        cfs_atomic_t                cld_refcount;
+        struct config_llog_data    *cld_sptlrpc;/* dependent sptlrpc log */
+        struct obd_export          *cld_mgcexp;
+        unsigned int                cld_stopping:1, /* we were told to stop
+                                                     * watching */
+                                    cld_lostlock:1, /* lock not requeued */
+                                    cld_is_sptlrpc:1;
 };
 
 struct lustre_profile {
-        struct list_head lp_list;
+        cfs_list_t       lp_list;
         char            *lp_profile;
         char            *lp_dt;
         char            *lp_md;
@@ -187,54 +188,54 @@ extern void (*class_export_dump_hook)(struct obd_export *);
 
 #define class_export_rpc_get(exp)                                       \
 ({                                                                      \
-        atomic_inc(&(exp)->exp_rpc_count);                              \
+        cfs_atomic_inc(&(exp)->exp_rpc_count);                          \
         CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n",    \
-               (exp), atomic_read(&(exp)->exp_rpc_count));              \
+               (exp), cfs_atomic_read(&(exp)->exp_rpc_count));          \
         class_export_get(exp);                                          \
 })
 
 #define class_export_rpc_put(exp)                                       \
 ({                                                                      \
-        LASSERT(atomic_read(&exp->exp_rpc_count) > 0);                  \
-        atomic_dec(&(exp)->exp_rpc_count);                              \
+        LASSERT(cfs_atomic_read(&exp->exp_rpc_count) > 0);              \
+        cfs_atomic_dec(&(exp)->exp_rpc_count);                          \
         CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n",    \
-               (exp), atomic_read(&(exp)->exp_rpc_count));              \
+               (exp), cfs_atomic_read(&(exp)->exp_rpc_count));          \
         class_export_put(exp);                                          \
 })
 
 #define class_export_lock_get(exp, lock)                                \
 ({                                                                      \
-        atomic_inc(&(exp)->exp_locks_count);                            \
+        cfs_atomic_inc(&(exp)->exp_locks_count);                        \
         __class_export_add_lock_ref(exp, lock);                         \
         CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n", \
-               (exp), atomic_read(&(exp)->exp_locks_count));            \
+               (exp), cfs_atomic_read(&(exp)->exp_locks_count));        \
         class_export_get(exp);                                          \
 })
 
 #define class_export_lock_put(exp, lock)                                \
 ({                                                                      \
-        LASSERT(atomic_read(&exp->exp_locks_count) > 0);                \
-        atomic_dec(&(exp)->exp_locks_count);                            \
+        LASSERT(cfs_atomic_read(&exp->exp_locks_count) > 0);            \
+        cfs_atomic_dec(&(exp)->exp_locks_count);                        \
         __class_export_del_lock_ref(exp, lock);                         \
         CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", \
-               (exp), atomic_read(&(exp)->exp_locks_count));            \
+               (exp), cfs_atomic_read(&(exp)->exp_locks_count));        \
         class_export_put(exp);                                          \
 })
 
 #define class_export_cb_get(exp)                                        \
 ({                                                                      \
-        atomic_inc(&(exp)->exp_cb_count);                               \
+        cfs_atomic_inc(&(exp)->exp_cb_count);                           \
         CDEBUG(D_INFO, "callback GETting export %p : new cb_count %d\n",\
-               (exp), atomic_read(&(exp)->exp_cb_count));               \
+               (exp), cfs_atomic_read(&(exp)->exp_cb_count));           \
         class_export_get(exp);                                          \
 })
 
 #define class_export_cb_put(exp)                                        \
 ({                                                                      \
-        LASSERT(atomic_read(&exp->exp_cb_count) > 0);                   \
-        atomic_dec(&(exp)->exp_cb_count);                               \
+        LASSERT(cfs_atomic_read(&exp->exp_cb_count) > 0);               \
+        cfs_atomic_dec(&(exp)->exp_cb_count);                           \
         CDEBUG(D_INFO, "callback PUTting export %p : new cb_count %d\n",\
-               (exp), atomic_read(&(exp)->exp_cb_count));               \
+               (exp), cfs_atomic_read(&(exp)->exp_cb_count));           \
         class_export_put(exp);                                          \
 })
 
@@ -1116,10 +1117,6 @@ obd_lvfs_open_llog(struct obd_export *exp, __u64 id_ino, struct dentry *dentry)
         return 0;
 }
 
-#ifndef time_before
-#define time_before(t1, t2) ((long)t2 - (long)t1 > 0)
-#endif
-
 /* @max_age is the oldest time in jiffies that we accept using a cached data.
  * If the cache is older than @max_age we will get a new value from the
  * target.  Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
@@ -1147,9 +1144,9 @@ static inline int obd_statfs_async(struct obd_device *obd,
                        obd->obd_name, &obd->obd_osfs,
                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
-                spin_lock(&obd->obd_osfs_lock);
+                cfs_spin_lock(&obd->obd_osfs_lock);
                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
-                spin_unlock(&obd->obd_osfs_lock);
+                cfs_spin_unlock(&obd->obd_osfs_lock);
                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
                 if (oinfo->oi_cb_up)
                         oinfo->oi_cb_up(oinfo, 0);
@@ -1199,10 +1196,10 @@ static inline int obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
         if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
                 rc = OBP(obd, statfs)(obd, osfs, max_age, flags);
                 if (rc == 0) {
-                        spin_lock(&obd->obd_osfs_lock);
+                        cfs_spin_lock(&obd->obd_osfs_lock);
                         memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
                         obd->obd_osfs_age = cfs_time_current_64();
-                        spin_unlock(&obd->obd_osfs_lock);
+                        cfs_spin_unlock(&obd->obd_osfs_lock);
                 }
         } else {
                 CDEBUG(D_SUPER,"%s: use %p cache blocks "LPU64"/"LPU64
@@ -1210,9 +1207,9 @@ static inline int obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
                        obd->obd_name, &obd->obd_osfs,
                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
-                spin_lock(&obd->obd_osfs_lock);
+                cfs_spin_lock(&obd->obd_osfs_lock);
                 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
-                spin_unlock(&obd->obd_osfs_lock);
+                cfs_spin_unlock(&obd->obd_osfs_lock);
         }
         RETURN(rc);
 }
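obd_statfs() above is an age-gated cache: the last result is kept in
obd->obd_osfs, and the target is only asked again when the cached copy is
older than max_age; both the refresh and the cached read copy the buffer
under obd_osfs_lock. A sketch of the same shape with POSIX threads;
statfs_demo, now_ticks() and backend_statfs() are stand-ins invented for
the sketch:

#include <pthread.h>
#include <stdint.h>
#include <time.h>

struct statfs_demo { uint64_t blocks, bavail; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct statfs_demo cache;
static uint64_t cache_age;              /* tick of the last refresh */

static uint64_t now_ticks(void)
{
        return (uint64_t)time(NULL);    /* coarse stand-in clock */
}

static int backend_statfs(struct statfs_demo *out)
{
        out->blocks = 1000;             /* canned numbers for the demo */
        out->bavail = 500;
        return 0;
}

/* @max_age is the oldest tick we accept for cached data, as above. */
int statfs_cached(struct statfs_demo *out, uint64_t max_age)
{
        int rc = 0;

        if (cache_age < max_age) {      /* cache too old: refresh it */
                struct statfs_demo fresh;

                rc = backend_statfs(&fresh);
                if (rc == 0) {
                        pthread_mutex_lock(&cache_lock);
                        cache = fresh;
                        cache_age = now_ticks();
                        pthread_mutex_unlock(&cache_lock);
                        *out = fresh;
                }
        } else {                        /* fresh enough: serve the copy */
                pthread_mutex_lock(&cache_lock);
                *out = cache;
                pthread_mutex_unlock(&cache_lock);
        }
        return rc;
}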
@@ -1595,7 +1592,7 @@ static inline int obd_quota_adjust_qunit(struct obd_export *exp,
 
 #if defined(LPROCFS) && defined(HAVE_QUOTA_SUPPORT)
         if (qctxt)
-                do_gettimeofday(&work_start);
+                cfs_gettimeofday(&work_start);
 #endif
         EXP_CHECK_DT_OP(exp, quota_adjust_qunit);
         EXP_COUNTER_INCREMENT(exp, quota_adjust_qunit);
@@ -1604,7 +1601,7 @@ static inline int obd_quota_adjust_qunit(struct obd_export *exp,
 
 #if defined(LPROCFS) && defined(HAVE_QUOTA_SUPPORT)
         if (qctxt) {
-                do_gettimeofday(&work_end);
+                cfs_gettimeofday(&work_end);
                 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                 lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_ADJUST_QUNIT,
                                     timediff);
@@ -1644,13 +1641,13 @@ static inline int obd_register_observer(struct obd_device *obd,
 {
         ENTRY;
         OBD_CHECK_DEV(obd);
-        down_write(&obd->obd_observer_link_sem);
+        cfs_down_write(&obd->obd_observer_link_sem);
         if (obd->obd_observer && observer) {
-                up_write(&obd->obd_observer_link_sem);
+                cfs_up_write(&obd->obd_observer_link_sem);
                 RETURN(-EALREADY);
         }
         obd->obd_observer = observer;
-        up_write(&obd->obd_observer_link_sem);
+        cfs_up_write(&obd->obd_observer_link_sem);
         RETURN(0);
 }
 
@@ -1658,10 +1655,10 @@ static inline int obd_pin_observer(struct obd_device *obd,
                                    struct obd_device **observer)
 {
         ENTRY;
-        down_read(&obd->obd_observer_link_sem);
+        cfs_down_read(&obd->obd_observer_link_sem);
         if (!obd->obd_observer) {
                 *observer = NULL;
-                up_read(&obd->obd_observer_link_sem);
+                cfs_up_read(&obd->obd_observer_link_sem);
                 RETURN(-ENOENT);
         }
         *observer = obd->obd_observer;
@@ -1671,7 +1668,7 @@ static inline int obd_pin_observer(struct obd_device *obd,
 static inline int obd_unpin_observer(struct obd_device *obd)
 {
         ENTRY;
-        up_read(&obd->obd_observer_link_sem);
+        cfs_up_read(&obd->obd_observer_link_sem);
         RETURN(0);
 }
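The register/pin/unpin trio above uses obd_observer_link_sem so a reader
can hold the observer pointer stable across a call: obd_pin_observer()
returns with the semaphore still read-held, and obd_register_observer()
takes it for write, so the pointer can never be swapped while pinned. A
pthread rwlock analogue; the observer_* names are illustrative only:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct observer;                        /* opaque for the sketch */

static pthread_rwlock_t obs_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct observer *obs;

int observer_register(struct observer *o)
{
        int rc = 0;

        pthread_rwlock_wrlock(&obs_lock);
        if (obs != NULL && o != NULL)
                rc = -EALREADY;         /* refuse to replace silently */
        else
                obs = o;                /* set, or clear with o == NULL */
        pthread_rwlock_unlock(&obs_lock);
        return rc;
}

/* On success the read lock stays held until observer_unpin(). */
int observer_pin(struct observer **out)
{
        pthread_rwlock_rdlock(&obs_lock);
        if (obs == NULL) {
                *out = NULL;
                pthread_rwlock_unlock(&obs_lock);
                return -ENOENT;
        }
        *out = obs;
        return 0;
}

void observer_unpin(void)
{
        pthread_rwlock_unlock(&obs_lock);
}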
 
index 81a3209..05bd9c6 100644
@@ -53,7 +53,7 @@ struct osc_brw_async_args {
         int                aa_resends;
         struct brw_page  **aa_ppga;
         struct client_obd *aa_cli;
-        struct list_head   aa_oaps;
+        cfs_list_t         aa_oaps;
         struct obd_capa   *aa_ocapa;
         struct cl_req     *aa_clerq;
 };
index 4661626..0d90654 100644
@@ -81,8 +81,8 @@ extern int at_early_margin;
 extern int at_extra;
 extern unsigned int obd_sync_filter;
 extern unsigned int obd_max_dirty_pages;
-extern atomic_t obd_dirty_pages;
-extern atomic_t obd_dirty_transit_pages;
+extern cfs_atomic_t obd_dirty_pages;
+extern cfs_atomic_t obd_dirty_transit_pages;
 extern cfs_waitq_t obd_race_waitq;
 extern int obd_race_state;
 extern unsigned int obd_alloc_fail_rate;
@@ -504,7 +504,7 @@ static inline void obd_race(__u32 id)
                 } else {
                         CERROR("obd_fail_race id %x waking\n", id);
                         obd_race_state = 1;
-                        wake_up(&obd_race_waitq);
+                        cfs_waitq_signal(&obd_race_waitq);
                 }
         }
 }
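obd_race() above parks the first thread that reaches the fail point on
obd_race_waitq and lets the second thread wake it, so two racing code
paths can be overlapped deterministically in tests; the hunk only swaps
the bare wake_up() for the portable cfs_waitq_signal(). A pthread
analogue of that one-shot rendezvous, with illustrative race_* names:

#include <pthread.h>

static pthread_mutex_t race_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  race_cv  = PTHREAD_COND_INITIALIZER;
static int race_state;                  /* 0: empty, 1: waiting, 2: met */

void race_point(void)
{
        pthread_mutex_lock(&race_mtx);
        if (race_state == 0) {
                race_state = 1;         /* first arrival: go to sleep */
                while (race_state == 1)
                        pthread_cond_wait(&race_cv, &race_mtx);
        } else if (race_state == 1) {
                race_state = 2;         /* second arrival: wake the first */
                pthread_cond_broadcast(&race_cv);
        }
        pthread_mutex_unlock(&race_mtx);
}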
@@ -516,7 +516,7 @@ static inline void obd_race(__u32 id)
 
 #define fixme() CDEBUG(D_OTHER, "FIXME\n");
 
-extern atomic_t libcfs_kmemory;
+extern cfs_atomic_t libcfs_kmemory;
 
 #ifdef LPROCFS
 #define obd_memory_add(size)                                                  \
@@ -665,7 +665,7 @@ do {                                                                          \
                 CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n",           \
                        (int)(size));                                          \
                 CERROR(LPU64" total bytes allocated by Lustre, %d by LNET\n", \
-                       obd_memory_sum(), atomic_read(&libcfs_kmemory));       \
+                       obd_memory_sum(), cfs_atomic_read(&libcfs_kmemory));   \
         } else {                                                              \
                 memset(ptr, 0, size);                                         \
                 OBD_ALLOC_POST(ptr, size, "vmalloced");                       \
@@ -747,7 +747,7 @@ do {                                                                          \
 })
 #define OBD_SLAB_ALLOC(ptr, slab, type, size)                                 \
 do {                                                                          \
-        LASSERT(ergo(type != CFS_ALLOC_ATOMIC, !in_interrupt()));             \
+        LASSERT(ergo(type != CFS_ALLOC_ATOMIC, !cfs_in_interrupt()));         \
         (ptr) = cfs_mem_cache_alloc(slab, (type));                            \
         if (likely((ptr) != NULL &&                                           \
                    (!HAS_FAIL_ALLOC_FLAG || obd_alloc_fail_rate == 0 ||       \
@@ -792,7 +792,7 @@ do {                                                                          \
                        obd_memory_sum(),                                      \
                        obd_pages_sum() << CFS_PAGE_SHIFT,                     \
                        obd_pages_sum(),                                       \
-                       atomic_read(&libcfs_kmemory));                         \
+                       cfs_atomic_read(&libcfs_kmemory));                     \
         } else {                                                              \
                 obd_pages_add(order);                                         \
                 CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / "      \
index 28e5ba9..22d63e3 100644
@@ -300,7 +300,7 @@ static struct lu_env *ccc_inode_fini_env = NULL;
  * A mutex serializing calls to slp_inode_fini() under extreme memory
  * pressure, when environments cannot be allocated.
  */
-static DEFINE_MUTEX(ccc_inode_fini_guard);
+static CFS_DEFINE_MUTEX(ccc_inode_fini_guard);
 static int dummy_refcheck;
 
 int ccc_global_init(struct lu_device_type *device_type)
@@ -1249,7 +1249,7 @@ void cl_inode_fini(struct inode *inode)
                 env = cl_env_get(&refcheck);
                 emergency = IS_ERR(env);
                 if (emergency) {
-                        mutex_lock(&ccc_inode_fini_guard);
+                        cfs_mutex_lock(&ccc_inode_fini_guard);
                         LASSERT(ccc_inode_fini_env != NULL);
                         cl_env_implant(ccc_inode_fini_env, &refcheck);
                         env = ccc_inode_fini_env;
@@ -1265,7 +1265,7 @@ void cl_inode_fini(struct inode *inode)
                 lli->lli_clob = NULL;
                 if (emergency) {
                         cl_env_unplant(ccc_inode_fini_env, &refcheck);
-                        mutex_unlock(&ccc_inode_fini_guard);
+                        cfs_mutex_unlock(&ccc_inode_fini_guard);
                 } else
                         cl_env_put(env, &refcheck);
                 cl_env_reexit(cookie);
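cl_inode_fini() above has a fallback for extreme memory pressure: when
cl_env_get() fails, it borrows the single pre-allocated
ccc_inode_fini_env, serialized by the ccc_inode_fini_guard mutex so only
one thread uses the reserve at a time. A minimal pthread sketch of that
fallback shape; env_demo and its helpers are invented for the sketch:

#include <pthread.h>
#include <stdlib.h>

struct env_demo { int dummy; };

static pthread_mutex_t emergency_guard = PTHREAD_MUTEX_INITIALIZER;
static struct env_demo emergency_env;   /* reserve, set up once */

static struct env_demo *env_get(void)
{
        /* may return NULL under memory pressure */
        return malloc(sizeof(struct env_demo));
}

static void work_with_env(struct env_demo *env)
{
        (void)env;                      /* the real work goes here */
}

void fini_with_fallback(void)
{
        struct env_demo *env = env_get();
        int emergency = (env == NULL);

        if (emergency) {
                /* one thread at a time may borrow the reserve env */
                pthread_mutex_lock(&emergency_guard);
                env = &emergency_env;
        }

        work_with_env(env);

        if (emergency)
                pthread_mutex_unlock(&emergency_guard);
        else
                free(env);              /* release the normal env */
}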
index ee4fef3..777f082 100644
@@ -102,13 +102,13 @@ int cl_ocd_update(struct obd_device *host,
                 flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
                 CDEBUG(D_SUPER, "Changing connect_flags: "LPX64" -> "LPX64"\n",
                        lco->lco_flags, flags);
-                mutex_down(&lco->lco_lock);
+                cfs_mutex_down(&lco->lco_lock);
                 lco->lco_flags &= flags;
                 /* for each osc event update ea size */
                 if (lco->lco_dt_exp)
                         cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
 
-                mutex_up(&lco->lco_lock);
+                cfs_mutex_up(&lco->lco_lock);
                 result = 0;
         } else {
                 CERROR("unexpected notification from %s %s!\n",
index cb08f5b..88a56fa 100644
@@ -54,12 +54,12 @@ struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock)
 {
         struct ldlm_resource *res = NULL;
 
-        spin_lock(&lock->l_lock);
+        cfs_spin_lock(&lock->l_lock);
         res = lock->l_resource;
 
         if (ns_is_server(res->lr_namespace))
                 /* on server-side resource of lock doesn't change */
-                spin_unlock(&lock->l_lock);
+                cfs_spin_unlock(&lock->l_lock);
 
         lock_res(res);
         return res;
@@ -76,5 +76,5 @@ void unlock_res_and_lock(struct ldlm_lock *lock)
         }
 
         unlock_res(res);
-        spin_unlock(&lock->l_lock);
+        cfs_spin_unlock(&lock->l_lock);
 }
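lock_res_and_lock() above is two-level locking: the per-lock l_lock
spinlock pins the l_resource pointer (which can otherwise be swapped),
and only then is the resource itself locked; on the server side the
pointer never changes, so l_lock can be dropped early. A pthread mutex
sketch of the client-side ordering, with illustrative *_demo names:

#include <pthread.h>

struct res_demo {
        pthread_mutex_t r_lock;
};

struct lock_demo {
        pthread_mutex_t  l_lock;        /* stabilizes l_res below */
        struct res_demo *l_res;         /* may be swapped under l_lock */
};

/* Returns with both l_lock and the resource lock held. */
struct res_demo *lock_res_and_lock_demo(struct lock_demo *lk)
{
        struct res_demo *res;

        pthread_mutex_lock(&lk->l_lock);
        res = lk->l_res;                /* stable while l_lock is held */
        pthread_mutex_lock(&res->r_lock);
        return res;
}

void unlock_res_and_lock_demo(struct lock_demo *lk, struct res_demo *res)
{
        pthread_mutex_unlock(&res->r_lock);
        pthread_mutex_unlock(&lk->l_lock);
}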
index 03172d6..780296d 100644
@@ -164,7 +164,7 @@ static void
 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
                                     struct ldlm_extent *new_ex)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_resource *res = req->l_resource;
         ldlm_mode_t req_mode = req->l_req_mode;
         __u64 req_start = req->l_req_extent.start;
@@ -175,11 +175,11 @@ ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
         lockmode_verify(req_mode);
 
         /* for waiting locks */
-        list_for_each(tmp, &res->lr_waiting) {
+        cfs_list_for_each(tmp, &res->lr_waiting) {
                 struct ldlm_lock *lock;
                 struct ldlm_extent *l_extent;
 
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
                 l_extent = &lock->l_policy_data.l_extent;
 
                 /* We already hit the minimum requested size, search no more */
@@ -296,7 +296,7 @@ static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
 }
 
 struct ldlm_extent_compat_args {
-        struct list_head *work_list;
+        cfs_list_t *work_list;
         struct ldlm_lock *lock;
         ldlm_mode_t mode;
         int *locks;
@@ -309,15 +309,15 @@ static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
         struct ldlm_extent_compat_args *priv = data;
         struct ldlm_interval *node = to_ldlm_interval(n);
         struct ldlm_extent *extent;
-        struct list_head *work_list = priv->work_list;
+        cfs_list_t *work_list = priv->work_list;
         struct ldlm_lock *lock, *enq = priv->lock;
         ldlm_mode_t mode = priv->mode;
         int count = 0;
         ENTRY;
 
-        LASSERT(!list_empty(&node->li_group));
+        LASSERT(!cfs_list_empty(&node->li_group));
 
-        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+        cfs_list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                 /* interval tree is for granted lock */
                 LASSERTF(mode == lock->l_granted_mode,
                          "mode = %s, lock->l_granted_mode = %s\n",
@@ -350,11 +350,11 @@ static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
  * negative error, such as EWOULDBLOCK for group locks
  */
 static int
-ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                          int *flags, ldlm_error_t *err,
-                         struct list_head *work_list, int *contended_locks)
+                         cfs_list_t *work_list, int *contended_locks)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_lock *lock;
         struct ldlm_resource *res = req->l_resource;
         ldlm_mode_t req_mode = req->l_req_mode;
@@ -425,15 +425,16 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         } else {
                                 interval_search(tree->lit_root, &ex,
                                                 ldlm_extent_compat_cb, &data);
-                                if (!list_empty(work_list) && compat)
+                                if (!cfs_list_empty(work_list) && compat)
                                         compat = 0;
                         }
                 }
         } else { /* for waiting queue */
-                list_for_each(tmp, queue) {
+                cfs_list_for_each(tmp, queue) {
                         check_contention = 1;
 
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
 
                         if (req == lock)
                                 break;
@@ -450,7 +451,7 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                          * front of first non-GROUP lock */
 
                                         ldlm_resource_insert_lock_after(lock, req);
-                                        list_del_init(&lock->l_res_link);
+                                        cfs_list_del_init(&lock->l_res_link);
                                         ldlm_resource_insert_lock_after(req, lock);
                                         compat = 0;
                                         break;
@@ -544,7 +545,7 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                            first non-GROUP lock */
 
                                         ldlm_resource_insert_lock_after(lock, req);
-                                        list_del_init(&lock->l_res_link);
+                                        cfs_list_del_init(&lock->l_res_link);
                                         ldlm_resource_insert_lock_after(req, lock);
                                         break;
                                 }
@@ -604,22 +605,22 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
 
         RETURN(compat);
 destroylock:
-        list_del_init(&req->l_res_link);
+        cfs_list_del_init(&req->l_res_link);
         ldlm_lock_destroy_nolock(req);
         *err = compat;
         RETURN(compat);
 }
 
-static void discard_bl_list(struct list_head *bl_list)
+static void discard_bl_list(cfs_list_t *bl_list)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         ENTRY;
 
-        list_for_each_safe(pos, tmp, bl_list) {
+        cfs_list_for_each_safe(pos, tmp, bl_list) {
                 struct ldlm_lock *lock =
-                        list_entry(pos, struct ldlm_lock, l_bl_ast);
+                        cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
 
-                list_del_init(&lock->l_bl_ast);
+                cfs_list_del_init(&lock->l_bl_ast);
                 LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                 lock->l_flags &= ~LDLM_FL_AST_SENT;
                 LASSERT(lock->l_bl_ast_run == 0);
@@ -639,7 +640,7 @@ static void discard_bl_list(struct list_head *bl_list)
   *   - blocking ASTs have not been sent
   *   - must call this function with the ns lock held once */
 int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                             ldlm_error_t *err, struct list_head *work_list)
+                             ldlm_error_t *err, cfs_list_t *work_list)
 {
         struct ldlm_resource *res = lock->l_resource;
         CFS_LIST_HEAD(rpc_list);
@@ -647,7 +648,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
         int contended_locks = 0;
         ENTRY;
 
-        LASSERT(list_empty(&res->lr_converting));
+        LASSERT(cfs_list_empty(&res->lr_converting));
         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                 !(lock->l_flags & LDLM_AST_DISCARD_DATA));
         check_res_locked(res);
@@ -704,7 +705,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                  * bug 2322: we used to unlink and re-add here, which was a
                  * terrible folly -- if we goto restart, we could get
                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (list_empty(&lock->l_res_link))
+                if (cfs_list_empty(&lock->l_res_link))
                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                 unlock_res(res);
                 rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
@@ -750,7 +751,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
         }
         RETURN(0);
 out:
-        if (!list_empty(&rpc_list)) {
+        if (!cfs_list_empty(&rpc_list)) {
                 LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
                 discard_bl_list(&rpc_list);
         }
@@ -765,7 +766,7 @@ out:
 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
 {
         struct ldlm_resource *res = lock->l_resource;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_lock *lck;
         __u64 kms = 0;
         ENTRY;
@@ -775,8 +776,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
          * calculation of the kms */
         lock->l_flags |= LDLM_FL_KMS_IGNORE;
 
-        list_for_each(tmp, &res->lr_granted) {
-                lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+        cfs_list_for_each(tmp, &res->lr_granted) {
+                lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                         continue;
@@ -813,7 +814,7 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
 void ldlm_interval_free(struct ldlm_interval *node)
 {
         if (node) {
-                LASSERT(list_empty(&node->li_group));
+                LASSERT(cfs_list_empty(&node->li_group));
                 LASSERT(!interval_is_intree(&node->li_node));
                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
         }
@@ -826,7 +827,7 @@ void ldlm_interval_attach(struct ldlm_interval *n,
         LASSERT(l->l_tree_node == NULL);
         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
 
-        list_add_tail(&l->l_sl_policy, &n->li_group);
+        cfs_list_add_tail(&l->l_sl_policy, &n->li_group);
         l->l_tree_node = n;
 }
 
@@ -837,11 +838,11 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
         if (n == NULL)
                 return NULL;
 
-        LASSERT(!list_empty(&n->li_group));
+        LASSERT(!cfs_list_empty(&n->li_group));
         l->l_tree_node = NULL;
-        list_del_init(&l->l_sl_policy);
+        cfs_list_del_init(&l->l_sl_policy);
 
-        return (list_empty(&n->li_group) ? n : NULL);
+        return (cfs_list_empty(&n->li_group) ? n : NULL);
 }
 
 static inline int lock_mode_to_index(ldlm_mode_t mode)
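ldlm_interval_attach()/detach() above keep every lock that covers the
same extent on an intrusive list headed in the shared interval node;
detach hands the node back to the caller only when the group empties, so
the node can then be freed and removed from the tree. A compact
intrusive-list sketch of that ownership rule; the *_demo names are
illustrative:

#include <stddef.h>

struct list_node { struct list_node *next, *prev; };

#define LIST_INIT(h)  ((h)->next = (h)->prev = (h))

static int list_empty_demo(struct list_node *h) { return h->next == h; }

static void list_add_tail_demo(struct list_node *n, struct list_node *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del_init_demo(struct list_node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        LIST_INIT(n);
}

struct group_node {
        struct list_node li_group;      /* list head, LIST_INIT'ed by
                                         * whoever creates the node */
};

struct member {
        struct list_node   l_sl_policy; /* linkage into li_group */
        struct group_node *tree_node;
};

void attach_demo(struct group_node *n, struct member *m)
{
        list_add_tail_demo(&m->l_sl_policy, &n->li_group);
        m->tree_node = n;
}

/* Returns the node when the group became empty, else NULL. */
struct group_node *detach_demo(struct member *m)
{
        struct group_node *n = m->tree_node;

        if (n == NULL)
                return NULL;
        m->tree_node = NULL;
        list_del_init_demo(&m->l_sl_policy);
        return list_empty_demo(&n->li_group) ? n : NULL;
}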
index a983b89..752768f 100644
@@ -63,7 +63,7 @@ static CFS_LIST_HEAD(ldlm_flock_waitq);
 /**
  * Lock protecting access to ldlm_flock_waitq.
  */
-spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t ldlm_flock_waitq_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                             void *data, int flag);
@@ -105,9 +105,9 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
                    mode, flags);
 
         /* Safe to not lock here, since it should be empty anyway */
-        LASSERT(list_empty(&lock->l_flock_waitq));
+        LASSERT(cfs_list_empty(&lock->l_flock_waitq));
 
-        list_del_init(&lock->l_res_link);
+        cfs_list_del_init(&lock->l_res_link);
         if (flags == LDLM_FL_WAIT_NOREPROC &&
             !(lock->l_flags & LDLM_FL_FAILED)) {
                 /* client side - set a flag to prevent sending a CANCEL */
@@ -131,9 +131,9 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
         pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
         struct ldlm_lock *lock;
 
-        spin_lock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
 restart:
-        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
+        cfs_list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                 if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
                     (lock->l_export != blocking_export))
                         continue;
@@ -142,25 +142,25 @@ restart:
                 blocking_export = (struct obd_export *)(long)
                         lock->l_policy_data.l_flock.blocking_export;
                 if (blocking_pid == req_pid && blocking_export == req_export) {
-                        spin_unlock(&ldlm_flock_waitq_lock);
+                        cfs_spin_unlock(&ldlm_flock_waitq_lock);
                         return 1;
                 }
 
                 goto restart;
         }
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         return 0;
 }
 
 int
 ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
-                        ldlm_error_t *err, struct list_head *work_list)
+                        ldlm_error_t *err, cfs_list_t *work_list)
 {
         struct ldlm_resource *res = req->l_resource;
         struct ldlm_namespace *ns = res->lr_namespace;
-        struct list_head *tmp;
-        struct list_head *ownlocks = NULL;
+        cfs_list_t *tmp;
+        cfs_list_t *ownlocks = NULL;
         struct ldlm_lock *lock = NULL;
         struct ldlm_lock *new = req;
         struct ldlm_lock *new2 = NULL;
@@ -192,8 +192,9 @@ reprocess:
         if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                 /* This loop determines where this processes locks start
                  * in the resource lr_granted list. */
-                list_for_each(tmp, &res->lr_granted) {
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                cfs_list_for_each(tmp, &res->lr_granted) {
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                         if (ldlm_same_flock_owner(lock, req)) {
                                 ownlocks = tmp;
                                 break;
@@ -204,8 +205,9 @@ reprocess:
 
                 /* This loop determines if there are existing locks
                  * that conflict with the new lock request. */
-                list_for_each(tmp, &res->lr_granted) {
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                cfs_list_for_each(tmp, &res->lr_granted) {
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
 
                         if (ldlm_same_flock_owner(lock, req)) {
                                 if (!ownlocks)
@@ -253,10 +255,11 @@ reprocess:
                         req->l_policy_data.l_flock.blocking_export =
                                 (long)(void *)lock->l_export;
 
-                        LASSERT(list_empty(&req->l_flock_waitq));
-                        spin_lock(&ldlm_flock_waitq_lock);
-                        list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
-                        spin_unlock(&ldlm_flock_waitq_lock);
+                        LASSERT(cfs_list_empty(&req->l_flock_waitq));
+                        cfs_spin_lock(&ldlm_flock_waitq_lock);
+                        cfs_list_add_tail(&req->l_flock_waitq,
+                                          &ldlm_flock_waitq);
+                        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
                         ldlm_resource_add_lock(res, &res->lr_waiting, req);
                         *flags |= LDLM_FL_BLOCK_GRANTED;
@@ -273,9 +276,9 @@ reprocess:
 
         /* In case we had slept on this lock request take it off of the
          * deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&req->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
+        cfs_list_del_init(&req->l_flock_waitq);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         /* Scan the locks owned by this process that overlap this request.
          * We may have to merge or split existing locks. */
@@ -284,7 +287,7 @@ reprocess:
                 ownlocks = &res->lr_granted;
 
         list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
-                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
+                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
 
                 if (!ldlm_same_flock_owner(lock, new))
                         break;
@@ -402,7 +405,7 @@ reprocess:
                 if (lock->l_export != NULL) {
                         new2->l_export = class_export_lock_get(lock->l_export, new2);
                         if (new2->l_export->exp_lock_hash &&
-                            hlist_unhashed(&new2->l_exp_hash))
+                            cfs_hlist_unhashed(&new2->l_exp_hash))
                                 cfs_hash_add(new2->l_export->exp_lock_hash,
                                              &new2->l_remote_handle,
                                              &new2->l_exp_hash);
@@ -426,7 +429,7 @@ reprocess:
 
         /* Add req to the granted queue before calling ldlm_reprocess_all(). */
         if (!added) {
-                list_del_init(&req->l_res_link);
+                cfs_list_del_init(&req->l_res_link);
                 /* insert new lock before ownlocks in list. */
                 ldlm_resource_add_lock(res, ownlocks, req);
         }
@@ -489,9 +492,9 @@ ldlm_flock_interrupted_wait(void *data)
         lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
 
         /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
+        cfs_list_del_init(&lock->l_flock_waitq);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         /* client side - set flag to prevent lock from being put on lru list */
         lock->l_flags |= LDLM_FL_CBPENDING;
@@ -568,9 +571,9 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
                 imp = obd->u.cli.cl_import;
 
         if (NULL != imp) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 fwd.fwd_generation = imp->imp_generation;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         }
 
         lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
@@ -603,12 +606,12 @@ granted:
         LDLM_DEBUG(lock, "client-side enqueue granted");
 
         /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
+        cfs_list_del_init(&lock->l_flock_waitq);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         /* ldlm_lock_enqueue() has already placed lock on the granted list. */
-        list_del_init(&lock->l_res_link);
+        cfs_list_del_init(&lock->l_res_link);
 
         if (flags & LDLM_FL_TEST_LOCK) {
                 /* fcntl(F_GETLK) request */
@@ -656,8 +659,8 @@ int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
         ns = lock->l_resource->lr_namespace;
 
         /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
+        cfs_list_del_init(&lock->l_flock_waitq);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
         RETURN(0);
 }
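All three hunks above take a waiter off ldlm_flock_waitq under its
spinlock; the queue exists so ldlm_flock_deadlock() can chase the
blocked-on chain and reject a request that would close a cycle. A
condensed version of that walk; the waiter_demo list stands in for the
waitq entries, and, like the original check, it assumes the queue being
scanned is already cycle-free:

#include <stddef.h>

struct waiter_demo {
        int                 owner;      /* who owns this waiting lock */
        int                 blocked_on; /* owner it is waiting for */
        struct waiter_demo *next;
};

int flock_deadlock_demo(struct waiter_demo *waitq, int req_owner,
                        int blocked_on)
{
        struct waiter_demo *w;

restart:
        for (w = waitq; w != NULL; w = w->next) {
                if (w->owner != blocked_on)
                        continue;
                blocked_on = w->blocked_on;     /* follow one hop */
                if (blocked_on == req_owner)
                        return 1;               /* cycle: deadlock */
                goto restart;
        }
        return 0;                               /* chain ended: safe */
}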
index 548ee14..be9a073 100644
 
 /* Determine if the lock is compatible with all locks on the queue. */
 static int
-ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
-                            struct list_head *work_list)
+ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
+                            cfs_list_t *work_list)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_lock *lock;
         ldlm_mode_t req_mode = req->l_req_mode;
         __u64 req_bits = req->l_policy_data.l_inodebits.bits;
@@ -66,19 +66,19 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                               I think. Also such a lock would be compatible
                                with any other bit lock */
 
-        list_for_each(tmp, queue) {
-                struct list_head *mode_tail;
+        cfs_list_for_each(tmp, queue) {
+                cfs_list_t *mode_tail;
 
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (req == lock)
                         RETURN(compat);
 
                 /* last lock in mode group */
                 LASSERT(lock->l_sl_mode.prev != NULL);
-                mode_tail = &list_entry(lock->l_sl_mode.prev,
-                                        struct ldlm_lock,
-                                        l_sl_mode)->l_res_link;
+                mode_tail = &cfs_list_entry(lock->l_sl_mode.prev,
+                                            struct ldlm_lock,
+                                            l_sl_mode)->l_res_link;
 
                 /* locks are compatible, bits don't matter */
                 if (lockmode_compat(lock->l_req_mode, req_mode)) {
@@ -103,12 +103,12 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                 }
 
                 for (;;) {
-                        struct list_head *head;
+                        cfs_list_t *head;
 
                         /* last lock in policy group */
-                        tmp = &list_entry(lock->l_sl_policy.prev,
-                                          struct ldlm_lock,
-                                          l_sl_policy)->l_res_link;
+                        tmp = &cfs_list_entry(lock->l_sl_policy.prev,
+                                              struct ldlm_lock,
+                                              l_sl_policy)->l_res_link;
 
                         /* locks with bits overlapped are conflicting locks */
                         if (lock->l_policy_data.l_inodebits.bits & req_bits) {
@@ -125,7 +125,7 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                         ldlm_add_ast_work_item(lock, req,
                                                                work_list);
                                 head = &lock->l_sl_policy;
-                                list_for_each_entry(lock, head, l_sl_policy)
+                                cfs_list_for_each_entry(lock, head, l_sl_policy)
                                         if (lock->l_blocking_ast)
                                                 ldlm_add_ast_work_item(lock, req,
                                                                        work_list);
@@ -134,7 +134,8 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                 break;
 
                         tmp = tmp->next;
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                 } /* loop over policy groups within one mode group */
         } /* loop over mode groups within @queue */
 
@@ -150,14 +151,14 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
   *   - must call this function with the ns lock held once */
 int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags,
                                 int first_enq, ldlm_error_t *err,
-                                struct list_head *work_list)
+                                cfs_list_t *work_list)
 {
         struct ldlm_resource *res = lock->l_resource;
         CFS_LIST_HEAD(rpc_list);
         int rc;
         ENTRY;
 
-        LASSERT(list_empty(&res->lr_converting));
+        LASSERT(cfs_list_empty(&res->lr_converting));
         check_res_locked(res);
 
         if (!first_enq) {
@@ -185,7 +186,7 @@ int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags,
                  * bug 2322: we used to unlink and re-add here, which was a
                  * terrible folly -- if we goto restart, we could get
                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (list_empty(&lock->l_res_link))
+                if (cfs_list_empty(&lock->l_res_link))
                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                 unlock_res(res);
                 rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
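The queue walk above declares an inodebits request compatible with a
granted or waiting lock when either the lock modes are compatible or the
two bit masks do not overlap; only overlapping bits under conflicting
modes block. The core predicate in isolation, with illustrative demo
names and a deliberately reduced two-mode table:

#include <stdint.h>

enum mode_demo { MODE_READ, MODE_WRITE };

static int modes_compat(enum mode_demo a, enum mode_demo b)
{
        return a == MODE_READ && b == MODE_READ;
}

static int inodebits_compat(enum mode_demo m1, uint64_t bits1,
                            enum mode_demo m2, uint64_t bits2)
{
        if (modes_compat(m1, m2))
                return 1;               /* modes agree: bits don't matter */
        return (bits1 & bits2) == 0;    /* disjoint bits never conflict */
}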
index c3f6937..f3f5a12 100644
 
 #define MAX_STRING_SIZE 128
 
-extern atomic_t ldlm_srv_namespace_nr;
-extern atomic_t ldlm_cli_namespace_nr;
-extern struct semaphore ldlm_srv_namespace_lock;
-extern struct list_head ldlm_srv_namespace_list;
-extern struct semaphore ldlm_cli_namespace_lock;
-extern struct list_head ldlm_cli_namespace_list;
-
-static inline atomic_t *ldlm_namespace_nr(ldlm_side_t client)
+extern cfs_atomic_t ldlm_srv_namespace_nr;
+extern cfs_atomic_t ldlm_cli_namespace_nr;
+extern cfs_semaphore_t ldlm_srv_namespace_lock;
+extern cfs_list_t ldlm_srv_namespace_list;
+extern cfs_semaphore_t ldlm_cli_namespace_lock;
+extern cfs_list_t ldlm_cli_namespace_list;
+
+static inline cfs_atomic_t *ldlm_namespace_nr(ldlm_side_t client)
 {
         return client == LDLM_NAMESPACE_SERVER ?
                 &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr;
 }
 
-static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
+static inline cfs_list_t *ldlm_namespace_list(ldlm_side_t client)
 {
         return client == LDLM_NAMESPACE_SERVER ?
                 &ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
 }
 
-static inline struct semaphore *ldlm_namespace_lock(ldlm_side_t client)
+static inline cfs_semaphore_t *ldlm_namespace_lock(ldlm_side_t client)
 {
         return client == LDLM_NAMESPACE_SERVER ?
                 &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
@@ -77,8 +77,9 @@ enum {
 
 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
                     int flags);
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
-                          int count, int max, int cancel_flags, int flags);
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
+                          cfs_list_t *cancels, int count, int max,
+                          int cancel_flags, int flags);
 extern int ldlm_enqueue_min;
 int ldlm_get_enq_timeout(struct ldlm_lock *lock);
 
@@ -97,7 +98,7 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns);
 
 struct ldlm_cb_set_arg {
         struct ptlrpc_request_set *set;
-        atomic_t restart;
+        cfs_atomic_t restart;
         __u32 type; /* LDLM_BL_CALLBACK or LDLM_CP_CALLBACK */
 };
 
@@ -107,7 +108,7 @@ typedef enum {
         LDLM_WORK_REVOKE_AST
 } ldlm_desc_ast_t;
 
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
+void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list);
 struct ldlm_lock *
 ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
                  ldlm_type_t type, ldlm_mode_t,
@@ -120,10 +121,10 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
 void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
 void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
-                                struct list_head *work_list);
-int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
-                         struct list_head *work_list);
-int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type);
+                            cfs_list_t *work_list);
+int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
+                         cfs_list_t *work_list);
+int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type);
 int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
@@ -137,29 +138,29 @@ void ldlm_cancel_locks_for_export(struct obd_export *export);
 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                            struct ldlm_lock *lock);
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                           struct list_head *cancels, int count);
+                           cfs_list_t *cancels, int count);
 
 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
 
 /* ldlm_plain.c */
 int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                            ldlm_error_t *err, struct list_head *work_list);
+                            ldlm_error_t *err, cfs_list_t *work_list);
 
 /* ldlm_extent.c */
 int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                             ldlm_error_t *err, struct list_head *work_list);
+                             ldlm_error_t *err, cfs_list_t *work_list);
 void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
 void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
 
 /* ldlm_flock.c */
 int ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
-                            ldlm_error_t *err, struct list_head *work_list);
+                            ldlm_error_t *err, cfs_list_t *work_list);
 
 /* ldlm_inodebits.c */
 int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags,
                                 int first_enq, ldlm_error_t *err,
-                                struct list_head *work_list);
+                                cfs_list_t *work_list);
 
 /* l_lock.c */
 void l_check_ns_lock(struct ldlm_namespace *ns);
@@ -187,9 +188,10 @@ static inline struct ldlm_extent *
 ldlm_interval_extent(struct ldlm_interval *node)
 {
         struct ldlm_lock *lock;
-        LASSERT(!list_empty(&node->li_group));
+        LASSERT(!cfs_list_empty(&node->li_group));
 
-        lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy);
+        lock = cfs_list_entry(node->li_group.next, struct ldlm_lock,
+                              l_sl_policy);
         return &lock->l_policy_data.l_extent;
 }
 
@@ -210,9 +212,9 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
                 struct ldlm_pool *pl = data;                                \
                 type tmp;                                                   \
                                                                             \
-                spin_lock(&pl->pl_lock);                                    \
+                cfs_spin_lock(&pl->pl_lock);                                \
                 tmp = pl->pl_##var;                                         \
-                spin_unlock(&pl->pl_lock);                                  \
+                cfs_spin_unlock(&pl->pl_lock);                              \
                                                                             \
                 return lprocfs_rd_uint(page, start, off, count, eof, &tmp); \
         }                                                                   \
@@ -232,9 +234,9 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
                         return rc;                                          \
                 }                                                           \
                                                                             \
-                spin_lock(&pl->pl_lock);                                    \
+                cfs_spin_lock(&pl->pl_lock);                                \
                 pl->pl_##var = tmp;                                         \
-                spin_unlock(&pl->pl_lock);                                  \
+                cfs_spin_unlock(&pl->pl_lock);                              \
                                                                             \
                 return rc;                                                  \
         }                                                                   \
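
The same one-for-one forwarding pattern covers the locking and atomic calls renamed in the hunks above and throughout the remainder of the patch; non-Linux ports (winnt, the userspace posix build) supply their own definitions under the same cfs_ names. A hedged sketch of the assumed Linux mapping:

    /* Sketch only: assumed Linux aliases for the renamed lock/atomic API. */
    #include <linux/spinlock.h>
    #include <asm/atomic.h>

    typedef spinlock_t cfs_spinlock_t;
    typedef atomic_t   cfs_atomic_t;

    #define cfs_spin_lock_init(lock)  spin_lock_init(lock)
    #define cfs_spin_lock(lock)       spin_lock(lock)
    #define cfs_spin_unlock(lock)     spin_unlock(lock)
    #define cfs_spin_lock_bh(lock)    spin_lock_bh(lock)
    #define cfs_spin_unlock_bh(lock)  spin_unlock_bh(lock)

    #define cfs_atomic_read(a)        atomic_read(a)
    #define cfs_atomic_set(a, v)      atomic_set(a, v)
    #define cfs_atomic_inc(a)         atomic_inc(a)
    #define cfs_atomic_dec(a)         atomic_dec(a)
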
index 2de9fea..350f3a9 100644
@@ -80,18 +80,19 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
                 }
         }
 
-        spin_lock(&imp->imp_lock);
-        list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
+        cfs_spin_lock(&imp->imp_lock);
+        cfs_list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
                 if (obd_uuid_equals(uuid, &item->oic_uuid)) {
                         if (priority) {
-                                list_del(&item->oic_item);
-                                list_add(&item->oic_item, &imp->imp_conn_list);
+                                cfs_list_del(&item->oic_item);
+                                cfs_list_add(&item->oic_item,
+                                             &imp->imp_conn_list);
                                 item->oic_last_attempt = 0;
                         }
                         CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
                                imp, imp->imp_obd->obd_name, uuid->uuid,
                                (priority ? ", moved to head" : ""));
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
                         GOTO(out_free, rc = 0);
                 }
         }
@@ -101,18 +102,19 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
                 imp_conn->oic_uuid = *uuid;
                 imp_conn->oic_last_attempt = 0;
                 if (priority)
-                        list_add(&imp_conn->oic_item, &imp->imp_conn_list);
+                        cfs_list_add(&imp_conn->oic_item, &imp->imp_conn_list);
                 else
-                        list_add_tail(&imp_conn->oic_item, &imp->imp_conn_list);
+                        cfs_list_add_tail(&imp_conn->oic_item,
+                                          &imp->imp_conn_list);
                 CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
                        imp, imp->imp_obd->obd_name, uuid->uuid,
                        (priority ? "head" : "tail"));
         } else {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 GOTO(out_free, rc = -ENOENT);
         }
 
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         RETURN(0);
 out_free:
         if (imp_conn)
@@ -140,13 +142,13 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
         int rc = -ENOENT;
         ENTRY;
 
-        spin_lock(&imp->imp_lock);
-        if (list_empty(&imp->imp_conn_list)) {
+        cfs_spin_lock(&imp->imp_lock);
+        if (cfs_list_empty(&imp->imp_conn_list)) {
                 LASSERT(!imp->imp_connection);
                 GOTO(out, rc);
         }
 
-        list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
+        cfs_list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
                 if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
                         continue;
                 LASSERT(imp_conn->oic_conn);
@@ -173,7 +175,7 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
                         }
                 }
 
-                list_del(&imp_conn->oic_item);
+                cfs_list_del(&imp_conn->oic_item);
                 ptlrpc_connection_put(imp_conn->oic_conn);
                 OBD_FREE(imp_conn, sizeof(*imp_conn));
                 CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
@@ -182,7 +184,7 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
                 break;
         }
 out:
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         if (rc == -ENOENT)
                 CERROR("connection %s not found\n", uuid->uuid);
         RETURN(rc);
@@ -262,8 +264,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
                 RETURN(-EINVAL);
         }
 
-        init_rwsem(&cli->cl_sem);
-        sema_init(&cli->cl_mgc_sem, 1);
+        cfs_init_rwsem(&cli->cl_sem);
+        cfs_sema_init(&cli->cl_mgc_sem, 1);
         cli->cl_conn_count = 0;
         memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
                min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
@@ -273,8 +275,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
         cli->cl_avail_grant = 0;
         /* FIXME: should limit this for the sum of all cl_dirty_max */
         cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
-        if (cli->cl_dirty_max >> CFS_PAGE_SHIFT > num_physpages / 8)
-                cli->cl_dirty_max = num_physpages << (CFS_PAGE_SHIFT - 3);
+        if (cli->cl_dirty_max >> CFS_PAGE_SHIFT > cfs_num_physpages / 8)
+                cli->cl_dirty_max = cfs_num_physpages << (CFS_PAGE_SHIFT - 3);
         CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters);
         CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list);
         CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -284,14 +286,14 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
         cli->cl_r_in_flight = 0;
         cli->cl_w_in_flight = 0;
 
-        spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
-        spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
-        spin_lock_init(&cli->cl_read_page_hist.oh_lock);
-        spin_lock_init(&cli->cl_write_page_hist.oh_lock);
-        spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
-        spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
+        cfs_spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
+        cfs_spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
+        cfs_spin_lock_init(&cli->cl_read_page_hist.oh_lock);
+        cfs_spin_lock_init(&cli->cl_write_page_hist.oh_lock);
+        cfs_spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
+        cfs_spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
         cfs_waitq_init(&cli->cl_destroy_waitq);
-        atomic_set(&cli->cl_destroy_in_flight, 0);
+        cfs_atomic_set(&cli->cl_destroy_in_flight, 0);
 #ifdef ENABLE_CHECKSUM
         /* Turn on checksumming by default. */
         cli->cl_checksum = 1;
@@ -302,8 +304,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
          */
         cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
 #endif
-        atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
-        atomic_set(&cli->cl_quota_resends, CLIENT_QUOTA_DEFAULT_RESENDS);
+        cfs_atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
+        cfs_atomic_set(&cli->cl_quota_resends, CLIENT_QUOTA_DEFAULT_RESENDS);
 
         /* This value may be changed at connect time in
            ptlrpc_connect_interpret. */
@@ -312,11 +314,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 
         if (!strcmp(name, LUSTRE_MDC_NAME)) {
                 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
-        } else if (num_physpages >> (20 - CFS_PAGE_SHIFT) <= 128 /* MB */) {
+        } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 128 /* MB */) {
                 cli->cl_max_rpcs_in_flight = 2;
-        } else if (num_physpages >> (20 - CFS_PAGE_SHIFT) <= 256 /* MB */) {
+        } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 256 /* MB */) {
                 cli->cl_max_rpcs_in_flight = 3;
-        } else if (num_physpages >> (20 - CFS_PAGE_SHIFT) <= 512 /* MB */) {
+        } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 512 /* MB */) {
                 cli->cl_max_rpcs_in_flight = 4;
         } else {
                 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
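
A note on the thresholds above: num_physpages (now cfs_num_physpages) counts pages, and shifting it right by (20 - CFS_PAGE_SHIFT) converts the page count to megabytes, because 2^20 bytes per MB divided by 2^CFS_PAGE_SHIFT bytes per page leaves 2^(20 - CFS_PAGE_SHIFT) pages per MB. With 4 KiB pages (CFS_PAGE_SHIFT = 12) the shift is 8, i.e. 256 pages per MB, so hosts up to 128 MB get 2 RPCs in flight, scaling up to OSC_MAX_RIF_DEFAULT on larger hosts. The renamed symbol itself is presumably a plain re-export:

    /* Sketch only: cfs_num_physpages assumed to re-export the kernel's
     * num_physpages page counter on Linux. */
    #define cfs_num_physpages  num_physpages
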
@@ -359,9 +361,9 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
                         CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
                                name, obddev->obd_name,
                                cli->cl_target_uuid.uuid);
-                        spin_lock(&imp->imp_lock);
+                        cfs_spin_lock(&imp->imp_lock);
                         imp->imp_deactive = 1;
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
                 }
         }
 
@@ -412,7 +414,7 @@ int client_connect_import(const struct lu_env *env,
         ENTRY;
 
         *exp = NULL;
-        down_write(&cli->cl_sem);
+        cfs_down_write(&cli->cl_sem);
         if (cli->cl_conn_count > 0)
                 GOTO(out_sem, rc = -EALREADY);
 
@@ -461,7 +463,7 @@ out_ldlm:
                 *exp = NULL;
         }
 out_sem:
-        up_write(&cli->cl_sem);
+        cfs_up_write(&cli->cl_sem);
 
         return rc;
 }
@@ -483,7 +485,7 @@ int client_disconnect_export(struct obd_export *exp)
         cli = &obd->u.cli;
         imp = cli->cl_import;
 
-        down_write(&cli->cl_sem);
+        cfs_down_write(&cli->cl_sem);
         CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
                cli->cl_conn_count);
 
@@ -500,9 +502,9 @@ int client_disconnect_export(struct obd_export *exp)
         /* Mark import deactivated now, so we don't try to reconnect if any
          * of the cleanup RPCs fails (e.g. ldlm cancel, etc).  We don't
          * fully deactivate the import, or that would drop all requests. */
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         imp->imp_deactive = 1;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         /* Some non-replayable imports (MDS's OSCs) are pinged, so just
          * delete it regardless.  (It's safe to delete an import that was
@@ -521,9 +523,9 @@ int client_disconnect_export(struct obd_export *exp)
          * there's no need to hold the sem while disconnecting an import,
          * and in fact doing so may cause a deadlock in gss.
          */
-        up_write(&cli->cl_sem);
+        cfs_up_write(&cli->cl_sem);
         rc = ptlrpc_disconnect_import(imp, 0);
-        down_write(&cli->cl_sem);
+        cfs_down_write(&cli->cl_sem);
 
         ptlrpc_invalidate_import(imp);
 
@@ -543,7 +545,7 @@ int client_disconnect_export(struct obd_export *exp)
         if (!rc && err)
                 rc = err;
 
-        up_write(&cli->cl_sem);
+        cfs_up_write(&cli->cl_sem);
 
         RETURN(rc);
 }
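
client_connect_import() and client_disconnect_export() serialize on cl_sem through the renamed read-write semaphore calls, and client_obd_setup() initializes cl_mgc_sem via cfs_sema_init(); these too look like direct forwards on Linux. A minimal sketch under that assumption (the semaphore header moved between kernel versions, as noted in the comment):

    /* Sketch only: assumed Linux mapping for the renamed semaphore API. */
    #include <linux/rwsem.h>
    #include <linux/semaphore.h>  /* <asm/semaphore.h> on older kernels */

    typedef struct rw_semaphore cfs_rw_semaphore_t;
    typedef struct semaphore    cfs_semaphore_t;

    #define cfs_init_rwsem(sem)    init_rwsem(sem)
    #define cfs_down_read(sem)     down_read(sem)
    #define cfs_up_read(sem)       up_read(sem)
    #define cfs_down_write(sem)    down_write(sem)
    #define cfs_up_write(sem)      up_write(sem)
    #define cfs_sema_init(sem, v)  sema_init(sem, v)
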
@@ -563,21 +565,21 @@ int server_disconnect_export(struct obd_export *exp)
                 ldlm_cancel_locks_for_export(exp);
 
         /* complete all outstanding replies */
-        spin_lock(&exp->exp_lock);
-        while (!list_empty(&exp->exp_outstanding_replies)) {
+        cfs_spin_lock(&exp->exp_lock);
+        while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
                 struct ptlrpc_reply_state *rs =
-                        list_entry(exp->exp_outstanding_replies.next,
-                                   struct ptlrpc_reply_state, rs_exp_list);
+                        cfs_list_entry(exp->exp_outstanding_replies.next,
+                                       struct ptlrpc_reply_state, rs_exp_list);
                 struct ptlrpc_service *svc = rs->rs_service;
 
-                spin_lock(&svc->srv_lock);
-                list_del_init(&rs->rs_exp_list);
-                spin_lock(&rs->rs_lock);
+                cfs_spin_lock(&svc->srv_lock);
+                cfs_list_del_init(&rs->rs_exp_list);
+                cfs_spin_lock(&rs->rs_lock);
                 ptlrpc_schedule_difficult_reply(rs);
-                spin_unlock(&rs->rs_lock);
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&rs->rs_lock);
+                cfs_spin_unlock(&svc->srv_lock);
         }
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
 
         /* release nid stat reference */
         lprocfs_exp_cleanup(exp);
@@ -634,9 +636,9 @@ void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
         CDEBUG(D_RPCTRACE, "%s: committing for initial connect of %s\n",
                obd->obd_name, exp->exp_client_uuid.uuid);
 
-        spin_lock(&exp->exp_lock);
+        cfs_spin_lock(&exp->exp_lock);
         exp->exp_need_sync = 0;
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
         class_export_cb_put(exp);
 }
 EXPORT_SYMBOL(target_client_add_cb);
@@ -814,9 +816,9 @@ int target_handle_connect(struct ptlrpc_request *req)
                 class_export_put(export);
                 export = NULL;
         } else {
-                spin_lock(&export->exp_lock);
+                cfs_spin_lock(&export->exp_lock);
                 export->exp_connecting = 1;
-                spin_unlock(&export->exp_lock);
+                cfs_spin_unlock(&export->exp_lock);
                 class_export_put(export);
                 LASSERT(export->exp_obd == target);
 
@@ -828,25 +830,25 @@ int target_handle_connect(struct ptlrpc_request *req)
 no_export:
                 OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_CONNECT, 2 * obd_timeout);
         } else if (req->rq_export == NULL &&
-                   atomic_read(&export->exp_rpc_count) > 0) {
+                   cfs_atomic_read(&export->exp_rpc_count) > 0) {
                 CWARN("%s: refuse connection from %s/%s to 0x%p/%d\n",
                       target->obd_name, cluuid.uuid,
                       libcfs_nid2str(req->rq_peer.nid),
-                      export, atomic_read(&export->exp_refcount));
+                      export, cfs_atomic_read(&export->exp_refcount));
                 GOTO(out, rc = -EBUSY);
         } else if (req->rq_export != NULL &&
-                   (atomic_read(&export->exp_rpc_count) > 1)) {
+                   (cfs_atomic_read(&export->exp_rpc_count) > 1)) {
                 /* the current connect rpc has increased exp_rpc_count */
                 CWARN("%s: refuse reconnection from %s@%s to 0x%p/%d\n",
                       target->obd_name, cluuid.uuid,
                       libcfs_nid2str(req->rq_peer.nid),
-                      export, atomic_read(&export->exp_rpc_count) - 1);
-                spin_lock(&export->exp_lock);
+                      export, cfs_atomic_read(&export->exp_rpc_count) - 1);
+                cfs_spin_lock(&export->exp_lock);
                 if (req->rq_export->exp_conn_cnt <
                     lustre_msg_get_conn_cnt(req->rq_reqmsg))
                         /* try to abort active requests */
                         req->rq_export->exp_abort_active_req = 1;
-                spin_unlock(&export->exp_lock);
+                cfs_spin_unlock(&export->exp_lock);
                 GOTO(out, rc = -EBUSY);
         } else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
                 CERROR("%s: NID %s (%s) reconnected with 1 conn_cnt; "
@@ -901,7 +903,8 @@ no_export:
                                "%d clients in recovery for "CFS_TIME_T"s\n",
                                target->obd_name,
                                libcfs_nid2str(req->rq_peer.nid), cluuid.uuid,
-                               atomic_read(&target->obd_lock_replay_clients),
+                               cfs_atomic_read(&target-> \
+                                               obd_lock_replay_clients),
                                cfs_duration_sec(t));
                         rc = -EBUSY;
                 } else {
@@ -947,9 +950,9 @@ dont_check_exports:
 
         req->rq_export = export;
 
-        spin_lock(&export->exp_lock);
+        cfs_spin_lock(&export->exp_lock);
         if (export->exp_conn_cnt >= lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
-                spin_unlock(&export->exp_lock);
+                cfs_spin_unlock(&export->exp_lock);
                 CERROR("%s: %s already connected at higher conn_cnt: %d > %d\n",
                        cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
                        export->exp_conn_cnt,
@@ -964,19 +967,19 @@ dont_check_exports:
         /* request from liblustre?  Don't evict it for not pinging. */
         if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
                 export->exp_libclient = 1;
-                spin_unlock(&export->exp_lock);
+                cfs_spin_unlock(&export->exp_lock);
 
-                spin_lock(&target->obd_dev_lock);
-                list_del_init(&export->exp_obd_chain_timed);
-                spin_unlock(&target->obd_dev_lock);
+                cfs_spin_lock(&target->obd_dev_lock);
+                cfs_list_del_init(&export->exp_obd_chain_timed);
+                cfs_spin_unlock(&target->obd_dev_lock);
         } else {
-                spin_unlock(&export->exp_lock);
+                cfs_spin_unlock(&export->exp_lock);
         }
 
         if (export->exp_connection != NULL) {
                 /* Check to see if connection came from another NID */
                 if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
-                    !hlist_unhashed(&export->exp_nid_hash))
+                    !cfs_hlist_unhashed(&export->exp_nid_hash))
                         cfs_hash_del(export->exp_obd->obd_nid_hash,
                                      &export->exp_connection->c_peer.nid,
                                      &export->exp_nid_hash);
@@ -987,19 +990,19 @@ dont_check_exports:
         export->exp_connection = ptlrpc_connection_get(req->rq_peer,
                                                        req->rq_self,
                                                        &remote_uuid);
-        if (hlist_unhashed(&export->exp_nid_hash)) {
+        if (cfs_hlist_unhashed(&export->exp_nid_hash)) {
                 cfs_hash_add_unique(export->exp_obd->obd_nid_hash,
                                     &export->exp_connection->c_peer.nid,
                                     &export->exp_nid_hash);
         }
 
-        spin_lock_bh(&target->obd_processing_task_lock);
+        cfs_spin_lock_bh(&target->obd_processing_task_lock);
         if (target->obd_recovering && !export->exp_in_recovery) {
-                spin_lock(&export->exp_lock);
+                cfs_spin_lock(&export->exp_lock);
                 export->exp_in_recovery = 1;
                 export->exp_req_replay_needed = 1;
                 export->exp_lock_replay_needed = 1;
-                spin_unlock(&export->exp_lock);
+                cfs_spin_unlock(&export->exp_lock);
                 if ((lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_TRANSNO)
                      && (data->ocd_transno == 0))
                         CWARN("Connect with zero transno!\n");
@@ -1008,13 +1011,13 @@ dont_check_exports:
                      && data->ocd_transno < target->obd_next_recovery_transno)
                         target->obd_next_recovery_transno = data->ocd_transno;
                 target->obd_connected_clients++;
-                atomic_inc(&target->obd_req_replay_clients);
-                atomic_inc(&target->obd_lock_replay_clients);
+                cfs_atomic_inc(&target->obd_req_replay_clients);
+                cfs_atomic_inc(&target->obd_lock_replay_clients);
                 if (target->obd_connected_clients ==
                     target->obd_max_recoverable_clients)
                         cfs_waitq_signal(&target->obd_next_transno_waitq);
         }
-        spin_unlock_bh(&target->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&target->obd_processing_task_lock);
         tmp = req_capsule_client_get(&req->rq_pill, &RMF_CONN);
         conn = *tmp;
 
@@ -1061,9 +1064,9 @@ dont_check_exports:
         class_import_put(revimp);
 out:
         if (export) {
-                spin_lock(&export->exp_lock);
+                cfs_spin_lock(&export->exp_lock);
                 export->exp_connecting = 0;
-                spin_unlock(&export->exp_lock);
+                cfs_spin_unlock(&export->exp_lock);
         }
         if (targref)
                 class_decref(targref, __FUNCTION__, cfs_current());
@@ -1094,10 +1097,10 @@ void target_destroy_export(struct obd_export *exp)
         if (exp->exp_imp_reverse != NULL)
                 client_destroy_import(exp->exp_imp_reverse);
 
-        LASSERT(atomic_read(&exp->exp_locks_count) == 0);
-        LASSERT(atomic_read(&exp->exp_rpc_count) == 0);
-        LASSERT(atomic_read(&exp->exp_cb_count) == 0);
-        LASSERT(atomic_read(&exp->exp_replay_count) == 0);
+        LASSERT(cfs_atomic_read(&exp->exp_locks_count) == 0);
+        LASSERT(cfs_atomic_read(&exp->exp_rpc_count) == 0);
+        LASSERT(cfs_atomic_read(&exp->exp_cb_count) == 0);
+        LASSERT(cfs_atomic_read(&exp->exp_replay_count) == 0);
 }
 
 /*
@@ -1106,20 +1109,20 @@ void target_destroy_export(struct obd_export *exp)
 static void target_request_copy_get(struct ptlrpc_request *req)
 {
         class_export_rpc_get(req->rq_export);
-        LASSERT(list_empty(&req->rq_list));
+        LASSERT(cfs_list_empty(&req->rq_list));
         CFS_INIT_LIST_HEAD(&req->rq_replay_list);
         /* increase refcount to keep request in queue */
-        LASSERT(atomic_read(&req->rq_refcount));
-        atomic_inc(&req->rq_refcount);
+        LASSERT(cfs_atomic_read(&req->rq_refcount));
+        cfs_atomic_inc(&req->rq_refcount);
         /** let export know it has replays to be handled */
-        atomic_inc(&req->rq_export->exp_replay_count);
+        cfs_atomic_inc(&req->rq_export->exp_replay_count);
 }
 
 static void target_request_copy_put(struct ptlrpc_request *req)
 {
-        LASSERT(list_empty(&req->rq_replay_list));
-        LASSERT(atomic_read(&req->rq_export->exp_replay_count) > 0);
-        atomic_dec(&req->rq_export->exp_replay_count);
+        LASSERT(cfs_list_empty(&req->rq_replay_list));
+        LASSERT(cfs_atomic_read(&req->rq_export->exp_replay_count) > 0);
+        cfs_atomic_dec(&req->rq_export->exp_replay_count);
         class_export_rpc_put(req->rq_export);
         ptlrpc_server_drop_request(req);
 }
@@ -1133,9 +1136,9 @@ static int target_exp_enqueue_req_replay(struct ptlrpc_request *req)
 
         LASSERT(exp);
 
-        spin_lock(&exp->exp_lock);
-        list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
-                            rq_replay_list) {
+        cfs_spin_lock(&exp->exp_lock);
+        cfs_list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
+                                rq_replay_list) {
                 if (lustre_msg_get_transno(reqiter->rq_reqmsg) == transno) {
                         dup = 1;
                         break;
@@ -1149,21 +1152,22 @@ static int target_exp_enqueue_req_replay(struct ptlrpc_request *req)
                         CERROR("invalid flags %x of resent replay\n",
                                lustre_msg_get_flags(req->rq_reqmsg));
         } else {
-                list_add_tail(&req->rq_replay_list, &exp->exp_req_replay_queue);
+                cfs_list_add_tail(&req->rq_replay_list,
+                                  &exp->exp_req_replay_queue);
         }
 
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
         return dup;
 }
 
 static void target_exp_dequeue_req_replay(struct ptlrpc_request *req)
 {
-        LASSERT(!list_empty(&req->rq_replay_list));
+        LASSERT(!cfs_list_empty(&req->rq_replay_list));
         LASSERT(req->rq_export);
 
-        spin_lock(&req->rq_export->exp_lock);
-        list_del_init(&req->rq_replay_list);
-        spin_unlock(&req->rq_export->exp_lock);
+        cfs_spin_lock(&req->rq_export->exp_lock);
+        cfs_list_del_init(&req->rq_replay_list);
+        cfs_spin_unlock(&req->rq_export->exp_lock);
 }
 
 #ifdef __KERNEL__
@@ -1174,19 +1178,21 @@ static void target_finish_recovery(struct obd_device *obd)
                       obd->obd_name);
 
         ldlm_reprocess_all_ns(obd->obd_namespace);
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        if (!list_empty(&obd->obd_req_replay_queue) ||
-            !list_empty(&obd->obd_lock_replay_queue) ||
-            !list_empty(&obd->obd_final_req_queue)) {
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        if (!cfs_list_empty(&obd->obd_req_replay_queue) ||
+            !cfs_list_empty(&obd->obd_lock_replay_queue) ||
+            !cfs_list_empty(&obd->obd_final_req_queue)) {
                 CERROR("%s: Recovery queues ( %s%s%s) are not empty\n",
                        obd->obd_name,
-                       list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
-                       list_empty(&obd->obd_lock_replay_queue) ? "" : "lock ",
-                       list_empty(&obd->obd_final_req_queue) ? "" : "final ");
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                       cfs_list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
+                       cfs_list_empty(&obd->obd_lock_replay_queue) ? \
+                               "" : "lock ",
+                       cfs_list_empty(&obd->obd_final_req_queue) ? \
+                               "" : "final ");
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 LBUG();
         }
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 
         obd->obd_recovery_end = cfs_time_current_sec();
 
@@ -1203,13 +1209,13 @@ static void target_finish_recovery(struct obd_device *obd)
 static void abort_req_replay_queue(struct obd_device *obd)
 {
         struct ptlrpc_request *req, *n;
-        struct list_head abort_list;
+        cfs_list_t abort_list;
 
         CFS_INIT_LIST_HEAD(&abort_list);
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        list_splice_init(&obd->obd_req_replay_queue, &abort_list);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
-        list_for_each_entry_safe(req, n, &abort_list, rq_list) {
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list) {
                 DEBUG_REQ(D_WARNING, req, "aborted:");
                 req->rq_status = -ENOTCONN;
                 if (ptlrpc_error(req)) {
@@ -1224,13 +1230,13 @@ static void abort_req_replay_queue(struct obd_device *obd)
 static void abort_lock_replay_queue(struct obd_device *obd)
 {
         struct ptlrpc_request *req, *n;
-        struct list_head abort_list;
+        cfs_list_t abort_list;
 
         CFS_INIT_LIST_HEAD(&abort_list);
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
-        list_for_each_entry_safe(req, n, &abort_list, rq_list){
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list){
                 DEBUG_REQ(D_ERROR, req, "aborted:");
                 req->rq_status = -ENOTCONN;
                 if (ptlrpc_error(req)) {
@@ -1254,34 +1260,34 @@ static void abort_lock_replay_queue(struct obd_device *obd)
 void target_cleanup_recovery(struct obd_device *obd)
 {
         struct ptlrpc_request *req, *n;
-        struct list_head clean_list;
+        cfs_list_t clean_list;
         ENTRY;
 
         CFS_INIT_LIST_HEAD(&clean_list);
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         if (!obd->obd_recovering) {
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 EXIT;
                 return;
         }
         obd->obd_recovering = obd->obd_abort_recovery = 0;
         target_cancel_recovery_timer(obd);
 
-        list_splice_init(&obd->obd_req_replay_queue, &clean_list);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 
-        list_for_each_entry_safe(req, n, &clean_list, rq_list) {
+        cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
                 LASSERT(req->rq_reply_state == 0);
                 target_exp_dequeue_req_replay(req);
                 target_request_copy_put(req);
         }
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
-        list_splice_init(&obd->obd_final_req_queue, &clean_list);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
+        cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 
-        list_for_each_entry_safe(req, n, &clean_list, rq_list){
+        cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list){
                 LASSERT(req->rq_reply_state == 0);
                 target_request_copy_put(req);
         }
@@ -1304,9 +1310,9 @@ static void reset_recovery_timer(struct obd_device *obd, int duration,
         cfs_time_t now = cfs_time_current_sec();
         cfs_duration_t left;
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         if (!obd->obd_recovering || obd->obd_abort_recovery) {
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 return;
         }
 
@@ -1333,23 +1339,23 @@ static void reset_recovery_timer(struct obd_device *obd, int duration,
                 left = cfs_time_sub(obd->obd_recovery_end, now);
                 cfs_timer_arm(&obd->obd_recovery_timer, cfs_time_shift(left));
         }
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
                obd->obd_name, (unsigned)left);
 }
 
 static void check_and_start_recovery_timer(struct obd_device *obd)
 {
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         if (cfs_timer_is_armed(&obd->obd_recovery_timer)) {
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 return;
         }
         CDEBUG(D_HA, "%s: starting recovery timer\n", obd->obd_name);
         obd->obd_recovery_start = cfs_time_current_sec();
         /* minimum */
         obd->obd_recovery_timeout = OBD_RECOVERY_FACTOR * obd_timeout;
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 
         reset_recovery_timer(obd, obd->obd_recovery_timeout, 0);
 }
@@ -1405,13 +1411,13 @@ static inline int exp_connect_healthy(struct obd_export *exp)
 static inline int exp_req_replay_healthy(struct obd_export *exp)
 {
         return (!exp->exp_req_replay_needed ||
-                atomic_read(&exp->exp_replay_count) > 0);
+                cfs_atomic_read(&exp->exp_replay_count) > 0);
 }
 /** if export done lock_replay or has replay in queue */
 static inline int exp_lock_replay_healthy(struct obd_export *exp)
 {
         return (!exp->exp_lock_replay_needed ||
-                atomic_read(&exp->exp_replay_count) > 0);
+                cfs_atomic_read(&exp->exp_replay_count) > 0);
 }
 
 static inline int exp_vbr_healthy(struct obd_export *exp)
@@ -1443,18 +1449,18 @@ static int check_for_next_transno(struct obd_device *obd)
         int wake_up = 0, connected, completed, queue_len;
         __u64 next_transno, req_transno;
         ENTRY;
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
 
-        if (!list_empty(&obd->obd_req_replay_queue)) {
-                req = list_entry(obd->obd_req_replay_queue.next,
-                                 struct ptlrpc_request, rq_list);
+        if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
+                req = cfs_list_entry(obd->obd_req_replay_queue.next,
+                                     struct ptlrpc_request, rq_list);
                 req_transno = lustre_msg_get_transno(req->rq_reqmsg);
         } else {
                 req_transno = 0;
         }
 
         connected = obd->obd_connected_clients;
-        completed = connected - atomic_read(&obd->obd_req_replay_clients);
+        completed = connected - cfs_atomic_read(&obd->obd_req_replay_clients);
         queue_len = obd->obd_requests_queued_for_recovery;
         next_transno = obd->obd_next_recovery_transno;
 
@@ -1469,13 +1475,13 @@ static int check_for_next_transno(struct obd_device *obd)
         } else if (obd->obd_recovery_expired) {
                 CDEBUG(D_HA, "waking for expired recovery\n");
                 wake_up = 1;
-        } else if (atomic_read(&obd->obd_req_replay_clients) == 0) {
+        } else if (cfs_atomic_read(&obd->obd_req_replay_clients) == 0) {
                 CDEBUG(D_HA, "waking for completed recovery\n");
                 wake_up = 1;
         } else if (req_transno == next_transno) {
                 CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
                 wake_up = 1;
-        } else if (queue_len == atomic_read(&obd->obd_req_replay_clients)) {
+        } else if (queue_len == cfs_atomic_read(&obd->obd_req_replay_clients)) {
                 int d_lvl = D_HA;
                 /** handle gaps that occurred due to a lost reply or VBR */
                 LASSERTF(req_transno >= next_transno,
@@ -1499,7 +1505,7 @@ static int check_for_next_transno(struct obd_device *obd)
                 obd->obd_next_recovery_transno = req_transno;
                 wake_up = 1;
         }
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         return wake_up;
 }
 
@@ -1507,11 +1513,11 @@ static int check_for_next_lock(struct obd_device *obd)
 {
         int wake_up = 0;
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        if (!list_empty(&obd->obd_lock_replay_queue)) {
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
                 CDEBUG(D_HA, "waking for next lock\n");
                 wake_up = 1;
-        } else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
+        } else if (cfs_atomic_read(&obd->obd_lock_replay_clients) == 0) {
                 CDEBUG(D_HA, "waking for completed lock replay\n");
                 wake_up = 1;
         } else if (obd->obd_abort_recovery) {
@@ -1521,7 +1527,7 @@ static int check_for_next_lock(struct obd_device *obd)
                 CDEBUG(D_HA, "waking for expired recovery\n");
                 wake_up = 1;
         }
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 
         return wake_up;
 }
@@ -1539,11 +1545,11 @@ static int target_recovery_overseer(struct obd_device *obd,
 
         do {
                 cfs_wait_event(obd->obd_next_transno_waitq, check_routine(obd));
-                spin_lock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                 abort = obd->obd_abort_recovery;
                 expired = obd->obd_recovery_expired;
                 obd->obd_recovery_expired = 0;
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 if (abort) {
                         CWARN("recovery is aborted, evict exports in recovery\n");
                         /** evict exports which didn't finish recovery yet */
@@ -1554,9 +1560,9 @@ static int target_recovery_overseer(struct obd_device *obd,
                        /** evict client exports with no replay in queue, they are stalled */
                         class_disconnect_stale_exports(obd, health_check);
                         /** continue with VBR */
-                        spin_lock_bh(&obd->obd_processing_task_lock);
+                        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                         obd->obd_version_recov = 1;
-                        spin_unlock_bh(&obd->obd_processing_task_lock);
+                        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                         /**
                          * reset timer, recovery will proceed with versions now,
                          * timeout is set just to handle reconnection delays
@@ -1583,17 +1589,17 @@ static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
                 abort_lock_replay_queue(obd);
         }
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        if (!list_empty(&obd->obd_req_replay_queue)) {
-                req = list_entry(obd->obd_req_replay_queue.next,
-                                 struct ptlrpc_request, rq_list);
-                list_del_init(&req->rq_list);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
+                req = cfs_list_entry(obd->obd_req_replay_queue.next,
+                                     struct ptlrpc_request, rq_list);
+                cfs_list_del_init(&req->rq_list);
                 obd->obd_requests_queued_for_recovery--;
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         } else {
-                spin_unlock_bh(&obd->obd_processing_task_lock);
-                LASSERT(list_empty(&obd->obd_req_replay_queue));
-                LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+                LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
+                LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
                 /** evict exports failed VBR */
                 class_disconnect_stale_exports(obd, exp_vbr_healthy);
         }
@@ -1609,16 +1615,16 @@ static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
                                      exp_lock_replay_healthy))
                 abort_lock_replay_queue(obd);
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        if (!list_empty(&obd->obd_lock_replay_queue)) {
-                req = list_entry(obd->obd_lock_replay_queue.next,
-                                 struct ptlrpc_request, rq_list);
-                list_del_init(&req->rq_list);
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
+                req = cfs_list_entry(obd->obd_lock_replay_queue.next,
+                                     struct ptlrpc_request, rq_list);
+                cfs_list_del_init(&req->rq_list);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         } else {
-                spin_unlock_bh(&obd->obd_processing_task_lock);
-                LASSERT(list_empty(&obd->obd_lock_replay_queue));
-                LASSERT(atomic_read(&obd->obd_lock_replay_clients) == 0);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+                LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
+                LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients) == 0);
                 /** evict exports failed VBR */
                 class_disconnect_stale_exports(obd, exp_vbr_healthy);
         }
@@ -1629,18 +1635,18 @@ static struct ptlrpc_request *target_next_final_ping(struct obd_device *obd)
 {
         struct ptlrpc_request *req = NULL;
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        if (!list_empty(&obd->obd_final_req_queue)) {
-                req = list_entry(obd->obd_final_req_queue.next,
-                                 struct ptlrpc_request, rq_list);
-                list_del_init(&req->rq_list);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        if (!cfs_list_empty(&obd->obd_final_req_queue)) {
+                req = cfs_list_entry(obd->obd_final_req_queue.next,
+                                     struct ptlrpc_request, rq_list);
+                cfs_list_del_init(&req->rq_list);
                 if (req->rq_export->exp_in_recovery) {
-                        spin_lock(&req->rq_export->exp_lock);
+                        cfs_spin_lock(&req->rq_export->exp_lock);
                         req->rq_export->exp_in_recovery = 0;
-                        spin_unlock(&req->rq_export->exp_lock);
+                        cfs_spin_unlock(&req->rq_export->exp_lock);
                 }
         }
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         return req;
 }
 
@@ -1721,7 +1727,7 @@ static int target_recovery_thread(void *arg)
         trd->trd_processing_task = cfs_curproc_pid();
 
         obd->obd_recovering = 1;
-        complete(&trd->trd_starting);
+        cfs_complete(&trd->trd_starting);
 
         /* first of all, we have to know the first transno to replay */
         if (target_recovery_overseer(obd, check_for_clients,
@@ -1734,7 +1740,7 @@ static int target_recovery_thread(void *arg)
         delta = jiffies;
         obd->obd_req_replaying = 1;
         CDEBUG(D_INFO, "1: request replay stage - %d clients from t"LPU64"\n",
-               atomic_read(&obd->obd_req_replay_clients),
+               cfs_atomic_read(&obd->obd_req_replay_clients),
                obd->obd_next_recovery_transno);
         while ((req = target_next_replay_req(obd))) {
                 LASSERT(trd->trd_processing_task == cfs_curproc_pid());
@@ -1747,9 +1753,9 @@ static int target_recovery_thread(void *arg)
                  * bz18031: increase next_recovery_transno before
                  * target_request_copy_put() drops the exp_rpc reference
                  */
-                spin_lock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                 obd->obd_next_recovery_transno++;
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 target_exp_dequeue_req_replay(req);
                 target_request_copy_put(req);
                 obd->obd_replayed_requests++;
@@ -1759,7 +1765,7 @@ static int target_recovery_thread(void *arg)
          * The second stage: replay locks
          */
         CDEBUG(D_INFO, "2: lock replay stage - %d clients\n",
-               atomic_read(&obd->obd_lock_replay_clients));
+               cfs_atomic_read(&obd->obd_lock_replay_clients));
         while ((req = target_next_replay_lock(obd))) {
                 LASSERT(trd->trd_processing_task == cfs_curproc_pid());
                 DEBUG_REQ(D_HA, req, "processing lock from %s: ",
@@ -1779,10 +1785,10 @@ static int target_recovery_thread(void *arg)
         lut_boot_epoch_update(lut);
         /* We drop the recovering flag to forward all new requests
          * to regular mds_handle() from now on */
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         obd->obd_recovering = obd->obd_abort_recovery = 0;
         target_cancel_recovery_timer(obd);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         while ((req = target_next_final_ping(obd))) {
                 LASSERT(trd->trd_processing_task == cfs_curproc_pid());
                 DEBUG_REQ(D_HA, req, "processing final ping from %s: ",
@@ -1792,7 +1798,7 @@ static int target_recovery_thread(void *arg)
                 target_request_copy_put(req);
         }
 
-        delta = (jiffies - delta) / HZ;
+        delta = (jiffies - delta) / CFS_HZ;
         CDEBUG(D_INFO,"4: recovery completed in %lus - %d/%d reqs/locks\n",
               delta, obd->obd_replayed_requests, obd->obd_replayed_locks);
         if (delta > obd_timeout * OBD_RECOVERY_FACTOR) {
@@ -1804,7 +1810,7 @@ static int target_recovery_thread(void *arg)
 
         lu_context_fini(&env.le_ctx);
         trd->trd_processing_task = 0;
-        complete(&trd->trd_finishing);
+        cfs_complete(&trd->trd_finishing);
         RETURN(rc);
 }
 
@@ -1816,12 +1822,12 @@ static int target_start_recovery_thread(struct lu_target *lut,
         struct target_recovery_data *trd = &obd->obd_recovery_data;
 
         memset(trd, 0, sizeof(*trd));
-        init_completion(&trd->trd_starting);
-        init_completion(&trd->trd_finishing);
+        cfs_init_completion(&trd->trd_starting);
+        cfs_init_completion(&trd->trd_finishing);
         trd->trd_recovery_handler = handler;
 
-        if (kernel_thread(target_recovery_thread, lut, 0) > 0) {
-                wait_for_completion(&trd->trd_starting);
+        if (cfs_kernel_thread(target_recovery_thread, lut, 0) > 0) {
+                cfs_wait_for_completion(&trd->trd_starting);
                 LASSERT(obd->obd_recovering != 0);
         } else
                 rc = -ECHILD;
@@ -1831,7 +1837,7 @@ static int target_start_recovery_thread(struct lu_target *lut,
 
 void target_stop_recovery_thread(struct obd_device *obd)
 {
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         if (obd->obd_recovery_data.trd_processing_task > 0) {
                 struct target_recovery_data *trd = &obd->obd_recovery_data;
                /** recovery may be done, but post-recovery is not finished yet */
@@ -1840,10 +1846,10 @@ void target_stop_recovery_thread(struct obd_device *obd)
                         obd->obd_abort_recovery = 1;
                         cfs_waitq_signal(&obd->obd_next_transno_waitq);
                 }
-                spin_unlock_bh(&obd->obd_processing_task_lock);
-                wait_for_completion(&trd->trd_finishing);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_wait_for_completion(&trd->trd_finishing);
         } else {
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         }
 }
 
@@ -1860,14 +1866,14 @@ static void target_recovery_expired(unsigned long castmeharder)
         struct obd_device *obd = (struct obd_device *)castmeharder;
         CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
                " after %lds (%d clients connected)\n",
-               obd->obd_name, atomic_read(&obd->obd_lock_replay_clients),
+               obd->obd_name, cfs_atomic_read(&obd->obd_lock_replay_clients),
               cfs_time_current_sec() - obd->obd_recovery_start,
                obd->obd_connected_clients);
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         obd->obd_recovery_expired = 1;
         cfs_waitq_signal(&obd->obd_next_transno_waitq);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 }
 
 void target_recovery_init(struct lu_target *lut, svc_handler_t handler)
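
Finally, the recovery-thread lifecycle converted above rounds out the pattern: completion primitives (cfs_init_completion(), cfs_complete(), cfs_wait_for_completion()), thread creation (cfs_kernel_thread()), and the CFS_HZ divisor in the elapsed-time calculation. On Linux all of these are presumably direct aliases as well:

    /* Sketch only: assumed Linux aliases for the thread/completion API. */
    #include <linux/completion.h>
    #include <linux/sched.h>

    typedef struct completion cfs_completion_t;

    #define CFS_HZ                             HZ
    #define cfs_init_completion(c)             init_completion(c)
    #define cfs_complete(c)                    complete(c)
    #define cfs_wait_for_completion(c)         wait_for_completion(c)
    #define cfs_kernel_thread(fn, arg, flags)  kernel_thread(fn, arg, flags)
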
@@ -1903,28 +1909,30 @@ static int target_process_req_flags(struct obd_device *obd,
         LASSERT(exp != NULL);
         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
                 /* client declares he's ready to replay locks */
-                spin_lock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                 if (exp->exp_req_replay_needed) {
-                        LASSERT(atomic_read(&obd->obd_req_replay_clients) > 0);
-                        spin_lock(&exp->exp_lock);
+                        LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) >
+                                0);
+                        cfs_spin_lock(&exp->exp_lock);
                         exp->exp_req_replay_needed = 0;
-                        spin_unlock(&exp->exp_lock);
-                        atomic_dec(&obd->obd_req_replay_clients);
+                        cfs_spin_unlock(&exp->exp_lock);
+                        cfs_atomic_dec(&obd->obd_req_replay_clients);
                 }
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         }
         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
                 /* client declares he's ready to complete recovery
                  * so, we put the request on the final queue */
-                spin_lock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                 if (exp->exp_lock_replay_needed) {
-                        LASSERT(atomic_read(&obd->obd_lock_replay_clients) > 0);
-                        spin_lock(&exp->exp_lock);
+                        LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients) >
+                                0);
+                        cfs_spin_lock(&exp->exp_lock);
                         exp->exp_lock_replay_needed = 0;
-                        spin_unlock(&exp->exp_lock);
-                        atomic_dec(&obd->obd_lock_replay_clients);
+                        cfs_spin_unlock(&exp->exp_lock);
+                        cfs_atomic_dec(&obd->obd_lock_replay_clients);
                 }
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         }
 
         return 0;
@@ -1933,7 +1941,7 @@ static int target_process_req_flags(struct obd_device *obd,
 int target_queue_recovery_request(struct ptlrpc_request *req,
                                   struct obd_device *obd)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         int inserted = 0;
         __u64 transno = lustre_msg_get_transno(req->rq_reqmsg);
         ENTRY;
@@ -1950,34 +1958,35 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                  * so, we put the request on the final queue */
                 target_request_copy_get(req);
                 DEBUG_REQ(D_HA, req, "queue final req");
-                spin_lock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                 cfs_waitq_signal(&obd->obd_next_transno_waitq);
                 if (obd->obd_recovering) {
-                        list_add_tail(&req->rq_list, &obd->obd_final_req_queue);
+                        cfs_list_add_tail(&req->rq_list,
+                                          &obd->obd_final_req_queue);
                 } else {
-                        spin_unlock_bh(&obd->obd_processing_task_lock);
+                        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                         target_request_copy_put(req);
                         RETURN(obd->obd_stopping ? -ENOTCONN : 1);
                 }
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 RETURN(0);
         }
         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
                 /* client declares he's ready to replay locks */
                 target_request_copy_get(req);
                 DEBUG_REQ(D_HA, req, "queue lock replay req");
-                spin_lock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                 cfs_waitq_signal(&obd->obd_next_transno_waitq);
                 LASSERT(obd->obd_recovering);
                 /* usually due to recovery abort */
                 if (!req->rq_export->exp_in_recovery) {
-                        spin_unlock_bh(&obd->obd_processing_task_lock);
+                        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                         target_request_copy_put(req);
                         RETURN(-ENOTCONN);
                 }
                 LASSERT(req->rq_export->exp_lock_replay_needed);
-                list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 RETURN(0);
         }
 
@@ -1991,7 +2000,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                 RETURN(1);
         }
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
 
         /* If we're processing the queue, we don't want to queue this
          * message.
@@ -2007,39 +2016,39 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
               obd->obd_next_recovery_transno, transno, obd->obd_req_replaying);
         if (transno < obd->obd_next_recovery_transno && obd->obd_req_replaying) {
                 /* Processing the queue right now, don't re-add. */
-                LASSERT(list_empty(&req->rq_list));
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                LASSERT(cfs_list_empty(&req->rq_list));
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 RETURN(1);
         }
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 
         if (OBD_FAIL_CHECK(OBD_FAIL_TGT_REPLAY_DROP))
                 RETURN(0);
 
         target_request_copy_get(req);
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         LASSERT(obd->obd_recovering);
         if (!req->rq_export->exp_in_recovery) {
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 target_request_copy_put(req);
                 RETURN(-ENOTCONN);
         }
         LASSERT(req->rq_export->exp_req_replay_needed);
 
         if (target_exp_enqueue_req_replay(req)) {
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 DEBUG_REQ(D_ERROR, req, "dropping resent queued req");
                 target_request_copy_put(req);
                 RETURN(0);
         }
 
         /* XXX O(n^2) */
-        list_for_each(tmp, &obd->obd_req_replay_queue) {
+        cfs_list_for_each(tmp, &obd->obd_req_replay_queue) {
                 struct ptlrpc_request *reqiter =
-                        list_entry(tmp, struct ptlrpc_request, rq_list);
+                        cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
 
                 if (lustre_msg_get_transno(reqiter->rq_reqmsg) > transno) {
-                        list_add_tail(&req->rq_list, &reqiter->rq_list);
+                        cfs_list_add_tail(&req->rq_list, &reqiter->rq_list);
                         inserted = 1;
                         break;
                 }
@@ -2048,7 +2057,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                              transno)) {
                         DEBUG_REQ(D_ERROR, req, "dropping replay: transno "
                                   "has been claimed by another client");
-                        spin_unlock_bh(&obd->obd_processing_task_lock);
+                        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                         target_exp_dequeue_req_replay(req);
                         target_request_copy_put(req);
                         RETURN(0);
@@ -2056,11 +2065,11 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
         }
 
         if (!inserted)
-                list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
+                cfs_list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
 
         obd->obd_requests_queued_for_recovery++;
         cfs_waitq_signal(&obd->obd_next_transno_waitq);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         RETURN(0);
 }
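
A note on the replay-queue hunk above: requests are kept sorted by transno, and
each arrival walks the queue until it finds the first entry with a larger
transno -- hence the "XXX O(n^2)" remark, since n replayed requests can cost n
list walks each. A minimal userspace sketch of that insert-in-order pattern
(toy names and a hand-rolled circular list, not the real cfs_list_t API):

    #include <stdio.h>

    struct req {
            unsigned long long transno;
            struct req *prev, *next;        /* circular list with sentinel */
    };

    /* Insert before the first queued request with a larger transno,
     * falling back to the tail -- the same shape as the loop above. */
    static void queue_replay(struct req *head, struct req *r)
    {
            struct req *it;

            for (it = head->next; it != head; it = it->next)
                    if (it->transno > r->transno)
                            break;          /* insert in front of "it" */

            r->prev = it->prev;
            r->next = it;
            it->prev->next = r;
            it->prev = r;
    }

    int main(void)
    {
            struct req head = { 0, &head, &head };
            struct req a = { 7 }, b = { 3 }, c = { 5 };
            struct req *it;

            queue_replay(&head, &a);
            queue_replay(&head, &b);
            queue_replay(&head, &c);
            for (it = head.next; it != &head; it = it->next)
                    printf("%llu\n", it->transno);  /* prints 3 5 7 */
            return 0;
    }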
 
@@ -2088,10 +2097,10 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
          */
         obd = req->rq_export->exp_obd;
 
-        read_lock(&obd->obd_pool_lock);
+        cfs_read_lock(&obd->obd_pool_lock);
         lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv);
         lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
-        read_unlock(&obd->obd_pool_lock);
+        cfs_read_unlock(&obd->obd_pool_lock);
 
         RETURN(0);
 }
@@ -2148,8 +2157,8 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
         LASSERT (!rs->rs_handled);
         LASSERT (!rs->rs_on_net);
         LASSERT (rs->rs_export == NULL);
-        LASSERT (list_empty(&rs->rs_obd_list));
-        LASSERT (list_empty(&rs->rs_exp_list));
+        LASSERT (cfs_list_empty(&rs->rs_obd_list));
+        LASSERT (cfs_list_empty(&rs->rs_exp_list));
 
         exp = class_export_get (req->rq_export);
         obd = exp->exp_obd;
@@ -2162,25 +2171,25 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
         rs->rs_export    = exp;
         rs->rs_opc       = lustre_msg_get_opc(rs->rs_msg);
 
-        spin_lock(&exp->exp_uncommitted_replies_lock);
+        cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
         CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
                rs->rs_transno, exp->exp_last_committed);
         if (rs->rs_transno > exp->exp_last_committed) {
                 /* not committed already */
-                list_add_tail(&rs->rs_obd_list,
-                              &exp->exp_uncommitted_replies);
+                cfs_list_add_tail(&rs->rs_obd_list,
+                                  &exp->exp_uncommitted_replies);
         }
-        spin_unlock (&exp->exp_uncommitted_replies_lock);
+        cfs_spin_unlock (&exp->exp_uncommitted_replies_lock);
 
-        spin_lock(&exp->exp_lock);
-        list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_lock(&exp->exp_lock);
+        cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
+        cfs_spin_unlock(&exp->exp_lock);
 
         netrc = target_send_reply_msg (req, rc, fail_id);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
 
-        atomic_inc(&svc->srv_n_difficult_replies);
+        cfs_atomic_inc(&svc->srv_n_difficult_replies);
 
         if (netrc != 0) {
                 /* error sending: reply is off the net.  Also we need +1
@@ -2190,22 +2199,22 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
                  * reply_out_callback leaves alone) */
                 rs->rs_on_net = 0;
                 ptlrpc_rs_addref(rs);
-                atomic_inc (&svc->srv_outstanding_replies);
+                cfs_atomic_inc (&svc->srv_outstanding_replies);
         }
 
-        spin_lock(&rs->rs_lock);
+        cfs_spin_lock(&rs->rs_lock);
         if (rs->rs_transno <= exp->exp_last_committed ||
             (!rs->rs_on_net && !rs->rs_no_ack) ||
-             list_empty(&rs->rs_exp_list) ||     /* completed already */
-             list_empty(&rs->rs_obd_list)) {
+             cfs_list_empty(&rs->rs_exp_list) ||     /* completed already */
+             cfs_list_empty(&rs->rs_obd_list)) {
                 CDEBUG(D_HA, "Schedule reply immediately\n");
                 ptlrpc_dispatch_difficult_reply(rs);
         } else {
-                list_add (&rs->rs_list, &svc->srv_active_replies);
+                cfs_list_add (&rs->rs_list, &svc->srv_active_replies);
                 rs->rs_scheduled = 0;           /* allow notifier to schedule */
         }
-        spin_unlock(&rs->rs_lock);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&rs->rs_lock);
+        cfs_spin_unlock(&svc->srv_lock);
         EXIT;
 }
 
@@ -2300,9 +2309,9 @@ int target_handle_dqacq_callback(struct ptlrpc_request *req)
                 GOTO(out, rc);
         }
 
-        down_read(&obt->obt_rwsem);
+        cfs_down_read(&obt->obt_rwsem);
         if (qctxt->lqc_lqs_hash == NULL) {
-                up_read(&obt->obt_rwsem);
+                cfs_up_read(&obt->obt_rwsem);
                 /* quota_type has not been processed yet, return EAGAIN
                  * until we know whether or not quotas are supposed to
                  * be enabled */
@@ -2315,7 +2324,7 @@ int target_handle_dqacq_callback(struct ptlrpc_request *req)
         LASSERT(qctxt->lqc_handler);
         rc = qctxt->lqc_handler(master_obd, qdata,
                                 lustre_msg_get_opc(req->rq_reqmsg));
-        up_read(&obt->obt_rwsem);
+        cfs_up_read(&obt->obt_rwsem);
         if (rc && rc != -EDQUOT)
                 CDEBUG(rc == -EBUSY  ? D_QUOTA : D_ERROR,
                        "dqacq/dqrel failed! (rc:%d)\n", rc);
@@ -2484,15 +2493,15 @@ EXPORT_SYMBOL(ldlm_errno2error);
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
 void ldlm_dump_export_locks(struct obd_export *exp)
 {
-        spin_lock(&exp->exp_locks_list_guard);
-        if (!list_empty(&exp->exp_locks_list)) {
+        cfs_spin_lock(&exp->exp_locks_list_guard);
+        if (!cfs_list_empty(&exp->exp_locks_list)) {
             struct ldlm_lock *lock;
 
             CERROR("dumping locks for export %p,"
                    "ignore if the unmount doesn't hang\n", exp);
-            list_for_each_entry(lock, &exp->exp_locks_list, l_exp_refs_link)
+            cfs_list_for_each_entry(lock, &exp->exp_locks_list, l_exp_refs_link)
                 ldlm_lock_dump(D_ERROR, lock, 0);
         }
-        spin_unlock(&exp->exp_locks_list_guard);
+        cfs_spin_unlock(&exp->exp_locks_list_guard);
 }
 #endif
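
Every change in the file above is the same mechanical substitution the commit
promises: spin_lock(), atomic_inc(), list_add() and friends become their
cfs_-prefixed libcfs equivalents, with long lines re-wrapped where the extra
four characters overflow 80 columns. On Linux the cfs_ layer is meant to be a
thin veneer over the native kernel primitives; a hedged userspace analogue of
that wrapper idea (pthread mutexes standing in for spinlocks, names chosen
only for illustration):

    #include <pthread.h>

    /* Toy portability shim: cfs_spinlock_t wraps whatever the platform
     * provides.  In a kernel build this is expected to reduce to the
     * native spinlock; pthreads just make the sketch compilable. */
    typedef pthread_mutex_t cfs_spinlock_t;

    #define CFS_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER

    static inline void cfs_spin_lock(cfs_spinlock_t *lock)
    {
            pthread_mutex_lock(lock);
    }

    static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
    {
            pthread_mutex_unlock(lock);
    }

The point of routing every caller through the prefix is that each platform
port can supply its own definitions without the shared code changing again.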
index 096bf9b..a167aca 100644
@@ -132,7 +132,7 @@ void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
  */
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
 {
-        atomic_inc(&lock->l_refc);
+        cfs_atomic_inc(&lock->l_refc);
         return lock;
 }
 
@@ -147,8 +147,8 @@ void ldlm_lock_put(struct ldlm_lock *lock)
         ENTRY;
 
         LASSERT(lock->l_resource != LP_POISON);
-        LASSERT(atomic_read(&lock->l_refc) > 0);
-        if (atomic_dec_and_test(&lock->l_refc)) {
+        LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
+        if (cfs_atomic_dec_and_test(&lock->l_refc)) {
                 struct ldlm_resource *res;
 
                 LDLM_DEBUG(lock,
@@ -156,10 +156,10 @@ void ldlm_lock_put(struct ldlm_lock *lock)
 
                 res = lock->l_resource;
                 LASSERT(lock->l_destroyed);
-                LASSERT(list_empty(&lock->l_res_link));
-                LASSERT(list_empty(&lock->l_pending_chain));
+                LASSERT(cfs_list_empty(&lock->l_res_link));
+                LASSERT(cfs_list_empty(&lock->l_pending_chain));
 
-                atomic_dec(&res->lr_namespace->ns_locks);
+                cfs_atomic_dec(&res->lr_namespace->ns_locks);
                 lu_ref_del(&res->lr_reference, "lock", lock);
                 ldlm_resource_putref(res);
                 lock->l_resource = NULL;
@@ -183,10 +183,10 @@ void ldlm_lock_put(struct ldlm_lock *lock)
 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
 {
         int rc = 0;
-        if (!list_empty(&lock->l_lru)) {
+        if (!cfs_list_empty(&lock->l_lru)) {
                 struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
                 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
-                list_del_init(&lock->l_lru);
+                cfs_list_del_init(&lock->l_lru);
                 LASSERT(ns->ns_nr_unused > 0);
                 ns->ns_nr_unused--;
                 rc = 1;
@@ -199,9 +199,9 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
         struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
         int rc;
         ENTRY;
-        spin_lock(&ns->ns_unused_lock);
+        cfs_spin_lock(&ns->ns_unused_lock);
         rc = ldlm_lock_remove_from_lru_nolock(lock);
-        spin_unlock(&ns->ns_unused_lock);
+        cfs_spin_unlock(&ns->ns_unused_lock);
         EXIT;
         return rc;
 }
@@ -210,9 +210,9 @@ void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
 {
         struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
         lock->l_last_used = cfs_time_current();
-        LASSERT(list_empty(&lock->l_lru));
+        LASSERT(cfs_list_empty(&lock->l_lru));
         LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
-        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+        cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
         LASSERT(ns->ns_nr_unused >= 0);
         ns->ns_nr_unused++;
 }
@@ -221,9 +221,9 @@ void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
 {
         struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
         ENTRY;
-        spin_lock(&ns->ns_unused_lock);
+        cfs_spin_lock(&ns->ns_unused_lock);
         ldlm_lock_add_to_lru_nolock(lock);
-        spin_unlock(&ns->ns_unused_lock);
+        cfs_spin_unlock(&ns->ns_unused_lock);
         EXIT;
 }
 
@@ -231,12 +231,12 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
 {
         struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
         ENTRY;
-        spin_lock(&ns->ns_unused_lock);
-        if (!list_empty(&lock->l_lru)) {
+        cfs_spin_lock(&ns->ns_unused_lock);
+        if (!cfs_list_empty(&lock->l_lru)) {
                 ldlm_lock_remove_from_lru_nolock(lock);
                 ldlm_lock_add_to_lru_nolock(lock);
         }
-        spin_unlock(&ns->ns_unused_lock);
+        cfs_spin_unlock(&ns->ns_unused_lock);
         EXIT;
 }
 
@@ -255,21 +255,21 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
                 LBUG();
         }
 
-        if (!list_empty(&lock->l_res_link)) {
+        if (!cfs_list_empty(&lock->l_res_link)) {
                 LDLM_ERROR(lock, "lock still on resource");
                 ldlm_lock_dump(D_ERROR, lock, 0);
                 LBUG();
         }
 
         if (lock->l_destroyed) {
-                LASSERT(list_empty(&lock->l_lru));
+                LASSERT(cfs_list_empty(&lock->l_lru));
                 EXIT;
                 return 0;
         }
         lock->l_destroyed = 1;
 
         if (lock->l_export && lock->l_export->exp_lock_hash &&
-            !hlist_unhashed(&lock->l_exp_hash))
+            !cfs_hlist_unhashed(&lock->l_exp_hash))
                 cfs_hash_del(lock->l_export->exp_lock_hash,
                              &lock->l_remote_handle, &lock->l_exp_hash);
 
@@ -343,11 +343,11 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
         if (lock == NULL)
                 RETURN(NULL);
 
-        spin_lock_init(&lock->l_lock);
+        cfs_spin_lock_init(&lock->l_lock);
         lock->l_resource = ldlm_resource_getref(resource);
         lu_ref_add(&resource->lr_reference, "lock", lock);
 
-        atomic_set(&lock->l_refc, 2);
+        cfs_atomic_set(&lock->l_refc, 2);
         CFS_INIT_LIST_HEAD(&lock->l_res_link);
         CFS_INIT_LIST_HEAD(&lock->l_lru);
         CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
@@ -360,12 +360,12 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
         CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
         CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
 
-        atomic_inc(&resource->lr_namespace->ns_locks);
+        cfs_atomic_inc(&resource->lr_namespace->ns_locks);
         CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
         class_handle_hash(&lock->l_handle, lock_handle_addref);
 
         CFS_INIT_LIST_HEAD(&lock->l_extents_list);
-        spin_lock_init(&lock->l_extents_list_lock);
+        cfs_spin_lock_init(&lock->l_extents_list_lock);
         CFS_INIT_LIST_HEAD(&lock->l_cache_locks_list);
         lu_ref_init(&lock->l_reference);
         lu_ref_add(&lock->l_reference, "hash", lock);
@@ -401,7 +401,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
         LASSERT(new_resid->name[0] != 0);
 
         /* This function assumes that the lock isn't on any lists */
-        LASSERT(list_empty(&lock->l_res_link));
+        LASSERT(cfs_list_empty(&lock->l_res_link));
 
         type = oldres->lr_type;
         unlock_res_and_lock(lock);
@@ -416,7 +416,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
          * lock->l_lock, and are taken in the memory address order to avoid
          * dead-locks.
          */
-        spin_lock(&lock->l_lock);
+        cfs_spin_lock(&lock->l_lock);
         oldres = lock->l_resource;
         if (oldres < newres) {
                 lock_res(oldres);
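
The comment in the hunk above deserves a second look: when a lock migrates
between resources, both resource locks must be held, and they are always taken
in memory-address order so two threads locking the same pair cannot deadlock.
A minimal compilable sketch of that discipline (pthreads again; the kernel
code compares the raw pointers directly):

    #include <stdint.h>
    #include <pthread.h>

    /* Take two locks in a single global order (their addresses) so any
     * two threads locking the same pair always agree on who goes first. */
    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a == b) {
                    pthread_mutex_lock(a);
            } else if ((uintptr_t)a < (uintptr_t)b) {
                    pthread_mutex_lock(a);
                    pthread_mutex_lock(b);
            } else {
                    pthread_mutex_lock(b);
                    pthread_mutex_lock(a);
            }
    }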
@@ -543,7 +543,7 @@ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
 }
 
 void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
-                           struct list_head *work_list)
+                           cfs_list_t *work_list)
 {
         if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                 LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
@@ -552,28 +552,28 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                  * discard dirty data, rather than writing back. */
                 if (new->l_flags & LDLM_AST_DISCARD_DATA)
                         lock->l_flags |= LDLM_FL_DISCARD_DATA;
-                LASSERT(list_empty(&lock->l_bl_ast));
-                list_add(&lock->l_bl_ast, work_list);
+                LASSERT(cfs_list_empty(&lock->l_bl_ast));
+                cfs_list_add(&lock->l_bl_ast, work_list);
                 LDLM_LOCK_GET(lock);
                 LASSERT(lock->l_blocking_lock == NULL);
                 lock->l_blocking_lock = LDLM_LOCK_GET(new);
         }
 }
 
-void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
+void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
 {
         if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                 lock->l_flags |= LDLM_FL_CP_REQD;
                 LDLM_DEBUG(lock, "lock granted; sending completion AST.");
-                LASSERT(list_empty(&lock->l_cp_ast));
-                list_add(&lock->l_cp_ast, work_list);
+                LASSERT(cfs_list_empty(&lock->l_cp_ast));
+                cfs_list_add(&lock->l_cp_ast, work_list);
                 LDLM_LOCK_GET(lock);
         }
 }
 
 /* must be called with lr_lock held */
 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
-                                struct list_head *work_list)
+                            cfs_list_t *work_list)
 {
         ENTRY;
         check_res_locked(lock->l_resource);
@@ -757,9 +757,9 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
 }
 
 struct sl_insert_point {
-        struct list_head *res_link;
-        struct list_head *mode_link;
-        struct list_head *policy_link;
+        cfs_list_t *res_link;
+        cfs_list_t *mode_link;
+        cfs_list_t *policy_link;
 };
 
 /*
@@ -776,19 +776,19 @@ struct sl_insert_point {
  * NOTE: called by
  *  - ldlm_grant_lock_with_skiplist
  */
-static void search_granted_lock(struct list_head *queue,
+static void search_granted_lock(cfs_list_t *queue,
                                 struct ldlm_lock *req,
                                 struct sl_insert_point *prev)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_lock *lock, *mode_end, *policy_end;
         ENTRY;
 
-        list_for_each(tmp, queue) {
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+        cfs_list_for_each(tmp, queue) {
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
-                mode_end = list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
-                                      l_sl_mode);
+                mode_end = cfs_list_entry(lock->l_sl_mode.prev,
+                                          struct ldlm_lock, l_sl_mode);
 
                 if (lock->l_req_mode != req->l_req_mode) {
                         /* jump to last lock of mode group */
@@ -806,9 +806,10 @@ static void search_granted_lock(struct list_head *queue,
                         return;
                 } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                         for (;;) {
-                                policy_end = list_entry(lock->l_sl_policy.prev,
-                                                        struct ldlm_lock,
-                                                        l_sl_policy);
+                                policy_end =
+                                        cfs_list_entry(lock->l_sl_policy.prev,
+                                                       struct ldlm_lock,
+                                                       l_sl_policy);
 
                                 if (lock->l_policy_data.l_inodebits.bits ==
                                     req->l_policy_data.l_inodebits.bits) {
@@ -830,8 +831,8 @@ static void search_granted_lock(struct list_head *queue,
 
                                 /* go to next policy group within mode group */
                                 tmp = policy_end->l_res_link.next;
-                                lock = list_entry(tmp, struct ldlm_lock,
-                                                  l_res_link);
+                                lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                                      l_res_link);
                         }  /* loop over policy groups within the mode group */
 
                         /* insert point is last lock of the mode group,
@@ -873,13 +874,13 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                 return;
         }
 
-        LASSERT(list_empty(&lock->l_res_link));
-        LASSERT(list_empty(&lock->l_sl_mode));
-        LASSERT(list_empty(&lock->l_sl_policy));
+        LASSERT(cfs_list_empty(&lock->l_res_link));
+        LASSERT(cfs_list_empty(&lock->l_sl_mode));
+        LASSERT(cfs_list_empty(&lock->l_sl_policy));
 
-        list_add(&lock->l_res_link, prev->res_link);
-        list_add(&lock->l_sl_mode, prev->mode_link);
-        list_add(&lock->l_sl_policy, prev->policy_link);
+        cfs_list_add(&lock->l_res_link, prev->res_link);
+        cfs_list_add(&lock->l_sl_mode, prev->mode_link);
+        cfs_list_add(&lock->l_sl_policy, prev->policy_link);
 
         EXIT;
 }
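
search_granted_lock() and ldlm_granted_list_add_lock() above maintain the
granted queue in groups: locks of one mode sit together, subdivided by
inodebits policy, and the l_sl_mode/l_sl_policy links let the search jump a
whole group in one step instead of visiting every lock. A toy analogue of the
mode-group jump, assuming a simplified singly linked queue where each group
head points at its tail (all names here are hypothetical):

    #include <stddef.h>

    struct glock {
            int mode;
            struct glock *next;            /* the queue (res_link analogue) */
            struct glock *group_tail;      /* group head -> its last lock */
    };

    /* Return the tail of the group matching "mode" (insert right after it),
     * or NULL when a fresh group belongs at the end of the queue.  Each
     * iteration hops over an entire mode group. */
    static struct glock *find_mode_group(struct glock *queue, int mode)
    {
            struct glock *lock = queue;

            while (lock != NULL) {
                    struct glock *tail =
                            lock->group_tail ? lock->group_tail : lock;

                    if (lock->mode == mode)
                            return tail;
                    lock = tail->next;     /* skip the whole group */
            }
            return NULL;
    }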
@@ -903,7 +904,7 @@ static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
  *
  * must be called with lr_lock held
  */
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
+void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
 {
         struct ldlm_resource *res = lock->l_resource;
         ENTRY;
@@ -930,19 +931,19 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
 
 /* returns a referenced lock or NULL.  See the flag descriptions below, in the
  * comment above ldlm_lock_match */
-static struct ldlm_lock *search_queue(struct list_head *queue,
+static struct ldlm_lock *search_queue(cfs_list_t *queue,
                                       ldlm_mode_t *mode,
                                       ldlm_policy_data_t *policy,
                                       struct ldlm_lock *old_lock,
                                       int flags, int unref)
 {
         struct ldlm_lock *lock;
-        struct list_head *tmp;
+        cfs_list_t       *tmp;
 
-        list_for_each(tmp, queue) {
+        cfs_list_for_each(tmp, queue) {
                 ldlm_mode_t match;
 
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (lock == old_lock)
                         break;
@@ -1323,10 +1324,10 @@ out:
 }
 
 /* Must be called with namespace taken: queue is waiting or converting. */
-int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
-                         struct list_head *work_list)
+int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
+                         cfs_list_t *work_list)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         ldlm_processing_policy policy;
         int flags;
         int rc = LDLM_ITER_CONTINUE;
@@ -1338,9 +1339,9 @@ int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
         policy = ldlm_processing_policy_table[res->lr_type];
         LASSERT(policy);
 
-        list_for_each_safe(tmp, pos, queue) {
+        cfs_list_for_each_safe(tmp, pos, queue) {
                 struct ldlm_lock *pending;
-                pending = list_entry(tmp, struct ldlm_lock, l_res_link);
+                pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
 
@@ -1374,15 +1375,16 @@ ldlm_send_and_maybe_create_set(struct ldlm_cb_set_arg *arg, int do_create)
 }
 
 static int
-ldlm_work_bl_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_bl_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
 {
         struct ldlm_lock_desc d;
-        struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_bl_ast);
+        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                                l_bl_ast);
         ENTRY;
 
         /* nobody should touch l_bl_ast */
         lock_res_and_lock(lock);
-        list_del_init(&lock->l_bl_ast);
+        cfs_list_del_init(&lock->l_bl_ast);
 
         LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
         LASSERT(lock->l_bl_ast_run == 0);
@@ -1402,9 +1404,9 @@ ldlm_work_bl_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
 }
 
 static int
-ldlm_work_cp_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_cp_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
 {
-        struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_cp_ast);
+        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock, l_cp_ast);
         ldlm_completion_callback completion_callback;
         int rc = 0;
         ENTRY;
@@ -1422,7 +1424,7 @@ ldlm_work_cp_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
 
         /* nobody should touch l_cp_ast */
         lock_res_and_lock(lock);
-        list_del_init(&lock->l_cp_ast);
+        cfs_list_del_init(&lock->l_cp_ast);
         LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
         /* save l_completion_ast since it can be changed by
          * mds_intent_policy(), see bug 14225 */
@@ -1440,13 +1442,14 @@ ldlm_work_cp_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
 }
 
 static int
-ldlm_work_revoke_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_revoke_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
 {
         struct ldlm_lock_desc desc;
-        struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_rk_ast);
+        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                                l_rk_ast);
         ENTRY;
 
-        list_del_init(&lock->l_rk_ast);
+        cfs_list_del_init(&lock->l_rk_ast);
 
         /* the desc just pretends to be exclusive */
         ldlm_lock2desc(lock, &desc);
@@ -1459,21 +1462,21 @@ ldlm_work_revoke_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
         RETURN(1);
 }
 
-int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type)
+int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type)
 {
         struct ldlm_cb_set_arg arg;
-        struct list_head *tmp, *pos;
-        int (*work_ast_lock)(struct list_head *tmp,struct ldlm_cb_set_arg *arg);
+        cfs_list_t *tmp, *pos;
+        int (*work_ast_lock)(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg);
         int ast_count;
         ENTRY;
 
-        if (list_empty(rpc_list))
+        if (cfs_list_empty(rpc_list))
                 RETURN(0);
 
         arg.set = ptlrpc_prep_set();
         if (NULL == arg.set)
                 RETURN(-ERESTART);
-        atomic_set(&arg.restart, 0);
+        cfs_atomic_set(&arg.restart, 0);
         switch (ast_type) {
         case LDLM_WORK_BL_AST:
                 arg.type = LDLM_BL_CALLBACK;
@@ -1492,7 +1495,7 @@ int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type)
         }
 
         ast_count = 0;
-        list_for_each_safe(tmp, pos, rpc_list) {
+        cfs_list_for_each_safe(tmp, pos, rpc_list) {
                 ast_count += work_ast_lock(tmp, &arg);
 
                 /* Send the request set if it exceeds the PARALLEL_AST_LIMIT,
@@ -1513,7 +1516,7 @@ int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type)
                  * write memory leaking. */
                 ptlrpc_set_destroy(arg.set);
 
-        RETURN(atomic_read(&arg.restart) ? -ERESTART : 0);
+        RETURN(cfs_atomic_read(&arg.restart) ? -ERESTART : 0);
 }
 
 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
@@ -1524,28 +1527,29 @@ static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
 
 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         int i, rc;
 
         if (ns == NULL)
                 return;
 
         ENTRY;
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         for (i = 0; i < RES_HASH_SIZE; i++) {
                 tmp = ns->ns_hash[i].next;
                 while (tmp != &(ns->ns_hash[i])) {
                         struct ldlm_resource *res =
-                                list_entry(tmp, struct ldlm_resource, lr_hash);
+                                cfs_list_entry(tmp, struct ldlm_resource,
+                                               lr_hash);
 
                         ldlm_resource_getref(res);
-                        spin_unlock(&ns->ns_hash_lock);
+                        cfs_spin_unlock(&ns->ns_hash_lock);
                         LDLM_RESOURCE_ADDREF(res);
 
                         rc = reprocess_one_queue(res, NULL);
 
                         LDLM_RESOURCE_DELREF(res);
-                        spin_lock(&ns->ns_hash_lock);
+                        cfs_spin_lock(&ns->ns_hash_lock);
                         tmp = tmp->next;
                         ldlm_resource_putref_locked(res);
 
@@ -1554,7 +1558,7 @@ void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
                 }
         }
  out:
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
         EXIT;
 }
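
The ldlm_reprocess_all_ns() hunks show the classic pattern for walking a
locked table when the per-item work can sleep: pin the current item with a
reference, drop the table lock, do the work, retake the lock, and only then
step to ->next (the pointer is read while the item is still pinned). A
stripped-down compilable sketch, with the deferred-free details elided:

    #include <pthread.h>
    #include <stddef.h>

    struct item {
            int refs;                      /* protected by table_lock */
            struct item *next;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *table_head;

    static void blocking_work(struct item *it) { (void)it; /* may sleep */ }

    static void reprocess_all(void)
    {
            struct item *it, *next;

            pthread_mutex_lock(&table_lock);
            it = table_head;
            while (it != NULL) {
                    it->refs++;                      /* pin current item */
                    pthread_mutex_unlock(&table_lock);

                    blocking_work(it);               /* lock dropped here */

                    pthread_mutex_lock(&table_lock);
                    next = it->next;                 /* read while pinned */
                    it->refs--;                      /* real put may free at 0 */
                    it = next;
            }
            pthread_mutex_unlock(&table_lock);
    }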
 
@@ -1579,7 +1583,7 @@ void ldlm_reprocess_all(struct ldlm_resource *res)
 
         rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
         if (rc == -ERESTART) {
-                LASSERT(list_empty(&rpc_list));
+                LASSERT(cfs_list_empty(&rpc_list));
                 goto restart;
         }
         EXIT;
@@ -1609,8 +1613,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
             req->l_resource->lr_type != LDLM_IBITS)
                 return;
 
-        list_del_init(&req->l_sl_policy);
-        list_del_init(&req->l_sl_mode);
+        cfs_list_del_init(&req->l_sl_policy);
+        cfs_list_del_init(&req->l_sl_mode);
 }
 
 void ldlm_lock_cancel(struct ldlm_lock *lock)
@@ -1840,7 +1844,7 @@ void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
         }
 
         CDEBUG(level," -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
-               lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+               lock, lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                pos, lock->l_pid);
         if (lock->l_conn_export != NULL)
                 obd = lock->l_conn_export->exp_obd;
@@ -1864,7 +1868,7 @@ void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
         CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
                "write: %d flags: "LPX64"\n", ldlm_lockname[lock->l_req_mode],
                ldlm_lockname[lock->l_granted_mode],
-               atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
+               cfs_atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
                lock->l_flags);
         if (lock->l_resource->lr_type == LDLM_EXTENT)
                 CDEBUG(level, "  Extent: "LPU64" -> "LPU64
@@ -1913,13 +1917,13 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
                        " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" remote: "
                        LPX64" expref: %d pid: %u timeout: %lu\n", lock,
-                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
+                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                 va_end(args);
                 return;
@@ -1934,20 +1938,20 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
                        "] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
                        " expref: %d pid: %u timeout %lu\n",
                        lock->l_resource->lr_namespace->ns_name, lock,
-                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_resource->lr_name.name[0],
                        lock->l_resource->lr_name.name[1],
-                       atomic_read(&lock->l_resource->lr_refcount),
+                       cfs_atomic_read(&lock->l_resource->lr_refcount),
                        ldlm_typename[lock->l_resource->lr_type],
                        lock->l_policy_data.l_extent.start,
                        lock->l_policy_data.l_extent.end,
                        lock->l_req_extent.start, lock->l_req_extent.end,
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
+                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                 break;
 
@@ -1959,20 +1963,20 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
                        "["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
                        " expref: %d pid: %u timeout: %lu\n",
                        lock->l_resource->lr_namespace->ns_name, lock,
-                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_resource->lr_name.name[0],
                        lock->l_resource->lr_name.name[1],
-                       atomic_read(&lock->l_resource->lr_refcount),
+                       cfs_atomic_read(&lock->l_resource->lr_refcount),
                        ldlm_typename[lock->l_resource->lr_type],
                        lock->l_policy_data.l_flock.pid,
                        lock->l_policy_data.l_flock.start,
                        lock->l_policy_data.l_flock.end,
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
+                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                 break;
 
@@ -1985,18 +1989,18 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
                        "pid: %u timeout: %lu\n",
                        lock->l_resource->lr_namespace->ns_name,
                        lock, lock->l_handle.h_cookie,
-                       atomic_read (&lock->l_refc),
+                       cfs_atomic_read (&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_resource->lr_name.name[0],
                        lock->l_resource->lr_name.name[1],
                        lock->l_policy_data.l_inodebits.bits,
-                       atomic_read(&lock->l_resource->lr_refcount),
+                       cfs_atomic_read(&lock->l_resource->lr_refcount),
                        ldlm_typename[lock->l_resource->lr_type],
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
+                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                 break;
 
@@ -2008,17 +2012,17 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
                        "remote: "LPX64" expref: %d pid: %u timeout %lu\n",
                        lock->l_resource->lr_namespace->ns_name,
                        lock, lock->l_handle.h_cookie,
-                       atomic_read (&lock->l_refc),
+                       cfs_atomic_read (&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_resource->lr_name.name[0],
                        lock->l_resource->lr_name.name[1],
-                       atomic_read(&lock->l_resource->lr_refcount),
+                       cfs_atomic_read(&lock->l_resource->lr_refcount),
                        ldlm_typename[lock->l_resource->lr_type],
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
+                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                 break;
         }
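
The hunks in the file above add a type rename on top of the function renames:
struct list_head becomes cfs_list_t, and list_entry() becomes
cfs_list_entry(). A hedged sketch of what such a portability typedef
plausibly looks like on a platform without the kernel's list.h (the Linux
build would simply alias the native type):

    #include <stddef.h>

    /* Doubly linked circular list head, libcfs-style. */
    typedef struct cfs_list_head {
            struct cfs_list_head *next, *prev;
    } cfs_list_t;

    /* Map a list node back to the structure embedding it --
     * the usual container_of() offset arithmetic. */
    #define cfs_list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

With the indirection in place, hunks like the search_queue() one above only
needed s/struct list_head/cfs_list_t/ plus the matching macro rename.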
index 7ff6bd8..78375dd 100644
@@ -63,7 +63,7 @@ CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
 
 extern cfs_mem_cache_t *ldlm_resource_slab;
 extern cfs_mem_cache_t *ldlm_lock_slab;
-static struct semaphore ldlm_ref_sem;
+static cfs_semaphore_t  ldlm_ref_sem;
 static int ldlm_refcount;
 
 /* LDLM state */
@@ -86,15 +86,15 @@ static inline unsigned int ldlm_get_rq_timeout(void)
 
 #ifdef __KERNEL__
 /* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
-static spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
-static struct list_head waiting_locks_list;
+static cfs_spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
+static cfs_list_t waiting_locks_list;
 static cfs_timer_t waiting_locks_timer;
 
 static struct expired_lock_thread {
         cfs_waitq_t               elt_waitq;
         int                       elt_state;
         int                       elt_dump;
-        struct list_head          elt_expired_locks;
+        cfs_list_t                elt_expired_locks;
 } expired_lock_thread;
 #endif
 
@@ -103,35 +103,35 @@ static struct expired_lock_thread {
 #define ELT_TERMINATE 2
 
 struct ldlm_bl_pool {
-        spinlock_t              blp_lock;
+        cfs_spinlock_t          blp_lock;
 
         /*
          * blp_prio_list is used for callbacks that should be handled
          * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
          * see bug 13843
          */
-        struct list_head        blp_prio_list;
+        cfs_list_t              blp_prio_list;
 
         /*
          * blp_list is used for all other callbacks which are likely
          * to take longer to process.
          */
-        struct list_head        blp_list;
+        cfs_list_t              blp_list;
 
         cfs_waitq_t             blp_waitq;
-        struct completion       blp_comp;
-        atomic_t                blp_num_threads;
-        atomic_t                blp_busy_threads;
+        cfs_completion_t        blp_comp;
+        cfs_atomic_t            blp_num_threads;
+        cfs_atomic_t            blp_busy_threads;
         int                     blp_min_threads;
         int                     blp_max_threads;
 };
 
 struct ldlm_bl_work_item {
-        struct list_head        blwi_entry;
-        struct ldlm_namespace   *blwi_ns;
+        cfs_list_t              blwi_entry;
+        struct ldlm_namespace  *blwi_ns;
         struct ldlm_lock_desc   blwi_ld;
-        struct ldlm_lock        *blwi_lock;
-        struct list_head        blwi_head;
+        struct ldlm_lock       *blwi_lock;
+        cfs_list_t              blwi_head;
         int                     blwi_count;
 };
 
@@ -142,16 +142,16 @@ static inline int have_expired_locks(void)
         int need_to_run;
 
         ENTRY;
-        spin_lock_bh(&waiting_locks_spinlock);
-        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
-        spin_unlock_bh(&waiting_locks_spinlock);
+        cfs_spin_lock_bh(&waiting_locks_spinlock);
+        need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
+        cfs_spin_unlock_bh(&waiting_locks_spinlock);
 
         RETURN(need_to_run);
 }
 
 static int expired_lock_main(void *arg)
 {
-        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
+        cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
         struct l_wait_info lwi = { 0 };
         int do_dump;
 
@@ -167,9 +167,9 @@ static int expired_lock_main(void *arg)
                              expired_lock_thread.elt_state == ELT_TERMINATE,
                              &lwi);
 
-                spin_lock_bh(&waiting_locks_spinlock);
+                cfs_spin_lock_bh(&waiting_locks_spinlock);
                 if (expired_lock_thread.elt_dump) {
-                        spin_unlock_bh(&waiting_locks_spinlock);
+                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
 
                         /* from waiting_locks_callback, but not in timer */
                         libcfs_debug_dumplog();
@@ -177,25 +177,25 @@ static int expired_lock_main(void *arg)
                                                 "waiting_locks_callback",
                                                 expired_lock_thread.elt_dump);
 
-                        spin_lock_bh(&waiting_locks_spinlock);
+                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                         expired_lock_thread.elt_dump = 0;
                 }
 
                 do_dump = 0;
 
-                while (!list_empty(expired)) {
+                while (!cfs_list_empty(expired)) {
                         struct obd_export *export;
                         struct ldlm_lock *lock;
 
-                        lock = list_entry(expired->next, struct ldlm_lock,
+                        lock = cfs_list_entry(expired->next, struct ldlm_lock,
                                           l_pending_chain);
                         if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
                             (void *)lock >= LP_POISON) {
-                                spin_unlock_bh(&waiting_locks_spinlock);
+                                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                                 CERROR("free lock on elt list %p\n", lock);
                                 LBUG();
                         }
-                        list_del_init(&lock->l_pending_chain);
+                        cfs_list_del_init(&lock->l_pending_chain);
                         if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
                             (void *)lock->l_export >= LP_POISON) {
                                 CERROR("lock with free export on elt list %p\n",
@@ -209,7 +209,7 @@ static int expired_lock_main(void *arg)
                                 continue;
                         }
                         export = class_export_lock_get(lock->l_export, lock);
-                        spin_unlock_bh(&waiting_locks_spinlock);
+                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
 
                         do_dump++;
                         class_fail_export(export);
@@ -218,10 +218,10 @@ static int expired_lock_main(void *arg)
                         /* release extra ref grabbed by ldlm_add_waiting_lock()
                          * or ldlm_failed_ast() */
                         LDLM_LOCK_RELEASE(lock);
-                        spin_lock_bh(&waiting_locks_spinlock);
+
+                        cfs_spin_lock_bh(&waiting_locks_spinlock);
                 }
-                spin_unlock_bh(&waiting_locks_spinlock);
+                cfs_spin_unlock_bh(&waiting_locks_spinlock);
 
                 if (do_dump && obd_dump_on_eviction) {
                         CERROR("dump the log upon eviction\n");
@@ -252,15 +252,16 @@ static int ldlm_lock_busy(struct ldlm_lock *lock)
         if (lock->l_export == NULL)
                 return 0;
 
-        spin_lock(&lock->l_export->exp_lock);
-        list_for_each_entry(req, &lock->l_export->exp_queued_rpc, rq_exp_list) {
+        cfs_spin_lock(&lock->l_export->exp_lock);
+        cfs_list_for_each_entry(req, &lock->l_export->exp_queued_rpc,
+                                rq_exp_list) {
                 if (req->rq_ops->hpreq_lock_match) {
                         match = req->rq_ops->hpreq_lock_match(req, lock);
                         if (match)
                                 break;
                 }
         }
-        spin_unlock(&lock->l_export->exp_lock);
+        cfs_spin_unlock(&lock->l_export->exp_lock);
         RETURN(match);
 }
 
@@ -270,11 +271,12 @@ static void waiting_locks_callback(unsigned long unused)
         struct ldlm_lock *lock, *last = NULL;
 
 repeat:
-        spin_lock_bh(&waiting_locks_spinlock);
-        while (!list_empty(&waiting_locks_list)) {
-                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
-                                  l_pending_chain);
-                if (cfs_time_after(lock->l_callback_timeout, cfs_time_current()) ||
+        cfs_spin_lock_bh(&waiting_locks_spinlock);
+        while (!cfs_list_empty(&waiting_locks_list)) {
+                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
+                                      l_pending_chain);
+                if (cfs_time_after(lock->l_callback_timeout,
+                                   cfs_time_current()) ||
                     (lock->l_req_mode == LCK_GROUP))
                         break;
 
@@ -289,8 +291,8 @@ repeat:
                                    lock->l_export->exp_connection->c_remote_uuid.uuid,
                                    libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
 
-                        list_del_init(&lock->l_pending_chain);
-                        spin_unlock_bh(&waiting_locks_spinlock);
+                        cfs_list_del_init(&lock->l_pending_chain);
+                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                         ldlm_add_waiting_lock(lock);
                         goto repeat;
                 }
@@ -305,8 +307,8 @@ repeat:
                                    lock->l_export->exp_connection->c_remote_uuid.uuid,
                                    libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
 
-                        list_del_init(&lock->l_pending_chain);
-                        spin_unlock_bh(&waiting_locks_spinlock);
+                        cfs_list_del_init(&lock->l_pending_chain);
+                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                         ldlm_add_waiting_lock(lock);
                         goto repeat;
                 }
@@ -321,11 +323,11 @@ repeat:
 
                         LDLM_LOCK_GET(lock);
 
-                        spin_unlock_bh(&waiting_locks_spinlock);
+                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
                         LDLM_DEBUG(lock, "prolong the busy lock");
                         ldlm_refresh_waiting_lock(lock,
                                                   ldlm_get_enq_timeout(lock));
-                        spin_lock_bh(&waiting_locks_spinlock);
+                        cfs_spin_lock_bh(&waiting_locks_spinlock);
 
                         if (!cont) {
                                 LDLM_LOCK_RELEASE(lock);
@@ -347,12 +349,12 @@ repeat:
                 /* no need to take an extra ref on the lock since it was in
                  * the waiting_locks_list and ldlm_add_waiting_lock()
                  * already grabbed a ref */
-                list_del(&lock->l_pending_chain);
-                list_add(&lock->l_pending_chain,
-                         &expired_lock_thread.elt_expired_locks);
+                cfs_list_del(&lock->l_pending_chain);
+                cfs_list_add(&lock->l_pending_chain,
+                             &expired_lock_thread.elt_expired_locks);
         }
 
-        if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
+        if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
                 if (obd_dump_on_timeout)
                         expired_lock_thread.elt_dump = __LINE__;
 
@@ -363,14 +365,14 @@ repeat:
          * Make sure the timer will fire again if we have any locks
          * left.
          */
-        if (!list_empty(&waiting_locks_list)) {
+        if (!cfs_list_empty(&waiting_locks_list)) {
                 cfs_time_t timeout_rounded;
-                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
-                                  l_pending_chain);
+                lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
+                                      l_pending_chain);
                 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
                 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
         }
-        spin_unlock_bh(&waiting_locks_spinlock);
+        cfs_spin_unlock_bh(&waiting_locks_spinlock);
 }
 
 /*
@@ -388,7 +390,7 @@ static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
         cfs_time_t timeout;
         cfs_time_t timeout_rounded;
 
-        if (!list_empty(&lock->l_pending_chain))
+        if (!cfs_list_empty(&lock->l_pending_chain))
                 return 0;
 
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
@@ -408,7 +410,8 @@ static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
         }
         /* if the new lock has a shorter timeout than something earlier on
            the list, we'll wait the longer amount of time; no big deal. */
-        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
+        /* FIFO */
+        cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
         return 1;
 }
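
The waiting-lock hunks above share one timer across the whole FIFO:
__ldlm_add_waiting_lock() appends, rounds the deadline to coalesce nearby
expiries, and only re-arms the timer when the new expiry rounds earlier than
the pending one (a shorter timeout queued behind a longer head just waits the
longer time, as the in-tree comment says). A compilable toy version of the
add path, with the actual cfs_timer call reduced to a recorded expiry:

    #include <stddef.h>

    struct waiter {
            long deadline;                 /* absolute, in ticks */
            struct waiter *next;
    };

    static struct waiter *wq_head, *wq_tail;   /* FIFO of waiting locks */
    static long timer_expiry = -1;             /* -1: timer idle */

    static long round_timeout(long t)
    {
            return (t + 3) & ~3L;          /* coalesce into 4-tick buckets */
    }

    static void add_waiting(struct waiter *w)
    {
            long rounded = round_timeout(w->deadline);

            w->next = NULL;                /* FIFO append */
            if (wq_tail != NULL)
                    wq_tail->next = w;
            else
                    wq_head = w;
            wq_tail = w;

            /* Only one timer: re-arm it when this expiry is sooner. */
            if (timer_expiry < 0 || rounded < timer_expiry)
                    timer_expiry = rounded;
    }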
 
@@ -419,10 +422,10 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
 
         LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
 
-        spin_lock_bh(&waiting_locks_spinlock);
+        cfs_spin_lock_bh(&waiting_locks_spinlock);
         if (lock->l_destroyed) {
                 static cfs_time_t next;
-                spin_unlock_bh(&waiting_locks_spinlock);
+                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
                 if (cfs_time_after(cfs_time_current(), next)) {
                         next = cfs_time_shift(14400);
@@ -436,7 +439,7 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
                 /* grab ref on the lock if it has been added to the
                  * waiting list */
                 LDLM_LOCK_GET(lock);
-        spin_unlock_bh(&waiting_locks_spinlock);
+        cfs_spin_unlock_bh(&waiting_locks_spinlock);
 
         LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
                    ret == 0 ? "not re-" : "", timeout,
@@ -455,9 +458,9 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
  */
 static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
 {
-        struct list_head *list_next;
+        cfs_list_t *list_next;
 
-        if (list_empty(&lock->l_pending_chain))
+        if (cfs_list_empty(&lock->l_pending_chain))
                 return 0;
 
         list_next = lock->l_pending_chain.next;
@@ -468,13 +471,13 @@ static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
                         cfs_timer_disarm(&waiting_locks_timer);
                 } else {
                         struct ldlm_lock *next;
-                        next = list_entry(list_next, struct ldlm_lock,
-                                          l_pending_chain);
+                        next = cfs_list_entry(list_next, struct ldlm_lock,
+                                              l_pending_chain);
                         cfs_timer_arm(&waiting_locks_timer,
                                       round_timeout(next->l_callback_timeout));
                 }
         }
-        list_del_init(&lock->l_pending_chain);
+        cfs_list_del_init(&lock->l_pending_chain);
 
         return 1;
 }
@@ -489,9 +492,9 @@ int ldlm_del_waiting_lock(struct ldlm_lock *lock)
                 return 0;
         }
 
-        spin_lock_bh(&waiting_locks_spinlock);
+        cfs_spin_lock_bh(&waiting_locks_spinlock);
         ret = __ldlm_del_waiting_lock(lock);
-        spin_unlock_bh(&waiting_locks_spinlock);
+        cfs_spin_unlock_bh(&waiting_locks_spinlock);
         if (ret)
                 /* release lock ref if it has indeed been removed
                  * from a list */
@@ -514,10 +517,10 @@ int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
                 return 0;
         }
 
-        spin_lock_bh(&waiting_locks_spinlock);
+        cfs_spin_lock_bh(&waiting_locks_spinlock);
 
-        if (list_empty(&lock->l_pending_chain)) {
-                spin_unlock_bh(&waiting_locks_spinlock);
+        if (cfs_list_empty(&lock->l_pending_chain)) {
+                cfs_spin_unlock_bh(&waiting_locks_spinlock);
                 LDLM_DEBUG(lock, "wasn't waiting");
                 return 0;
         }
@@ -526,7 +529,7 @@ int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
          * release/take a lock reference */
         __ldlm_del_waiting_lock(lock);
         __ldlm_add_waiting_lock(lock, timeout);
-        spin_unlock_bh(&waiting_locks_spinlock);
+        cfs_spin_unlock_bh(&waiting_locks_spinlock);
 
         LDLM_DEBUG(lock, "refreshed");
         return 1;
@@ -561,14 +564,15 @@ static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
         if (obd_dump_on_timeout)
                 libcfs_debug_dumplog();
 #ifdef __KERNEL__
-        spin_lock_bh(&waiting_locks_spinlock);
+        cfs_spin_lock_bh(&waiting_locks_spinlock);
         if (__ldlm_del_waiting_lock(lock) == 0)
                 /* the lock was not in any list, grab an extra ref before adding
                  * the lock to the expired list */
                 LDLM_LOCK_GET(lock);
-        list_add(&lock->l_pending_chain, &expired_lock_thread.elt_expired_locks);
+        cfs_list_add(&lock->l_pending_chain,
+                     &expired_lock_thread.elt_expired_locks);
         cfs_waitq_signal(&expired_lock_thread.elt_waitq);
-        spin_unlock_bh(&waiting_locks_spinlock);
+        cfs_spin_unlock_bh(&waiting_locks_spinlock);
 #else
         class_fail_export(lock->l_export);
 #endif
@@ -647,7 +651,7 @@ static int ldlm_cb_interpret(const struct lu_env *env,
         LDLM_LOCK_RELEASE(lock);
 
         if (rc == -ERESTART)
-                atomic_set(&arg->restart, 1);
+                cfs_atomic_set(&arg->restart, 1);
 
         RETURN(0);
 }
@@ -666,7 +670,7 @@ static inline int ldlm_bl_and_cp_ast_fini(struct ptlrpc_request *req,
                 if (rc == 0)
                         /* If we cancelled the lock, we need to restart
                          * ldlm_reprocess_queue */
-                        atomic_set(&arg->restart, 1);
+                        cfs_atomic_set(&arg->restart, 1);
         } else {
                 LDLM_LOCK_GET(lock);
                 ptlrpc_set_add_req(arg->set, req);
@@ -689,13 +693,14 @@ static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
                 RETURN_EXIT;
         }
 
-        spin_lock(&lock->l_export->exp_lock);
-        list_for_each_entry(req, &lock->l_export->exp_queued_rpc, rq_exp_list) {
+        cfs_spin_lock(&lock->l_export->exp_lock);
+        cfs_list_for_each_entry(req, &lock->l_export->exp_queued_rpc,
+                                rq_exp_list) {
                 if (!req->rq_hp && req->rq_ops->hpreq_lock_match &&
                     req->rq_ops->hpreq_lock_match(req, lock))
                         ptlrpc_hpreq_reorder(req);
         }
-        spin_unlock(&lock->l_export->exp_lock);
+        cfs_spin_unlock(&lock->l_export->exp_lock);
         EXIT;
 }
 
@@ -1483,7 +1488,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
                 int to = cfs_time_seconds(1);
                 while (to > 0) {
-                        cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, to);
+                        cfs_schedule_timeout_and_set_state(
+                                CFS_TASK_INTERRUPTIBLE, to);
                         if (lock->l_granted_mode == lock->l_req_mode ||
                             lock->l_destroyed)
                                 break;
@@ -1618,7 +1624,7 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
 #ifdef __KERNEL__
 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
-                             struct list_head *cancels, int count)
+                             cfs_list_t *cancels, int count)
 {
         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
         struct ldlm_bl_work_item *blwi;
@@ -1635,22 +1641,22 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
         if (ld != NULL)
                 blwi->blwi_ld = *ld;
         if (count) {
-                list_add(&blwi->blwi_head, cancels);
-                list_del_init(cancels);
+                cfs_list_add(&blwi->blwi_head, cancels);
+                cfs_list_del_init(cancels);
                 blwi->blwi_count = count;
         } else {
                 blwi->blwi_lock = lock;
         }
-        spin_lock(&blp->blp_lock);
+        cfs_spin_lock(&blp->blp_lock);
         if (lock && lock->l_flags & LDLM_FL_DISCARD_DATA) {
                 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
-                list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
         } else {
                 /* other blocking callbacks are added to the regular list */
-                list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
         }
         cfs_waitq_signal(&blp->blp_waitq);
-        spin_unlock(&blp->blp_lock);
+        cfs_spin_unlock(&blp->blp_lock);
 
         RETURN(0);
 }
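
Here the rename reaches type names: prototypes that took struct list_head * now take cfs_list_t *, and every list helper gains the prefix. A self-contained sketch of the same intrusive-list idiom (the type and helpers below are hand-rolled stand-ins, not the libcfs versions, which on Linux presumably wrap the native list):

#include <stdio.h>

typedef struct cfs_list {
        struct cfs_list *next, *prev;
} cfs_list_t;

#define CFS_LIST_HEAD_INIT(name) { &(name), &(name) }

static void cfs_list_add_tail(cfs_list_t *n, cfs_list_t *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static int cfs_list_empty(const cfs_list_t *head)
{
        return head->next == head;
}

int main(void)
{
        cfs_list_t blp_list = CFS_LIST_HEAD_INIT(blp_list);
        cfs_list_t blwi_entry;         /* embedded in a work item */

        cfs_list_add_tail(&blwi_entry, &blp_list);
        printf("blp_list empty? %d\n", cfs_list_empty(&blp_list));
        return 0;
}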
@@ -1667,7 +1673,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
 }
 
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                           struct list_head *cancels, int count)
+                           cfs_list_t *cancels, int count)
 {
 #ifdef __KERNEL__
         RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count));
@@ -1982,7 +1988,7 @@ static int ldlm_cancel_handler(struct ptlrpc_request *req)
 
 void ldlm_revoke_lock_cb(void *obj, void *data)
 {
-        struct list_head   *rpc_list = data;
+        cfs_list_t         *rpc_list = data;
         struct ldlm_lock   *lock = obj;
 
         lock_res_and_lock(lock);
@@ -2009,10 +2015,10 @@ void ldlm_revoke_lock_cb(void *obj, void *data)
 
         lock->l_flags |= LDLM_FL_AST_SENT;
         if (lock->l_export && lock->l_export->exp_lock_hash &&
-            !hlist_unhashed(&lock->l_exp_hash))
+            !cfs_hlist_unhashed(&lock->l_exp_hash))
                 cfs_hash_del(lock->l_export->exp_lock_hash,
                              &lock->l_remote_handle, &lock->l_exp_hash);
-        list_add_tail(&lock->l_rk_ast, rpc_list);
+        cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
         LDLM_LOCK_GET(lock);
 
         unlock_res_and_lock(lock);
@@ -2020,7 +2026,7 @@ void ldlm_revoke_lock_cb(void *obj, void *data)
 
 void ldlm_revoke_export_locks(struct obd_export *exp)
 {
-        struct list_head  rpc_list;
+        cfs_list_t  rpc_list;
         ENTRY;
 
         CFS_INIT_LIST_HEAD(&rpc_list);
@@ -2037,23 +2043,24 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
         struct ldlm_bl_work_item *blwi = NULL;
         static unsigned int num_bl = 0;
 
-        spin_lock(&blp->blp_lock);
+        cfs_spin_lock(&blp->blp_lock);
         /* process a request from the blp_list at least every blp_num_threads */
-        if (!list_empty(&blp->blp_list) &&
-            (list_empty(&blp->blp_prio_list) || num_bl == 0))
-                blwi = list_entry(blp->blp_list.next,
-                                  struct ldlm_bl_work_item, blwi_entry);
+        if (!cfs_list_empty(&blp->blp_list) &&
+            (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
+                blwi = cfs_list_entry(blp->blp_list.next,
+                                      struct ldlm_bl_work_item, blwi_entry);
         else
-                if (!list_empty(&blp->blp_prio_list))
-                        blwi = list_entry(blp->blp_prio_list.next,
-                                          struct ldlm_bl_work_item, blwi_entry);
+                if (!cfs_list_empty(&blp->blp_prio_list))
+                        blwi = cfs_list_entry(blp->blp_prio_list.next,
+                                              struct ldlm_bl_work_item,
+                                              blwi_entry);
 
         if (blwi) {
-                if (++num_bl >= atomic_read(&blp->blp_num_threads))
+                if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
                         num_bl = 0;
-                list_del(&blwi->blwi_entry);
+                cfs_list_del(&blwi->blwi_entry);
         }
-        spin_unlock(&blp->blp_lock);
+        cfs_spin_unlock(&blp->blp_lock);
 
         return blwi;
 }
@@ -2062,7 +2069,7 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
 struct ldlm_bl_thread_data {
         char                    bltd_name[CFS_CURPROC_COMM_MAX];
         struct ldlm_bl_pool     *bltd_blp;
-        struct completion       bltd_comp;
+        cfs_completion_t        bltd_comp;
         int                     bltd_num;
 };
 
@@ -2073,14 +2080,14 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
         struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
         int rc;
 
-        init_completion(&bltd.bltd_comp);
+        cfs_init_completion(&bltd.bltd_comp);
         rc = cfs_kernel_thread(ldlm_bl_thread_main, &bltd, 0);
         if (rc < 0) {
                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
-                       atomic_read(&blp->blp_num_threads), rc);
+                       cfs_atomic_read(&blp->blp_num_threads), rc);
                 return rc;
         }
-        wait_for_completion(&bltd.bltd_comp);
+        cfs_wait_for_completion(&bltd.bltd_comp);
 
         return 0;
 }
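
struct completion becomes cfs_completion_t with cfs_init_completion, cfs_complete and cfs_wait_for_completion, preserving the handshake in which ldlm_bl_thread_start() waits until the new thread has copied everything it needs off the caller's stack. A userspace analogue built on a condition variable (only the names come from the hunk; the implementation here is assumed):

#include <pthread.h>
#include <stdio.h>

typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
} cfs_completion_t;

static void cfs_init_completion(cfs_completion_t *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
}

static void cfs_complete(cfs_completion_t *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void cfs_wait_for_completion(cfs_completion_t *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static void *thread_main(void *arg)
{
        cfs_complete(arg);             /* done with the caller's stack data */
        return NULL;
}

int main(void)
{
        cfs_completion_t bltd_comp;
        pthread_t t;

        cfs_init_completion(&bltd_comp);
        pthread_create(&t, NULL, thread_main, &bltd_comp);
        cfs_wait_for_completion(&bltd_comp);
        pthread_join(t, NULL);
        printf("thread started safely\n");
        return 0;
}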
@@ -2095,14 +2102,15 @@ static int ldlm_bl_thread_main(void *arg)
 
                 blp = bltd->bltd_blp;
 
-                bltd->bltd_num = atomic_inc_return(&blp->blp_num_threads) - 1;
-                atomic_inc(&blp->blp_busy_threads);
+                bltd->bltd_num =
+                        cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
+                cfs_atomic_inc(&blp->blp_busy_threads);
 
                 snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
                         "ldlm_bl_%02d", bltd->bltd_num);
                 cfs_daemonize(bltd->bltd_name);
 
-                complete(&bltd->bltd_comp);
+                cfs_complete(&bltd->bltd_comp);
                 /* cannot use bltd after this, it is only on caller's stack */
         }
 
@@ -2115,11 +2123,11 @@ static int ldlm_bl_thread_main(void *arg)
                 if (blwi == NULL) {
                         int busy;
 
-                        atomic_dec(&blp->blp_busy_threads);
+                        cfs_atomic_dec(&blp->blp_busy_threads);
                         l_wait_event_exclusive(blp->blp_waitq,
                                          (blwi = ldlm_bl_get_work(blp)) != NULL,
                                          &lwi);
-                        busy = atomic_inc_return(&blp->blp_busy_threads);
+                        busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
 
                         if (blwi->blwi_ns == NULL)
                                 /* added by ldlm_cleanup() */
@@ -2127,7 +2135,7 @@ static int ldlm_bl_thread_main(void *arg)
 
                         /* Not fatal if racy and we have a few too many threads */
                         if (unlikely(busy < blp->blp_max_threads &&
-                                    busy >= atomic_read(&blp->blp_num_threads)))
+                            busy >= cfs_atomic_read(&blp->blp_num_threads)))
                                 /* discard the return value, we tried */
                                 ldlm_bl_thread_start(blp);
                 } else {
@@ -2150,9 +2158,9 @@ static int ldlm_bl_thread_main(void *arg)
                 OBD_FREE(blwi, sizeof(*blwi));
         }
 
-        atomic_dec(&blp->blp_busy_threads);
-        atomic_dec(&blp->blp_num_threads);
-        complete(&blp->blp_comp);
+        cfs_atomic_dec(&blp->blp_busy_threads);
+        cfs_atomic_dec(&blp->blp_num_threads);
+        cfs_complete(&blp->blp_comp);
         RETURN(0);
 }
 
@@ -2165,13 +2173,13 @@ int ldlm_get_ref(void)
 {
         int rc = 0;
         ENTRY;
-        mutex_down(&ldlm_ref_sem);
+        cfs_mutex_down(&ldlm_ref_sem);
         if (++ldlm_refcount == 1) {
                 rc = ldlm_setup();
                 if (rc)
                         ldlm_refcount--;
         }
-        mutex_up(&ldlm_ref_sem);
+        cfs_mutex_up(&ldlm_ref_sem);
 
         RETURN(rc);
 }
@@ -2179,7 +2187,7 @@ int ldlm_get_ref(void)
 void ldlm_put_ref(void)
 {
         ENTRY;
-        mutex_down(&ldlm_ref_sem);
+        cfs_mutex_down(&ldlm_ref_sem);
         if (ldlm_refcount == 1) {
                 int rc = ldlm_cleanup();
                 if (rc)
@@ -2189,7 +2197,7 @@ void ldlm_put_ref(void)
         } else {
                 ldlm_refcount--;
         }
-        mutex_up(&ldlm_ref_sem);
+        cfs_mutex_up(&ldlm_ref_sem);
 
         EXIT;
 }
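
ldlm_refcount stays guarded by down/up semantics, now spelled cfs_mutex_down/cfs_mutex_up. A binary POSIX semaphore gives the same shape in userspace (the wrapper names mirror the hunk; the mapping to sem_t is an assumption of the sketch):

#include <semaphore.h>
#include <stdio.h>

typedef sem_t cfs_semaphore_t;         /* stand-in type */

#define cfs_init_mutex(s)  sem_init((s), 0, 1)   /* count of one */
#define cfs_mutex_down(s)  sem_wait(s)
#define cfs_mutex_up(s)    sem_post(s)

static cfs_semaphore_t ldlm_ref_sem;
static int ldlm_refcount;

int main(void)
{
        cfs_init_mutex(&ldlm_ref_sem);

        cfs_mutex_down(&ldlm_ref_sem);
        ++ldlm_refcount;               /* first ref would trigger ldlm_setup() */
        cfs_mutex_up(&ldlm_ref_sem);

        printf("ldlm_refcount = %d\n", ldlm_refcount);
        return 0;
}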
@@ -2204,41 +2212,41 @@ ldlm_export_lock_hash(cfs_hash_t *hs, void *key, unsigned mask)
 }
 
 static void *
-ldlm_export_lock_key(struct hlist_node *hnode)
+ldlm_export_lock_key(cfs_hlist_node_t *hnode)
 {
         struct ldlm_lock *lock;
         ENTRY;
 
-        lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         RETURN(&lock->l_remote_handle);
 }
 
 static int
-ldlm_export_lock_compare(void *key, struct hlist_node *hnode)
+ldlm_export_lock_compare(void *key, cfs_hlist_node_t *hnode)
 {
         ENTRY;
         RETURN(lustre_handle_equal(ldlm_export_lock_key(hnode), key));
 }
 
 static void *
-ldlm_export_lock_get(struct hlist_node *hnode)
+ldlm_export_lock_get(cfs_hlist_node_t *hnode)
 {
         struct ldlm_lock *lock;
         ENTRY;
 
-        lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         LDLM_LOCK_GET(lock);
 
         RETURN(lock);
 }
 
 static void *
-ldlm_export_lock_put(struct hlist_node *hnode)
+ldlm_export_lock_put(cfs_hlist_node_t *hnode)
 {
         struct ldlm_lock *lock;
         ENTRY;
 
-        lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+        lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         LDLM_LOCK_RELEASE(lock);
 
         RETURN(lock);
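
The export-lock hash callbacks now take cfs_hlist_node_t and use cfs_hlist_entry to step from the embedded node back to the enclosing ldlm_lock, which is the classic container_of pattern. A freestanding demonstration (the node type and demo struct are invented here):

#include <stddef.h>
#include <stdio.h>

typedef struct cfs_hlist_node {
        struct cfs_hlist_node *next;
} cfs_hlist_node_t;

/* container_of: subtract the member offset to recover the struct. */
#define cfs_hlist_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_lock {
        int              l_handle;
        cfs_hlist_node_t l_exp_hash;   /* node embedded in the lock */
};

int main(void)
{
        struct demo_lock lock = { .l_handle = 42 };
        cfs_hlist_node_t *hnode = &lock.l_exp_hash;
        struct demo_lock *back =
                cfs_hlist_entry(hnode, struct demo_lock, l_exp_hash);

        printf("recovered handle %d\n", back->l_handle);
        return 0;
}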
@@ -2348,12 +2356,12 @@ static int ldlm_setup(void)
                 GOTO(out_proc, rc = -ENOMEM);
         ldlm_state->ldlm_bl_pool = blp;
 
-        spin_lock_init(&blp->blp_lock);
+        cfs_spin_lock_init(&blp->blp_lock);
         CFS_INIT_LIST_HEAD(&blp->blp_list);
         CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
         cfs_waitq_init(&blp->blp_waitq);
-        atomic_set(&blp->blp_num_threads, 0);
-        atomic_set(&blp->blp_busy_threads, 0);
+        cfs_atomic_set(&blp->blp_num_threads, 0);
+        cfs_atomic_set(&blp->blp_busy_threads, 0);
         blp->blp_min_threads = ldlm_min_threads;
         blp->blp_max_threads = ldlm_max_threads;
 
@@ -2377,7 +2385,7 @@ static int ldlm_setup(void)
         cfs_waitq_init(&expired_lock_thread.elt_waitq);
 
         CFS_INIT_LIST_HEAD(&waiting_locks_list);
-        spin_lock_init(&waiting_locks_spinlock);
+        cfs_spin_lock_init(&waiting_locks_spinlock);
         cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
 
         rc = cfs_kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FILES);
@@ -2386,8 +2394,8 @@ static int ldlm_setup(void)
                 GOTO(out_thread, rc);
         }
 
-        wait_event(expired_lock_thread.elt_waitq,
-                   expired_lock_thread.elt_state == ELT_READY);
+        cfs_wait_event(expired_lock_thread.elt_waitq,
+                       expired_lock_thread.elt_state == ELT_READY);
 #endif
 
 #ifdef __KERNEL__
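
wait_event on the expired-lock thread's startup becomes cfs_wait_event with the same check-then-sleep contract. In userspace the equivalent rendezvous is a condition-variable loop; a sketch under that assumption (cfs_waitq_t and the macro body are invented):

#include <pthread.h>
#include <stdio.h>

typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
} cfs_waitq_t;

/* Re-evaluate 'condition' on every wakeup, as the kernel macro does. */
#define cfs_wait_event(wq, condition)                              \
        do {                                                       \
                pthread_mutex_lock(&(wq).lock);                    \
                while (!(condition))                               \
                        pthread_cond_wait(&(wq).cond, &(wq).lock); \
                pthread_mutex_unlock(&(wq).lock);                  \
        } while (0)

static cfs_waitq_t elt_waitq = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER
};
static int elt_state = 1;              /* pretend ELT_READY already posted */

int main(void)
{
        cfs_wait_event(elt_waitq, elt_state == 1);
        printf("expired-lock thread is ready\n");
        return 0;
}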
@@ -2420,8 +2428,8 @@ static int ldlm_cleanup(void)
 #endif
         ENTRY;
 
-        if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
-            !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
+        if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
+            !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
                 CERROR("ldlm still has namespaces; clean these up first.\n");
                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
                 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
@@ -2433,17 +2441,17 @@ static int ldlm_cleanup(void)
 #endif
 
 #ifdef __KERNEL__
-        while (atomic_read(&blp->blp_num_threads) > 0) {
+        while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
                 struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
 
-                init_completion(&blp->blp_comp);
+                cfs_init_completion(&blp->blp_comp);
 
-                spin_lock(&blp->blp_lock);
-                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
+                cfs_spin_lock(&blp->blp_lock);
+                cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                 cfs_waitq_signal(&blp->blp_waitq);
-                spin_unlock(&blp->blp_lock);
+                cfs_spin_unlock(&blp->blp_lock);
 
-                wait_for_completion(&blp->blp_comp);
+                cfs_wait_for_completion(&blp->blp_comp);
         }
         OBD_FREE(blp, sizeof(*blp));
 
@@ -2453,8 +2461,8 @@ static int ldlm_cleanup(void)
 
         expired_lock_thread.elt_state = ELT_TERMINATE;
         cfs_waitq_signal(&expired_lock_thread.elt_waitq);
-        wait_event(expired_lock_thread.elt_waitq,
-                   expired_lock_thread.elt_state == ELT_STOPPED);
+        cfs_wait_event(expired_lock_thread.elt_waitq,
+                       expired_lock_thread.elt_state == ELT_STOPPED);
 #else
         ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
         ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
@@ -2468,18 +2476,18 @@ static int ldlm_cleanup(void)
 
 int __init ldlm_init(void)
 {
-        init_mutex(&ldlm_ref_sem);
-        init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
-        init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+        cfs_init_mutex(&ldlm_ref_sem);
+        cfs_init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
+        cfs_init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
         ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
                                                sizeof(struct ldlm_resource), 0,
-                                               SLAB_HWCACHE_ALIGN);
+                                               CFS_SLAB_HWCACHE_ALIGN);
         if (ldlm_resource_slab == NULL)
                 return -ENOMEM;
 
         ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
-                                      sizeof(struct ldlm_lock), 0,
-                                      SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU);
+                              sizeof(struct ldlm_lock), 0,
+                              CFS_SLAB_HWCACHE_ALIGN | CFS_SLAB_DESTROY_BY_RCU);
         if (ldlm_lock_slab == NULL) {
                 cfs_mem_cache_destroy(ldlm_resource_slab);
                 return -ENOMEM;
@@ -2487,7 +2495,7 @@ int __init ldlm_init(void)
 
         ldlm_interval_slab = cfs_mem_cache_create("interval_node",
                                         sizeof(struct ldlm_interval),
-                                        0, SLAB_HWCACHE_ALIGN);
+                                        0, CFS_SLAB_HWCACHE_ALIGN);
         if (ldlm_interval_slab == NULL) {
                 cfs_mem_cache_destroy(ldlm_resource_slab);
                 cfs_mem_cache_destroy(ldlm_lock_slab);
index 1a19c5d..5240a32 100644
 #include "ldlm_internal.h"
 
 static inline int
-ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
-                        struct list_head *work_list)
+ldlm_plain_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
+                        cfs_list_t *work_list)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_lock *lock;
         ldlm_mode_t req_mode = req->l_req_mode;
         int compat = 1;
@@ -63,16 +63,16 @@ ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
 
         lockmode_verify(req_mode);
 
-        list_for_each(tmp, queue) {
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+        cfs_list_for_each(tmp, queue) {
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (req == lock)
                         RETURN(compat);
 
                  /* last lock in mode group */
-                 tmp = &list_entry(lock->l_sl_mode.prev,
-                                   struct ldlm_lock,
-                                   l_sl_mode)->l_res_link;
+                 tmp = &cfs_list_entry(lock->l_sl_mode.prev,
+                                       struct ldlm_lock,
+                                       l_sl_mode)->l_res_link;
 
                  if (lockmode_compat(lock->l_req_mode, req_mode))
                         continue;
@@ -88,10 +88,10 @@ ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         ldlm_add_ast_work_item(lock, req, work_list);
 
                 {
-                        struct list_head *head;
+                        cfs_list_t *head;
 
                         head = &lock->l_sl_mode;
-                        list_for_each_entry(lock, head, l_sl_mode)
+                        cfs_list_for_each_entry(lock, head, l_sl_mode)
                                 if (lock->l_blocking_ast)
                                         ldlm_add_ast_work_item(lock, req,
                                                                work_list);
@@ -109,7 +109,7 @@ ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
  *   - blocking ASTs have not been sent
  *   - must call this function with the resource lock held */
 int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                            ldlm_error_t *err, struct list_head *work_list)
+                            ldlm_error_t *err, cfs_list_t *work_list)
 {
         struct ldlm_resource *res = lock->l_resource;
         CFS_LIST_HEAD(rpc_list);
@@ -117,7 +117,7 @@ int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
         ENTRY;
 
         check_res_locked(res);
-        LASSERT(list_empty(&res->lr_converting));
+        LASSERT(cfs_list_empty(&res->lr_converting));
 
         if (!first_enq) {
                 LASSERT(work_list != NULL);
@@ -144,7 +144,7 @@ int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                  * bug 2322: we used to unlink and re-add here, which was a
                  * terrible folly -- if we goto restart, we could get
                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (list_empty(&lock->l_res_link))
+                if (cfs_list_empty(&lock->l_res_link))
                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                 unlock_res(res);
                 rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
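
This file shows the iterator side of the rename: cfs_list_for_each walks the queue and cfs_list_entry maps each link back to its lock. The same idiom, hand-rolled and runnable (again stand-ins, not the libcfs macros):

#include <stddef.h>
#include <stdio.h>

typedef struct cfs_list {
        struct cfs_list *next, *prev;
} cfs_list_t;

#define cfs_list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define cfs_list_for_each(pos, head) \
        for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

struct demo_lock {
        int        l_req_mode;
        cfs_list_t l_res_link;
};

int main(void)
{
        cfs_list_t queue = { &queue, &queue };
        struct demo_lock a = { .l_req_mode = 3 };
        cfs_list_t *tmp;

        /* splice one element into the otherwise empty queue */
        a.l_res_link.next = &queue;
        a.l_res_link.prev = &queue;
        queue.next = queue.prev = &a.l_res_link;

        cfs_list_for_each(tmp, &queue) {
                struct demo_lock *lock =
                        cfs_list_entry(tmp, struct demo_lock, l_res_link);
                printf("granted lock, mode %d\n", lock->l_req_mode);
        }
        return 0;
}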
index 4acf356..08bbec9 100644
@@ -233,7 +233,7 @@ static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
         int granted, grant_step, limit;
 
         limit = ldlm_pool_get_limit(pl);
-        granted = atomic_read(&pl->pl_granted);
+        granted = cfs_atomic_read(&pl->pl_granted);
 
         grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
         grant_step = ((limit - granted) * grant_step) / 100;
@@ -254,7 +254,7 @@ static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
         slv = pl->pl_server_lock_volume;
         grant_plan = pl->pl_grant_plan;
         limit = ldlm_pool_get_limit(pl);
-        granted = atomic_read(&pl->pl_granted);
+        granted = cfs_atomic_read(&pl->pl_granted);
 
         grant_usage = limit - (granted - grant_plan);
         if (grant_usage <= 0)
@@ -294,9 +294,9 @@ static inline void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
 {
         int grant_plan = pl->pl_grant_plan;
         __u64 slv = pl->pl_server_lock_volume;
-        int granted = atomic_read(&pl->pl_granted);
-        int grant_rate = atomic_read(&pl->pl_grant_rate);
-        int cancel_rate = atomic_read(&pl->pl_cancel_rate);
+        int granted = cfs_atomic_read(&pl->pl_granted);
+        int grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+        int cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
 
         lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             slv);
@@ -326,9 +326,9 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
          */
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL);
-        write_lock(&obd->obd_pool_lock);
+        cfs_write_lock(&obd->obd_pool_lock);
         obd->obd_pool_slv = pl->pl_server_lock_volume;
-        write_unlock(&obd->obd_pool_lock);
+        cfs_write_unlock(&obd->obd_pool_lock);
 }
 
 /**
@@ -341,7 +341,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
         time_t recalc_interval_sec;
         ENTRY;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
         if (recalc_interval_sec >= pl->pl_recalc_period) {
                 /*
@@ -365,7 +365,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
                                     recalc_interval_sec);
         }
 
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
         RETURN(0);
 }
 
@@ -386,16 +386,16 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
          * VM is asking how many entries may be potentially freed.
          */
         if (nr == 0)
-                return atomic_read(&pl->pl_granted);
+                return cfs_atomic_read(&pl->pl_granted);
 
         /*
          * Client already canceled locks but server is already in shrinker
          * and can't cancel anything. Let's catch this race.
          */
-        if (atomic_read(&pl->pl_granted) == 0)
+        if (cfs_atomic_read(&pl->pl_granted) == 0)
                 RETURN(0);
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
 
         /*
          * We want shrinker to possibly cause cancelation of @nr locks from
@@ -420,7 +420,7 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
          * Make sure that pool informed obd of last SLV changes.
          */
         ldlm_srv_pool_push_slv(pl);
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
         /*
          * We did not really free any memory here so far, it only will be
@@ -440,9 +440,9 @@ static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL && obd != LP_POISON);
         LASSERT(obd->obd_type != LP_POISON);
-        write_lock(&obd->obd_pool_lock);
+        cfs_write_lock(&obd->obd_pool_lock);
         obd->obd_pool_limit = limit;
-        write_unlock(&obd->obd_pool_lock);
+        cfs_write_unlock(&obd->obd_pool_lock);
 
         ldlm_pool_set_limit(pl, limit);
         RETURN(0);
@@ -461,10 +461,10 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
          */
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL);
-        read_lock(&obd->obd_pool_lock);
+        cfs_read_lock(&obd->obd_pool_lock);
         pl->pl_server_lock_volume = obd->obd_pool_slv;
         ldlm_pool_set_limit(pl, obd->obd_pool_limit);
-        read_unlock(&obd->obd_pool_lock);
+        cfs_read_unlock(&obd->obd_pool_lock);
 }
 
 /**
@@ -475,13 +475,13 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
         time_t recalc_interval_sec;
         ENTRY;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         /*
          * Check if we need to recalc lists now.
          */
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
         if (recalc_interval_sec < pl->pl_recalc_period) {
-                spin_unlock(&pl->pl_lock);
+                cfs_spin_unlock(&pl->pl_lock);
                 RETURN(0);
         }
 
@@ -493,7 +493,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
         pl->pl_recalc_time = cfs_time_current_sec();
         lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             recalc_interval_sec);
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
         /*
          * Do not cancel locks in case lru resize is disabled for this ns.
@@ -535,9 +535,9 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
          */
         ldlm_cli_pool_pop_slv(pl);
 
-        spin_lock(&ns->ns_unused_lock);
+        cfs_spin_lock(&ns->ns_unused_lock);
         unused = ns->ns_nr_unused;
-        spin_unlock(&ns->ns_unused_lock);
+        cfs_spin_unlock(&ns->ns_unused_lock);
         
         if (nr) {
                 canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC, 
@@ -573,7 +573,7 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
         time_t recalc_interval_sec;
         int count;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
         if (recalc_interval_sec > 0) {
                 /*
@@ -584,11 +584,11 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
                 /*
                  * Zero out all rates and speed for the last period.
                  */
-                atomic_set(&pl->pl_grant_rate, 0);
-                atomic_set(&pl->pl_cancel_rate, 0);
-                atomic_set(&pl->pl_grant_speed, 0);
+                cfs_atomic_set(&pl->pl_grant_rate, 0);
+                cfs_atomic_set(&pl->pl_cancel_rate, 0);
+                cfs_atomic_set(&pl->pl_grant_speed, 0);
         }
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
         if (pl->pl_ops->po_recalc != NULL) {
                 count = pl->pl_ops->po_recalc(pl);
@@ -652,18 +652,18 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
         __u64 slv, clv;
         __u32 limit;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_server_lock_volume;
         clv = pl->pl_client_lock_volume;
         limit = ldlm_pool_get_limit(pl);
         grant_plan = pl->pl_grant_plan;
-        granted = atomic_read(&pl->pl_granted);
-        grant_rate = atomic_read(&pl->pl_grant_rate);
-        lvf = atomic_read(&pl->pl_lock_volume_factor);
-        grant_speed = atomic_read(&pl->pl_grant_speed);
-        cancel_rate = atomic_read(&pl->pl_cancel_rate);
+        granted = cfs_atomic_read(&pl->pl_granted);
+        grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+        lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
+        grant_speed = cfs_atomic_read(&pl->pl_grant_speed);
+        cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
         grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
         nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
                        pl->pl_name);
@@ -846,14 +846,14 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
         int rc;
         ENTRY;
 
-        spin_lock_init(&pl->pl_lock);
-        atomic_set(&pl->pl_granted, 0);
+        cfs_spin_lock_init(&pl->pl_lock);
+        cfs_atomic_set(&pl->pl_granted, 0);
         pl->pl_recalc_time = cfs_time_current_sec();
-        atomic_set(&pl->pl_lock_volume_factor, 1);
+        cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
 
-        atomic_set(&pl->pl_grant_rate, 0);
-        atomic_set(&pl->pl_cancel_rate, 0);
-        atomic_set(&pl->pl_grant_speed, 0);
+        cfs_atomic_set(&pl->pl_grant_rate, 0);
+        cfs_atomic_set(&pl->pl_cancel_rate, 0);
+        cfs_atomic_set(&pl->pl_grant_speed, 0);
         pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
 
         snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
@@ -912,9 +912,9 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
         ENTRY;
 
         LDLM_DEBUG(lock, "add lock to pool");
-        atomic_inc(&pl->pl_granted);
-        atomic_inc(&pl->pl_grant_rate);
-        atomic_inc(&pl->pl_grant_speed);
+        cfs_atomic_inc(&pl->pl_granted);
+        cfs_atomic_inc(&pl->pl_grant_rate);
+        cfs_atomic_inc(&pl->pl_grant_speed);
 
         lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
         /*
@@ -942,10 +942,10 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
         ENTRY;
 
         LDLM_DEBUG(lock, "del lock from pool");
-        LASSERT(atomic_read(&pl->pl_granted) > 0);
-        atomic_dec(&pl->pl_granted);
-        atomic_inc(&pl->pl_cancel_rate);
-        atomic_dec(&pl->pl_grant_speed);
+        LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
+        cfs_atomic_dec(&pl->pl_granted);
+        cfs_atomic_inc(&pl->pl_cancel_rate);
+        cfs_atomic_dec(&pl->pl_grant_speed);
 
         lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
 
@@ -963,9 +963,9 @@ EXPORT_SYMBOL(ldlm_pool_del);
 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
 {
         __u64 slv;
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_server_lock_volume;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
         return slv;
 }
 EXPORT_SYMBOL(ldlm_pool_get_slv);
@@ -977,9 +977,9 @@ EXPORT_SYMBOL(ldlm_pool_get_slv);
  */
 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
 {
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         pl->pl_server_lock_volume = slv;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 }
 EXPORT_SYMBOL(ldlm_pool_set_slv);
 
@@ -991,9 +991,9 @@ EXPORT_SYMBOL(ldlm_pool_set_slv);
 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
 {
         __u64 slv;
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_client_lock_volume;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
         return slv;
 }
 EXPORT_SYMBOL(ldlm_pool_get_clv);
@@ -1005,9 +1005,9 @@ EXPORT_SYMBOL(ldlm_pool_get_clv);
  */
 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
 {
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         pl->pl_client_lock_volume = clv;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 }
 EXPORT_SYMBOL(ldlm_pool_set_clv);
 
@@ -1016,7 +1016,7 @@ EXPORT_SYMBOL(ldlm_pool_set_clv);
  */
 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_limit);
+        return cfs_atomic_read(&pl->pl_limit);
 }
 EXPORT_SYMBOL(ldlm_pool_get_limit);
 
@@ -1025,7 +1025,7 @@ EXPORT_SYMBOL(ldlm_pool_get_limit);
  */
 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
 {
-        atomic_set(&pl->pl_limit, limit);
+        cfs_atomic_set(&pl->pl_limit, limit);
 }
 EXPORT_SYMBOL(ldlm_pool_set_limit);
 
@@ -1034,20 +1034,20 @@ EXPORT_SYMBOL(ldlm_pool_set_limit);
  */
 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_lock_volume_factor);
+        return cfs_atomic_read(&pl->pl_lock_volume_factor);
 }
 EXPORT_SYMBOL(ldlm_pool_get_lvf);
 
 #ifdef __KERNEL__
 static int ldlm_pool_granted(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_granted);
+        return cfs_atomic_read(&pl->pl_granted);
 }
 
 static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
-static struct completion ldlm_pools_comp;
+static struct cfs_shrinker *ldlm_pools_srv_shrinker;
+static struct cfs_shrinker *ldlm_pools_cli_shrinker;
+static cfs_completion_t ldlm_pools_comp;
 
 /*
  * Cancel \a nr locks from all namespaces (if possible). Returns number of
@@ -1072,19 +1072,19 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         /*
          * Find out how many resources we may release.
          */
-        for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+        for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
              nr_ns > 0; nr_ns--)
         {
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
                         cl_env_reexit(cookie);
                         return 0;
                 }
                 ns = ldlm_namespace_first_locked(client);
                 ldlm_namespace_get(ns);
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
                 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                 ldlm_namespace_put(ns, 1);
         }
@@ -1097,7 +1097,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         /*
          * Shrink at least ldlm_namespace_nr(client) namespaces.
          */
-        for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+        for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
              nr_ns > 0; nr_ns--)
         {
                 int cancel, nr_locks;
@@ -1105,9 +1105,9 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 /*
                  * Do not call shrink under ldlm_namespace_lock(client)
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
                         /*
                          * If list is empty, we can't return any @cached > 0,
                          * that probably would cause needless shrinker
@@ -1119,7 +1119,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 ns = ldlm_namespace_first_locked(client);
                 ldlm_namespace_get(ns);
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
 
                 nr_locks = ldlm_pool_granted(&ns->ns_pool);
                 cancel = 1 + nr_locks * nr / total;
@@ -1154,9 +1154,9 @@ void ldlm_pools_recalc(ldlm_side_t client)
                 /*
                  * Check all modest namespaces first.
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                list_for_each_entry(ns, ldlm_namespace_list(client),
-                                    ns_list_chain)
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+                                        ns_list_chain)
                 {
                         if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
                                 continue;
@@ -1190,8 +1190,8 @@ void ldlm_pools_recalc(ldlm_side_t client)
                 /*
                  * The rest is given to greedy namespaces.
                  */
-                list_for_each_entry(ns, ldlm_namespace_list(client),
-                                    ns_list_chain)
+                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+                                        ns_list_chain)
                 {
                         if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
                                 continue;
@@ -1203,25 +1203,27 @@ void ldlm_pools_recalc(ldlm_side_t client)
                                  * for _all_ pools.
                                  */
                                 l = LDLM_POOL_HOST_L /
-                                        atomic_read(ldlm_namespace_nr(client));
+                                        cfs_atomic_read(
+                                                ldlm_namespace_nr(client));
                         } else {
                                 /*
                                  * All the rest of greedy pools will have
                                  * all locks in equal parts.
                                  */
                                 l = (LDLM_POOL_HOST_L - nr_l) /
-                                        (atomic_read(ldlm_namespace_nr(client)) -
+                                        (cfs_atomic_read(
+                                                ldlm_namespace_nr(client)) -
                                          nr_p);
                         }
                         ldlm_pool_setup(&ns->ns_pool, l);
                 }
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
         }
 
         /*
          * Recalc at least ldlm_namespace_nr(client) namespaces.
          */
-        for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
+        for (nr = cfs_atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
                 int     skip;
                 /*
                  * Lock the list, get first @ns in the list, getref, move it
@@ -1230,14 +1232,14 @@ void ldlm_pools_recalc(ldlm_side_t client)
                  * rid of potential deadlock on client nodes when canceling
                  * locks synchronously.
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
                         break;
                 }
                 ns = ldlm_namespace_first_locked(client);
 
-                spin_lock(&ns->ns_hash_lock);
+                cfs_spin_lock(&ns->ns_hash_lock);
                 /*
                  * skip ns which is being freed, and we don't want to increase
                  * its refcount again, not even temporarily. bz21519.
@@ -1248,10 +1250,10 @@ void ldlm_pools_recalc(ldlm_side_t client)
                         skip = 0;
                         ldlm_namespace_get_locked(ns);
                 }
-                spin_unlock(&ns->ns_hash_lock);
+                cfs_spin_unlock(&ns->ns_hash_lock);
 
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
 
                 /*
                  * After setup is done - recalc the pool.
@@ -1310,7 +1312,7 @@ static int ldlm_pools_thread_main(void *arg)
         CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
                t_name, cfs_curproc_pid());
 
-        complete_and_exit(&ldlm_pools_comp, 0);
+        cfs_complete_and_exit(&ldlm_pools_comp, 0);
 }
 
 static int ldlm_pools_thread_start(void)
@@ -1326,7 +1328,7 @@ static int ldlm_pools_thread_start(void)
         if (ldlm_pools_thread == NULL)
                 RETURN(-ENOMEM);
 
-        init_completion(&ldlm_pools_comp);
+        cfs_init_completion(&ldlm_pools_comp);
         cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
 
         /*
@@ -1364,7 +1366,7 @@ static void ldlm_pools_thread_stop(void)
          * This fixes possible race and oops due to accessing freed memory
          * in pools thread.
          */
-        wait_for_completion(&ldlm_pools_comp);
+        cfs_wait_for_completion(&ldlm_pools_comp);
         OBD_FREE_PTR(ldlm_pools_thread);
         ldlm_pools_thread = NULL;
         EXIT;
@@ -1377,10 +1379,12 @@ int ldlm_pools_init(void)
 
         rc = ldlm_pools_thread_start();
         if (rc == 0) {
-                ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
-                                                       ldlm_pools_srv_shrink);
-                ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS,
-                                                       ldlm_pools_cli_shrink);
+                ldlm_pools_srv_shrinker =
+                        cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+                                         ldlm_pools_srv_shrink);
+                ldlm_pools_cli_shrinker =
+                        cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+                                         ldlm_pools_cli_shrink);
         }
         RETURN(rc);
 }
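
Shrinker registration gets the prefix too: set_shrinker(DEFAULT_SEEKS, cb) becomes cfs_set_shrinker(CFS_DEFAULT_SEEKS, cb). A toy registry with the same shape (every definition below is invented to show the call pattern, not the libcfs one):

#include <stdio.h>
#include <stdlib.h>

#define CFS_DEFAULT_SEEKS 2            /* stand-in value */

typedef int (*cfs_shrink_fn)(int nr, unsigned int gfp_mask);

struct cfs_shrinker {
        int           seeks;
        cfs_shrink_fn shrink;
};

static struct cfs_shrinker *cfs_set_shrinker(int seeks, cfs_shrink_fn fn)
{
        struct cfs_shrinker *s = malloc(sizeof(*s));

        if (s != NULL) {
                s->seeks = seeks;
                s->shrink = fn;
        }
        return s;
}

static void cfs_remove_shrinker(struct cfs_shrinker *s)
{
        free(s);
}

static int demo_srv_shrink(int nr, unsigned int gfp_mask)
{
        (void)gfp_mask;
        return nr;                     /* claim we freed what was asked */
}

int main(void)
{
        struct cfs_shrinker *srv =
                cfs_set_shrinker(CFS_DEFAULT_SEEKS, demo_srv_shrink);

        if (srv != NULL) {
                printf("shrinker freed %d\n", srv->shrink(8, 0));
                cfs_remove_shrinker(srv);
        }
        return 0;
}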
@@ -1389,11 +1393,11 @@ EXPORT_SYMBOL(ldlm_pools_init);
 void ldlm_pools_fini(void)
 {
         if (ldlm_pools_srv_shrinker != NULL) {
-                remove_shrinker(ldlm_pools_srv_shrinker);
+                cfs_remove_shrinker(ldlm_pools_srv_shrinker);
                 ldlm_pools_srv_shrinker = NULL;
         }
         if (ldlm_pools_cli_shrinker != NULL) {
-                remove_shrinker(ldlm_pools_cli_shrinker);
+                cfs_remove_shrinker(ldlm_pools_cli_shrinker);
                 ldlm_pools_cli_shrinker = NULL;
         }
         ldlm_pools_thread_stop();
index 3cabd3d..b65b432 100644
@@ -246,9 +246,9 @@ noreproc:
         }
 
         if (imp != NULL) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 lwd.lwd_conn_cnt = imp->imp_conn_cnt;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         }
 
         if (ns_is_client(lock->l_resource->lr_namespace) &&
@@ -672,7 +672,7 @@ static inline int ldlm_format_handles_avail(struct obd_import *imp,
  * @count locks in @cancels. */
 int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
                       int version, int opc, int canceloff,
-                      struct list_head *cancels, int count)
+                      cfs_list_t *cancels, int count)
 {
         struct ldlm_namespace   *ns = exp->exp_obd->obd_namespace;
         struct req_capsule      *pill = &req->rq_pill;
@@ -735,7 +735,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
 }
 
 int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
-                          struct list_head *cancels, int count)
+                          cfs_list_t *cancels, int count)
 {
         return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                                  LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
@@ -1034,7 +1034,7 @@ static int ldlm_cli_cancel_local(struct ldlm_lock *lock)
 /* Pack @count locks in @head into ldlm_request buffer at the offset @off,
    of the request @req. */
 static void ldlm_cancel_pack(struct ptlrpc_request *req,
-                             struct list_head *head, int count)
+                             cfs_list_t *head, int count)
 {
         struct ldlm_request *dlm;
         struct ldlm_lock *lock;
@@ -1054,7 +1054,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
         /* XXX: it would be better to pack lock handles grouped by resource.
          * so that the server cancel would call filter_lvbo_update() less
          * frequently. */
-        list_for_each_entry(lock, head, l_bl_ast) {
+        cfs_list_for_each_entry(lock, head, l_bl_ast) {
                 if (!count--)
                         break;
                 LASSERT(lock->l_conn_export);
@@ -1069,7 +1069,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
 
 /* Prepare and send a batched cancel rpc, it will include count lock handles
  * of locks given in @head. */
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
+int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
                         int count, int flags)
 {
         struct ptlrpc_request *req = NULL;
@@ -1207,11 +1207,11 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
          * alive in cleanup time. Evil races are possible which may cause
          * oops in that time.
          */
-        write_lock(&obd->obd_pool_lock);
+        cfs_write_lock(&obd->obd_pool_lock);
         old_slv = obd->obd_pool_slv;
         obd->obd_pool_slv = new_slv;
         obd->obd_pool_limit = new_limit;
-        write_unlock(&obd->obd_pool_lock);
+        cfs_write_unlock(&obd->obd_pool_lock);
 
         RETURN(0);
 }
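
The import's pool fields are protected by a reader/writer lock, renamed to cfs_write_lock/cfs_read_lock here and in the pool SLV push/pop helpers earlier. A pthread rwlock analogue of the same pair (the names mirror the hunks; the typedef is an assumption):

#include <pthread.h>
#include <stdio.h>

typedef pthread_rwlock_t cfs_rwlock_t;     /* stand-in type */

#define cfs_write_lock(l)    pthread_rwlock_wrlock(l)
#define cfs_write_unlock(l)  pthread_rwlock_unlock(l)
#define cfs_read_lock(l)     pthread_rwlock_rdlock(l)
#define cfs_read_unlock(l)   pthread_rwlock_unlock(l)

static cfs_rwlock_t obd_pool_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long long obd_pool_slv;

int main(void)
{
        cfs_write_lock(&obd_pool_lock);    /* writer updates the SLV */
        obd_pool_slv = 12345;
        cfs_write_unlock(&obd_pool_lock);

        cfs_read_lock(&obd_pool_lock);     /* readers may run concurrently */
        printf("slv = %llu\n", obd_pool_slv);
        cfs_read_unlock(&obd_pool_lock);
        return 0;
}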
@@ -1241,8 +1241,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh)
         /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
          * rpc which goes to canceld portal, so we can cancel other lru locks
          * here and send them all as one LDLM_CANCEL rpc. */
-        LASSERT(list_empty(&lock->l_bl_ast));
-        list_add(&lock->l_bl_ast, &cancels);
+        LASSERT(cfs_list_empty(&lock->l_bl_ast));
+        cfs_list_add(&lock->l_bl_ast, &cancels);
 
         exp = lock->l_conn_export;
         if (exp_connect_cancelset(exp)) {
@@ -1263,14 +1263,14 @@ int ldlm_cli_cancel(struct lustre_handle *lockh)
 
 /* XXX until we will have compound requests and can cut cancels from generic rpc
  * we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
-static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
+static int ldlm_cancel_list(cfs_list_t *cancels, int count, int flags)
 {
         CFS_LIST_HEAD(head);
         struct ldlm_lock *lock, *next;
         int left = 0, bl_ast = 0, rc;
 
         left = count;
-        list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
+        cfs_list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
                 if (left-- == 0)
                         break;
 
@@ -1282,14 +1282,14 @@ static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
                 }
                 if (!(flags & LDLM_FL_BL_AST) && (rc == LDLM_FL_BL_AST)) {
                         LDLM_DEBUG(lock, "Cancel lock separately");
-                        list_del_init(&lock->l_bl_ast);
-                        list_add(&lock->l_bl_ast, &head);
+                        cfs_list_del_init(&lock->l_bl_ast);
+                        cfs_list_add(&lock->l_bl_ast, &head);
                         bl_ast ++;
                         continue;
                 }
                 if (rc == LDLM_FL_LOCAL_ONLY) {
                         /* CANCEL RPC should not be sent to server. */
-                        list_del_init(&lock->l_bl_ast);
+                        cfs_list_del_init(&lock->l_bl_ast);
                         LDLM_LOCK_RELEASE(lock);
                         count--;
                 }
@@ -1466,7 +1466,7 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
  *
  * flags & LDLM_CANCEL_AGED -   cancel locks according to "aged policy".
  */
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
                           int count, int max, int cancel_flags, int flags)
 {
         ldlm_cancel_lru_policy_t pf;
@@ -1474,7 +1474,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
         int added = 0, unused;
         ENTRY;
 
-        spin_lock(&ns->ns_unused_lock);
+        cfs_spin_lock(&ns->ns_unused_lock);
         unused = ns->ns_nr_unused;
 
         if (!ns_connect_lru_resize(ns))
@@ -1483,12 +1483,13 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
         pf = ldlm_cancel_lru_policy(ns, flags);
         LASSERT(pf != NULL);
 
-        while (!list_empty(&ns->ns_unused_list)) {
+        while (!cfs_list_empty(&ns->ns_unused_list)) {
                 /* For any flags, stop scanning if @max is reached. */
                 if (max && added >= max)
                         break;
 
-                list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru){
+                cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
+                                             l_lru){
                         /* No locks which got blocking requests. */
                         LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
 
@@ -1503,7 +1504,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                         break;
 
                 LDLM_LOCK_GET(lock);
-                spin_unlock(&ns->ns_unused_lock);
+                cfs_spin_unlock(&ns->ns_unused_lock);
                 lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
 
                 /* Pass the lock through the policy filter and see if it
@@ -1524,7 +1525,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                         lu_ref_del(&lock->l_reference,
                                    __FUNCTION__, cfs_current());
                         LDLM_LOCK_RELEASE(lock);
-                        spin_lock(&ns->ns_unused_lock);
+                        cfs_spin_lock(&ns->ns_unused_lock);
                         break;
                 }
 
@@ -1541,7 +1542,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                         lu_ref_del(&lock->l_reference,
                                    __FUNCTION__, cfs_current());
                         LDLM_LOCK_RELEASE(lock);
-                        spin_lock(&ns->ns_unused_lock);
+                        cfs_spin_lock(&ns->ns_unused_lock);
                         continue;
                 }
                 LASSERT(!lock->l_readers && !lock->l_writers);
@@ -1567,15 +1568,15 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                  * and can't use l_pending_chain as it is used both on
                  * server and client; nevertheless bug 5666 says it is
                  * used only on server */
-                LASSERT(list_empty(&lock->l_bl_ast));
-                list_add(&lock->l_bl_ast, cancels);
+                LASSERT(cfs_list_empty(&lock->l_bl_ast));
+                cfs_list_add(&lock->l_bl_ast, cancels);
                 unlock_res_and_lock(lock);
                 lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
-                spin_lock(&ns->ns_unused_lock);
+                cfs_spin_lock(&ns->ns_unused_lock);
                 added++;
                 unused--;
         }
-        spin_unlock(&ns->ns_unused_lock);
+        cfs_spin_unlock(&ns->ns_unused_lock);
         RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
 }
 
@@ -1610,7 +1611,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
  * given policy, mode. GET the found locks and add them into the @cancels
  * list. */
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
-                               struct list_head *cancels,
+                               cfs_list_t *cancels,
                                ldlm_policy_data_t *policy,
                                ldlm_mode_t mode, int lock_flags,
                                int cancel_flags, void *opaque)
@@ -1620,7 +1621,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
         ENTRY;
 
         lock_res(res);
-        list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+        cfs_list_for_each_entry(lock, &res->lr_granted, l_res_link) {
                 if (opaque != NULL && lock->l_ast_data != opaque) {
                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                    lock->l_ast_data, opaque);
@@ -1656,8 +1657,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
                                  lock_flags;
 
-                LASSERT(list_empty(&lock->l_bl_ast));
-                list_add(&lock->l_bl_ast, cancels);
+                LASSERT(cfs_list_empty(&lock->l_bl_ast));
+                cfs_list_add(&lock->l_bl_ast, cancels);
                 LDLM_LOCK_GET(lock);
                 count++;
         }
@@ -1672,14 +1673,14 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
  * If @req is not NULL, put handles of locks in @cancels into the request
  * buffer at the offset @off.
  * Destroy @cancels at the end. */
-int ldlm_cli_cancel_list(struct list_head *cancels, int count,
+int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
                          struct ptlrpc_request *req, int flags)
 {
         struct ldlm_lock *lock;
         int res = 0;
         ENTRY;
 
-        if (list_empty(cancels) || count == 0)
+        if (cfs_list_empty(cancels) || count == 0)
                 RETURN(0);
 
         /* XXX: requests (both batched and not) could be sent in parallel.
@@ -1688,8 +1689,9 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
          * It would also speed up the case when the server does not support
          * the feature. */
         while (count > 0) {
-                LASSERT(!list_empty(cancels));
-                lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
+                LASSERT(!cfs_list_empty(cancels));
+                lock = cfs_list_entry(cancels->next, struct ldlm_lock,
+                                      l_bl_ast);
                 LASSERT(lock->l_conn_export);
 
                 if (exp_connect_cancelset(lock->l_conn_export)) {
@@ -1751,10 +1753,10 @@ static inline int have_no_nsresource(struct ldlm_namespace *ns)
 {
         int no_resource = 0;
 
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         if (ns->ns_resources == 0)
                 no_resource = 1;
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
 
         RETURN(no_resource);
 }
@@ -1779,17 +1781,18 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                                                        LCK_MINMODE, flags,
                                                        opaque));
 
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         for (i = 0; i < RES_HASH_SIZE; i++) {
-                struct list_head *tmp;
+                cfs_list_t *tmp;
                 tmp = ns->ns_hash[i].next;
                 while (tmp != &(ns->ns_hash[i])) {
                         struct ldlm_resource *res;
                         int rc;
 
-                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
+                        res = cfs_list_entry(tmp, struct ldlm_resource,
+                                             lr_hash);
                         ldlm_resource_getref(res);
-                        spin_unlock(&ns->ns_hash_lock);
+                        cfs_spin_unlock(&ns->ns_hash_lock);
 
                         LDLM_RESOURCE_ADDREF(res);
                         rc = ldlm_cli_cancel_unused_resource(ns, &res->lr_name,
@@ -1801,12 +1804,12 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                                        res->lr_name.name[0], rc);
 
                         LDLM_RESOURCE_DELREF(res);
-                        spin_lock(&ns->ns_hash_lock);
+                        cfs_spin_lock(&ns->ns_hash_lock);
                         tmp = tmp->next;
                         ldlm_resource_putref_locked(res);
                 }
         }
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
 
         RETURN(ELDLM_OK);
 }
@@ -1816,7 +1819,7 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
 int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                           void *closure)
 {
-        struct list_head *tmp, *next;
+        cfs_list_t *tmp, *next;
         struct ldlm_lock *lock;
         int rc = LDLM_ITER_CONTINUE;
 
@@ -1826,22 +1829,22 @@ int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                 RETURN(LDLM_ITER_CONTINUE);
 
         lock_res(res);
-        list_for_each_safe(tmp, next, &res->lr_granted) {
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+        cfs_list_for_each_safe(tmp, next, &res->lr_granted) {
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
         }
 
-        list_for_each_safe(tmp, next, &res->lr_converting) {
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+        cfs_list_for_each_safe(tmp, next, &res->lr_converting) {
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
         }
 
-        list_for_each_safe(tmp, next, &res->lr_waiting) {
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+        cfs_list_for_each_safe(tmp, next, &res->lr_waiting) {
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (iter(lock, closure) == LDLM_ITER_STOP)
                         GOTO(out, rc = LDLM_ITER_STOP);
@@ -1879,22 +1882,23 @@ int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
 {
         int i, rc = LDLM_ITER_CONTINUE;
         struct ldlm_resource *res;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
         ENTRY;
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         for (i = 0; i < RES_HASH_SIZE; i++) {
                 tmp = ns->ns_hash[i].next;
                 while (tmp != &(ns->ns_hash[i])) {
-                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
+                        res = cfs_list_entry(tmp, struct ldlm_resource,
+                                             lr_hash);
                         ldlm_resource_getref(res);
-                        spin_unlock(&ns->ns_hash_lock);
+                        cfs_spin_unlock(&ns->ns_hash_lock);
                         LDLM_RESOURCE_ADDREF(res);
 
                         rc = iter(res, closure);
 
                         LDLM_RESOURCE_DELREF(res);
-                        spin_lock(&ns->ns_hash_lock);
+                        cfs_spin_lock(&ns->ns_hash_lock);
                         tmp = tmp->next;
                         ldlm_resource_putref_locked(res);
                         if (rc == LDLM_ITER_STOP)
@@ -1902,7 +1906,7 @@ int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                 }
         }
  out:
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
         RETURN(rc);
 }
 
@@ -1936,17 +1940,18 @@ void ldlm_resource_iterate(struct ldlm_namespace *ns,
 
 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
 {
-        struct list_head *list = closure;
+        cfs_list_t *list = closure;
 
         /* we use l_pending_chain here, because it's unused on clients. */
-        LASSERTF(list_empty(&lock->l_pending_chain),"lock %p next %p prev %p\n",
+        LASSERTF(cfs_list_empty(&lock->l_pending_chain),
+                 "lock %p next %p prev %p\n",
                  lock, &lock->l_pending_chain.next, &lock->l_pending_chain.prev);
         /* bug 9573: don't replay locks left after eviction, or
          * bug 17614: locks being actively cancelled. Get a reference
          * on a lock so that it does not disappear under us (e.g. due to cancel)
          */
         if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
-                list_add(&lock->l_pending_chain, list);
+                cfs_list_add(&lock->l_pending_chain, list);
                 LDLM_LOCK_GET(lock);
         }
 
@@ -1963,7 +1968,7 @@ static int replay_lock_interpret(const struct lu_env *env,
         struct obd_export    *exp;
 
         ENTRY;
-        atomic_dec(&req->rq_import->imp_replay_inflight);
+        cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
         if (rc != ELDLM_OK)
                 GOTO(out, rc);
 
@@ -2043,7 +2048,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
         else if (lock->l_granted_mode)
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
-        else if (!list_empty(&lock->l_res_link))
+        else if (!cfs_list_empty(&lock->l_res_link))
                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
         else
                 flags = LDLM_FL_REPLAY;
@@ -2075,7 +2080,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
 
         LDLM_DEBUG(lock, "replaying lock:");
 
-        atomic_inc(&req->rq_import->imp_replay_inflight);
+        cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
         aa = ptlrpc_req_async_args(req);
         aa->lock_handle = body->lock_handle[0];
@@ -2094,15 +2099,15 @@ int ldlm_replay_locks(struct obd_import *imp)
 
         ENTRY;
 
-        LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
+        LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
 
         /* ensure this doesn't fall to 0 before all have been queued */
-        atomic_inc(&imp->imp_replay_inflight);
+        cfs_atomic_inc(&imp->imp_replay_inflight);
 
         (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
 
-        list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
-                list_del_init(&lock->l_pending_chain);
+        cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+                cfs_list_del_init(&lock->l_pending_chain);
                 if (rc) {
                         LDLM_LOCK_PUT(lock);
                         continue; /* or try to do the rest? */
@@ -2111,7 +2116,7 @@ int ldlm_replay_locks(struct obd_import *imp)
                 LDLM_LOCK_PUT(lock);
         }
 
-        atomic_dec(&imp->imp_replay_inflight);
+        cfs_atomic_dec(&imp->imp_replay_inflight);
 
         RETURN(rc);
 }
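
ldlm_replay_locks() also shows a small counter idiom worth noting: imp_replay_inflight is incremented once up front so that early replies, which decrement it in replay_lock_interpret(), cannot drive it to zero while locks are still being queued. Reduced to its essentials, with C11 atomics standing in for cfs_atomic_t (a sketch, not the Lustre code):

    #include <stdatomic.h>

    static atomic_int inflight;

    /* Called from each reply; the decrement that reaches zero means done. */
    void reply_done(void) { atomic_fetch_sub(&inflight, 1); }

    void replay_all(int nlocks)
    {
            atomic_fetch_add(&inflight, 1);          /* bias: not done yet */
            for (int i = 0; i < nlocks; i++)
                    atomic_fetch_add(&inflight, 1);  /* one per queued replay */
            /* ... send the replay requests; replies call reply_done() ... */
            atomic_fetch_sub(&inflight, 1);          /* drop the bias */
    }
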
index af4ad55..20e9e16 100644
 
 cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
 
-atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
-atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
+cfs_atomic_t ldlm_srv_namespace_nr = CFS_ATOMIC_INIT(0);
+cfs_atomic_t ldlm_cli_namespace_nr = CFS_ATOMIC_INIT(0);
 
-struct semaphore ldlm_srv_namespace_lock;
+cfs_semaphore_t ldlm_srv_namespace_lock;
 CFS_LIST_HEAD(ldlm_srv_namespace_list);
 
-struct semaphore ldlm_cli_namespace_lock;
+cfs_semaphore_t ldlm_cli_namespace_lock;
 CFS_LIST_HEAD(ldlm_cli_namespace_list);
 
 cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
@@ -154,7 +154,7 @@ static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
         int lru_resize;
 
         dummy[MAX_STRING_SIZE] = '\0';
-        if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+        if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE))
                 return -EFAULT;
 
         if (strncmp(dummy, "clear", 5) == 0) {
@@ -317,7 +317,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
                                           ldlm_side_t client, ldlm_appetite_t apt)
 {
         struct ldlm_namespace *ns = NULL;
-        struct list_head *bucket;
+        cfs_list_t *bucket;
         int rc, idx, namelen;
         ENTRY;
 
@@ -351,8 +351,8 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
         CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
         ns->ns_refcount = 0;
         ns->ns_client = client;
-        spin_lock_init(&ns->ns_hash_lock);
-        atomic_set(&ns->ns_locks, 0);
+        cfs_spin_lock_init(&ns->ns_hash_lock);
+        cfs_atomic_set(&ns->ns_locks, 0);
         ns->ns_resources = 0;
         cfs_waitq_init(&ns->ns_waitq);
         ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
@@ -369,12 +369,12 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
         ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
         ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
         ns->ns_timeouts = 0;
-        spin_lock_init(&ns->ns_unused_lock);
+        cfs_spin_lock_init(&ns->ns_unused_lock);
         ns->ns_orig_connect_flags = 0;
         ns->ns_connect_flags = 0;
         ldlm_proc_namespace(ns);
 
-        idx = atomic_read(ldlm_namespace_nr(client));
+        idx = cfs_atomic_read(ldlm_namespace_nr(client));
         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
         if (rc) {
                 CERROR("Can't initialize lock pool, rc %d\n", rc);
@@ -404,10 +404,10 @@ extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
  * as a result--notably, that we shouldn't cancel locks with refs. -phil
  *
  * Called with the ns_lock held. */
-static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
+static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
                              int flags)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         int rc = 0, client = ns_is_client(res->lr_namespace);
         int local_only = (flags & LDLM_FL_LOCAL_ONLY);
         ENTRY;
@@ -419,8 +419,9 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
                 /* first, look for a lock that has not been cleaned yet;
                  * all cleaned locks are marked with the CLEANED flag */
                 lock_res(res);
-                list_for_each(tmp, q) {
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                cfs_list_for_each(tmp, q) {
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                         if (lock->l_flags & LDLM_FL_CLEANED) {
                                 lock = NULL;
                                 continue;
@@ -481,7 +482,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
 
 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         int i;
 
         if (ns == NULL) {
@@ -490,20 +491,21 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
         }
 
         for (i = 0; i < RES_HASH_SIZE; i++) {
-                spin_lock(&ns->ns_hash_lock);
+                cfs_spin_lock(&ns->ns_hash_lock);
                 tmp = ns->ns_hash[i].next;
                 while (tmp != &(ns->ns_hash[i])) {
                         struct ldlm_resource *res;
-                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
+                        res = cfs_list_entry(tmp, struct ldlm_resource,
+                                             lr_hash);
                         ldlm_resource_getref(res);
-                        spin_unlock(&ns->ns_hash_lock);
+                        cfs_spin_unlock(&ns->ns_hash_lock);
                         LDLM_RESOURCE_ADDREF(res);
 
                         cleanup_resource(res, &res->lr_granted, flags);
                         cleanup_resource(res, &res->lr_converting, flags);
                         cleanup_resource(res, &res->lr_waiting, flags);
 
-                        spin_lock(&ns->ns_hash_lock);
+                        cfs_spin_lock(&ns->ns_hash_lock);
                         tmp = tmp->next;
 
                         /* XXX: former stuff caused issues in case of race
@@ -514,16 +516,20 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
                         LDLM_RESOURCE_DELREF(res);
                         if (!ldlm_resource_putref_locked(res)) {
                                 CERROR("Namespace %s resource refcount nonzero "
-                                       "(%d) after lock cleanup; forcing cleanup.\n",
-                                       ns->ns_name, atomic_read(&res->lr_refcount));
+                                       "(%d) after lock cleanup; forcing "
+                                       "cleanup.\n",
+                                       ns->ns_name,
+                                       cfs_atomic_read(&res->lr_refcount));
                                 CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
                                        LPU64") (rc: %d)\n", res,
-                                       res->lr_name.name[0], res->lr_name.name[1],
-                                       res->lr_name.name[2], res->lr_name.name[3],
-                                       atomic_read(&res->lr_refcount));
+                                       res->lr_name.name[0],
+                                       res->lr_name.name[1],
+                                       res->lr_name.name[2],
+                                       res->lr_name.name[3],
+                                       cfs_atomic_read(&res->lr_refcount));
                         }
                 }
-                spin_unlock(&ns->ns_hash_lock);
+                cfs_spin_unlock(&ns->ns_hash_lock);
         }
 
         return ELDLM_OK;
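
cleanup_resource() above depends on the LDLM_FL_CLEANED marking: since the resource lock must be dropped to run blocking callbacks, each pass rescans the queue from the head and skips entries already marked. A userspace model of that restartable scan, under illustrative names:

    #include <pthread.h>

    struct item {
            struct item *next;
            int          cleaned;          /* protected by list_lock */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void process(struct item *it) { (void)it; /* may block */ }

    void clean_all(struct item *head)
    {
            for (;;) {
                    struct item *it;

                    pthread_mutex_lock(&list_lock);
                    for (it = head; it != NULL; it = it->next)
                            if (!it->cleaned)
                                    break;           /* first unmarked entry */
                    if (it == NULL) {                /* everything is marked */
                            pthread_mutex_unlock(&list_lock);
                            return;
                    }
                    it->cleaned = 1;                 /* mark before unlocking */
                    pthread_mutex_unlock(&list_lock);

                    process(it);                     /* rescan will skip it */
            }
    }
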
@@ -544,7 +550,7 @@ static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
                        ns->ns_name, ns->ns_refcount);
 force_wait:
                 if (force)
-                        lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
+                        lwi = LWI_TIMEOUT(obd_timeout * CFS_HZ / 4, NULL, NULL);
 
                 rc = l_wait_event(ns->ns_waitq,
                                   ns->ns_refcount == 0, &lwi);
@@ -659,7 +665,7 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns)
          * Namespace \a ns should not be on any list at this time, otherwise
          * this will cause issues related to using a freed \a ns in the
          * pools thread.
          */
-        LASSERT(list_empty(&ns->ns_list_chain));
+        LASSERT(cfs_list_empty(&ns->ns_list_chain));
         OBD_FREE_PTR(ns);
         ldlm_put_ref();
         EXIT;
@@ -699,9 +705,9 @@ void ldlm_namespace_get_locked(struct ldlm_namespace *ns)
 
 void ldlm_namespace_get(struct ldlm_namespace *ns)
 {
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         ldlm_namespace_get_locked(ns);
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
 }
 
 void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup)
@@ -709,54 +715,54 @@ void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup)
         LASSERT(ns->ns_refcount > 0);
         ns->ns_refcount--;
         if (ns->ns_refcount == 0 && wakeup)
-                wake_up(&ns->ns_waitq);
+                cfs_waitq_signal(&ns->ns_waitq);
 }
 
 void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup)
 {
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         ldlm_namespace_put_locked(ns, wakeup);
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
 }
 
 /* Register @ns in the list of namespaces */
 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
 {
-        mutex_down(ldlm_namespace_lock(client));
-        LASSERT(list_empty(&ns->ns_list_chain));
-        list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
-        atomic_inc(ldlm_namespace_nr(client));
-        mutex_up(ldlm_namespace_lock(client));
+        cfs_mutex_down(ldlm_namespace_lock(client));
+        LASSERT(cfs_list_empty(&ns->ns_list_chain));
+        cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
+        cfs_atomic_inc(ldlm_namespace_nr(client));
+        cfs_mutex_up(ldlm_namespace_lock(client));
 }
 
 /* Unregister @ns from the list of namespaces */
 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
 {
-        mutex_down(ldlm_namespace_lock(client));
-        LASSERT(!list_empty(&ns->ns_list_chain));
+        cfs_mutex_down(ldlm_namespace_lock(client));
+        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
         /*
          * Some asserts and possibly other parts of the code are still using
          * list_empty(&ns->ns_list_chain). This is why it is important
          * to use list_del_init() here.
          */
-        list_del_init(&ns->ns_list_chain);
-        atomic_dec(ldlm_namespace_nr(client));
-        mutex_up(ldlm_namespace_lock(client));
+        cfs_list_del_init(&ns->ns_list_chain);
+        cfs_atomic_dec(ldlm_namespace_nr(client));
+        cfs_mutex_up(ldlm_namespace_lock(client));
 }
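
The comment above is worth unpacking: cfs_list_del_init() not only unlinks the node, it re-points the node at itself, so a later emptiness check on the node itself returns true. A self-contained model of a circular doubly linked list showing why the init-variant delete preserves that property (illustrative, not the libcfs implementation):

    struct list {
            struct list *next, *prev;
    };

    static void list_init(struct list *h) { h->next = h->prev = h; }

    /* True for an empty head and, after list_del_and_init(), for a
     * detached node as well; plain deletion would leave stale links. */
    static int list_is_empty(const struct list *h) { return h->next == h; }

    static void list_add_head(struct list *n, struct list *h)
    {
            n->next = h->next;
            n->prev = h;
            h->next->prev = n;
            h->next = n;
    }

    static void list_del_and_init(struct list *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            list_init(n);               /* node now points at itself */
    }
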
 
 /* Should be called under ldlm_namespace_lock(client) taken */
 void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
 {
-        LASSERT(!list_empty(&ns->ns_list_chain));
+        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
-        list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
+        cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
 }
 
 /* Should be called under ldlm_namespace_lock(client) taken */
 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
 {
         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
-        LASSERT(!list_empty(ldlm_namespace_list(client)));
+        LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
         return container_of(ldlm_namespace_list(client)->next,
                 struct ldlm_namespace, ns_list_chain);
 }
@@ -798,13 +804,13 @@ static struct ldlm_resource *ldlm_resource_new(void)
                 res->lr_itree[idx].lit_root = NULL;
         }
 
-        atomic_set(&res->lr_refcount, 1);
-        spin_lock_init(&res->lr_lock);
+        cfs_atomic_set(&res->lr_refcount, 1);
+        cfs_spin_lock_init(&res->lr_lock);
         lu_ref_init(&res->lr_reference);
 
        /* whoever creates the resource must unlock
         * the semaphore after LVB initialization */
-        init_MUTEX_LOCKED(&res->lr_lvb_sem);
+        cfs_init_mutex_locked(&res->lr_lvb_sem);
 
         return res;
 }
@@ -814,14 +820,14 @@ static struct ldlm_resource *
 ldlm_resource_find(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
                    __u32 hash)
 {
-        struct list_head *bucket, *tmp;
+        cfs_list_t *bucket, *tmp;
         struct ldlm_resource *res;
 
         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
         bucket = ns->ns_hash + hash;
 
-        list_for_each(tmp, bucket) {
-                res = list_entry(tmp, struct ldlm_resource, lr_hash);
+        cfs_list_for_each(tmp, bucket) {
+                res = cfs_list_entry(tmp, struct ldlm_resource, lr_hash);
                 if (memcmp(&res->lr_name, name, sizeof(res->lr_name)) == 0)
                         return res;
         }
@@ -835,7 +841,7 @@ static struct ldlm_resource *
 ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                   const struct ldlm_res_id *name, __u32 hash, ldlm_type_t type)
 {
-        struct list_head *bucket;
+        cfs_list_t *bucket;
         struct ldlm_resource *res, *old_res;
         ENTRY;
 
@@ -851,34 +857,34 @@ ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
         res->lr_type = type;
         res->lr_most_restr = LCK_NL;
 
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         old_res = ldlm_resource_find(ns, name, hash);
         if (old_res) {
                 /* someone won the race and added the resource before */
                 ldlm_resource_getref(old_res);
-                spin_unlock(&ns->ns_hash_lock);
+                cfs_spin_unlock(&ns->ns_hash_lock);
                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
                 /* synchronize WRT resource creation */
                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
-                        down(&old_res->lr_lvb_sem);
-                        up(&old_res->lr_lvb_sem);
+                        cfs_down(&old_res->lr_lvb_sem);
+                        cfs_up(&old_res->lr_lvb_sem);
                 }
                 RETURN(old_res);
         }
 
         /* we won! let's add the resource */
         bucket = ns->ns_hash + hash;
-        list_add(&res->lr_hash, bucket);
+        cfs_list_add(&res->lr_hash, bucket);
         ns->ns_resources++;
         ldlm_namespace_get_locked(ns);
 
         if (parent == NULL) {
-                list_add(&res->lr_childof, &ns->ns_root_list);
+                cfs_list_add(&res->lr_childof, &ns->ns_root_list);
         } else {
                 res->lr_parent = parent;
-                list_add(&res->lr_childof, &parent->lr_children);
+                cfs_list_add(&res->lr_childof, &parent->lr_children);
         }
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
 
         if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
                 int rc;
@@ -889,7 +895,7 @@ ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                         CERROR("lvbo_init failed for resource "
                                LPU64": rc %d\n", name->name[0], rc);
                 /* we create resource with locked lr_lvb_sem */
-                up(&res->lr_lvb_sem);
+                cfs_up(&res->lr_lvb_sem);
         }
 
         RETURN(res);
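
The lr_lvb_sem handling above is a create-locked handshake: the thread that wins the race publishes the resource while the semaphore is still held (cfs_init_mutex_locked() in ldlm_resource_new()), finishes LVB initialization, and only then releases it. A loser that finds the resource does a down/up pair, which blocks until initialization completes. A POSIX-semaphore model of the handshake, under illustrative names:

    #include <semaphore.h>

    struct res {
            sem_t lvb_sem;
            /* ... LVB data ... */
    };

    void creator(struct res *r)
    {
            sem_init(&r->lvb_sem, 0, 0);   /* born "locked" */
            /* publish r in the hash, drop the hash lock ... */
            /* ... initialize the LVB ... */
            sem_post(&r->lvb_sem);         /* open the gate */
    }

    void finder(struct res *r)
    {
            sem_wait(&r->lvb_sem);         /* blocks until the creator posts */
            sem_post(&r->lvb_sem);         /* leave the gate open for others */
    }
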
@@ -910,19 +916,19 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
         LASSERT(ns->ns_hash != NULL);
         LASSERT(name->name[0] != 0);
 
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         res = ldlm_resource_find(ns, name, hash);
         if (res) {
                 ldlm_resource_getref(res);
-                spin_unlock(&ns->ns_hash_lock);
+                cfs_spin_unlock(&ns->ns_hash_lock);
                 /* synchronize WRT resource creation */
                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
-                        down(&res->lr_lvb_sem);
-                        up(&res->lr_lvb_sem);
+                        cfs_down(&res->lr_lvb_sem);
+                        cfs_up(&res->lr_lvb_sem);
                 }
                 RETURN(res);
         }
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
 
         if (create == 0)
                 RETURN(NULL);
@@ -935,9 +941,9 @@ struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
 {
         LASSERT(res != NULL);
         LASSERT(res != LP_POISON);
-        atomic_inc(&res->lr_refcount);
+        cfs_atomic_inc(&res->lr_refcount);
         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
-               atomic_read(&res->lr_refcount));
+               cfs_atomic_read(&res->lr_refcount));
         return res;
 }
 
@@ -947,22 +953,22 @@ void __ldlm_resource_putref_final(struct ldlm_resource *res)
 
         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
 
-        if (!list_empty(&res->lr_granted)) {
+        if (!cfs_list_empty(&res->lr_granted)) {
                 ldlm_resource_dump(D_ERROR, res);
                 LBUG();
         }
 
-        if (!list_empty(&res->lr_converting)) {
+        if (!cfs_list_empty(&res->lr_converting)) {
                 ldlm_resource_dump(D_ERROR, res);
                 LBUG();
         }
 
-        if (!list_empty(&res->lr_waiting)) {
+        if (!cfs_list_empty(&res->lr_waiting)) {
                 ldlm_resource_dump(D_ERROR, res);
                 LBUG();
         }
 
-        if (!list_empty(&res->lr_children)) {
+        if (!cfs_list_empty(&res->lr_children)) {
                 ldlm_resource_dump(D_ERROR, res);
                 LBUG();
         }
@@ -970,13 +976,13 @@ void __ldlm_resource_putref_final(struct ldlm_resource *res)
         /* Pass 0 here to not wake ->ns_waitq up yet; we will do it a few
          * lines below when all children are freed. */
         ldlm_namespace_put_locked(ns, 0);
-        list_del_init(&res->lr_hash);
-        list_del_init(&res->lr_childof);
+        cfs_list_del_init(&res->lr_hash);
+        cfs_list_del_init(&res->lr_childof);
         lu_ref_fini(&res->lr_reference);
 
         ns->ns_resources--;
         if (ns->ns_resources == 0)
-                wake_up(&ns->ns_waitq);
+                cfs_waitq_signal(&ns->ns_waitq);
 }
 
 /* Returns 1 if the resource was freed, 0 if it remains. */
@@ -987,15 +993,15 @@ int ldlm_resource_putref(struct ldlm_resource *res)
         ENTRY;
 
         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
-               atomic_read(&res->lr_refcount) - 1);
-        LASSERTF(atomic_read(&res->lr_refcount) > 0, "%d",
-                 atomic_read(&res->lr_refcount));
-        LASSERTF(atomic_read(&res->lr_refcount) < LI_POISON, "%d",
-                 atomic_read(&res->lr_refcount));
+               cfs_atomic_read(&res->lr_refcount) - 1);
+        LASSERTF(cfs_atomic_read(&res->lr_refcount) > 0, "%d",
+                 cfs_atomic_read(&res->lr_refcount));
+        LASSERTF(cfs_atomic_read(&res->lr_refcount) < LI_POISON, "%d",
+                 cfs_atomic_read(&res->lr_refcount));
 
-        if (atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
+        if (cfs_atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
                 __ldlm_resource_putref_final(res);
-                spin_unlock(&ns->ns_hash_lock);
+                cfs_spin_unlock(&ns->ns_hash_lock);
                 if (res->lr_lvb_data)
                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
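
cfs_atomic_dec_and_lock() makes the final decrement and the acquisition of ns_hash_lock one atomic step, so no new reference can be taken between "the count reached zero" and "the resource is unhashed". A pthread model of the semantics; the kernel version additionally avoids the lock entirely on the fast path where the count stays above one:

    #include <pthread.h>

    /* Decrement *refs; if it reaches zero, return 1 with *lock held so
     * the caller can unhash and free.  Otherwise return 0, lock released. */
    int dec_and_lock(int *refs, pthread_mutex_t *lock)
    {
            pthread_mutex_lock(lock);
            if (--*refs > 0) {
                    pthread_mutex_unlock(lock);
                    return 0;
            }
            return 1;
    }
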
@@ -1012,12 +1018,12 @@ int ldlm_resource_putref_locked(struct ldlm_resource *res)
         ENTRY;
 
         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
-               atomic_read(&res->lr_refcount) - 1);
-        LASSERT(atomic_read(&res->lr_refcount) > 0);
-        LASSERT(atomic_read(&res->lr_refcount) < LI_POISON);
+               cfs_atomic_read(&res->lr_refcount) - 1);
+        LASSERT(cfs_atomic_read(&res->lr_refcount) > 0);
+        LASSERT(cfs_atomic_read(&res->lr_refcount) < LI_POISON);
 
-        LASSERT(atomic_read(&res->lr_refcount) >= 0);
-        if (atomic_dec_and_test(&res->lr_refcount)) {
+        LASSERT(cfs_atomic_read(&res->lr_refcount) >= 0);
+        if (cfs_atomic_dec_and_test(&res->lr_refcount)) {
                 __ldlm_resource_putref_final(res);
                 if (res->lr_lvb_data)
                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
@@ -1028,7 +1034,7 @@ int ldlm_resource_putref_locked(struct ldlm_resource *res)
         RETURN(rc);
 }
 
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
                             struct ldlm_lock *lock)
 {
         check_res_locked(res);
@@ -1042,9 +1048,9 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                 return;
         }
 
-        LASSERT(list_empty(&lock->l_res_link));
+        LASSERT(cfs_list_empty(&lock->l_res_link));
 
-        list_add_tail(&lock->l_res_link, head);
+        cfs_list_add_tail(&lock->l_res_link, head);
 }
 
 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
@@ -1063,9 +1069,9 @@ void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
                 goto out;
         }
 
-        LASSERT(list_empty(&new->l_res_link));
+        LASSERT(cfs_list_empty(&new->l_res_link));
 
-        list_add(&new->l_res_link, &original->l_res_link);
+        cfs_list_add(&new->l_res_link, &original->l_res_link);
  out:;
 }
 
@@ -1078,7 +1084,7 @@ void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
                 ldlm_unlink_lock_skiplist(lock);
         else if (type == LDLM_EXTENT)
                 ldlm_extent_unlink_lock(lock);
-        list_del_init(&lock->l_res_link);
+        cfs_list_del_init(&lock->l_res_link);
 }
 
 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
@@ -1089,25 +1095,25 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
 
 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
         if (!((libcfs_debug | D_ERROR) & level))
                 return;
 
-        mutex_down(ldlm_namespace_lock(client));
+        cfs_mutex_down(ldlm_namespace_lock(client));
 
-        list_for_each(tmp, ldlm_namespace_list(client)) {
+        cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
                 struct ldlm_namespace *ns;
-                ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
+                ns = cfs_list_entry(tmp, struct ldlm_namespace, ns_list_chain);
                 ldlm_namespace_dump(level, ns);
         }
 
-        mutex_up(ldlm_namespace_lock(client));
+        cfs_mutex_up(ldlm_namespace_lock(client));
 }
 
 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
         if (!((libcfs_debug | D_ERROR) & level))
                 return;
@@ -1119,14 +1125,14 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
                 return;
 
-        spin_lock(&ns->ns_hash_lock);
+        cfs_spin_lock(&ns->ns_hash_lock);
         tmp = ns->ns_root_list.next;
         while (tmp != &ns->ns_root_list) {
                 struct ldlm_resource *res;
-                res = list_entry(tmp, struct ldlm_resource, lr_childof);
+                res = cfs_list_entry(tmp, struct ldlm_resource, lr_childof);
 
                 ldlm_resource_getref(res);
-                spin_unlock(&ns->ns_hash_lock);
+                cfs_spin_unlock(&ns->ns_hash_lock);
                 LDLM_RESOURCE_ADDREF(res);
 
                 lock_res(res);
@@ -1134,17 +1140,17 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
                 unlock_res(res);
 
                 LDLM_RESOURCE_DELREF(res);
-                spin_lock(&ns->ns_hash_lock);
+                cfs_spin_lock(&ns->ns_hash_lock);
                 tmp = tmp->next;
                 ldlm_resource_putref_locked(res);
         }
         ns->ns_next_dump = cfs_time_shift(10);
-        spin_unlock(&ns->ns_hash_lock);
+        cfs_spin_unlock(&ns->ns_hash_lock);
 }
 
 void ldlm_resource_dump(int level, struct ldlm_resource *res)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         int pos;
 
         CLASSERT(RES_NAME_SIZE == 4);
@@ -1155,32 +1161,35 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
         CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
                ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
                res->lr_name.name[2], res->lr_name.name[3],
-               atomic_read(&res->lr_refcount));
+               cfs_atomic_read(&res->lr_refcount));
 
-        if (!list_empty(&res->lr_granted)) {
+        if (!cfs_list_empty(&res->lr_granted)) {
                 pos = 0;
                 CDEBUG(level, "Granted locks:\n");
-                list_for_each(tmp, &res->lr_granted) {
+                cfs_list_for_each(tmp, &res->lr_granted) {
                         struct ldlm_lock *lock;
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                         ldlm_lock_dump(level, lock, ++pos);
                 }
         }
-        if (!list_empty(&res->lr_converting)) {
+        if (!cfs_list_empty(&res->lr_converting)) {
                 pos = 0;
                 CDEBUG(level, "Converting locks:\n");
-                list_for_each(tmp, &res->lr_converting) {
+                cfs_list_for_each(tmp, &res->lr_converting) {
                         struct ldlm_lock *lock;
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                         ldlm_lock_dump(level, lock, ++pos);
                 }
         }
-        if (!list_empty(&res->lr_waiting)) {
+        if (!cfs_list_empty(&res->lr_waiting)) {
                 pos = 0;
                 CDEBUG(level, "Waiting locks:\n");
-                list_for_each(tmp, &res->lr_waiting) {
+                cfs_list_for_each(tmp, &res->lr_waiting) {
                         struct ldlm_lock *lock;
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                         ldlm_lock_dump(level, lock, ++pos);
                 }
         }
index 0039f3d..ce812de 100644
@@ -61,8 +61,6 @@
 #include <file.h>
 #endif
 
-#undef LIST_HEAD
-
 #ifdef HAVE_LINUX_UNISTD_H
 #include <linux/unistd.h>
 #elif defined(HAVE_UNISTD_H)
index 3cef55c..4810ef3 100644
@@ -60,8 +60,6 @@
 #include <file.h>
 #endif
 
-#undef LIST_HEAD
-
 #include "llite_lib.h"
 
 /* Pack the required supplementary groups into the supplied groups array.
@@ -73,13 +71,13 @@ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
         LASSERT(i1 != NULL);
         LASSERT(suppgids != NULL);
 
-        if (in_group_p(i1->i_stbuf.st_gid))
+        if (cfs_curproc_is_in_groups(i1->i_stbuf.st_gid))
                 suppgids[0] = i1->i_stbuf.st_gid;
         else
                 suppgids[0] = -1;
 
         if (i2) {
-                if (in_group_p(i2->i_stbuf.st_gid))
+                if (cfs_curproc_is_in_groups(i2->i_stbuf.st_gid))
                         suppgids[1] = i2->i_stbuf.st_gid;
                 else
                         suppgids[1] = -1;
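
ll_i2gids() packs each inode's group into suppgids[] only when the current process is actually a member; -1 tells the server to ignore the slot. In userspace the membership test can be modeled with getgroups(2); the sketch below is an assumption about the behavior, not liblustre's implementation (the renamed cfs_curproc_is_in_groups() appears further down):

    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/types.h>

    int is_in_groups(gid_t gid)
    {
            int i, n, found = 0;
            gid_t *groups;

            if (gid == getegid())
                    return 1;
            n = getgroups(0, NULL);        /* query the count first */
            if (n <= 0)
                    return 0;
            groups = malloc(n * sizeof(*groups));
            if (groups == NULL)
                    return 0;
            n = getgroups(n, groups);
            for (i = 0; i < n; i++)
                    if (groups[i] == gid)
                            found = 1;
            free(groups);
            return found;
    }
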
@@ -112,7 +110,7 @@ void llu_prep_md_op_data(struct md_op_data *op_data, struct inode *i1,
         op_data->op_name = name;
         op_data->op_mode = mode;
         op_data->op_namelen = namelen;
-        op_data->op_mod_time = CURRENT_TIME;
+        op_data->op_mod_time = CFS_CURRENT_TIME;
         op_data->op_data = NULL;
 }
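
CFS_CURRENT_TIME follows the same portability pattern as the rest of the prefix rename. One plausible layout, offered as an assumption rather than the actual libcfs definition:

    /* Illustrative wrapper only. */
    #ifdef __KERNEL__
    #define CFS_CURRENT_TIME CURRENT_TIME   /* native kernel macro */
    #else
    #include <time.h>
    #define CFS_CURRENT_TIME time(NULL)     /* userspace fallback */
    #endif
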
 
index bac1928..9003491 100644
 #include <file.h>
 #endif
 
-/* both sys/queue.h (libsysio require it) and portals/lists.h have definition
- * of 'LIST_HEAD'. undef it to suppress warnings
- */
-#undef LIST_HEAD
 #include <liblustre.h>
 #include <lnet/lnetctl.h>     /* needed for parse_dump */
 
index bf51702..b2626ca 100644
@@ -68,7 +68,7 @@ struct llu_sb_info {
         struct lu_fid            ll_root_fid;
         int                      ll_flags;
         struct lustre_client_ocd ll_lco;
-        struct list_head         ll_conn_chain;
+        cfs_list_t               ll_conn_chain;
 
         struct obd_uuid          ll_mds_uuid;
         struct obd_uuid          ll_mds_peer_uuid;
@@ -90,7 +90,7 @@ struct llu_inode_info {
 
         struct lov_stripe_md   *lli_smd;
         char                   *lli_symlink_name;
-        struct semaphore        lli_open_sem;
+        cfs_semaphore_t         lli_open_sem;
         __u64                   lli_maxbytes;
         unsigned long           lli_flags;
         __u64                   lli_ioepoch;
index ba0fd2f..d2752e1 100644
@@ -182,7 +182,7 @@ static void init_capability(__u32 *res)
 #endif
 }
 
-int in_group_p(gid_t gid)
+int cfs_curproc_is_in_groups(gid_t gid)
 {
         int i;
 
index b748e6e..3fea45d 100644
@@ -60,8 +60,6 @@
 #include <file.h>
 #endif
 
-#undef LIST_HEAD
-
 #include "llite_lib.h"
 
 void ll_intent_drop_lock(struct lookup_intent *it)
@@ -332,7 +330,7 @@ static int lookup_it_finish(struct ptlrpc_request *request, int offset,
                 if (it_disposition(it, DISP_OPEN_CREATE) &&
                     !it_open_error(DISP_OPEN_CREATE, it)) {
                         LASSERT(request);
-                        LASSERT(atomic_read(&request->rq_refcount) > 1);
+                        LASSERT(cfs_atomic_read(&request->rq_refcount) > 1);
                         CDEBUG(D_INODE, "dec a ref of req %p\n", request);
                         ptlrpc_req_finished(request);
                 }
index 7fdf5ae..6c848de 100644
@@ -61,8 +61,6 @@
 #include <file.h>
 #endif
 
-#undef LIST_HEAD
-
 #include "llite_lib.h"
 
 typedef ssize_t llu_file_piov_t(const struct iovec *iovec, int iovlen,
@@ -453,7 +451,7 @@ int llu_iop_read(struct inode *ino,
         int ret;
 
         /* BUG: 5972 */
-        st->st_atime = CURRENT_TIME;
+        st->st_atime = CFS_CURRENT_TIME;
 
         env = cl_env_get(&refcheck);
         if (IS_ERR(env))
@@ -477,7 +475,7 @@ int llu_iop_write(struct inode *ino,
         int refcheck;
         int ret;
 
-        st->st_mtime = st->st_ctime = CURRENT_TIME;
+        st->st_mtime = st->st_ctime = CFS_CURRENT_TIME;
 
         env = cl_env_get(&refcheck);
         if (IS_ERR(env))
index 9ecabb6..6c53633 100644
@@ -65,8 +65,6 @@
 #include <file.h>
 #endif
 
-#undef LIST_HEAD
-
 #include "llite_lib.h"
 
 #ifndef MAY_EXEC
@@ -84,7 +82,7 @@ static int ll_permission(struct inode *inode, int mask)
 
         if (current->fsuid == st->st_uid)
                 mode >>= 6;
-        else if (in_group_p(st->st_gid))
+        else if (cfs_curproc_is_in_groups(st->st_gid))
                 mode >>= 3;
 
         if ((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask)
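
The shifts in ll_permission() select the classic owner/group/other triplet: shifting the mode right by 6 or 3 bits drops the relevant rwx class into the low three bits before masking. The logic isolated as a standalone helper (illustrative, not liblustre code):

    #include <sys/types.h>

    #define MAY_EXEC  1
    #define MAY_WRITE 2
    #define MAY_READ  4

    int may_access(mode_t mode, uid_t owner, int in_group,
                   uid_t fsuid, int mask)
    {
            if (fsuid == owner)
                    mode >>= 6;             /* owner bits -> low three */
            else if (in_group)
                    mode >>= 3;             /* group bits -> low three */

            /* grant only if every requested bit is present */
            return (mode & mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == mask;
    }
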
@@ -111,7 +109,7 @@ static void llu_fsop_gone(struct filesys *fs)
         int next = 0;
         ENTRY;
 
-        list_del(&sbi->ll_conn_chain);
+        cfs_list_del(&sbi->ll_conn_chain);
         cl_sb_fini(sbi);
         obd_disconnect(sbi->ll_dt_exp);
         obd_disconnect(sbi->ll_md_exp);
@@ -648,7 +646,8 @@ static int inode_setattr(struct inode * inode, struct iattr * attr)
                 st->st_ctime = attr->ia_ctime;
         if (ia_valid & ATTR_MODE) {
                 st->st_mode = attr->ia_mode;
-                if (!in_group_p(st->st_gid) && !cfs_capable(CFS_CAP_FSETID))
+                if (!cfs_curproc_is_in_groups(st->st_gid) &&
+                    !cfs_capable(CFS_CAP_FSETID))
                         st->st_mode &= ~S_ISGID;
         }
         /* mark_inode_dirty(inode); */
@@ -766,15 +765,15 @@ int llu_setattr_raw(struct inode *inode, struct iattr *attr)
 
         /* We mark all of the fields "set" so MDS/OST does not re-set them */
         if (attr->ia_valid & ATTR_CTIME) {
-                attr->ia_ctime = CURRENT_TIME;
+                attr->ia_ctime = CFS_CURRENT_TIME;
                 attr->ia_valid |= ATTR_CTIME_SET;
         }
         if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
-                attr->ia_atime = CURRENT_TIME;
+                attr->ia_atime = CFS_CURRENT_TIME;
                 attr->ia_valid |= ATTR_ATIME_SET;
         }
         if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
-                attr->ia_mtime = CURRENT_TIME;
+                attr->ia_mtime = CFS_CURRENT_TIME;
                 attr->ia_valid |= ATTR_MTIME_SET;
         }
         if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
@@ -792,7 +791,7 @@ int llu_setattr_raw(struct inode *inode, struct iattr *attr)
                 CDEBUG(D_INODE, "setting mtime "CFS_TIME_T", ctime "CFS_TIME_T
                       ", now = "CFS_TIME_T"\n",
                        LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
-                       LTIME_S(CURRENT_TIME));
+                       LTIME_S(CFS_CURRENT_TIME));
 
         /* NB: ATTR_SIZE will only be set after this point if the size
          * resides on the MDS, ie, this file has no objects. */
@@ -902,7 +901,7 @@ static int llu_iop_setattr(struct pnode *pno,
         }
 
         iattr.ia_valid |= ATTR_RAW | ATTR_CTIME;
-        iattr.ia_ctime = CURRENT_TIME;
+        iattr.ia_ctime = CFS_CURRENT_TIME;
 
         rc = llu_setattr_raw(ino, &iattr);
         liblustre_wait_idle();
@@ -1211,7 +1210,7 @@ static int llu_statfs(struct llu_sb_info *sbi, struct statfs *sfs)
         /* For now we will always get up-to-date statfs values, but in the
          * future we may allow some amount of caching on the client (e.g.
          * from QOS or lprocfs updates). */
-        rc = llu_statfs_internal(sbi, &osfs, cfs_time_current_64() - HZ);
+        rc = llu_statfs_internal(sbi, &osfs, cfs_time_current_64() - CFS_HZ);
         if (rc)
                 return rc;
 
@@ -1672,7 +1671,7 @@ static int llu_lov_dir_setstripe(struct inode *ino, unsigned long arg)
         LASSERT(sizeof(lum) == sizeof(*lump));
         LASSERT(sizeof(lum.lmm_objects[0]) ==
                 sizeof(lump->lmm_objects[0]));
-        if (copy_from_user(&lum, lump, sizeof(lum)))
+        if (cfs_copy_from_user(&lum, lump, sizeof(lum)))
                 return(-EFAULT);
 
         switch (lum.lmm_magic) {
@@ -1780,7 +1779,7 @@ static int llu_lov_file_setstripe(struct inode *ino, unsigned long arg)
 
         LASSERT(sizeof(lum) == sizeof(*lump));
         LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
-        if (copy_from_user(&lum, lump, sizeof(lum)))
+        if (cfs_copy_from_user(&lum, lump, sizeof(lum)))
                 RETURN(-EFAULT);
 
         rc = llu_lov_setstripe_ea_info(ino, flags, &lum, sizeof(lum));
index fe56468..79eaf6b 100644
@@ -425,7 +425,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
                 /* Check for the proper lock. */
                 if (!ll_have_md_lock(inode, MDS_INODELOCK_LOOKUP))
                         goto do_lock;
-                down(&lli->lli_och_sem);
+                cfs_down(&lli->lli_och_sem);
                 if (*och_p) { /* Everything is open already, do nothing */
                         /*(*och_usecount)++;  Do not let them steal our open
                           handle from under us */
@@ -436,11 +436,11 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
                            hope the lock won't be invalidated in between. But
                            if it is, we'll reopen the open request to the
                            MDS later during the file open path */
-                        up(&lli->lli_och_sem);
+                        cfs_up(&lli->lli_och_sem);
                         ll_finish_md_op_data(op_data);
                         RETURN(1);
                 } else {
-                        up(&lli->lli_och_sem);
+                        cfs_up(&lli->lli_och_sem);
                 }
         }
 
@@ -605,36 +605,36 @@ static void ll_pin(struct dentry *de, struct vfsmount *mnt, int flag)
         ENTRY;
         LASSERT(ldd);
 
-        lock_kernel();
+        cfs_lock_kernel();
         /* Strictly speaking this introduces an additional race: the
          * increments should wait until the rpc has returned.
          * However, given that at present the function is void, this
          * issue is moot. */
         if (flag == 1 && (++ldd->lld_mnt_count) > 1) {
-                unlock_kernel();
+                cfs_unlock_kernel();
                 EXIT;
                 return;
         }
 
         if (flag == 0 && (++ldd->lld_cwd_count) > 1) {
-                unlock_kernel();
+                cfs_unlock_kernel();
                 EXIT;
                 return;
         }
-        unlock_kernel();
+        cfs_unlock_kernel();
 
         handle = (flag) ? &ldd->lld_mnt_och : &ldd->lld_cwd_och;
         oc = ll_mdscapa_get(inode);
         rc = obd_pin(sbi->ll_md_exp, ll_inode2fid(inode), oc, handle, flag);
         capa_put(oc);
         if (rc) {
-                lock_kernel();
+                cfs_lock_kernel();
                 memset(handle, 0, sizeof(*handle));
                 if (flag == 0)
                         ldd->lld_cwd_count--;
                 else
                         ldd->lld_mnt_count--;
-                unlock_kernel();
+                cfs_unlock_kernel();
         }
 
         EXIT;
@@ -650,7 +650,7 @@ static void ll_unpin(struct dentry *de, struct vfsmount *mnt, int flag)
         ENTRY;
         LASSERT(ldd);
 
-        lock_kernel();
+        cfs_lock_kernel();
         /* Strictly speaking this introduces an additional race: the
          * increments should wait until the rpc has returned.
          * However, given that at present the function is void, this
@@ -658,7 +658,7 @@ static void ll_unpin(struct dentry *de, struct vfsmount *mnt, int flag)
         handle = (flag) ? ldd->lld_mnt_och : ldd->lld_cwd_och;
         if (handle.och_magic != OBD_CLIENT_HANDLE_MAGIC) {
                 /* the "pin" failed */
-                unlock_kernel();
+                cfs_unlock_kernel();
                 EXIT;
                 return;
         }
@@ -667,7 +667,7 @@ static void ll_unpin(struct dentry *de, struct vfsmount *mnt, int flag)
                 count = --ldd->lld_mnt_count;
         else
                 count = --ldd->lld_cwd_count;
-        unlock_kernel();
+        cfs_unlock_kernel();
 
         if (count != 0) {
                 EXIT;
index ff45b5f..e6acdf9 100644
@@ -781,11 +781,11 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                 LASSERT(sizeof(lumv3.lmm_objects[0]) ==
                         sizeof(lumv3p->lmm_objects[0]));
                 /* first try with v1 which is smaller than v3 */
-                if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
+                if (cfs_copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
                         RETURN(-EFAULT);
 
                 if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
-                        if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
+                        if (cfs_copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
                                 RETURN(-EFAULT);
                 }
 
@@ -846,7 +846,7 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                         lmdp = (struct lov_user_mds_data *)arg;
                         lump = &lmdp->lmd_lmm;
                 }
-                if (copy_to_user(lump, lmm, lmmsize))
+                if (cfs_copy_to_user(lump, lmm, lmmsize))
                         GOTO(out_req, rc = -EFAULT);
         skip_lmm:
                 if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
@@ -868,7 +868,7 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                         st.st_ino     = inode->i_ino;
 
                         lmdp = (struct lov_user_mds_data *)arg;
-                        if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
+                        if (cfs_copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
                                 GOTO(out_req, rc = -EFAULT);
                 }
 
@@ -896,7 +896,7 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                         RETURN(rc);
 
                 OBD_ALLOC(lmm, lmmsize);
-                if (copy_from_user(lmm, lum, lmmsize))
+                if (cfs_copy_from_user(lmm, lum, lmmsize))
                         GOTO(free_lmm, rc = -EFAULT);
 
                 switch (lmm->lmm_magic) {
@@ -933,7 +933,7 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                 if (rc)
                         GOTO(free_lsm, rc);
 
-                if (copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
+                if (cfs_copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
                         GOTO(free_lsm, rc = -EFAULT);
 
                 EXIT;
@@ -992,7 +992,8 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                 if (!rc) {
                         str = req_capsule_server_get(&req->rq_pill,
                                                      &RMF_STRING);
-                        if (copy_to_user(data->ioc_pbuf1, str, data->ioc_plen1))
+                        if (cfs_copy_to_user(data->ioc_pbuf1, str,
+                                             data->ioc_plen1))
                                 rc = -EFAULT;
                 }
                 ptlrpc_req_finished(req);
@@ -1041,7 +1042,8 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                                    NULL);
                 if (rc) {
                         CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
-                        if (copy_to_user((void *)arg, check, sizeof(*check)))
+                        if (cfs_copy_to_user((void *)arg, check,
+                                             sizeof(*check)))
                                 rc = -EFAULT;
                         GOTO(out_poll, rc);
                 }
@@ -1050,7 +1052,8 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                                    NULL);
                 if (rc) {
                         CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
-                        if (copy_to_user((void *)arg, check, sizeof(*check)))
+                        if (cfs_copy_to_user((void *)arg, check,
+                                             sizeof(*check)))
                                 rc = -EFAULT;
                         GOTO(out_poll, rc);
                 }
@@ -1066,7 +1069,7 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                 if (!qctl)
                         RETURN(-ENOMEM);
 
-                if (copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
+                if (cfs_copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
                         GOTO(out_quotactl, rc = -EFAULT);
 
                 cmd = qctl->qc_cmd;
@@ -1162,7 +1165,7 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                         }
                 }
 
-                if (copy_to_user((void *)arg, qctl, sizeof(*qctl)))
+                if (cfs_copy_to_user((void *)arg, qctl, sizeof(*qctl)))
                         rc = -EFAULT;
 
         out_quotactl:
@@ -1173,8 +1176,8 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                 struct obd_device *obd = class_exp2obd(sbi->ll_dt_exp);
                 if (!obd)
                         RETURN(-EFAULT);
-                if (copy_to_user((void *)arg, obd->obd_name,
-                                strlen(obd->obd_name) + 1))
+                if (cfs_copy_to_user((void *)arg, obd->obd_name,
+                                     strlen(obd->obd_name) + 1))
                         RETURN (-EFAULT);
                 RETURN(0);
         }
@@ -1199,7 +1202,7 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
         case LL_IOC_GETOBDCOUNT: {
                 int count;
 
-                if (copy_from_user(&count, (int *)arg, sizeof(int)))
+                if (cfs_copy_from_user(&count, (int *)arg, sizeof(int)))
                         RETURN(-EFAULT);
 
                 if (!count) {
@@ -1212,14 +1215,14 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                         count = lmv->desc.ld_tgt_count;
                 }
 
-                if (copy_to_user((int *)arg, &count, sizeof(int)))
+                if (cfs_copy_to_user((int *)arg, &count, sizeof(int)))
                         RETURN(-EFAULT);
 
                 RETURN(0);
         }
         case LL_IOC_PATH2FID:
-                if (copy_to_user((void *)arg, ll_inode2fid(inode),
-                                 sizeof(struct lu_fid)))
+                if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
+                                     sizeof(struct lu_fid)))
                         RETURN(-EFAULT);
                 RETURN(0);
         case OBD_IOC_CHANGELOG_CLEAR: {
@@ -1229,7 +1232,7 @@ static int ll_dir_ioctl(struct inode *inode, struct file *file,
                 OBD_ALLOC_PTR(icc);
                 if (icc == NULL)
                         RETURN(-ENOMEM);
-                if (copy_from_user(icc, (void *)arg, sizeof(*icc)))
+                if (cfs_copy_from_user(icc, (void *)arg, sizeof(*icc)))
                         GOTO(icc_free, rc = -EFAULT);
 
                 rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(*icc), icc,NULL);
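
Every branch of ll_dir_ioctl() now funnels user memory through the cfs_ wrappers with the same copy-in/validate/act/copy-out shape. A skeleton of that shape; struct my_ioc_data and the handler are hypothetical names for illustration, while cfs_copy_{from,to}_user are the wrappers this patch introduces:

    struct my_ioc_data { int in; int out; };

    static int my_ioctl(unsigned long arg)
    {
            struct my_ioc_data d;

            if (cfs_copy_from_user(&d, (void *)arg, sizeof(d)))
                    return -EFAULT;         /* bad user pointer */

            d.out = d.in;                   /* ... the real work ... */

            if (cfs_copy_to_user((void *)arg, &d, sizeof(d)))
                    return -EFAULT;         /* copy the result back */
            return 0;
    }
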
index cb32570..39624c7 100644
@@ -200,15 +200,15 @@ int ll_md_real_close(struct inode *inode, int flags)
                 och_usecount = &lli->lli_open_fd_read_count;
         }
 
-        down(&lli->lli_och_sem);
+        cfs_down(&lli->lli_och_sem);
         if (*och_usecount) { /* There are still users of this handle, so
                                 skip freeing it. */
-                up(&lli->lli_och_sem);
+                cfs_up(&lli->lli_och_sem);
                 RETURN(0);
         }
         och=*och_p;
         *och_p = NULL;
-        up(&lli->lli_och_sem);
+        cfs_up(&lli->lli_och_sem);
 
         if (och) { /* There might be a race and somebody may have freed
                       this och already */
@@ -240,7 +240,7 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
                 struct inode *inode = file->f_dentry->d_inode;
                 ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
 
-                down(&lli->lli_och_sem);
+                cfs_down(&lli->lli_och_sem);
                 if (fd->fd_omode & FMODE_WRITE) {
                         lockmode = LCK_CW;
                         LASSERT(lli->lli_open_fd_write_count);
@@ -254,7 +254,7 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
                         LASSERT(lli->lli_open_fd_read_count);
                         lli->lli_open_fd_read_count--;
                 }
-                up(&lli->lli_och_sem);
+                cfs_up(&lli->lli_och_sem);
 
                 if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
                                    LDLM_IBITS, &policy, lockmode,
@@ -514,14 +514,14 @@ int ll_file_open(struct inode *inode, struct file *file)
 
         fd->fd_file = file;
         if (S_ISDIR(inode->i_mode)) {
-                spin_lock(&lli->lli_lock);
+                cfs_spin_lock(&lli->lli_lock);
                 if (lli->lli_opendir_key == NULL && lli->lli_opendir_pid == 0) {
                         LASSERT(lli->lli_sai == NULL);
                         lli->lli_opendir_key = fd;
                         lli->lli_opendir_pid = cfs_curproc_pid();
                         opendir_set = 1;
                 }
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
         }
 
         if (inode->i_sb->s_root == file->f_dentry) {
@@ -571,14 +571,14 @@ restart:
                 och_usecount = &lli->lli_open_fd_read_count;
         }
 
-        down(&lli->lli_och_sem);
+        cfs_down(&lli->lli_och_sem);
         if (*och_p) { /* Open handle is present */
                 if (it_disposition(it, DISP_OPEN_OPEN)) {
                         /* Well, there's an extra open request that we do not
                            need; let's close it somehow. This will decref the
                            request. */
                         rc = it_open_error(DISP_OPEN_OPEN, it);
                         if (rc) {
-                                up(&lli->lli_och_sem);
+                                cfs_up(&lli->lli_och_sem);
                                 ll_file_data_put(fd);
                                 GOTO(out_openerr, rc);
                         }
@@ -591,7 +591,7 @@ restart:
                 rc = ll_local_open(file, it, fd, NULL);
                 if (rc) {
                         (*och_usecount)--;
-                        up(&lli->lli_och_sem);
+                        cfs_up(&lli->lli_och_sem);
                         ll_file_data_put(fd);
                         GOTO(out_openerr, rc);
                 }
@@ -603,7 +603,7 @@ restart:
                            could be cancelled, and since blocking ast handler
                            would attempt to grab och_sem as well, that would
                            result in a deadlock */
-                        up(&lli->lli_och_sem);
+                        cfs_up(&lli->lli_och_sem);
                         it->it_create_mode |= M_CHECK_STALE;
                         rc = ll_intent_file_open(file, NULL, 0, it);
                         it->it_create_mode &= ~M_CHECK_STALE;
@@ -645,7 +645,7 @@ restart:
                         GOTO(out_och_free, rc);
                 }
         }
-        up(&lli->lli_och_sem);
+        cfs_up(&lli->lli_och_sem);
 
         /* Must do this outside lli_och_sem lock to prevent deadlock where
            different kind of OPEN lock for this same inode gets cancelled
@@ -676,7 +676,7 @@ out_och_free:
                         *och_p = NULL; /* OBD_FREE writes some magic there */
                         (*och_usecount)--;
                 }
-                up(&lli->lli_och_sem);
+                cfs_up(&lli->lli_och_sem);
 out_openerr:
                 if (opendir_set != 0)
                         ll_stop_statahead(inode, lli->lli_opendir_key);
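
The och_sem and lli_lock changes above are mechanical renames. As a rough sketch of why no behaviour changes -- assumed aliases, not quoted from the libcfs headers in this commit -- the Linux port can define the new names as thin wrappers over the native primitives:

    /* Hypothetical Linux-side aliases; illustration only. */
    typedef struct semaphore cfs_semaphore_t;
    typedef spinlock_t       cfs_spinlock_t;

    #define cfs_down(sem)          down(sem)          /* may sleep */
    #define cfs_up(sem)            up(sem)
    #define cfs_spin_lock(lock)    spin_lock(lock)
    #define cfs_spin_unlock(lock)  spin_unlock(lock)
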
@@ -845,7 +845,7 @@ static ssize_t ll_file_io_generic(const struct lu_env *env,
 #endif
                         if ((iot == CIT_WRITE) &&
                             !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
-                                down(&lli->lli_write_sem);
+                                cfs_down(&lli->lli_write_sem);
                                 write_sem_locked = 1;
                         }
                         break;
@@ -863,7 +863,7 @@ static ssize_t ll_file_io_generic(const struct lu_env *env,
                 }
                 result = cl_io_loop(env, io);
                 if (write_sem_locked)
-                        up(&lli->lli_write_sem);
+                        cfs_up(&lli->lli_write_sem);
         } else {
                 /* cl_io_rw_init() handled IO */
                 result = io->ci_result;
@@ -1202,8 +1202,8 @@ static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
         if (!cfs_capable(CFS_CAP_SYS_ADMIN))
                 RETURN(-EPERM);
 
-        if (copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
-                           sizeof(struct ll_recreate_obj)))
+        if (cfs_copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
+                               sizeof(struct ll_recreate_obj)))
                 RETURN(-EFAULT);
 
         OBDO_ALLOC(oa);
@@ -1369,7 +1369,7 @@ static int ll_lov_setea(struct inode *inode, struct file *file,
         if (lump == NULL) {
                 RETURN(-ENOMEM);
         }
-        if (copy_from_user(lump, (struct lov_user_md  *)arg, lum_size)) {
+        if (cfs_copy_from_user(lump, (struct lov_user_md  *)arg, lum_size)) {
                 OBD_FREE(lump, lum_size);
                 RETURN(-EFAULT);
         }
@@ -1394,12 +1394,12 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
 
         /* first try with v1 which is smaller than v3 */
         lum_size = sizeof(struct lov_user_md_v1);
-        if (copy_from_user(lumv1, lumv1p, lum_size))
+        if (cfs_copy_from_user(lumv1, lumv1p, lum_size))
                 RETURN(-EFAULT);
 
         if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
                 lum_size = sizeof(struct lov_user_md_v3);
-                if (copy_from_user(&lumv3, lumv3p, lum_size))
+                if (cfs_copy_from_user(&lumv3, lumv3p, lum_size))
                         RETURN(-EFAULT);
         }
 
@@ -1435,24 +1435,24 @@ int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
         if (ll_file_nolock(file))
                 RETURN(-EOPNOTSUPP);
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
                 CWARN("group lock already existed with gid %lu\n",
                        fd->fd_grouplock.cg_gid);
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
                 RETURN(-EINVAL);
         }
         LASSERT(fd->fd_grouplock.cg_lock == NULL);
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
 
         rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
                               arg, (file->f_flags & O_NONBLOCK), &grouplock);
         if (rc)
                 RETURN(rc);
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
                 CERROR("another thread just won the race\n");
                 cl_put_grouplock(&grouplock);
                 RETURN(-EINVAL);
@@ -1460,7 +1460,7 @@ int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
 
         fd->fd_flags |= LL_FILE_GROUP_LOCKED;
         fd->fd_grouplock = grouplock;
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
 
         CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
         RETURN(0);
@@ -1473,9 +1473,9 @@ int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
         struct ccc_grouplock    grouplock;
         ENTRY;
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
                 CWARN("no group lock held\n");
                 RETURN(-EINVAL);
         }
@@ -1484,14 +1484,14 @@ int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
         if (fd->fd_grouplock.cg_gid != arg) {
                 CWARN("group lock %lu doesn't match current id %lu\n",
                        arg, fd->fd_grouplock.cg_gid);
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
                 RETURN(-EINVAL);
         }
 
         grouplock = fd->fd_grouplock;
         memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
         fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
 
         cl_put_grouplock(&grouplock);
         CDEBUG(D_INFO, "group lock %lu released\n", arg);
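
Both group-lock hunks keep the existing race-handling shape: test the flag under lli_lock, drop the spinlock around the blocking cl_get_grouplock() call, then re-take it and re-test before publishing. Condensed, using the names from the code above:

    cfs_spin_lock(&lli->lli_lock);
    if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {      /* already held */
            cfs_spin_unlock(&lli->lli_lock);
            RETURN(-EINVAL);
    }
    cfs_spin_unlock(&lli->lli_lock);

    rc = cl_get_grouplock(...);                     /* may block */

    cfs_spin_lock(&lli->lli_lock);
    if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {      /* lost the race */
            cfs_spin_unlock(&lli->lli_lock);
            cl_put_grouplock(&grouplock);
            RETURN(-EINVAL);
    }
    fd->fd_flags |= LL_FILE_GROUP_LOCKED;           /* publish under lock */
    fd->fd_grouplock = grouplock;
    cfs_spin_unlock(&lli->lli_lock);
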
@@ -1596,7 +1596,7 @@ int ll_fid2path(struct obd_export *exp, void *arg)
         OBD_ALLOC_PTR(gfin);
         if (gfin == NULL)
                 RETURN(-ENOMEM);
-        if (copy_from_user(gfin, arg, sizeof(*gfin))) {
+        if (cfs_copy_from_user(gfin, arg, sizeof(*gfin))) {
                 OBD_FREE_PTR(gfin);
                 RETURN(-EFAULT);
         }
@@ -1614,7 +1614,7 @@ int ll_fid2path(struct obd_export *exp, void *arg)
         rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
         if (rc)
                 GOTO(gf_free, rc);
-        if (copy_to_user(arg, gfout, outsize))
+        if (cfs_copy_to_user(arg, gfout, outsize))
                 rc = -EFAULT;
 
 gf_free:
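
The cfs_copy_{from,to}_user() wrappers preserve the convention the callers above depend on: zero on success, non-zero when bytes were left uncopied, which the callers turn into -EFAULT. A minimal sketch of the assumed Linux mapping:

    #define cfs_copy_from_user(to, from, n)  copy_from_user(to, from, n)
    #define cfs_copy_to_user(to, from, n)    copy_to_user(to, from, n)
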
@@ -1688,15 +1688,16 @@ int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
                 if (fiemap_s == NULL)
                         RETURN(-ENOMEM);
 
-                if (copy_from_user(fiemap_s,(struct ll_user_fiemap __user *)arg,
-                                   sizeof(*fiemap_s)))
+                if (cfs_copy_from_user(fiemap_s,
+                                       (struct ll_user_fiemap __user *)arg,
+                                       sizeof(*fiemap_s)))
                         GOTO(error, rc = -EFAULT);
 
                 if (fiemap_s->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
                         fiemap_s->fm_flags = fiemap_s->fm_flags &
                                                     ~LUSTRE_FIEMAP_FLAGS_COMPAT;
-                        if (copy_to_user((char *)arg, fiemap_s,
-                                         sizeof(*fiemap_s)))
+                        if (cfs_copy_to_user((char *)arg, fiemap_s,
+                                             sizeof(*fiemap_s)))
                                 GOTO(error, rc = -EFAULT);
 
                         GOTO(error, rc = -EBADR);
@@ -1706,7 +1707,7 @@ int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
                  * it is used to calculate end_offset and device from the
                  * previous fiemap call. */
                 if (extent_count) {
-                        if (copy_from_user(&fiemap_s->fm_extents[0],
+                        if (cfs_copy_from_user(&fiemap_s->fm_extents[0],
                             (char __user *)arg + sizeof(*fiemap_s),
                             sizeof(struct ll_fiemap_extent)))
                                 GOTO(error, rc = -EFAULT);
@@ -1730,7 +1731,7 @@ int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
                         ret_bytes += (fiemap_s->fm_mapped_extents *
                                          sizeof(struct ll_fiemap_extent));
 
-                if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
+                if (cfs_copy_to_user((void *)arg, fiemap_s, ret_bytes))
                         rc = -EFAULT;
 
 error:
@@ -1759,8 +1760,8 @@ error:
         case LL_IOC_FLUSHCTX:
                 RETURN(ll_flush_ctx(inode));
         case LL_IOC_PATH2FID: {
-                if (copy_to_user((void *)arg, ll_inode2fid(inode),
-                                 sizeof(struct lu_fid)))
+                if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
+                                     sizeof(struct lu_fid)))
                         RETURN(-EFAULT);
 
                 RETURN(0);
@@ -2245,9 +2246,9 @@ int lustre_check_acl(struct inode *inode, int mask)
         int rc;
         ENTRY;
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         acl = posix_acl_dup(lli->lli_posix_acl);
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
 
         if (!acl)
                 RETURN(-EAGAIN);
@@ -2326,7 +2327,7 @@ int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
                 return rc;
         } else {
 check_groups:
-                if (in_group_p(inode->i_gid))
+                if (cfs_curproc_is_in_groups(inode->i_gid))
                         mode >>= 3;
         }
         if ((mode & mask & S_IRWXO) == mask)
@@ -2444,8 +2445,8 @@ struct inode_operations ll_file_inode_operations = {
 
 /* dynamic ioctl number support routines */
 static struct llioc_ctl_data {
-        struct rw_semaphore ioc_sem;
-        struct list_head    ioc_head;
+        cfs_rw_semaphore_t      ioc_sem;
+        cfs_list_t              ioc_head;
 } llioc = {
         __RWSEM_INITIALIZER(llioc.ioc_sem),
         CFS_LIST_HEAD_INIT(llioc.ioc_head)
@@ -2453,7 +2454,7 @@ static struct llioc_ctl_data {
 
 
 struct llioc_data {
-        struct list_head        iocd_list;
+        cfs_list_t              iocd_list;
         unsigned int            iocd_size;
         llioc_callback_t        iocd_cb;
         unsigned int            iocd_count;
@@ -2481,9 +2482,9 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
         in_data->iocd_count = count;
         memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
 
-        down_write(&llioc.ioc_sem);
-        list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
-        up_write(&llioc.ioc_sem);
+        cfs_down_write(&llioc.ioc_sem);
+        cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
+        cfs_up_write(&llioc.ioc_sem);
 
         RETURN(in_data);
 }
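
A hedged usage sketch for the dynamic-ioctl registry touched above. The callback signature is inferred from ll_iocontrol_call() further down, and my_cb/MY_IOC_CMD are made-up names:

    static enum llioc_iter my_cb(struct inode *inode, struct file *file,
                                 unsigned int cmd, unsigned long arg,
                                 void *magic, int *rcp)
    {
            *rcp = 0;            /* result handed back to the caller */
            return LLIOC_STOP;   /* stop scanning registered blocks */
    }

    static unsigned int my_cmds[] = { MY_IOC_CMD };

    void *handle = ll_iocontrol_register(my_cb, 1, my_cmds);
    /* ... my_cb now serves MY_IOC_CMD via ll_iocontrol_call() ... */
    ll_iocontrol_unregister(handle);
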
@@ -2495,19 +2496,19 @@ void ll_iocontrol_unregister(void *magic)
         if (magic == NULL)
                 return;
 
-        down_write(&llioc.ioc_sem);
-        list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
+        cfs_down_write(&llioc.ioc_sem);
+        cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
                 if (tmp == magic) {
                         unsigned int size = tmp->iocd_size;
 
-                        list_del(&tmp->iocd_list);
-                        up_write(&llioc.ioc_sem);
+                        cfs_list_del(&tmp->iocd_list);
+                        cfs_up_write(&llioc.ioc_sem);
 
                         OBD_FREE(tmp, size);
                         return;
                 }
         }
-        up_write(&llioc.ioc_sem);
+        cfs_up_write(&llioc.ioc_sem);
 
         CWARN("didn't find iocontrol register block with magic: %p\n", magic);
 }
@@ -2522,8 +2523,8 @@ enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
         struct llioc_data *data;
         int rc = -EINVAL, i;
 
-        down_read(&llioc.ioc_sem);
-        list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
+        cfs_down_read(&llioc.ioc_sem);
+        cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
                 for (i = 0; i < data->iocd_count; i++) {
                         if (cmd != data->iocd_cmd[i])
                                 continue;
@@ -2535,7 +2536,7 @@ enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
                 if (ret == LLIOC_STOP)
                         break;
         }
-        up_read(&llioc.ioc_sem);
+        cfs_up_read(&llioc.ioc_sem);
 
         if (rcp)
                 *rcp = rc;
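
The registry itself is guarded by a reader/writer semaphore: writers (register/unregister) take it exclusively, while ll_iocontrol_call() only reads the list. The assumed Linux aliases, consistent with the one-for-one renames in these hunks:

    typedef struct rw_semaphore cfs_rw_semaphore_t;

    #define cfs_down_read(sem)   down_read(sem)
    #define cfs_up_read(sem)     up_read(sem)
    #define cfs_down_write(sem)  down_write(sem)
    #define cfs_up_write(sem)    up_write(sem)
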
index 1a7bd1f..ed5824b 100644 (file)
  */
 
 /* capas for oss writeback and those that failed to renew */
-static LIST_HEAD(ll_idle_capas);
+static CFS_LIST_HEAD(ll_idle_capas);
 static struct ptlrpc_thread ll_capa_thread;
-static struct list_head *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
+static cfs_list_t *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
 
 /* llite capa renewal timer */
 struct timer_list ll_capa_timer;
 /* for debug: indicate whether capa on llite is enabled or not */
-static atomic_t ll_capa_debug = ATOMIC_INIT(0);
+static cfs_atomic_t ll_capa_debug = CFS_ATOMIC_INIT(0);
 static unsigned long long ll_capa_renewed = 0;
 static unsigned long long ll_capa_renewal_noent = 0;
 static unsigned long long ll_capa_renewal_failed = 0;
@@ -71,7 +71,7 @@ static unsigned long long ll_capa_renewal_retries = 0;
 
 static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
 {
-        if (time_before(expiry, ll_capa_timer.expires) ||
+        if (cfs_time_before(expiry, ll_capa_timer.expires) ||
             !timer_pending(&ll_capa_timer)) {
                 mod_timer(&ll_capa_timer, expiry);
                 DEBUG_CAPA(D_SEC, &ocapa->c_capa,
@@ -98,19 +98,21 @@ static inline int have_expired_capa(void)
         /* if ll_capa_list has a client capa about to expire, or
          * ll_idle_capas has an expired capa, return 1.
          */
-        spin_lock(&capa_lock);
-        if (!list_empty(ll_capa_list)) {
-                ocapa = list_entry(ll_capa_list->next, struct obd_capa, c_list);
+        cfs_spin_lock(&capa_lock);
+        if (!cfs_list_empty(ll_capa_list)) {
+                ocapa = cfs_list_entry(ll_capa_list->next, struct obd_capa,
+                                       c_list);
                 expired = capa_is_to_expire(ocapa);
                 if (!expired)
                         update_capa_timer(ocapa, capa_renewal_time(ocapa));
-        } else if (!list_empty(&ll_idle_capas)) {
-                ocapa = list_entry(ll_idle_capas.next, struct obd_capa, c_list);
+        } else if (!cfs_list_empty(&ll_idle_capas)) {
+                ocapa = cfs_list_entry(ll_idle_capas.next, struct obd_capa,
+                                       c_list);
                 expired = capa_is_expired(ocapa);
                 if (!expired)
                         update_capa_timer(ocapa, ocapa->c_expiry);
         }
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         if (expired)
                 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
@@ -122,13 +124,13 @@ static inline int ll_capa_check_stop(void)
         return (ll_capa_thread.t_flags & SVC_STOPPING) ? 1: 0;
 }
 
-static void sort_add_capa(struct obd_capa *ocapa, struct list_head *head)
+static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
 {
         struct obd_capa *tmp;
-        struct list_head *before = NULL;
+        cfs_list_t *before = NULL;
 
         /* TODO: client capas are sorted by expiry; this could be optimized */
-        list_for_each_entry_reverse(tmp, head, c_list) {
+        cfs_list_for_each_entry_reverse(tmp, head, c_list) {
                 if (cfs_time_aftereq(ocapa->c_expiry, tmp->c_expiry)) {
                         before = &tmp->c_list;
                         break;
@@ -136,13 +138,13 @@ static void sort_add_capa(struct obd_capa *ocapa, struct list_head *head)
         }
 
         LASSERT(&ocapa->c_list != before);
-        list_add(&ocapa->c_list, before ?: head);
+        cfs_list_add(&ocapa->c_list, before ?: head);
 }
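
The `before ?: head` argument above is GNU C's binary conditional: a ?: b yields a when a is non-NULL, else b. If no entry with an earlier-or-equal expiry was found, the capa is added right after the list head, keeping the list sorted by ascending expiry:

    cfs_list_add(&ocapa->c_list, before ?: head);
    /* is equivalent to */
    cfs_list_add(&ocapa->c_list, before != NULL ? before : head);
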
 
 static inline int obd_capa_open_count(struct obd_capa *oc)
 {
         struct ll_inode_info *lli = ll_i2info(oc->u.cli.inode);
-        return atomic_read(&lli->lli_open_count);
+        return cfs_atomic_read(&lli->lli_open_count);
 }
 
 static void ll_delete_capa(struct obd_capa *ocapa)
@@ -153,11 +155,11 @@ static void ll_delete_capa(struct obd_capa *ocapa)
                 LASSERT(lli->lli_mds_capa == ocapa);
                 lli->lli_mds_capa = NULL;
         } else if (capa_for_oss(&ocapa->c_capa)) {
-                list_del_init(&ocapa->u.cli.lli_list);
+                cfs_list_del_init(&ocapa->u.cli.lli_list);
         }
 
         DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free client");
-        list_del_init(&ocapa->c_list);
+        cfs_list_del_init(&ocapa->c_list);
         capa_count[CAPA_SITE_CLIENT]--;
         /* release the ref taken at allocation */
         capa_put(ocapa);
@@ -179,7 +181,7 @@ static int capa_thread_main(void *unused)
         cfs_daemonize("ll_capa");
 
         ll_capa_thread.t_flags = SVC_RUNNING;
-        wake_up(&ll_capa_thread.t_ctl_waitq);
+        cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
 
         while (1) {
                 l_wait_event(ll_capa_thread.t_ctl_waitq,
@@ -191,8 +193,8 @@ static int capa_thread_main(void *unused)
 
                 next = NULL;
 
-                spin_lock(&capa_lock);
-                list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
+                cfs_spin_lock(&capa_lock);
+                cfs_list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
                         LASSERT(ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC);
 
                         if (!capa_is_to_expire(ocapa)) {
@@ -200,7 +202,7 @@ static int capa_thread_main(void *unused)
                                 break;
                         }
 
-                        list_del_init(&ocapa->c_list);
+                        cfs_list_del_init(&ocapa->c_list);
 
                         /* for MDS capability, only renew those which belong to
                          * dir, or its inode is opened, or client holds LOOKUP
@@ -238,10 +240,10 @@ static int capa_thread_main(void *unused)
 
                         capa_get(ocapa);
                         ll_capa_renewed++;
-                        spin_unlock(&capa_lock);
+                        cfs_spin_unlock(&capa_lock);
                         rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
                                            ll_update_capa);
-                        spin_lock(&capa_lock);
+                        cfs_spin_lock(&capa_lock);
                         if (rc) {
                                 DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
                                            "renew failed: %d", rc);
@@ -252,19 +254,20 @@ static int capa_thread_main(void *unused)
                 if (next)
                         update_capa_timer(next, capa_renewal_time(next));
 
-                list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas, c_list) {
+                cfs_list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
+                                             c_list) {
                         if (!capa_is_expired(ocapa)) {
                                 if (!next)
                                         update_capa_timer(ocapa, ocapa->c_expiry);
                                 break;
                         }
 
-                        if (atomic_read(&ocapa->c_refc) > 1) {
+                        if (cfs_atomic_read(&ocapa->c_refc) > 1) {
                                 DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                                            "expired(c_refc %d), don't release",
-                                           atomic_read(&ocapa->c_refc));
+                                           cfs_atomic_read(&ocapa->c_refc));
                                 /* don't try to renew any more */
-                                list_del_init(&ocapa->c_list);
+                                cfs_list_del_init(&ocapa->c_list);
                                 continue;
                         }
 
@@ -273,17 +276,17 @@ static int capa_thread_main(void *unused)
                         ll_delete_capa(ocapa);
                 }
 
-                spin_unlock(&capa_lock);
+                cfs_spin_unlock(&capa_lock);
         }
 
         ll_capa_thread.t_flags = SVC_STOPPED;
-        wake_up(&ll_capa_thread.t_ctl_waitq);
+        cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
         RETURN(0);
 }
 
 void ll_capa_timer_callback(unsigned long unused)
 {
-        wake_up(&ll_capa_thread.t_ctl_waitq);
+        cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
 }
 
 int ll_capa_thread_start(void)
@@ -291,15 +294,15 @@ int ll_capa_thread_start(void)
         int rc;
         ENTRY;
 
-        init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
+        cfs_waitq_init(&ll_capa_thread.t_ctl_waitq);
 
-        rc = kernel_thread(capa_thread_main, NULL, 0);
+        rc = cfs_kernel_thread(capa_thread_main, NULL, 0);
         if (rc < 0) {
                 CERROR("cannot start expired capa thread: rc %d\n", rc);
                 RETURN(rc);
         }
-        wait_event(ll_capa_thread.t_ctl_waitq,
-                   ll_capa_thread.t_flags & SVC_RUNNING);
+        cfs_wait_event(ll_capa_thread.t_ctl_waitq,
+                       ll_capa_thread.t_flags & SVC_RUNNING);
 
         RETURN(0);
 }
@@ -307,9 +310,9 @@ int ll_capa_thread_start(void)
 void ll_capa_thread_stop(void)
 {
         ll_capa_thread.t_flags = SVC_STOPPING;
-        wake_up(&ll_capa_thread.t_ctl_waitq);
-        wait_event(ll_capa_thread.t_ctl_waitq,
-                   ll_capa_thread.t_flags & SVC_STOPPED);
+        cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+        cfs_wait_event(ll_capa_thread.t_ctl_waitq,
+                       ll_capa_thread.t_flags & SVC_STOPPED);
 }
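
The thread start/stop paths rename three wait-queue calls; the renames themselves pin down the assumed Linux mapping:

    #define cfs_waitq_init(wq)        init_waitqueue_head(wq)
    #define cfs_waitq_signal(wq)      wake_up(wq)
    #define cfs_wait_event(wq, cond)  wait_event(wq, cond)
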
 
 struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
@@ -326,8 +329,8 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
         LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
                 opc == CAPA_OPC_OSS_TRUNC);
 
-        spin_lock(&capa_lock);
-        list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+        cfs_spin_lock(&capa_lock);
+        cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
                 if (capa_is_expired(ocapa))
                         continue;
                 if ((opc & CAPA_OPC_OSS_WRITE) &&
@@ -357,13 +360,13 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
         } else {
                 ocapa = NULL;
 
-                if (atomic_read(&ll_capa_debug)) {
+                if (cfs_atomic_read(&ll_capa_debug)) {
                         CERROR("no capability for "DFID" opc "LPX64"\n",
                                PFID(&lli->lli_fid), opc);
-                        atomic_set(&ll_capa_debug, 0);
+                        cfs_atomic_set(&ll_capa_debug, 0);
                 }
         }
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         RETURN(ocapa);
 }
@@ -380,12 +383,12 @@ struct obd_capa *ll_mdscapa_get(struct inode *inode)
         if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
                 RETURN(NULL);
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         ocapa = capa_get(lli->lli_mds_capa);
-        spin_unlock(&capa_lock);
-        if (!ocapa && atomic_read(&ll_capa_debug)) {
+        cfs_spin_unlock(&capa_lock);
+        if (!ocapa && cfs_atomic_read(&ll_capa_debug)) {
                 CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
-                atomic_set(&ll_capa_debug, 0);
+                cfs_atomic_set(&ll_capa_debug, 0);
         }
 
         RETURN(ocapa);
@@ -405,9 +408,9 @@ static struct obd_capa *do_add_mds_capa(struct inode *inode,
 
                 DEBUG_CAPA(D_SEC, capa, "add MDS");
         } else {
-                spin_lock(&old->c_lock);
+                cfs_spin_lock(&old->c_lock);
                 old->c_capa = *capa;
-                spin_unlock(&old->c_lock);
+                cfs_spin_unlock(&old->c_lock);
 
                 DEBUG_CAPA(D_SEC, capa, "update MDS");
 
@@ -423,7 +426,7 @@ static struct obd_capa *do_lookup_oss_capa(struct inode *inode, int opc)
         struct obd_capa *ocapa;
 
         /* inside capa_lock */
-        list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+        cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
                 if ((capa_opc(&ocapa->c_capa) & opc) != opc)
                         continue;
 
@@ -443,18 +446,18 @@ static inline void inode_add_oss_capa(struct inode *inode,
 {
         struct ll_inode_info *lli = ll_i2info(inode);
         struct obd_capa *tmp;
-        struct list_head *next = NULL;
+        cfs_list_t *next = NULL;
 
         /* capas are sorted in lli_oss_capas so lookup can always find the
          * latest one */
-        list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
+        cfs_list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
                 if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
                         next = &tmp->u.cli.lli_list;
                         break;
                 }
         }
         LASSERT(&ocapa->u.cli.lli_list != next);
-        list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
+        cfs_list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
 }
 
 static struct obd_capa *do_add_oss_capa(struct inode *inode,
@@ -471,14 +474,14 @@ static struct obd_capa *do_add_oss_capa(struct inode *inode,
         old = do_lookup_oss_capa(inode, capa_opc(capa) & CAPA_OPC_OSS_ONLY);
         if (!old) {
                 ocapa->u.cli.inode = inode;
-                INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
+                CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
                 capa_count[CAPA_SITE_CLIENT]++;
 
                 DEBUG_CAPA(D_SEC, capa, "add OSS");
         } else {
-                spin_lock(&old->c_lock);
+                cfs_spin_lock(&old->c_lock);
                 old->c_capa = *capa;
-                spin_unlock(&old->c_lock);
+                cfs_spin_unlock(&old->c_lock);
 
                 DEBUG_CAPA(D_SEC, capa, "update OSS");
 
@@ -492,22 +495,22 @@ static struct obd_capa *do_add_oss_capa(struct inode *inode,
 
 struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
 {
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
                                                do_add_oss_capa(inode, ocapa);
 
         /* truncate capa won't renew */
         if (ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC) {
                 set_capa_expiry(ocapa);
-                list_del_init(&ocapa->c_list);
+                cfs_list_del_init(&ocapa->c_list);
                 sort_add_capa(ocapa, ll_capa_list);
 
                 update_capa_timer(ocapa, capa_renewal_time(ocapa));
         }
 
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
-        atomic_set(&ll_capa_debug, 1);
+        cfs_atomic_set(&ll_capa_debug, 1);
         return ocapa;
 }
 
@@ -528,7 +531,7 @@ int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
         if (IS_ERR(capa)) {
                 /* set error code */
                 rc = PTR_ERR(capa);
-                spin_lock(&capa_lock);
+                cfs_spin_lock(&capa_lock);
                 if (rc == -ENOENT) {
                         DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                                    "renewal canceled because object removed");
@@ -550,32 +553,32 @@ int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
                         }
                 }
 
-                list_del_init(&ocapa->c_list);
+                cfs_list_del_init(&ocapa->c_list);
                 sort_add_capa(ocapa, &ll_idle_capas);
-                spin_unlock(&capa_lock);
+                cfs_spin_unlock(&capa_lock);
 
                 capa_put(ocapa);
                 iput(inode);
                 RETURN(rc);
         }
 
-        spin_lock(&ocapa->c_lock);
+        cfs_spin_lock(&ocapa->c_lock);
         LASSERT(!memcmp(&ocapa->c_capa, capa,
                         offsetof(struct lustre_capa, lc_opc)));
         ocapa->c_capa = *capa;
         set_capa_expiry(ocapa);
-        spin_unlock(&ocapa->c_lock);
+        cfs_spin_unlock(&ocapa->c_lock);
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         if (capa_for_oss(capa))
                 inode_add_oss_capa(inode, ocapa);
         DEBUG_CAPA(D_SEC, capa, "renew");
         EXIT;
 retry:
-        list_del_init(&ocapa->c_list);
+        cfs_list_del_init(&ocapa->c_list);
         sort_add_capa(ocapa, ll_capa_list);
         update_capa_timer(ocapa, capa_renewal_time(ocapa));
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         capa_put(ocapa);
         iput(inode);
@@ -593,7 +596,7 @@ void ll_capa_open(struct inode *inode)
         if (!S_ISREG(inode->i_mode))
                 return;
 
-        atomic_inc(&lli->lli_open_count);
+        cfs_atomic_inc(&lli->lli_open_count);
 }
 
 void ll_capa_close(struct inode *inode)
@@ -607,7 +610,7 @@ void ll_capa_close(struct inode *inode)
         if (!S_ISREG(inode->i_mode))
                 return;
 
-        atomic_dec(&lli->lli_open_count);
+        cfs_atomic_dec(&lli->lli_open_count);
 }
 
 /* delete CAPA_OPC_OSS_TRUNC only */
@@ -622,9 +625,9 @@ void ll_truncate_free_capa(struct obd_capa *ocapa)
         /* release the ref taken at lookup */
         capa_put(ocapa);
         if (likely(ocapa->c_capa.lc_opc == CAPA_OPC_OSS_TRUNC)) {
-                spin_lock(&capa_lock);
+                cfs_spin_lock(&capa_lock);
                 ll_delete_capa(ocapa);
-                spin_unlock(&capa_lock);
+                cfs_spin_unlock(&capa_lock);
         }
 }
 
@@ -633,15 +636,15 @@ void ll_clear_inode_capas(struct inode *inode)
         struct ll_inode_info *lli = ll_i2info(inode);
         struct obd_capa *ocapa, *tmp;
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         ocapa = lli->lli_mds_capa;
         if (ocapa)
                 ll_delete_capa(ocapa);
 
-        list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
-                                 u.cli.lli_list)
+        cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
+                                     u.cli.lli_list)
                 ll_delete_capa(ocapa);
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 }
 
 void ll_print_capa_stat(struct ll_sb_info *sbi)
index 614de70..a952fb1 100644 (file)
@@ -52,11 +52,12 @@ void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
         struct ll_inode_info *lli = ll_i2info(club->cob_inode);
 
         ENTRY;
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         lli->lli_flags |= LLIF_SOM_DIRTY;
-        if (page != NULL && list_empty(&page->cpg_pending_linkage))
-                list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
-        spin_unlock(&lli->lli_lock);
+        if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
+                cfs_list_add(&page->cpg_pending_linkage,
+                             &club->cob_pending_list);
+        cfs_spin_unlock(&lli->lli_lock);
         EXIT;
 }
 
@@ -67,12 +68,12 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
         int rc = 0;
 
         ENTRY;
-        spin_lock(&lli->lli_lock);
-        if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
-                list_del_init(&page->cpg_pending_linkage);
+        cfs_spin_lock(&lli->lli_lock);
+        if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
+                cfs_list_del_init(&page->cpg_pending_linkage);
                 rc = 1;
         }
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
         if (rc)
                 ll_queue_done_writing(club->cob_inode, 0);
         EXIT;
@@ -87,11 +88,11 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
         struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
         ENTRY;
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         lli->lli_flags |= flags;
 
         if ((lli->lli_flags & LLIF_DONE_WRITING) &&
-            list_empty(&club->cob_pending_list)) {
+            cfs_list_empty(&club->cob_pending_list)) {
                 struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
 
                 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
@@ -100,12 +101,12 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
                                inode->i_ino, inode->i_generation,
                                lli->lli_flags);
                 /* DONE_WRITING is allowed and inode has no dirty page. */
-                spin_lock(&lcq->lcq_lock);
+                cfs_spin_lock(&lcq->lcq_lock);
 
-                LASSERT(list_empty(&lli->lli_close_list));
+                LASSERT(cfs_list_empty(&lli->lli_close_list));
                 CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
                        inode->i_ino, inode->i_generation);
-                list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
+                cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
 
                 /* Avoid a concurrent insertion into the close thread queue:
                  * the inode is already in the close thread, and open(),
                  * write(), close() may happen again; the epoch is closed as
                  * the inode is marked LLIF_EPOCH_PENDING, so do not queue
                  * it. */
                 lli->lli_flags &= ~LLIF_DONE_WRITING;
 
-                wake_up(&lcq->lcq_waitq);
-                spin_unlock(&lcq->lcq_lock);
+                cfs_waitq_signal(&lcq->lcq_waitq);
+                cfs_spin_unlock(&lcq->lcq_lock);
         }
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
         EXIT;
 }
 
@@ -151,8 +152,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
         struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
         ENTRY;
 
-        spin_lock(&lli->lli_lock);
-        if (!(list_empty(&club->cob_pending_list))) {
+        cfs_spin_lock(&lli->lli_lock);
+        if (!(cfs_list_empty(&club->cob_pending_list))) {
                 if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
                         LASSERT(*och != NULL);
                         LASSERT(lli->lli_pending_och == NULL);
@@ -160,7 +161,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
                          * request yet, DONE_WRITE is to be sent later. */
                         lli->lli_flags |= LLIF_EPOCH_PENDING;
                         lli->lli_pending_och = *och;
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
 
                         inode = igrab(inode);
                         LASSERT(inode);
@@ -172,7 +173,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
                          * and try DONE_WRITE again later. */
                         LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
                         lli->lli_flags |= LLIF_DONE_WRITING;
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
 
                         inode = igrab(inode);
                         LASSERT(inode);
@@ -192,21 +193,21 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
         } else {
                 /* Pack Size-on-MDS inode attributes only if they have changed */
                 if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
                         GOTO(out, 0);
                 }
 
                 /* There is a pending DONE_WRITE -- close epoch with no
                  * attribute change. */
                 if (lli->lli_flags & LLIF_EPOCH_PENDING) {
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
                         GOTO(out, 0);
                 }
         }
 
-        LASSERT(list_empty(&club->cob_pending_list));
+        LASSERT(cfs_list_empty(&club->cob_pending_list));
         lli->lli_flags &= ~LLIF_SOM_DIRTY;
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
         ll_done_writing_attr(inode, op_data);
 
         EXIT;
@@ -334,16 +335,16 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
 {
         struct ll_inode_info *lli = NULL;
 
-        spin_lock(&lcq->lcq_lock);
+        cfs_spin_lock(&lcq->lcq_lock);
 
-        if (!list_empty(&lcq->lcq_head)) {
-                lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
-                                 lli_close_list);
-                list_del_init(&lli->lli_close_list);
-        } else if (atomic_read(&lcq->lcq_stop))
+        if (!cfs_list_empty(&lcq->lcq_head)) {
+                lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
+                                     lli_close_list);
+                cfs_list_del_init(&lli->lli_close_list);
+        } else if (cfs_atomic_read(&lcq->lcq_stop))
                 lli = ERR_PTR(-EALREADY);
 
-        spin_unlock(&lcq->lcq_lock);
+        cfs_spin_unlock(&lcq->lcq_lock);
         return lli;
 }
 
@@ -358,7 +359,7 @@ static int ll_close_thread(void *arg)
                 cfs_daemonize(name);
         }
 
-        complete(&lcq->lcq_comp);
+        cfs_complete(&lcq->lcq_comp);
 
         while (1) {
                 struct l_wait_info lwi = { 0 };
@@ -379,7 +380,7 @@ static int ll_close_thread(void *arg)
         }
 
         CDEBUG(D_INFO, "ll_close exiting\n");
-        complete(&lcq->lcq_comp);
+        cfs_complete(&lcq->lcq_comp);
         RETURN(0);
 }
 
@@ -395,27 +396,27 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret)
         if (lcq == NULL)
                 return -ENOMEM;
 
-        spin_lock_init(&lcq->lcq_lock);
-        INIT_LIST_HEAD(&lcq->lcq_head);
-        init_waitqueue_head(&lcq->lcq_waitq);
-        init_completion(&lcq->lcq_comp);
+        cfs_spin_lock_init(&lcq->lcq_lock);
+        CFS_INIT_LIST_HEAD(&lcq->lcq_head);
+        cfs_waitq_init(&lcq->lcq_waitq);
+        cfs_init_completion(&lcq->lcq_comp);
 
-        pid = kernel_thread(ll_close_thread, lcq, 0);
+        pid = cfs_kernel_thread(ll_close_thread, lcq, 0);
         if (pid < 0) {
                 OBD_FREE(lcq, sizeof(*lcq));
                 return pid;
         }
 
-        wait_for_completion(&lcq->lcq_comp);
+        cfs_wait_for_completion(&lcq->lcq_comp);
         *lcq_ret = lcq;
         return 0;
 }
 
 void ll_close_thread_shutdown(struct ll_close_queue *lcq)
 {
-        init_completion(&lcq->lcq_comp);
-        atomic_inc(&lcq->lcq_stop);
-        wake_up(&lcq->lcq_waitq);
-        wait_for_completion(&lcq->lcq_comp);
+        cfs_init_completion(&lcq->lcq_comp);
+        cfs_atomic_inc(&lcq->lcq_stop);
+        cfs_waitq_signal(&lcq->lcq_waitq);
+        cfs_wait_for_completion(&lcq->lcq_comp);
         OBD_FREE(lcq, sizeof(*lcq));
 }
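
Likewise for the close-thread lifecycle: the completion and thread wrappers introduced here correspond one-for-one to the calls they replace (assumed aliases, mirroring the renames above):

    #define cfs_init_completion(c)            init_completion(c)
    #define cfs_complete(c)                   complete(c)
    #define cfs_wait_for_completion(c)        wait_for_completion(c)
    #define cfs_kernel_thread(fn, arg, flags) kernel_thread(fn, arg, flags)
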
index bca670e..a47dc9f 100644 (file)
@@ -89,7 +89,7 @@ extern struct file_operations ll_pgcache_seq_fops;
 
 /* llite setxid/access permission for user on remote client */
 struct ll_remote_perm {
-        struct hlist_node       lrp_list;
+        cfs_hlist_node_t        lrp_list;
         uid_t                   lrp_uid;
         gid_t                   lrp_gid;
         uid_t                   lrp_fsuid;
@@ -118,10 +118,10 @@ enum lli_flags {
 
 struct ll_inode_info {
         int                     lli_inode_magic;
-        struct semaphore        lli_size_sem;           /* protect open and change size */
+        cfs_semaphore_t         lli_size_sem;           /* protect open and change size */
         void                   *lli_size_sem_owner;
-        struct semaphore        lli_write_sem;
-        struct semaphore        lli_trunc_sem;
+        cfs_semaphore_t         lli_write_sem;
+        cfs_semaphore_t         lli_trunc_sem;
         char                   *lli_symlink_name;
         __u64                   lli_maxbytes;
         __u64                   lli_ioepoch;
@@ -129,8 +129,8 @@ struct ll_inode_info {
         cfs_time_t              lli_contention_time;
 
         /* this lock protects posix_acl, pending_write_llaps, mmap_cnt */
-        spinlock_t              lli_lock;
-        struct list_head        lli_close_list;
+        cfs_spinlock_t          lli_lock;
+        cfs_list_t              lli_close_list;
         /* handle is to be sent to MDS later on done_writing and setattr.
          * Open handle data are needed for the recovery to reconstruct
          * the inode state on the MDS. XXX: recovery is not ready yet. */
@@ -142,13 +142,13 @@ struct ll_inode_info {
         struct posix_acl       *lli_posix_acl;
 
         /* remote permission hash */
-        struct hlist_head      *lli_remote_perms;
+        cfs_hlist_head_t       *lli_remote_perms;
         unsigned long           lli_rmtperm_utime;
-        struct semaphore        lli_rmtperm_sem;
+        cfs_semaphore_t         lli_rmtperm_sem;
 
-        struct list_head        lli_dead_list;
+        cfs_list_t              lli_dead_list;
 
-        struct semaphore        lli_och_sem; /* Protects access to och pointers
+        cfs_semaphore_t         lli_och_sem; /* Protects access to och pointers
                                                 and their usage counters */
         /* We need all three because every inode may be opened in different
            modes */
@@ -168,9 +168,9 @@ struct ll_inode_info {
         /* fid capability */
         /* open count, currently used only by the capability code; indicates
          * whether the capability needs renewal */
-        atomic_t                lli_open_count;
+        cfs_atomic_t            lli_open_count;
         struct obd_capa        *lli_mds_capa;
-        struct list_head        lli_oss_capas;
+        cfs_list_t              lli_oss_capas;
 
         /* metadata stat-ahead */
         /*
@@ -229,7 +229,7 @@ enum ra_stat {
 };
 
 struct ll_ra_info {
-        atomic_t                  ra_cur_pages;
+        cfs_atomic_t              ra_cur_pages;
         unsigned long             ra_max_pages;
         unsigned long             ra_max_pages_per_file;
         unsigned long             ra_max_read_ahead_whole_pages;
@@ -310,20 +310,20 @@ enum stats_track_type {
 #define RCE_HASHES      32
 
 struct rmtacl_ctl_entry {
-        struct list_head rce_list;
+        cfs_list_t       rce_list;
         pid_t            rce_key; /* hash key */
         int              rce_ops; /* acl operation type */
 };
 
 struct rmtacl_ctl_table {
-        spinlock_t       rct_lock;
-        struct list_head rct_entries[RCE_HASHES];
+        cfs_spinlock_t   rct_lock;
+        cfs_list_t       rct_entries[RCE_HASHES];
 };
 
 #define EE_HASHES       32
 
 struct eacl_entry {
-        struct list_head      ee_list;
+        cfs_list_t            ee_list;
         pid_t                 ee_key; /* hash key */
         struct lu_fid         ee_fid;
         int                   ee_type; /* ACL type for ACCESS or DEFAULT */
@@ -331,17 +331,17 @@ struct eacl_entry {
 };
 
 struct eacl_table {
-        spinlock_t       et_lock;
-        struct list_head et_entries[EE_HASHES];
+        cfs_spinlock_t   et_lock;
+        cfs_list_t       et_entries[EE_HASHES];
 };
 
 struct ll_sb_info {
-        struct list_head          ll_list;
+        cfs_list_t                ll_list;
         /* this protects pglist and ra_info.  It isn't safe to
          * grab from interrupt contexts */
-        spinlock_t                ll_lock;
-        spinlock_t                ll_pp_extent_lock; /* Lock for pp_extent entries */
-        spinlock_t                ll_process_lock; /* Lock for ll_rw_process_info */
+        cfs_spinlock_t            ll_lock;
+        cfs_spinlock_t            ll_pp_extent_lock; /* Lock for pp_extent entries */
+        cfs_spinlock_t            ll_process_lock; /* Lock for ll_rw_process_info */
         struct obd_uuid           ll_sb_uuid;
         struct obd_export        *ll_md_exp;
         struct obd_export        *ll_dt_exp;
@@ -349,10 +349,10 @@ struct ll_sb_info {
         struct lu_fid             ll_root_fid; /* root object fid */
 
         int                       ll_flags;
-        struct list_head          ll_conn_chain; /* per-conn chain of SBs */
+        cfs_list_t                ll_conn_chain; /* per-conn chain of SBs */
         struct lustre_client_ocd  ll_lco;
 
-        struct list_head          ll_orphan_dentry_list; /*please don't ask -p*/
+        cfs_list_t                ll_orphan_dentry_list; /*please don't ask -p*/
         struct ll_close_queue    *ll_lcq;
 
         struct lprocfs_stats     *ll_stats; /* lprocfs stats counter */
@@ -367,8 +367,8 @@ struct ll_sb_info {
         struct file_operations   *ll_fop;
 
 #ifdef HAVE_EXPORT___IGET
-        struct list_head          ll_deathrow; /* inodes to be destroyed (b1443) */
-        spinlock_t                ll_deathrow_lock;
+        cfs_list_t                ll_deathrow;/*inodes to be destroyed (b1443)*/
+        cfs_spinlock_t            ll_deathrow_lock;
 #endif
         /* =0 - hold lock over whole read/write
          * >0 - max. chunk to be read/written w/o lock re-acquiring */
@@ -389,9 +389,9 @@ struct ll_sb_info {
 
         /* metadata stat-ahead */
         unsigned int              ll_sa_max;     /* max statahead RPCs */
-        atomic_t                  ll_sa_total;   /* statahead thread started
+        cfs_atomic_t              ll_sa_total;   /* statahead thread started
                                                   * count */
-        atomic_t                  ll_sa_wrong;   /* statahead thread stopped for
+        cfs_atomic_t              ll_sa_wrong;   /* statahead thread stopped for
                                                   * low hit ratio */
 
         dev_t                     ll_sdev_orig; /* save s_dev before assign for
@@ -406,14 +406,14 @@ struct ll_ra_read {
         pgoff_t             lrr_start;
         pgoff_t             lrr_count;
         struct task_struct *lrr_reader;
-        struct list_head    lrr_linkage;
+        cfs_list_t          lrr_linkage;
 };
 
 /*
  * per file-descriptor read-ahead data.
  */
 struct ll_readahead_state {
-        spinlock_t      ras_lock;
+        cfs_spinlock_t  ras_lock;
         /*
          * index of the last page that read(2) needed and that wasn't in the
          * cache. Used by ras_update() to detect seeks.
@@ -471,7 +471,7 @@ struct ll_readahead_state {
          * progress against this file descriptor. Used by read-ahead code,
          * protected by ->ras_lock.
          */
-        struct list_head ras_read_beads;
+        cfs_list_t      ras_read_beads;
         /*
          * The following 3 items are used for detecting the stride I/O
          * mode.
@@ -483,16 +483,16 @@ struct ll_readahead_state {
          * ras_stride_pages = stride_pages;
          * Note: all these three items are counted by pages.
          */
-        unsigned long ras_stride_length;
-        unsigned long ras_stride_pages;
-        pgoff_t ras_stride_offset;
+        unsigned long   ras_stride_length;
+        unsigned long   ras_stride_pages;
+        pgoff_t         ras_stride_offset;
         /*
          * count of consecutive stride requests, similar to
          * ras_consecutive_requests but used for stride I/O mode.
          * Note: stride read-ahead is enabled only after more than 2
          * consecutive stride requests have been detected.
          */
-        unsigned long ras_consecutive_stride_requests;
+        unsigned long   ras_consecutive_stride_requests;
 };
 
 struct ll_file_dir {
@@ -511,7 +511,7 @@ struct ll_file_data {
 
 struct lov_stripe_md;
 
-extern spinlock_t inode_lock;
+extern cfs_spinlock_t inode_lock;
 
 extern struct proc_dir_entry *proc_lustre_fs_root;
 
@@ -742,11 +742,11 @@ extern struct inode_operations ll_fast_symlink_inode_operations;
 
 /* llite/llite_close.c */
 struct ll_close_queue {
-        spinlock_t              lcq_lock;
-        struct list_head        lcq_head;
-        wait_queue_head_t       lcq_waitq;
-        struct completion       lcq_comp;
-        atomic_t                lcq_stop;
+        cfs_spinlock_t          lcq_lock;
+        cfs_list_t              lcq_head;
+        cfs_waitq_t             lcq_waitq;
+        cfs_completion_t        lcq_comp;
+        cfs_atomic_t            lcq_stop;
 };
 
 struct ccc_object *cl_inode2ccc(struct inode *inode);
@@ -926,8 +926,8 @@ typedef struct rb_node  rb_node_t;
 struct ll_lock_tree_node;
 struct ll_lock_tree {
         rb_root_t                       lt_root;
-        struct list_head                lt_locked_list;
-        struct ll_file_data             *lt_fd;
+        cfs_list_t                      lt_locked_list;
+        struct ll_file_data            *lt_fd;
 };
 
 int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
@@ -1013,8 +1013,8 @@ int ll_removexattr(struct dentry *dentry, const char *name);
 extern cfs_mem_cache_t *ll_remote_perm_cachep;
 extern cfs_mem_cache_t *ll_rmtperm_hash_cachep;
 
-struct hlist_head *alloc_rmtperm_hash(void);
-void free_rmtperm_hash(struct hlist_head *hash);
+cfs_hlist_head_t *alloc_rmtperm_hash(void);
+void free_rmtperm_hash(cfs_hlist_head_t *hash);
 int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
 int lustre_check_remote_perm(struct inode *inode, int mask);
 
@@ -1089,7 +1089,7 @@ void et_fini(struct eacl_table *et);
 struct ll_statahead_info {
         struct inode           *sai_inode;
         unsigned int            sai_generation; /* generation for statahead */
-        atomic_t                sai_refcount;   /* when access this struct, hold
+        cfs_atomic_t            sai_refcount;   /* when access this struct, hold
                                                  * refcount */
         unsigned int            sai_sent;       /* stat requests sent count */
         unsigned int            sai_replied;    /* stat requests which received
@@ -1115,9 +1115,9 @@ struct ll_statahead_info {
                                                  * hidden entries */
         cfs_waitq_t             sai_waitq;      /* stat-ahead wait queue */
         struct ptlrpc_thread    sai_thread;     /* stat-ahead thread */
-        struct list_head        sai_entries_sent;     /* entries sent out */
-        struct list_head        sai_entries_received; /* entries returned */
-        struct list_head        sai_entries_stated;   /* entries stated */
+        cfs_list_t              sai_entries_sent;     /* entries sent out */
+        cfs_list_t              sai_entries_received; /* entries returned */
+        cfs_list_t              sai_entries_stated;   /* entries stated */
 };
 
 int do_statahead_enter(struct inode *dir, struct dentry **dentry, int lookup);
@@ -1139,10 +1139,10 @@ void ll_statahead_mark(struct inode *dir, struct dentry *dentry)
         if (lli->lli_opendir_pid != cfs_curproc_pid())
                 return;
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         if (likely(lli->lli_sai != NULL && ldd != NULL))
                 ldd->lld_sa_generation = lli->lli_sai->sai_generation;
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
 }
 
 static inline
index ed63c2b..2c1db98 100644 (file)
 
 cfs_mem_cache_t *ll_file_data_slab;
 
-LIST_HEAD(ll_super_blocks);
-spinlock_t ll_sb_lock = SPIN_LOCK_UNLOCKED;
+CFS_LIST_HEAD(ll_super_blocks);
+cfs_spinlock_t ll_sb_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 extern struct address_space_operations ll_aops;
 extern struct address_space_operations ll_dir_aops;
 
 #ifndef log2
-#define log2(n) ffz(~(n))
+#define log2(n) cfs_ffz(~(n))
 #endif
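
The log2 fallback works because ffz() ("find first zero") returns the index of the lowest clear bit: for a power-of-two n, ~n is all ones except a single zero at bit log2(n). A worked example:

    /* n = 8 = 0b1000  ->  ~n = ...11110111
     * lowest zero bit of ~n is bit 3, so log2(8) = cfs_ffz(~8) = 3 */
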
 
 static struct ll_sb_info *ll_init_sbi(void)
@@ -82,10 +82,10 @@ static struct ll_sb_info *ll_init_sbi(void)
         if (!sbi)
                 RETURN(NULL);
 
-        spin_lock_init(&sbi->ll_lock);
-        init_mutex(&sbi->ll_lco.lco_lock);
-        spin_lock_init(&sbi->ll_pp_extent_lock);
-        spin_lock_init(&sbi->ll_process_lock);
+        cfs_spin_lock_init(&sbi->ll_lock);
+        cfs_init_mutex(&sbi->ll_lco.lco_lock);
+        cfs_spin_lock_init(&sbi->ll_pp_extent_lock);
+        cfs_spin_lock_init(&sbi->ll_process_lock);
         sbi->ll_rw_stats_on = 0;
 
         si_meminfo(&si);
@@ -105,16 +105,16 @@ static struct ll_sb_info *ll_init_sbi(void)
         sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
                                            SBI_DEFAULT_READAHEAD_WHOLE_MAX;
-        INIT_LIST_HEAD(&sbi->ll_conn_chain);
-        INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
+        CFS_INIT_LIST_HEAD(&sbi->ll_conn_chain);
+        CFS_INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
 
         ll_generate_random_uuid(uuid);
         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
 
-        spin_lock(&ll_sb_lock);
-        list_add_tail(&sbi->ll_list, &ll_super_blocks);
-        spin_unlock(&ll_sb_lock);
+        cfs_spin_lock(&ll_sb_lock);
+        cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
+        cfs_spin_unlock(&ll_sb_lock);
 
 #ifdef ENABLE_LLITE_CHECKSUM
         sbi->ll_flags |= LL_SBI_CHECKSUM;
@@ -125,12 +125,14 @@ static struct ll_sb_info *ll_init_sbi(void)
 #endif
 
 #ifdef HAVE_EXPORT___IGET
-        INIT_LIST_HEAD(&sbi->ll_deathrow);
-        spin_lock_init(&sbi->ll_deathrow_lock);
+        CFS_INIT_LIST_HEAD(&sbi->ll_deathrow);
+        cfs_spin_lock_init(&sbi->ll_deathrow_lock);
 #endif
         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
-                spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_r_hist.oh_lock);
-                spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_w_hist.oh_lock);
+                cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
+                                   pp_r_hist.oh_lock);
+                cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
+                                   pp_w_hist.oh_lock);
         }
 
         /* metadata statahead is enabled by default */
@@ -147,9 +149,9 @@ void ll_free_sbi(struct super_block *sb)
         ENTRY;
 
         if (sbi != NULL) {
-                spin_lock(&ll_sb_lock);
-                list_del(&sbi->ll_list);
-                spin_unlock(&ll_sb_lock);
+                cfs_spin_lock(&ll_sb_lock);
+                cfs_list_del(&sbi->ll_list);
+                cfs_spin_unlock(&ll_sb_lock);
                 OBD_FREE(sbi, sizeof(*sbi));
         }
         EXIT;
@@ -252,7 +254,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
                 GOTO(out_md, err);
         }
 
-        err = obd_statfs(obd, &osfs, cfs_time_current_64() - HZ, 0);
+        err = obd_statfs(obd, &osfs, cfs_time_current_64() - CFS_HZ, 0);
         if (err)
                 GOTO(out_md_fid, err);
 
@@ -389,11 +391,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
                 GOTO(out_dt, err);
         }
 
-        mutex_down(&sbi->ll_lco.lco_lock);
+        cfs_mutex_down(&sbi->ll_lco.lco_lock);
         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
-        mutex_up(&sbi->ll_lco.lco_lock);
+        cfs_mutex_up(&sbi->ll_lco.lco_lock);
 
         fid_zero(&sbi->ll_root_fid);
         err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
@@ -563,7 +565,7 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
         if (recur == 0)
                 return;
 
-        list_for_each(tmp, &dentry->d_subdirs) {
+       list_for_each(tmp, &dentry->d_subdirs) {
                 struct dentry *d = list_entry(tmp, struct dentry, d_child);
                 lustre_dump_dentry(d, recur - 1);
         }
@@ -619,7 +621,8 @@ static void prune_deathrow_one(struct ll_inode_info *lli)
                 goto out;
 
         CDEBUG(D_INODE, "inode %lu/%u(%d) looks a good candidate for prune\n",
-               inode->i_ino,inode->i_generation, atomic_read(&inode->i_count));
+               inode->i_ino, inode->i_generation,
+               atomic_read(&inode->i_count));
 
         /* seems nobody uses it anymore */
         inode->i_nlink = 0;
@@ -639,23 +642,23 @@ static void prune_deathrow(struct ll_sb_info *sbi, int try)
                         break;
 
                 if (try) {
-                        if (!spin_trylock(&sbi->ll_deathrow_lock))
+                        if (!cfs_spin_trylock(&sbi->ll_deathrow_lock))
                                 break;
                 } else {
-                        spin_lock(&sbi->ll_deathrow_lock);
+                        cfs_spin_lock(&sbi->ll_deathrow_lock);
                 }
 
                 empty = 1;
                 lli = NULL;
-                if (!list_empty(&sbi->ll_deathrow)) {
-                        lli = list_entry(sbi->ll_deathrow.next,
-                                         struct ll_inode_info,
-                                         lli_dead_list);
-                        list_del_init(&lli->lli_dead_list);
-                        if (!list_empty(&sbi->ll_deathrow))
+                if (!cfs_list_empty(&sbi->ll_deathrow)) {
+                        lli = cfs_list_entry(sbi->ll_deathrow.next,
+                                             struct ll_inode_info,
+                                             lli_dead_list);
+                        cfs_list_del_init(&lli->lli_dead_list);
+                        if (!cfs_list_empty(&sbi->ll_deathrow))
                                 empty = 0;
                 }
-                spin_unlock(&sbi->ll_deathrow_lock);
+                cfs_spin_unlock(&sbi->ll_deathrow_lock);
 
                 if (lli)
                         prune_deathrow_one(lli);
@@ -687,7 +690,7 @@ void client_common_put_super(struct super_block *sb)
         /* destroy inodes in deathrow */
         prune_deathrow(sbi, 0);
 
-        list_del(&sbi->ll_conn_chain);
+        cfs_list_del(&sbi->ll_conn_chain);
 
         obd_fid_fini(sbi->ll_dt_exp);
         obd_disconnect(sbi->ll_dt_exp);
@@ -862,24 +865,24 @@ next:
 void ll_lli_init(struct ll_inode_info *lli)
 {
         lli->lli_inode_magic = LLI_INODE_MAGIC;
-        sema_init(&lli->lli_size_sem, 1);
-        sema_init(&lli->lli_write_sem, 1);
-        sema_init(&lli->lli_trunc_sem, 1);
+        cfs_sema_init(&lli->lli_size_sem, 1);
+        cfs_sema_init(&lli->lli_write_sem, 1);
+        cfs_sema_init(&lli->lli_trunc_sem, 1);
         lli->lli_flags = 0;
         lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
-        spin_lock_init(&lli->lli_lock);
-        INIT_LIST_HEAD(&lli->lli_close_list);
+        cfs_spin_lock_init(&lli->lli_lock);
+        CFS_INIT_LIST_HEAD(&lli->lli_close_list);
         lli->lli_inode_magic = LLI_INODE_MAGIC;
-        sema_init(&lli->lli_och_sem, 1);
+        cfs_sema_init(&lli->lli_och_sem, 1);
         lli->lli_mds_read_och = lli->lli_mds_write_och = NULL;
         lli->lli_mds_exec_och = NULL;
         lli->lli_open_fd_read_count = lli->lli_open_fd_write_count = 0;
         lli->lli_open_fd_exec_count = 0;
-        INIT_LIST_HEAD(&lli->lli_dead_list);
+        CFS_INIT_LIST_HEAD(&lli->lli_dead_list);
         lli->lli_remote_perms = NULL;
         lli->lli_rmtperm_utime = 0;
-        sema_init(&lli->lli_rmtperm_sem, 1);
-        INIT_LIST_HEAD(&lli->lli_oss_capas);
+        cfs_sema_init(&lli->lli_rmtperm_sem, 1);
+        CFS_INIT_LIST_HEAD(&lli->lli_oss_capas);
 }
 
 int ll_fill_super(struct super_block *sb)
@@ -901,7 +904,7 @@ int ll_fill_super(struct super_block *sb)
         /* client additional sb info */
         lsi->lsi_llsbi = sbi = ll_init_sbi();
         if (!sbi) {
-                cfs_module_put();
+                cfs_module_put(THIS_MODULE);
                 RETURN(-ENOMEM);
         }
 
@@ -1021,7 +1024,7 @@ void ll_put_super(struct super_block *sb)
 
         LCONSOLE_WARN("client %s umount complete\n", ll_instance);
 
-        cfs_module_put();
+        cfs_module_put(THIS_MODULE);
 
         EXIT;
 } /* client_put_super */
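
Note on the two-argument cfs_module_put() above: the wrapper now takes the
module explicitly instead of implying the caller's own. A minimal sketch of
how the Linux port can define these helpers, assuming they are thin aliases
over the kernel primitives (the real libcfs definitions may carry extra
bookkeeping):

    /* Sketch only: plausible Linux-side cfs_ module-refcount aliases.
     * Taking an explicit struct module * lets a caller drop a reference
     * on a module other than its own. */
    #include <linux/module.h>

    #define cfs_try_module_get(mod)  try_module_get(mod)
    #define cfs_module_put(mod)      module_put(mod)
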
@@ -1107,7 +1110,7 @@ void ll_clear_inode(struct inode *inode)
         }
 #ifdef CONFIG_FS_POSIX_ACL
         else if (lli->lli_posix_acl) {
-                LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
+                LASSERT(cfs_atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
                 LASSERT(lli->lli_remote_perms == NULL);
                 posix_acl_release(lli->lli_posix_acl);
                 lli->lli_posix_acl = NULL;
@@ -1116,9 +1119,9 @@ void ll_clear_inode(struct inode *inode)
         lli->lli_inode_magic = LLI_INODE_DEAD;
 
 #ifdef HAVE_EXPORT___IGET
-        spin_lock(&sbi->ll_deathrow_lock);
-        list_del_init(&lli->lli_dead_list);
-        spin_unlock(&sbi->ll_deathrow_lock);
+        cfs_spin_lock(&sbi->ll_deathrow_lock);
+        cfs_list_del_init(&lli->lli_dead_list);
+        cfs_spin_unlock(&sbi->ll_deathrow_lock);
 #endif
         ll_clear_inode_capas(inode);
         /*
@@ -1290,15 +1293,15 @@ int ll_setattr_raw(struct inode *inode, struct iattr *attr)
 
         /* We mark all of the fields "set" so MDS/OST does not re-set them */
         if (attr->ia_valid & ATTR_CTIME) {
-                attr->ia_ctime = CURRENT_TIME;
+                attr->ia_ctime = CFS_CURRENT_TIME;
                 attr->ia_valid |= ATTR_CTIME_SET;
         }
         if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
-                attr->ia_atime = CURRENT_TIME;
+                attr->ia_atime = CFS_CURRENT_TIME;
                 attr->ia_valid |= ATTR_ATIME_SET;
         }
         if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
-                attr->ia_mtime = CURRENT_TIME;
+                attr->ia_mtime = CFS_CURRENT_TIME;
                 attr->ia_valid |= ATTR_MTIME_SET;
         }
         if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
@@ -1332,7 +1335,7 @@ int ll_setattr_raw(struct inode *inode, struct iattr *attr)
         UNLOCK_INODE_MUTEX(inode);
         if (ia_valid & ATTR_SIZE)
                 UP_WRITE_I_ALLOC_SEM(inode);
-        down(&lli->lli_trunc_sem);
+        cfs_down(&lli->lli_trunc_sem);
         LOCK_INODE_MUTEX(inode);
         if (ia_valid & ATTR_SIZE)
                 DOWN_WRITE_I_ALLOC_SEM(inode);
@@ -1367,7 +1370,7 @@ out:
                         rc1 = ll_setattr_done_writing(inode, op_data, mod);
                 ll_finish_md_op_data(op_data);
         }
-        up(&lli->lli_trunc_sem);
+        cfs_up(&lli->lli_trunc_sem);
         return rc ? rc : rc1;
 }
 
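The cfs_down()/cfs_up() calls on lli_trunc_sem above, and the spinlock
renames throughout this file, are mechanical. A minimal sketch of the
Linux-side mapping they rely on, assuming direct aliasing of the kernel
primitives (actual libcfs headers may add debug instrumentation):

    /* Sketch only: cfs_ lock/semaphore names over Linux primitives.
     * Older kernels declare semaphores in <asm/semaphore.h>,
     * 2.6.26+ in <linux/semaphore.h>. */
    #include <linux/spinlock.h>
    #include <linux/semaphore.h>

    typedef spinlock_t       cfs_spinlock_t;
    typedef struct semaphore cfs_semaphore_t;

    #define cfs_spin_lock_init(lock)  spin_lock_init(lock)
    #define cfs_spin_lock(lock)       spin_lock(lock)
    #define cfs_spin_unlock(lock)     spin_unlock(lock)
    #define cfs_sema_init(sem, val)   sema_init(sem, val)
    #define cfs_down(sem)             down(sem)
    #define cfs_up(sem)               up(sem)
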
@@ -1491,7 +1494,7 @@ void ll_inode_size_lock(struct inode *inode, int lock_lsm)
 
         lli = ll_i2info(inode);
         LASSERT(lli->lli_size_sem_owner != current);
-        down(&lli->lli_size_sem);
+        cfs_down(&lli->lli_size_sem);
         LASSERT(lli->lli_size_sem_owner == NULL);
         lli->lli_size_sem_owner = current;
         lsm = lli->lli_smd;
@@ -1514,7 +1517,7 @@ void ll_inode_size_unlock(struct inode *inode, int unlock_lsm)
                 lov_stripe_unlock(lsm);
         LASSERT(lli->lli_size_sem_owner == current);
         lli->lli_size_sem_owner = NULL;
-        up(&lli->lli_size_sem);
+        cfs_up(&lli->lli_size_sem);
 }
 
 void ll_update_inode(struct inode *inode, struct lustre_md *md)
@@ -1567,11 +1570,11 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
         }
 #ifdef CONFIG_FS_POSIX_ACL
         else if (body->valid & OBD_MD_FLACL) {
-                spin_lock(&lli->lli_lock);
+                cfs_spin_lock(&lli->lli_lock);
                 if (lli->lli_posix_acl)
                         posix_acl_release(lli->lli_posix_acl);
                 lli->lli_posix_acl = md->posix_acl;
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
         }
 #endif
         inode->i_ino = cl_fid_build_ino(&body->fid1);
@@ -1923,11 +1926,11 @@ void ll_umount_begin(struct super_block *sb)
          * and then continue.  For now, we just invalidate the requests,
          * schedule() and sleep one second if needed, and hope.
          */
-        schedule();
+        cfs_schedule();
 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
         if (atomic_read(&vfsmnt->mnt_count) > 2) {
-                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
-                                     cfs_time_seconds(1));
+                cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+                                                   cfs_time_seconds(1));
                 if (atomic_read(&vfsmnt->mnt_count) > 2)
                         LCONSOLE_WARN("Mount still busy with %d refs! You "
                                       "may try to umount it a bit later\n",
index 740eeb4..30fa9bf 100644
@@ -311,8 +311,8 @@ static void ll_vm_open(struct vm_area_struct * vma)
 
         ENTRY;
         LASSERT(vma->vm_file);
-        LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
-        atomic_inc(&vob->cob_mmap_cnt);
+        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
+        cfs_atomic_inc(&vob->cob_mmap_cnt);
         EXIT;
 }
 
@@ -326,8 +326,8 @@ static void ll_vm_close(struct vm_area_struct *vma)
 
         ENTRY;
         LASSERT(vma->vm_file);
-        atomic_dec(&vob->cob_mmap_cnt);
-        LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+        cfs_atomic_dec(&vob->cob_mmap_cnt);
+        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
         EXIT;
 }
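
The cob_mmap_cnt hunks show the matching atomic renames. A minimal sketch,
assuming cfs_atomic_t is the kernel atomic_t on Linux:

    /* Sketch only: cfs_ atomic names as aliases over Linux atomics. */
    #include <asm/atomic.h>

    typedef atomic_t cfs_atomic_t;

    #define cfs_atomic_read(a)           atomic_read(a)
    #define cfs_atomic_inc(a)            atomic_inc(a)
    #define cfs_atomic_dec(a)            atomic_dec(a)
    #define cfs_atomic_sub(n, a)         atomic_sub(n, a)
    #define cfs_atomic_add_return(n, a)  atomic_add_return(n, a)
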
 
index bf11ebe..7d7b38b 100644
@@ -90,8 +90,8 @@ static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
 
 static void rce_free(struct rmtacl_ctl_entry *rce)
 {
-        if (!list_empty(&rce->rce_list))
-                list_del(&rce->rce_list);
+        if (!cfs_list_empty(&rce->rce_list))
+                cfs_list_del(&rce->rce_list);
 
         OBD_FREE_PTR(rce);
 }
@@ -100,9 +100,9 @@ static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
                                            pid_t key)
 {
         struct rmtacl_ctl_entry *rce;
-        struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
+        cfs_list_t *head = &rct->rct_entries[rce_hashfunc(key)];
 
-        list_for_each_entry(rce, head, rce_list)
+        cfs_list_for_each_entry(rce, head, rce_list)
                 if (rce->rce_key == key)
                         return rce;
 
@@ -113,9 +113,9 @@ struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key)
 {
         struct rmtacl_ctl_entry *rce;
 
-        spin_lock(&rct->rct_lock);
+        cfs_spin_lock(&rct->rct_lock);
         rce = __rct_search(rct, key);
-        spin_unlock(&rct->rct_lock);
+        cfs_spin_unlock(&rct->rct_lock);
         return rce;
 }
 
@@ -127,15 +127,15 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
         if (rce == NULL)
                 return -ENOMEM;
 
-        spin_lock(&rct->rct_lock);
+        cfs_spin_lock(&rct->rct_lock);
         e = __rct_search(rct, key);
         if (unlikely(e != NULL)) {
                 CWARN("Unexpected stale rmtacl_entry found: "
                       "[key: %d] [ops: %d]\n", (int)key, ops);
                 rce_free(e);
         }
-        list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
-        spin_unlock(&rct->rct_lock);
+        cfs_list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
+        cfs_spin_unlock(&rct->rct_lock);
 
         return 0;
 }
@@ -144,11 +144,11 @@ int rct_del(struct rmtacl_ctl_table *rct, pid_t key)
 {
         struct rmtacl_ctl_entry *rce;
 
-        spin_lock(&rct->rct_lock);
+        cfs_spin_lock(&rct->rct_lock);
         rce = __rct_search(rct, key);
         if (rce)
                 rce_free(rce);
-        spin_unlock(&rct->rct_lock);
+        cfs_spin_unlock(&rct->rct_lock);
 
         return rce ? 0 : -ENOENT;
 }
@@ -157,7 +157,7 @@ void rct_init(struct rmtacl_ctl_table *rct)
 {
         int i;
 
-        spin_lock_init(&rct->rct_lock);
+        cfs_spin_lock_init(&rct->rct_lock);
         for (i = 0; i < RCE_HASHES; i++)
                 CFS_INIT_LIST_HEAD(&rct->rct_entries[i]);
 }
@@ -167,14 +167,14 @@ void rct_fini(struct rmtacl_ctl_table *rct)
         struct rmtacl_ctl_entry *rce;
         int i;
 
-        spin_lock(&rct->rct_lock);
+        cfs_spin_lock(&rct->rct_lock);
         for (i = 0; i < RCE_HASHES; i++)
-                while (!list_empty(&rct->rct_entries[i])) {
-                        rce = list_entry(rct->rct_entries[i].next,
-                                         struct rmtacl_ctl_entry, rce_list);
+                while (!cfs_list_empty(&rct->rct_entries[i])) {
+                        rce = cfs_list_entry(rct->rct_entries[i].next,
+                                             struct rmtacl_ctl_entry, rce_list);
                         rce_free(rce);
                 }
-        spin_unlock(&rct->rct_lock);
+        cfs_spin_unlock(&rct->rct_lock);
 }
 
 
@@ -198,8 +198,8 @@ static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
 
 void ee_free(struct eacl_entry *ee)
 {
-        if (!list_empty(&ee->ee_list))
-                list_del(&ee->ee_list);
+        if (!cfs_list_empty(&ee->ee_list))
+                cfs_list_del(&ee->ee_list);
 
         if (ee->ee_acl)
                 lustre_ext_acl_xattr_free(ee->ee_acl);
@@ -211,14 +211,14 @@ static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
                                         struct lu_fid *fid, int type)
 {
         struct eacl_entry *ee;
-        struct list_head *head = &et->et_entries[ee_hashfunc(key)];
+        cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
 
         LASSERT(fid != NULL);
-        list_for_each_entry(ee, head, ee_list)
+        cfs_list_for_each_entry(ee, head, ee_list)
                 if (ee->ee_key == key) {
                         if (lu_fid_eq(&ee->ee_fid, fid) &&
                             ee->ee_type == type) {
-                                list_del_init(&ee->ee_list);
+                                cfs_list_del_init(&ee->ee_list);
                                 return ee;
                         }
                 }
@@ -231,23 +231,23 @@ struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
 {
         struct eacl_entry *ee;
 
-        spin_lock(&et->et_lock);
+        cfs_spin_lock(&et->et_lock);
         ee = __et_search_del(et, key, fid, type);
-        spin_unlock(&et->et_lock);
+        cfs_spin_unlock(&et->et_lock);
         return ee;
 }
 
 void et_search_free(struct eacl_table *et, pid_t key)
 {
         struct eacl_entry *ee, *next;
-        struct list_head *head = &et->et_entries[ee_hashfunc(key)];
+        cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
 
-        spin_lock(&et->et_lock);
-        list_for_each_entry_safe(ee, next, head, ee_list)
+        cfs_spin_lock(&et->et_lock);
+        cfs_list_for_each_entry_safe(ee, next, head, ee_list)
                 if (ee->ee_key == key)
                         ee_free(ee);
 
-        spin_unlock(&et->et_lock);
+        cfs_spin_unlock(&et->et_lock);
 }
 
 int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
@@ -259,7 +259,7 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
         if (ee == NULL)
                 return -ENOMEM;
 
-        spin_lock(&et->et_lock);
+        cfs_spin_lock(&et->et_lock);
         e = __et_search_del(et, key, fid, type);
         if (unlikely(e != NULL)) {
                 CWARN("Unexpected stale eacl_entry found: "
@@ -267,8 +267,8 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
                       (int)key, PFID(fid), type);
                 ee_free(e);
         }
-        list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
-        spin_unlock(&et->et_lock);
+        cfs_list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
+        cfs_spin_unlock(&et->et_lock);
 
         return 0;
 }
@@ -277,7 +277,7 @@ void et_init(struct eacl_table *et)
 {
         int i;
 
-        spin_lock_init(&et->et_lock);
+        cfs_spin_lock_init(&et->et_lock);
         for (i = 0; i < EE_HASHES; i++)
                 CFS_INIT_LIST_HEAD(&et->et_entries[i]);
 }
@@ -287,14 +287,14 @@ void et_fini(struct eacl_table *et)
         struct eacl_entry *ee;
         int i;
 
-        spin_lock(&et->et_lock);
+        cfs_spin_lock(&et->et_lock);
         for (i = 0; i < EE_HASHES; i++)
-                while (!list_empty(&et->et_entries[i])) {
-                        ee = list_entry(et->et_entries[i].next,
-                                        struct eacl_entry, ee_list);
+                while (!cfs_list_empty(&et->et_entries[i])) {
+                        ee = cfs_list_entry(et->et_entries[i].next,
+                                            struct eacl_entry, ee_list);
                         ee_free(ee);
                 }
-        spin_unlock(&et->et_lock);
+        cfs_spin_unlock(&et->et_lock);
 }
 
 #endif
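
The rmtacl/eacl tables above move from struct list_head to the portable
cfs_list_t. On Linux this is presumably a plain typedef plus aliased
accessors; a minimal sketch covering the operations used in this file:

    /* Sketch only: cfs_list_t over the Linux list_head. */
    #include <linux/list.h>

    typedef struct list_head cfs_list_t;

    #define CFS_INIT_LIST_HEAD(head)          INIT_LIST_HEAD(head)
    #define cfs_list_add_tail(item, head)     list_add_tail(item, head)
    #define cfs_list_del(item)                list_del(item)
    #define cfs_list_del_init(item)           list_del_init(item)
    #define cfs_list_empty(head)              list_empty(head)
    #define cfs_list_entry(ptr, type, member) list_entry(ptr, type, member)
    #define cfs_list_for_each_entry(pos, head, member) \
            list_for_each_entry(pos, head, member)
    #define cfs_list_for_each_entry_safe(pos, n, head, member) \
            list_for_each_entry_safe(pos, n, head, member)
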
index 0acd41e..bbbb0ea 100644
@@ -136,14 +136,14 @@ struct lloop_device {
 
         int                  old_gfp_mask;
 
-        spinlock_t           lo_lock;
+        cfs_spinlock_t       lo_lock;
         struct bio          *lo_bio;
         struct bio          *lo_biotail;
         int                  lo_state;
-        struct semaphore     lo_sem;
-        struct semaphore     lo_ctl_mutex;
-        atomic_t             lo_pending;
-        wait_queue_head_t    lo_bh_wait;
+        cfs_semaphore_t      lo_sem;
+        cfs_semaphore_t      lo_ctl_mutex;
+        cfs_atomic_t         lo_pending;
+        cfs_waitq_t          lo_bh_wait;
 
         struct request_queue *lo_queue;
 
@@ -170,7 +170,7 @@ static int lloop_major;
 static int max_loop = MAX_LOOP_DEFAULT;
 static struct lloop_device *loop_dev;
 static struct gendisk **disks;
-static struct semaphore lloop_mutex;
+static cfs_semaphore_t lloop_mutex;
 static void *ll_iocontrol_magic = NULL;
 
 static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
@@ -280,17 +280,17 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&lo->lo_lock, flags);
+        cfs_spin_lock_irqsave(&lo->lo_lock, flags);
         if (lo->lo_biotail) {
                 lo->lo_biotail->bi_next = bio;
                 lo->lo_biotail = bio;
         } else
                 lo->lo_bio = lo->lo_biotail = bio;
-        spin_unlock_irqrestore(&lo->lo_lock, flags);
+        cfs_spin_unlock_irqrestore(&lo->lo_lock, flags);
 
-        atomic_inc(&lo->lo_pending);
-        if (waitqueue_active(&lo->lo_bh_wait))
-                wake_up(&lo->lo_bh_wait);
+        cfs_atomic_inc(&lo->lo_pending);
+        if (cfs_waitq_active(&lo->lo_bh_wait))
+                cfs_waitq_signal(&lo->lo_bh_wait);
 }
 
 /*
@@ -304,10 +304,10 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
         unsigned int page_count = 0;
         int rw;
 
-        spin_lock_irq(&lo->lo_lock);
+        cfs_spin_lock_irq(&lo->lo_lock);
         first = lo->lo_bio;
         if (unlikely(first == NULL)) {
-                spin_unlock_irq(&lo->lo_lock);
+                cfs_spin_unlock_irq(&lo->lo_lock);
                 return 0;
         }
 
@@ -338,7 +338,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
                 lo->lo_bio = NULL;
         }
         *req = first;
-        spin_unlock_irq(&lo->lo_lock);
+        cfs_spin_unlock_irq(&lo->lo_lock);
         return count;
 }
 
@@ -354,9 +354,9 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
         CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
                (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
 
-        spin_lock_irq(&lo->lo_lock);
+        cfs_spin_lock_irq(&lo->lo_lock);
         inactive = (lo->lo_state != LLOOP_BOUND);
-        spin_unlock_irq(&lo->lo_lock);
+        cfs_spin_unlock_irq(&lo->lo_lock);
         if (inactive)
                 goto err;
 
@@ -401,7 +401,8 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
 
 static inline int loop_active(struct lloop_device *lo)
 {
-        return atomic_read(&lo->lo_pending) || (lo->lo_state == LLOOP_RUNDOWN);
+        return cfs_atomic_read(&lo->lo_pending) ||
+                (lo->lo_state == LLOOP_RUNDOWN);
 }
 
 /*
@@ -438,15 +439,15 @@ static int loop_thread(void *data)
         /*
          * up sem, we are running
          */
-        up(&lo->lo_sem);
+        cfs_up(&lo->lo_sem);
 
         for (;;) {
-                wait_event(lo->lo_bh_wait, loop_active(lo));
-                if (!atomic_read(&lo->lo_pending)) {
+                cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
+                if (!cfs_atomic_read(&lo->lo_pending)) {
                         int exiting = 0;
-                        spin_lock_irq(&lo->lo_lock);
+                        cfs_spin_lock_irq(&lo->lo_lock);
                         exiting = (lo->lo_state == LLOOP_RUNDOWN);
-                        spin_unlock_irq(&lo->lo_lock);
+                        cfs_spin_unlock_irq(&lo->lo_lock);
                         if (exiting)
                                 break;
                 }
@@ -471,14 +472,14 @@ static int loop_thread(void *data)
                 }
 
                 LASSERT(bio != NULL);
-                LASSERT(count <= atomic_read(&lo->lo_pending));
+                LASSERT(count <= cfs_atomic_read(&lo->lo_pending));
                 loop_handle_bio(lo, bio);
-                atomic_sub(count, &lo->lo_pending);
+                cfs_atomic_sub(count, &lo->lo_pending);
         }
         cl_env_put(env, &refcheck);
 
 out:
-        up(&lo->lo_sem);
+        cfs_up(&lo->lo_sem);
         return ret;
 }
 
@@ -491,7 +492,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
         int                   error;
         loff_t                size;
 
-        if (!try_module_get(THIS_MODULE))
+        if (!cfs_try_module_get(THIS_MODULE))
                 return -ENODEV;
 
         error = -EBUSY;
@@ -551,13 +552,13 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 
         set_blocksize(bdev, lo->lo_blocksize);
 
-        kernel_thread(loop_thread, lo, CLONE_KERNEL);
-        down(&lo->lo_sem);
+        cfs_kernel_thread(loop_thread, lo, CLONE_KERNEL);
+        cfs_down(&lo->lo_sem);
         return 0;
 
  out:
         /* This is safe: open() is still holding a reference. */
-        module_put(THIS_MODULE);
+        cfs_module_put(THIS_MODULE);
         return error;
 }
 
@@ -576,12 +577,12 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
         if (filp == NULL)
                 return -EINVAL;
 
-        spin_lock_irq(&lo->lo_lock);
+        cfs_spin_lock_irq(&lo->lo_lock);
         lo->lo_state = LLOOP_RUNDOWN;
-        spin_unlock_irq(&lo->lo_lock);
-        wake_up(&lo->lo_bh_wait);
+        cfs_spin_unlock_irq(&lo->lo_lock);
+        cfs_waitq_signal(&lo->lo_bh_wait);
 
-        down(&lo->lo_sem);
+        cfs_down(&lo->lo_sem);
         lo->lo_backing_file = NULL;
         lo->ioctl = NULL;
         lo->lo_device = NULL;
@@ -595,7 +596,7 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
         lo->lo_state = LLOOP_UNBOUND;
         fput(filp);
         /* This is safe: open() is still holding a reference. */
-        module_put(THIS_MODULE);
+        cfs_module_put(THIS_MODULE);
         return 0;
 }
 
@@ -603,9 +604,9 @@ static int lo_open(struct inode *inode, struct file *file)
 {
         struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
 
-        down(&lo->lo_ctl_mutex);
+        cfs_down(&lo->lo_ctl_mutex);
         lo->lo_refcnt++;
-        up(&lo->lo_ctl_mutex);
+        cfs_up(&lo->lo_ctl_mutex);
 
         return 0;
 }
@@ -614,9 +615,9 @@ static int lo_release(struct inode *inode, struct file *file)
 {
         struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
 
-        down(&lo->lo_ctl_mutex);
+        cfs_down(&lo->lo_ctl_mutex);
         --lo->lo_refcnt;
-        up(&lo->lo_ctl_mutex);
+        cfs_up(&lo->lo_ctl_mutex);
 
         return 0;
 }
@@ -629,7 +630,7 @@ static int lo_ioctl(struct inode *inode, struct file *unused,
         struct block_device *bdev = inode->i_bdev;
         int err = 0;
 
-        down(&lloop_mutex);
+        cfs_down(&lloop_mutex);
         switch (cmd) {
         case LL_IOC_LLOOP_DETACH: {
                 err = loop_clr_fd(lo, bdev, 2);
@@ -653,7 +654,7 @@ static int lo_ioctl(struct inode *inode, struct file *unused,
                 err = -EINVAL;
                 break;
         }
-        up(&lloop_mutex);
+        cfs_up(&lloop_mutex);
 
         return err;
 }
@@ -689,7 +690,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
 
         CWARN("Enter lloop_ioctl\n");
 
-        down(&lloop_mutex);
+        cfs_down(&lloop_mutex);
         switch (cmd) {
         case LL_IOC_LLOOP_ATTACH: {
                 struct lloop_device *lo_free = NULL;
@@ -759,7 +760,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
         }
 
 out:
-        up(&lloop_mutex);
+        cfs_up(&lloop_mutex);
 out1:
         if (rcp)
                 *rcp = err;
@@ -805,7 +806,7 @@ static int __init lloop_init(void)
                         goto out_mem3;
         }
 
-        init_MUTEX(&lloop_mutex);
+        cfs_init_mutex(&lloop_mutex);
 
         for (i = 0; i < max_loop; i++) {
                 struct lloop_device *lo = &loop_dev[i];
@@ -815,11 +816,11 @@ static int __init lloop_init(void)
                 if (!lo->lo_queue)
                         goto out_mem4;
 
-                init_MUTEX(&lo->lo_ctl_mutex);
-                init_MUTEX_LOCKED(&lo->lo_sem);
-                init_waitqueue_head(&lo->lo_bh_wait);
+                cfs_init_mutex(&lo->lo_ctl_mutex);
+                cfs_init_mutex_locked(&lo->lo_sem);
+                cfs_waitq_init(&lo->lo_bh_wait);
                 lo->lo_number = i;
-                spin_lock_init(&lo->lo_lock);
+                cfs_spin_lock_init(&lo->lo_lock);
                 disk->major = lloop_major;
                 disk->first_minor = i;
                 disk->fops = &lo_fops;
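
lloop's producer/consumer wakeups now go through cfs_waitq_*. A minimal
sketch of the Linux mapping, assuming direct aliases over
wait_queue_head_t (libcfs also carries a userspace emulation, not shown):

    /* Sketch only: cfs_ wait-queue names over Linux wait queues. */
    #include <linux/wait.h>

    typedef wait_queue_head_t cfs_waitq_t;

    #define cfs_waitq_init(wq)        init_waitqueue_head(wq)
    #define cfs_waitq_active(wq)      waitqueue_active(wq)
    #define cfs_waitq_signal(wq)      wake_up(wq)
    #define cfs_wait_event(wq, cond)  wait_event(wq, cond)
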
index 943123e..de1eefb 100644
@@ -60,7 +60,7 @@ static int ll_rd_blksize(char *page, char **start, off_t off, int count,
         int rc;
 
         LASSERT(sb != NULL);
-        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
                                 OBD_STATFS_NODELAY);
         if (!rc) {
               *eof = 1;
@@ -78,7 +78,7 @@ static int ll_rd_kbytestotal(char *page, char **start, off_t off, int count,
         int rc;
 
         LASSERT(sb != NULL);
-        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
                                 OBD_STATFS_NODELAY);
         if (!rc) {
                 __u32 blk_size = osfs.os_bsize >> 10;
@@ -102,7 +102,7 @@ static int ll_rd_kbytesfree(char *page, char **start, off_t off, int count,
         int rc;
 
         LASSERT(sb != NULL);
-        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
                                 OBD_STATFS_NODELAY);
         if (!rc) {
                 __u32 blk_size = osfs.os_bsize >> 10;
@@ -125,7 +125,7 @@ static int ll_rd_kbytesavail(char *page, char **start, off_t off, int count,
         int rc;
 
         LASSERT(sb != NULL);
-        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
                                 OBD_STATFS_NODELAY);
         if (!rc) {
                 __u32 blk_size = osfs.os_bsize >> 10;
@@ -148,7 +148,7 @@ static int ll_rd_filestotal(char *page, char **start, off_t off, int count,
         int rc;
 
         LASSERT(sb != NULL);
-        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
                                 OBD_STATFS_NODELAY);
         if (!rc) {
                  *eof = 1;
@@ -165,7 +165,7 @@ static int ll_rd_filesfree(char *page, char **start, off_t off, int count,
         int rc;
 
         LASSERT(sb != NULL);
-        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+        rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
                                 OBD_STATFS_NODELAY);
         if (!rc) {
                  *eof = 1;
@@ -233,16 +233,16 @@ static int ll_rd_max_readahead_mb(char *page, char **start, off_t off,
         long pages_number;
         int mult;
 
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
         pages_number = sbi->ll_ra_info.ra_max_pages;
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         mult = 1 << (20 - PAGE_CACHE_SHIFT);
         return lprocfs_read_frac_helper(page, count, pages_number, mult);
 }
 
 static int ll_wr_max_readahead_mb(struct file *file, const char *buffer,
-                                   unsigned long count, void *data)
+                                  unsigned long count, void *data)
 {
         struct super_block *sb = data;
         struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -253,30 +253,30 @@ static int ll_wr_max_readahead_mb(struct file *file, const char *buffer,
         if (rc)
                 return rc;
 
-        if (pages_number < 0 || pages_number > num_physpages / 2) {
+        if (pages_number < 0 || pages_number > cfs_num_physpages / 2) {
                 CERROR("can't set file readahead more than %lu MB\n",
-                        num_physpages >> (20 - CFS_PAGE_SHIFT + 1)); /*1/2 of RAM*/
+                       cfs_num_physpages >> (20 - CFS_PAGE_SHIFT + 1)); /* 1/2 of RAM */
                 return -ERANGE;
         }
 
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
         sbi->ll_ra_info.ra_max_pages = pages_number;
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         return count;
 }
 
 static int ll_rd_max_readahead_per_file_mb(char *page, char **start, off_t off,
-                                          int count, int *eof, void *data)
+                                           int count, int *eof, void *data)
 {
         struct super_block *sb = data;
         struct ll_sb_info *sbi = ll_s2sbi(sb);
         long pages_number;
         int mult;
 
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
         pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         mult = 1 << (20 - CFS_PAGE_SHIFT);
         return lprocfs_read_frac_helper(page, count, pages_number, mult);
@@ -297,35 +297,36 @@ static int ll_wr_max_readahead_per_file_mb(struct file *file, const char *buffer
         if (pages_number < 0 ||
                 pages_number > sbi->ll_ra_info.ra_max_pages) {
                 CERROR("can't set file readahead more than"
-                       "max_read_ahead_mb %lu MB\n", sbi->ll_ra_info.ra_max_pages);
+                       " max_read_ahead_mb %lu MB\n",
+                       sbi->ll_ra_info.ra_max_pages);
                 return -ERANGE;
         }
 
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         return count;
 }
 
 static int ll_rd_max_read_ahead_whole_mb(char *page, char **start, off_t off,
-                                       int count, int *eof, void *data)
+                                         int count, int *eof, void *data)
 {
         struct super_block *sb = data;
         struct ll_sb_info *sbi = ll_s2sbi(sb);
         long pages_number;
         int mult;
 
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
         pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         mult = 1 << (20 - CFS_PAGE_SHIFT);
         return lprocfs_read_frac_helper(page, count, pages_number, mult);
 }
 
 static int ll_wr_max_read_ahead_whole_mb(struct file *file, const char *buffer,
-                                       unsigned long count, void *data)
+                                         unsigned long count, void *data)
 {
         struct super_block *sb = data;
         struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -346,9 +347,9 @@ static int ll_wr_max_read_ahead_whole_mb(struct file *file, const char *buffer,
                 return -ERANGE;
         }
 
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         return count;
 }
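
All of these lproc handlers follow one pattern: snapshot the tunable under
ll_lock, then format outside the critical section. A condensed sketch of
the read side with the cfs_ names (the handler name here is illustrative,
not part of the patch):

    /* Sketch only: take the lock just long enough to copy the value. */
    static int example_rd_tunable(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
    {
            struct super_block *sb = data;
            struct ll_sb_info *sbi = ll_s2sbi(sb);
            long pages_number;

            cfs_spin_lock(&sbi->ll_lock);
            pages_number = sbi->ll_ra_info.ra_max_pages;
            cfs_spin_unlock(&sbi->ll_lock);

            *eof = 1;
            return snprintf(page, count, "%lu\n", pages_number);
    }
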
@@ -361,16 +362,16 @@ static int ll_rd_max_cached_mb(char *page, char **start, off_t off,
         long pages_number;
         int mult;
 
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
         pages_number = sbi->ll_async_page_max;
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         mult = 1 << (20 - CFS_PAGE_SHIFT);
         return lprocfs_read_frac_helper(page, count, pages_number, mult);
 }
 
 static int ll_wr_max_cached_mb(struct file *file, const char *buffer,
-                                  unsigned long count, void *data)
+                               unsigned long count, void *data)
 {
         struct super_block *sb = data;
         struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -381,15 +382,15 @@ static int ll_wr_max_cached_mb(struct file *file, const char *buffer,
         if (rc)
                 return rc;
 
-        if (pages_number < 0 || pages_number > num_physpages) {
+        if (pages_number < 0 || pages_number > cfs_num_physpages) {
                 CERROR("can't set max cache more than %lu MB\n",
-                        num_physpages >> (20 - CFS_PAGE_SHIFT));
+                        cfs_num_physpages >> (20 - CFS_PAGE_SHIFT));
                 return -ERANGE;
         }
 
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
+        sbi->ll_async_page_max = pages_number;
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         if (!sbi->ll_dt_exp)
                 /* Not set up yet, don't call llap_shrink_cache */
@@ -924,7 +925,7 @@ static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
         int k;
 
-        do_gettimeofday(&now);
+        cfs_gettimeofday(&now);
 
         if (!sbi->ll_rw_stats_on) {
                 seq_printf(seq, "disabled\n"
@@ -938,7 +939,7 @@ static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
                    "extents", "calls", "%", "cum%",
                    "calls", "%", "cum%");
-        spin_lock(&sbi->ll_pp_extent_lock);
+        cfs_spin_lock(&sbi->ll_pp_extent_lock);
         for(k = 0; k < LL_PROCESS_HIST_MAX; k++) {
                 if(io_extents->pp_extents[k].pid != 0) {
                         seq_printf(seq, "\nPID: %d\n",
@@ -946,7 +947,7 @@ static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
                         ll_display_extents_info(io_extents, seq, k);
                 }
         }
-        spin_unlock(&sbi->ll_pp_extent_lock);
+        cfs_spin_unlock(&sbi->ll_pp_extent_lock);
         return 0;
 }
 
@@ -970,13 +971,13 @@ static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
         else
                 sbi->ll_rw_stats_on = 1;
 
-        spin_lock(&sbi->ll_pp_extent_lock);
+        cfs_spin_lock(&sbi->ll_pp_extent_lock);
         for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
                 io_extents->pp_extents[i].pid = 0;
                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
         }
-        spin_unlock(&sbi->ll_pp_extent_lock);
+        cfs_spin_unlock(&sbi->ll_pp_extent_lock);
         return len;
 }
 
@@ -988,7 +989,7 @@ static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
         struct ll_sb_info *sbi = seq->private;
         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
 
-        do_gettimeofday(&now);
+        cfs_gettimeofday(&now);
 
         if (!sbi->ll_rw_stats_on) {
                 seq_printf(seq, "disabled\n"
@@ -1003,9 +1004,9 @@ static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
                    "extents", "calls", "%", "cum%",
                    "calls", "%", "cum%");
-        spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&sbi->ll_lock);
         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
 
         return 0;
 }
@@ -1028,14 +1029,14 @@ static ssize_t ll_rw_extents_stats_seq_write(struct file *file, const char *buf,
                 sbi->ll_rw_stats_on = 0;
         else
                 sbi->ll_rw_stats_on = 1;
-        spin_lock(&sbi->ll_pp_extent_lock);
+        cfs_spin_lock(&sbi->ll_pp_extent_lock);
         for(i = 0; i <= LL_PROCESS_HIST_MAX; i++)
         {
                 io_extents->pp_extents[i].pid = 0;
                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
         }
-        spin_unlock(&sbi->ll_pp_extent_lock);
+        cfs_spin_unlock(&sbi->ll_pp_extent_lock);
 
         return len;
 }
@@ -1058,7 +1059,7 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
         process = sbi->ll_rw_process_info;
         offset = sbi->ll_rw_offset_info;
 
-        spin_lock(&sbi->ll_pp_extent_lock);
+        cfs_spin_lock(&sbi->ll_pp_extent_lock);
         /* Extent statistics */
         for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
                 if(io_extents->pp_extents[i].pid == pid) {
@@ -1086,9 +1087,9 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
         }
-        spin_unlock(&sbi->ll_pp_extent_lock);
+        cfs_spin_unlock(&sbi->ll_pp_extent_lock);
 
-        spin_lock(&sbi->ll_process_lock);
+        cfs_spin_lock(&sbi->ll_process_lock);
         /* Offset statistics */
         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
                 if (process[i].rw_pid == pid) {
@@ -1099,7 +1100,7 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
                                 process[i].rw_largest_extent = count;
                                 process[i].rw_offset = 0;
                                 process[i].rw_last_file = file;
-                                spin_unlock(&sbi->ll_process_lock);
+                                cfs_spin_unlock(&sbi->ll_process_lock);
                                 return;
                         }
                         if (process[i].rw_last_file_pos != pos) {
@@ -1129,7 +1130,7 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
                         if(process[i].rw_largest_extent < count)
                                 process[i].rw_largest_extent = count;
                         process[i].rw_last_file_pos = pos + count;
-                        spin_unlock(&sbi->ll_process_lock);
+                        cfs_spin_unlock(&sbi->ll_process_lock);
                         return;
                 }
         }
@@ -1142,7 +1143,7 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
         process[*process_count].rw_largest_extent = count;
         process[*process_count].rw_offset = 0;
         process[*process_count].rw_last_file = file;
-        spin_unlock(&sbi->ll_process_lock);
+        cfs_spin_unlock(&sbi->ll_process_lock);
 }
 
 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
@@ -1153,7 +1154,7 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
         int i;
 
-        do_gettimeofday(&now);
+        cfs_gettimeofday(&now);
 
         if (!sbi->ll_rw_stats_on) {
                 seq_printf(seq, "disabled\n"
@@ -1161,7 +1162,7 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
                                 "then 0 or \"[D/d]isabled\" to deactivate\n");
                 return 0;
         }
-        spin_lock(&sbi->ll_process_lock);
+        cfs_spin_lock(&sbi->ll_process_lock);
 
         seq_printf(seq, "snapshot_time:         %lu.%lu (secs.usecs)\n",
                    now.tv_sec, now.tv_usec);
@@ -1192,7 +1193,7 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
                                    (unsigned long)process[i].rw_largest_extent,
                                    process[i].rw_offset);
         }
-        spin_unlock(&sbi->ll_process_lock);
+        cfs_spin_unlock(&sbi->ll_process_lock);
 
         return 0;
 }
@@ -1217,14 +1218,14 @@ static ssize_t ll_rw_offset_stats_seq_write(struct file *file, const char *buf,
         else
                 sbi->ll_rw_stats_on = 1;
 
-        spin_lock(&sbi->ll_process_lock);
+        cfs_spin_lock(&sbi->ll_process_lock);
         sbi->ll_offset_process_count = 0;
         sbi->ll_rw_offset_entry_count = 0;
         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
                LL_PROCESS_HIST_MAX);
         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
                LL_OFFSET_HIST_MAX);
-        spin_unlock(&sbi->ll_process_lock);
+        cfs_spin_unlock(&sbi->ll_process_lock);
 
         return len;
 }
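
HZ to CFS_HZ is the time-unit half of the rename: the max_age argument of
ll_statfs_internal() is a 64-bit jiffies value, so "at most one second
stale" is the current time minus CFS_HZ. A minimal sketch (the wrapper
name is illustrative):

    /* Sketch only: accept cached statfs data up to one second old
     * instead of forcing a new RPC on every read. */
    static int example_fresh_statfs(struct super_block *sb,
                                    struct obd_statfs *osfs)
    {
            return ll_statfs_internal(sb, osfs,
                                      cfs_time_current_64() - CFS_HZ,
                                      OBD_STATFS_NODELAY);
    }
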
index 61fb628..f3e2dac 100644
@@ -268,7 +268,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 
 __u32 ll_i2suppgid(struct inode *i)
 {
-        if (in_group_p(i->i_gid))
+        if (cfs_curproc_is_in_groups(i->i_gid))
                 return (__u32)i->i_gid;
         else
                 return (__u32)(-1);
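
ll_i2suppgid() now asks the portable curproc layer rather than calling
in_group_p() directly. On Linux the wrapper is presumably trivial; a
minimal sketch (the exact libcfs definition may differ):

    /* Sketch only: group-membership test for the current process.
     * in_group_p() is declared in <linux/sched.h> (or <linux/cred.h>
     * on newer kernels). */
    #include <linux/sched.h>

    #define cfs_curproc_is_in_groups(gid)  in_group_p(gid)
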
index 08c4bc1..9d26e87 100644
@@ -65,7 +65,7 @@ static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
 
         OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL);
         if (lrp)
-                INIT_HLIST_NODE(&lrp->lrp_list);
+                CFS_INIT_HLIST_NODE(&lrp->lrp_list);
         return lrp;
 }
 
@@ -74,14 +74,14 @@ static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
         if (!lrp)
                 return;
 
-        if (!hlist_unhashed(&lrp->lrp_list))
-                hlist_del(&lrp->lrp_list);
+        if (!cfs_hlist_unhashed(&lrp->lrp_list))
+                cfs_hlist_del(&lrp->lrp_list);
         OBD_SLAB_FREE(lrp, ll_remote_perm_cachep, sizeof(*lrp));
 }
 
-struct hlist_head *alloc_rmtperm_hash(void)
+cfs_hlist_head_t *alloc_rmtperm_hash(void)
 {
-        struct hlist_head *hash;
+        cfs_hlist_head_t *hash;
         int i;
 
         OBD_SLAB_ALLOC(hash, ll_rmtperm_hash_cachep, GFP_KERNEL,
@@ -91,22 +91,23 @@ struct hlist_head *alloc_rmtperm_hash(void)
                 return NULL;
 
         for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
-                INIT_HLIST_HEAD(hash + i);
+                CFS_INIT_HLIST_HEAD(hash + i);
 
         return hash;
 }
 
-void free_rmtperm_hash(struct hlist_head *hash)
+void free_rmtperm_hash(cfs_hlist_head_t *hash)
 {
         int i;
         struct ll_remote_perm *lrp;
-        struct hlist_node *node, *next;
+        cfs_hlist_node_t *node, *next;
 
         if(!hash)
                 return;
 
         for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
-                hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
+                cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
+                                              lrp_list)
                         free_ll_remote_perm(lrp);
         OBD_SLAB_FREE(hash, ll_rmtperm_hash_cachep,
                       REMOTE_PERM_HASHSIZE * sizeof(*hash));
@@ -121,9 +122,9 @@ static inline int remote_perm_hashfunc(uid_t uid)
  * MDT when client get remote permission. */
 static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
 {
-        struct hlist_head *head;
+        cfs_hlist_head_t *head;
         struct ll_remote_perm *lrp;
-        struct hlist_node *node;
+        cfs_hlist_node_t *node;
         int found = 0, rc;
         ENTRY;
 
@@ -132,8 +133,8 @@ static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
 
         head = lli->lli_remote_perms + remote_perm_hashfunc(current->uid);
 
-        spin_lock(&lli->lli_lock);
-        hlist_for_each_entry(lrp, node, head, lrp_list) {
+        cfs_spin_lock(&lli->lli_lock);
+        cfs_hlist_for_each_entry(lrp, node, head, lrp_list) {
                 if (lrp->lrp_uid != current->uid)
                         continue;
                 if (lrp->lrp_gid != current->gid)
@@ -155,7 +156,7 @@ static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
         rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
 
 out:
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
         return rc;
 }
 
@@ -163,8 +164,8 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
 {
         struct ll_inode_info *lli = ll_i2info(inode);
         struct ll_remote_perm *lrp = NULL, *tmp = NULL;
-        struct hlist_head *head, *perm_hash = NULL;
-        struct hlist_node *node;
+        cfs_hlist_head_t *head, *perm_hash = NULL;
+        cfs_hlist_node_t *node;
         ENTRY;
 
         LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
@@ -192,7 +193,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
                 }
         }
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
 
         if (!lli->lli_remote_perms)
                 lli->lli_remote_perms = perm_hash;
@@ -202,7 +203,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
         head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
 
 again:
-        hlist_for_each_entry(tmp, node, head, lrp_list) {
+        cfs_hlist_for_each_entry(tmp, node, head, lrp_list) {
                 if (tmp->lrp_uid != perm->rp_uid)
                         continue;
                 if (tmp->lrp_gid != perm->rp_gid)
@@ -218,13 +219,13 @@ again:
         }
 
         if (!lrp) {
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
                 lrp = alloc_ll_remote_perm();
                 if (!lrp) {
                         CERROR("alloc memory for ll_remote_perm failed!\n");
                         RETURN(-ENOMEM);
                 }
-                spin_lock(&lli->lli_lock);
+                cfs_spin_lock(&lli->lli_lock);
                 goto again;
         }
 
@@ -234,10 +235,10 @@ again:
                 lrp->lrp_gid         = perm->rp_gid;
                 lrp->lrp_fsuid       = perm->rp_fsuid;
                 lrp->lrp_fsgid       = perm->rp_fsgid;
-                hlist_add_head(&lrp->lrp_list, head);
+                cfs_hlist_add_head(&lrp->lrp_list, head);
         }
         lli->lli_rmtperm_utime = jiffies;
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
 
         CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
                lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
@@ -263,14 +264,14 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
                 if (!rc || (rc != -ENOENT && i))
                         break;
 
-                might_sleep();
+                cfs_might_sleep();
 
-                down(&lli->lli_rmtperm_sem);
+                cfs_down(&lli->lli_rmtperm_sem);
                 /* check again */
                 if (utime != lli->lli_rmtperm_utime) {
                         rc = do_check_remote_perm(lli, mask);
                         if (!rc || (rc != -ENOENT && i)) {
-                                up(&lli->lli_rmtperm_sem);
+                                cfs_up(&lli->lli_rmtperm_sem);
                                 break;
                         }
                 }
@@ -285,20 +286,20 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
                                         ll_i2suppgid(inode), &req);
                 capa_put(oc);
                 if (rc) {
-                        up(&lli->lli_rmtperm_sem);
+                        cfs_up(&lli->lli_rmtperm_sem);
                         break;
                 }
 
                 perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
                                                    lustre_swab_mdt_remote_perm);
                 if (unlikely(perm == NULL)) {
-                        up(&lli->lli_rmtperm_sem);
+                        cfs_up(&lli->lli_rmtperm_sem);
                         rc = -EPROTO;
                         break;
                 }
 
                 rc = ll_update_remote_perm(inode, perm);
-                up(&lli->lli_rmtperm_sem);
+                cfs_up(&lli->lli_rmtperm_sem);
                 if (rc == -ENOMEM)
                         break;
 
@@ -315,20 +316,21 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
 void ll_free_remote_perms(struct inode *inode)
 {
         struct ll_inode_info *lli = ll_i2info(inode);
-        struct hlist_head *hash = lli->lli_remote_perms;
+        cfs_hlist_head_t *hash = lli->lli_remote_perms;
         struct ll_remote_perm *lrp;
-        struct hlist_node *node, *next;
+        cfs_hlist_node_t *node, *next;
         int i;
 
         LASSERT(hash);
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
 
         for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
-                hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
+                cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
+                                              lrp_list)
                         free_ll_remote_perm(lrp);
         }
 
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
 }
 #endif
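
The remote-permission cache hashes entries into per-uid hlist buckets; the
patch renames only the types and accessors. A minimal sketch of the Linux
aliases, assuming the four-argument hlist_for_each_entry() of that kernel
era:

    /* Sketch only: cfs_ hlist names over Linux hlists. */
    #include <linux/list.h>
    #include <linux/kernel.h>

    typedef struct hlist_head cfs_hlist_head_t;
    typedef struct hlist_node cfs_hlist_node_t;

    #define CFS_INIT_HLIST_HEAD(head)      INIT_HLIST_HEAD(head)
    #define CFS_INIT_HLIST_NODE(node)      INIT_HLIST_NODE(node)
    #define cfs_hlist_unhashed(node)       hlist_unhashed(node)
    #define cfs_hlist_del(node)            hlist_del(node)
    #define cfs_hlist_add_head(node, head) hlist_add_head(node, head)
    #define cfs_hlist_for_each_entry(tpos, pos, head, member) \
            hlist_for_each_entry(tpos, pos, head, member)
    #define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
            hlist_for_each_entry_safe(tpos, pos, n, head, member)
    #define cfs_might_sleep()              might_sleep()
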
index 3f9c15c..af18871 100644
@@ -384,12 +384,12 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
          * otherwise it will form small read RPC(< 1M), which hurt server
          * performance a lot.
          */
-        ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), len);
+        ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
         if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
                 GOTO(out, ret = 0);
 
-        if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
-                atomic_sub(ret, &ra->ra_cur_pages);
+        if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+                cfs_atomic_sub(ret, &ra->ra_cur_pages);
                 ret = 0;
         }
 out:
@@ -399,7 +399,7 @@ out:
 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
 {
         struct ll_ra_info *ra = &sbi->ll_ra_info;
-        atomic_sub(len, &ra->ra_cur_pages);
+        cfs_atomic_sub(len, &ra->ra_cur_pages);
 }
 
 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
@@ -452,14 +452,14 @@ void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
 
         ras = ll_ras_get(f);
 
-        spin_lock(&ras->ras_lock);
+        cfs_spin_lock(&ras->ras_lock);
         ras->ras_requests++;
         ras->ras_request_index = 0;
         ras->ras_consecutive_requests++;
         rar->lrr_reader = current;
 
-        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
-        spin_unlock(&ras->ras_lock);
+        cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+        cfs_spin_unlock(&ras->ras_lock);
 }
 
 void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
@@ -468,16 +468,16 @@ void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
 
         ras = ll_ras_get(f);
 
-        spin_lock(&ras->ras_lock);
-        list_del_init(&rar->lrr_linkage);
-        spin_unlock(&ras->ras_lock);
+        cfs_spin_lock(&ras->ras_lock);
+        cfs_list_del_init(&rar->lrr_linkage);
+        cfs_spin_unlock(&ras->ras_lock);
 }
 
 static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
 {
         struct ll_ra_read *scan;
 
-        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+        cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                 if (scan->lrr_reader == current)
                         return scan;
         }
@@ -491,9 +491,9 @@ struct ll_ra_read *ll_ra_read_get(struct file *f)
 
         ras = ll_ras_get(f);
 
-        spin_lock(&ras->ras_lock);
+        cfs_spin_lock(&ras->ras_lock);
         bead = ll_ra_read_get_locked(ras);
-        spin_unlock(&ras->ras_lock);
+        cfs_spin_unlock(&ras->ras_lock);
         return bead;
 }
 
@@ -766,7 +766,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
                 RETURN(0);
         }
 
-        spin_lock(&ras->ras_lock);
+        cfs_spin_lock(&ras->ras_lock);
         if (vio->cui_ra_window_set)
                 bead = &vio->cui_bead;
         else
@@ -797,7 +797,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
                 ria->ria_length = ras->ras_stride_length;
                 ria->ria_pages = ras->ras_stride_pages;
         }
-        spin_unlock(&ras->ras_lock);
+        cfs_spin_unlock(&ras->ras_lock);
 
         if (end == 0) {
                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
@@ -833,14 +833,14 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
                ra_end, end, ria->ria_end);
 
         if (ra_end != end + 1) {
-                spin_lock(&ras->ras_lock);
+                cfs_spin_lock(&ras->ras_lock);
                 if (ra_end < ras->ras_next_readahead &&
                     index_in_window(ra_end, ras->ras_window_start, 0,
                                     ras->ras_window_len)) {
                         ras->ras_next_readahead = ra_end;
                         RAS_CDEBUG(ras);
                 }
-                spin_unlock(&ras->ras_lock);
+                cfs_spin_unlock(&ras->ras_lock);
         }
 
         RETURN(ret);
@@ -875,10 +875,10 @@ static void ras_stride_reset(struct ll_readahead_state *ras)
 
 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
 {
-        spin_lock_init(&ras->ras_lock);
+        cfs_spin_lock_init(&ras->ras_lock);
         ras_reset(ras, 0);
         ras->ras_requests = 0;
-        INIT_LIST_HEAD(&ras->ras_read_beads);
+        CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
 }
 
 /*
@@ -1000,8 +1000,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
         int zero = 0, stride_detect = 0, ra_miss = 0;
         ENTRY;
 
-        spin_lock(&sbi->ll_lock);
-        spin_lock(&ras->ras_lock);
+        cfs_spin_lock(&sbi->ll_lock);
+        cfs_spin_lock(&ras->ras_lock);
 
         ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
 
@@ -1132,8 +1132,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 out_unlock:
         RAS_CDEBUG(ras);
         ras->ras_request_index++;
-        spin_unlock(&ras->ras_lock);
-        spin_unlock(&sbi->ll_lock);
+        cfs_spin_unlock(&ras->ras_lock);
+        cfs_spin_unlock(&sbi->ll_lock);
         return;
 }
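
ll_ra_count_get() above is an optimistic quota grab: add the request to
the shared counter first, then back out if the pool limit was overshot,
so concurrent readers never hold more than ra_max_pages between them. A
condensed sketch of just that step (the function name is illustrative):

    /* Sketch only: grant up to 'want' pages against a shared quota;
     * returns 0 when the pool is already full. */
    static unsigned long example_ra_grab(cfs_atomic_t *cur_pages,
                                         unsigned long max_pages,
                                         unsigned long want)
    {
            if (cfs_atomic_add_return(want, cur_pages) > max_pages) {
                    cfs_atomic_sub(want, cur_pages);
                    return 0;
            }
            return want;
    }
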
 
index 2eb1b8b..743a81a 100644
@@ -50,7 +50,7 @@
 #include "llite_internal.h"
 
 struct ll_sai_entry {
-        struct list_head        se_list;
+        cfs_list_t              se_list;
         unsigned int            se_index;
         int                     se_stat;
         struct ptlrpc_request  *se_req;
@@ -63,7 +63,7 @@ enum {
 };
 
 static unsigned int sai_generation = 0;
-static spinlock_t sai_generation_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 /**
  * Check whether first entry was stated already or not.
@@ -76,9 +76,9 @@ static int ll_sai_entry_stated(struct ll_statahead_info *sai)
         struct ll_sai_entry  *entry;
         int                   rc = 0;
 
-        if (!list_empty(&sai->sai_entries_stated)) {
-                entry = list_entry(sai->sai_entries_stated.next,
-                                   struct ll_sai_entry, se_list);
+        if (!cfs_list_empty(&sai->sai_entries_stated)) {
+                entry = cfs_list_entry(sai->sai_entries_stated.next,
+                                       struct ll_sai_entry, se_list);
                 if (entry->se_index == sai->sai_index_next)
                         rc = 1;
         }
@@ -87,7 +87,7 @@ static int ll_sai_entry_stated(struct ll_statahead_info *sai)
 
 static inline int sa_received_empty(struct ll_statahead_info *sai)
 {
-        return list_empty(&sai->sai_entries_received);
+        return cfs_list_empty(&sai->sai_entries_received);
 }
 
 static inline int sa_not_full(struct ll_statahead_info *sai)
@@ -146,7 +146,7 @@ static void ll_sai_entry_cleanup(struct ll_sai_entry *entry, int free)
                 ptlrpc_req_finished(req);
         }
         if (free) {
-                LASSERT(list_empty(&entry->se_list));
+                LASSERT(cfs_list_empty(&entry->se_list));
                 OBD_FREE_PTR(entry);
         }
 
@@ -161,12 +161,12 @@ static struct ll_statahead_info *ll_sai_alloc(void)
         if (!sai)
                 return NULL;
 
-        spin_lock(&sai_generation_lock);
+        cfs_spin_lock(&sai_generation_lock);
         sai->sai_generation = ++sai_generation;
         if (unlikely(sai_generation == 0))
                 sai->sai_generation = ++sai_generation;
-        spin_unlock(&sai_generation_lock);
-        atomic_set(&sai->sai_refcount, 1);
+        cfs_spin_unlock(&sai_generation_lock);
+        cfs_atomic_set(&sai->sai_refcount, 1);
         sai->sai_max = LL_SA_RPC_MIN;
         cfs_waitq_init(&sai->sai_waitq);
         cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
@@ -180,7 +180,7 @@ static inline
 struct ll_statahead_info *ll_sai_get(struct ll_statahead_info *sai)
 {
         LASSERT(sai);
-        atomic_inc(&sai->sai_refcount);
+        cfs_atomic_inc(&sai->sai_refcount);
         return sai;
 }
 
@@ -194,14 +194,14 @@ static void ll_sai_put(struct ll_statahead_info *sai)
         lli = ll_i2info(inode);
         LASSERT(lli->lli_sai == sai);
 
-        if (atomic_dec_and_test(&sai->sai_refcount)) {
+        if (cfs_atomic_dec_and_test(&sai->sai_refcount)) {
                 struct ll_sai_entry *entry, *next;
 
-                spin_lock(&lli->lli_lock);
-                if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
+                cfs_spin_lock(&lli->lli_lock);
+                if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
                        /* It is a race case; the interpret callback just
                         * holds a reference count */
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
                         EXIT;
                         return;
                 }
@@ -209,7 +209,7 @@ static void ll_sai_put(struct ll_statahead_info *sai)
                 LASSERT(lli->lli_opendir_key == NULL);
                 lli->lli_sai = NULL;
                 lli->lli_opendir_pid = 0;
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
 
                 LASSERT(sa_is_stopped(sai));
 
@@ -219,19 +219,21 @@ static void ll_sai_put(struct ll_statahead_info *sai)
                               PFID(&lli->lli_fid),
                               sai->sai_sent, sai->sai_replied);
 
-                list_for_each_entry_safe(entry, next, &sai->sai_entries_sent,
-                                         se_list) {
-                        list_del_init(&entry->se_list);
+                cfs_list_for_each_entry_safe(entry, next,
+                                             &sai->sai_entries_sent, se_list) {
+                        cfs_list_del_init(&entry->se_list);
                         ll_sai_entry_cleanup(entry, 1);
                 }
-                list_for_each_entry_safe(entry, next, &sai->sai_entries_received,
-                                         se_list) {
-                        list_del_init(&entry->se_list);
+                cfs_list_for_each_entry_safe(entry, next,
+                                             &sai->sai_entries_received,
+                                             se_list) {
+                        cfs_list_del_init(&entry->se_list);
                         ll_sai_entry_cleanup(entry, 1);
                 }
-                list_for_each_entry_safe(entry, next, &sai->sai_entries_stated,
-                                         se_list) {
-                        list_del_init(&entry->se_list);
+                cfs_list_for_each_entry_safe(entry, next,
+                                             &sai->sai_entries_stated,
+                                             se_list) {
+                        cfs_list_del_init(&entry->se_list);
                         ll_sai_entry_cleanup(entry, 1);
                 }
                 iput(inode);
@@ -259,9 +261,9 @@ ll_sai_entry_init(struct ll_statahead_info *sai, unsigned int index)
         entry->se_index = index;
         entry->se_stat = SA_ENTRY_UNSTATED;
 
-        spin_lock(&lli->lli_lock);
-        list_add_tail(&entry->se_list, &sai->sai_entries_sent);
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
+        cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
+        cfs_spin_unlock(&lli->lli_lock);
 
         RETURN(entry);
 }
@@ -277,20 +279,20 @@ static int ll_sai_entry_fini(struct ll_statahead_info *sai)
         int rc = 0;
         ENTRY;
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         sai->sai_index_next++;
-        if (likely(!list_empty(&sai->sai_entries_stated))) {
-                entry = list_entry(sai->sai_entries_stated.next,
-                                   struct ll_sai_entry, se_list);
+        if (likely(!cfs_list_empty(&sai->sai_entries_stated))) {
+                entry = cfs_list_entry(sai->sai_entries_stated.next,
+                                       struct ll_sai_entry, se_list);
                 if (entry->se_index < sai->sai_index_next) {
-                        list_del(&entry->se_list);
+                        cfs_list_del(&entry->se_list);
                         rc = entry->se_stat;
                         OBD_FREE_PTR(entry);
                 }
         } else {
                 LASSERT(sa_is_stopped(sai));
         }
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
 
         RETURN(rc);
 }
@@ -307,8 +309,9 @@ ll_sai_entry_set(struct ll_statahead_info *sai, unsigned int index, int stat,
         struct ll_sai_entry *entry;
         ENTRY;
 
-        if (!list_empty(&sai->sai_entries_sent)) {
-                list_for_each_entry(entry, &sai->sai_entries_sent, se_list) {
+        if (!cfs_list_empty(&sai->sai_entries_sent)) {
+                cfs_list_for_each_entry(entry, &sai->sai_entries_sent,
+                                        se_list) {
                         if (entry->se_index == index) {
                                 entry->se_stat = stat;
                                 entry->se_req = ptlrpc_request_addref(req);
@@ -330,9 +333,9 @@ ll_sai_entry_set(struct ll_statahead_info *sai, unsigned int index, int stat,
 static inline void
 ll_sai_entry_to_received(struct ll_statahead_info *sai, struct ll_sai_entry *entry)
 {
-        if (!list_empty(&entry->se_list))
-                list_del_init(&entry->se_list);
-        list_add_tail(&entry->se_list, &sai->sai_entries_received);
+        if (!cfs_list_empty(&entry->se_list))
+                cfs_list_del_init(&entry->se_list);
+        cfs_list_add_tail(&entry->se_list, &sai->sai_entries_received);
 }
 
 /**
@@ -348,20 +351,20 @@ ll_sai_entry_to_stated(struct ll_statahead_info *sai, struct ll_sai_entry *entry
 
         ll_sai_entry_cleanup(entry, 0);
 
-        spin_lock(&lli->lli_lock);
-        if (!list_empty(&entry->se_list))
-                list_del_init(&entry->se_list);
+        cfs_spin_lock(&lli->lli_lock);
+        if (!cfs_list_empty(&entry->se_list))
+                cfs_list_del_init(&entry->se_list);
 
         if (unlikely(entry->se_index < sai->sai_index_next)) {
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
                 OBD_FREE_PTR(entry);
                 RETURN(0);
         }
 
-        list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+        cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
                 if (se->se_index < entry->se_index) {
-                        list_add(&entry->se_list, &se->se_list);
-                        spin_unlock(&lli->lli_lock);
+                        cfs_list_add(&entry->se_list, &se->se_list);
+                        cfs_spin_unlock(&lli->lli_lock);
                         RETURN(1);
                 }
         }
@@ -369,8 +372,8 @@ ll_sai_entry_to_stated(struct ll_statahead_info *sai, struct ll_sai_entry *entry
         /*
          * I am the first entry.
          */
-        list_add(&entry->se_list, &sai->sai_entries_stated);
-        spin_unlock(&lli->lli_lock);
+        cfs_list_add(&entry->se_list, &sai->sai_entries_stated);
+        cfs_spin_unlock(&lli->lli_lock);
         RETURN(1);
 }
 
@@ -389,12 +392,12 @@ static int do_statahead_interpret(struct ll_statahead_info *sai)
         struct mdt_body        *body;
         ENTRY;
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         LASSERT(!sa_received_empty(sai));
-        entry = list_entry(sai->sai_entries_received.next, struct ll_sai_entry,
-                           se_list);
-        list_del_init(&entry->se_list);
-        spin_unlock(&lli->lli_lock);
+        entry = cfs_list_entry(sai->sai_entries_received.next,
+                               struct ll_sai_entry, se_list);
+        cfs_list_del_init(&entry->se_list);
+        cfs_spin_unlock(&lli->lli_lock);
 
         if (unlikely(entry->se_index < sai->sai_index_next)) {
                 CWARN("Found stale entry: [index %u] [next %u]\n",
@@ -501,10 +504,10 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
         CDEBUG(D_READA, "interpret statahead %.*s rc %d\n",
                dentry->d_name.len, dentry->d_name.name, rc);
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         if (unlikely(lli->lli_sai == NULL ||
             lli->lli_sai->sai_generation != minfo->mi_generation)) {
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
                 ll_intent_release(it);
                 dput(dentry);
                 iput(dir);
@@ -520,13 +523,13 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
                 if (likely(sa_is_running(sai))) {
                         ll_sai_entry_to_received(sai, entry);
                         sai->sai_replied++;
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
                         cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
                 } else {
-                        if (!list_empty(&entry->se_list))
-                                list_del_init(&entry->se_list);
+                        if (!cfs_list_empty(&entry->se_list))
+                                cfs_list_del_init(&entry->se_list);
                         sai->sai_replied++;
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
                         ll_sai_entry_cleanup(entry, 1);
                 }
                 ll_sai_put(sai);
@@ -763,9 +766,9 @@ static int ll_statahead_thread(void *arg)
         }
 
         atomic_inc(&sbi->ll_sa_total);
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         thread->t_flags = SVC_RUNNING;
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
         cfs_waitq_signal(&thread->t_ctl_waitq);
         CDEBUG(D_READA, "start doing statahead for %s\n", parent->d_name.name);
 
@@ -886,9 +889,9 @@ keep_de:
 
 out:
         ll_dir_chain_fini(&chain);
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         thread->t_flags = SVC_STOPPED;
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
         cfs_waitq_signal(&sai->sai_waitq);
         cfs_waitq_signal(&thread->t_ctl_waitq);
         ll_sai_put(sai);
@@ -908,9 +911,9 @@ void ll_stop_statahead(struct inode *inode, void *key)
         if (unlikely(key == NULL))
                 return;
 
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
                 return;
         }
 
@@ -922,7 +925,7 @@ void ll_stop_statahead(struct inode *inode, void *key)
 
                 if (!sa_is_stopped(lli->lli_sai)) {
                         thread->t_flags = SVC_STOPPING;
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
                         cfs_waitq_signal(&thread->t_ctl_waitq);
 
                         CDEBUG(D_READA, "stopping statahead thread, pid %d\n",
@@ -931,7 +934,7 @@ void ll_stop_statahead(struct inode *inode, void *key)
                                      sa_is_stopped(lli->lli_sai),
                                      &lwi);
                 } else {
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
                 }
 
                 /*
@@ -942,7 +945,7 @@ void ll_stop_statahead(struct inode *inode, void *key)
                 ll_sai_put(lli->lli_sai);
         } else {
                 lli->lli_opendir_pid = 0;
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
         }
 }
 
@@ -1088,7 +1091,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup)
 
         if (sai) {
                 if (unlikely(sa_is_stopped(sai) &&
-                             list_empty(&sai->sai_entries_stated)))
+                             cfs_list_empty(&sai->sai_entries_stated)))
                         RETURN(-EBADFD);
 
                 if ((*dentryp)->d_name.name[0] == '.') {
@@ -1223,10 +1226,10 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup)
         RETURN(-EEXIST);
 
 out:
-        spin_lock(&lli->lli_lock);
+        cfs_spin_lock(&lli->lli_lock);
         lli->lli_opendir_key = NULL;
         lli->lli_opendir_pid = 0;
-        spin_unlock(&lli->lli_lock);
+        cfs_spin_unlock(&lli->lli_lock);
         return rc;
 }
 
@@ -1273,10 +1276,10 @@ void ll_statahead_exit(struct inode *dir, struct dentry *dentry, int result)
                                PFID(&lli->lli_fid), sai->sai_hit,
                                sai->sai_miss, sai->sai_sent,
                                sai->sai_replied, cfs_curproc_pid());
-                        spin_lock(&lli->lli_lock);
+                        cfs_spin_lock(&lli->lli_lock);
                         if (!sa_is_stopped(sai))
                                 sai->sai_thread.t_flags = SVC_STOPPING;
-                        spin_unlock(&lli->lli_lock);
+                        cfs_spin_unlock(&lli->lli_lock);
                 }
         }
 
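The ll_sai_put() hunk above rewraps three teardown loops with cfs_list_for_each_entry_safe(), which keeps the familiar four-argument shape (cursor, next-cursor, head, member) and so tolerates unlinking the cursor mid-walk. A sketch of that drain idiom under the same assumption; the example_entry type is hypothetical and OBD_FREE_PTR() is Lustre's allocation macro from obd_support.h.

    #include <libcfs/libcfs.h>
    #include <obd_support.h>

    struct example_entry {
            cfs_list_t ee_list;             /* linkage, like se_list above */
    };

    /* Unlink and free every entry; the _safe iterator caches the next
     * pointer, so deleting the current entry is legal. */
    static void example_drain(cfs_list_t *head)
    {
            struct example_entry *entry, *next;

            cfs_list_for_each_entry_safe(entry, next, head, ee_list) {
                    cfs_list_del_init(&entry->ee_list);
                    OBD_FREE_PTR(entry);
            }
    }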
index b860101..f0c22ba 100644
@@ -74,7 +74,7 @@ int ll_init_inodecache(void)
 {
         ll_inode_cachep = cfs_mem_cache_create("lustre_inode_cache",
                                                sizeof(struct ll_inode_info),
-                                               0, SLAB_HWCACHE_ALIGN);
+                                               0, CFS_SLAB_HWCACHE_ALIGN);
         if (ll_inode_cachep == NULL)
                 return -ENOMEM;
         return 0;
@@ -125,7 +125,7 @@ static int __init init_lustre_lite(void)
                 return -ENOMEM;
         ll_file_data_slab = cfs_mem_cache_create("ll_file_data",
                                                  sizeof(struct ll_file_data), 0,
-                                                 SLAB_HWCACHE_ALIGN);
+                                                 CFS_SLAB_HWCACHE_ALIGN);
         if (ll_file_data_slab == NULL) {
                 ll_destroy_inodecache();
                 return -ENOMEM;
@@ -143,7 +143,7 @@ static int __init init_lustre_lite(void)
 
         ll_rmtperm_hash_cachep = cfs_mem_cache_create("ll_rmtperm_hash_cache",
                                                    REMOTE_PERM_HASHSIZE *
-                                                   sizeof(struct list_head),
+                                                   sizeof(cfs_list_t),
                                                    0, 0);
         if (ll_rmtperm_hash_cachep == NULL) {
                 cfs_mem_cache_destroy(ll_remote_perm_cachep);
@@ -175,7 +175,7 @@ static int __init init_lustre_lite(void)
                 }
         }
 
-        do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
         ll_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
 
         init_timer(&ll_capa_timer);
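In super25.c the constant SLAB_HWCACHE_ALIGN becomes CFS_SLAB_HWCACHE_ALIGN and do_gettimeofday() becomes cfs_gettimeofday(); the cfs_mem_cache_create() call shape is unchanged. A hedged sketch of the create/destroy pairing, with hypothetical names:

    #include <libcfs/libcfs.h>

    struct example_item {
            int ei_val;
    };

    static cfs_mem_cache_t *example_cachep;

    static int example_cache_init(void)
    {
            example_cachep = cfs_mem_cache_create("example_cache",
                                                  sizeof(struct example_item),
                                                  0, CFS_SLAB_HWCACHE_ALIGN);
            return example_cachep == NULL ? -ENOMEM : 0;
    }

    static void example_cache_fini(void)
    {
            cfs_mem_cache_destroy(example_cachep);
    }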
index 6549139..33b7d21 100644
@@ -308,10 +308,10 @@ static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
                                          struct lu_device *dev,
                                          struct vvp_pgcache_id *id)
 {
-        struct hlist_head       *bucket;
+        cfs_hlist_head_t        *bucket;
         struct lu_object_header *hdr;
         struct lu_site          *site;
-        struct hlist_node       *scan;
+        cfs_hlist_node_t        *scan;
         struct lu_object_header *found;
         struct cl_object        *clob;
         unsigned                 depth;
@@ -325,18 +325,19 @@ static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
         clob   = NULL;
 
         /* XXX copy of lu_object.c:htable_lookup() */
-        read_lock(&site->ls_guard);
-        hlist_for_each_entry(hdr, scan, bucket, loh_hash) {
+        cfs_read_lock(&site->ls_guard);
+        cfs_hlist_for_each_entry(hdr, scan, bucket, loh_hash) {
                 if (depth-- == 0) {
                         if (!lu_object_is_dying(hdr)) {
-                                if (atomic_add_return(1, &hdr->loh_ref) == 1)
+                                if (cfs_atomic_add_return(1,
+                                                          &hdr->loh_ref) == 1)
                                         ++ site->ls_busy;
                                 found = hdr;
                         }
                         break;
                 }
         }
-        read_unlock(&site->ls_guard);
+        cfs_read_unlock(&site->ls_guard);
 
         if (found != NULL) {
                 struct lu_object *lu_obj;
@@ -374,7 +375,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
                         /* got an object. Find next page. */
                         hdr = cl_object_header(clob);
 
-                        spin_lock(&hdr->coh_page_guard);
+                        cfs_spin_lock(&hdr->coh_page_guard);
                         nr = radix_tree_gang_lookup(&hdr->coh_tree,
                                                     (void **)&pg,
                                                     id.vpi_index, 1);
@@ -383,7 +384,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
                                /* Can't support files over 16TB */
                                 nr = !(pg->cp_index > 0xffffffff);
                         }
-                        spin_unlock(&hdr->coh_page_guard);
+                        cfs_spin_unlock(&hdr->coh_page_guard);
 
                         lu_object_ref_del(&clob->co_lu, "dump", cfs_current());
                         cl_object_put(env, clob);
@@ -400,7 +401,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
 }
 
 #define seq_page_flag(seq, page, flag, has_flags) do {                  \
-        if (test_bit(PG_##flag, &(page)->flags)) {                      \
+        if (cfs_test_bit(PG_##flag, &(page)->flags)) {                  \
                 seq_printf(seq, "%s"#flag, has_flags ? "|" : "");       \
                 has_flags = 1;                                          \
         }                                                               \
@@ -460,9 +461,9 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
                 if (clob != NULL) {
                         hdr = cl_object_header(clob);
 
-                        spin_lock(&hdr->coh_page_guard);
+                        cfs_spin_lock(&hdr->coh_page_guard);
                         page = cl_page_lookup(hdr, id.vpi_index);
-                        spin_unlock(&hdr->coh_page_guard);
+                        cfs_spin_unlock(&hdr->coh_page_guard);
 
                         seq_printf(f, "%8x@"DFID": ",
                                    id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
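The vvp_pgcache_obj() hunk keeps the old four-argument hlist iterator shape under the cfs_hlist_* names, with the walk guarded by cfs_read_lock()/cfs_read_unlock(). A sketch of a single-bucket lookup; the types and key field are hypothetical.

    #include <libcfs/libcfs.h>

    struct example_hdr {
            cfs_hlist_node_t eh_hash;       /* bucket linkage */
            int              eh_key;
    };

    static struct example_hdr *example_lookup(cfs_hlist_head_t *bucket, int key)
    {
            struct example_hdr *hdr;
            cfs_hlist_node_t   *scan;       /* iterator cursor, as above */

            cfs_hlist_for_each_entry(hdr, scan, bucket, eh_hash) {
                    if (hdr->eh_key == key)
                            return hdr;
            }
            return NULL;
    }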
index b22535a..f208b0e 100644
@@ -350,9 +350,9 @@ static int vvp_io_trunc_start(const struct lu_env *env,
                 struct cl_object_header *hdr;
 
                 hdr = cl_object_header(obj);
-                spin_lock(&hdr->coh_page_guard);
+                cfs_spin_lock(&hdr->coh_page_guard);
                 vio->cui_partpage = cl_page_lookup(hdr, start);
-                spin_unlock(&hdr->coh_page_guard);
+                cfs_spin_unlock(&hdr->coh_page_guard);
 
                 if (vio->cui_partpage != NULL)
                         /*
index 846934c..444a9c2 100644
@@ -69,7 +69,7 @@ static unsigned long vvp_lock_weigh(const struct lu_env *env,
         struct ccc_object *cob = cl2ccc(slice->cls_obj);
 
         ENTRY;
-        RETURN(atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0);
+        RETURN(cfs_atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0);
 }
 
 static const struct cl_lock_operations vvp_lock_ops = {
index 412a877..21837e4 100644
@@ -65,8 +65,9 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
         struct ll_inode_info *lli;
 
         (*p)(env, cookie, "(%s %i %i) inode: %p ",
-             list_empty(&obj->cob_pending_list) ? "-" : "+",
-             obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt), inode);
+             cfs_list_empty(&obj->cob_pending_list) ? "-" : "+",
+             obj->cob_transient_pages, cfs_atomic_read(&obj->cob_mmap_cnt),
+             inode);
         if (inode) {
                 lli = ll_i2info(inode);
                 (*p)(env, cookie, "%lu/%u %o %u %i %p "DFID,
index f00f4f0..9fd0842 100644
@@ -315,9 +315,9 @@ int ll_getxattr_common(struct inode *inode, const char *name,
                 struct ll_inode_info *lli = ll_i2info(inode);
                 struct posix_acl *acl;
 
-                spin_lock(&lli->lli_lock);
+                cfs_spin_lock(&lli->lli_lock);
                 acl = posix_acl_dup(lli->lli_posix_acl);
-                spin_unlock(&lli->lli_lock);
+                cfs_spin_unlock(&lli->lli_lock);
 
                 if (!acl)
                         RETURN(-ENODATA);
index 55f4807..5c98cb7 100644
@@ -42,8 +42,8 @@
 
 #define LMV_MAX_TGT_COUNT 128
 
-#define lmv_init_lock(lmv)   down(&lmv->init_sem);
-#define lmv_init_unlock(lmv) up(&lmv->init_sem);
+#define lmv_init_lock(lmv)   cfs_down(&lmv->init_sem);
+#define lmv_init_unlock(lmv) cfs_up(&lmv->init_sem);
 
 #define LL_IT2STR(it)                                  \
        ((it) ? ldlm_it2str((it)->it_op) : "0")
@@ -73,11 +73,11 @@ struct lmv_object {
         /**
          * Link to global objects list.
          */
-        struct list_head        lo_list;
+        cfs_list_t              lo_list;
         /**
          * Sema for protecting fields.
          */
-        struct semaphore        lo_guard;
+        cfs_semaphore_t         lo_guard;
         /**
          * Object state like O_FREEING.
          */
@@ -85,7 +85,7 @@ struct lmv_object {
         /**
          * Object ref counter.
          */
-        atomic_t                lo_count;
+        cfs_atomic_t            lo_count;
         /**
          * Object master fid.
          */
@@ -115,14 +115,14 @@ static inline void
 lmv_object_lock(struct lmv_object *obj)
 {
         LASSERT(obj);
-        down(&obj->lo_guard);
+        cfs_down(&obj->lo_guard);
 }
 
 static inline void
 lmv_object_unlock(struct lmv_object *obj)
 {
         LASSERT(obj);
-        up(&obj->lo_guard);
+        cfs_up(&obj->lo_guard);
 }
 
 void lmv_object_add(struct lmv_object *obj);
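lmv_object_lock()/lmv_object_unlock() now spell the semaphore operations cfs_down()/cfs_up() on a cfs_semaphore_t. A minimal sketch of guarding one field with that pair, assuming cfs_sema_init() initializes the counter as it does later in this patch; the object type is hypothetical.

    #include <libcfs/libcfs.h>

    struct example_obj {
            cfs_semaphore_t eo_guard;       /* protects eo_state */
            int             eo_state;
    };

    static void example_obj_init(struct example_obj *obj)
    {
            cfs_sema_init(&obj->eo_guard, 1);       /* binary semaphore */
            obj->eo_state = 0;
    }

    static void example_obj_set_state(struct example_obj *obj, int state)
    {
            cfs_down(&obj->eo_guard);
            obj->eo_state = state;
            cfs_up(&obj->eo_guard);
    }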
index 3e7dfc8..caa7e8b 100644
@@ -65,7 +65,7 @@
 
 /* object cache. */
 cfs_mem_cache_t *lmv_object_cache;
-atomic_t lmv_object_count = ATOMIC_INIT(0);
+cfs_atomic_t lmv_object_count = CFS_ATOMIC_INIT(0);
 
 static void lmv_activate_target(struct lmv_obd *lmv,
                                 struct lmv_tgt_desc *tgt,
@@ -97,7 +97,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
         CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
                lmv, uuid->uuid, activate);
 
-        spin_lock(&lmv->lmv_lock);
+        cfs_spin_lock(&lmv->lmv_lock);
         for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
                 if (tgt->ltd_exp == NULL)
                         continue;
@@ -133,7 +133,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
         EXIT;
 
  out_lmv_lock:
-        spin_unlock(&lmv->lmv_lock);
+        cfs_spin_unlock(&lmv->lmv_lock);
         return rc;
 }
 
@@ -146,7 +146,7 @@ static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid,
 
         LASSERT(data != NULL);
 
-        spin_lock(&lmv->lmv_lock);
+        cfs_spin_lock(&lmv->lmv_lock);
         for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
                 if (tgt->ltd_exp == NULL)
                         continue;
@@ -156,7 +156,7 @@ static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid,
                         break;
                 }
         }
-        spin_unlock(&lmv->lmv_lock);
+        cfs_spin_unlock(&lmv->lmv_lock);
         RETURN(0);
 }
 
@@ -463,7 +463,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
 
         CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
                 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
-                atomic_read(&obd->obd_refcount));
+                cfs_atomic_read(&obd->obd_refcount));
 
 #ifdef __KERNEL__
         lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
@@ -527,18 +527,18 @@ int lmv_add_target(struct obd_device *obd, struct obd_uuid *tgt_uuid)
                         CERROR("lmv failed to setup llogging subsystems\n");
                 }
         }
-        spin_lock(&lmv->lmv_lock);
+        cfs_spin_lock(&lmv->lmv_lock);
         tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
         tgt->ltd_uuid = *tgt_uuid;
-        spin_unlock(&lmv->lmv_lock);
+        cfs_spin_unlock(&lmv->lmv_lock);
 
         if (lmv->connected) {
                 rc = lmv_connect_mdc(obd, tgt);
                 if (rc) {
-                        spin_lock(&lmv->lmv_lock);
+                        cfs_spin_lock(&lmv->lmv_lock);
                         lmv->desc.ld_tgt_count--;
                         memset(tgt, 0, sizeof(*tgt));
-                        spin_unlock(&lmv->lmv_lock);
+                        cfs_spin_unlock(&lmv->lmv_lock);
                 } else {
                         int easize = sizeof(struct lmv_stripe_md) +
                                      lmv->desc.ld_tgt_count *
@@ -753,13 +753,14 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
                         RETURN(-EINVAL);
 
                 rc = obd_statfs(mdc_obd, &stat_buf,
-                                cfs_time_current_64() - HZ, 0);
+                                cfs_time_current_64() - CFS_HZ, 0);
                 if (rc)
                         RETURN(rc);
-                if (copy_to_user(data->ioc_pbuf1, &stat_buf, data->ioc_plen1))
+                if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+                                     data->ioc_plen1))
                         RETURN(-EFAULT);
-                if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
-                                  data->ioc_plen2))
+                if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
+                                     data->ioc_plen2))
                         RETURN(-EFAULT);
                 break;
         }
@@ -976,7 +977,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
          * New seq alloc and FLD setup should be atomic. Otherwise we may find
          * on server that seq in new allocated fid is not yet known.
          */
-        down(&tgt->ltd_fid_sem);
+        cfs_down(&tgt->ltd_fid_sem);
 
         if (!tgt->ltd_active)
                 GOTO(out, rc = -ENODEV);
@@ -992,7 +993,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
 
         EXIT;
 out:
-        up(&tgt->ltd_fid_sem);
+        cfs_up(&tgt->ltd_fid_sem);
         return rc;
 }
 
@@ -1063,7 +1064,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
                 RETURN(-ENOMEM);
 
         for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
-                sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
+                cfs_sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
                 lmv->tgts[i].ltd_idx = i;
         }
 
@@ -1081,8 +1082,8 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         lmv->max_easize = 0;
         lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
 
-        spin_lock_init(&lmv->lmv_lock);
-        sema_init(&lmv->init_sem, 1);
+        cfs_spin_lock_init(&lmv->lmv_lock);
+        cfs_sema_init(&lmv->init_sem, 1);
 
         rc = lmv_object_setup(obd);
         if (rc) {
@@ -3043,7 +3044,7 @@ int __init lmv_init(void)
 
         lprocfs_lmv_init_vars(&lvars);
 
-        request_module("lquota");
+        cfs_request_module("lquota");
         quota_interface = PORTAL_SYMBOL_GET(lmv_quota_interface);
         init_obd_quota_ops(quota_interface, &lmv_obd_ops);
 
@@ -3066,9 +3067,9 @@ static void lmv_exit(void)
 
         class_unregister_type(LUSTRE_LMV_NAME);
 
-        LASSERTF(atomic_read(&lmv_object_count) == 0,
+        LASSERTF(cfs_atomic_read(&lmv_object_count) == 0,
                  "Can't free lmv objects cache, %d object(s) busy\n",
-                 atomic_read(&lmv_object_count));
+                 cfs_atomic_read(&lmv_object_count));
         cfs_mem_cache_destroy(lmv_object_cache);
 }
 
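lmv_iocontrol() now calls cfs_copy_to_user() and measures the statfs age against CFS_HZ. Assuming the wrapper keeps copy_to_user() semantics (a nonzero return means some bytes were left uncopied), the error mapping looks like this; the helper and its arguments are hypothetical.

    #include <libcfs/libcfs.h>

    /* Copy a kernel buffer out to a userspace pointer, mapping a partial
     * copy to -EFAULT exactly as the ioctl hunk above does. */
    static int example_copy_out(void *ubuf, const void *kbuf, unsigned long len)
    {
            if (cfs_copy_to_user(ubuf, kbuf, len))
                    return -EFAULT;
            return 0;
    }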
index b96f9f9..820e0d6 100644
 #include "lmv_internal.h"
 
 extern cfs_mem_cache_t *lmv_object_cache;
-extern atomic_t lmv_object_count;
+extern cfs_atomic_t lmv_object_count;
 
 static CFS_LIST_HEAD(obj_list);
-static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t obj_list_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 struct lmv_object *lmv_object_alloc(struct obd_device *obd,
                                     const struct lu_fid *fid,
@@ -82,15 +82,15 @@ struct lmv_object *lmv_object_alloc(struct obd_device *obd,
         if (!obj)
                 return NULL;
 
-        atomic_inc(&lmv_object_count);
+        cfs_atomic_inc(&lmv_object_count);
 
         obj->lo_fid = *fid;
         obj->lo_obd = obd;
         obj->lo_state = 0;
         obj->lo_hashtype = mea->mea_magic;
 
-        init_MUTEX(&obj->lo_guard);
-        atomic_set(&obj->lo_count, 0);
+        cfs_init_mutex(&obj->lo_guard);
+        cfs_atomic_set(&obj->lo_count, 0);
         obj->lo_objcount = mea->mea_count;
 
         obj_size = sizeof(struct lmv_stripe) * 
@@ -133,54 +133,54 @@ void lmv_object_free(struct lmv_object *obj)
         struct lmv_obd          *lmv = &obj->lo_obd->u.lmv;
         unsigned int             obj_size;
 
-        LASSERT(!atomic_read(&obj->lo_count));
+        LASSERT(!cfs_atomic_read(&obj->lo_count));
 
         obj_size = sizeof(struct lmv_stripe) *
                 lmv->desc.ld_tgt_count;
 
         OBD_FREE(obj->lo_stripes, obj_size);
         OBD_SLAB_FREE(obj, lmv_object_cache, sizeof(*obj));
-        atomic_dec(&lmv_object_count);
+        cfs_atomic_dec(&lmv_object_count);
 }
 
 static void __lmv_object_add(struct lmv_object *obj)
 {
-        atomic_inc(&obj->lo_count);
-        list_add(&obj->lo_list, &obj_list);
+        cfs_atomic_inc(&obj->lo_count);
+        cfs_list_add(&obj->lo_list, &obj_list);
 }
 
 void lmv_object_add(struct lmv_object *obj)
 {
-        spin_lock(&obj_list_lock);
+        cfs_spin_lock(&obj_list_lock);
         __lmv_object_add(obj);
-        spin_unlock(&obj_list_lock);
+        cfs_spin_unlock(&obj_list_lock);
 }
 
 static void __lmv_object_del(struct lmv_object *obj)
 {
-        list_del(&obj->lo_list);
+        cfs_list_del(&obj->lo_list);
         lmv_object_free(obj);
 }
 
 void lmv_object_del(struct lmv_object *obj)
 {
-        spin_lock(&obj_list_lock);
+        cfs_spin_lock(&obj_list_lock);
         __lmv_object_del(obj);
-        spin_unlock(&obj_list_lock);
+        cfs_spin_unlock(&obj_list_lock);
 }
 
 static struct lmv_object *__lmv_object_get(struct lmv_object *obj)
 {
         LASSERT(obj != NULL);
-        atomic_inc(&obj->lo_count);
+        cfs_atomic_inc(&obj->lo_count);
         return obj;
 }
 
 struct lmv_object *lmv_object_get(struct lmv_object *obj)
 {
-        spin_lock(&obj_list_lock);
+        cfs_spin_lock(&obj_list_lock);
         __lmv_object_get(obj);
-        spin_unlock(&obj_list_lock);
+        cfs_spin_unlock(&obj_list_lock);
         return obj;
 }
 
@@ -188,7 +188,7 @@ static void __lmv_object_put(struct lmv_object *obj)
 {
         LASSERT(obj);
 
-        if (atomic_dec_and_test(&obj->lo_count)) {
+        if (cfs_atomic_dec_and_test(&obj->lo_count)) {
                 CDEBUG(D_INODE, "Last reference to "DFID" - "
                        "destroying\n", PFID(&obj->lo_fid));
                 __lmv_object_del(obj);
@@ -197,9 +197,9 @@ static void __lmv_object_put(struct lmv_object *obj)
 
 void lmv_object_put(struct lmv_object *obj)
 {
-        spin_lock(&obj_list_lock);
+        cfs_spin_lock(&obj_list_lock);
         __lmv_object_put(obj);
-        spin_unlock(&obj_list_lock);
+        cfs_spin_unlock(&obj_list_lock);
 }
 
 void lmv_object_put_unlock(struct lmv_object *obj)
@@ -211,14 +211,14 @@ void lmv_object_put_unlock(struct lmv_object *obj)
 static struct lmv_object *__lmv_object_find(struct obd_device *obd, const struct lu_fid *fid)
 {
         struct lmv_object       *obj;
-        struct list_head        *cur;
+        cfs_list_t              *cur;
 
-        list_for_each(cur, &obj_list) {
-                obj = list_entry(cur, struct lmv_object, lo_list);
+        cfs_list_for_each(cur, &obj_list) {
+                obj = cfs_list_entry(cur, struct lmv_object, lo_list);
 
-                /* 
+                /*
                  * Check if object is in destroying phase. If so - skip
-                 * it. 
+                 * it.
                  */
                 if (obj->lo_state & O_FREEING)
                         continue;
@@ -233,8 +233,8 @@ static struct lmv_object *__lmv_object_find(struct obd_device *obd, const struct
                 if (obj->lo_obd != obd)
                         continue;
 
-                /* 
-                 * Check if this is what we're looking for. 
+                /*
+                 * Check if this is what we're looking for.
                  */
                 if (lu_fid_eq(&obj->lo_fid, fid))
                         return __lmv_object_get(obj);
@@ -249,9 +249,9 @@ struct lmv_object *lmv_object_find(struct obd_device *obd,
         struct lmv_object       *obj;
         ENTRY;
 
-        spin_lock(&obj_list_lock);
+        cfs_spin_lock(&obj_list_lock);
         obj = __lmv_object_find(obd, fid);
-        spin_unlock(&obj_list_lock);
+        cfs_spin_unlock(&obj_list_lock);
 
         RETURN(obj);
 }
@@ -289,13 +289,13 @@ static struct lmv_object *__lmv_object_create(struct obd_device *obd,
          * Check if someone created it already while we were dealing with
          * allocating @obj. 
          */
-        spin_lock(&obj_list_lock);
+        cfs_spin_lock(&obj_list_lock);
         obj = __lmv_object_find(obd, fid);
         if (obj) {
                 /* 
                  * Someone created it already - put @obj and getting out. 
                  */
-                spin_unlock(&obj_list_lock);
+                cfs_spin_unlock(&obj_list_lock);
                 lmv_object_free(new);
                 RETURN(obj);
         }
@@ -303,7 +303,7 @@ static struct lmv_object *__lmv_object_create(struct obd_device *obd,
         __lmv_object_add(new);
         __lmv_object_get(new);
 
-        spin_unlock(&obj_list_lock);
+        cfs_spin_unlock(&obj_list_lock);
 
         CDEBUG(D_INODE, "New obj in lmv cache: "DFID"\n",
                PFID(fid));
@@ -391,7 +391,7 @@ int lmv_object_delete(struct obd_export *exp, const struct lu_fid *fid)
         int                      rc = 0;
         ENTRY;
 
-        spin_lock(&obj_list_lock);
+        cfs_spin_lock(&obj_list_lock);
         obj = __lmv_object_find(obd, fid);
         if (obj) {
                 obj->lo_state |= O_FREEING;
@@ -399,7 +399,7 @@ int lmv_object_delete(struct obd_export *exp, const struct lu_fid *fid)
                 __lmv_object_put(obj);
                 rc = 1;
         }
-        spin_unlock(&obj_list_lock);
+        cfs_spin_unlock(&obj_list_lock);
         RETURN(rc);
 }
 
@@ -416,28 +416,29 @@ int lmv_object_setup(struct obd_device *obd)
 
 void lmv_object_cleanup(struct obd_device *obd)
 {
-        struct list_head        *cur;
-        struct list_head        *tmp;
+        cfs_list_t              *cur;
+        cfs_list_t              *tmp;
         struct lmv_object       *obj;
         ENTRY;
 
         CDEBUG(D_INFO, "LMV object manager cleanup (%s)\n",
                obd->obd_uuid.uuid);
 
-        spin_lock(&obj_list_lock);
-        list_for_each_safe(cur, tmp, &obj_list) {
-                obj = list_entry(cur, struct lmv_object, lo_list);
+        cfs_spin_lock(&obj_list_lock);
+        cfs_list_for_each_safe(cur, tmp, &obj_list) {
+                obj = cfs_list_entry(cur, struct lmv_object, lo_list);
 
                 if (obj->lo_obd != obd)
                         continue;
 
                 obj->lo_state |= O_FREEING;
-                if (atomic_read(&obj->lo_count) > 1) {
+                if (cfs_atomic_read(&obj->lo_count) > 1) {
                         CERROR("Object "DFID" has count (%d)\n", 
-                               PFID(&obj->lo_fid), atomic_read(&obj->lo_count));
+                               PFID(&obj->lo_fid),
+                               cfs_atomic_read(&obj->lo_count));
                 }
                 __lmv_object_put(obj);
         }
-        spin_unlock(&obj_list_lock);
+        cfs_spin_unlock(&obj_list_lock);
         EXIT;
 }
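lmv_object.c keeps its global object list as a CFS_LIST_HEAD guarded by a statically initialized cfs_spinlock_t, with cfs_atomic_t reference counting; the caller that sees cfs_atomic_dec_and_test() return true unlinks and frees. A compact sketch of that ownership pattern, using only constructs visible above (all names hypothetical):

    #include <libcfs/libcfs.h>
    #include <obd_support.h>

    static CFS_LIST_HEAD(example_list);
    static cfs_spinlock_t example_list_lock = CFS_SPIN_LOCK_UNLOCKED;

    struct example_node {
            cfs_list_t   en_list;
            cfs_atomic_t en_count;
    };

    static void example_node_add(struct example_node *node)
    {
            cfs_spin_lock(&example_list_lock);
            cfs_atomic_inc(&node->en_count);        /* list holds a ref */
            cfs_list_add(&node->en_list, &example_list);
            cfs_spin_unlock(&example_list_lock);
    }

    static void example_node_put(struct example_node *node)
    {
            cfs_spin_lock(&example_list_lock);
            if (cfs_atomic_dec_and_test(&node->en_count)) {
                    cfs_list_del(&node->en_list);   /* last ref: unlink */
                    OBD_FREE_PTR(node);
            }
            cfs_spin_unlock(&example_list_lock);
    }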
index 555f4dc..55a27a2 100644
@@ -106,7 +106,7 @@ static int lmv_wr_placement(struct file *file, const char *buffer,
         placement_policy_t       policy;
         struct lmv_obd          *lmv;
 
-        if (copy_from_user(dummy, buffer, MAX_POLICY_STRING_SIZE))
+        if (cfs_copy_from_user(dummy, buffer, MAX_POLICY_STRING_SIZE))
                 return -EFAULT;
 
         LASSERT(dev != NULL);
@@ -121,9 +121,9 @@ static int lmv_wr_placement(struct file *file, const char *buffer,
 
         policy = placement_name2policy(dummy, len);
         if (policy != PLACEMENT_INVAL_POLICY) {
-                spin_lock(&lmv->lmv_lock);
+                cfs_spin_lock(&lmv->lmv_lock);
                 lmv->lmv_placement = policy;
-                spin_unlock(&lmv->lmv_lock);
+                cfs_spin_unlock(&lmv->lmv_lock);
         } else {
                 CERROR("Invalid placement policy \"%s\"!\n", dummy);
                 return -EINVAL;
index 2a06147..21b0ff4 100644
@@ -158,7 +158,7 @@ struct lov_device {
          * Serializes access to lov_device::ld_emrg in low-memory
          * conditions.
          */
-        struct mutex              ld_mutex;
+        cfs_mutex_t               ld_mutex;
 };
 
 /**
@@ -196,7 +196,7 @@ struct lov_object {
          *
          * \see lov_object::lo_type
          */
-        struct rw_semaphore    lo_type_guard;
+        cfs_rw_semaphore_t     lo_type_guard;
         /**
          * Type of an object. Protected by lov_object::lo_type_guard.
          */
@@ -366,7 +366,7 @@ struct lov_lock_link {
          * A linkage into per sub-lock list of all corresponding top-locks,
          * hanging off lovsub_lock::lss_parents.
          */
-        struct list_head lll_list;
+        cfs_list_t       lll_list;
 };
 
 /**
@@ -378,7 +378,7 @@ struct lovsub_lock {
          * List of top-locks that have given sub-lock as their part. Protected
          * by cl_lock::cll_guard mutex.
          */
-        struct list_head      lss_parents;
+        cfs_list_t            lss_parents;
         /**
          * Top-lock that initiated current operation on this sub-lock. This is
          * only set during top-to-bottom lock operations like enqueue, and is
@@ -430,7 +430,7 @@ struct lov_io_sub {
          * Linkage into a list (hanging off lov_io::lis_active) of all
          * sub-io's active for the current IO iteration.
          */
-        struct list_head     sub_linkage;
+        cfs_list_t           sub_linkage;
         /**
          * true, iff cl_io_init() was successfully executed against
          * lov_io_sub::sub_io.
@@ -506,7 +506,7 @@ struct lov_io {
         /**
          * List of active sub-io's.
          */
-        struct list_head   lis_active;
+        cfs_list_t         lis_active;
 };
 
 struct lov_session {
index 01cdd3c..86a6fbb 100644
@@ -60,7 +60,7 @@ cfs_mem_cache_t *lovsub_req_kmem;
 cfs_mem_cache_t *lov_lock_link_kmem;
 
 /** Lock class of lov_device::ld_mutex. */
-struct lock_class_key cl_lov_device_mutex_class;
+cfs_lock_class_key_t cl_lov_device_mutex_class;
 
 struct lu_kmem_descr lov_caches[] = {
         {
@@ -167,7 +167,7 @@ static void lov_key_fini(const struct lu_context *ctx,
                          struct lu_context_key *key, void *data)
 {
         struct lov_thread_info *info = data;
-        LINVRNT(list_empty(&info->lti_closure.clc_list));
+        LINVRNT(cfs_list_empty(&info->lti_closure.clc_list));
         OBD_SLAB_FREE_PTR(info, lov_thread_kmem);
 }
 
@@ -387,7 +387,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
 
                 OBD_ALLOC(newd, tgt_size * sz);
                 if (newd != NULL) {
-                        mutex_lock(&dev->ld_mutex);
+                        cfs_mutex_lock(&dev->ld_mutex);
                         if (sub_size > 0) {
                                 memcpy(newd, dev->ld_target, sub_size * sz);
                                 OBD_FREE(dev->ld_target, sub_size * sz);
@@ -398,7 +398,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
                         if (dev->ld_emrg != NULL)
                                 lov_emerg_free(dev->ld_emrg, sub_size);
                         dev->ld_emrg = emerg;
-                        mutex_unlock(&dev->ld_mutex);
+                        cfs_mutex_unlock(&dev->ld_mutex);
                 } else {
                         lov_emerg_free(emerg, tgt_size);
                         result = -ENOMEM;
@@ -504,8 +504,8 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
         d->ld_ops        = &lov_lu_ops;
         ld->ld_cl.cd_ops = &lov_cl_ops;
 
-        mutex_init(&ld->ld_mutex);
-        lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
+        cfs_mutex_init(&ld->ld_mutex);
+        cfs_lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
 
         /* setup the LOV OBD */
         obd = class_name2obd(lustre_cfg_string(cfg, 0));
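lov_device_alloc() pairs cfs_mutex_init() with cfs_lockdep_set_class() on a static cfs_lock_class_key_t, the usual lockdep idiom under the cfs_ spellings. A sketch with hypothetical names:

    #include <libcfs/libcfs.h>

    /* One static key per mutex class, so lockdep can tell instances apart. */
    static cfs_lock_class_key_t example_mutex_class;

    struct example_dev {
            cfs_mutex_t ed_mutex;
    };

    static void example_dev_init(struct example_dev *dev)
    {
            cfs_mutex_init(&dev->ed_mutex);
            cfs_lockdep_set_class(&dev->ed_mutex, &example_mutex_class);
    }

    static void example_dev_resize(struct example_dev *dev)
    {
            cfs_mutex_lock(&dev->ed_mutex);
            /* ... swap in a larger target array, as lov_expand_targets()
             * does under ld_mutex above ... */
            cfs_mutex_unlock(&dev->ed_mutex);
    }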
index e35162d..e7e5255 100644
@@ -42,7 +42,7 @@
 
 struct lov_lock_handles {
         struct portals_handle   llh_handle;
-        atomic_t                llh_refcount;
+        cfs_atomic_t            llh_refcount;
         int                     llh_stripe_count;
         struct lustre_handle    llh_handles[0];
 };
@@ -51,7 +51,7 @@ struct lov_request {
         struct obd_info          rq_oi;
         struct lov_request_set  *rq_rqset;
 
-        struct list_head         rq_link;
+        cfs_list_t               rq_link;
 
         int                      rq_idx;        /* index in lov->tgts array */
         int                      rq_stripe;     /* stripe number */
@@ -66,7 +66,7 @@ struct lov_request {
 struct lov_request_set {
         struct ldlm_enqueue_info*set_ei;
         struct obd_info         *set_oi;
-        atomic_t                 set_refcount;
+        cfs_atomic_t             set_refcount;
         struct obd_export       *set_exp;
         /* XXX: There is @set_exp already, however obd_statfs gets obd_device
            only. */
@@ -80,9 +80,9 @@ struct lov_request_set {
         obd_count                set_oabufs;
         struct brw_page         *set_pga;
         struct lov_lock_handles *set_lockh;
-        struct list_head         set_list;
+        cfs_list_t               set_list;
         cfs_waitq_t              set_waitq;
-        spinlock_t               set_lock;
+        cfs_spinlock_t           set_lock;
 };
 
 extern cfs_mem_cache_t *lov_oinfo_slab;
@@ -90,9 +90,9 @@ extern cfs_mem_cache_t *lov_oinfo_slab;
 static inline void lov_llh_addref(void *llhp)
 {
         struct lov_lock_handles *llh = llhp;
-        atomic_inc(&llh->llh_refcount);
+        cfs_atomic_inc(&llh->llh_refcount);
         CDEBUG(D_INFO, "GETting llh %p : new refcount %d\n", llh,
-               atomic_read(&llh->llh_refcount));
+               cfs_atomic_read(&llh->llh_refcount));
 }
 
 static inline struct lov_lock_handles *lov_llh_new(struct lov_stripe_md *lsm)
@@ -103,7 +103,7 @@ static inline struct lov_lock_handles *lov_llh_new(struct lov_stripe_md *lsm)
                   sizeof(*llh->llh_handles) * lsm->lsm_stripe_count);
         if (llh == NULL)
                 return NULL;
-        atomic_set(&llh->llh_refcount, 2);
+        cfs_atomic_set(&llh->llh_refcount, 2);
         llh->llh_stripe_count = lsm->lsm_stripe_count;
         CFS_INIT_LIST_HEAD(&llh->llh_handle.h_link);
         class_handle_hash(&llh->llh_handle, lov_llh_addref);
@@ -115,13 +115,13 @@ void lov_finish_set(struct lov_request_set *set);
 static inline void lov_get_reqset(struct lov_request_set *set)
 {
         LASSERT(set != NULL);
-        LASSERT(atomic_read(&set->set_refcount) > 0);
-        atomic_inc(&set->set_refcount);
+        LASSERT(cfs_atomic_read(&set->set_refcount) > 0);
+        cfs_atomic_inc(&set->set_refcount);
 }
 
 static inline void lov_put_reqset(struct lov_request_set *set)
 {
-        if (atomic_dec_and_test(&set->set_refcount))
+        if (cfs_atomic_dec_and_test(&set->set_refcount))
                 lov_finish_set(set);
 }
 
@@ -135,14 +135,14 @@ lov_handle2llh(struct lustre_handle *handle)
 static inline void lov_llh_put(struct lov_lock_handles *llh)
 {
         CDEBUG(D_INFO, "PUTting llh %p : new refcount %d\n", llh,
-               atomic_read(&llh->llh_refcount) - 1);
-        LASSERT(atomic_read(&llh->llh_refcount) > 0 &&
-                atomic_read(&llh->llh_refcount) < 0x5a5a);
-        if (atomic_dec_and_test(&llh->llh_refcount)) {
+               cfs_atomic_read(&llh->llh_refcount) - 1);
+        LASSERT(cfs_atomic_read(&llh->llh_refcount) > 0 &&
+                cfs_atomic_read(&llh->llh_refcount) < 0x5a5a);
+        if (cfs_atomic_dec_and_test(&llh->llh_refcount)) {
                 class_handle_unhash(&llh->llh_handle);
                 /* The structure may be held by other threads because RCU.
                  *   -jxiong */
-                if (atomic_read(&llh->llh_refcount))
+                if (cfs_atomic_read(&llh->llh_refcount))
                         return;
 
                 OBD_FREE_RCU(llh, sizeof *llh +
index 80bfb20..8b71f49 100644
@@ -153,7 +153,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
         sub->sub_borrowed = 0;
 
         if (lio->lis_mem_frozen) {
-                LASSERT(mutex_is_locked(&ld->ld_mutex));
+                LASSERT(cfs_mutex_is_locked(&ld->ld_mutex));
                 sub->sub_io  = &ld->ld_emrg[stripe]->emrg_subio;
                 sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
                 sub->sub_borrowed = 1;
@@ -402,7 +402,7 @@ static int lov_io_iter_init(const struct lu_env *env,
                         rc = PTR_ERR(sub);
 
                 if (!rc)
-                        list_add_tail(&sub->sub_linkage, &lio->lis_active);
+                        cfs_list_add_tail(&sub->sub_linkage, &lio->lis_active);
                 else
                         break;
         }
@@ -453,7 +453,7 @@ static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
         int rc = 0;
 
         ENTRY;
-        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+        cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                 lov_sub_enter(sub);
                 rc = iofunc(sub->sub_env, sub->sub_io);
                 lov_sub_exit(sub);
@@ -519,8 +519,8 @@ static void lov_io_iter_fini(const struct lu_env *env,
         ENTRY;
         rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
         LASSERT(rc == 0);
-        while (!list_empty(&lio->lis_active))
-                list_del_init(lio->lis_active.next);
+        while (!cfs_list_empty(&lio->lis_active))
+                cfs_list_del_init(lio->lis_active.next);
         EXIT;
 }
 
@@ -612,7 +612,7 @@ static int lov_io_submit(const struct lu_env *env,
                  * In order to not make things worse, even don't try to
                  * allocate the memory with __GFP_NOWARN. -jay
                  */
-                mutex_lock(&ld->ld_mutex);
+                cfs_mutex_lock(&ld->ld_mutex);
                 lio->lis_mem_frozen = 1;
         }
 
@@ -626,7 +626,7 @@ static int lov_io_submit(const struct lu_env *env,
                 struct lov_io_sub   *sub;
                 struct cl_page_list *sub_qin = QIN(stripe);
 
-                if (list_empty(&sub_qin->pl_pages))
+                if (cfs_list_empty(&sub_qin->pl_pages))
                         continue;
 
                 cl_page_list_splice(sub_qin, &cl2q->c2_qin);
@@ -646,7 +646,7 @@ static int lov_io_submit(const struct lu_env *env,
         for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
                 struct cl_page_list *sub_qin = QIN(stripe);
 
-                if (list_empty(&sub_qin->pl_pages))
+                if (cfs_list_empty(&sub_qin->pl_pages))
                         continue;
 
                 cl_page_list_splice(sub_qin, qin);
@@ -665,7 +665,7 @@ static int lov_io_submit(const struct lu_env *env,
                                 lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
                 }
                 lio->lis_mem_frozen = 0;
-                mutex_unlock(&ld->ld_mutex);
+                cfs_mutex_unlock(&ld->ld_mutex);
         }
 
         RETURN(rc);
index 25e3d1b..563b625 100644
@@ -122,7 +122,7 @@ static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
         lck->lls_sub[idx].sub_lock = lsl;
         lck->lls_nr_filled++;
         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
-        list_add_tail(&link->lll_list, &lsl->lss_parents);
+        cfs_list_add_tail(&link->lll_list, &lsl->lss_parents);
         link->lll_idx = idx;
         link->lll_super = lck;
         cl_lock_get(parent);
@@ -205,7 +205,7 @@ static int lov_sublock_lock(const struct lu_env *env,
         int                 result = 0;
         ENTRY;
 
-        LASSERT(list_empty(&closure->clc_list));
+        LASSERT(cfs_list_empty(&closure->clc_list));
 
         sublock = lls->sub_lock;
         child = sublock->lss_cl.cls_lock;
@@ -1067,7 +1067,7 @@ void lov_lock_unlink(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
         ENTRY;
 
-        list_del_init(&link->lll_list);
+        cfs_list_del_init(&link->lll_list);
         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
         /* yank this sub-lock from parent's array */
         lck->lls_sub[link->lll_idx].sub_lock = NULL;
@@ -1088,7 +1088,7 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
         ENTRY;
 
-        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 if (scan->lll_super == lck)
                         RETURN(scan);
         }
@@ -1211,7 +1211,7 @@ static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
         struct cl_lock_closure *closure;
 
         closure = &lov_env_info(env)->lti_closure;
-        LASSERT(list_empty(&closure->clc_list));
+        LASSERT(cfs_list_empty(&closure->clc_list));
         cl_lock_closure_init(env, closure, parent, 1);
         return closure;
 }
index f8bdec2..9f84ece 100644
@@ -75,9 +75,9 @@ static void lov_getref(struct obd_device *obd)
         struct lov_obd *lov = &obd->u.lov;
 
         /* nobody gets through here until lov_putref is done */
-        mutex_down(&lov->lov_lock);
-        atomic_inc(&lov->lov_refcount);
-        mutex_up(&lov->lov_lock);
+        cfs_mutex_down(&lov->lov_lock);
+        cfs_atomic_inc(&lov->lov_refcount);
+        cfs_mutex_up(&lov->lov_lock);
         return;
 }
 
@@ -87,9 +87,9 @@ static void lov_putref(struct obd_device *obd)
 {
         struct lov_obd *lov = &obd->u.lov;
 
-        mutex_down(&lov->lov_lock);
+        cfs_mutex_down(&lov->lov_lock);
         /* ok to dec to 0 more than once -- ltd_exp's will be null */
-        if (atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
+        if (cfs_atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
                 CFS_LIST_HEAD(kill);
                 int i;
                 struct lov_tgt_desc *tgt, *n;
@@ -100,7 +100,7 @@ static void lov_putref(struct obd_device *obd)
 
                         if (!tgt || !tgt->ltd_reap)
                                 continue;
-                        list_add(&tgt->ltd_kill, &kill);
+                        cfs_list_add(&tgt->ltd_kill, &kill);
                         /* XXX - right now there is a dependency on ld_tgt_count
                          * being the maximum tgt index for computing the
                          * mds_max_easize. So we can't shrink it. */
@@ -108,15 +108,15 @@ static void lov_putref(struct obd_device *obd)
                         lov->lov_tgts[i] = NULL;
                         lov->lov_death_row--;
                 }
-                mutex_up(&lov->lov_lock);
+                cfs_mutex_up(&lov->lov_lock);
 
-                list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
-                        list_del(&tgt->ltd_kill);
+                cfs_list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
+                        cfs_list_del(&tgt->ltd_kill);
                         /* Disconnect */
                         __lov_del_obd(obd, tgt);
                 }
         } else {
-                mutex_up(&lov->lov_lock);
+                cfs_mutex_up(&lov->lov_lock);
         }
 }
 
@@ -531,13 +531,13 @@ int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
         if (tgt_obd == NULL)
                 RETURN(-EINVAL);
 
-        mutex_down(&lov->lov_lock);
+        cfs_mutex_down(&lov->lov_lock);
 
         if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
                 tgt = lov->lov_tgts[index];
                 CERROR("UUID %s already assigned at LOV target index %d\n",
                        obd_uuid2str(&tgt->ltd_uuid), index);
-                mutex_up(&lov->lov_lock);
+                cfs_mutex_up(&lov->lov_lock);
                 RETURN(-EEXIST);
         }
 
@@ -551,7 +551,7 @@ int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
                         newsize = newsize << 1;
                 OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
                 if (newtgts == NULL) {
-                        mutex_up(&lov->lov_lock);
+                        cfs_mutex_up(&lov->lov_lock);
                         RETURN(-ENOMEM);
                 }
 
@@ -576,13 +576,13 @@ int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
 
         OBD_ALLOC_PTR(tgt);
         if (!tgt) {
-                mutex_up(&lov->lov_lock);
+                cfs_mutex_up(&lov->lov_lock);
                 RETURN(-ENOMEM);
         }
 
         rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
         if (rc) {
-                mutex_up(&lov->lov_lock);
+                cfs_mutex_up(&lov->lov_lock);
                 OBD_FREE_PTR(tgt);
                 RETURN(rc);
         }
@@ -598,7 +598,7 @@ int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
         if (index >= lov->desc.ld_tgt_count)
                 lov->desc.ld_tgt_count = index + 1;
 
-        mutex_up(&lov->lov_lock);
+        cfs_mutex_up(&lov->lov_lock);
 
         CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
                 index, tgt->ltd_gen, lov->desc.ld_tgt_count);
@@ -785,10 +785,10 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         lov->desc = *desc;
         lov->lov_tgt_size = 0;
 
-        sema_init(&lov->lov_lock, 1);
-        atomic_set(&lov->lov_refcount, 0);
+        cfs_sema_init(&lov->lov_lock, 1);
+        cfs_atomic_set(&lov->lov_refcount, 0);
         CFS_INIT_LIST_HEAD(&lov->lov_qos.lq_oss_list);
-        init_rwsem(&lov->lov_qos.lq_rw_sem);
+        cfs_init_rwsem(&lov->lov_qos.lq_rw_sem);
         lov->lov_sp_me = LUSTRE_SP_CLI;
         lov->lov_qos.lq_dirty = 1;
         lov->lov_qos.lq_rr.lqr_dirty = 1;
@@ -866,11 +866,11 @@ static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
 static int lov_cleanup(struct obd_device *obd)
 {
         struct lov_obd *lov = &obd->u.lov;
-        struct list_head *pos, *tmp;
+        cfs_list_t *pos, *tmp;
         struct pool_desc *pool;
 
-        list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
-                pool = list_entry(pos, struct pool_desc, pool_list);
+        cfs_list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
+                pool = cfs_list_entry(pos, struct pool_desc, pool_list);
                 /* free pool structs */
                 CDEBUG(D_INFO, "delete pool %p\n", pool);
                 lov_pool_del(obd, pool->pool_name);
@@ -888,14 +888,14 @@ static int lov_cleanup(struct obd_device *obd)
 
                         /* Inactive targets may never have connected */
                         if (lov->lov_tgts[i]->ltd_active ||
-                            atomic_read(&lov->lov_refcount))
+                            cfs_atomic_read(&lov->lov_refcount))
                             /* We should never get here - these
                                should have been removed in the
                               disconnect. */
                                 CERROR("lov tgt %d not cleaned!"
                                        " deathrow=%d, lovrc=%d\n",
                                        i, lov->lov_death_row,
-                                       atomic_read(&lov->lov_refcount));
+                                       cfs_atomic_read(&lov->lov_refcount));
                         lov_del_target(obd, i, 0, 0);
                 }
                 obd_putref(obd);
@@ -977,7 +977,7 @@ out:
 }
 
 #ifndef log2
-#define log2(n) ffz(~(n))
+#define log2(n) cfs_ffz(~(n))
 #endif
 
 static int lov_clear_orphans(struct obd_export *export, struct obdo *src_oa,
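
The log2 fallback above relies on a bit trick that the rename leaves intact:
for a power of two n, every bit of ~n is set except n's lone bit, so "find
first zero" on ~n yields log2(n). An illustrative use (operand name assumed,
and it must be a power of two):

    /* cfs_ffz(~8UL) == 3, i.e. log2(8); undefined for non-powers of two */
    unsigned long order = cfs_ffz(~(unsigned long)stripe_size);
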
@@ -1130,13 +1130,14 @@ static int lov_create(struct obd_export *exp, struct obdo *src_oa,
          * later in alloc_qos(), we will wait for those rpcs to complete if
          * the osfs age is older than 2 * qos_maxage */
         qos_statfs_update(exp->exp_obd,
-                          cfs_time_shift_64(-lov->desc.ld_qos_maxage) + HZ, 0);
+                          cfs_time_shift_64(-lov->desc.ld_qos_maxage) + CFS_HZ,
+                          0);
 
         rc = lov_prep_create_set(exp, &oinfo, ea, src_oa, oti, &set);
         if (rc)
                 GOTO(out, rc);
 
-        list_for_each_entry(req, &set->set_list, rq_link) {
+        cfs_list_for_each_entry(req, &set->set_list, rq_link) {
                 /* XXX: LOV STACKING: use real "obj_mdp" sub-data */
                 rc = obd_create_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
                                       &req->rq_oi, &req->rq_oi.oi_md, oti);
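
CFS_HZ replaces the raw HZ so the tick constant resolves on non-Linux libcfs
builds as well. cfs_time_shift_64() returns the current 64-bit time offset by
the given number of seconds, so the argument above is an absolute age limit:
anything older than qos_maxage seconds (less one tick of slack) triggers a
statfs refresh. Restated in isolation as a sketch, reusing the names from the
call above:

    /* "stale if older than qos_maxage seconds, plus one tick of slack" */
    __u64 max_age = cfs_time_shift_64(-lov->desc.ld_qos_maxage) + CFS_HZ;
    qos_statfs_update(exp->exp_obd, max_age, 0);
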
@@ -1171,7 +1172,7 @@ static int lov_destroy(struct obd_export *exp, struct obdo *oa,
         struct lov_request_set *set;
         struct obd_info oinfo;
         struct lov_request *req;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_obd *lov;
         int rc = 0, err = 0;
         ENTRY;
@@ -1192,8 +1193,8 @@ static int lov_destroy(struct obd_export *exp, struct obdo *oa,
         if (rc)
                 GOTO(out, rc);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 if (oa->o_valid & OBD_MD_FLCOOKIE)
                         oti->oti_logcookies = set->set_cookies + req->rq_stripe;
@@ -1225,7 +1226,7 @@ static int lov_getattr(struct obd_export *exp, struct obd_info *oinfo)
 {
         struct lov_request_set *set;
         struct lov_request *req;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_obd *lov;
         int err = 0, rc = 0;
         ENTRY;
@@ -1242,8 +1243,8 @@ static int lov_getattr(struct obd_export *exp, struct obd_info *oinfo)
         if (rc)
                 RETURN(rc);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 CDEBUG(D_INFO, "objid "LPX64"[%d] has subobj "LPX64" at idx "
                        "%u\n", oinfo->oi_oa->o_id, req->rq_stripe,
@@ -1286,7 +1287,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
 {
         struct lov_request_set *lovset;
         struct lov_obd *lov;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_request *req;
         int rc = 0, err;
         ENTRY;
@@ -1307,8 +1308,8 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                oinfo->oi_md->lsm_object_id, oinfo->oi_md->lsm_stripe_count,
                oinfo->oi_md->lsm_stripe_size);
 
-        list_for_each (pos, &lovset->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &lovset->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 CDEBUG(D_INFO, "objid "LPX64"[%d] has subobj "LPX64" at idx "
                        "%u\n", oinfo->oi_oa->o_id, req->rq_stripe,
@@ -1324,7 +1325,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                 }
         }
 
-        if (!list_empty(&rqset->set_requests)) {
+        if (!cfs_list_empty(&rqset->set_requests)) {
                 LASSERT(rc == 0);
                 LASSERT (rqset->set_interpret == NULL);
                 rqset->set_interpret = lov_getattr_interpret;
@@ -1343,7 +1344,7 @@ static int lov_setattr(struct obd_export *exp, struct obd_info *oinfo,
 {
         struct lov_request_set *set;
         struct lov_obd *lov;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_request *req;
         int err = 0, rc = 0;
         ENTRY;
@@ -1367,8 +1368,8 @@ static int lov_setattr(struct obd_export *exp, struct obd_info *oinfo,
         if (rc)
                 RETURN(rc);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 rc = obd_setattr(lov->lov_tgts[req->rq_idx]->ltd_exp,
                                  &req->rq_oi, NULL);
@@ -1409,7 +1410,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
 {
         struct lov_request_set *set;
         struct lov_request *req;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_obd *lov;
         int rc = 0;
         ENTRY;
@@ -1433,8 +1434,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                oinfo->oi_md->lsm_object_id, oinfo->oi_md->lsm_stripe_count,
                oinfo->oi_md->lsm_stripe_size);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
                         oti->oti_logcookies = set->set_cookies + req->rq_stripe;
@@ -1456,7 +1457,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
         }
 
         /* If we are not waiting for responses on async requests, return. */
-        if (rc || !rqset || list_empty(&rqset->set_requests)) {
+        if (rc || !rqset || cfs_list_empty(&rqset->set_requests)) {
                 int err;
                 if (rc)
                         set->set_completes = 0;
@@ -1493,7 +1494,7 @@ static int lov_punch(struct obd_export *exp, struct obd_info *oinfo,
 {
         struct lov_request_set *set;
         struct lov_obd *lov;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_request *req;
         int rc = 0;
         ENTRY;
@@ -1509,8 +1510,8 @@ static int lov_punch(struct obd_export *exp, struct obd_info *oinfo,
         if (rc)
                 RETURN(rc);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 rc = obd_punch(lov->lov_tgts[req->rq_idx]->ltd_exp,
                                &req->rq_oi, NULL, rqset);
@@ -1523,7 +1524,7 @@ static int lov_punch(struct obd_export *exp, struct obd_info *oinfo,
                 }
         }
 
-        if (rc || list_empty(&rqset->set_requests)) {
+        if (rc || cfs_list_empty(&rqset->set_requests)) {
                 int err;
                 err = lov_fini_punch_set(set);
                 RETURN(rc ? rc : err);
@@ -1543,7 +1544,7 @@ static int lov_sync(struct obd_export *exp, struct obdo *oa,
         struct lov_request_set *set;
         struct obd_info oinfo;
         struct lov_obd *lov;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_request *req;
         int err = 0, rc = 0;
         ENTRY;
@@ -1558,8 +1559,8 @@ static int lov_sync(struct obd_export *exp, struct obdo *oa,
         if (rc)
                 RETURN(rc);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 rc = obd_sync(lov->lov_tgts[req->rq_idx]->ltd_exp,
                               req->rq_oi.oi_oa, NULL,
@@ -1620,7 +1621,7 @@ static int lov_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
 {
         struct lov_request_set *set;
         struct lov_request *req;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_obd *lov = &exp->exp_obd->u.lov;
         int err, rc = 0;
         ENTRY;
@@ -1636,10 +1637,10 @@ static int lov_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
         if (rc)
                 RETURN(rc);
 
-        list_for_each (pos, &set->set_list) {
+        cfs_list_for_each (pos, &set->set_list) {
                 struct obd_export *sub_exp;
                 struct brw_page *sub_pga;
-                req = list_entry(pos, struct lov_request, rq_link);
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 sub_exp = lov->lov_tgts[req->rq_idx]->ltd_exp;
                 sub_pga = set->set_pga + req->rq_pgaidx;
@@ -1672,7 +1673,7 @@ static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
         ldlm_mode_t mode = einfo->ei_mode;
         struct lov_request_set *set;
         struct lov_request *req;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_obd *lov;
         ldlm_error_t rc;
         ENTRY;
@@ -1692,8 +1693,8 @@ static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
         if (rc)
                 RETURN(rc);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 rc = obd_enqueue(lov->lov_tgts[req->rq_idx]->ltd_exp,
                                  &req->rq_oi, einfo, rqset);
@@ -1701,7 +1702,7 @@ static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
                         GOTO(out, rc);
         }
 
-        if (rqset && !list_empty(&rqset->set_requests)) {
+        if (rqset && !cfs_list_empty(&rqset->set_requests)) {
                 LASSERT(rc == 0);
                 LASSERT(rqset->set_interpret == NULL);
                 rqset->set_interpret = lov_enqueue_interpret;
@@ -1753,7 +1754,7 @@ static int lov_cancel(struct obd_export *exp, struct lov_stripe_md *lsm,
         struct lov_request_set *set;
         struct obd_info oinfo;
         struct lov_request *req;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_obd *lov;
         struct lustre_handle *lov_lockhp;
         int err = 0, rc = 0;
@@ -1771,8 +1772,8 @@ static int lov_cancel(struct obd_export *exp, struct lov_stripe_md *lsm,
         if (rc)
                 RETURN(rc);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
                 lov_lockhp = set->set_lockh->llh_handles + req->rq_stripe;
 
                 rc = obd_cancel(lov->lov_tgts[req->rq_idx]->ltd_exp,
@@ -1868,7 +1869,7 @@ static int lov_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
 {
         struct lov_request_set *set;
         struct lov_request *req;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_obd *lov;
         int rc = 0;
         ENTRY;
@@ -1881,10 +1882,10 @@ static int lov_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
         if (rc)
                 RETURN(rc);
 
-        list_for_each (pos, &set->set_list) {
+        cfs_list_for_each (pos, &set->set_list) {
                 struct obd_device *osc_obd;
 
-                req = list_entry(pos, struct lov_request, rq_link);
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 osc_obd = class_exp2obd(lov->lov_tgts[req->rq_idx]->ltd_exp);
                 rc = obd_statfs_async(osc_obd, &req->rq_oi, max_age, rqset);
@@ -1892,7 +1893,7 @@ static int lov_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
                         break;
         }
 
-        if (rc || list_empty(&rqset->set_requests)) {
+        if (rc || cfs_list_empty(&rqset->set_requests)) {
                 int err;
                 if (rc)
                         set->set_completes = 0;
@@ -1965,14 +1966,15 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 
                 /* got statfs data */
                 rc = obd_statfs(osc_obd, &stat_buf,
-                                cfs_time_current_64() - HZ, 0);
+                                cfs_time_current_64() - CFS_HZ, 0);
                 if (rc)
                         RETURN(rc);
-                if (copy_to_user(data->ioc_pbuf1, &stat_buf, data->ioc_plen1))
+                if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+                                     data->ioc_plen1))
                         RETURN(-EFAULT);
                 /* copy UUID */
-                if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
-                                 data->ioc_plen2))
+                if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
+                                     data->ioc_plen2))
                         RETURN(-EFAULT);
                 break;
         }
@@ -2016,7 +2018,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                         *genp = lov->lov_tgts[i]->ltd_gen;
                 }
 
-                if (copy_to_user((void *)uarg, buf, len))
+                if (cfs_copy_to_user((void *)uarg, buf, len))
                         rc = -EFAULT;
                 obd_ioctl_freedata(buf, len);
                 break;
@@ -2700,7 +2702,7 @@ static int lov_extent_calc(struct obd_export *exp, struct lov_stripe_md *lsm,
 void lov_stripe_lock(struct lov_stripe_md *md)
 {
         LASSERT(md->lsm_lock_owner != cfs_curproc_pid());
-        spin_lock(&md->lsm_lock);
+        cfs_spin_lock(&md->lsm_lock);
         LASSERT(md->lsm_lock_owner == 0);
         md->lsm_lock_owner = cfs_curproc_pid();
 }
@@ -2710,7 +2712,7 @@ void lov_stripe_unlock(struct lov_stripe_md *md)
 {
         LASSERT(md->lsm_lock_owner == cfs_curproc_pid());
         md->lsm_lock_owner = 0;
-        spin_unlock(&md->lsm_lock);
+        cfs_spin_unlock(&md->lsm_lock);
 }
 EXPORT_SYMBOL(lov_stripe_unlock);
 
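
The stripe-lock pair records the owner pid only while lsm_lock is held, which
is what the entry LASSERTs use to catch recursive locking from the same
process. The prefixed spinlock names are presumably flat aliases on Linux:

    /* assumed Linux-side aliases following the cfs_ convention */
    #define cfs_spin_lock_init(lock)  spin_lock_init(lock)
    #define cfs_spin_lock(lock)       spin_lock(lock)
    #define cfs_spin_unlock(lock)     spin_unlock(lock)
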
@@ -2781,14 +2783,14 @@ int __init lov_init(void)
 
         lov_oinfo_slab = cfs_mem_cache_create("lov_oinfo",
                                               sizeof(struct lov_oinfo),
-                                              0, SLAB_HWCACHE_ALIGN);
+                                              0, CFS_SLAB_HWCACHE_ALIGN);
         if (lov_oinfo_slab == NULL) {
                 lu_kmem_fini(lov_caches);
                 return -ENOMEM;
         }
         lprocfs_lov_init_vars(&lvars);
 
-        request_module("lquota");
+        cfs_request_module("lquota");
         quota_interface = PORTAL_SYMBOL_GET(lov_quota_interface);
         init_obd_quota_ops(quota_interface, &lov_obd_ops);
 
index 5886885..079be23 100644
@@ -263,7 +263,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
                 waiter = &lov_env_info(env)->lti_waiter;
                 cfs_waitlink_init(waiter);
                 cfs_waitq_add(&site->ls_marche_funebre, waiter);
-                set_current_state(CFS_TASK_UNINT);
+                cfs_set_current_state(CFS_TASK_UNINT);
 
                 while (r0->lo_sub[idx] == los)
                         /* this wait-queue is signaled at the end of
@@ -448,10 +448,10 @@ const static struct lov_layout_operations lov_dispatch[] = {
                                                                         \
         __lock &= __obj->lo_owner != cfs_current();                     \
         if (__lock)                                                     \
-                down_read(&__obj->lo_type_guard);                       \
+                cfs_down_read(&__obj->lo_type_guard);                   \
         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
         if (__lock)                                                     \
-                up_read(&__obj->lo_type_guard);                         \
+                cfs_up_read(&__obj->lo_type_guard);                     \
         __result;                                                       \
 })
 
@@ -467,12 +467,12 @@ do {                                                                    \
         enum lov_layout_type                    __llt;                  \
                                                                         \
         if (__obj->lo_owner != cfs_current())                           \
-                down_read(&__obj->lo_type_guard);                       \
+                cfs_down_read(&__obj->lo_type_guard);                   \
         __llt = __obj->lo_type;                                         \
         LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch));        \
         lov_dispatch[__llt].op(__VA_ARGS__);                            \
         if (__obj->lo_owner != cfs_current())                           \
-                up_read(&__obj->lo_type_guard);                         \
+                cfs_up_read(&__obj->lo_type_guard);                     \
 } while (0)
 
 static int lov_layout_change(const struct lu_env *env,
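
Both dispatch macros take lo_type_guard as readers unless the calling thread
already owns the object (lo_owner == cfs_current()); that exception is what
lets lov_conf_set() below invoke the dispatch while holding the write side.
The read/write semaphore wrappers are again presumably one-to-one on Linux:

    /* hypothetical aliases, following the cfs_ convention */
    #define cfs_init_rwsem(sem)    init_rwsem(sem)
    #define cfs_down_read(sem)     down_read(sem)
    #define cfs_up_read(sem)       up_read(sem)
    #define cfs_down_write(sem)    down_write(sem)
    #define cfs_up_write(sem)      up_write(sem)
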
@@ -509,7 +509,7 @@ static int lov_layout_change(const struct lu_env *env,
                 cl_env_reexit(cookie);
 
                 old_ops->llo_fini(env, obj, &obj->u);
-                LASSERT(list_empty(&hdr->coh_locks));
+                LASSERT(cfs_list_empty(&hdr->coh_locks));
                 LASSERT(hdr->coh_tree.rnode == NULL);
                 LASSERT(hdr->coh_pages == 0);
 
@@ -537,7 +537,7 @@ int lov_object_init(const struct lu_env *env, struct lu_object *obj,
         int result;
 
         ENTRY;
-        init_rwsem(&lov->lo_type_guard);
+        cfs_init_rwsem(&lov->lo_type_guard);
 
         /* no locking is necessary, as object is being created */
         lov->lo_type = cconf->u.coc_md->lsm != NULL ? LLT_RAID0 : LLT_EMPTY;
@@ -561,7 +561,7 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
          * Currently only LLT_EMPTY -> LLT_RAID0 transition is supported.
          */
         LASSERT(lov->lo_owner != cfs_current());
-        down_write(&lov->lo_type_guard);
+        cfs_down_write(&lov->lo_type_guard);
         LASSERT(lov->lo_owner == NULL);
         lov->lo_owner = cfs_current();
         if (lov->lo_type == LLT_EMPTY && conf->u.coc_md->lsm != NULL)
@@ -569,7 +569,7 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
         else
                 result = -EOPNOTSUPP;
         lov->lo_owner = NULL;
-        up_write(&lov->lo_type_guard);
+        cfs_up_write(&lov->lo_type_guard);
         RETURN(result);
 }
 
index 0ed24bb..4163434 100644
@@ -303,7 +303,7 @@ int lov_alloc_memmd(struct lov_stripe_md **lsmp, int stripe_count,
                 RETURN(-ENOMEM);
         }
 
-        spin_lock_init(&(*lsmp)->lsm_lock);
+        cfs_spin_lock_init(&(*lsmp)->lsm_lock);
         (*lsmp)->lsm_magic = magic;
         (*lsmp)->lsm_stripe_count = stripe_count;
         (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
@@ -395,7 +395,7 @@ static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
         int rc;
         ENTRY;
 
-        if (copy_from_user(&lumv3, lump, sizeof(struct lov_user_md_v1)))
+        if (cfs_copy_from_user(&lumv3, lump, sizeof(struct lov_user_md_v1)))
                 RETURN(-EFAULT);
 
         lmm_magic = lumv1->lmm_magic;
@@ -404,10 +404,10 @@ static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
                 lustre_swab_lov_user_md_v1(lumv1);
                 lmm_magic = LOV_USER_MAGIC_V1;
         } else if (lmm_magic == LOV_USER_MAGIC_V3) {
-                if (copy_from_user(&lumv3, lump, sizeof(lumv3)))
+                if (cfs_copy_from_user(&lumv3, lump, sizeof(lumv3)))
                         RETURN(-EFAULT);
         } else if (lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
-                if (copy_from_user(&lumv3, lump, sizeof(lumv3)))
+                if (cfs_copy_from_user(&lumv3, lump, sizeof(lumv3)))
                         RETURN(-EFAULT);
                 lustre_swab_lov_user_md_v3(&lumv3);
                 lmm_magic = LOV_USER_MAGIC_V3;
@@ -597,7 +597,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
         /* we only need the header part from user space to get lmm_magic and
          * lmm_stripe_count, (the header part is common to v1 and v3) */
         lum_size = sizeof(struct lov_user_md_v1);
-        if (copy_from_user(&lum, lump, lum_size))
+        if (cfs_copy_from_user(&lum, lump, lum_size))
                 GOTO(out_set, rc = -EFAULT);
         else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
                  (lum.lmm_magic != LOV_USER_MAGIC_V3))
@@ -607,7 +607,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
             (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
                 /* Return right size of stripe to user */
                 lum.lmm_stripe_count = lsm->lsm_stripe_count;
-                rc = copy_to_user(lump, &lum, lum_size);
+                rc = cfs_copy_to_user(lump, &lum, lum_size);
                 GOTO(out_set, rc = -EOVERFLOW);
         }
         rc = lov_packmd(exp, &lmmk, lsm);
@@ -656,7 +656,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
         lum.lmm_stripe_count = lmmk->lmm_stripe_count;
         ((struct lov_user_md*)lmmk)->lmm_stripe_offset = 0;
         ((struct lov_user_md*)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
-        if (copy_to_user(lump, lmmk, lmm_size))
+        if (cfs_copy_to_user(lump, lmmk, lmm_size))
                 rc = -EFAULT;
 
         obd_free_diskmd(exp, &lmmk);
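
cfs_copy_from_user()/cfs_copy_to_user() keep the kernel convention of
returning the number of bytes left uncopied, so any non-zero result maps to
-EFAULT, as in every hunk above. The caller-side pattern in isolation (a
sketch reusing the same types):

    struct lov_user_md_v1 lum;

    /* non-zero return means some bytes could not be copied */
    if (cfs_copy_from_user(&lum, lump, sizeof(lum)))
            RETURN(-EFAULT);
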
index 208ec04..d3cd4f9 100644
 static void lov_pool_getref(struct pool_desc *pool)
 {
         CDEBUG(D_INFO, "pool %p\n", pool);
-        atomic_inc(&pool->pool_refcount);
+        cfs_atomic_inc(&pool->pool_refcount);
 }
 
 void lov_pool_putref(struct pool_desc *pool) 
 {
         CDEBUG(D_INFO, "pool %p\n", pool);
-        if (atomic_dec_and_test(&pool->pool_refcount)) {
-                LASSERT(hlist_unhashed(&pool->pool_hash));
-                LASSERT(list_empty(&pool->pool_list));
+        if (cfs_atomic_dec_and_test(&pool->pool_refcount)) {
+                LASSERT(cfs_hlist_unhashed(&pool->pool_hash));
+                LASSERT(cfs_list_empty(&pool->pool_list));
                 LASSERT(pool->pool_proc_entry == NULL);
                 lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
                 lov_ost_pool_free(&(pool->pool_obds));
@@ -97,40 +97,40 @@ static __u32 pool_hashfn(cfs_hash_t *hash_body, void *key, unsigned mask)
         return (result % mask);
 }
 
-static void *pool_key(struct hlist_node *hnode)
+static void *pool_key(cfs_hlist_node_t *hnode)
 {
         struct pool_desc *pool;
 
-        pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+        pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
         return (pool->pool_name);
 }
 
-static int pool_hashkey_compare(void *key, struct hlist_node *compared_hnode)
+static int pool_hashkey_compare(void *key, cfs_hlist_node_t *compared_hnode)
 {
         char *pool_name;
         struct pool_desc *pool;
         int rc;
 
         pool_name = (char *)key;
-        pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
+        pool = cfs_hlist_entry(compared_hnode, struct pool_desc, pool_hash);
         rc = strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
         return (!rc);
 }
 
-static void *pool_hashrefcount_get(struct hlist_node *hnode)
+static void *pool_hashrefcount_get(cfs_hlist_node_t *hnode)
 {
         struct pool_desc *pool;
 
-        pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+        pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
         lov_pool_getref(pool);
         return (pool);
 }
 
-static void *pool_hashrefcount_put(struct hlist_node *hnode)
+static void *pool_hashrefcount_put(cfs_hlist_node_t *hnode)
 {
         struct pool_desc *pool;
 
-        pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+        pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
         lov_pool_putref(pool);
         return (pool);
 }
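
pool_key(), pool_hashkey_compare() and the refcount get/put pair above are
the per-node callbacks a cfs_hash_t table drives; cfs_hlist_entry() recovers
the enclosing pool_desc from the embedded cfs_hlist_node_t. It is presumably
the usual container_of, along the lines of:

    /* assumed definition, mirroring the kernel's hlist_entry() */
    #define cfs_hlist_entry(ptr, type, member) \
            container_of(ptr, type, member)
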
@@ -174,14 +174,14 @@ static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
 
         /* iterate to find a non empty entry */
         prev_idx = iter->idx;
-        down_read(&pool_tgt_rw_sem(iter->pool));
+        cfs_down_read(&pool_tgt_rw_sem(iter->pool));
         iter->idx++;
         if (iter->idx == pool_tgt_count(iter->pool)) {
                 iter->idx = prev_idx; /* we stay on the last entry */
-                up_read(&pool_tgt_rw_sem(iter->pool));
+                cfs_up_read(&pool_tgt_rw_sem(iter->pool));
                 return NULL;
         }
-        up_read(&pool_tgt_rw_sem(iter->pool));
+        cfs_up_read(&pool_tgt_rw_sem(iter->pool));
         (*pos)++;
         /* return != NULL to continue */
         return iter;
@@ -251,9 +251,9 @@ static int pool_proc_show(struct seq_file *s, void *v)
         LASSERT(iter->pool != NULL);
         LASSERT(iter->idx <= pool_tgt_count(iter->pool));
 
-        down_read(&pool_tgt_rw_sem(iter->pool));
+        cfs_down_read(&pool_tgt_rw_sem(iter->pool));
         tgt = pool_tgt(iter->pool, iter->idx);
-        up_read(&pool_tgt_rw_sem(iter->pool));
+        cfs_up_read(&pool_tgt_rw_sem(iter->pool));
         if (tgt)
                 seq_printf(s, "%s\n", obd_uuid2str(&(tgt->ltd_uuid)));
 
@@ -295,7 +295,7 @@ void lov_dump_pool(int level, struct pool_desc *pool)
 
         CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n",
                pool->pool_name, pool->pool_obds.op_count);
-        down_read(&pool_tgt_rw_sem(pool));
+        cfs_down_read(&pool_tgt_rw_sem(pool));
 
         for (i = 0; i < pool_tgt_count(pool) ; i++) {
                 if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp)
@@ -305,7 +305,7 @@ void lov_dump_pool(int level, struct pool_desc *pool)
                        obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid)));
         }
 
-        up_read(&pool_tgt_rw_sem(pool));
+        cfs_up_read(&pool_tgt_rw_sem(pool));
         lov_pool_putref(pool);
 }
 
@@ -318,7 +318,7 @@ int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
                 count = LOV_POOL_INIT_COUNT;
         op->op_array = NULL;
         op->op_count = 0;
-        init_rwsem(&op->op_rw_sem);
+        cfs_init_rwsem(&op->op_rw_sem);
         op->op_size = count;
         OBD_ALLOC(op->op_array, op->op_size * sizeof(op->op_array[0]));
         if (op->op_array == NULL) {
@@ -358,7 +358,7 @@ int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
         int rc = 0, i;
         ENTRY;
 
-        down_write(&op->op_rw_sem);
+        cfs_down_write(&op->op_rw_sem);
 
         rc = lov_ost_pool_extend(op, min_count);
         if (rc)
@@ -374,7 +374,7 @@ int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
         op->op_count++;
         EXIT;
 out:
-        up_write(&op->op_rw_sem);
+        cfs_up_write(&op->op_rw_sem);
         return rc;
 }
 
@@ -383,20 +383,20 @@ int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
         int i;
         ENTRY;
 
-        down_write(&op->op_rw_sem);
+        cfs_down_write(&op->op_rw_sem);
 
         for (i = 0; i < op->op_count; i++) {
                 if (op->op_array[i] == idx) {
                         memmove(&op->op_array[i], &op->op_array[i + 1],
                                 (op->op_count - i - 1) * sizeof(op->op_array[0]));
                         op->op_count--;
-                        up_write(&op->op_rw_sem);
+                        cfs_up_write(&op->op_rw_sem);
                         EXIT;
                         return 0;
                 }
         }
 
-        up_write(&op->op_rw_sem);
+        cfs_up_write(&op->op_rw_sem);
         RETURN(-EINVAL);
 }
 
@@ -407,14 +407,14 @@ int lov_ost_pool_free(struct ost_pool *op)
         if (op->op_size == 0)
                 RETURN(0);
 
-        down_write(&op->op_rw_sem);
+        cfs_down_write(&op->op_rw_sem);
 
         OBD_FREE(op->op_array, op->op_size * sizeof(op->op_array[0]));
         op->op_array = NULL;
         op->op_count = 0;
         op->op_size = 0;
 
-        up_write(&op->op_rw_sem);
+        cfs_up_write(&op->op_rw_sem);
         RETURN(0);
 }
 
@@ -441,7 +441,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
         /* ref count is initialized to 1 because a pool, once created,
          * stays in use until it is deleted
          */
-        atomic_set(&new_pool->pool_refcount, 1);
+        cfs_atomic_set(&new_pool->pool_refcount, 1);
         rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
         if (rc)
                GOTO(out_err, rc);
@@ -451,7 +451,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
         if (rc)
                 GOTO(out_free_pool_obds, rc);
 
-        INIT_HLIST_NODE(&new_pool->pool_hash);
+        CFS_INIT_HLIST_NODE(&new_pool->pool_hash);
 
 #ifdef LPROCFS
         /* we need this assert: seq_file is not implemented for liblustre */
@@ -469,10 +469,10 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
         CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool, new_pool->pool_proc_entry);
 #endif
 
-        spin_lock(&obd->obd_dev_lock);
-        list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
         lov->lov_pool_count++;
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         /* make the pool findable only once it is fully ready */
         rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
@@ -486,10 +486,10 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
         RETURN(0);
 
 out_err:
-        spin_lock(&obd->obd_dev_lock);
-        list_del_init(&new_pool->pool_list);
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_list_del_init(&new_pool->pool_list);
         lov->lov_pool_count--;
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         lprocfs_remove(&new_pool->pool_proc_entry);
 
@@ -519,10 +519,10 @@ int lov_pool_del(struct obd_device *obd, char *poolname)
                 lov_pool_putref(pool);
         }
 
-        spin_lock(&obd->obd_dev_lock);
-        list_del_init(&pool->pool_list);
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_list_del_init(&pool->pool_list);
         lov->lov_pool_count--;
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         /* release last reference */
         lov_pool_putref(pool);
@@ -635,7 +635,7 @@ int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
          */
         lov_pool_getref(pool);
 
-        down_read(&pool_tgt_rw_sem(pool));
+        cfs_down_read(&pool_tgt_rw_sem(pool));
 
         for (i = 0; i < pool_tgt_count(pool); i++) {
                 if (pool_tgt_array(pool)[i] == idx)
@@ -644,7 +644,7 @@ int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
         rc = -ENOENT;
         EXIT;
 out:
-        up_read(&pool_tgt_rw_sem(pool));
+        cfs_up_read(&pool_tgt_rw_sem(pool));
 
         lov_pool_putref(pool);
         return rc;
index eaa2eb1..59f20c3 100644
@@ -72,9 +72,9 @@ int qos_add_tgt(struct obd_device *obd, __u32 index)
                 RETURN(-ENOTCONN);
         }
 
-        down_write(&lov->lov_qos.lq_rw_sem);
-        mutex_down(&lov->lov_lock);
-        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+        cfs_down_write(&lov->lov_qos.lq_rw_sem);
+        cfs_mutex_down(&lov->lov_lock);
+        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                 if (obd_uuid_equals(&oss->lqo_uuid,
                                     &exp->exp_connection->c_remote_uuid)) {
                         found++;
@@ -91,7 +91,7 @@ int qos_add_tgt(struct obd_device *obd, __u32 index)
                        sizeof(oss->lqo_uuid));
         } else {
                 /* Assume we have to move this one */
-                list_del(&oss->lqo_oss_list);
+                cfs_list_del(&oss->lqo_oss_list);
         }
 
         oss->lqo_ost_count++;
@@ -99,13 +99,14 @@ int qos_add_tgt(struct obd_device *obd, __u32 index)
 
         /* Add sorted by # of OSTs.  Find the first entry that we're
            bigger than... */
-        list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+        cfs_list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list,
+                                lqo_oss_list) {
                 if (oss->lqo_ost_count > temposs->lqo_ost_count)
                         break;
         }
         /* ...and add before it.  If we're the first or smallest, temposs
            points to the list head, and we add to the end. */
-        list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
+        cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
 
         lov->lov_qos.lq_dirty = 1;
         lov->lov_qos.lq_rr.lqr_dirty = 1;
@@ -116,8 +117,8 @@ int qos_add_tgt(struct obd_device *obd, __u32 index)
                oss->lqo_ost_count);
 
 out:
-        mutex_up(&lov->lov_lock);
-        up_write(&lov->lov_qos.lq_rw_sem);
+        cfs_mutex_up(&lov->lov_lock);
+        cfs_up_write(&lov->lov_qos.lq_rw_sem);
         RETURN(rc);
 }
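
The sorted insert above is worth unpacking: the loop stops at the first OSS
with fewer OSTs than the new one, and cfs_list_add_tail(node, pos) links the
node in front of pos, so the list stays ordered by descending lqo_ost_count.
If no smaller entry exists, the cursor degenerates to the list head and the
node lands at the tail, exactly as the source comment says. The bare pattern
(type and field names hypothetical):

    struct oss_desc {
            cfs_list_t link;
            int        count;
    };

    /* insert 'item' keeping 'head' sorted by descending count */
    static void oss_sorted_insert(cfs_list_t *head, struct oss_desc *item)
    {
            struct oss_desc *cur;

            cfs_list_for_each_entry(cur, head, link) {
                    if (item->count > cur->count)
                            break;
            }
            /* with no match, &cur->link aliases 'head' itself, and
             * adding "before the head" appends at the end of the list */
            cfs_list_add_tail(&item->link, &cur->link);
    }
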
 
@@ -128,7 +129,7 @@ int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt)
         int rc = 0;
         ENTRY;
 
-        down_write(&lov->lov_qos.lq_rw_sem);
+        cfs_down_write(&lov->lov_qos.lq_rw_sem);
 
         oss = tgt->ltd_qos.ltq_oss;
         if (!oss)
@@ -138,14 +139,14 @@ int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt)
         if (oss->lqo_ost_count == 0) {
                 CDEBUG(D_QOS, "removing OSS %s\n",
                        obd_uuid2str(&oss->lqo_uuid));
-                list_del(&oss->lqo_oss_list);
+                cfs_list_del(&oss->lqo_oss_list);
                 OBD_FREE_PTR(oss);
         }
 
         lov->lov_qos.lq_dirty = 1;
         lov->lov_qos.lq_rr.lqr_dirty = 1;
 out:
-        up_write(&lov->lov_qos.lq_rw_sem);
+        cfs_up_write(&lov->lov_qos.lq_rw_sem);
         RETURN(rc);
 }
 
@@ -169,7 +170,7 @@ static int qos_calc_ppo(struct obd_device *obd)
                 GOTO(out, rc = -EAGAIN);
 
         /* find bavail on each OSS */
-        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                 oss->lqo_bavail = 0;
         }
         lov->lov_qos.lq_active_oss_count = 0;
@@ -230,7 +231,7 @@ static int qos_calc_ppo(struct obd_device *obd)
         }
 
         /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
-        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                 temp = oss->lqo_bavail >> 1;
                 do_div(temp, oss->lqo_ost_count * num_active);
                 oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
@@ -311,7 +312,7 @@ static int qos_used(struct lov_obd *lov, struct ost_pool *osts,
                 lov->lov_qos.lq_active_oss_count;
 
         /* Decrease all OSS penalties */
-        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                 if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
                         oss->lqo_penalty = 0;
                 else
@@ -372,7 +373,7 @@ static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
         }
 
         /* Do actual allocation. */
-        down_write(&lov->lov_qos.lq_rw_sem);
+        cfs_down_write(&lov->lov_qos.lq_rw_sem);
 
         /*
          * Check again. While we were sleeping on @lq_rw_sem something could
@@ -380,7 +381,7 @@ static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
          */
         if (!lqr->lqr_dirty) {
                 LASSERT(lqr->lqr_pool.op_size);
-                up_write(&lov->lov_qos.lq_rw_sem);
+                cfs_up_write(&lov->lov_qos.lq_rw_sem);
                 RETURN(0);
         }
 
@@ -393,7 +394,7 @@ static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
         lqr->lqr_pool.op_count = real_count;
         rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
         if (rc) {
-                up_write(&lov->lov_qos.lq_rw_sem);
+                cfs_up_write(&lov->lov_qos.lq_rw_sem);
                 RETURN(rc);
         }
         for (i = 0; i < lqr->lqr_pool.op_count; i++)
@@ -401,7 +402,7 @@ static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
 
         /* Place all the OSTs from 1 OSS at the same time. */
         placed = 0;
-        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                 int j = 0;
                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                         if (lov->lov_tgts[src_pool->op_array[i]] &&
@@ -419,7 +420,7 @@ static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
         }
 
         lqr->lqr_dirty = 0;
-        up_write(&lov->lov_qos.lq_rw_sem);
+        cfs_up_write(&lov->lov_qos.lq_rw_sem);
 
         if (placed != real_count) {
                 /* This should never happen */
@@ -555,7 +556,7 @@ static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
                 osts = &(lov->lov_packed);
                 lqr = &(lov->lov_qos.lq_rr);
         } else {
-                down_read(&pool_tgt_rw_sem(pool));
+                cfs_down_read(&pool_tgt_rw_sem(pool));
                 osts = &(pool->pool_obds);
                 lqr = &(pool->pool_rr);
         }
@@ -578,7 +579,7 @@ static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
                 if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
                         ++lqr->lqr_offset_idx;
         }
-        down_read(&lov->lov_qos.lq_rw_sem);
+        cfs_down_read(&lov->lov_qos.lq_rw_sem);
         ost_start_idx_temp = lqr->lqr_start_idx;
 
 repeat_find:
@@ -628,12 +629,12 @@ repeat_find:
                 goto repeat_find;
         }
 
-        up_read(&lov->lov_qos.lq_rw_sem);
+        cfs_up_read(&lov->lov_qos.lq_rw_sem);
 
         *stripe_cnt = idx_pos - idx_arr;
 out:
         if (pool != NULL) {
-                up_read(&pool_tgt_rw_sem(pool));
+                cfs_up_read(&pool_tgt_rw_sem(pool));
                 /* put back ref got by lov_find_pool() */
                 lov_pool_putref(pool);
         }
@@ -656,7 +657,7 @@ static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
         if (pool == NULL) {
                 osts = &(lov->lov_packed);
         } else {
-                down_read(&pool_tgt_rw_sem(pool));
+                cfs_down_read(&pool_tgt_rw_sem(pool));
                 osts = &(pool->pool_obds);
         }
 
@@ -725,7 +726,7 @@ repeat_find:
         rc = -EFBIG;
 out:
         if (pool != NULL) {
-                up_read(&pool_tgt_rw_sem(pool));
+                cfs_up_read(&pool_tgt_rw_sem(pool));
                 /* put back ref got by lov_find_pool() */
                 lov_pool_putref(pool);
         }
@@ -757,7 +758,7 @@ static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
                 osts = &(lov->lov_packed);
                 lqr = &(lov->lov_qos.lq_rr);
         } else {
-                down_read(&pool_tgt_rw_sem(pool));
+                cfs_down_read(&pool_tgt_rw_sem(pool));
                 osts = &(pool->pool_obds);
                 lqr = &(pool->pool_rr);
         }
@@ -774,7 +775,7 @@ static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
                 GOTO(out_nolock, rc = -EAGAIN);
 
         /* Do actual allocation, use write lock here. */
-        down_write(&lov->lov_qos.lq_rw_sem);
+        cfs_down_write(&lov->lov_qos.lq_rw_sem);
 
         /*
          * Check again, while we were sleeping on @lq_rw_sem things could
@@ -892,11 +893,11 @@ static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
         LASSERT(nfound == *stripe_cnt);
 
 out:
-        up_write(&lov->lov_qos.lq_rw_sem);
+        cfs_up_write(&lov->lov_qos.lq_rw_sem);
 
 out_nolock:
         if (pool != NULL) {
-                up_read(&pool_tgt_rw_sem(pool));
+                cfs_up_read(&pool_tgt_rw_sem(pool));
                 /* put back ref got by lov_find_pool() */
                 lov_pool_putref(pool);
         }
@@ -1089,11 +1090,11 @@ void qos_update(struct lov_obd *lov)
 void qos_statfs_done(struct lov_obd *lov)
 {
         LASSERT(lov->lov_qos.lq_statfs_in_progress);
-        down_write(&lov->lov_qos.lq_rw_sem);
+        cfs_down_write(&lov->lov_qos.lq_rw_sem);
         lov->lov_qos.lq_statfs_in_progress = 0;
         /* wake up any threads waiting for the statfs rpcs to complete */
         cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
-        up_write(&lov->lov_qos.lq_rw_sem);
+        cfs_up_write(&lov->lov_qos.lq_rw_sem);
 }
 
 static int qos_statfs_ready(struct obd_device *obd, __u64 max_age)
@@ -1101,10 +1102,10 @@ static int qos_statfs_ready(struct obd_device *obd, __u64 max_age)
         struct lov_obd         *lov = &obd->u.lov;
         int rc;
         ENTRY;
-        down_read(&lov->lov_qos.lq_rw_sem);
+        cfs_down_read(&lov->lov_qos.lq_rw_sem);
         rc = lov->lov_qos.lq_statfs_in_progress == 0 ||
              cfs_time_beforeq_64(max_age, obd->obd_osfs_age);
-        up_read(&lov->lov_qos.lq_rw_sem);
+        cfs_up_read(&lov->lov_qos.lq_rw_sem);
         RETURN(rc);
 }
 
@@ -1131,14 +1132,14 @@ void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait)
                 /* statfs already in progress */
                 RETURN_EXIT;
 
-        down_write(&lov->lov_qos.lq_rw_sem);
+        cfs_down_write(&lov->lov_qos.lq_rw_sem);
         if (lov->lov_qos.lq_statfs_in_progress) {
-                up_write(&lov->lov_qos.lq_rw_sem);
+                cfs_up_write(&lov->lov_qos.lq_rw_sem);
                 GOTO(out, rc = 0);
         }
         /* no statfs in flight, send rpcs */
         lov->lov_qos.lq_statfs_in_progress = 1;
-        up_write(&lov->lov_qos.lq_rw_sem);
+        cfs_up_write(&lov->lov_qos.lq_rw_sem);
 
         if (wait)
                 CDEBUG(D_QOS, "%s: did not manage to get fresh statfs data "
@@ -1159,7 +1160,7 @@ void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait)
                 GOTO(out_failed, rc = -ENOMEM);
 
         rc = obd_statfs_async(obd, oinfo, max_age, set);
-        if (rc || list_empty(&set->set_requests)) {
+        if (rc || cfs_list_empty(&set->set_requests)) {
                 if (rc)
                         CWARN("statfs failed with %d\n", rc);
                 GOTO(out_failed, rc);
@@ -1170,11 +1171,11 @@ void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait)
         GOTO(out, rc);
 
 out_failed:
-        down_write(&lov->lov_qos.lq_rw_sem);
+        cfs_down_write(&lov->lov_qos.lq_rw_sem);
         lov->lov_qos.lq_statfs_in_progress = 0;
         /* wake up any threads waiting for the statfs rpcs to complete */
         cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
-        up_write(&lov->lov_qos.lq_rw_sem);
+        cfs_up_write(&lov->lov_qos.lq_rw_sem);
         wait = 0;
 out:
         if (set)
index c34f3fc..7c0e040 100644
@@ -58,21 +58,22 @@ static void lov_init_set(struct lov_request_set *set)
         set->set_success = 0;
         set->set_cookies = 0;
         CFS_INIT_LIST_HEAD(&set->set_list);
-        atomic_set(&set->set_refcount, 1);
+        cfs_atomic_set(&set->set_refcount, 1);
         cfs_waitq_init(&set->set_waitq);
-        spin_lock_init(&set->set_lock);
+        cfs_spin_lock_init(&set->set_lock);
 }
 
 void lov_finish_set(struct lov_request_set *set)
 {
-        struct list_head *pos, *n;
+        cfs_list_t *pos, *n;
         ENTRY;
 
         LASSERT(set);
-        list_for_each_safe(pos, n, &set->set_list) {
-                struct lov_request *req = list_entry(pos, struct lov_request,
-                                                     rq_link);
-                list_del_init(&req->rq_link);
+        cfs_list_for_each_safe(pos, n, &set->set_list) {
+                struct lov_request *req = cfs_list_entry(pos,
+                                                         struct lov_request,
+                                                         rq_link);
+                cfs_list_del_init(&req->rq_link);
 
                 if (req->rq_oi.oi_oa)
                         OBDO_FREE(req->rq_oi.oi_oa);
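
lov_finish_set() walks set_list with the _safe iterator because each request
is unlinked and freed inside the loop body; cfs_list_del_init() leaves the
removed rq_link pointing at itself, so the node is inert afterwards. The
drain-and-free pattern in miniature (item type hypothetical):

    cfs_list_t *pos, *n;

    cfs_list_for_each_safe(pos, n, head) {
            struct item *it = cfs_list_entry(pos, struct item, link);

            cfs_list_del_init(&it->link);  /* unlink before freeing */
            OBD_FREE_PTR(it);
    }
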
@@ -135,7 +136,7 @@ int lov_update_common_set(struct lov_request_set *set,
 
 void lov_set_add_req(struct lov_request *req, struct lov_request_set *set)
 {
-        list_add_tail(&req->rq_link, &set->set_list);
+        cfs_list_add_tail(&req->rq_link, &set->set_list);
         set->set_count++;
         req->rq_rqset = set;
 }
@@ -221,7 +222,7 @@ static int enqueue_done(struct lov_request_set *set, __u32 mode)
                 RETURN(0);
 
         /* cancel enqueued/matched locks */
-        list_for_each_entry(req, &set->set_list, rq_link) {
+        cfs_list_for_each_entry(req, &set->set_list, rq_link) {
                 struct lustre_handle *lov_lockhp;
 
                 if (!req->rq_complete || req->rq_rc)
@@ -565,7 +566,7 @@ static int create_done(struct obd_export *exp, struct lov_request_set *set,
         /* try alloc objects on other osts if osc_create fails for
          * exceptions: RPC failure, ENOSPC, etc */
         if (set->set_count != set->set_success) {
-                list_for_each_entry (req, &set->set_list, rq_link) {
+                cfs_list_for_each_entry (req, &set->set_list, rq_link) {
                         if (req->rq_rc == 0)
                                 continue;
 
@@ -590,7 +591,7 @@ static int create_done(struct obd_export *exp, struct lov_request_set *set,
         if (ret_oa == NULL)
                 GOTO(cleanup, rc = -ENOMEM);
 
-        list_for_each_entry(req, &set->set_list, rq_link) {
+        cfs_list_for_each_entry(req, &set->set_list, rq_link) {
                 if (!req->rq_complete || req->rq_rc)
                         continue;
                 lov_merge_attrs(ret_oa, req->rq_oi.oi_oa,
@@ -613,7 +614,7 @@ static int create_done(struct obd_export *exp, struct lov_request_set *set,
         GOTO(done, rc = 0);
 
 cleanup:
-        list_for_each_entry(req, &set->set_list, rq_link) {
+        cfs_list_for_each_entry(req, &set->set_list, rq_link) {
                 struct obd_export *sub_exp;
                 int err = 0;
 
@@ -680,12 +681,12 @@ int lov_update_create_set(struct lov_request_set *set,
                 }
         }
 
-        spin_lock(&set->set_lock);
+        cfs_spin_lock(&set->set_lock);
         req->rq_stripe = set->set_success;
         loi = lsm->lsm_oinfo[req->rq_stripe];
         if (rc) {
                 lov_update_set(set, req, rc);
-                spin_unlock(&set->set_lock);
+                cfs_spin_unlock(&set->set_lock);
                 RETURN(rc);
         }
 
@@ -700,7 +701,7 @@ int lov_update_create_set(struct lov_request_set *set,
                 set->set_cookie_sent++;
 
         lov_update_set(set, req, rc);
-        spin_unlock(&set->set_lock);
+        cfs_spin_unlock(&set->set_lock);
 
         CDEBUG(D_INODE, "objid "LPX64" has subobj "LPX64"/"LPU64" at idx %d\n",
                lsm->lsm_object_id, loi->loi_id, loi->loi_id, req->rq_idx);
@@ -755,7 +756,7 @@ int lov_prep_create_set(struct obd_export *exp, struct obd_info *oinfo,
 
 static int common_attr_done(struct lov_request_set *set)
 {
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_request *req;
         struct obdo *tmp_oa;
         int rc = 0, attrset = 0;
@@ -773,8 +774,8 @@ static int common_attr_done(struct lov_request_set *set)
         if (tmp_oa == NULL)
                 GOTO(out, rc = -ENOMEM);
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 if (!req->rq_complete || req->rq_rc)
                         continue;
@@ -809,12 +810,12 @@ static int brw_done(struct lov_request_set *set)
 {
         struct lov_stripe_md *lsm = set->set_oi->oi_md;
         struct lov_oinfo     *loi = NULL;
-        struct list_head *pos;
+        cfs_list_t *pos;
         struct lov_request *req;
         ENTRY;
 
-        list_for_each (pos, &set->set_list) {
-                req = list_entry(pos, struct lov_request, rq_link);
+        cfs_list_for_each (pos, &set->set_list) {
+                req = cfs_list_entry(pos, struct lov_request, rq_link);
 
                 if (!req->rq_complete || req->rq_rc)
                         continue;
@@ -1487,10 +1488,10 @@ int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,int success)
                 if (osfs->os_ffree != LOV_U64_MAX)
                         do_div(osfs->os_ffree, expected_stripes);
 
-                spin_lock(&obd->obd_osfs_lock);
+                cfs_spin_lock(&obd->obd_osfs_lock);
                 memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
                 obd->obd_osfs_age = cfs_time_current_64();
-                spin_unlock(&obd->obd_osfs_lock);
+                cfs_spin_unlock(&obd->obd_osfs_lock);
                 RETURN(0);
         }
 
@@ -1605,18 +1606,18 @@ static int cb_statfs_update(void *cookie, int rc)
         lov_update_set(lovreq->rq_rqset, lovreq, rc);
         if (rc)
                 GOTO(out, rc);
+
         obd_getref(lovobd);
         tgt = lov->lov_tgts[lovreq->rq_idx];
         if (!tgt || !tgt->ltd_active)
                 GOTO(out_update, rc);
 
         tgtobd = class_exp2obd(tgt->ltd_exp);
-        spin_lock(&tgtobd->obd_osfs_lock);
+        cfs_spin_lock(&tgtobd->obd_osfs_lock);
         memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
         if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
                 tgtobd->obd_osfs_age = cfs_time_current_64();
-        spin_unlock(&tgtobd->obd_osfs_lock);
+        cfs_spin_unlock(&tgtobd->obd_osfs_lock);
 
 out_update:
         lov_update_statfs(osfs, lov_sfs, success);
index 663547a..2b495da 100644
@@ -59,7 +59,7 @@ static void lovsub_lock_fini(const struct lu_env *env,
 
         ENTRY;
         lsl = cl2lovsub_lock(slice);
-        LASSERT(list_empty(&lsl->lss_parents));
+        LASSERT(cfs_list_empty(&lsl->lss_parents));
         OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
         EXIT;
 }
@@ -103,7 +103,7 @@ static void lovsub_lock_state(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(slice->cls_lock));
         ENTRY;
 
-        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 struct lov_lock *lov    = scan->lll_super;
                 struct cl_lock  *parent = lov->lls_cl.cls_lock;
 
@@ -131,7 +131,7 @@ static unsigned long lovsub_lock_weigh(const struct lu_env *env,
 
         LASSERT(cl_lock_is_mutexed(slice->cls_lock));
 
-        if (!list_empty(&lock->lss_parents)) {
+        if (!cfs_list_empty(&lock->lss_parents)) {
                 /*
                  * It is not clear whether all parents have to be asked and
                  * their estimations summed, or it is enough to ask one. For
@@ -248,7 +248,7 @@ static int lovsub_lock_modify(const struct lu_env *env,
 
         LASSERT(cl_lock_mode_match(d->cld_mode,
                                    s->cls_lock->cll_descr.cld_mode));
-        list_for_each_entry(scan, &lock->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &lock->lss_parents, lll_list) {
                 int rc;
 
                 lov = scan->lll_super;
@@ -275,7 +275,7 @@ static int lovsub_lock_closure(const struct lu_env *env,
         sub    = cl2lovsub_lock(slice);
         result = 0;
 
-        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 parent = scan->lll_super->lls_cl.cls_lock;
                 result = cl_lock_closure_build(env, parent, closure);
                 if (result != 0)
@@ -416,8 +416,8 @@ static void lovsub_lock_delete(const struct lu_env *env,
                 struct lov_lock_sub  *subdata;
 
                 restart = 0;
-                list_for_each_entry_safe(scan, temp,
-                                         &sub->lss_parents, lll_list) {
+                cfs_list_for_each_entry_safe(scan, temp,
+                                             &sub->lss_parents, lll_list) {
                         lov     = scan->lll_super;
                         subdata = &lov->lls_sub[scan->lll_idx];
                         lovsub_parent_lock(env, lov);
@@ -442,7 +442,7 @@ static int lovsub_lock_print(const struct lu_env *env, void *cookie,
         struct lov_lock      *lov;
         struct lov_lock_link *scan;
 
-        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 lov = scan->lll_super;
                 (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
                 if (lov != NULL)
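
The lovsub hunks are pure renames of the list iterators over lss_parents. The traversal idiom in isolation, sketched with illustrative names (cfs_list_t and cfs_list_for_each_entry() are the libcfs replacements this patch switches to):

    #include <libcfs/libcfs.h>

    struct parent_link {
            cfs_list_t pl_linkage;   /* chained on the child's parent list */
            int        pl_idx;       /* stripe index within the parent */
    };

    /* count how many parent locks reference this sub-lock */
    static int count_parents(cfs_list_t *parents)
    {
            struct parent_link *scan;
            int n = 0;

            cfs_list_for_each_entry(scan, parents, pl_linkage)
                    n++;
            return n;
    }
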
index b14b9f9..bb1e821 100644
@@ -52,10 +52,10 @@ CFS_LIST_HEAD(fsfilt_types);
 static struct fsfilt_operations *fsfilt_search_type(const char *type)
 {
         struct fsfilt_operations *found;
-        struct list_head *p;
+        cfs_list_t *p;
 
-        list_for_each(p, &fsfilt_types) {
-                found = list_entry(p, struct fsfilt_operations, fs_list);
+        cfs_list_for_each(p, &fsfilt_types) {
+                found = cfs_list_entry(p, struct fsfilt_operations, fs_list);
                 if (!strcmp(found->fs_type, type)) {
                         return found;
                 }
@@ -77,7 +77,7 @@ int fsfilt_register_ops(struct fsfilt_operations *fs_ops)
                 }
         } else {
                 PORTAL_MODULE_USE;
-                list_add(&fs_ops->fs_list, &fsfilt_types);
+                cfs_list_add(&fs_ops->fs_list, &fsfilt_types);
         }
 
         /* unlock fsfilt_types list */
@@ -86,15 +86,15 @@ int fsfilt_register_ops(struct fsfilt_operations *fs_ops)
 
 void fsfilt_unregister_ops(struct fsfilt_operations *fs_ops)
 {
-        struct list_head *p;
+        cfs_list_t *p;
 
         /* lock fsfilt_types list */
-        list_for_each(p, &fsfilt_types) {
+        cfs_list_for_each(p, &fsfilt_types) {
                 struct fsfilt_operations *found;
 
-                found = list_entry(p, typeof(*found), fs_list);
+                found = cfs_list_entry(p, typeof(*found), fs_list);
                 if (found == fs_ops) {
-                        list_del(p);
+                        cfs_list_del(p);
                         PORTAL_MODULE_UNUSE;
                         break;
                 }
@@ -114,7 +114,7 @@ struct fsfilt_operations *fsfilt_get_ops(const char *type)
                 snprintf(name, sizeof(name) - 1, "fsfilt_%s", type);
                 name[sizeof(name) - 1] = '\0';
 
-                if (!(rc = request_module("%s", name))) {
+                if (!(rc = cfs_request_module("%s", name))) {
                         fs_ops = fsfilt_search_type(type);
                         CDEBUG(D_INFO, "Loaded module '%s'\n", name);
                         if (!fs_ops)
@@ -127,7 +127,7 @@ struct fsfilt_operations *fsfilt_get_ops(const char *type)
                         /* unlock fsfilt_types list */
                 }
         }
-        try_module_get(fs_ops->fs_owner);
+        cfs_try_module_get(fs_ops->fs_owner);
         /* unlock fsfilt_types list */
 
         return fs_ops;
@@ -135,7 +135,7 @@ struct fsfilt_operations *fsfilt_get_ops(const char *type)
 
 void fsfilt_put_ops(struct fsfilt_operations *fs_ops)
 {
-        module_put(fs_ops->fs_owner);
+        cfs_module_put(fs_ops->fs_owner);
 }
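
fsfilt keeps its backends on a global CFS_LIST_HEAD() and pins the backend module while its operations are in use. The register/lookup skeleton, sketched with illustrative names (locking is elided here, just as the original only marks it with comments):

    #include <libcfs/libcfs.h>

    struct my_ops {
            cfs_list_t   mo_list;   /* linkage on my_types */
            const char  *mo_type;   /* backend name, e.g. "ext3" */
    };

    static CFS_LIST_HEAD(my_types);

    static struct my_ops *my_ops_find(const char *type)
    {
            cfs_list_t *p;

            cfs_list_for_each(p, &my_types) {
                    struct my_ops *found =
                            cfs_list_entry(p, struct my_ops, mo_list);
                    if (!strcmp(found->mo_type, type))
                            return found;
            }
            return NULL;
    }

    static void my_ops_register(struct my_ops *ops)
    {
            cfs_list_add(&ops->mo_list, &my_types);
    }
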
 
 
index fa2e510..bde0093 100644
@@ -584,7 +584,7 @@ static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
                 if (iattr->ia_valid & ATTR_MODE) {
                         inode->i_mode = iattr->ia_mode;
 
-                        if (!in_group_p(inode->i_gid) &&
+                        if (!cfs_curproc_is_in_groups(inode->i_gid) &&
                             !cfs_capable(CFS_CAP_FSETID))
                                 inode->i_mode &= ~S_ISGID;
                 }
@@ -1201,7 +1201,7 @@ int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                 int pages, unsigned long *blocks,
                                 int *created, int create,
-                                struct semaphore *optional_sem)
+                                cfs_semaphore_t *optional_sem)
 {
         int rc;
 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
@@ -1212,11 +1212,11 @@ int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
         }
 #endif
         if (optional_sem != NULL)
-                down(optional_sem);
+                cfs_down(optional_sem);
         rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
                                             created, create);
         if (optional_sem != NULL)
-                up(optional_sem);
+                cfs_up(optional_sem);
 
         return rc;
 }
@@ -1228,10 +1228,10 @@ int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
         int err, blocksize, csize, boffs, osize = size;
 
         /* prevent reading after eof */
-        lock_kernel();
+        cfs_lock_kernel();
         if (i_size_read(inode) < *offs + size) {
                 size = i_size_read(inode) - *offs;
-                unlock_kernel();
+                cfs_unlock_kernel();
                 if (size < 0) {
                         CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
                                i_size_read(inode), *offs);
@@ -1240,7 +1240,7 @@ int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
                         return 0;
                 }
         } else {
-                unlock_kernel();
+                cfs_unlock_kernel();
         }
 
         blocksize = 1 << inode->i_blkbits;
@@ -1323,14 +1323,14 @@ int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
 
         /* correct in-core and on-disk sizes */
         if (new_size > i_size_read(inode)) {
-                lock_kernel();
+                cfs_lock_kernel();
                 if (new_size > i_size_read(inode))
                         i_size_write(inode, new_size);
                 if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
                         EXT3_I(inode)->i_disksize = i_size_read(inode);
                 if (i_size_read(inode) > old_size)
                         mark_inode_dirty(inode);
-                unlock_kernel();
+                cfs_unlock_kernel();
         }
 
         if (err == 0)
@@ -1575,8 +1575,8 @@ out:
 }
 
 struct chk_dqblk{
-        struct hlist_node       dqb_hash;        /** quotacheck hash */
-        struct list_head        dqb_list;        /** in list also */
+        cfs_hlist_node_t        dqb_hash;        /** quotacheck hash */
+        cfs_list_t              dqb_list;        /** in list also */
         qid_t                   dqb_id;          /** uid/gid */
         short                   dqb_type;        /** USRQUOTA/GRPQUOTA */
         qsize_t                 dqb_bhardlimit;  /** block hard limit */
@@ -1599,13 +1599,13 @@ static inline unsigned int chkquot_hash(qid_t id, int type)
 }
 
 static inline struct chk_dqblk *
-find_chkquot(struct hlist_head *head, qid_t id, int type)
+find_chkquot(cfs_hlist_head_t *head, qid_t id, int type)
 {
-        struct hlist_node *node;
+        cfs_hlist_node_t *node;
         struct chk_dqblk *cdqb;
 
-        hlist_for_each(node, head) {
-                cdqb = hlist_entry(node, struct chk_dqblk, dqb_hash);
+        cfs_hlist_for_each(node, head) {
+                cdqb = cfs_hlist_entry(node, struct chk_dqblk, dqb_hash);
                 if (cdqb->dqb_id == id && cdqb->dqb_type == type)
                         return cdqb;
         }
@@ -1619,8 +1619,8 @@ static struct chk_dqblk *alloc_chkquot(qid_t id, int type)
 
         OBD_ALLOC_PTR(cdqb);
         if (cdqb) {
-                INIT_HLIST_NODE(&cdqb->dqb_hash);
-                INIT_LIST_HEAD(&cdqb->dqb_list);
+                CFS_INIT_HLIST_NODE(&cdqb->dqb_hash);
+                CFS_INIT_LIST_HEAD(&cdqb->dqb_list);
                 cdqb->dqb_id = id;
                 cdqb->dqb_type = type;
         }
@@ -1629,10 +1629,10 @@ static struct chk_dqblk *alloc_chkquot(qid_t id, int type)
 }
 
 static struct chk_dqblk *
-cqget(struct super_block *sb, struct hlist_head *hash, struct list_head *list,
-      qid_t id, int type, int first_check)
+cqget(struct super_block *sb, cfs_hlist_head_t *hash,
+      cfs_list_t *list, qid_t id, int type, int first_check)
 {
-        struct hlist_head *head = hash + chkquot_hash(id, type);
+        cfs_hlist_head_t *head = hash + chkquot_hash(id, type);
         struct if_dqblk dqb;
         struct chk_dqblk *cdqb;
         int rc;
@@ -1657,8 +1657,8 @@ cqget(struct super_block *sb, struct hlist_head *hash, struct list_head *list,
                 }
         }
 
-        hlist_add_head(&cdqb->dqb_hash, head);
-        list_add_tail(&cdqb->dqb_list, list);
+        cfs_hlist_add_head(&cdqb->dqb_hash, head);
+        cfs_list_add_tail(&cdqb->dqb_list, list);
 
         return cdqb;
 }
@@ -1737,10 +1737,10 @@ static __u32 ext3_itable_unused_count(struct super_block *sb,
 #endif
 
 struct qchk_ctxt {
-        struct hlist_head       qckt_hash[NR_DQHASH];        /* quotacheck hash */
-        struct list_head        qckt_list;                   /* quotacheck list */
+        cfs_hlist_head_t        qckt_hash[NR_DQHASH];      /* quotacheck hash */
+        cfs_list_t              qckt_list;                 /* quotacheck list */
         int                     qckt_first_check[MAXQUOTAS]; /* 1 if no old quotafile */
-        struct if_dqinfo        qckt_dqinfo[MAXQUOTAS];      /* old dqinfo */
+        struct if_dqinfo        qckt_dqinfo[MAXQUOTAS];    /* old dqinfo */
 };
 
 static int add_inode_quota(struct inode *inode, struct qchk_ctxt *qctxt,
@@ -1926,14 +1926,14 @@ static int prune_chkquots(struct super_block *sb,
         struct chk_dqblk *cdqb, *tmp;
         int rc;
 
-        list_for_each_entry_safe(cdqb, tmp, &qctxt->qckt_list, dqb_list) {
+        cfs_list_for_each_entry_safe(cdqb, tmp, &qctxt->qckt_list, dqb_list) {
                 if (!error) {
                         rc = commit_chkquot(sb, qctxt, cdqb);
                         if (rc)
                                 error = rc;
                 }
-                hlist_del_init(&cdqb->dqb_hash);
-                list_del(&cdqb->dqb_list);
+                cfs_hlist_del_init(&cdqb->dqb_hash);
+                cfs_list_del(&cdqb->dqb_list);
                 OBD_FREE_PTR(cdqb);
         }
 
@@ -1964,8 +1964,8 @@ static int fsfilt_ext3_quotacheck(struct super_block *sb,
         }
 
         for (i = 0; i < NR_DQHASH; i++)
-                INIT_HLIST_HEAD(&qctxt->qckt_hash[i]);
-        INIT_LIST_HEAD(&qctxt->qckt_list);
+                CFS_INIT_HLIST_HEAD(&qctxt->qckt_hash[i]);
+        CFS_INIT_LIST_HEAD(&qctxt->qckt_list);
 
         for (i = 0; i < MAXQUOTAS; i++) {
                 if (!Q_TYPESET(oqc, i))
@@ -2053,7 +2053,7 @@ static int fsfilt_ext3_quotacheck(struct super_block *sb,
          * has limits but hasn't file) */
 #ifdef HAVE_QUOTA_SUPPORT
         for (i = 0; i < MAXQUOTAS; i++) {
-                struct list_head id_list;
+                cfs_list_t id_list;
                 struct dquot_id *dqid, *tmp;
 
                 if (!Q_TYPESET(oqc, i))
@@ -2064,7 +2064,7 @@ static int fsfilt_ext3_quotacheck(struct super_block *sb,
 
 
                 LASSERT(sb_dqopt(sb)->files[i] != NULL);
-                INIT_LIST_HEAD(&id_list);
+                CFS_INIT_LIST_HEAD(&id_list);
 #ifndef KERNEL_SUPPORTS_QUOTA_READ
                 rc = lustre_get_qids(sb_dqopt(sb)->files[i], NULL, i, &id_list);
 #else
@@ -2073,8 +2073,8 @@ static int fsfilt_ext3_quotacheck(struct super_block *sb,
                 if (rc)
                         CERROR("read old limits failed. (rc:%d)\n", rc);
 
-                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
-                        list_del_init(&dqid->di_link);
+                cfs_list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
+                        cfs_list_del_init(&dqid->di_link);
 
                         if (!rc)
                                 cqget(sb, qctxt->qckt_hash, &qctxt->qckt_list,
@@ -2148,7 +2148,7 @@ static int fsfilt_ext3_quotainfo(struct lustre_quota_info *lqi, int type,
 }
 
 static int fsfilt_ext3_qids(struct file *file, struct inode *inode, int type,
-                            struct list_head *list)
+                            cfs_list_t *list)
 {
         return lustre_get_qids(file, inode, type, list);
 }
@@ -2172,9 +2172,9 @@ static int fsfilt_ext3_dquot(struct lustre_dquot *dquot, int cmd)
                     dquot->dq_dqb.dqb_isoftlimit ||
                     dquot->dq_dqb.dqb_bhardlimit ||
                     dquot->dq_dqb.dqb_bsoftlimit)
-                        clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+                        cfs_clear_bit(DQ_FAKE_B, &dquot->dq_flags);
                 else
-                        set_bit(DQ_FAKE_B, &dquot->dq_flags);
+                        cfs_set_bit(DQ_FAKE_B, &dquot->dq_flags);
 
                 rc = lustre_commit_dquot(dquot);
                 if (rc >= 0)
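
The quotacheck code above hashes per-id records into cfs_hlist_head_t buckets and also threads them on a cfs_list_t so they can be pruned in order. The bucket lookup/insert pair in isolation, sketched with illustrative names:

    #include <libcfs/libcfs.h>

    #define MY_HASH_SIZE 64

    struct my_rec {
            cfs_hlist_node_t mr_hash;   /* bucket linkage */
            unsigned int     mr_id;     /* lookup key */
    };

    static struct my_rec *my_find(cfs_hlist_head_t *head, unsigned int id)
    {
            cfs_hlist_node_t *node;
            struct my_rec *rec;

            cfs_hlist_for_each(node, head) {
                    rec = cfs_hlist_entry(node, struct my_rec, mr_hash);
                    if (rec->mr_id == id)
                            return rec;
            }
            return NULL;
    }

    static void my_insert(cfs_hlist_head_t *hash, struct my_rec *rec)
    {
            cfs_hlist_head_t *head = hash + (rec->mr_id % MY_HASH_SIZE);

            CFS_INIT_HLIST_NODE(&rec->mr_hash);
            cfs_hlist_add_head(&rec->mr_hash, head);
    }
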
index 83db369..8527dbf 100644
@@ -97,7 +97,7 @@ static int fsfilt_reiserfs_setattr(struct dentry *dentry, void *handle,
         struct inode *inode = dentry->d_inode;
         int rc;
 
-        lock_kernel();
+        cfs_lock_kernel();
 
         /* A _really_ horrible hack to avoid removing the data stored
          * in the block pointers; this is really the "small" stripe MD data.
@@ -131,7 +131,7 @@ static int fsfilt_reiserfs_setattr(struct dentry *dentry, void *handle,
                         rc = inode_setattr(inode, iattr);
         }
 
-        unlock_kernel();
+        cfs_unlock_kernel();
 
         return rc;
 }
@@ -167,9 +167,9 @@ static int fsfilt_reiserfs_add_journal_cb(struct obd_device *obd,
 {
         static unsigned long next = 0;
 
-        if (time_after(jiffies, next)) {
+        if (cfs_time_after(jiffies, next)) {
                 CERROR("no journal callback kernel patch, faking it...\n");
-                next = jiffies + 300 * HZ;
+                next = jiffies + 300 * CFS_HZ;
         }
 
         cb_func(obd, last_rcvd, cb_data, 0);
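
The reiserfs hunk is the usual rate-limited nag, now written with cfs_time_after() and CFS_HZ. In isolation (jiffies and CERROR are assumed from the surrounding kernel/libcfs environment):

    #include <libcfs/libcfs.h>

    /* warn at most once every 300 seconds */
    static void nag_occasionally(void)
    {
            static unsigned long next;

            if (cfs_time_after(jiffies, next)) {
                    CERROR("no journal callback kernel patch, faking it...\n");
                    next = jiffies + 300 * CFS_HZ;
            }
    }
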
index f675e4b..58cc7b7 100644
@@ -271,7 +271,7 @@ ssize_t write_blk(struct file *filp, uint blk, dqbuf_t buf)
 
 void lustre_mark_info_dirty(struct lustre_mem_dqinfo *info)
 {
-        set_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
+        cfs_set_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
 }
 
 /**
@@ -837,7 +837,7 @@ int lustre_read_dquot(struct lustre_dquot *dquot)
                                "VFS: Can't read quota structure for id %u.\n",
                                dquot->dq_id);
                 dquot->dq_off = 0;
-                set_bit(DQ_FAKE_B, &dquot->dq_flags);
+                cfs_set_bit(DQ_FAKE_B, &dquot->dq_flags);
                 memset(&dquot->dq_dqb, 0, sizeof(struct lustre_mem_dqblk));
                 ret = offset;
         } else {
@@ -877,12 +877,12 @@ int lustre_commit_dquot(struct lustre_dquot *dquot)
         struct inode *inode = dquot->dq_info->qi_files[dquot->dq_type]->f_dentry->d_inode;
 
         /* always clear the flag so we don't loop on an IO error... */
-        clear_bit(DQ_MOD_B, &dquot->dq_flags);
+        cfs_clear_bit(DQ_MOD_B, &dquot->dq_flags);
 
         /* The block/inode usage in admin quotafile isn't the real usage
          * over all cluster, so keep the fake dquot entry on disk is
          * meaningless, just remove it */
-        if (test_bit(DQ_FAKE_B, &dquot->dq_flags)) {
+        if (cfs_test_bit(DQ_FAKE_B, &dquot->dq_flags)) {
                 handle = lustre_quota_journal_start(inode, 1);
                 rc = lustre_delete_dquot(dquot, version);
                 lustre_quota_journal_stop(handle);
@@ -956,7 +956,7 @@ int lustre_init_quota_info(struct lustre_quota_info *lqi, int type)
 }
 
 static int walk_block_dqentry(struct file *filp, struct inode *inode, int type,
-                              uint blk, struct list_head *list)
+                              uint blk, cfs_list_t *list)
 {
         dqbuf_t buf = getdqbuf();
         loff_t ret = 0;
@@ -964,7 +964,7 @@ static int walk_block_dqentry(struct file *filp, struct inode *inode, int type,
             (struct lustre_disk_dqdbheader *)buf;
         struct dqblk *blk_item;
         struct dqblk *pos;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
         if (!buf)
                 return -ENOMEM;
@@ -977,12 +977,12 @@ static int walk_block_dqentry(struct file *filp, struct inode *inode, int type,
         if (!le32_to_cpu(dqhead->dqdh_entries))
                 goto out_buf;
 
-        if (list_empty(list)) {
+        if (cfs_list_empty(list)) {
                 tmp = list;
                 goto done;
         }
 
-        list_for_each_entry(pos, list, link) {
+        cfs_list_for_each_entry(pos, list, link) {
                 if (blk == pos->blk)    /* we got this blk already */
                         goto out_buf;
                 if (blk > pos->blk)
@@ -997,9 +997,9 @@ done:
                 goto out_buf;
         }
         blk_item->blk = blk;
-        INIT_LIST_HEAD(&blk_item->link);
+        CFS_INIT_LIST_HEAD(&blk_item->link);
 
-        list_add_tail(&blk_item->link, tmp);
+        cfs_list_add_tail(&blk_item->link, tmp);
 
 out_buf:
         freedqbuf(buf);
@@ -1007,7 +1007,7 @@ out_buf:
 }
 
 int walk_tree_dqentry(struct file *filp, struct inode *inode, int type, 
-                      uint blk, int depth, struct list_head *list)
+                      uint blk, int depth, cfs_list_t *list)
 {
         dqbuf_t buf = getdqbuf();
         loff_t ret = 0;
@@ -1042,9 +1042,9 @@ out_buf:
  * Walk through the quota file (v2 format) to get all ids with quota limit
  */
 int lustre_get_qids(struct file *fp, struct inode *inode, int type,
-                    struct list_head *list)
+                    cfs_list_t *list)
 {
-        struct list_head blk_list;
+        cfs_list_t blk_list;
         struct dqblk *blk_item, *tmp;
         dqbuf_t buf = NULL;
         struct lustre_disk_dqblk_v2 *ddquot;
@@ -1062,18 +1062,18 @@ int lustre_get_qids(struct file *fp, struct inode *inode, int type,
                 RETURN(-EINVAL);
         }
 
-        if (!list_empty(list)) {
+        if (!cfs_list_empty(list)) {
                 CDEBUG(D_ERROR, "not empty list\n");
                 RETURN(-EINVAL);
         }
 
-        INIT_LIST_HEAD(&blk_list);
+        CFS_INIT_LIST_HEAD(&blk_list);
         rc = walk_tree_dqentry(fp, inode, type, LUSTRE_DQTREEOFF, 0, &blk_list);
         if (rc) {
                 CDEBUG(D_ERROR, "walk through quota file failed!(%d)\n", rc);
                 GOTO(out_free, rc);
         }
-        if (list_empty(&blk_list))
+        if (cfs_list_empty(&blk_list))
                 RETURN(0);
 
         buf = getdqbuf();
@@ -1081,7 +1081,7 @@ int lustre_get_qids(struct file *fp, struct inode *inode, int type,
                 RETURN(-ENOMEM);
         ddquot = (struct lustre_disk_dqblk_v2 *)GETENTRIES(buf, version);
 
-        list_for_each_entry(blk_item, &blk_list, link) {
+        cfs_list_for_each_entry(blk_item, &blk_list, link) {
                 loff_t ret = 0;
                 int i, dqblk_sz = lustre_disk_dqblk_sz[version];
 
@@ -1109,14 +1109,14 @@ int lustre_get_qids(struct file *fp, struct inode *inode, int type,
                         dqid->di_flag |= le64_to_cpu(ddquot[i].dqb_bhardlimit) ?
                                          QB_SET : 0;
 
-                        INIT_LIST_HEAD(&dqid->di_link);
-                        list_add(&dqid->di_link, list);
+                        CFS_INIT_LIST_HEAD(&dqid->di_link);
+                        cfs_list_add(&dqid->di_link, list);
                 }
         }
 
 out_free:
-        list_for_each_entry_safe(blk_item, tmp, &blk_list, link) {
-                list_del_init(&blk_item->link);
+        cfs_list_for_each_entry_safe(blk_item, tmp, &blk_list, link) {
+                cfs_list_del_init(&blk_item->link);
                 kfree(blk_item);
         }
         if (buf)
index d1aab02..5ca0f96 100644
@@ -143,10 +143,11 @@ typedef char *dqbuf_t;
 
 #define MAX_UL (0xffffffffUL)
 
-#define lustre_info_dirty(info) test_bit(DQF_INFO_DIRTY_B, &(info)->dqi_flags)
+#define lustre_info_dirty(info) \
+        cfs_test_bit(DQF_INFO_DIRTY_B, &(info)->dqi_flags)
 
 struct dqblk {
-        struct list_head link;
+        cfs_list_t link;
         uint blk;
 };
 
@@ -175,7 +176,7 @@ int insert_free_dqentry(struct file *filp,
 ssize_t quota_read(struct file *file, struct inode *inode, int type,
                    uint blk, dqbuf_t buf);
 int walk_tree_dqentry(struct file *filp, struct inode *inode, int type,
-                      uint blk, int depth, struct list_head *list);
+                      uint blk, int depth, cfs_list_t *list);
 int check_quota_file(struct file *f, struct inode *inode, int type,
                      lustre_quota_version_t version);
 int lustre_check_quota_file(struct lustre_quota_info *lqi, int type);
@@ -183,7 +184,7 @@ int lustre_read_dquot(struct lustre_dquot *dquot);
 int lustre_commit_dquot(struct lustre_dquot *dquot);
 int lustre_init_quota_info(struct lustre_quota_info *lqi, int type);
 int lustre_get_qids(struct file *fp, struct inode *inode, int type,
-                    struct list_head *list);
+                    cfs_list_t *list);
 ssize_t lustre_read_quota(struct file *f, struct inode *inode, int type,
                           char *buf, int count, loff_t pos);
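
Quota entries track their state in an unsigned long flags word through the atomic bit helpers; lustre_commit_dquot() above clears DQ_MOD_B before writing precisely so an IO error cannot leave the entry perpetually dirty. The idiom as a sketch (the bit names here are illustrative stand-ins for DQ_MOD_B/DQ_FAKE_B):

    #include <libcfs/libcfs.h>

    #define MY_MOD_B   0   /* entry has uncommitted changes */
    #define MY_FAKE_B  1   /* entry carries no real limits */

    static int my_commit(unsigned long *flags)
    {
            /* clear first so an IO error cannot make us loop forever */
            cfs_clear_bit(MY_MOD_B, flags);

            if (cfs_test_bit(MY_FAKE_B, flags))
                    return 0;       /* fake entries are deleted, not written */

            /* ... write the on-disk record here ... */
            return 1;
    }
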
 
index 95b59d4..6c6c6e0 100644
@@ -66,7 +66,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
                        obd_memory_sum(),
                        obd_pages_sum() << CFS_PAGE_SHIFT,
                        obd_pages_sum(),
-                       atomic_read(&libcfs_kmemory));
+                       cfs_atomic_read(&libcfs_kmemory));
                 return 1;
         }
         return 0;
@@ -75,13 +75,13 @@ EXPORT_SYMBOL(obd_alloc_fail);
 
 int __obd_fail_check_set(__u32 id, __u32 value, int set)
 {
-        static atomic_t obd_fail_count = ATOMIC_INIT(0);
+        static cfs_atomic_t obd_fail_count = CFS_ATOMIC_INIT(0);
 
         LASSERT(!(id & OBD_FAIL_ONCE));
 
         if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
             (OBD_FAILED | OBD_FAIL_ONCE)) {
-                atomic_set(&obd_fail_count, 0); /* paranoia */
+                cfs_atomic_set(&obd_fail_count, 0); /* paranoia */
                 return 0;
         }
 
@@ -93,18 +93,18 @@ int __obd_fail_check_set(__u32 id, __u32 value, int set)
 
         /* Skip the first obd_fail_val, then fail */
         if (obd_fail_loc & OBD_FAIL_SKIP) {
-                if (atomic_inc_return(&obd_fail_count) <= obd_fail_val)
+                if (cfs_atomic_inc_return(&obd_fail_count) <= obd_fail_val)
                         return 0;
         }
 
         /* Fail obd_fail_val times, overridden by FAIL_ONCE */
         if (obd_fail_loc & OBD_FAIL_SOME &&
             (!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
-                int count = atomic_inc_return(&obd_fail_count);
+                int count = cfs_atomic_inc_return(&obd_fail_count);
 
                 if (count >= obd_fail_val) {
-                        set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
-                        atomic_set(&obd_fail_count, 0);
+                        cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
+                        cfs_atomic_set(&obd_fail_count, 0);
                         /* we are lost race to increase obd_fail_count */
                         if (count > obd_fail_val)
                                 return 0;
@@ -113,10 +113,10 @@ int __obd_fail_check_set(__u32 id, __u32 value, int set)
 
         if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
             (value & OBD_FAIL_ONCE))
-                set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
+                cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
 
         /* Lost race to set OBD_FAILED_BIT. */
-        if (test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
+        if (cfs_test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
                 /* If OBD_FAIL_ONCE is valid, only one process can fail,
                  * otherwise multi-process can fail at the same time. */
                 if (obd_fail_loc & OBD_FAIL_ONCE)
@@ -149,9 +149,9 @@ int __obd_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
         if (ret) {
                 CERROR("obd_fail_timeout id %x sleeping for %dms\n",
                        id, ms);
-                cfs_schedule_timeout(CFS_TASK_UNINT,
-                                     cfs_time_seconds(ms) / 1000);
-                set_current_state(CFS_TASK_RUNNING);
+                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+                                                   cfs_time_seconds(ms) / 1000);
+                cfs_set_current_state(CFS_TASK_RUNNING);
                 CERROR("obd_fail_timeout id %x awake\n", id);
         }
         return ret;
@@ -173,7 +173,7 @@ void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
         smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
 
         percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
-        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
+        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
         percpu_cntr->lc_count++;
 
         if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
@@ -188,7 +188,7 @@ void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
                 if (amount > percpu_cntr->lc_max)
                         percpu_cntr->lc_max = amount;
         }
-        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
+        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
         lprocfs_stats_unlock(stats);
 }
 EXPORT_SYMBOL(lprocfs_counter_add);
@@ -207,7 +207,7 @@ void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
         smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
 
         percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
-        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
+        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
         if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                 /*
                  * currently lprocfs_count_add() can only be called in thread
@@ -222,7 +222,7 @@ void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
                 else
                         percpu_cntr->lc_sum -= amount;
         }
-        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
+        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
         lprocfs_stats_unlock(stats);
 }
 EXPORT_SYMBOL(lprocfs_counter_sub);
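
lprocfs counters stay lockless for readers by bracketing every update between two atomics: la_entry is bumped before the fields change and la_exit after, so a reader can tell whether it raced with a writer. The writer half, sketched with illustrative names:

    #include <libcfs/libcfs.h>

    struct my_counter {
            cfs_atomic_t mc_entry;   /* bumped before an update */
            cfs_atomic_t mc_exit;    /* bumped after an update */
            __u64        mc_count;
            __u64        mc_sum;
    };

    static void my_counter_add(struct my_counter *c, long amount)
    {
            cfs_atomic_inc(&c->mc_entry);   /* announce the update */
            c->mc_count++;
            c->mc_sum += amount;
            cfs_atomic_inc(&c->mc_exit);    /* update is complete */
    }
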
index 82b2d66..7e01051 100644
@@ -67,7 +67,7 @@
 __u64 obd_max_pages = 0;
 __u64 obd_max_alloc = 0;
 struct lprocfs_stats *obd_memory = NULL;
-spinlock_t obd_updatemax_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t obd_updatemax_lock = CFS_SPIN_LOCK_UNLOCKED;
 /* refine later and change to seqlock or simlar from libcfs */
 
 /* Debugging check only needed during development */
@@ -117,8 +117,8 @@ void push_ctxt(struct lvfs_run_ctxt *save, struct lvfs_run_ctxt *new_ctx,
         OBD_SET_CTXT_MAGIC(save);
 
         save->fs = get_fs();
-        LASSERT(atomic_read(&cfs_fs_pwd(current->fs)->d_count));
-        LASSERT(atomic_read(&new_ctx->pwd->d_count));
+        LASSERT(cfs_atomic_read(&cfs_fs_pwd(current->fs)->d_count));
+        LASSERT(cfs_atomic_read(&new_ctx->pwd->d_count));
         save->pwd = dget(cfs_fs_pwd(current->fs));
         save->pwdmnt = mntget(cfs_fs_mnt(current->fs));
         save->luc.luc_umask = current->fs->umask;
@@ -391,7 +391,7 @@ static int l_filldir(void *__buf, const char *name, int namlen, loff_t offset,
         if (!dirent)
                 return -ENOMEM;
 
-        list_add_tail(&dirent->lld_list, buf->lrc_list);
+        cfs_list_add_tail(&dirent->lld_list, buf->lrc_list);
 
         buf->lrc_dirent = dirent;
         dirent->lld_ino = ino;
@@ -401,7 +401,7 @@ static int l_filldir(void *__buf, const char *name, int namlen, loff_t offset,
         return 0;
 }
 
-long l_readdir(struct file *file, struct list_head *dentry_list)
+long l_readdir(struct file *file, cfs_list_t *dentry_list)
 {
         struct l_linux_dirent *lastdirent;
         struct l_readdir_callback buf;
@@ -528,12 +528,12 @@ void obd_update_maxusage()
         max1 = obd_pages_sum();
         max2 = obd_memory_sum();
 
-        spin_lock(&obd_updatemax_lock);
+        cfs_spin_lock(&obd_updatemax_lock);
         if (max1 > obd_max_pages)
                 obd_max_pages = max1;
         if (max2 > obd_max_alloc)
                 obd_max_alloc = max2;
-        spin_unlock(&obd_updatemax_lock);
+        cfs_spin_unlock(&obd_updatemax_lock);
 
 }
 
@@ -541,9 +541,9 @@ __u64 obd_memory_max(void)
 {
         __u64 ret;
 
-        spin_lock(&obd_updatemax_lock);
+        cfs_spin_lock(&obd_updatemax_lock);
         ret = obd_max_alloc;
-        spin_unlock(&obd_updatemax_lock);
+        cfs_spin_unlock(&obd_updatemax_lock);
 
         return ret;
 }
@@ -552,9 +552,9 @@ __u64 obd_pages_max(void)
 {
         __u64 ret;
 
-        spin_lock(&obd_updatemax_lock);
+        cfs_spin_lock(&obd_updatemax_lock);
         ret = obd_max_pages;
-        spin_unlock(&obd_updatemax_lock);
+        cfs_spin_unlock(&obd_updatemax_lock);
 
         return ret;
 }
@@ -574,7 +574,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
         if (!lc)
                 RETURN(0);
         do {
-                centry = atomic_read(&lc->lc_cntl.la_entry);
+                centry = cfs_atomic_read(&lc->lc_cntl.la_entry);
 
                 switch (field) {
                         case LPROCFS_FIELDS_FLAGS_CONFIG:
@@ -601,8 +601,8 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
                         default:
                                 break;
                 };
-        } while (centry != atomic_read(&lc->lc_cntl.la_entry) &&
-                 centry != atomic_read(&lc->lc_cntl.la_exit));
+        } while (centry != cfs_atomic_read(&lc->lc_cntl.la_entry) &&
+                 centry != cfs_atomic_read(&lc->lc_cntl.la_exit));
 
         RETURN(ret);
 }
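
And the matching reader half from lprocfs_read_helper(): snapshot la_entry, read the field, and retry, using the same termination test as the original (the loop ends once the snapshot matches either the entry or the exit count). Sketch, with the same illustrative struct as the writer half above:

    #include <libcfs/libcfs.h>

    struct my_counter {                  /* as in the writer sketch */
            cfs_atomic_t mc_entry;
            cfs_atomic_t mc_exit;
            __u64        mc_count;
            __u64        mc_sum;
    };

    static __u64 my_counter_read_sum(struct my_counter *c)
    {
            int centry;
            __u64 ret;

            do {
                    centry = cfs_atomic_read(&c->mc_entry);
                    ret = c->mc_sum;
            } while (centry != cfs_atomic_read(&c->mc_entry) &&
                     centry != cfs_atomic_read(&c->mc_exit));
            return ret;
    }
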
index 53fdb2a..193fc8e 100644
@@ -47,7 +47,7 @@
 
 #ifndef __KERNEL__
 #include <liblustre.h>
-#define get_random_bytes(val, size)     (*val) = 0
+#define cfs_get_random_bytes(val, size)     (*val) = 0
 #endif
 #include <obd_class.h>
 #if defined(HAVE_LINUX_RANDOM_H)
@@ -112,7 +112,7 @@ void ll_get_random_bytes(void *buf, int size)
 
         rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size);
         if (rem) {
-                get_random_bytes(&tmp, sizeof(tmp));
+                cfs_get_random_bytes(&tmp, sizeof(tmp));
                 tmp ^= ll_rand();
                 memcpy(buf, &tmp, rem);
                 p = buf + rem;
@@ -120,14 +120,14 @@ void ll_get_random_bytes(void *buf, int size)
         }
 
         while (size >= sizeof(int)) {
-                get_random_bytes(&tmp, sizeof(tmp));
+                cfs_get_random_bytes(&tmp, sizeof(tmp));
                 *p = ll_rand() ^ tmp;
                 size -= sizeof(int);
                 p++;
         }
         buf = p;
         if (size) {
-                get_random_bytes(&tmp, sizeof(tmp));
+                cfs_get_random_bytes(&tmp, sizeof(tmp));
                 tmp ^= ll_rand();
                 memcpy(buf, &tmp, size);
         }
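
ll_get_random_bytes() mixes cfs_get_random_bytes() output with ll_rand() a word at a time; the userspace build stubs the former to zero (see the macro above), leaving only the pseudo-random stream. A minimal caller, sketched:

    #include <libcfs/libcfs.h>

    /* draw a non-zero 32-bit cookie */
    static unsigned int draw_cookie(void)
    {
            unsigned int tmp = 0;

            cfs_get_random_bytes(&tmp, sizeof(tmp));
            if (tmp == 0)
                    tmp = 1;    /* callers here want a non-zero value */
            return tmp;
    }
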
index b360570..1f46d9a 100644
@@ -230,7 +230,7 @@ static struct lustre_dquot *get_rand_dquot(struct lustre_quota_info *lqi)
         if (dquot == NULL)
                 return NULL;
 
-        get_random_bytes(&rand, sizeof(rand));
+        ll_get_random_bytes(&rand, sizeof(rand));
         if (!rand)
                 rand = 1000;
 
@@ -273,7 +273,7 @@ static int write_check_dquot(struct lustre_quota_info *lqi)
                 GOTO(out, rc);
         }
 
-        clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+        cfs_clear_bit(DQ_FAKE_B, &dquot->dq_flags);
         /* for already exists entry, we rewrite it */
         rc = lustre_commit_dquot(dquot);
         if (rc) {
@@ -308,7 +308,7 @@ static int quotfmt_test_3(struct lustre_quota_info *lqi)
         if (dquot == NULL)
                 RETURN(-ENOMEM);
       repeat:
-        clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+        cfs_clear_bit(DQ_FAKE_B, &dquot->dq_flags);
         /* write a new dquot */
         rc = lustre_commit_dquot(dquot);
         if (rc) {
@@ -324,13 +324,13 @@ static int quotfmt_test_3(struct lustre_quota_info *lqi)
                 CERROR("read dquot failed! (rc:%d)\n", rc);
                 GOTO(out, rc);
         }
-        if (!dquot->dq_off || test_bit(DQ_FAKE_B, &dquot->dq_flags)) {
+        if (!dquot->dq_off || cfs_test_bit(DQ_FAKE_B, &dquot->dq_flags)) {
                 CERROR("the dquot isn't committed\n");
                 GOTO(out, rc = -EINVAL);
         }
 
         /* remove this dquot */
-        set_bit(DQ_FAKE_B, &dquot->dq_flags);
+        cfs_set_bit(DQ_FAKE_B, &dquot->dq_flags);
         dquot->dq_dqb.dqb_curspace = 0;
         dquot->dq_dqb.dqb_curinodes = 0;
         rc = lustre_commit_dquot(dquot);
@@ -340,14 +340,14 @@ static int quotfmt_test_3(struct lustre_quota_info *lqi)
         }
 
         /* check if the dquot is really removed */
-        clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+        cfs_clear_bit(DQ_FAKE_B, &dquot->dq_flags);
         dquot->dq_off = 0;
         rc = lustre_read_dquot(dquot);
         if (rc) {
                 CERROR("read dquot failed! (rc:%d)\n", rc);
                 GOTO(out, rc);
         }
-        if (!test_bit(DQ_FAKE_B, &dquot->dq_flags) || dquot->dq_off) {
+        if (!cfs_test_bit(DQ_FAKE_B, &dquot->dq_flags) || dquot->dq_off) {
                 CERROR("the dquot isn't removed!\n");
                 GOTO(out, rc = -EINVAL);
         }
@@ -386,18 +386,18 @@ static int quotfmt_test_5(struct lustre_quota_info *lqi)
         int i, rc = 0;
 
         for (i = USRQUOTA; i < MAXQUOTAS && !rc; i++) {
-                struct list_head list;
+                cfs_list_t list;
                 struct dquot_id *dqid, *tmp;
 
-                INIT_LIST_HEAD(&list);
+                CFS_INIT_LIST_HEAD(&list);
                 rc = lustre_get_qids(lqi->qi_files[i], NULL, i, &list);
                 if (rc) {
                         CERROR("%s get all %ss (rc:%d):\n",
                                rc ? "error" : "success",
                                i == USRQUOTA ? "uid" : "gid", rc);
                 }
-                list_for_each_entry_safe(dqid, tmp, &list, di_link) {
-                        list_del_init(&dqid->di_link);
+                cfs_list_for_each_entry_safe(dqid, tmp, &list, di_link) {
+                        cfs_list_del_init(&dqid->di_link);
                         if (rc == 0)
                                 CDEBUG(D_INFO, "%d ", dqid->di_id);
                         kfree(dqid);
index 084e6ef..eb2e825 100644
@@ -74,10 +74,10 @@ static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                 return NULL;
 
         UC_CACHE_SET_NEW(entry);
-        INIT_LIST_HEAD(&entry->ue_hash);
+        CFS_INIT_LIST_HEAD(&entry->ue_hash);
         entry->ue_key = key;
-        atomic_set(&entry->ue_refcount, 0);
-        init_waitqueue_head(&entry->ue_waitq);
+        cfs_atomic_set(&entry->ue_refcount, 0);
+        cfs_waitq_init(&entry->ue_waitq);
         if (cache->uc_ops->init_entry)
                 cache->uc_ops->init_entry(entry, args);
         return entry;
@@ -90,7 +90,7 @@ static void free_entry(struct upcall_cache *cache,
         if (cache->uc_ops->free_entry)
                 cache->uc_ops->free_entry(cache, entry);
 
-        list_del(&entry->ue_hash);
+        cfs_list_del(&entry->ue_hash);
         CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
                entry, entry->ue_key);
         OBD_FREE_PTR(entry);
@@ -124,13 +124,13 @@ static inline int downcall_compare(struct upcall_cache *cache,
 
 static inline void get_entry(struct upcall_cache_entry *entry)
 {
-        atomic_inc(&entry->ue_refcount);
+        cfs_atomic_inc(&entry->ue_refcount);
 }
 
 static inline void put_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
 {
-        if (atomic_dec_and_test(&entry->ue_refcount) &&
+        if (cfs_atomic_dec_and_test(&entry->ue_refcount) &&
             (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                 free_entry(cache, entry);
         }
@@ -140,21 +140,21 @@ static int check_unlink_entry(struct upcall_cache *cache,
                               struct upcall_cache_entry *entry)
 {
         if (UC_CACHE_IS_VALID(entry) &&
-            time_before(jiffies, entry->ue_expire))
+            cfs_time_before(jiffies, entry->ue_expire))
                 return 0;
 
         if (UC_CACHE_IS_ACQUIRING(entry)) {
-                if (time_before(jiffies, entry->ue_acquire_expire))
+                if (cfs_time_before(jiffies, entry->ue_acquire_expire))
                         return 0;
 
                 UC_CACHE_SET_EXPIRED(entry);
-                wake_up_all(&entry->ue_waitq);
+                cfs_waitq_broadcast(&entry->ue_waitq);
         } else if (!UC_CACHE_IS_INVALID(entry)) {
                 UC_CACHE_SET_EXPIRED(entry);
         }
 
-        list_del_init(&entry->ue_hash);
-        if (!atomic_read(&entry->ue_refcount))
+        cfs_list_del_init(&entry->ue_hash);
+        if (!cfs_atomic_read(&entry->ue_refcount))
                 free_entry(cache, entry);
         return 1;
 }
@@ -170,8 +170,8 @@ struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                   __u64 key, void *args)
 {
         struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
-        struct list_head *head;
-        wait_queue_t wait;
+        cfs_list_t *head;
+        cfs_waitlink_t wait;
         int rc, found;
         ENTRY;
 
@@ -180,8 +180,8 @@ struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 find_again:
         found = 0;
-        spin_lock(&cache->uc_lock);
-        list_for_each_entry_safe(entry, next, head, ue_hash) {
+        cfs_spin_lock(&cache->uc_lock);
+        cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
                 /* check invalid & expired items */
                 if (check_unlink_entry(cache, entry))
                         continue;
@@ -193,7 +193,7 @@ find_again:
 
         if (!found) { /* didn't find it */
                 if (!new) {
-                        spin_unlock(&cache->uc_lock);
+                        cfs_spin_unlock(&cache->uc_lock);
                         new = alloc_entry(cache, key, args);
                         if (!new) {
                                 CERROR("fail to alloc entry\n");
@@ -201,7 +201,7 @@ find_again:
                         }
                         goto find_again;
                 } else {
-                        list_add(&new->ue_hash, head);
+                        cfs_list_add(&new->ue_hash, head);
                         entry = new;
                 }
         } else {
@@ -209,7 +209,7 @@ find_again:
                         free_entry(cache, new);
                         new = NULL;
                 }
-                list_move(&entry->ue_hash, head);
+                cfs_list_move(&entry->ue_hash, head);
         }
         get_entry(entry);
 
@@ -218,9 +218,9 @@ find_again:
                 UC_CACHE_SET_ACQUIRING(entry);
                 UC_CACHE_CLEAR_NEW(entry);
                 entry->ue_acquire_expire = jiffies + cache->uc_acquire_expire;
-                spin_unlock(&cache->uc_lock);
+                cfs_spin_unlock(&cache->uc_lock);
                 rc = refresh_entry(cache, entry);
-                spin_lock(&cache->uc_lock);
+                cfs_spin_lock(&cache->uc_lock);
                 if (rc < 0) {
                         UC_CACHE_CLEAR_ACQUIRING(entry);
                         UC_CACHE_SET_INVALID(entry);
@@ -237,18 +237,20 @@ find_again:
         if (UC_CACHE_IS_ACQUIRING(entry)) {
                 unsigned long expiry = jiffies + cache->uc_acquire_expire;
 
-                init_waitqueue_entry(&wait, current);
-                add_wait_queue(&entry->ue_waitq, &wait);
-                set_current_state(TASK_INTERRUPTIBLE);
-                spin_unlock(&cache->uc_lock);
+                cfs_waitlink_init(&wait);
+                cfs_waitq_add(&entry->ue_waitq, &wait);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_spin_unlock(&cache->uc_lock);
 
-                schedule_timeout(cache->uc_acquire_expire);
+                cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, 
+                                    cache->uc_acquire_expire);
 
-                spin_lock(&cache->uc_lock);
-                remove_wait_queue(&entry->ue_waitq, &wait);
+                cfs_spin_lock(&cache->uc_lock);
+                cfs_waitq_del(&entry->ue_waitq, &wait);
                 if (UC_CACHE_IS_ACQUIRING(entry)) {
                         /* we're interrupted or upcall failed in the middle */
-                        rc = time_before(jiffies, expiry) ? -EINTR : -ETIMEDOUT;
+                        rc = cfs_time_before(jiffies, expiry) ?
+                                -EINTR : -ETIMEDOUT;
                         put_entry(cache, entry);
                         CERROR("acquire timeout exceeded for key "LPU64
                                "\n", entry->ue_key);
@@ -275,7 +277,7 @@ find_again:
                  */
                 if (entry != new) {
                         put_entry(cache, entry);
-                        spin_unlock(&cache->uc_lock);
+                        cfs_spin_unlock(&cache->uc_lock);
                         new = NULL;
                         goto find_again;
                 }
@@ -283,7 +285,7 @@ find_again:
 
         /* Now we know it's good */
 out:
-        spin_unlock(&cache->uc_lock);
+        cfs_spin_unlock(&cache->uc_lock);
         RETURN(entry);
 }
 EXPORT_SYMBOL(upcall_cache_get_entry);
@@ -298,10 +300,10 @@ void upcall_cache_put_entry(struct upcall_cache *cache,
                 return;
         }
 
-        LASSERT(atomic_read(&entry->ue_refcount) > 0);
-        spin_lock(&cache->uc_lock);
+        LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+        cfs_spin_lock(&cache->uc_lock);
         put_entry(cache, entry);
-        spin_unlock(&cache->uc_lock);
+        cfs_spin_unlock(&cache->uc_lock);
         EXIT;
 }
 EXPORT_SYMBOL(upcall_cache_put_entry);
@@ -310,7 +312,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                           void *args)
 {
         struct upcall_cache_entry *entry = NULL;
-        struct list_head *head;
+        cfs_list_t *head;
         int found = 0, rc = 0;
         ENTRY;
 
@@ -318,8 +320,8 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
 
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 
-        spin_lock(&cache->uc_lock);
-        list_for_each_entry(entry, head, ue_hash) {
+        cfs_spin_lock(&cache->uc_lock);
+        cfs_list_for_each_entry(entry, head, ue_hash) {
                 if (downcall_compare(cache, entry, key, args) == 0) {
                         found = 1;
                         get_entry(entry);
@@ -331,7 +333,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                 CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                        cache->uc_name, key);
                 /* haven't found, it's possible */
-                spin_unlock(&cache->uc_lock);
+                cfs_spin_unlock(&cache->uc_lock);
                 RETURN(-EINVAL);
         }
 
@@ -353,10 +355,10 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                 GOTO(out, rc = -EINVAL);
         }
 
-        spin_unlock(&cache->uc_lock);
+        cfs_spin_unlock(&cache->uc_lock);
         if (cache->uc_ops->parse_downcall)
                 rc = cache->uc_ops->parse_downcall(cache, entry, args);
-        spin_lock(&cache->uc_lock);
+        cfs_spin_lock(&cache->uc_lock);
         if (rc)
                 GOTO(out, rc);
 
@@ -367,11 +369,11 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
 out:
         if (rc) {
                 UC_CACHE_SET_INVALID(entry);
-                list_del_init(&entry->ue_hash);
+                cfs_list_del_init(&entry->ue_hash);
         }
         UC_CACHE_CLEAR_ACQUIRING(entry);
-        spin_unlock(&cache->uc_lock);
-        wake_up_all(&entry->ue_waitq);
+        cfs_spin_unlock(&cache->uc_lock);
+        cfs_waitq_broadcast(&entry->ue_waitq);
         put_entry(cache, entry);
 
         RETURN(rc);
@@ -384,19 +386,19 @@ static void cache_flush(struct upcall_cache *cache, int force)
         int i;
         ENTRY;
 
-        spin_lock(&cache->uc_lock);
+        cfs_spin_lock(&cache->uc_lock);
         for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
-                list_for_each_entry_safe(entry, next,
+                cfs_list_for_each_entry_safe(entry, next,
                                          &cache->uc_hashtable[i], ue_hash) {
-                        if (!force && atomic_read(&entry->ue_refcount)) {
+                        if (!force && cfs_atomic_read(&entry->ue_refcount)) {
                                 UC_CACHE_SET_EXPIRED(entry);
                                 continue;
                         }
-                        LASSERT(!atomic_read(&entry->ue_refcount));
+                        LASSERT(!cfs_atomic_read(&entry->ue_refcount));
                         free_entry(cache, entry);
                 }
         }
-        spin_unlock(&cache->uc_lock);
+        cfs_spin_unlock(&cache->uc_lock);
         EXIT;
 }
 
@@ -414,15 +416,15 @@ EXPORT_SYMBOL(upcall_cache_flush_all);
 
 void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
 {
-        struct list_head *head;
+        cfs_list_t *head;
         struct upcall_cache_entry *entry;
         int found = 0;
         ENTRY;
 
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 
-        spin_lock(&cache->uc_lock);
-        list_for_each_entry(entry, head, ue_hash) {
+        cfs_spin_lock(&cache->uc_lock);
+        cfs_list_for_each_entry(entry, head, ue_hash) {
                 if (upcall_compare(cache, entry, key, args) == 0) {
                         found = 1;
                         break;
@@ -433,14 +435,14 @@ void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
                 CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
                       "cur %lu, ex %ld/%ld\n",
                       cache->uc_name, entry, entry->ue_key,
-                      atomic_read(&entry->ue_refcount), entry->ue_flags,
+                      cfs_atomic_read(&entry->ue_refcount), entry->ue_flags,
                       get_seconds(), entry->ue_acquire_expire,
                       entry->ue_expire);
                 UC_CACHE_SET_EXPIRED(entry);
-                if (!atomic_read(&entry->ue_refcount))
+                if (!cfs_atomic_read(&entry->ue_refcount))
                         free_entry(cache, entry);
         }
-        spin_unlock(&cache->uc_lock);
+        cfs_spin_unlock(&cache->uc_lock);
 }
 EXPORT_SYMBOL(upcall_cache_flush_one);
 
@@ -455,15 +457,15 @@ struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
         if (!cache)
                 RETURN(ERR_PTR(-ENOMEM));
 
-        spin_lock_init(&cache->uc_lock);
-        rwlock_init(&cache->uc_upcall_rwlock);
+        cfs_spin_lock_init(&cache->uc_lock);
+        cfs_rwlock_init(&cache->uc_upcall_rwlock);
         for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
-                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
+                CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
         strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
         /* upcall pathname proc tunable */
         strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
-        cache->uc_entry_expire = 10 * 60 * HZ;
-        cache->uc_acquire_expire = 15 * HZ;
+        cache->uc_entry_expire = 10 * 60 * CFS_HZ;
+        cache->uc_acquire_expire = 15 * CFS_HZ;
         cache->uc_ops = ops;
 
         RETURN(cache);
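
The upcall cache is the one spot in this section where the patch changes more than names: the open-coded wait_queue_t sequence (init_waitqueue_entry/add_wait_queue/schedule_timeout/remove_wait_queue) becomes the portable cfs_waitlink_t sequence, with cfs_waitq_timedwait() doing the sleep and cfs_waitq_broadcast() replacing wake_up_all(). The sleep/wake pair condensed into a sketch (cfs_waitq_t is assumed here as the queue type behind cfs_waitq_init(); names are illustrative, and the caller is expected to hold mg_lock on entry to my_gate_wait()):

    #include <libcfs/libcfs.h>

    struct my_gate {
            cfs_spinlock_t mg_lock;
            cfs_waitq_t    mg_waitq;
            int            mg_ready;
    };

    static int my_gate_wait(struct my_gate *g, long timeout) /* mg_lock held */
    {
            cfs_waitlink_t wait;

            cfs_waitlink_init(&wait);
            cfs_waitq_add(&g->mg_waitq, &wait);
            cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
            cfs_spin_unlock(&g->mg_lock);

            cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);

            cfs_spin_lock(&g->mg_lock);
            cfs_waitq_del(&g->mg_waitq, &wait);
            return g->mg_ready;     /* 0 means we timed out or were woken early */
    }

    static void my_gate_open(struct my_gate *g)
    {
            cfs_spin_lock(&g->mg_lock);
            g->mg_ready = 1;
            cfs_spin_unlock(&g->mg_lock);
            cfs_waitq_broadcast(&g->mg_waitq);  /* was wake_up_all() */
    }
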
index 8aa1c76..bc91243 100644
@@ -154,7 +154,7 @@ static int lproc_mdc_wr_changelog(struct file *file, const char *buffer,
         if (count != sizeof(cs))
                 return -EINVAL;
 
-        if (copy_from_user(&cs, buffer, sizeof(cs)))
+        if (cfs_copy_from_user(&cs, buffer, sizeof(cs)))
                 return -EFAULT;
 
         CDEBUG(D_CHANGELOG, "changelog to pid=%d start "LPU64"\n",
@@ -214,7 +214,7 @@ static int mdc_wr_netlink(struct file *file, const char *buffer,
         CWARN("message to pid %d\n", pid);
 
         len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
-                /* for mockup below */ 2 * size_round(sizeof(*hai));
+                /* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
 
         OBD_ALLOC(lh, len);
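
cfs_size_round() rounds a byte count up to the next 8-byte boundary; mdc uses it above, and again for KEY_FID2PATH in a later file, to place a structure after a string key inside one allocation. Sketched with illustrative names, assuming OBD_ALLOC() and the usual kernel string helpers from the surrounding headers:

    #include <libcfs/libcfs.h>

    /* pack "<key>\0<pad><payload>"; the payload lands on the 8-byte
     * boundary that cfs_size_round() guarantees */
    static void *pack_key(const char *key, const void *payload,
                          int paylen, int *total)
    {
            int keylen = cfs_size_round(strlen(key) + 1);
            char *buf;

            *total = keylen + paylen;
            OBD_ALLOC(buf, *total);
            if (buf == NULL)
                    return NULL;
            memcpy(buf, key, strlen(key) + 1);
            memcpy(buf + keylen, payload, paylen);
            return buf;
    }
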
 
index ac709b0..58bcd37 100644
@@ -96,7 +96,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
                 struct ptlrpc_request **req, int extra_lock_flags);
 
 int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
-                            struct list_head *cancels, ldlm_mode_t mode,
+                            cfs_list_t *cancels, ldlm_mode_t mode,
                             __u64 bits);
 /* mdc/mdc_request.c */
 int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
index 1bab2b6..4d86793 100644
@@ -301,7 +301,7 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
         rec->sa_ctime  = LTIME_S(op_data->op_attr.ia_ctime);
         rec->sa_attr_flags = ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
         if ((op_data->op_attr.ia_valid & ATTR_GID) &&
-            in_group_p(op_data->op_attr.ia_gid))
+            cfs_curproc_is_in_groups(op_data->op_attr.ia_gid))
                 rec->sa_suppgid = op_data->op_attr.ia_gid;
         else
                 rec->sa_suppgid = op_data->op_suppgids[0];
@@ -481,7 +481,7 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
         int rc;
         ENTRY;
         client_obd_list_lock(&cli->cl_loi_list_lock);
-        rc = list_empty(&mcw->mcw_entry);
+        rc = cfs_list_empty(&mcw->mcw_entry);
         client_obd_list_unlock(&cli->cl_loi_list_lock);
         RETURN(rc);
 };
@@ -496,7 +496,7 @@ void mdc_enter_request(struct client_obd *cli)
 
         client_obd_list_lock(&cli->cl_loi_list_lock);
         if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
-                list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+                cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
                 cfs_waitq_init(&mcw.mcw_waitq);
                 client_obd_list_unlock(&cli->cl_loi_list_lock);
                 l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), &lwi);
@@ -508,20 +508,20 @@ void mdc_enter_request(struct client_obd *cli)
 
 void mdc_exit_request(struct client_obd *cli)
 {
-        struct list_head *l, *tmp;
+        cfs_list_t *l, *tmp;
         struct mdc_cache_waiter *mcw;
 
         client_obd_list_lock(&cli->cl_loi_list_lock);
         cli->cl_r_in_flight--;
-        list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                 
                 if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
                         /* No free request slots anymore */
                         break;
                 }
 
-                mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
-                list_del_init(&mcw->mcw_entry);
+                mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+                cfs_list_del_init(&mcw->mcw_entry);
                 cli->cl_r_in_flight++;
                 cfs_waitq_signal(&mcw->mcw_waitq);
         }
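
mdc_enter_request()/mdc_exit_request() throttle RPCs with a waiter list: a caller that finds all slots busy queues an mcw entry and sleeps on its private waitqueue; whoever releases a slot pops waiters and signals them one by one while slots remain. The release side as a sketch (illustrative names; one spinlock covers both the counter and the list, as cl_loi_list_lock does above):

    #include <libcfs/libcfs.h>

    struct my_waiter {
            cfs_list_t  w_entry;    /* linkage on the waiters list */
            cfs_waitq_t w_waitq;    /* private per-waiter queue */
    };

    static void my_exit_request(cfs_spinlock_t *lock, cfs_list_t *waiters,
                                int *in_flight, int max_in_flight)
    {
            cfs_list_t *l, *tmp;

            cfs_spin_lock(lock);
            (*in_flight)--;
            cfs_list_for_each_safe(l, tmp, waiters) {
                    struct my_waiter *w;

                    if (*in_flight >= max_in_flight)
                            break;  /* no free request slots anymore */

                    w = cfs_list_entry(l, struct my_waiter, w_entry);
                    cfs_list_del_init(&w->w_entry);
                    (*in_flight)++;
                    cfs_waitq_signal(&w->w_waitq);
            }
            cfs_spin_unlock(lock);
    }
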
index 5606b46..1dc107f 100644
@@ -202,9 +202,9 @@ static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
 {
         /* Don't hold error requests for replay. */
         if (req->rq_replay) {
-                spin_lock(&req->rq_lock);
+                cfs_spin_lock(&req->rq_lock);
                 req->rq_replay = 0;
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
         }
         if (rc && req->rq_transno != 0) {
                 DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
@@ -305,9 +305,9 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
                 return NULL;
         }
 
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         req->rq_replay = req->rq_import->imp_replayable;
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 
         /* pack the intent */
         lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
index 445d71d..410ea84 100644
@@ -77,7 +77,7 @@ static int mdc_reint(struct ptlrpc_request *request,
  * found by @fid. Found locks are added into @cancel list. Returns the amount of
  * locks added to @cancels list. */
 int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
-                            struct list_head *cancels, ldlm_mode_t mode,
+                            cfs_list_t *cancels, ldlm_mode_t mode,
                             __u64 bits)
 {
         ldlm_policy_data_t policy = {{0}};
@@ -102,7 +102,7 @@ int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
 }
 
 static int mdc_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
-                            struct list_head *cancels, int count)
+                            cfs_list_t *cancels, int count)
 {
         return ldlm_prep_elc_req(exp, req, LUSTRE_MDS_VERSION, MDS_REINT,
                                  0, cancels, count);
index 3076ce8..dc0fefb 100644
@@ -692,9 +692,9 @@ void mdc_commit_open(struct ptlrpc_request *req)
          * be put along with freeing \var mod.
          */
         ptlrpc_request_addref(req);
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         req->rq_committed = 1;
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
         req->rq_cb_data = NULL;
         obd_mod_put(mod);
 }
@@ -737,13 +737,13 @@ int mdc_set_open_replay_data(struct obd_export *exp,
                 obd_mod_get(mod);
                 obd_mod_get(mod);
 
-                spin_lock(&open_req->rq_lock);
+                cfs_spin_lock(&open_req->rq_lock);
                 och->och_mod = mod;
                 mod->mod_och = och;
                 mod->mod_open_req = open_req;
                 open_req->rq_cb_data = mod;
                 open_req->rq_commit_cb = mdc_commit_open;
-                spin_unlock(&open_req->rq_lock);
+                cfs_spin_unlock(&open_req->rq_lock);
         }
 
         rec->cr_fid2 = body->fid1;
@@ -829,9 +829,9 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
                 DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
                 /* We no longer want to preserve this open for replay even
                  * though the open was committed. b=3632, b=3633 */
-                spin_lock(&mod->mod_open_req->rq_lock);
+                cfs_spin_lock(&mod->mod_open_req->rq_lock);
                 mod->mod_open_req->rq_replay = 0;
-                spin_unlock(&mod->mod_open_req->rq_lock);
+                cfs_spin_unlock(&mod->mod_open_req->rq_lock);
         } else {
                  CDEBUG(D_HA, "couldn't find open req; expecting close error\n");
         }
@@ -921,9 +921,9 @@ int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
                 DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
                 /* We no longer want to preserve this setattr for replay even
                  * though the open was committed. b=3632, b=3633 */
-                spin_lock(&mod->mod_open_req->rq_lock);
+                cfs_spin_lock(&mod->mod_open_req->rq_lock);
                 mod->mod_open_req->rq_replay = 0;
-                spin_unlock(&mod->mod_open_req->rq_lock);
+                cfs_spin_unlock(&mod->mod_open_req->rq_lock);
         }
 
         mdc_close_pack(req, op_data);
@@ -1077,12 +1077,12 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
                 RETURN(-EOVERFLOW);
 
         /* Key is KEY_FID2PATH + getinfo_fid2path description */
-        keylen = size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
+        keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
         OBD_ALLOC(key, keylen);
         if (key == NULL)
                 RETURN(-ENOMEM);
         memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
-        memcpy(key + size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
+        memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
 
         CDEBUG(D_IOCTL, "path get "DFID" from "LPU64" #%d\n",
                PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);
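cfs_size_round is the renamed buffer-rounding helper; the key layout above relies on it padding each segment to an 8-byte boundary. A sketch, assuming the historical semantics are unchanged:

    /* Assumed: round a length up to the next multiple of 8 bytes. */
    static inline int cfs_size_round(int val)
    {
            return (val + 7) & (~0x7);
    }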
@@ -1120,7 +1120,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
         int rc;
         ENTRY;
 
-        if (!try_module_get(THIS_MODULE)) {
+        if (!cfs_try_module_get(THIS_MODULE)) {
                 CERROR("Can't get module. Is it alive?");
                 return -EINVAL;
         }
@@ -1173,7 +1173,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                 GOTO(out, rc = -ENOTTY);
         }
 out:
-        module_put(THIS_MODULE);
+        cfs_module_put(THIS_MODULE);
 
         return rc;
 }
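The module-lifetime helpers get the same treatment; presumably direct aliases on Linux (sketch, including cfs_request_module used later in mdc_init):

    #define cfs_try_module_get(m)    try_module_get(m)
    #define cfs_module_put(m)        module_put(m)
    #define cfs_request_module(...)  request_module(__VA_ARGS__)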
@@ -1300,9 +1300,9 @@ int mdc_set_info_async(struct obd_export *exp,
         if (KEY_IS(KEY_INIT_RECOV)) {
                 if (vallen != sizeof(int))
                         RETURN(-EINVAL);
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_initial_recov = *(int *)val;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
                        exp->exp_obd->obd_name, imp->imp_initial_recov);
                 RETURN(0);
@@ -1311,11 +1311,11 @@ int mdc_set_info_async(struct obd_export *exp,
         if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
                 if (vallen != sizeof(int))
                         RETURN(-EINVAL);
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_initial_recov_bk = *(int *)val;
                 if (imp->imp_initial_recov_bk)
                         imp->imp_initial_recov = 1;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CDEBUG(D_HA, "%s: set imp_initial_recov_bk = %d\n",
                        exp->exp_obd->obd_name, imp->imp_initial_recov_bk);
                 RETURN(0);
@@ -1324,7 +1324,7 @@ int mdc_set_info_async(struct obd_export *exp,
                 if (vallen != sizeof(int))
                         RETURN(-EINVAL);
 
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 if (*((int *)val)) {
                         imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
                         imp->imp_connect_data.ocd_connect_flags |= OBD_CONNECT_RDONLY;
@@ -1332,7 +1332,7 @@ int mdc_set_info_async(struct obd_export *exp,
                         imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
                         imp->imp_connect_data.ocd_connect_flags &= ~OBD_CONNECT_RDONLY;
                 }
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
 
                 rc = target_set_info_rpc(imp, MDS_SET_INFO,
                                          keylen, key, vallen, val, set);
@@ -1348,9 +1348,9 @@ int mdc_set_info_async(struct obd_export *exp,
         }
         if (KEY_IS(KEY_MDS_CONN)) {
                 /* mds-mds import */
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_server_timeout = 1;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
                 CDEBUG(D_OTHER, "%s: timeout / 2\n", exp->exp_obd->obd_name);
                 RETURN(0);
@@ -1413,10 +1413,10 @@ static int mdc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
 
         /* Since the request might also come from lprocfs, we need to
          * sync this with client_disconnect_export. Bug 15684 */
-        down_read(&obd->u.cli.cl_sem);
+        cfs_down_read(&obd->u.cli.cl_sem);
         if (obd->u.cli.cl_import)
                 imp = class_import_get(obd->u.cli.cl_import);
-        up_read(&obd->u.cli.cl_sem);
+        cfs_up_read(&obd->u.cli.cl_sem);
         if (!imp)
                 RETURN(-ENODEV);
 
@@ -1785,12 +1785,12 @@ static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                    client import will not have been cleaned. */
                 if (obd->u.cli.cl_import) {
                         struct obd_import *imp;
-                        down_write(&obd->u.cli.cl_sem);
+                        cfs_down_write(&obd->u.cli.cl_sem);
                         imp = obd->u.cli.cl_import;
                         CERROR("client import never connected\n");
                         ptlrpc_invalidate_import(imp);
                         class_destroy_import(imp);
-                        up_write(&obd->u.cli.cl_sem);
+                        cfs_up_write(&obd->u.cli.cl_sem);
                         obd->u.cli.cl_import = NULL;
                 }
                 rc = obd_llog_finish(obd, 0);
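cl_sem and mds_notify_lock are reader/writer semaphores, so the down_read/down_write family is wrapped as well (sketch, same aliasing assumption):

    typedef struct rw_semaphore cfs_rw_semaphore_t;

    #define cfs_init_rwsem(sem)  init_rwsem(sem)
    #define cfs_down_read(sem)   down_read(sem)
    #define cfs_up_read(sem)     up_read(sem)
    #define cfs_down_write(sem)  down_write(sem)
    #define cfs_up_write(sem)    up_write(sem)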
@@ -1987,9 +1987,9 @@ static int mdc_connect(const struct lu_env *env,
 
         /* mds-mds import features */
         if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_server_timeout = 1;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
                 CDEBUG(D_OTHER, "%s: Set 'mds' portal and timeout\n",
                        obd->obd_name);
@@ -2064,7 +2064,7 @@ int __init mdc_init(void)
         struct lprocfs_static_vars lvars = { 0 };
         lprocfs_mdc_init_vars(&lvars);
 
-        request_module("lquota");
+        cfs_request_module("lquota");
         quota_interface = PORTAL_SYMBOL_GET(mdc_quota_interface);
         init_obd_quota_ops(quota_interface, &mdc_obd_ops);
 
index 7dc67d1..1bf62ba 100644
@@ -173,9 +173,9 @@ static int changelog_user_init_cb(struct llog_handle *llh,
                " in log "LPX64"\n", hdr->lrh_index, rec->cur_hdr.lrh_index,
                rec->cur_id, rec->cur_endrec, llh->lgh_id.lgl_oid);
 
-        spin_lock(&mdd->mdd_cl.mc_user_lock);
+        cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
         mdd->mdd_cl.mc_lastuser = rec->cur_id;
-        spin_unlock(&mdd->mdd_cl.mc_user_lock);
+        cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
 
         RETURN(LLOG_PROC_BREAK);
 }
@@ -240,12 +240,12 @@ static int mdd_changelog_init(const struct lu_env *env, struct mdd_device *mdd)
         int rc;
 
         mdd->mdd_cl.mc_index = 0;
-        spin_lock_init(&mdd->mdd_cl.mc_lock);
+        cfs_spin_lock_init(&mdd->mdd_cl.mc_lock);
         cfs_waitq_init(&mdd->mdd_cl.mc_waitq);
         mdd->mdd_cl.mc_starttime = cfs_time_current_64();
         mdd->mdd_cl.mc_flags = 0; /* off by default */
         mdd->mdd_cl.mc_mask = CHANGELOG_DEFMASK;
-        spin_lock_init(&mdd->mdd_cl.mc_user_lock);
+        cfs_spin_lock_init(&mdd->mdd_cl.mc_user_lock);
         mdd->mdd_cl.mc_lastuser = 0;
 
         rc = mdd_changelog_llog_init(mdd);
@@ -275,17 +275,17 @@ int mdd_changelog_on(struct mdd_device *mdd, int on)
                                mdd2obd_dev(mdd)->obd_name);
                         rc = -ESRCH;
                 } else {
-                        spin_lock(&mdd->mdd_cl.mc_lock);
+                        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
                         mdd->mdd_cl.mc_flags |= CLM_ON;
-                        spin_unlock(&mdd->mdd_cl.mc_lock);
+                        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
                         rc = mdd_changelog_write_header(mdd, CLM_START);
                 }
         } else if ((on == 0) && ((mdd->mdd_cl.mc_flags & CLM_ON) == CLM_ON)) {
                 LCONSOLE_INFO("%s: changelog off\n",mdd2obd_dev(mdd)->obd_name);
                 rc = mdd_changelog_write_header(mdd, CLM_FINI);
-                spin_lock(&mdd->mdd_cl.mc_lock);
+                cfs_spin_lock(&mdd->mdd_cl.mc_lock);
                 mdd->mdd_cl.mc_flags &= ~CLM_ON;
-                spin_unlock(&mdd->mdd_cl.mc_lock);
+                cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
         }
         return rc;
 }
@@ -319,12 +319,12 @@ int mdd_changelog_llog_write(struct mdd_device         *mdd,
         /* llog_lvfs_write_rec sets the llog tail len */
         rec->cr_hdr.lrh_type = CHANGELOG_REC;
         rec->cr.cr_time = cl_time();
-        spin_lock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
         /* NB: I suppose it's possible llog_add adds out of order wrt cr_index,
            but as long as the MDD transactions are ordered correctly for e.g.
            rename conflicts, I don't think this should matter. */
         rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
-        spin_unlock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
         ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
         if (ctxt == NULL)
                 return -ENXIO;
@@ -355,9 +355,9 @@ int mdd_changelog_llog_cancel(struct mdd_device *mdd, long long endrec)
         if (ctxt == NULL)
                 return -ENXIO;
 
-        spin_lock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
         cur = (long long)mdd->mdd_cl.mc_index;
-        spin_unlock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
         if (endrec > cur)
                 endrec = cur;
 
@@ -1129,7 +1129,7 @@ static int mdd_root_get(const struct lu_env *env,
  * No permission check is needed.
  */
 static int mdd_statfs(const struct lu_env *env, struct md_device *m,
-                      struct kstatfs *sfs)
+                      cfs_kstatfs_t *sfs)
 {
         struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
         int rc;
@@ -1223,7 +1223,7 @@ static struct lu_device *mdd_device_free(const struct lu_env *env,
         struct lu_device  *next = &m->mdd_child->dd_lu_dev;
         ENTRY;
 
-        LASSERT(atomic_read(&lu->ld_ref) == 0);
+        LASSERT(cfs_atomic_read(&lu->ld_ref) == 0);
         md_device_fini(&m->mdd_md_dev);
         OBD_FREE_PTR(m);
         RETURN(next);
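Atomic counters follow suit; a sketch of the assumed Linux mapping:

    typedef atomic_t cfs_atomic_t;

    #define cfs_atomic_read(atom)  atomic_read(atom)
    #define cfs_atomic_dec(atom)   atomic_dec(atom)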
@@ -1313,15 +1313,15 @@ static int mdd_changelog_user_register(struct mdd_device *mdd, int *id)
 
         rec->cur_hdr.lrh_len = sizeof(*rec);
         rec->cur_hdr.lrh_type = CHANGELOG_USER_REC;
-        spin_lock(&mdd->mdd_cl.mc_user_lock);
+        cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
         if (mdd->mdd_cl.mc_lastuser == (unsigned int)(-1)) {
-                spin_unlock(&mdd->mdd_cl.mc_user_lock);
+                cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
                 CERROR("Maximum number of changelog users exceeded!\n");
                 GOTO(out, rc = -EOVERFLOW);
         }
         *id = rec->cur_id = ++mdd->mdd_cl.mc_lastuser;
         rec->cur_endrec = mdd->mdd_cl.mc_index;
-        spin_unlock(&mdd->mdd_cl.mc_user_lock);
+        cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
 
         rc = llog_add(ctxt, &rec->cur_hdr, NULL, NULL, 0);
 
@@ -1417,9 +1417,9 @@ static int mdd_changelog_user_purge(struct mdd_device *mdd, int id,
         data.mcud_minrec = 0;
         data.mcud_usercount = 0;
         data.mcud_endrec = endrec;
-        spin_lock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
         endrec = mdd->mdd_cl.mc_index;
-        spin_unlock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
         if ((data.mcud_endrec == 0) ||
             ((data.mcud_endrec > endrec) &&
              (data.mcud_endrec != MCUD_UNREGISTER)))
index 2b79611..b523bae 100644
@@ -102,13 +102,13 @@ struct mdd_txn_op_descr {
 #define CLM_PURGE 0x40000
 
 struct mdd_changelog {
-        spinlock_t                       mc_lock;    /* for index */
+        cfs_spinlock_t                   mc_lock;    /* for index */
         cfs_waitq_t                      mc_waitq;
         int                              mc_flags;
         int                              mc_mask;
         __u64                            mc_index;
         __u64                            mc_starttime;
-        spinlock_t                       mc_user_lock;
+        cfs_spinlock_t                   mc_user_lock;
         int                              mc_lastuser;
 };
 
@@ -546,7 +546,7 @@ static inline const struct lu_fid *mdo2fid(const struct mdd_object *obj)
         return lu_object_fid(&obj->mod_obj.mo_lu);
 }
 
-static inline umode_t mdd_object_type(const struct mdd_object *obj)
+static inline cfs_umode_t mdd_object_type(const struct mdd_object *obj)
 {
         return lu_object_attr(&obj->mod_obj.mo_lu);
 }
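The bare type renames seen here (cfs_umode_t, and cfs_kstatfs_t in mdd_statfs above) presumably reduce to plain typedefs on Linux:

    typedef umode_t        cfs_umode_t;
    typedef struct kstatfs cfs_kstatfs_t;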
index d5b6730..4f1427d 100644
@@ -51,7 +51,7 @@
 
 
 #ifdef CONFIG_LOCKDEP
-static struct lock_class_key mdd_pdirop_key;
+static cfs_lock_class_key_t mdd_pdirop_key;
 
 #define RETIP ((unsigned long)__builtin_return_address(0))
 
index f9cdc02..2b9535f 100644
@@ -313,7 +313,7 @@ int mdd_lov_set_md(const struct lu_env *env, struct mdd_object *pobj,
                    int lmm_size, struct thandle *handle, int set_stripe)
 {
         struct lu_buf *buf;
-        umode_t mode;
+        cfs_umode_t mode;
         int rc = 0;
         ENTRY;
 
index dfcb921..769548b 100644
@@ -131,7 +131,7 @@ static int lprocfs_wr_atime_diff(struct file *file, const char *buffer,
         if (count > (sizeof(kernbuf) - 1))
                 return -EINVAL;
 
-        if (copy_from_user(kernbuf, buffer, count))
+        if (cfs_copy_from_user(kernbuf, buffer, count))
                 return -EFAULT;
 
         kernbuf[count] = '\0';
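User-space copy helpers are wrapped the same way; the assumed Linux mapping is a one-line alias:

    #define cfs_copy_from_user(to, from, n)  copy_from_user(to, from, n)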
@@ -184,7 +184,7 @@ static int lprocfs_wr_changelog_mask(struct file *file, const char *buffer,
         OBD_ALLOC(kernbuf, CFS_PAGE_SIZE);
         if (kernbuf == NULL)
                 RETURN(-ENOMEM);
-        if (copy_from_user(kernbuf, buffer, count))
+        if (cfs_copy_from_user(kernbuf, buffer, count))
                 GOTO(out, rc = -EFAULT);
         kernbuf[count] = 0;
 
@@ -237,9 +237,9 @@ static int lprocfs_rd_changelog_users(char *page, char **start, off_t off,
                 return -ENXIO;
         LASSERT(ctxt->loc_handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT);
 
-        spin_lock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
         cur = mdd->mdd_cl.mc_index;
-        spin_unlock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
 
         cucb.count = count;
         cucb.page = page;
index 1967031..c78e5f2 100644
@@ -489,9 +489,9 @@ static int mdd_path_current(const struct lu_env *env,
         /* Verify that our path hasn't changed since we started the lookup.
            Record the current index, and verify the path resolves to the
            same fid. If it does, then the path is correct as of this index. */
-        spin_lock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
         pli->pli_currec = mdd->mdd_cl.mc_index;
-        spin_unlock(&mdd->mdd_cl.mc_lock);
+        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
         rc = mdd_path2fid(env, mdd, ptr, &pli->pli_fid);
         if (rc) {
                 CDEBUG(D_INFO, "mdd_path2fid(%s) failed %d\n", ptr, rc);
@@ -1118,7 +1118,7 @@ static int mdd_fix_attr(const struct lu_env *env, struct mdd_object *obj,
                     !mdd_capable(uc, CFS_CAP_FOWNER))
                         RETURN(-EPERM);
 
-                if (la->la_mode == (umode_t) -1)
+                if (la->la_mode == (cfs_umode_t) -1)
                         la->la_mode = tmp_la->la_mode;
                 else
                         la->la_mode = (la->la_mode & S_IALLUGO) |
@@ -1458,7 +1458,7 @@ static int mdd_attr_set(const struct lu_env *env, struct md_object *obj,
         }
 
         if (rc == 0 && ma->ma_valid & MA_LOV) {
-                umode_t mode;
+                cfs_umode_t mode;
 
                 mode = mdd_object_type(mdd_obj);
                 if (S_ISREG(mode) || S_ISDIR(mode)) {
@@ -1472,7 +1472,7 @@ static int mdd_attr_set(const struct lu_env *env, struct md_object *obj,
 
         }
         if (rc == 0 && ma->ma_valid & (MA_HSM | MA_SOM)) {
-                umode_t mode;
+                cfs_umode_t mode;
 
                 mode = mdd_object_type(mdd_obj);
                 if (S_ISREG(mode))
index 45263f6..146548e 100644
@@ -279,7 +279,7 @@ static int mds_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                 break;
         case OBD_CLEANUP_EXPORTS:
                 mds_lov_early_clean(obd);
-                down_write(&mds->mds_notify_lock);
+                cfs_down_write(&mds->mds_notify_lock);
                 mds_lov_disconnect(obd);
                 mds_lov_clean(obd);
                 ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
@@ -290,7 +290,7 @@ static int mds_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                         llog_cleanup(ctxt);
                 rc = obd_llog_finish(obd, 0);
                 mds->mds_osc_exp = NULL;
-                up_write(&mds->mds_notify_lock);
+                cfs_up_write(&mds->mds_notify_lock);
                 break;
         }
         RETURN(rc);
@@ -360,9 +360,9 @@ static int mds_cmd_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
          * we only need the lmi here, not the mount itself;
          * OSD did the mount already, so put the mount back
          */
-        atomic_dec(&lsi->lsi_mounts);
+        cfs_atomic_dec(&lsi->lsi_mounts);
         mntput(mnt);
-        init_rwsem(&mds->mds_notify_lock);
+        cfs_init_rwsem(&mds->mds_notify_lock);
 
         obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
         mds_init_ctxt(obd, mnt);
@@ -485,7 +485,7 @@ static int __init mds_cmd_init(void)
         struct lprocfs_static_vars lvars;
         int rc;
 
-        request_module("%s", "lquota");
+        cfs_request_module("%s", "lquota");
         mds_quota_interface_ref = PORTAL_SYMBOL_GET(mds_quota_interface);
         rc = lquota_init(mds_quota_interface_ref);
         if (rc) {
index 58aac97..a33f9bc 100644
@@ -141,7 +141,7 @@ static int lprocfs_wr_atime_diff(struct file *file, const char *buffer,
         if (count > (sizeof(kernbuf) - 1))
                 return -EINVAL;
 
-        if (copy_from_user(kernbuf, buffer, count))
+        if (cfs_copy_from_user(kernbuf, buffer, count))
                 return -EFAULT;
 
         kernbuf[count] = '\0';
index 80b1658..ac9fa01 100644
@@ -100,7 +100,8 @@ int mds_lov_init_objids(struct obd_device *obd)
 
         CLASSERT(((MDS_LOV_ALLOC_SIZE % sizeof(obd_id)) == 0));
 
-        mds->mds_lov_page_dirty = ALLOCATE_BITMAP(MDS_LOV_OBJID_PAGES_COUNT);
+        mds->mds_lov_page_dirty =
+                CFS_ALLOCATE_BITMAP(MDS_LOV_OBJID_PAGES_COUNT);
         if (mds->mds_lov_page_dirty == NULL)
                 RETURN(-ENOMEM);
 
@@ -130,7 +131,7 @@ err_open:
 err_free:
         OBD_FREE(mds->mds_lov_page_array, size);
 err_free_bitmap:
-        FREE_BITMAP(mds->mds_lov_page_dirty);
+        CFS_FREE_BITMAP(mds->mds_lov_page_dirty);
 
         RETURN(rc);
 }
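The bitmap helpers pick up a CFS_ prefix along with everything else. A sketch of the assumed shape (field names and sizing are guesses, not taken from this patch):

    typedef struct {
            int           size;    /* number of bits */
            unsigned long data[0];
    } cfs_bitmap_t;

    /* CFS_ALLOCATE_BITMAP(n): allocate a cfs_bitmap_t covering n bits,
     * NULL on failure.  CFS_FREE_BITMAP(bm): release it. */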
@@ -158,7 +159,7 @@ void mds_lov_destroy_objids(struct obd_device *obd)
                         CERROR("%s file won't close, rc=%d\n", LOV_OBJID, rc);
         }
 
-        FREE_BITMAP(mds->mds_lov_page_dirty);
+        CFS_FREE_BITMAP(mds->mds_lov_page_dirty);
         EXIT;
 }
 
@@ -242,7 +243,7 @@ int mds_lov_prepare_objids(struct obd_device *obd, struct lov_mds_md *lmm)
         }
 
 
-        mutex_down(&obd->obd_dev_sem);
+        cfs_mutex_down(&obd->obd_dev_sem);
         for (j = 0; j < count; j++) {
                 __u32 i = le32_to_cpu(data[j].l_ost_idx);
                 if (mds_lov_update_max_ost(&obd->u.mds, i)) {
@@ -250,7 +251,7 @@ int mds_lov_prepare_objids(struct obd_device *obd, struct lov_mds_md *lmm)
                         break;
                 }
         }
-        mutex_up(&obd->obd_dev_sem);
+        cfs_mutex_up(&obd->obd_dev_sem);
 
         RETURN(rc);
 }
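obd_dev_sem is still a plain semaphore at this point, and the old mutex_down()/mutex_up() compatibility wrappers around it are renamed with the rest; presumably:

    #define cfs_mutex_down(sem)  down(sem)
    #define cfs_mutex_up(sem)    up(sem)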
@@ -582,9 +583,9 @@ static int mds_lov_update_desc(struct obd_device *obd, int idx,
         CDEBUG(D_CONFIG, "updated lov_desc, tgt_count: %d - idx %d / uuid %s\n",
                mds->mds_lov_desc.ld_tgt_count, idx, uuid->uuid);
 
-        mutex_down(&obd->obd_dev_sem);
+        cfs_mutex_down(&obd->obd_dev_sem);
         rc = mds_lov_update_max_ost(mds, idx);
-        mutex_up(&obd->obd_dev_sem);
+        cfs_mutex_up(&obd->obd_dev_sem);
         if (rc != 0)
                 GOTO(out, rc );
 
@@ -683,9 +684,9 @@ int mds_lov_connect(struct obd_device *obd, char * lov_name)
                 RETURN(-ENOTCONN);
         }
 
-        mutex_down(&obd->obd_dev_sem);
+        cfs_mutex_down(&obd->obd_dev_sem);
         rc = mds_lov_read_objids(obd);
-        mutex_up(&obd->obd_dev_sem);
+        cfs_mutex_up(&obd->obd_dev_sem);
         if (rc) {
                 CERROR("cannot read %s: rc = %d\n", "lov_objids", rc);
                 GOTO(err_exit, rc);
@@ -833,7 +834,7 @@ static int __mds_lov_synchronize(void *data)
         uuid = &watched->u.cli.cl_target_uuid;
         LASSERT(uuid);
 
-        down_read(&mds->mds_notify_lock);
+        cfs_down_read(&mds->mds_notify_lock);
         if (obd->obd_stopping || obd->obd_fail)
                 GOTO(out, rc = -ENODEV);
 
@@ -890,7 +891,7 @@ static int __mds_lov_synchronize(void *data)
 #endif
         EXIT;
 out:
-        up_read(&mds->mds_notify_lock);
+        cfs_up_read(&mds->mds_notify_lock);
         if (rc) {
                 /* Deactivate it for safety */
                 CERROR("%s sync failed %d, deactivating\n", obd_uuid2str(uuid),
index 9672420..a4cbe69 100644
@@ -49,7 +49,7 @@
 
 static inline void set_capa_key_expiry(struct mdt_device *mdt)
 {
-        mdt->mdt_ck_expiry = jiffies + mdt->mdt_ck_timeout * HZ;
+        mdt->mdt_ck_expiry = jiffies + mdt->mdt_ck_timeout * CFS_HZ;
 }
 
 static void make_capa_key(struct lustre_capa_key *key,
@@ -249,17 +249,17 @@ static int mdt_ck_thread_main(void *args)
                 next = mdt->mdt_child;
                 rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
                 if (!rc) {
-                        spin_lock(&capa_lock);
+                        cfs_spin_lock(&capa_lock);
                         *bkey = *rkey;
                         *rkey = *tmp;
-                        spin_unlock(&capa_lock);
+                        cfs_spin_unlock(&capa_lock);
 
                         rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
                         if (rc) {
-                                spin_lock(&capa_lock);
+                                cfs_spin_lock(&capa_lock);
                                 *rkey = *bkey;
                                 memset(bkey, 0, sizeof(*bkey));
-                                spin_unlock(&capa_lock);
+                                cfs_spin_unlock(&capa_lock);
                         } else {
                                 set_capa_key_expiry(mdt);
                                 DEBUG_CAPA_KEY(D_SEC, rkey, "new");
@@ -268,7 +268,7 @@ static int mdt_ck_thread_main(void *args)
                 if (rc) {
                         DEBUG_CAPA_KEY(D_ERROR, rkey, "update failed for");
                         /* next retry is in 300 sec */
-                        mdt->mdt_ck_expiry = jiffies + 300 * HZ;
+                        mdt->mdt_ck_expiry = jiffies + 300 * CFS_HZ;
                 }
 
                 cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
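CFS_HZ is the portable spelling of the scheduler tick rate used in the jiffies arithmetic above; on Linux it is presumably nothing more than:

    #define CFS_HZ  HZ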
@@ -288,13 +288,13 @@ int mdt_ck_thread_start(struct mdt_device *mdt)
 
         cfs_waitq_init(&thread->t_ctl_waitq);
         rc = cfs_kernel_thread(mdt_ck_thread_main, mdt,
-                           (CLONE_VM | CLONE_FILES));
+                               (CLONE_VM | CLONE_FILES));
         if (rc < 0) {
                 CERROR("cannot start mdt_ck thread, rc = %d\n", rc);
                 return rc;
         }
 
-        cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
+        l_cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
         return 0;
 }
 
@@ -307,5 +307,5 @@ void mdt_ck_thread_stop(struct mdt_device *mdt)
 
         thread->t_flags = SVC_STOPPING;
         cfs_waitq_signal(&thread->t_ctl_waitq);
-        cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+        l_cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
 }
index cf7decb..56fec6a 100644
@@ -1123,12 +1123,12 @@ static int mdt_set_info(struct mdt_thread_info *info)
                 req->rq_status = 0;
                 lustre_msg_set_status(req->rq_repmsg, 0);
 
-                spin_lock(&req->rq_export->exp_lock);
+                cfs_spin_lock(&req->rq_export->exp_lock);
                 if (*(__u32 *)val)
                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
                 else
                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
-                spin_unlock(&req->rq_export->exp_lock);
+                cfs_spin_unlock(&req->rq_export->exp_lock);
 
         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
                 struct changelog_setinfo *cs =
@@ -1393,7 +1393,7 @@ static int mdt_writepage(struct mdt_thread_info *info)
         else
                 rc = ptlrpc_start_bulk_transfer (desc);
         if (rc == 0) {
-                *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ,
+                *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
                                             mdt_bulk_timeout, desc);
                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
                                   desc->bd_export->exp_failed, lwi);
@@ -2818,9 +2818,9 @@ static int mdt_recovery(struct mdt_thread_info *info)
         obd = req->rq_export->exp_obd;
 
         /* Check for aborted recovery... */
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         recovering = obd->obd_recovering;
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         if (unlikely(recovering)) {
                 int rc;
                 int should_process;
@@ -4391,7 +4391,7 @@ static void mdt_fini(const struct lu_env *env, struct mdt_device *m)
                 OBD_FREE_PTR(mite);
                 d->ld_site = NULL;
         }
-        LASSERT(atomic_read(&d->ld_ref) == 0);
+        LASSERT(cfs_atomic_read(&d->ld_ref) == 0);
 
         EXIT;
 }
@@ -4412,10 +4412,10 @@ static int mdt_adapt_sptlrpc_conf(struct obd_device *obd, int initial)
 
         sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
 
-        write_lock(&m->mdt_sptlrpc_lock);
+        cfs_write_lock(&m->mdt_sptlrpc_lock);
         sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
         m->mdt_sptlrpc_rset = tmp_rset;
-        write_unlock(&m->mdt_sptlrpc_lock);
+        cfs_write_unlock(&m->mdt_sptlrpc_lock);
 
         return 0;
 }
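mdt_sptlrpc_lock is a spinning reader/writer lock, distinct from the rw-semaphores earlier, so it gets its own alias family (sketch):

    typedef rwlock_t cfs_rwlock_t;

    #define cfs_rwlock_init(lock)   rwlock_init(lock)
    #define cfs_read_lock(lock)     read_lock(lock)
    #define cfs_read_unlock(lock)   read_unlock(lock)
    #define cfs_write_lock(lock)    write_lock(lock)
    #define cfs_write_unlock(lock)  write_unlock(lock)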
@@ -4504,7 +4504,7 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
         obd = class_name2obd(dev);
         LASSERT(obd != NULL);
 
-        spin_lock_init(&m->mdt_transno_lock);
+        cfs_spin_lock_init(&m->mdt_transno_lock);
 
         m->mdt_max_mdsize = MAX_MD_SIZE;
         m->mdt_max_cookiesize = sizeof(struct llog_cookie);
@@ -4530,10 +4530,10 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
                 }
         }
 
-        rwlock_init(&m->mdt_sptlrpc_lock);
+        cfs_rwlock_init(&m->mdt_sptlrpc_lock);
         sptlrpc_rule_set_init(&m->mdt_sptlrpc_rset);
 
-        spin_lock_init(&m->mdt_ioepoch_lock);
+        cfs_spin_lock_init(&m->mdt_ioepoch_lock);
         m->mdt_opts.mo_compat_resname = 0;
         m->mdt_capa_timeout = CAPA_TIMEOUT;
         m->mdt_capa_alg = CAPA_HMAC_ALG_SHA1;
@@ -4543,9 +4543,9 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
         CFS_INIT_LIST_HEAD(&m->mdt_nosquash_nids);
         m->mdt_nosquash_str = NULL;
         m->mdt_nosquash_strlen = 0;
-        init_rwsem(&m->mdt_squash_sem);
+        cfs_init_rwsem(&m->mdt_squash_sem);
 
-        spin_lock_init(&m->mdt_client_bitmap_lock);
+        cfs_spin_lock_init(&m->mdt_client_bitmap_lock);
 
         OBD_ALLOC_PTR(mite);
         if (mite == NULL)
@@ -4802,7 +4802,7 @@ static struct lu_object *mdt_object_alloc(const struct lu_env *env,
                 lu_object_init(o, h, d);
                 lu_object_add_top(h, o);
                 o->lo_ops = &mdt_obj_ops;
-                sema_init(&mo->mot_ioepoch_sem, 1);
+                cfs_sema_init(&mo->mot_ioepoch_sem, 1);
                 RETURN(o);
         } else
                 RETURN(NULL);
@@ -4911,9 +4911,9 @@ static int mdt_connect_internal(struct obd_export *exp,
                 if (!mdt->mdt_som_conf)
                         data->ocd_connect_flags &= ~OBD_CONNECT_SOM;
 
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
                 exp->exp_connect_flags = data->ocd_connect_flags;
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
                 data->ocd_version = LUSTRE_VERSION_CODE;
                 exp->exp_mdt_data.med_ibits_known = data->ocd_ibits_known;
         }
@@ -4951,14 +4951,14 @@ static int mdt_connect_check_sptlrpc(struct mdt_device *mdt,
         int                     rc = 0;
 
         if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
-                read_lock(&mdt->mdt_sptlrpc_lock);
+                cfs_read_lock(&mdt->mdt_sptlrpc_lock);
                 sptlrpc_target_choose_flavor(&mdt->mdt_sptlrpc_rset,
                                              req->rq_sp_from,
                                              req->rq_peer.nid,
                                              &flvr);
-                read_unlock(&mdt->mdt_sptlrpc_lock);
+                cfs_read_unlock(&mdt->mdt_sptlrpc_lock);
 
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
 
                 exp->exp_sp_peer = req->rq_sp_from;
                 exp->exp_flvr = flvr;
@@ -4972,7 +4972,7 @@ static int mdt_connect_check_sptlrpc(struct mdt_device *mdt,
                         rc = -EACCES;
                 }
 
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
         } else {
                 if (exp->exp_sp_peer != req->rq_sp_from) {
                         CERROR("RPC source %s doesn't match %s\n",
@@ -5094,17 +5094,17 @@ static int mdt_export_cleanup(struct obd_export *exp)
         int rc = 0;
         ENTRY;
 
-        spin_lock(&med->med_open_lock);
-        while (!list_empty(&med->med_open_head)) {
-                struct list_head *tmp = med->med_open_head.next;
-                mfd = list_entry(tmp, struct mdt_file_data, mfd_list);
+        cfs_spin_lock(&med->med_open_lock);
+        while (!cfs_list_empty(&med->med_open_head)) {
+                cfs_list_t *tmp = med->med_open_head.next;
+                mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list);
 
                 /* Remove mfd handle so it can't be found again.
                  * We are consuming the mfd_list reference here. */
                 class_handle_unhash(&mfd->mfd_handle);
-                list_move_tail(&mfd->mfd_list, &closing_list);
+                cfs_list_move_tail(&mfd->mfd_list, &closing_list);
         }
-        spin_unlock(&med->med_open_lock);
+        cfs_spin_unlock(&med->med_open_lock);
         mdt = mdt_dev(obd->obd_lu_dev);
         LASSERT(mdt != NULL);
 
@@ -5119,7 +5119,7 @@ static int mdt_export_cleanup(struct obd_export *exp)
         info->mti_mdt = mdt;
         info->mti_exp = exp;
 
-        if (!list_empty(&closing_list)) {
+        if (!cfs_list_empty(&closing_list)) {
                 struct md_attr *ma = &info->mti_attr;
                 int lmm_size;
                 int cookie_size;
@@ -5135,8 +5135,8 @@ static int mdt_export_cleanup(struct obd_export *exp)
                         GOTO(out_cookie, rc = -ENOMEM);
 
                 /* Close any open files (which may also cause orphan unlinking). */
-                list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
-                        list_del_init(&mfd->mfd_list);
+                cfs_list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
+                        cfs_list_del_init(&mfd->mfd_list);
                         memset(&ma->ma_attr, 0, sizeof(ma->ma_attr));
                         ma->ma_lmm_size = lmm_size;
                         ma->ma_cookie_size = cookie_size;
@@ -5187,12 +5187,12 @@ static int mdt_init_export(struct obd_export *exp)
         ENTRY;
 
         CFS_INIT_LIST_HEAD(&med->med_open_head);
-        spin_lock_init(&med->med_open_lock);
-        sema_init(&med->med_idmap_sem, 1);
+        cfs_spin_lock_init(&med->med_open_lock);
+        cfs_sema_init(&med->med_idmap_sem, 1);
         med->med_idmap = NULL;
-        spin_lock(&exp->exp_lock);
+        cfs_spin_lock(&exp->exp_lock);
         exp->exp_connecting = 1;
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
         rc = ldlm_init_export(exp);
         if (rc)
                 CERROR("Error %d while initializing export\n", rc);
@@ -5212,8 +5212,8 @@ static int mdt_destroy_export(struct obd_export *exp)
         target_destroy_export(exp);
         ldlm_destroy_export(exp);
 
-        LASSERT(list_empty(&exp->exp_outstanding_replies));
-        LASSERT(list_empty(&exp->exp_mdt_data.med_open_head));
+        LASSERT(cfs_list_empty(&exp->exp_outstanding_replies));
+        LASSERT(cfs_list_empty(&exp->exp_mdt_data.med_open_head));
         if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid))
                 RETURN(0);
 
@@ -5311,7 +5311,7 @@ static int mdt_rpc_fid2path(struct mdt_thread_info *info, void *key,
         struct getinfo_fid2path *fpout, *fpin;
         int rc = 0;
 
-        fpin = key + size_round(sizeof(KEY_FID2PATH));
+        fpin = key + cfs_size_round(sizeof(KEY_FID2PATH));
         fpout = val;
 
         if (ptlrpc_req_need_swab(info->mti_pill->rc_req))
@@ -5581,7 +5581,7 @@ int mdt_hsm_copytool_send(struct obd_export *exp)
               libcfs_nid2str(exp->exp_connection->c_peer.nid));
 
         len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
-                /* for mockup below */ 2 * size_round(sizeof(*hai));
+                /* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
         OBD_ALLOC(lh, len);
         if (lh == NULL)
                 RETURN(-ENOMEM);
index f7b1658..fd367de 100644
@@ -87,7 +87,7 @@ static void mdt_identity_entry_free(struct upcall_cache *cache,
         struct md_identity *identity = &entry->u.identity;
 
         if (identity->mi_ginfo) {
-                put_group_info(identity->mi_ginfo);
+                cfs_put_group_info(identity->mi_ginfo);
                 identity->mi_ginfo = NULL;
         }
 
@@ -125,9 +125,9 @@ static int mdt_identity_do_upcall(struct upcall_cache *cache,
         if (unlikely(!upcall))
                 RETURN(-ENOMEM);
 
-        read_lock(&cache->uc_upcall_rwlock);
+        cfs_read_lock(&cache->uc_upcall_rwlock);
         memcpy(upcall, cache->uc_upcall, size - 1);
-        read_unlock(&cache->uc_upcall_rwlock);
+        cfs_read_unlock(&cache->uc_upcall_rwlock);
         upcall[size - 1] = 0;
         if (unlikely(!strcmp(upcall, "NONE"))) {
                 CERROR("no upcall set\n");
@@ -163,7 +163,7 @@ static int mdt_identity_parse_downcall(struct upcall_cache *cache,
 {
         struct md_identity *identity = &entry->u.identity;
         struct identity_downcall_data *data = args;
-        struct group_info *ginfo;
+        cfs_group_info_t *ginfo;
         struct md_perm *perms = NULL;
         int size, i;
         ENTRY;
@@ -172,7 +172,7 @@ static int mdt_identity_parse_downcall(struct upcall_cache *cache,
         if (data->idd_ngroups > NGROUPS_MAX)
                 RETURN(-E2BIG);
 
-        ginfo = groups_alloc(data->idd_ngroups);
+        ginfo = cfs_groups_alloc(data->idd_ngroups);
         if (!ginfo) {
                 CERROR("failed to alloc %d groups\n", data->idd_ngroups);
                 RETURN(-ENOMEM);
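The credential helpers used by the identity cache are wrapped too; assumed Linux mapping (sketch):

    typedef struct group_info cfs_group_info_t;

    #define cfs_groups_alloc(ngroups)  groups_alloc(ngroups)
    #define cfs_put_group_info(ginfo)  put_group_info(ginfo)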
@@ -187,7 +187,7 @@ static int mdt_identity_parse_downcall(struct upcall_cache *cache,
                 if (!perms) {
                         CERROR("failed to alloc %d permissions\n",
                                data->idd_nperms);
-                        put_group_info(ginfo);
+                        cfs_put_group_info(ginfo);
                         RETURN(-ENOMEM);
                 }
 
index 162a02f..2d9e825 100644
@@ -82,9 +82,9 @@ do {                                                                    \
                                       OBD_CONNECT_RMT_CLIENT_FORCE |    \
                                       OBD_CONNECT_MDS_CAPA |            \
                                       OBD_CONNECT_OSS_CAPA);            \
-        spin_lock(&exp->exp_lock);                                      \
+        cfs_spin_lock(&exp->exp_lock);                                  \
         exp->exp_connect_flags = reply->ocd_connect_flags;              \
-        spin_unlock(&exp->exp_lock);                                    \
+        cfs_spin_unlock(&exp->exp_lock);                                \
 } while (0)
 
 int mdt_init_sec_level(struct mdt_thread_info *info)
@@ -190,9 +190,9 @@ int mdt_init_sec_level(struct mdt_thread_info *info)
                         if (!mdt->mdt_opts.mo_oss_capa)
                                 reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
 
-                        spin_lock(&exp->exp_lock);
+                        cfs_spin_lock(&exp->exp_lock);
                         exp->exp_connect_flags = reply->ocd_connect_flags;
-                        spin_unlock(&exp->exp_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
                 }
                 break;
         default:
@@ -213,10 +213,10 @@ int mdt_init_idmap(struct mdt_thread_info *info)
         ENTRY;
 
         if (exp_connect_rmtclient(exp)) {
-                down(&med->med_idmap_sem);
+                cfs_down(&med->med_idmap_sem);
                 if (!med->med_idmap)
                         med->med_idmap = lustre_idmap_init();
-                up(&med->med_idmap_sem);
+                cfs_up(&med->med_idmap_sem);
 
                 if (IS_ERR(med->med_idmap)) {
                         long err = PTR_ERR(med->med_idmap);
@@ -243,12 +243,12 @@ int mdt_init_idmap(struct mdt_thread_info *info)
 
 void mdt_cleanup_idmap(struct mdt_export_data *med)
 {
-        down(&med->med_idmap_sem);
+        cfs_down(&med->med_idmap_sem);
         if (med->med_idmap != NULL) {
                 lustre_idmap_fini(med->med_idmap);
                 med->med_idmap = NULL;
         }
-        up(&med->med_idmap_sem);
+        cfs_up(&med->med_idmap_sem);
 }
 
 static inline void mdt_revoke_export_locks(struct obd_export *exp)
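Plain semaphores round out the set; med_idmap_sem here and mot_ioepoch_sem below both use these (sketch, same aliasing assumption):

    typedef struct semaphore cfs_semaphore_t;

    #define cfs_sema_init(sem, val)  sema_init(sem, val)
    #define cfs_down(sem)            down(sem)
    #define cfs_up(sem)              up(sem)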
index d4dfbca..aab9bd3 100644
@@ -84,7 +84,7 @@ struct mdt_object;
 /* file data for open files on MDS */
 struct mdt_file_data {
         struct portals_handle mfd_handle; /* must be first */
-        struct list_head      mfd_list;   /* protected by med_open_lock */
+        cfs_list_t            mfd_list;   /* protected by med_open_lock */
         __u64                 mfd_xid;    /* xid of the open request */
         struct lustre_handle  mfd_old_handle; /* old handle in replay case */
         int                   mfd_mode;   /* open mode provided by client */
@@ -126,7 +126,7 @@ struct mdt_device {
         __u32                      mdt_fl_cfglog:1,
                                    mdt_fl_synced:1;
         /* lock to protect IOepoch */
-        spinlock_t                 mdt_ioepoch_lock;
+        cfs_spinlock_t             mdt_ioepoch_lock;
         __u64                      mdt_ioepoch;
 
         /* transaction callbacks */
@@ -140,7 +140,7 @@ struct mdt_device {
         struct upcall_cache        *mdt_identity_cache;
 
         /* sptlrpc rules */
-        rwlock_t                   mdt_sptlrpc_lock;
+        cfs_rwlock_t               mdt_sptlrpc_lock;
         struct sptlrpc_rule_set    mdt_sptlrpc_rset;
 
         /* capability keys */
@@ -158,10 +158,10 @@ struct mdt_device {
         /* root squash */
         uid_t                      mdt_squash_uid;
         gid_t                      mdt_squash_gid;
-        struct list_head           mdt_nosquash_nids;
+        cfs_list_t                 mdt_nosquash_nids;
         char                      *mdt_nosquash_str;
         int                        mdt_nosquash_strlen;
-        struct rw_semaphore        mdt_squash_sem;
+        cfs_rw_semaphore_t         mdt_squash_sem;
 
         cfs_proc_dir_entry_t      *mdt_proc_entry;
         struct lprocfs_stats      *mdt_stats;
@@ -190,7 +190,7 @@ struct mdt_object {
         int                     mot_ioepoch_count;
         int                     mot_writecount;
         /* Lock to protect object's IO epoch. */
-        struct semaphore        mot_ioepoch_sem;
+        cfs_semaphore_t         mot_ioepoch_sem;
 };
 
 enum mdt_object_flags {
@@ -368,7 +368,7 @@ struct mdt_thread_info {
                 struct obd_uuid    uuid[2];       /* for mdt_seq_init_cli()  */
                 char               ns_name[48];   /* for mdt_init0()         */
                 struct lustre_cfg_bufs bufs;      /* for mdt_stack_fini()    */
-                struct kstatfs     ksfs;          /* for mdt_statfs()        */
+                cfs_kstatfs_t      ksfs;          /* for mdt_statfs()        */
                 struct {
                         /* for mdt_readpage()      */
                         struct lu_rdpg     mti_rdpg;
index 3a16cde..0d3307a 100644
@@ -70,7 +70,7 @@ void mdt_exit_ucred(struct mdt_thread_info *info)
         if (uc->mu_valid != UCRED_INIT) {
                 uc->mu_suppgids[0] = uc->mu_suppgids[1] = -1;
                 if (uc->mu_ginfo) {
-                        put_group_info(uc->mu_ginfo);
+                        cfs_put_group_info(uc->mu_ginfo);
                         uc->mu_ginfo = NULL;
                 }
                 if (uc->mu_identity) {
@@ -82,15 +82,15 @@ void mdt_exit_ucred(struct mdt_thread_info *info)
         }
 }
 
-static int match_nosquash_list(struct rw_semaphore *sem,
-                               struct list_head *nidlist,
+static int match_nosquash_list(cfs_rw_semaphore_t *sem,
+                               cfs_list_t *nidlist,
                                lnet_nid_t peernid)
 {
         int rc;
         ENTRY;
-        down_read(sem);
+        cfs_down_read(sem);
         rc = cfs_match_nid(peernid, nidlist);
-        up_read(sem);
+        cfs_up_read(sem);
         RETURN(rc);
 }
 
@@ -251,7 +251,7 @@ static int new_init_ucred(struct mdt_thread_info *info, ucred_init_type_t type,
         if (!remote && perm & CFS_SETGRP_PERM) {
                 if (pud->pud_ngroups) {
                         /* setgroups for local client */
-                        ucred->mu_ginfo = groups_alloc(pud->pud_ngroups);
+                        ucred->mu_ginfo = cfs_groups_alloc(pud->pud_ngroups);
                         if (!ucred->mu_ginfo) {
                                 CERROR("failed to alloc %d groups\n",
                                        pud->pud_ngroups);
@@ -293,7 +293,7 @@ static int new_init_ucred(struct mdt_thread_info *info, ucred_init_type_t type,
 out:
         if (rc) {
                 if (ucred->mu_ginfo) {
-                        put_group_info(ucred->mu_ginfo);
+                        cfs_put_group_info(ucred->mu_ginfo);
                         ucred->mu_ginfo = NULL;
                 }
                 if (ucred->mu_identity) {
index d9347fc..34553d1 100644
@@ -152,7 +152,7 @@ static int lprocfs_rd_identity_expire(char *page, char **start, off_t off,
 
         *eof = 1;
         return snprintf(page, count, "%lu\n",
-                        mdt->mdt_identity_cache->uc_entry_expire / HZ);
+                        mdt->mdt_identity_cache->uc_entry_expire / CFS_HZ);
 }
 
 static int lprocfs_wr_identity_expire(struct file *file, const char *buffer,
@@ -166,7 +166,7 @@ static int lprocfs_wr_identity_expire(struct file *file, const char *buffer,
         if (rc)
                 return rc;
 
-        mdt->mdt_identity_cache->uc_entry_expire = val * HZ;
+        mdt->mdt_identity_cache->uc_entry_expire = val * CFS_HZ;
         return count;
 }
 
@@ -179,7 +179,7 @@ static int lprocfs_rd_identity_acquire_expire(char *page, char **start,
 
         *eof = 1;
         return snprintf(page, count, "%lu\n",
-                        mdt->mdt_identity_cache->uc_acquire_expire / HZ);
+                        mdt->mdt_identity_cache->uc_acquire_expire / CFS_HZ);
 }
 
 static int lprocfs_wr_identity_acquire_expire(struct file *file,
@@ -195,7 +195,7 @@ static int lprocfs_wr_identity_acquire_expire(struct file *file,
         if (rc)
                 return rc;
 
-        mdt->mdt_identity_cache->uc_acquire_expire = val * HZ;
+        mdt->mdt_identity_cache->uc_acquire_expire = val * CFS_HZ;
         return count;
 }
 
@@ -208,9 +208,9 @@ static int lprocfs_rd_identity_upcall(char *page, char **start, off_t off,
         int len;
 
         *eof = 1;
-        read_lock(&hash->uc_upcall_rwlock);
+        cfs_read_lock(&hash->uc_upcall_rwlock);
         len = snprintf(page, count, "%s\n", hash->uc_upcall);
-        read_unlock(&hash->uc_upcall_rwlock);
+        cfs_read_unlock(&hash->uc_upcall_rwlock);
         return len;
 }
 
@@ -227,14 +227,15 @@ static int lprocfs_wr_identity_upcall(struct file *file, const char *buffer,
                 return -EINVAL;
         }
 
-        if (copy_from_user(kernbuf, buffer, min_t(unsigned long, count,
-                                                  UC_CACHE_UPCALL_MAXPATH - 1)))
+        if (cfs_copy_from_user(kernbuf, buffer,
+                               min_t(unsigned long, count,
+                                     UC_CACHE_UPCALL_MAXPATH - 1)))
                 return -EFAULT;
 
         /* Remove any extraneous bits from the upcall (e.g. linefeeds) */
-        write_lock(&hash->uc_upcall_rwlock);
+        cfs_write_lock(&hash->uc_upcall_rwlock);
         sscanf(kernbuf, "%s", hash->uc_upcall);
-        write_unlock(&hash->uc_upcall_rwlock);
+        cfs_write_unlock(&hash->uc_upcall_rwlock);
 
         if (strcmp(hash->uc_name, obd->obd_name) != 0)
                 CWARN("%s: write to upcall name %s\n",
@@ -276,7 +277,7 @@ static int lprocfs_wr_identity_info(struct file *file, const char *buffer,
                 return count;
         }
 
-        if (copy_from_user(&sparam, buffer, sizeof(sparam))) {
+        if (cfs_copy_from_user(&sparam, buffer, sizeof(sparam))) {
                 CERROR("%s: bad identity data\n", obd->obd_name);
                 GOTO(out, rc = -EFAULT);
         }
@@ -308,7 +309,7 @@ static int lprocfs_wr_identity_info(struct file *file, const char *buffer,
                                sparam.idd_uid, sparam.idd_ngroups);
                         param = &sparam;
                         param->idd_ngroups = 0;
-                } else if (copy_from_user(param, buffer, size)) {
+                } else if (cfs_copy_from_user(param, buffer, size)) {
                         CERROR("%s: uid %u bad supplementary group data\n",
                                obd->obd_name, sparam.idd_uid);
                         OBD_FREE(param, size);
@@ -557,7 +558,7 @@ static int lprocfs_wr_root_squash(struct file *file, const char *buffer,
                 errmsg = "string too long";
                 GOTO(failed, rc = -EINVAL);
         }
-        if (copy_from_user(kernbuf, buffer, count)) {
+        if (cfs_copy_from_user(kernbuf, buffer, count)) {
                 errmsg = "bad address";
                 GOTO(failed, rc = -EFAULT);
         }
@@ -618,7 +619,7 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer,
         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
         int rc;
         char *kernbuf, *errmsg;
-        struct list_head tmp;
+        cfs_list_t tmp;
         ENTRY;
 
         OBD_ALLOC(kernbuf, count + 1);
@@ -626,7 +627,7 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer,
                 errmsg = "no memory";
                 GOTO(failed, rc = -ENOMEM);
         }
-        if (copy_from_user(kernbuf, buffer, count)) {
+        if (cfs_copy_from_user(kernbuf, buffer, count)) {
                 errmsg = "bad address";
                 GOTO(failed, rc = -EFAULT);
         }
@@ -634,15 +635,15 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer,
 
         if (!strcmp(kernbuf, "NONE") || !strcmp(kernbuf, "clear")) {
                 /* empty string is special case */
-                down_write(&mdt->mdt_squash_sem);
-                if (!list_empty(&mdt->mdt_nosquash_nids)) {
+                cfs_down_write(&mdt->mdt_squash_sem);
+                if (!cfs_list_empty(&mdt->mdt_nosquash_nids)) {
                         cfs_free_nidlist(&mdt->mdt_nosquash_nids);
                         OBD_FREE(mdt->mdt_nosquash_str,
                                  mdt->mdt_nosquash_strlen);
                         mdt->mdt_nosquash_str = NULL;
                         mdt->mdt_nosquash_strlen = 0;
                 }
-                up_write(&mdt->mdt_squash_sem);
+                cfs_up_write(&mdt->mdt_squash_sem);
                 LCONSOLE_INFO("%s: nosquash_nids is cleared\n",
                               obd->obd_name);
                 OBD_FREE(kernbuf, count + 1);
@@ -655,18 +656,18 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer,
                 GOTO(failed, rc = -EINVAL);
         }
 
-        down_write(&mdt->mdt_squash_sem);
-        if (!list_empty(&mdt->mdt_nosquash_nids)) {
+        cfs_down_write(&mdt->mdt_squash_sem);
+        if (!cfs_list_empty(&mdt->mdt_nosquash_nids)) {
                 cfs_free_nidlist(&mdt->mdt_nosquash_nids);
                 OBD_FREE(mdt->mdt_nosquash_str, mdt->mdt_nosquash_strlen);
         }
         mdt->mdt_nosquash_str = kernbuf;
         mdt->mdt_nosquash_strlen = count + 1;
-        list_splice(&tmp, &mdt->mdt_nosquash_nids);
+        cfs_list_splice(&tmp, &mdt->mdt_nosquash_nids);
 
         LCONSOLE_INFO("%s: nosquash_nids is set to %s\n",
                       obd->obd_name, kernbuf);
-        up_write(&mdt->mdt_squash_sem);
+        cfs_up_write(&mdt->mdt_squash_sem);
         RETURN(count);
 
  failed:
@@ -720,7 +721,7 @@ static int lprocfs_wr_mdt_som(struct file *file, const char *buffer,
         if (count > (sizeof(kernbuf) - 1))
                 return -EINVAL;
 
-        if (copy_from_user(kernbuf, buffer, count))
+        if (cfs_copy_from_user(kernbuf, buffer, count))
                 return -EFAULT;
 
         kernbuf[count] = '\0';
@@ -740,7 +741,7 @@ static int lprocfs_wr_mdt_som(struct file *file, const char *buffer,
         }
 
         /* 1 stands for self export. */
-        list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+        cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
                 if (exp == obd->obd_self_export)
                         continue;
                 if (exp->exp_connect_flags & OBD_CONNECT_MDS_MDS)
index 884eddb..391b4aa 100644
@@ -88,7 +88,7 @@ struct mdt_file_data *mdt_handle2mfd(struct mdt_thread_info *info,
         if (mfd == NULL &&
             lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
                 struct mdt_export_data *med = &req->rq_export->exp_mdt_data;
-                list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+                cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
                         if (mfd->mfd_old_handle.cookie == handle->cookie)
                                 RETURN (mfd);
                 }
@@ -100,7 +100,7 @@ struct mdt_file_data *mdt_handle2mfd(struct mdt_thread_info *info,
 /* free mfd */
 void mdt_mfd_free(struct mdt_file_data *mfd)
 {
-        LASSERT(list_empty(&mfd->mfd_list));
+        LASSERT(cfs_list_empty(&mfd->mfd_list));
         OBD_FREE_RCU(mfd, sizeof *mfd, &mfd->mfd_handle);
 }
 
@@ -162,14 +162,14 @@ int mdt_ioepoch_open(struct mdt_thread_info *info, struct mdt_object *o,
             !S_ISREG(lu_object_attr(&o->mot_obj.mo_lu)))
                 RETURN(0);
 
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         if (mdt_ioepoch_opened(o)) {
                 /* Epoch continues even if there are no writers yet. */
                 CDEBUG(D_INODE, "continue epoch "LPU64" for "DFID"\n",
                        o->mot_ioepoch, PFID(mdt_object_fid(o)));
         } else {
                 /* XXX: ->mdt_ioepoch is not initialized at the mount */
-                spin_lock(&mdt->mdt_ioepoch_lock);
+                cfs_spin_lock(&mdt->mdt_ioepoch_lock);
                 if (mdt->mdt_ioepoch < info->mti_replayepoch)
                         mdt->mdt_ioepoch = info->mti_replayepoch;
 
@@ -180,7 +180,7 @@ int mdt_ioepoch_open(struct mdt_thread_info *info, struct mdt_object *o,
                 else
                         o->mot_ioepoch = mdt->mdt_ioepoch;
 
-                spin_unlock(&mdt->mdt_ioepoch_lock);
+                cfs_spin_unlock(&mdt->mdt_ioepoch_lock);
 
                 CDEBUG(D_INODE, "starting epoch "LPU64" for "DFID"\n",
                        o->mot_ioepoch, PFID(mdt_object_fid(o)));
@@ -189,7 +189,7 @@ int mdt_ioepoch_open(struct mdt_thread_info *info, struct mdt_object *o,
                 cancel = 1;
         }
         o->mot_ioepoch_count++;
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
 
         /* Cancel Size-on-MDS attributes cached on clients for the open case.
          * In the truncate case, see mdt_reint_setattr(). */
@@ -255,7 +255,7 @@ static inline int mdt_ioepoch_close_on_eviction(struct mdt_thread_info *info,
 {
         int rc = 0;
 
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         CDEBUG(D_INODE, "Eviction. Closing IOepoch "LPU64" on "DFID". "
                "Count %d\n", o->mot_ioepoch, PFID(mdt_object_fid(o)),
                o->mot_ioepoch_count);
@@ -268,7 +268,7 @@ static inline int mdt_ioepoch_close_on_eviction(struct mdt_thread_info *info,
                 rc = mdt_som_attr_set(info, o, o->mot_ioepoch, MDT_SOM_DISABLE);
                 mdt_object_som_enable(o, o->mot_ioepoch);
         }
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         RETURN(rc);
 }
 
@@ -283,7 +283,7 @@ static inline int mdt_ioepoch_close_on_replay(struct mdt_thread_info *info,
         int rc = MDT_IOEPOCH_CLOSED;
         ENTRY;
 
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         CDEBUG(D_INODE, "Replay. Closing epoch "LPU64" on "DFID". Count %d\n",
                o->mot_ioepoch, PFID(mdt_object_fid(o)), o->mot_ioepoch_count);
         o->mot_ioepoch_count--;
@@ -295,7 +295,7 @@ static inline int mdt_ioepoch_close_on_replay(struct mdt_thread_info *info,
 
         if (!mdt_ioepoch_opened(o))
                 mdt_object_som_enable(o, info->mti_ioepoch->ioepoch);
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
 
         RETURN(rc);
 }
@@ -324,7 +324,7 @@ static inline int mdt_ioepoch_close_reg(struct mdt_thread_info *info,
         la = &info->mti_attr.ma_attr;
         achange = (info->mti_ioepoch->flags & MF_SOM_CHANGE);
 
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         o->mot_ioepoch_count--;
 
         tmp_ma = &info->mti_u.som.attr;
@@ -391,7 +391,7 @@ static inline int mdt_ioepoch_close_reg(struct mdt_thread_info *info,
                 mdt_object_som_enable(o, o->mot_ioepoch);
         }
 
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         /* If recovery is needed, tell the client to perform GETATTR under
          * the lock. */
         if (ret == MDT_IOEPOCH_GETATTR && recovery) {
@@ -403,7 +403,7 @@ static inline int mdt_ioepoch_close_reg(struct mdt_thread_info *info,
         RETURN(rc ? : ret);
 
 error_up:
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         return rc;
 }
 
@@ -472,7 +472,7 @@ int mdt_som_au_close(struct mdt_thread_info *info, struct mdt_object *o)
              !(info->mti_attr.ma_attr.la_valid & LA_SIZE)))
                 act = MDT_SOM_DISABLE;
 
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         /* Mark the object it is the recovery state if we failed to obtain
          * SOM attributes. */
         if (act == MDT_SOM_DISABLE)
@@ -486,7 +486,7 @@ int mdt_som_au_close(struct mdt_thread_info *info, struct mdt_object *o)
                         rc = mdt_som_attr_set(info, o, ioepoch, act);
                 mdt_object_som_enable(o, ioepoch);
         }
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         RETURN(rc);
 }
 
@@ -494,9 +494,9 @@ int mdt_write_read(struct mdt_object *o)
 {
         int rc = 0;
         ENTRY;
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         rc = o->mot_writecount;
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         RETURN(rc);
 }
 
@@ -504,21 +504,21 @@ int mdt_write_get(struct mdt_object *o)
 {
         int rc = 0;
         ENTRY;
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         if (o->mot_writecount < 0)
                 rc = -ETXTBSY;
         else
                 o->mot_writecount++;
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         RETURN(rc);
 }
 
 void mdt_write_put(struct mdt_object *o)
 {
         ENTRY;
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         o->mot_writecount--;
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         EXIT;
 }
 
@@ -526,21 +526,21 @@ static int mdt_write_deny(struct mdt_object *o)
 {
         int rc = 0;
         ENTRY;
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         if (o->mot_writecount > 0)
                 rc = -ETXTBSY;
         else
                 o->mot_writecount--;
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         RETURN(rc);
 }
 
 static void mdt_write_allow(struct mdt_object *o)
 {
         ENTRY;
-        down(&o->mot_ioepoch_sem);
+        cfs_down(&o->mot_ioepoch_sem);
         o->mot_writecount++;
-        up(&o->mot_ioepoch_sem);
+        cfs_up(&o->mot_ioepoch_sem);
         EXIT;
 }
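
The mdt_write_* helpers above are the clearest illustration of the semaphore half of the rename: every access to mot_writecount sits between cfs_down() and cfs_up(), which are drop-in replacements for the kernel's down()/up(). A minimal sketch of the same pattern using the renamed primitives (demo_sem and demo_count are illustrative names, not part of this patch):

    /* Sketch only: a counter guarded by a cfs_semaphore_t, mirroring the
     * mot_writecount pattern above.  Assumes the cfs_ wrappers keep the
     * semantics of the primitives they replace. */
    static cfs_semaphore_t demo_sem;   /* cfs_sema_init(&demo_sem, 1) at setup */
    static int             demo_count;

    static int demo_write_get(void)
    {
            int rc = 0;

            cfs_down(&demo_sem);        /* was: down() */
            if (demo_count < 0)
                    rc = -ETXTBSY;      /* writes currently denied */
            else
                    demo_count++;
            cfs_up(&demo_sem);          /* was: up() */
            return rc;
    }
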
 
@@ -557,7 +557,7 @@ static void mdt_empty_transno(struct mdt_thread_info* info)
                 return;
         }
 
-        spin_lock(&mdt->mdt_transno_lock);
+        cfs_spin_lock(&mdt->mdt_transno_lock);
         if (info->mti_transno == 0) {
                 info->mti_transno = ++ mdt->mdt_last_transno;
         } else {
@@ -565,7 +565,7 @@ static void mdt_empty_transno(struct mdt_thread_info* info)
                 if (info->mti_transno > mdt->mdt_last_transno)
                         mdt->mdt_last_transno = info->mti_transno;
         }
-        spin_unlock(&mdt->mdt_transno_lock);
+        cfs_spin_unlock(&mdt->mdt_transno_lock);
 
         CDEBUG(D_INODE, "transno = %llu, last_committed = %llu\n",
                         info->mti_transno,
@@ -682,10 +682,10 @@ static int mdt_mfd_open(struct mdt_thread_info *info, struct mdt_object *p,
                                        mfd, 
                                        PFID(mdt_object_fid(mfd->mfd_object)),
                                        info->mti_rr.rr_handle->cookie);
-                                spin_lock(&med->med_open_lock);
+                                cfs_spin_lock(&med->med_open_lock);
                                 class_handle_unhash(&old_mfd->mfd_handle);
-                                list_del_init(&old_mfd->mfd_list);
-                                spin_unlock(&med->med_open_lock);
+                                cfs_list_del_init(&old_mfd->mfd_list);
+                                cfs_spin_unlock(&med->med_open_lock);
                                 mdt_mfd_close(info, old_mfd);
                         }
                         CDEBUG(D_HA, "Store old cookie "LPX64" in new mfd\n",
@@ -696,15 +696,15 @@ static int mdt_mfd_open(struct mdt_thread_info *info, struct mdt_object *p,
                 repbody->handle.cookie = mfd->mfd_handle.h_cookie;
 
                 if (req->rq_export->exp_disconnected) {
-                        spin_lock(&med->med_open_lock);
+                        cfs_spin_lock(&med->med_open_lock);
                         class_handle_unhash(&mfd->mfd_handle);
-                        list_del_init(&mfd->mfd_list);
-                        spin_unlock(&med->med_open_lock);
+                        cfs_list_del_init(&mfd->mfd_list);
+                        cfs_spin_unlock(&med->med_open_lock);
                         mdt_mfd_close(info, mfd);
                 } else {
-                        spin_lock(&med->med_open_lock);
-                        list_add(&mfd->mfd_list, &med->med_open_head);
-                        spin_unlock(&med->med_open_lock);
+                        cfs_spin_lock(&med->med_open_lock);
+                        cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+                        cfs_spin_unlock(&med->med_open_lock);
                 }
 
                 mdt_empty_transno(info);
@@ -728,7 +728,7 @@ static int mdt_finish_open(struct mdt_thread_info *info,
         struct mdt_body         *repbody;
         int                      rc = 0;
         int                      isreg, isdir, islnk;
-        struct list_head        *t;
+        cfs_list_t              *t;
         ENTRY;
 
         LASSERT(ma->ma_valid & MA_INODE);
@@ -844,15 +844,15 @@ static int mdt_finish_open(struct mdt_thread_info *info,
 
         mfd = NULL;
         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
-                spin_lock(&med->med_open_lock);
-                list_for_each(t, &med->med_open_head) {
-                        mfd = list_entry(t, struct mdt_file_data, mfd_list);
+                cfs_spin_lock(&med->med_open_lock);
+                cfs_list_for_each(t, &med->med_open_head) {
+                        mfd = cfs_list_entry(t, struct mdt_file_data, mfd_list);
                         if (mfd->mfd_xid == req->rq_xid) {
                                 break;
                         }
                         mfd = NULL;
                 }
-                spin_unlock(&med->med_open_lock);
+                cfs_spin_unlock(&med->med_open_lock);
 
                 if (mfd != NULL) {
                         repbody->handle.cookie = mfd->mfd_handle.h_cookie;
@@ -1483,10 +1483,10 @@ int mdt_mfd_close(struct mdt_thread_info *info, struct mdt_file_data *mfd)
 
                 LASSERT(mdt_info_req(info));
                 med = &mdt_info_req(info)->rq_export->exp_mdt_data;
-                spin_lock(&med->med_open_lock);
-                list_add(&mfd->mfd_list, &med->med_open_head);
+                cfs_spin_lock(&med->med_open_lock);
+                cfs_list_add(&mfd->mfd_list, &med->med_open_head);
                 class_handle_hash_back(&mfd->mfd_handle);
-                spin_unlock(&med->med_open_lock);
+                cfs_spin_unlock(&med->med_open_lock);
 
                 if (ret == MDT_IOEPOCH_OPENED) {
                         ret = 0;
@@ -1554,10 +1554,10 @@ int mdt_close(struct mdt_thread_info *info)
         }
 
         med = &req->rq_export->exp_mdt_data;
-        spin_lock(&med->med_open_lock);
+        cfs_spin_lock(&med->med_open_lock);
         mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
         if (mdt_mfd_closed(mfd)) {
-                spin_unlock(&med->med_open_lock);
+                cfs_spin_unlock(&med->med_open_lock);
                 CDEBUG(D_INODE, "no handle for file close: fid = "DFID
                        ": cookie = "LPX64"\n", PFID(info->mti_rr.rr_fid1),
                        info->mti_ioepoch->handle.cookie);
@@ -1565,8 +1565,8 @@ int mdt_close(struct mdt_thread_info *info)
                 rc = -ESTALE;
         } else {
                 class_handle_unhash(&mfd->mfd_handle);
-                list_del_init(&mfd->mfd_list);
-                spin_unlock(&med->med_open_lock);
+                cfs_list_del_init(&mfd->mfd_list);
+                cfs_spin_unlock(&med->med_open_lock);
 
                 /* Do not lose object before last unlink. */
                 o = mfd->mfd_object;
@@ -1624,10 +1624,10 @@ int mdt_done_writing(struct mdt_thread_info *info)
                 RETURN(lustre_msg_get_status(req->rq_repmsg));
 
         med = &info->mti_exp->exp_mdt_data;
-        spin_lock(&med->med_open_lock);
+        cfs_spin_lock(&med->med_open_lock);
         mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
         if (mfd == NULL) {
-                spin_unlock(&med->med_open_lock);
+                cfs_spin_unlock(&med->med_open_lock);
                 CDEBUG(D_INODE, "no handle for done write: fid = "DFID
                        ": cookie = "LPX64" ioepoch = "LPU64"\n",
                        PFID(info->mti_rr.rr_fid1),
@@ -1645,8 +1645,8 @@ int mdt_done_writing(struct mdt_thread_info *info)
         LASSERT(mfd->mfd_mode == FMODE_EPOCH ||
                 mfd->mfd_mode == FMODE_TRUNC);
         class_handle_unhash(&mfd->mfd_handle);
-        list_del_init(&mfd->mfd_list);
-        spin_unlock(&med->med_open_lock);
+        cfs_list_del_init(&mfd->mfd_list);
+        cfs_spin_unlock(&med->med_open_lock);
 
         /* Set EPOCH CLOSE flag if not set by client. */
         info->mti_ioepoch->flags |= MF_EPOCH_CLOSE;
index c290643..9c1a885 100644 (file)
@@ -251,7 +251,7 @@ static int mdt_clients_data_init(const struct lu_env *env,
         /* When we do a clean MDS shutdown, we save the last_transno into
          * the header.  If we find clients with higher last_transno values
          * then those clients may need recovery done. */
-        LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
+        LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
         for (cl_idx = 0, off = lsd->lsd_client_start;
              off < last_size; cl_idx++) {
                 __u64 last_transno;
@@ -311,10 +311,10 @@ static int mdt_clients_data_init(const struct lu_env *env,
                         /* VBR: set export last committed version */
                         exp->exp_last_committed = last_transno;
                         lcd = NULL;
-                        spin_lock(&exp->exp_lock);
+                        cfs_spin_lock(&exp->exp_lock);
                         exp->exp_connecting = 0;
                         exp->exp_in_recovery = 0;
-                        spin_unlock(&exp->exp_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
                         obd->obd_max_recoverable_clients++;
                         class_export_put(exp);
                 }
@@ -322,10 +322,10 @@ static int mdt_clients_data_init(const struct lu_env *env,
                 CDEBUG(D_OTHER, "client at idx %d has last_transno="LPU64"\n",
                        cl_idx, last_transno);
                 /* protect __u64 value update */
-                spin_lock(&mdt->mdt_transno_lock);
+                cfs_spin_lock(&mdt->mdt_transno_lock);
                 mdt->mdt_last_transno = max(last_transno,
                                             mdt->mdt_last_transno);
-                spin_unlock(&mdt->mdt_transno_lock);
+                cfs_spin_unlock(&mdt->mdt_transno_lock);
         }
 
 err_client:
@@ -433,9 +433,9 @@ static int mdt_server_data_init(const struct lu_env *env,
 
         lsd->lsd_feature_incompat |= OBD_INCOMPAT_FID;
 
-        spin_lock(&mdt->mdt_transno_lock);
+        cfs_spin_lock(&mdt->mdt_transno_lock);
         mdt->mdt_last_transno = lsd->lsd_last_transno;
-        spin_unlock(&mdt->mdt_transno_lock);
+        cfs_spin_unlock(&mdt->mdt_transno_lock);
 
         CDEBUG(D_INODE, "========BEGIN DUMPING LAST_RCVD========\n");
         CDEBUG(D_INODE, "%s: server last_transno: "LPU64"\n",
@@ -466,11 +466,11 @@ static int mdt_server_data_init(const struct lu_env *env,
         if (rc)
                 GOTO(err_client, rc);
 
-        spin_lock(&mdt->mdt_transno_lock);
+        cfs_spin_lock(&mdt->mdt_transno_lock);
         /* obd_last_committed is used for compatibility
          * with other lustre recovery code */
         obd->obd_last_committed = mdt->mdt_last_transno;
-        spin_unlock(&mdt->mdt_transno_lock);
+        cfs_spin_unlock(&mdt->mdt_transno_lock);
 
         mdt->mdt_mount_count = mount_count + 1;
         lsd->lsd_mount_count = mdt->mdt_mount_count;
@@ -505,9 +505,9 @@ static int mdt_server_data_update(const struct lu_env *env,
         CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
                mdt->mdt_mount_count, mdt->mdt_last_transno);
 
-        spin_lock(&mdt->mdt_transno_lock);
+        cfs_spin_lock(&mdt->mdt_transno_lock);
         mdt->mdt_lsd.lsd_last_transno = mdt->mdt_last_transno;
-        spin_unlock(&mdt->mdt_transno_lock);
+        cfs_spin_unlock(&mdt->mdt_transno_lock);
 
         rc = mdt_last_rcvd_header_write(env, mdt, th);
         mdt_trans_stop(env, mdt, th);
@@ -542,17 +542,17 @@ int mdt_client_new(const struct lu_env *env, struct mdt_device *mdt)
         /* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
          * there's no need for extra complication here
          */
-        spin_lock(&mdt->mdt_client_bitmap_lock);
-        cl_idx = find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
+        cfs_spin_lock(&mdt->mdt_client_bitmap_lock);
+        cl_idx = cfs_find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
         if (cl_idx >= LR_MAX_CLIENTS ||
             OBD_FAIL_CHECK(OBD_FAIL_MDS_CLIENT_ADD)) {
                 CERROR("no room for %u clients - fix LR_MAX_CLIENTS\n",
                        cl_idx);
-                spin_unlock(&mdt->mdt_client_bitmap_lock);
+                cfs_spin_unlock(&mdt->mdt_client_bitmap_lock);
                 RETURN(-EOVERFLOW);
         }
-        set_bit(cl_idx, bitmap);
-        spin_unlock(&mdt->mdt_client_bitmap_lock);
+        cfs_set_bit(cl_idx, bitmap);
+        cfs_spin_unlock(&mdt->mdt_client_bitmap_lock);
 
         CDEBUG(D_INFO, "client at idx %d with UUID '%s' added\n",
                cl_idx, med->med_lcd->lcd_uuid);
@@ -560,7 +560,7 @@ int mdt_client_new(const struct lu_env *env, struct mdt_device *mdt)
         med->med_lr_idx = cl_idx;
         med->med_lr_off = lsd->lsd_client_start +
                           (cl_idx * lsd->lsd_client_size);
-        init_mutex(&med->med_lcd_lock);
+        cfs_init_mutex(&med->med_lcd_lock);
 
         LASSERTF(med->med_lr_off > 0, "med_lr_off = %llu\n", med->med_lr_off);
 
@@ -579,9 +579,9 @@ int mdt_client_new(const struct lu_env *env, struct mdt_device *mdt)
          * server down with lots of sync writes.
          */
         mdt_trans_add_cb(th, lut_cb_client, class_export_cb_get(mti->mti_exp));
-        spin_lock(&mti->mti_exp->exp_lock);
+        cfs_spin_lock(&mti->mti_exp->exp_lock);
         mti->mti_exp->exp_need_sync = 1;
-        spin_unlock(&mti->mti_exp->exp_lock);
+        cfs_spin_unlock(&mti->mti_exp->exp_lock);
 
         rc = mdt_last_rcvd_write(env, mdt, lcd, &off, th);
         CDEBUG(D_INFO, "wrote client lcd at idx %u off %llu (len %u)\n",
@@ -621,13 +621,13 @@ int mdt_client_add(const struct lu_env *env,
         if (!strcmp(med->med_lcd->lcd_uuid, obd->obd_uuid.uuid))
                 RETURN(0);
 
-        spin_lock(&mdt->mdt_client_bitmap_lock);
-        if (test_and_set_bit(cl_idx, bitmap)) {
+        cfs_spin_lock(&mdt->mdt_client_bitmap_lock);
+        if (cfs_test_and_set_bit(cl_idx, bitmap)) {
                 CERROR("MDS client %d: bit already set in bitmap!!\n",
                        cl_idx);
                 LBUG();
         }
-        spin_unlock(&mdt->mdt_client_bitmap_lock);
+        cfs_spin_unlock(&mdt->mdt_client_bitmap_lock);
 
         CDEBUG(D_INFO, "client at idx %d with UUID '%s' added\n",
                cl_idx, med->med_lcd->lcd_uuid);
@@ -635,7 +635,7 @@ int mdt_client_add(const struct lu_env *env,
         med->med_lr_idx = cl_idx;
         med->med_lr_off = lsd->lsd_client_start +
                           (cl_idx * lsd->lsd_client_size);
-        init_mutex(&med->med_lcd_lock);
+        cfs_init_mutex(&med->med_lcd_lock);
 
         LASSERTF(med->med_lr_off > 0, "med_lr_off = %llu\n", med->med_lr_off);
 
@@ -686,7 +686,7 @@ int mdt_client_del(const struct lu_env *env, struct mdt_device *mdt)
          * Clear the bit _after_ zeroing out the client so we don't race with
          * mdt_client_add and zero out new clients.
          */
-        if (!test_bit(med->med_lr_idx, mdt->mdt_client_bitmap)) {
+        if (!cfs_test_bit(med->med_lr_idx, mdt->mdt_client_bitmap)) {
                 CERROR("MDT client %u: bit already clear in bitmap!!\n",
                        med->med_lr_idx);
                 LBUG();
@@ -702,25 +702,25 @@ int mdt_client_del(const struct lu_env *env, struct mdt_device *mdt)
         if (IS_ERR(th))
                 GOTO(free, rc = PTR_ERR(th));
 
-        mutex_down(&med->med_lcd_lock);
+        cfs_mutex_down(&med->med_lcd_lock);
         memset(lcd, 0, sizeof *lcd);
         rc = mdt_last_rcvd_write(env, mdt, lcd, &off, th);
         med->med_lcd = NULL;
-        mutex_up(&med->med_lcd_lock);
+        cfs_mutex_up(&med->med_lcd_lock);
         mdt_trans_stop(env, mdt, th);
 
-        spin_lock(&mdt->mdt_client_bitmap_lock);
-        clear_bit(med->med_lr_idx, mdt->mdt_client_bitmap);
-        spin_unlock(&mdt->mdt_client_bitmap_lock);
+        cfs_spin_lock(&mdt->mdt_client_bitmap_lock);
+        cfs_clear_bit(med->med_lr_idx, mdt->mdt_client_bitmap);
+        cfs_spin_unlock(&mdt->mdt_client_bitmap_lock);
 
         CDEBUG(rc == 0 ? D_INFO : D_ERROR, "Zeroing out client idx %u in "
                "%s, rc %d\n",  med->med_lr_idx, LAST_RCVD, rc);
         OBD_FREE_PTR(lcd);
         RETURN(0);
 free:
-        mutex_down(&med->med_lcd_lock);
+        cfs_mutex_down(&med->med_lcd_lock);
         med->med_lcd = NULL;
-        mutex_up(&med->med_lcd_lock);
+        cfs_mutex_up(&med->med_lcd_lock);
         OBD_FREE_PTR(lcd);
         return 0;
 }
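
The client-bitmap hunks above all have one shape: probe or flip a bit under mdt_client_bitmap_lock, then drop the lock. A condensed sketch of the allocate/release pair with the renamed bit operations (MAX_SLOTS, slot_lock and slot_bitmap are illustrative names):

    /* Sketch: slot allocation in a bitmap via the cfs_ bit operations. */
    #define MAX_SLOTS 128

    static cfs_spinlock_t slot_lock = CFS_SPIN_LOCK_UNLOCKED;
    static unsigned long  slot_bitmap[MAX_SLOTS / (8 * sizeof(long))];

    static int slot_alloc(void)
    {
            int idx;

            cfs_spin_lock(&slot_lock);
            idx = cfs_find_first_zero_bit(slot_bitmap, MAX_SLOTS);
            if (idx >= MAX_SLOTS) {
                    cfs_spin_unlock(&slot_lock);
                    return -EOVERFLOW;
            }
            cfs_set_bit(idx, slot_bitmap);          /* was: set_bit() */
            cfs_spin_unlock(&slot_lock);
            return idx;
    }

    static void slot_free(int idx)
    {
            cfs_spin_lock(&slot_lock);
            if (!cfs_test_bit(idx, slot_bitmap))    /* was: test_bit() */
                    LBUG();
            cfs_clear_bit(idx, slot_bitmap);        /* was: clear_bit() */
            cfs_spin_unlock(&slot_lock);
    }
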
@@ -746,12 +746,12 @@ static int mdt_last_rcvd_update(struct mdt_thread_info *mti,
         med = &req->rq_export->exp_mdt_data;
         LASSERT(med);
 
-        mutex_down(&med->med_lcd_lock);
+        cfs_mutex_down(&med->med_lcd_lock);
         lcd = med->med_lcd;
         /* if the export has already been disconnected, we have no last_rcvd
          * slot; just update the server data with the latest transno */
         if (lcd == NULL) {
-                mutex_up(&med->med_lcd_lock);
+                cfs_mutex_up(&med->med_lcd_lock);
                 CWARN("commit transaction for disconnected client %s: rc %d\n",
                       req->rq_export->exp_client_uuid.uuid, rc);
                 err = mdt_last_rcvd_header_write(mti->mti_env, mdt, th);
@@ -791,7 +791,7 @@ static int mdt_last_rcvd_update(struct mdt_thread_info *mti,
         } else {
                 err = mdt_last_rcvd_write(mti->mti_env, mdt, lcd, &off, th);
         }
-        mutex_up(&med->med_lcd_lock);
+        cfs_mutex_up(&med->med_lcd_lock);
         RETURN(err);
 }
 
@@ -848,7 +848,7 @@ static int mdt_txn_stop_cb(const struct lu_env *env,
         }
 
         mti->mti_has_trans = 1;
-        spin_lock(&mdt->mdt_transno_lock);
+        cfs_spin_lock(&mdt->mdt_transno_lock);
         if (txn->th_result != 0) {
                 if (mti->mti_transno != 0) {
                         CERROR("Replay transno "LPU64" failed: rc %i\n",
@@ -861,7 +861,7 @@ static int mdt_txn_stop_cb(const struct lu_env *env,
                 if (mti->mti_transno > mdt->mdt_last_transno)
                         mdt->mdt_last_transno = mti->mti_transno;
         }
-        spin_unlock(&mdt->mdt_transno_lock);
+        cfs_spin_unlock(&mdt->mdt_transno_lock);
         /* sometimes the reply message has not been successfully packed */
         LASSERT(req != NULL && req->rq_repmsg != NULL);
 
@@ -967,15 +967,16 @@ void mdt_fs_cleanup(const struct lu_env *env, struct mdt_device *mdt)
 static void mdt_steal_ack_locks(struct ptlrpc_request *req)
 {
         struct obd_export         *exp = req->rq_export;
-        struct list_head          *tmp;
+        cfs_list_t                *tmp;
         struct ptlrpc_reply_state *oldrep;
         struct ptlrpc_service     *svc;
         int                        i;
 
         /* CAVEAT EMPTOR: spinlock order */
-        spin_lock(&exp->exp_lock);
-        list_for_each (tmp, &exp->exp_outstanding_replies) {
-                oldrep = list_entry(tmp, struct ptlrpc_reply_state,rs_exp_list);
+        cfs_spin_lock(&exp->exp_lock);
+        cfs_list_for_each (tmp, &exp->exp_outstanding_replies) {
+                oldrep = cfs_list_entry(tmp, struct ptlrpc_reply_state,
+                                        rs_exp_list);
 
                 if (oldrep->rs_xid != req->rq_xid)
                         continue;
@@ -987,9 +988,9 @@ static void mdt_steal_ack_locks(struct ptlrpc_request *req)
                                 oldrep->rs_opc);
 
                 svc = oldrep->rs_service;
-                spin_lock (&svc->srv_lock);
+                cfs_spin_lock (&svc->srv_lock);
 
-                list_del_init (&oldrep->rs_exp_list);
+                cfs_list_del_init (&oldrep->rs_exp_list);
 
                 CWARN("Stealing %d locks from rs %p x"LPD64".t"LPD64
                       " o%d NID %s\n",
@@ -1003,14 +1004,14 @@ static void mdt_steal_ack_locks(struct ptlrpc_request *req)
                 oldrep->rs_nlocks = 0;
 
                 DEBUG_REQ(D_HA, req, "stole locks for");
-                spin_lock(&oldrep->rs_lock);
+                cfs_spin_lock(&oldrep->rs_lock);
                 ptlrpc_schedule_difficult_reply (oldrep);
-                spin_unlock(&oldrep->rs_lock);
+                cfs_spin_unlock(&oldrep->rs_lock);
 
-                spin_unlock (&svc->srv_lock);
+                cfs_spin_unlock (&svc->srv_lock);
                 break;
         }
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
 }
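
mdt_steal_ack_locks() shows the list half of the rename: struct list_head becomes cfs_list_t, and each list helper gains the prefix with unchanged semantics. A self-contained sketch of the same locked walk-and-detach idiom used here and in the mfd code above (struct demo_item and the other demo_* names are illustrative):

    /* Sketch: find an entry by xid on a cfs_list_t and unlink it. */
    struct demo_item {
            cfs_list_t di_list;
            __u64      di_xid;
    };

    static CFS_LIST_HEAD(demo_head);
    static cfs_spinlock_t demo_lock = CFS_SPIN_LOCK_UNLOCKED;

    static struct demo_item *demo_find_del(__u64 xid)
    {
            struct demo_item *item = NULL;
            cfs_list_t       *tmp;

            cfs_spin_lock(&demo_lock);
            cfs_list_for_each(tmp, &demo_head) {    /* was: list_for_each() */
                    item = cfs_list_entry(tmp, struct demo_item, di_list);
                    if (item->di_xid == xid) {
                            cfs_list_del_init(&item->di_list);
                            break;
                    }
                    item = NULL;
            }
            cfs_spin_unlock(&demo_lock);
            return item;
    }
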
 
 /**
@@ -1142,13 +1143,13 @@ static void mdt_reconstruct_setattr(struct mdt_thread_info *mti,
 
                 repbody = req_capsule_server_get(mti->mti_pill, &RMF_MDT_BODY);
                 repbody->ioepoch = obj->mot_ioepoch;
-                spin_lock(&med->med_open_lock);
-                list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+                cfs_spin_lock(&med->med_open_lock);
+                cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
                         if (mfd->mfd_xid == req->rq_xid)
                                 break;
                 }
                 LASSERT(&mfd->mfd_list != &med->med_open_head);
-                spin_unlock(&med->med_open_lock);
+                cfs_spin_unlock(&med->med_open_lock);
                 repbody->handle.cookie = mfd->mfd_handle.h_cookie;
         }
 
index dc99028..7799d54 100644 (file)
@@ -120,16 +120,16 @@ int mdt_version_get_check(struct mdt_thread_info *info, int index)
                 /** Sanity check for malformed buffers */
                 if (pre_versions == NULL) {
                         CERROR("No versions in request buffer\n");
-                        spin_lock(&req->rq_export->exp_lock);
+                        cfs_spin_lock(&req->rq_export->exp_lock);
                         req->rq_export->exp_vbr_failed = 1;
-                        spin_unlock(&req->rq_export->exp_lock);
+                        cfs_spin_unlock(&req->rq_export->exp_lock);
                         RETURN(-EOVERFLOW);
                 } else if (pre_versions[index] != curr_version) {
                         CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
                                pre_versions[index], curr_version);
-                        spin_lock(&req->rq_export->exp_lock);
+                        cfs_spin_lock(&req->rq_export->exp_lock);
                         req->rq_export->exp_vbr_failed = 1;
-                        spin_unlock(&req->rq_export->exp_lock);
+                        cfs_spin_unlock(&req->rq_export->exp_lock);
                         RETURN(-EOVERFLOW);
                 }
         }
@@ -382,9 +382,9 @@ static int mdt_reint_setattr(struct mdt_thread_info *info,
                 mfd->mfd_object = mo;
                 mfd->mfd_xid = req->rq_xid;
 
-                spin_lock(&med->med_open_lock);
-                list_add(&mfd->mfd_list, &med->med_open_head);
-                spin_unlock(&med->med_open_lock);
+                cfs_spin_lock(&med->med_open_lock);
+                cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+                cfs_spin_unlock(&med->med_open_lock);
                 repbody->handle.cookie = mfd->mfd_handle.h_cookie;
         }
 
@@ -395,10 +395,10 @@ static int mdt_reint_setattr(struct mdt_thread_info *info,
                 LASSERT(mdt_conn_flags(info) & OBD_CONNECT_SOM);
                 LASSERT(info->mti_ioepoch);
 
-                spin_lock(&med->med_open_lock);
+                cfs_spin_lock(&med->med_open_lock);
                 mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
                 if (mfd == NULL) {
-                        spin_unlock(&med->med_open_lock);
+                        cfs_spin_unlock(&med->med_open_lock);
                         CDEBUG(D_INODE, "no handle for file close: "
                                "fid = "DFID": cookie = "LPX64"\n",
                                PFID(info->mti_rr.rr_fid1),
@@ -409,8 +409,8 @@ static int mdt_reint_setattr(struct mdt_thread_info *info,
                 LASSERT(!(info->mti_ioepoch->flags & MF_EPOCH_CLOSE));
 
                 class_handle_unhash(&mfd->mfd_handle);
-                list_del_init(&mfd->mfd_list);
-                spin_unlock(&med->med_open_lock);
+                cfs_list_del_init(&mfd->mfd_list);
+                cfs_spin_unlock(&med->med_open_lock);
 
                 /* Close the found mfd, update attributes. */
                 ma->ma_lmm_size = info->mti_mdt->mdt_max_mdsize;
index b7eb9b0..412e7e1 100644 (file)
@@ -107,7 +107,7 @@ int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id)
 
 /********************** config llog list **********************/
 static CFS_LIST_HEAD(config_llog_list);
-static spinlock_t       config_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t       config_list_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 /* Take a reference to a config log */
 static int config_log_get(struct config_llog_data *cld)
@@ -115,9 +115,9 @@ static int config_log_get(struct config_llog_data *cld)
         ENTRY;
         if (cld->cld_stopping)
                 RETURN(1);
-        atomic_inc(&cld->cld_refcount);
+        cfs_atomic_inc(&cld->cld_refcount);
         CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
-               atomic_read(&cld->cld_refcount));
+               cfs_atomic_read(&cld->cld_refcount));
         RETURN(0);
 }
 
@@ -128,14 +128,14 @@ static void config_log_put(struct config_llog_data *cld)
         ENTRY;
 
         CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
-               atomic_read(&cld->cld_refcount));
-        LASSERT(atomic_read(&cld->cld_refcount) > 0);
+               cfs_atomic_read(&cld->cld_refcount));
+        LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
 
         /* spinlock to make sure no item with 0 refcount stays in the list */
-        spin_lock(&config_list_lock);
-        if (unlikely(atomic_dec_and_test(&cld->cld_refcount))) {
-                list_del(&cld->cld_list_chain);
-                spin_unlock(&config_list_lock);
+        cfs_spin_lock(&config_list_lock);
+        if (unlikely(cfs_atomic_dec_and_test(&cld->cld_refcount))) {
+                cfs_list_del(&cld->cld_list_chain);
+                cfs_spin_unlock(&config_list_lock);
 
                 CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
 
@@ -151,7 +151,7 @@ static void config_log_put(struct config_llog_data *cld)
                                  strlen(cld->cld_cfg.cfg_instance) + 1);
                 OBD_FREE(cld, sizeof(*cld));
         } else {
-                spin_unlock(&config_list_lock);
+                cfs_spin_unlock(&config_list_lock);
         }
 
         EXIT;
@@ -176,8 +176,8 @@ struct config_llog_data *config_log_find(char *logname,
                 RETURN(ERR_PTR(-EINVAL));
         }
 
-        spin_lock(&config_list_lock);
-        list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+        cfs_spin_lock(&config_list_lock);
+        cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
                 if (match_instance && cld->cld_cfg.cfg_instance &&
                     strcmp(logid, cld->cld_cfg.cfg_instance) == 0)
                         goto out_found;
@@ -185,13 +185,13 @@ struct config_llog_data *config_log_find(char *logname,
                     strcmp(logid, cld->cld_logname) == 0)
                         goto out_found;
         }
-        spin_unlock(&config_list_lock);
+        cfs_spin_unlock(&config_list_lock);
 
         CDEBUG(D_CONFIG, "can't get log %s\n", logid);
         RETURN(ERR_PTR(-ENOENT));
 out_found:
-        atomic_inc(&cld->cld_refcount);
-        spin_unlock(&config_list_lock);
+        cfs_atomic_inc(&cld->cld_refcount);
+        cfs_spin_unlock(&config_list_lock);
         LASSERT(cld->cld_stopping == 0 || cld->cld_is_sptlrpc == 0);
         RETURN(cld);
 }
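
config_log_get()/config_log_put() above carry the atomic rename: cfs_atomic_t with cfs_atomic_inc/read/dec_and_test, and the final put is serialized against config_list_lock so that a zero-refcount log is never visible on the list. A minimal sketch of that get/put discipline (all demo_* names are illustrative):

    /* Sketch: refcounted object whose final put also unlinks it from a
     * global list, mirroring config_log_put() above. */
    struct demo_log {
            cfs_atomic_t dl_ref;
            cfs_list_t   dl_chain;
    };

    static cfs_spinlock_t demo_list_lock = CFS_SPIN_LOCK_UNLOCKED;

    static void demo_log_get(struct demo_log *log)
    {
            cfs_atomic_inc(&log->dl_ref);           /* was: atomic_inc() */
    }

    static void demo_log_put(struct demo_log *log)
    {
            LASSERT(cfs_atomic_read(&log->dl_ref) > 0);

            /* take the list lock first: no listed entry may reach ref 0 */
            cfs_spin_lock(&demo_list_lock);
            if (cfs_atomic_dec_and_test(&log->dl_ref)) {
                    cfs_list_del(&log->dl_chain);
                    cfs_spin_unlock(&demo_list_lock);
                    OBD_FREE_PTR(log);
            } else {
                    cfs_spin_unlock(&demo_list_lock);
            }
    }
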
@@ -225,7 +225,7 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd,
         cld->cld_cfg.cfg_flags = 0;
         cld->cld_cfg.cfg_sb = sb;
         cld->cld_is_sptlrpc = is_sptlrpc;
-        atomic_set(&cld->cld_refcount, 1);
+        cfs_atomic_set(&cld->cld_refcount, 1);
 
         /* Keep the mgc around until we are done */
         cld->cld_mgcexp = class_export_get(obd->obd_self_export);
@@ -243,9 +243,9 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd,
 
         rc = mgc_logname2resid(logname, &cld->cld_resid);
 
-        spin_lock(&config_list_lock);
-        list_add(&cld->cld_list_chain, &config_llog_list);
-        spin_unlock(&config_list_lock);
+        cfs_spin_lock(&config_list_lock);
+        cfs_list_add(&cld->cld_list_chain, &config_llog_list);
+        cfs_spin_unlock(&config_list_lock);
 
         if (rc) {
                 config_log_put(cld);
@@ -311,7 +311,7 @@ static int config_log_add(struct obd_device *obd, char *logname,
         RETURN(0);
 }
 
-DECLARE_MUTEX(llog_process_lock);
+CFS_DECLARE_MUTEX(llog_process_lock);
 
 /* Stop watching for updates on this log. */
 static int config_log_end(char *logname, struct config_llog_instance *cfg)
@@ -324,7 +324,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
         if (IS_ERR(cld))
                 RETURN(PTR_ERR(cld));
 
-        down(&llog_process_lock);
+        cfs_down(&llog_process_lock);
         /*
          * if cld_stopping is set, it means we didn't start the log thus
          * not owning the start ref. this can happen after previous umount:
@@ -333,19 +333,19 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
          * calling start_log.
          */
         if (unlikely(cld->cld_stopping)) {
-                up(&llog_process_lock);
+                cfs_up(&llog_process_lock);
                 /* drop the ref from the find */
                 config_log_put(cld);
                 RETURN(rc);
         }
 
         cld->cld_stopping = 1;
-        up(&llog_process_lock);
+        cfs_up(&llog_process_lock);
 
-        spin_lock(&config_list_lock);
+        cfs_spin_lock(&config_list_lock);
         cld_sptlrpc = cld->cld_sptlrpc;
         cld->cld_sptlrpc = NULL;
-        spin_unlock(&config_list_lock);
+        cfs_spin_unlock(&config_list_lock);
 
         if (cld_sptlrpc)
                 config_log_put(cld_sptlrpc);
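
llog_process_lock above shows the statically declared flavor: DECLARE_MUTEX() becomes CFS_DECLARE_MUTEX(), still paired with cfs_down()/cfs_up(). A short sketch of the stop-flag handshake it protects in config_log_end() (demo_* names are illustrative):

    /* Sketch: a statically declared mutex-style semaphore guarding a
     * stopping flag, as llog_process_lock guards cld_stopping above. */
    CFS_DECLARE_MUTEX(demo_process_lock);   /* was: DECLARE_MUTEX() */
    static int demo_stopping;

    static int demo_log_end(void)
    {
            cfs_down(&demo_process_lock);
            if (demo_stopping) {                /* already stopped elsewhere */
                    cfs_up(&demo_process_lock);
                    return 0;
            }
            demo_stopping = 1;
            cfs_up(&demo_process_lock);
            return 1;
    }
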
@@ -372,12 +372,12 @@ static int mgc_requeue_add(struct config_llog_data *cld, int later);
 
 static void do_requeue(struct config_llog_data *cld)
 {
-        LASSERT(atomic_read(&cld->cld_refcount) > 0);
+        LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
 
         /* Do not run mgc_process_log on a disconnected export or an
            export which is being disconnected. Take the client
            semaphore to make the check non-racy. */
-        down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+        cfs_down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
         if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
                 CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
                 mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
@@ -385,7 +385,7 @@ static void do_requeue(struct config_llog_data *cld)
                 CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
                        cld->cld_logname);
         }
-        up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+        cfs_up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
 
         /* Whether we enqueued again or not in mgc_process_log, we're done
          * with the ref from the old enqueue */
@@ -404,20 +404,21 @@ static int mgc_requeue_thread(void *data)
 
         CDEBUG(D_MGC, "Starting requeue thread\n");
 
-        lwi_later = LWI_TIMEOUT(60 * HZ, NULL, NULL);
+        lwi_later = LWI_TIMEOUT(60 * CFS_HZ, NULL, NULL);
         l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP), &lwi_later);
 
         /* Keep trying failed locks periodically */
-        spin_lock(&config_list_lock);
+        cfs_spin_lock(&config_list_lock);
         while (rq_state & (RQ_NOW | RQ_LATER)) {
                 /* Any new or requeued lostlocks will change the state */
                 rq_state &= ~(RQ_NOW | RQ_LATER);
-                spin_unlock(&config_list_lock);
+                cfs_spin_unlock(&config_list_lock);
 
                 /* Always wait a few seconds to allow the server that
                    caused the lock revocation to finish its setup, plus some
                    randomness so everyone doesn't try to reconnect at once. */
-                lwi_now = LWI_TIMEOUT(3 * HZ + (ll_rand() & 0xff) * (HZ / 100),
+                lwi_now = LWI_TIMEOUT(3 * CFS_HZ +
+                                      (ll_rand() & 0xff) * (CFS_HZ / 100),
                                       NULL, NULL);
                 l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi_now);
 
@@ -435,49 +436,49 @@ static int mgc_requeue_thread(void *data)
                  */
                 cld_prev = NULL;
 
-                spin_lock(&config_list_lock);
-                list_for_each_entry_safe(cld, cld_next, &config_llog_list,
-                                         cld_list_chain) {
+                cfs_spin_lock(&config_list_lock);
+                cfs_list_for_each_entry_safe(cld, cld_next, &config_llog_list,
+                                             cld_list_chain) {
                         if (cld->cld_list_chain.next != &config_llog_list)
-                                atomic_inc(&cld_next->cld_refcount);
+                                cfs_atomic_inc(&cld_next->cld_refcount);
 
                         if (cld->cld_lostlock) {
                                 if (cld->cld_sptlrpc &&
                                     cld->cld_sptlrpc->cld_lostlock) {
                                         cld->cld_sptlrpc->cld_lostlock = 0;
 
-                                        spin_unlock(&config_list_lock);
+                                        cfs_spin_unlock(&config_list_lock);
                                         do_requeue(cld->cld_sptlrpc);
-                                        spin_lock(&config_list_lock);
+                                        cfs_spin_lock(&config_list_lock);
                                         LASSERT(cld->cld_lostlock);
                                 }
 
                                 cld->cld_lostlock = 0;
 
-                                spin_unlock(&config_list_lock);
+                                cfs_spin_unlock(&config_list_lock);
                                 do_requeue(cld);
-                                spin_lock(&config_list_lock);
+                                cfs_spin_lock(&config_list_lock);
                         }
 
 
                         if (cld_prev) {
-                                spin_unlock(&config_list_lock);
+                                cfs_spin_unlock(&config_list_lock);
                                 config_log_put(cld_prev);
-                                spin_lock(&config_list_lock);
+                                cfs_spin_lock(&config_list_lock);
                         }
 
                         cld_prev = cld_next;
                 }
-                spin_unlock(&config_list_lock);
+                cfs_spin_unlock(&config_list_lock);
 
                 /* Wait a bit to see if anyone else needs a requeue */
                 l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
                              &lwi_later);
-                spin_lock(&config_list_lock);
+                cfs_spin_lock(&config_list_lock);
         }
         /* the spinlock and the while condition guarantee that RQ_NOW and
          * RQ_LATER are not set */
         rq_state &= ~RQ_RUNNING;
-        spin_unlock(&config_list_lock);
+        cfs_spin_unlock(&config_list_lock);
 
         CDEBUG(D_MGC, "Ending requeue thread\n");
         RETURN(rc);
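
The requeue thread also picks up the tick-rate rename, writing its timeouts in CFS_HZ instead of the kernel's HZ. A sketch of the two timeout styles used in this hunk, assuming cfs_waitq_init(&demo_waitq) ran at setup (demo_waitq and demo_flag are illustrative names):

    /* Sketch: LWI_TIMEOUT values expressed in CFS_HZ ticks (was: HZ). */
    static cfs_waitq_t demo_waitq;
    static int         demo_flag;

    static void demo_wait(void)
    {
            struct l_wait_info lwi;

            /* fixed 60-second poll interval */
            lwi = LWI_TIMEOUT(60 * CFS_HZ, NULL, NULL);
            l_wait_event(demo_waitq, demo_flag != 0, &lwi);

            /* 3 seconds plus up to ~2.5 seconds of random jitter */
            lwi = LWI_TIMEOUT(3 * CFS_HZ + (ll_rand() & 0xff) * (CFS_HZ / 100),
                              NULL, NULL);
            l_wait_event(demo_waitq, demo_flag != 0, &lwi);
    }
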
@@ -490,16 +491,16 @@ static int mgc_requeue_add(struct config_llog_data *cld, int later)
         int rc = 0;
 
         CDEBUG(D_INFO, "log %s: requeue (l=%d r=%d sp=%d st=%x)\n",
-               cld->cld_logname, later, atomic_read(&cld->cld_refcount),
+               cld->cld_logname, later, cfs_atomic_read(&cld->cld_refcount),
                cld->cld_stopping, rq_state);
-        LASSERT(atomic_read(&cld->cld_refcount) > 0);
+        LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
 
         /* Hold lock for rq_state */
-        spin_lock(&config_list_lock);
+        cfs_spin_lock(&config_list_lock);
 
         if (cld->cld_stopping || (rq_state & RQ_STOP)) {
                 cld->cld_lostlock = 0;
-                spin_unlock(&config_list_lock);
+                cfs_spin_unlock(&config_list_lock);
                 config_log_put(cld);
                 RETURN(0);
         }
@@ -509,7 +510,7 @@ static int mgc_requeue_add(struct config_llog_data *cld, int later)
         if (!(rq_state & RQ_RUNNING)) {
                 LASSERT(rq_state == 0);
                 rq_state = RQ_RUNNING | (later ? RQ_LATER : RQ_NOW);
-                spin_unlock(&config_list_lock);
+                cfs_spin_unlock(&config_list_lock);
                 rc = cfs_kernel_thread(mgc_requeue_thread, 0,
                                        CLONE_VM | CLONE_FILES);
                 if (rc < 0) {
@@ -523,7 +524,7 @@ static int mgc_requeue_add(struct config_llog_data *cld, int later)
                 }
         } else {
                 rq_state |= later ? RQ_LATER : RQ_NOW;
-                spin_unlock(&config_list_lock);
+                cfs_spin_unlock(&config_list_lock);
                 cfs_waitq_signal(&rq_waitq);
         }
 
@@ -547,13 +548,13 @@ static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb,
         LASSERT(lsi->lsi_srv_mnt == mnt);
 
         /* The mgc fs exclusion sem. Only one fs can be setup at a time. */
-        down(&cli->cl_mgc_sem);
+        cfs_down(&cli->cl_mgc_sem);
 
-        cleanup_group_info();
+        cfs_cleanup_group_info();
 
         obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
         if (IS_ERR(obd->obd_fsops)) {
-                up(&cli->cl_mgc_sem);
+                cfs_up(&cli->cl_mgc_sem);
                 CERROR("No fstype %s rc=%ld\n", MT_STR(lsi->lsi_ldd),
                        PTR_ERR(obd->obd_fsops));
                 RETURN(PTR_ERR(obd->obd_fsops));
@@ -594,7 +595,7 @@ err_ops:
         fsfilt_put_ops(obd->obd_fsops);
         obd->obd_fsops = NULL;
         cli->cl_mgc_vfsmnt = NULL;
-        up(&cli->cl_mgc_sem);
+        cfs_up(&cli->cl_mgc_sem);
         RETURN(err);
 }
 
@@ -619,12 +620,12 @@ static int mgc_fs_cleanup(struct obd_device *obd)
         if (obd->obd_fsops)
                 fsfilt_put_ops(obd->obd_fsops);
 
-        up(&cli->cl_mgc_sem);
+        cfs_up(&cli->cl_mgc_sem);
 
         RETURN(rc);
 }
 
-static atomic_t mgc_count = ATOMIC_INIT(0);
+static cfs_atomic_t mgc_count = CFS_ATOMIC_INIT(0);
 static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
 {
         int rc = 0;
@@ -634,12 +635,12 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
         case OBD_CLEANUP_EARLY:
                 break;
         case OBD_CLEANUP_EXPORTS:
-                if (atomic_dec_and_test(&mgc_count)) {
+                if (cfs_atomic_dec_and_test(&mgc_count)) {
                         /* Kick the requeue waitq - cld's should all be
                            stopping */
-                        spin_lock(&config_list_lock);
+                        cfs_spin_lock(&config_list_lock);
                         rq_state |= RQ_STOP;
-                        spin_unlock(&config_list_lock);
+                        cfs_spin_unlock(&config_list_lock);
                         cfs_waitq_signal(&rq_waitq);
                 }
                 rc = obd_llog_finish(obd, 0);
@@ -693,13 +694,13 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         lprocfs_obd_setup(obd, lvars.obd_vars);
         sptlrpc_lprocfs_cliobd_attach(obd);
 
-        spin_lock(&config_list_lock);
-        atomic_inc(&mgc_count);
-        if (atomic_read(&mgc_count) == 1) {
+        cfs_spin_lock(&config_list_lock);
+        cfs_atomic_inc(&mgc_count);
+        if (cfs_atomic_read(&mgc_count) == 1) {
                 rq_state &= ~RQ_STOP;
                 cfs_waitq_init(&rq_waitq);
         }
-        spin_unlock(&config_list_lock);
+        cfs_spin_unlock(&config_list_lock);
 
         RETURN(rc);
 
@@ -868,7 +869,7 @@ static int mgc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
         int rc;
         ENTRY;
 
-        if (!try_module_get(THIS_MODULE)) {
+        if (!cfs_try_module_get(THIS_MODULE)) {
                 CERROR("Can't get module. Is it alive?");
                 return -EINVAL;
         }
@@ -905,7 +906,7 @@ static int mgc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                 GOTO(out, rc = -ENOTTY);
         }
 out:
-        module_put(THIS_MODULE);
+        cfs_module_put(THIS_MODULE);
 
         return rc;
 }
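
mgc_iocontrol() above brackets its body with the renamed module-reference helpers so the module cannot be unloaded mid-ioctl. A trimmed sketch of that guard (command dispatch elided; demo_iocontrol is an illustrative name):

    /* Sketch: pin the module for the duration of an ioctl handler. */
    static int demo_iocontrol(unsigned int cmd)
    {
            int rc = 0;

            if (!cfs_try_module_get(THIS_MODULE))   /* was: try_module_get() */
                    return -EINVAL;

            /* ... dispatch on cmd ... */

            cfs_module_put(THIS_MODULE);            /* was: module_put() */
            return rc;
    }
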
@@ -961,9 +962,9 @@ int mgc_set_info_async(struct obd_export *exp, obd_count keylen,
         if (KEY_IS(KEY_INIT_RECOV)) {
                 if (vallen != sizeof(int))
                         RETURN(-EINVAL);
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_initial_recov = *(int *)val;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
                        exp->exp_obd->obd_name, imp->imp_initial_recov);
                 RETURN(0);
@@ -974,12 +975,12 @@ int mgc_set_info_async(struct obd_export *exp, obd_count keylen,
                 if (vallen != sizeof(int))
                         RETURN(-EINVAL);
                 value = *(int *)val;
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_initial_recov_bk = value > 0;
                 /* Even after the initial connection, give up all comms if
                    nobody answers the first time. */
                 imp->imp_recon_bk = 1;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CDEBUG(D_MGC, "InitRecov %s %d/%d:d%d:i%d:r%d:or%d:%s\n",
                        imp->imp_obd->obd_name, value, imp->imp_initial_recov,
                        imp->imp_deactive, imp->imp_invalid,
@@ -1307,10 +1308,10 @@ int mgc_process_log(struct obd_device *mgc,
            sounds like badness.  It actually might be fine, as long as
            we're not trying to update from the same log
            simultaneously (in which case we should use a per-log sem.) */
-        down(&llog_process_lock);
+        cfs_down(&llog_process_lock);
 
         if (cld->cld_stopping) {
-                up(&llog_process_lock);
+                cfs_up(&llog_process_lock);
                 RETURN(0);
         }
 
@@ -1325,7 +1326,7 @@ int mgc_process_log(struct obd_device *mgc,
         ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
         if (!ctxt) {
                 CERROR("missing llog context\n");
-                up(&llog_process_lock);
+                cfs_up(&llog_process_lock);
                 RETURN(-EINVAL);
         }
 
@@ -1411,7 +1412,7 @@ out_pop:
         CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
                mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
 
-        up(&llog_process_lock);
+        cfs_up(&llog_process_lock);
 
         RETURN(rc);
 }
index 97f4a70..2d93e54 100644 (file)
@@ -61,7 +61,7 @@ static int mgs_fs_seq_show(struct seq_file *seq, void *v)
 {
         struct obd_device *obd = seq->private;
         struct mgs_obd *mgs = &obd->u.mgs;
-        struct list_head dentry_list;
+        cfs_list_t dentry_list;
         struct l_linux_dirent *dirent, *n;
         int rc, len;
         ENTRY;
@@ -73,8 +73,8 @@ static int mgs_fs_seq_show(struct seq_file *seq, void *v)
                 CERROR("Can't read config dir\n");
                 RETURN(rc);
         }
-        list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
-                list_del(&dirent->lld_list);
+        cfs_list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
+                cfs_list_del(&dirent->lld_list);
                 len = strlen(dirent->lld_name);
                 if ((len > 7) && (strncmp(dirent->lld_name + len - 7, "-client",
                                           len) == 0)) {
@@ -128,9 +128,9 @@ static int mgsself_srpc_seq_show(struct seq_file *seq, void *v)
         if (rc)
                 return rc;
 
-        down(&fsdb->fsdb_sem);
+        cfs_down(&fsdb->fsdb_sem);
         seq_show_srpc_rules(seq, fsdb->fsdb_name, &fsdb->fsdb_srpc_gen);
-        up(&fsdb->fsdb_sem);
+        cfs_up(&fsdb->fsdb_sem);
         return 0;
 }
 
@@ -193,16 +193,16 @@ static int mgs_live_seq_show(struct seq_file *seq, void *v)
         struct mgs_tgt_srpc_conf *srpc_tgt;
         int i;
 
-        down(&fsdb->fsdb_sem);
+        cfs_down(&fsdb->fsdb_sem);
 
         seq_printf(seq, "fsname: %s\n", fsdb->fsdb_name);
         seq_printf(seq, "flags: %#x     gen: %d\n",
                    fsdb->fsdb_flags, fsdb->fsdb_gen);
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++)
-                 if (test_bit(i, fsdb->fsdb_mdt_index_map))
+                 if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
                          seq_printf(seq, "%s-MDT%04x\n", fsdb->fsdb_name, i);
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++)
-                 if (test_bit(i, fsdb->fsdb_ost_index_map))
+                 if (cfs_test_bit(i, fsdb->fsdb_ost_index_map))
                          seq_printf(seq, "%s-OST%04x\n", fsdb->fsdb_name, i);
 
         seq_printf(seq, "\nSecure RPC Config Rules:\n");
@@ -217,7 +217,7 @@ static int mgs_live_seq_show(struct seq_file *seq, void *v)
         }
         seq_show_srpc_rules(seq, fsdb->fsdb_name, &fsdb->fsdb_srpc_gen);
 
-        up(&fsdb->fsdb_sem);
+        cfs_up(&fsdb->fsdb_sem);
         return 0;
 }
 
index 0ff9ebf..f4adf56 100644 (file)
@@ -149,7 +149,8 @@ static struct dentry *mgs_fid2dentry(struct mgs_obd *mgs,
                 /* we didn't find the right inode.. */
                 CDEBUG(D_INODE, "found wrong generation: inode %lu, link: %lu, "
                        "count: %d, generation %u/%u\n", inode->i_ino,
-                       (unsigned long)inode->i_nlink, atomic_read(&inode->i_count),
+                       (unsigned long)inode->i_nlink,
+                       atomic_read(&inode->i_count),
                        inode->i_generation, gen);
                 l_dput(result);
                 RETURN(ERR_PTR(-ENOENT));
@@ -178,7 +179,7 @@ int mgs_fs_setup(struct obd_device *obd, struct vfsmount *mnt)
         ENTRY;
 
         /* FIXME what's this?  Do I need it? */
-        rc = cleanup_group_info();
+        rc = cfs_cleanup_group_info();
         if (rc)
                 RETURN(rc);
 
index f8394cf..deb9b1b 100644 (file)
@@ -222,7 +222,7 @@ static int mgs_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 
         /* Internal mgs setup */
         mgs_init_fsdb_list(obd);
-        sema_init(&mgs->mgs_sem, 1);
+        cfs_sema_init(&mgs->mgs_sem, 1);
 
         /* Setup proc */
         lprocfs_mgs_init_vars(&lvars);
@@ -564,7 +564,7 @@ static int mgs_connect_check_sptlrpc(struct ptlrpc_request *req)
                 if (rc)
                         return rc;
 
-                down(&fsdb->fsdb_sem);
+                cfs_down(&fsdb->fsdb_sem);
                 if (sptlrpc_rule_set_choose(&fsdb->fsdb_srpc_gen,
                                             LUSTRE_SP_MGC, LUSTRE_SP_MGS,
                                             req->rq_peer.nid,
@@ -572,9 +572,9 @@ static int mgs_connect_check_sptlrpc(struct ptlrpc_request *req)
                        /* by default allow any flavors */
                         flvr.sf_rpc = SPTLRPC_FLVR_ANY;
                 }
-                up(&fsdb->fsdb_sem);
+                cfs_up(&fsdb->fsdb_sem);
 
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
 
                 exp->exp_sp_peer = req->rq_sp_from;
                 exp->exp_flvr = flvr;
@@ -587,7 +587,7 @@ static int mgs_connect_check_sptlrpc(struct ptlrpc_request *req)
                         rc = -EACCES;
                 }
 
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
         } else {
                 if (exp->exp_sp_peer != req->rq_sp_from) {
                         CERROR("RPC source %s doesn't match %s\n",
@@ -756,9 +756,9 @@ out:
 
 static inline int mgs_init_export(struct obd_export *exp)
 {
-        spin_lock(&exp->exp_lock);
+        cfs_spin_lock(&exp->exp_lock);
         exp->exp_connecting = 1;
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
 
         return ldlm_init_export(exp);
 }
@@ -831,7 +831,7 @@ static int mgs_iocontrol_pool(struct obd_device *obd,
         if (lcfg == NULL)
                 GOTO(out_pool, rc = -ENOMEM);
 
-        if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
+        if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
                 GOTO(out_pool, rc = -EFAULT);
 
         if (lcfg->lcfg_bufcount < 2) {
@@ -931,7 +931,7 @@ int mgs_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                 OBD_ALLOC(lcfg, data->ioc_plen1);
                 if (lcfg == NULL)
                         RETURN(-ENOMEM);
-                if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
+                if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
                         GOTO(out_free, rc = -EFAULT);
 
                 if (lcfg->lcfg_bufcount < 1)
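
Both ioctl hunks above switch to cfs_copy_from_user(), which, like its kernel counterpart, returns nonzero when the copy faults. A minimal sketch of the allocate-copy-validate shape used here, with the GOTO-based unwinding simplified (demo_copy_cfg is an illustrative name):

    /* Sketch: pull a lustre_cfg payload in from userspace. */
    static int demo_copy_cfg(struct obd_ioctl_data *data)
    {
            struct lustre_cfg *lcfg;
            int rc = 0;

            OBD_ALLOC(lcfg, data->ioc_plen1);
            if (lcfg == NULL)
                    return -ENOMEM;

            /* was: copy_from_user(); nonzero means the copy faulted */
            if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
                    rc = -EFAULT;
            else if (lcfg->lcfg_bufcount < 2)
                    rc = -EINVAL;

            OBD_FREE(lcfg, data->ioc_plen1);
            return rc;
    }
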
index a7384e4..f62cb29 100644 (file)
@@ -50,7 +50,7 @@
 /* mgs_llog.c */
 int class_dentry_readdir(struct obd_device *obd, struct dentry *dir,
                          struct vfsmount *inmnt,
-                         struct list_head *dentry_list);
+                         cfs_list_t *dentry_list);
 
 #define MGSSELF_NAME    "_mgs"
 
@@ -67,16 +67,16 @@ struct mgs_tgt_srpc_conf {
 
 struct fs_db {
         char              fsdb_name[9];
-        struct list_head  fsdb_list;           /* list of databases */
-        struct semaphore  fsdb_sem;
+        cfs_list_t        fsdb_list;           /* list of databases */
+        cfs_semaphore_t   fsdb_sem;
         void             *fsdb_ost_index_map;  /* bitmap of used indices */
         void             *fsdb_mdt_index_map;  /* bitmap of used indices */
         /* COMPAT_146 these items must be recorded out of the old client log */
-        char             *fsdb_clilov;         /* COMPAT_146 client lov name */
+        char             *fsdb_clilov;       /* COMPAT_146 client lov name */
         char             *fsdb_clilmv;
-        char             *fsdb_mdtlov;         /* COMPAT_146 mds lov name */
+        char             *fsdb_mdtlov;       /* COMPAT_146 mds lov name */
         char             *fsdb_mdtlmv;
-        char             *fsdb_mdc;            /* COMPAT_146 mdc name */
+        char             *fsdb_mdc;          /* COMPAT_146 mdc name */
         /* end COMPAT_146 */
         __u32             fsdb_flags;
         __u32             fsdb_gen;
index 8d6c90c..37e0e65 100644 (file)
@@ -70,7 +70,7 @@
 /* Caller must list_del and OBD_FREE each dentry from the list */
 int class_dentry_readdir(struct obd_device *obd, struct dentry *dir,
                                 struct vfsmount *inmnt,
-                                struct list_head *dentry_list){
+                                cfs_list_t *dentry_list){
         /* see mds_cleanup_pending */
         struct lvfs_run_ctxt saved;
         struct file *file;
@@ -176,7 +176,7 @@ static int mgs_fsdb_handler(struct llog_handle *llh, struct llog_rec_hdr *rec,
                 CDEBUG(D_MGS, "OST index for %s is %u (%s)\n",
                        lustre_cfg_string(lcfg, 1), index,
                        lustre_cfg_string(lcfg, 2));
-                set_bit(index, fsdb->fsdb_ost_index_map);
+                cfs_set_bit(index, fsdb->fsdb_ost_index_map);
         }
 
         /* Figure out mdt indices */
@@ -192,7 +192,7 @@ static int mgs_fsdb_handler(struct llog_handle *llh, struct llog_rec_hdr *rec,
                 }
                 rc = 0;
                 CDEBUG(D_MGS, "MDT index is %u\n", index);
-                set_bit(index, fsdb->fsdb_mdt_index_map);
+                cfs_set_bit(index, fsdb->fsdb_mdt_index_map);
         }
 
         /* COMPAT_146 */
@@ -275,7 +275,7 @@ static int mgs_get_fsdb_from_llog(struct obd_device *obd, struct fs_db *fsdb)
         ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
         LASSERT(ctxt != NULL);
         name_create(&logname, fsdb->fsdb_name, "-client");
-        down(&fsdb->fsdb_sem);
+        cfs_down(&fsdb->fsdb_sem);
         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
         rc = llog_create(ctxt, &loghandle, NULL, logname);
         if (rc)
@@ -296,7 +296,7 @@ out_close:
                 rc = rc2;
 out_pop:
         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-        up(&fsdb->fsdb_sem);
+        cfs_up(&fsdb->fsdb_sem);
         name_destroy(&logname);
         llog_ctxt_put(ctxt);
 
@@ -327,10 +327,10 @@ static struct fs_db *mgs_find_fsdb(struct obd_device *obd, char *fsname)
 {
         struct mgs_obd *mgs = &obd->u.mgs;
         struct fs_db *fsdb;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
-        list_for_each(tmp, &mgs->mgs_fs_db_list) {
-                fsdb = list_entry(tmp, struct fs_db, fsdb_list);
+        cfs_list_for_each(tmp, &mgs->mgs_fs_db_list) {
+                fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
                 if (strcmp(fsdb->fsdb_name, fsname) == 0)
                         return fsdb;
         }
@@ -355,7 +355,7 @@ static struct fs_db *mgs_new_fsdb(struct obd_device *obd, char *fsname)
                 RETURN(NULL);
 
         strcpy(fsdb->fsdb_name, fsname);
-        sema_init(&fsdb->fsdb_sem, 1);
+        cfs_sema_init(&fsdb->fsdb_sem, 1);
         fsdb->fsdb_fl_udesc = 1;
 
         if (strcmp(fsname, MGSSELF_NAME) == 0) {
@@ -384,7 +384,7 @@ static struct fs_db *mgs_new_fsdb(struct obd_device *obd, char *fsname)
                 lproc_mgs_add_live(obd, fsdb);
         }
 
-        list_add(&fsdb->fsdb_list, &mgs->mgs_fs_db_list);
+        cfs_list_add(&fsdb->fsdb_list, &mgs->mgs_fs_db_list);
 
         RETURN(fsdb);
 err:
@@ -403,9 +403,9 @@ err:
 static void mgs_free_fsdb(struct obd_device *obd, struct fs_db *fsdb)
 {
         /* wait for anyone with the sem */
-        down(&fsdb->fsdb_sem);
+        cfs_down(&fsdb->fsdb_sem);
         lproc_mgs_del_live(obd, fsdb);
-        list_del(&fsdb->fsdb_list);
+        cfs_list_del(&fsdb->fsdb_list);
         if (fsdb->fsdb_ost_index_map)
                 OBD_FREE(fsdb->fsdb_ost_index_map, INDEX_MAP_SIZE);
         if (fsdb->fsdb_mdt_index_map)
@@ -430,13 +430,13 @@ int mgs_cleanup_fsdb_list(struct obd_device *obd)
 {
         struct mgs_obd *mgs = &obd->u.mgs;
         struct fs_db *fsdb;
-        struct list_head *tmp, *tmp2;
-        down(&mgs->mgs_sem);
-        list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
-                fsdb = list_entry(tmp, struct fs_db, fsdb_list);
+        cfs_list_t *tmp, *tmp2;
+        cfs_down(&mgs->mgs_sem);
+        cfs_list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
+                fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
                 mgs_free_fsdb(obd, fsdb);
         }
-        up(&mgs->mgs_sem);
+        cfs_up(&mgs->mgs_sem);
         return 0;
 }
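
Note: the cleanup loop above must use the _safe iterator because mgs_free_fsdb() unlinks the entry being visited with cfs_list_del(); the _safe variant captures the next pointer before the current node disappears. The general idiom, with placeholder names (struct item, it_link and item_free are illustrative only):

    cfs_list_t *pos, *next;
    struct item *it;

    cfs_list_for_each_safe(pos, next, &item_list) {
            it = cfs_list_entry(pos, struct item, it_link);
            cfs_list_del(pos);      /* safe: 'next' was captured already */
            item_free(it);
    }
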
 
@@ -447,17 +447,17 @@ int mgs_find_or_make_fsdb(struct obd_device *obd, char *name,
         struct fs_db *fsdb;
         int rc = 0;
 
-        down(&mgs->mgs_sem);
+        cfs_down(&mgs->mgs_sem);
         fsdb = mgs_find_fsdb(obd, name);
         if (fsdb) {
-                up(&mgs->mgs_sem);
+                cfs_up(&mgs->mgs_sem);
                 *dbh = fsdb;
                 return 0;
         }
 
         CDEBUG(D_MGS, "Creating new db\n");
         fsdb = mgs_new_fsdb(obd, name);
-        up(&mgs->mgs_sem);
+        cfs_up(&mgs->mgs_sem);
         if (!fsdb)
                 return -ENOMEM;
 
@@ -512,7 +512,7 @@ int mgs_check_index(struct obd_device *obd, struct mgs_target_info *mti)
         else
                 RETURN(-EINVAL);
 
-        if (test_bit(mti->mti_stripe_index, imap))
+        if (cfs_test_bit(mti->mti_stripe_index, imap))
                 RETURN(1);
         RETURN(0);
 }
@@ -521,7 +521,7 @@ static __inline__ int next_index(void *index_map, int map_len)
 {
         int i;
         for (i = 0; i < map_len * 8; i++)
-                 if (!test_bit(i, index_map)) {
+                 if (!cfs_test_bit(i, index_map)) {
                          return i;
                  }
         CERROR("max index %d exceeded.\n", i);
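
Note: next_index() probes the map one bit at a time with cfs_test_bit(). A hypothetical alternative (not part of this patch) using the kernel's bitmap search helper would be:

    #include <linux/bitops.h>

    /* Hypothetical rewrite; map_len is in bytes, so the bit count
     * is map_len * 8.  Returns -1 when the map is full. */
    static inline int next_index_alt(void *index_map, int map_len)
    {
            int i = find_first_zero_bit(index_map, map_len * 8);
            return i < map_len * 8 ? i : -1;
    }
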
@@ -567,7 +567,7 @@ int mgs_set_index(struct obd_device *obd, struct mgs_target_info *mti)
                 RETURN(-ERANGE);
         }
 
-        if (test_bit(mti->mti_stripe_index, imap)) {
+        if (cfs_test_bit(mti->mti_stripe_index, imap)) {
                 if ((mti->mti_flags & LDD_F_VIRGIN) &&
                     !(mti->mti_flags & LDD_F_WRITECONF)) {
                         LCONSOLE_ERROR_MSG(0x140, "Server %s requested index "
@@ -583,7 +583,7 @@ int mgs_set_index(struct obd_device *obd, struct mgs_target_info *mti)
                 }
         }
 
-        set_bit(mti->mti_stripe_index, imap);
+        cfs_set_bit(mti->mti_stripe_index, imap);
         fsdb->fsdb_flags &= ~FSDB_LOG_EMPTY;
         server_make_name(mti->mti_flags, mti->mti_stripe_index,
                          mti->mti_fsname, mti->mti_svname);
@@ -997,7 +997,7 @@ int mgs_write_log_direct_all(struct obd_device *obd, struct fs_db *fsdb,
                              char *devname, char *comment)
 {
         struct mgs_obd *mgs = &obd->u.mgs;
-        struct list_head dentry_list;
+        cfs_list_t dentry_list;
         struct l_linux_dirent *dirent, *n;
         char *fsname = mti->mti_fsname;
         char *logname;
@@ -1027,8 +1027,8 @@ int mgs_write_log_direct_all(struct obd_device *obd, struct fs_db *fsdb,
         }
 
         /* Could use fsdb index maps instead of directory listing */
-        list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
-                list_del(&dirent->lld_list);
+        cfs_list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
+                cfs_list_del(&dirent->lld_list);
                 /* don't write to sptlrpc rule log */
                 if (strncmp(fsname, dirent->lld_name, len) == 0 &&
                     strstr(dirent->lld_name, "-sptlrpc") == NULL) {
@@ -1646,7 +1646,7 @@ out:
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
                 char *mdtname;
                 if (i !=  mti->mti_stripe_index &&
-                    test_bit(i,  fsdb->fsdb_mdt_index_map)) {
+                    cfs_test_bit(i,  fsdb->fsdb_mdt_index_map)) {
                         name_create_mdt(&mdtname, mti->mti_fsname, i);
                         rc = mgs_write_log_mdc_to_mdt(obd, fsdb, mti, mdtname);
                         name_destroy(&mdtname);
@@ -1789,7 +1789,7 @@ static int mgs_write_log_ost(struct obd_device *obd, struct fs_db *fsdb,
 
         /* Add ost to all MDT lov defs */
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
-                if (test_bit(i, fsdb->fsdb_mdt_index_map)) {
+                if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map)) {
                         char mdt_index[9];
 
                         name_create_mdt_and_lov(&logname, &lovname, fsdb, i);
@@ -1865,7 +1865,7 @@ static int mgs_write_log_add_failnid(struct obd_device *obd, struct fs_db *fsdb,
                 int i;
 
                 for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
-                        if (!test_bit(i, fsdb->fsdb_mdt_index_map))
+                        if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
                                 continue;
                         name_create_mdt(&logname, mti->mti_fsname, i);
                         name_create_mdt_osc(&cliname, mti->mti_svname, fsdb, i);
@@ -2380,7 +2380,7 @@ static int mgs_write_log_param(struct obd_device *obd, struct fs_db *fsdb,
                 /* Modify mdtlov */
                 /* Add to all MDT logs for CMD */
                 for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
-                        if (!test_bit(i, fsdb->fsdb_mdt_index_map))
+                        if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
                                 continue;
                         name_create_mdt(&logname, mti->mti_fsname, i);
                         rc = mgs_modify(obd, fsdb, mti, logname,
@@ -2492,7 +2492,7 @@ static int mgs_write_log_param(struct obd_device *obd, struct fs_db *fsdb,
                         int i;
 
                         for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
-                                if (!test_bit(i, fsdb->fsdb_mdt_index_map))
+                                if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
                                         continue;
                                 name_destroy(&cname);
                                 name_create_mdt_osc(&cname, mti->mti_svname,
@@ -2529,8 +2529,8 @@ static int mgs_write_log_param(struct obd_device *obd, struct fs_db *fsdb,
                         goto active_err;
                 if (rc & LDD_F_SV_ALL) {
                         for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
-                                if (!test_bit(i,
-                                              fsdb->fsdb_mdt_index_map))
+                                if (!cfs_test_bit(i,
+                                                  fsdb->fsdb_mdt_index_map))
                                         continue;
                                 name_create_mdt(&logname, mti->mti_fsname, i);
                                 rc = mgs_wlp_lcfg(obd, fsdb, mti,
@@ -2593,9 +2593,9 @@ int mgs_check_failnid(struct obd_device *obd, struct mgs_target_info *mti)
            the failover list.  Modify mti->params for rewriting back at
            server_register_target(). */
 
-        down(&fsdb->fsdb_sem);
+        cfs_down(&fsdb->fsdb_sem);
         rc = mgs_write_log_add_failnid(obd, fsdb, mti);
-        up(&fsdb->fsdb_sem);
+        cfs_up(&fsdb->fsdb_sem);
 
         RETURN(rc);
 #endif
@@ -2656,7 +2656,7 @@ int mgs_write_log_target(struct obd_device *obd,
                 RETURN(rc);
         }
 
-        down(&fsdb->fsdb_sem);
+        cfs_down(&fsdb->fsdb_sem);
 
         if (mti->mti_flags &
             (LDD_F_VIRGIN | LDD_F_UPGRADE14 | LDD_F_WRITECONF)) {
@@ -2705,7 +2705,7 @@ int mgs_write_log_target(struct obd_device *obd,
         OBD_FREE(buf, strlen(mti->mti_params) + 1);
 
 out_up:
-        up(&fsdb->fsdb_sem);
+        cfs_up(&fsdb->fsdb_sem);
         RETURN(rc);
 }
 
@@ -2811,7 +2811,7 @@ int mgs_erase_logs(struct obd_device *obd, char *fsname)
 {
         struct mgs_obd *mgs = &obd->u.mgs;
         static struct fs_db *fsdb;
-        struct list_head dentry_list;
+        cfs_list_t dentry_list;
         struct l_linux_dirent *dirent, *n;
         int rc, len = strlen(fsname);
         ENTRY;
@@ -2824,15 +2824,15 @@ int mgs_erase_logs(struct obd_device *obd, char *fsname)
                 RETURN(rc);
         }
 
-        down(&mgs->mgs_sem);
+        cfs_down(&mgs->mgs_sem);
 
         /* Delete the fs db */
         fsdb = mgs_find_fsdb(obd, fsname);
         if (fsdb)
                 mgs_free_fsdb(obd, fsdb);
 
-        list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
-                list_del(&dirent->lld_list);
+        cfs_list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
+                cfs_list_del(&dirent->lld_list);
                 if (strncmp(fsname, dirent->lld_name, len) == 0) {
                         CDEBUG(D_MGS, "Removing log %s\n", dirent->lld_name);
                         mgs_erase_log(obd, dirent->lld_name);
@@ -2840,7 +2840,7 @@ int mgs_erase_logs(struct obd_device *obd, char *fsname)
                 OBD_FREE(dirent, sizeof(*dirent));
         }
 
-        up(&mgs->mgs_sem);
+        cfs_up(&mgs->mgs_sem);
 
         RETURN(rc);
 }
@@ -2945,11 +2945,11 @@ int mgs_setparam(struct obd_device *obd, struct lustre_cfg *lcfg, char *fsname)
 
         mti->mti_flags = rc | LDD_F_PARAM;
 
-        down(&fsdb->fsdb_sem);
+        cfs_down(&fsdb->fsdb_sem);
         /* this is lctl conf_param's single param path, there is no
            need to loop through parameters */
         rc = mgs_write_log_param(obd, fsdb, mti, mti->mti_params);
-        up(&fsdb->fsdb_sem);
+        cfs_up(&fsdb->fsdb_sem);
 
 out:
         OBD_FREE_PTR(mti);
@@ -3052,7 +3052,7 @@ int mgs_pool_cmd(struct obd_device *obd, enum lcfg_command_type cmd,
         }
         }
 
-        down(&fsdb->fsdb_sem);
+        cfs_down(&fsdb->fsdb_sem);
 
         if (canceled_label != NULL) {
                 OBD_ALLOC_PTR(mti);
@@ -3062,7 +3062,7 @@ int mgs_pool_cmd(struct obd_device *obd, enum lcfg_command_type cmd,
 
         /* write pool def to all MDT logs */
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
-                 if (test_bit(i,  fsdb->fsdb_mdt_index_map)) {
+                 if (cfs_test_bit(i,  fsdb->fsdb_mdt_index_map)) {
                         name_create_mdt_and_lov(&logname, &lovname, fsdb, i);
 
                         if (canceled_label != NULL) {
@@ -3088,7 +3088,7 @@ int mgs_pool_cmd(struct obd_device *obd, enum lcfg_command_type cmd,
                            cmd, fsname, poolname, ostname, label);
         name_destroy(&logname);
 
-        up(&fsdb->fsdb_sem);
+        cfs_up(&fsdb->fsdb_sem);
 
         EXIT;
 out:
index 476b8ff..74149f7 100644
@@ -71,9 +71,9 @@ cfs_mem_cache_t *capa_cachep = NULL;
 
 #ifdef __KERNEL__
 /* lock for capa hash/capa_list/fo_capa_keys */
-spinlock_t capa_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t capa_lock = CFS_SPIN_LOCK_UNLOCKED;
 
-struct list_head capa_list[CAPA_SITE_MAX];
+cfs_list_t capa_list[CAPA_SITE_MAX];
 
 static struct capa_hmac_alg capa_hmac_algs[] = {
         DEF_CAPA_HMAC_ALG("sha1", SHA1, 20, 20),
@@ -87,20 +87,20 @@ EXPORT_SYMBOL(capa_list);
 EXPORT_SYMBOL(capa_lock);
 EXPORT_SYMBOL(capa_count);
 
-struct hlist_head *init_capa_hash(void)
+cfs_hlist_head_t *init_capa_hash(void)
 {
-        struct hlist_head *hash;
+        cfs_hlist_head_t *hash;
         int nr_hash, i;
 
         OBD_ALLOC(hash, CFS_PAGE_SIZE);
         if (!hash)
                 return NULL;
 
-        nr_hash = CFS_PAGE_SIZE / sizeof(struct hlist_head);
+        nr_hash = CFS_PAGE_SIZE / sizeof(cfs_hlist_head_t);
         LASSERT(nr_hash > NR_CAPAHASH);
 
         for (i = 0; i < NR_CAPAHASH; i++)
-                INIT_HLIST_HEAD(hash + i);
+                CFS_INIT_HLIST_HEAD(hash + i);
         return hash;
 }
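
Note: init_capa_hash() sizes the table so that a single CFS_PAGE_SIZE allocation holds every hash head. The hlist types follow the same renaming scheme; assumed Linux aliases:

    /* Assumed Linux aliases for the renamed hlist API. */
    typedef struct hlist_head cfs_hlist_head_t;
    typedef struct hlist_node cfs_hlist_node_t;
    #define CFS_INIT_HLIST_HEAD(h)    INIT_HLIST_HEAD(h)
    #define cfs_hlist_add_head(n, h)  hlist_add_head(n, h)
    #define cfs_hlist_del_init(n)     hlist_del_init(n)
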
 
@@ -113,25 +113,26 @@ static inline int capa_on_server(struct obd_capa *ocapa)
 static inline void capa_delete(struct obd_capa *ocapa)
 {
         LASSERT(capa_on_server(ocapa));
-        hlist_del_init(&ocapa->u.tgt.c_hash);
-        list_del_init(&ocapa->c_list);
+        cfs_hlist_del_init(&ocapa->u.tgt.c_hash);
+        cfs_list_del_init(&ocapa->c_list);
         capa_count[ocapa->c_site]--;
         /* release the ref taken at allocation */
         capa_put(ocapa);
 }
 
-void cleanup_capa_hash(struct hlist_head *hash)
+void cleanup_capa_hash(cfs_hlist_head_t *hash)
 {
         int i;
-        struct hlist_node *pos, *next;
+        cfs_hlist_node_t *pos, *next;
         struct obd_capa *oc;
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         for (i = 0; i < NR_CAPAHASH; i++) {
-                hlist_for_each_entry_safe(oc, pos, next, hash + i, u.tgt.c_hash)
+                cfs_hlist_for_each_entry_safe(oc, pos, next, hash + i,
+                                              u.tgt.c_hash)
                         capa_delete(oc);
         }
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         OBD_FREE(hash, CFS_PAGE_SIZE);
 }
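
Note: capa_lock changes type to cfs_spinlock_t and its static initializer to CFS_SPIN_LOCK_UNLOCKED. Under the same thin-wrapper assumption, the Linux mapping is simply:

    /* Assumed Linux aliases for the renamed spinlock API. */
    typedef spinlock_t cfs_spinlock_t;
    #define CFS_SPIN_LOCK_UNLOCKED  SPIN_LOCK_UNLOCKED
    #define cfs_spin_lock(l)        spin_lock(l)
    #define cfs_spin_unlock(l)      spin_unlock(l)
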
@@ -152,13 +153,13 @@ static inline int capa_is_to_expire(struct obd_capa *oc)
 }
 
 static struct obd_capa *find_capa(struct lustre_capa *capa,
-                                  struct hlist_head *head, int alive)
+                                  cfs_hlist_head_t *head, int alive)
 {
-        struct hlist_node *pos;
+        cfs_hlist_node_t *pos;
         struct obd_capa *ocapa;
         int len = alive ? offsetof(struct lustre_capa, lc_keyid):sizeof(*capa);
 
-        hlist_for_each_entry(ocapa, pos, head, u.tgt.c_hash) {
+        cfs_hlist_for_each_entry(ocapa, pos, head, u.tgt.c_hash) {
                 if (memcmp(&ocapa->c_capa, capa, len))
                         continue;
                 /* don't return one that will expire soon in this case */
@@ -175,17 +176,17 @@ static struct obd_capa *find_capa(struct lustre_capa *capa,
 }
 
 #define LRU_CAPA_DELETE_COUNT 12
-static inline void capa_delete_lru(struct list_head *head)
+static inline void capa_delete_lru(cfs_list_t *head)
 {
         struct obd_capa *ocapa;
-        struct list_head *node = head->next;
+        cfs_list_t *node = head->next;
         int count = 0;
 
         /* free LRU_CAPA_DELETE_COUNT unused capa from head */
         while (count++ < LRU_CAPA_DELETE_COUNT) {
-                ocapa = list_entry(node, struct obd_capa, c_list);
+                ocapa = cfs_list_entry(node, struct obd_capa, c_list);
                 node = node->next;
-                if (atomic_read(&ocapa->c_refc))
+                if (cfs_atomic_read(&ocapa->c_refc))
                         continue;
 
                 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
@@ -194,49 +195,50 @@ static inline void capa_delete_lru(struct list_head *head)
 }
 
 /* add or update */
-struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa)
+struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
 {
-        struct hlist_head *head = hash + capa_hashfn(&capa->lc_fid);
+        cfs_hlist_head_t *head = hash + capa_hashfn(&capa->lc_fid);
         struct obd_capa *ocapa, *old = NULL;
-        struct list_head *list = &capa_list[CAPA_SITE_SERVER];
+        cfs_list_t *list = &capa_list[CAPA_SITE_SERVER];
 
         ocapa = alloc_capa(CAPA_SITE_SERVER);
         if (IS_ERR(ocapa))
                 return NULL;
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         old = find_capa(capa, head, 0);
         if (!old) {
                 ocapa->c_capa = *capa;
                 set_capa_expiry(ocapa);
-                hlist_add_head(&ocapa->u.tgt.c_hash, head);
-                list_add_tail(&ocapa->c_list, list);
+                cfs_hlist_add_head(&ocapa->u.tgt.c_hash, head);
+                cfs_list_add_tail(&ocapa->c_list, list);
                 capa_get(ocapa);
                 capa_count[CAPA_SITE_SERVER]++;
                 if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE)
                         capa_delete_lru(list);
-                spin_unlock(&capa_lock);
+                cfs_spin_unlock(&capa_lock);
                 return ocapa;
         } else {
                 capa_get(old);
-                spin_unlock(&capa_lock);
+                cfs_spin_unlock(&capa_lock);
                 capa_put(ocapa);
                 return old;
         }
 }
 
-struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
+struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa,
                              int alive)
 {
         struct obd_capa *ocapa;
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
         if (ocapa) {
-                list_move_tail(&ocapa->c_list, &capa_list[CAPA_SITE_SERVER]);
+                cfs_list_move_tail(&ocapa->c_list,
+                                   &capa_list[CAPA_SITE_SERVER]);
                 capa_get(ocapa);
         }
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         return ocapa;
 }
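
Note: capa_add() keeps the whole find-or-insert sequence under capa_lock, taking a reference on whichever capa survives and trimming the LRU once the server-site count passes CAPA_HASH_SIZE; the refcount reads go through the renamed atomic wrappers. Assumed Linux mapping:

    /* Assumed Linux aliases for the renamed atomic API. */
    typedef atomic_t cfs_atomic_t;
    #define cfs_atomic_read(a)          atomic_read(a)
    #define cfs_atomic_set(a, v)        atomic_set(a, v)
    #define cfs_atomic_inc(a)           atomic_inc(a)
    #define cfs_atomic_dec(a)           atomic_dec(a)
    #define cfs_atomic_dec_and_test(a)  atomic_dec_and_test(a)
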
@@ -381,9 +383,9 @@ out:
 
 void capa_cpy(void *capa, struct obd_capa *ocapa)
 {
-        spin_lock(&ocapa->c_lock);
+        cfs_spin_lock(&ocapa->c_lock);
         *(struct lustre_capa *)capa = ocapa->c_capa;
-        spin_unlock(&ocapa->c_lock);
+        cfs_spin_unlock(&ocapa->c_lock);
 }
 
 EXPORT_SYMBOL(init_capa_hash);
index 5a6b8b0..39513da 100644
@@ -60,9 +60,9 @@
  */
 
 #define cl_io_for_each(slice, io) \
-        list_for_each_entry((slice), &io->ci_layers, cis_linkage)
+        cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
 #define cl_io_for_each_reverse(slice, io)                 \
-        list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
+        cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
 
 static inline int cl_io_type_is_valid(enum cl_io_type type)
 {
@@ -114,10 +114,10 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
         LINVRNT(cl_io_invariant(io));
         ENTRY;
 
-        while (!list_empty(&io->ci_layers)) {
+        while (!cfs_list_empty(&io->ci_layers)) {
                 slice = container_of(io->ci_layers.next, struct cl_io_slice,
                                      cis_linkage);
-                list_del_init(&slice->cis_linkage);
+                cfs_list_del_init(&slice->cis_linkage);
                 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
                         slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
                 /*
@@ -257,8 +257,9 @@ static void cl_io_locks_sort(struct cl_io *io)
                 done = 1;
                 prev = NULL;
 
-                list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
-                                         cill_linkage) {
+                cfs_list_for_each_entry_safe(curr, temp,
+                                             &io->ci_lockset.cls_todo,
+                                             cill_linkage) {
                         if (prev != NULL) {
                                 switch (cl_lock_descr_cmp(&prev->cill_descr,
                                                           &curr->cill_descr)) {
@@ -271,8 +272,8 @@ static void cl_io_locks_sort(struct cl_io *io)
                                 default:
                                         LBUG();
                                 case +1:
-                                        list_move_tail(&curr->cill_linkage,
-                                                       &prev->cill_linkage);
+                                        cfs_list_move_tail(&curr->cill_linkage,
+                                                           &prev->cill_linkage);
                                         done = 0;
                                         continue; /* don't change prev: it's
                                                    * still "previous" */
@@ -292,13 +293,13 @@ static void cl_io_locks_sort(struct cl_io *io)
  * \retval +ve there is a matching lock in the \a queue
  * \retval   0 there are no matching locks in the \a queue
  */
-int cl_queue_match(const struct list_head *queue,
+int cl_queue_match(const cfs_list_t *queue,
                    const struct cl_lock_descr *need)
 {
        struct cl_io_lock_link *scan;
 
        ENTRY;
-       list_for_each_entry(scan, queue, cill_linkage) {
+       cfs_list_for_each_entry(scan, queue, cill_linkage) {
                if (cl_lock_descr_match(&scan->cill_descr, need))
                        RETURN(+1);
        }
@@ -326,11 +327,12 @@ static int cl_lockset_lock_one(const struct lu_env *env,
         lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
         if (!IS_ERR(lock)) {
                 link->cill_lock = lock;
-                list_move(&link->cill_linkage, &set->cls_curr);
+                cfs_list_move(&link->cill_linkage, &set->cls_curr);
                 if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
                         result = cl_wait(env, lock);
                         if (result == 0)
-                                list_move(&link->cill_linkage, &set->cls_done);
+                                cfs_list_move(&link->cill_linkage,
+                                              &set->cls_done);
                 } else
                         result = 0;
         } else
@@ -344,7 +346,7 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
         struct cl_lock *lock = link->cill_lock;
 
         ENTRY;
-        list_del_init(&link->cill_linkage);
+        cfs_list_del_init(&link->cill_linkage);
         if (lock != NULL) {
                 cl_lock_release(env, lock, "io", io);
                 link->cill_lock = NULL;
@@ -364,7 +366,7 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
 
         ENTRY;
         result = 0;
-        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
+        cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                 if (!cl_lockset_match(set, &link->cill_descr, 0)) {
                         /* XXX some locking to guarantee that locks aren't
                          * expanded in between. */
@@ -375,12 +377,13 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
                         cl_lock_link_fini(env, io, link);
         }
         if (result == 0) {
-                list_for_each_entry_safe(link, temp,
-                                         &set->cls_curr, cill_linkage) {
+                cfs_list_for_each_entry_safe(link, temp,
+                                             &set->cls_curr, cill_linkage) {
                         lock = link->cill_lock;
                         result = cl_wait(env, lock);
                         if (result == 0)
-                                list_move(&link->cill_linkage, &set->cls_done);
+                                cfs_list_move(&link->cill_linkage,
+                                              &set->cls_done);
                         else
                                 break;
                 }
@@ -441,13 +444,13 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
         ENTRY;
         set = &io->ci_lockset;
 
-        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
+        cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
                 cl_lock_link_fini(env, io, link);
 
-        list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
+        cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
                 cl_lock_link_fini(env, io, link);
 
-        list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
+        cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
                 cl_unuse(env, link->cill_lock);
                 cl_lock_link_fini(env, io, link);
         }
@@ -555,7 +558,7 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
         if (cl_lockset_match(&io->ci_lockset, &link->cill_descr, 1))
                 result = +1;
         else {
-                list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
+                cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
                 result = 0;
         }
         RETURN(result);
@@ -847,7 +850,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
         /*
          * If ->cio_submit() failed, no pages were sent.
          */
-        LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
+        LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
         RETURN(result);
 }
 EXPORT_SYMBOL(cl_io_submit_rw);
@@ -889,7 +892,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                  rc = cl_sync_io_wait(env, io, &queue->c2_qout,
                                       anchor, timeout);
         } else {
-                LASSERT(list_empty(&queue->c2_qout.pl_pages));
+                LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
                 cl_page_list_for_each(pg, &queue->c2_qin)
                         pg->cp_sync_io = NULL;
         }
@@ -991,13 +994,13 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
                      struct cl_object *obj,
                      const struct cl_io_operations *ops)
 {
-        struct list_head *linkage = &slice->cis_linkage;
+        cfs_list_t *linkage = &slice->cis_linkage;
 
         LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
-                list_empty(linkage));
+                cfs_list_empty(linkage));
         ENTRY;
 
-        list_add_tail(linkage, &io->ci_layers);
+        cfs_list_add_tail(linkage, &io->ci_layers);
         slice->cis_io  = io;
         slice->cis_obj = obj;
         slice->cis_iop = ops;
@@ -1030,11 +1033,11 @@ void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
         LASSERT(page->cp_owner != NULL);
         LINVRNT(plist->pl_owner == cfs_current());
 
-        lockdep_off();
-        mutex_lock(&page->cp_mutex);
-        lockdep_on();
-        LASSERT(list_empty(&page->cp_batch));
-        list_add_tail(&page->cp_batch, &plist->pl_pages);
+        cfs_lockdep_off();
+        cfs_mutex_lock(&page->cp_mutex);
+        cfs_lockdep_on();
+        LASSERT(cfs_list_empty(&page->cp_batch));
+        cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
         ++plist->pl_nr;
         page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
         cl_page_get(page);
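
Note: cl_page_list_add() shows the mutex and lockdep shims side by side: cp_mutex is taken through cfs_mutex_lock() with lockdep checking temporarily disabled around the acquisition. On a lockdep-enabled Linux kernel the wrappers are assumed to map directly:

    /* Assumed Linux aliases for the renamed mutex/lockdep API. */
    #define cfs_mutex_lock(m)    mutex_lock(m)
    #define cfs_mutex_unlock(m)  mutex_unlock(m)
    #define cfs_lockdep_off()    lockdep_off()
    #define cfs_lockdep_on()     lockdep_on()
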
@@ -1052,10 +1055,10 @@ void cl_page_list_del(const struct lu_env *env,
         LINVRNT(plist->pl_owner == cfs_current());
 
         ENTRY;
-        list_del_init(&page->cp_batch);
-        lockdep_off();
-        mutex_unlock(&page->cp_mutex);
-        lockdep_on();
+        cfs_list_del_init(&page->cp_batch);
+        cfs_lockdep_off();
+        cfs_mutex_unlock(&page->cp_mutex);
+        cfs_lockdep_on();
         --plist->pl_nr;
         lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
         cl_page_put(env, page);
@@ -1074,7 +1077,7 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
         LINVRNT(src->pl_owner == cfs_current());
 
         ENTRY;
-        list_move_tail(&page->cp_batch, &dst->pl_pages);
+        cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
         --src->pl_nr;
         ++dst->pl_nr;
         lu_ref_set_at(&page->cp_reference,
@@ -1119,10 +1122,10 @@ void cl_page_list_disown(const struct lu_env *env,
         cl_page_list_for_each_safe(page, temp, plist) {
                 LASSERT(plist->pl_nr > 0);
 
-                list_del_init(&page->cp_batch);
-                lockdep_off();
-                mutex_unlock(&page->cp_mutex);
-                lockdep_on();
+                cfs_list_del_init(&page->cp_batch);
+                cfs_lockdep_off();
+                cfs_mutex_unlock(&page->cp_mutex);
+                cfs_lockdep_on();
                 --plist->pl_nr;
                 /*
                  * cl_page_disown0 rather than usual cl_page_disown() is used,
@@ -1357,7 +1360,7 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
                       const struct cl_req_operations *ops)
 {
         ENTRY;
-        list_add_tail(&slice->crs_linkage, &req->crq_layers);
+        cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
         slice->crs_dev = dev;
         slice->crs_ops = ops;
         slice->crs_req = req;
@@ -1369,9 +1372,9 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
 {
         unsigned i;
 
-        LASSERT(list_empty(&req->crq_pages));
+        LASSERT(cfs_list_empty(&req->crq_pages));
         LASSERT(req->crq_nrpages == 0);
-        LINVRNT(list_empty(&req->crq_layers));
+        LINVRNT(cfs_list_empty(&req->crq_layers));
         LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
         ENTRY;
 
@@ -1402,7 +1405,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
         result = 0;
         page = cl_page_top(page);
         do {
-                list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                         dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
                         if (dev->cd_ops->cdo_req_init != NULL) {
                                 result = dev->cd_ops->cdo_req_init(env,
@@ -1428,10 +1431,10 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
         /*
          * for the lack of list_for_each_entry_reverse_safe()...
          */
-        while (!list_empty(&req->crq_layers)) {
-                slice = list_entry(req->crq_layers.prev,
-                                   struct cl_req_slice, crs_linkage);
-                list_del_init(&slice->crs_linkage);
+        while (!cfs_list_empty(&req->crq_layers)) {
+                slice = cfs_list_entry(req->crq_layers.prev,
+                                       struct cl_req_slice, crs_linkage);
+                cfs_list_del_init(&slice->crs_linkage);
                 if (slice->crs_ops->cro_completion != NULL)
                         slice->crs_ops->cro_completion(env, slice, rc);
         }
@@ -1488,10 +1491,10 @@ void cl_req_page_add(const struct lu_env *env,
         page = cl_page_top(page);
 
         LINVRNT(cl_page_is_vmlocked(env, page));
-        LASSERT(list_empty(&page->cp_flight));
+        LASSERT(cfs_list_empty(&page->cp_flight));
         LASSERT(page->cp_req == NULL);
 
-        list_add_tail(&page->cp_flight, &req->crq_pages);
+        cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
         ++req->crq_nrpages;
         page->cp_req = req;
         obj = cl_object_top(page->cp_obj);
@@ -1520,10 +1523,10 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
         page = cl_page_top(page);
 
         LINVRNT(cl_page_is_vmlocked(env, page));
-        LASSERT(!list_empty(&page->cp_flight));
+        LASSERT(!cfs_list_empty(&page->cp_flight));
         LASSERT(req->crq_nrpages > 0);
 
-        list_del_init(&page->cp_flight);
+        cfs_list_del_init(&page->cp_flight);
         --req->crq_nrpages;
         page->cp_req = NULL;
         EXIT;
@@ -1549,7 +1552,7 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req)
                 LASSERT(req->crq_o[i].ro_obj != NULL);
 
         result = 0;
-        list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+        cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
                 if (slice->crs_ops->cro_prep != NULL) {
                         result = slice->crs_ops->cro_prep(env, slice);
                         if (result != 0)
@@ -1572,14 +1575,14 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
         struct cl_page            *page;
         int i;
 
-        LASSERT(!list_empty(&req->crq_pages));
+        LASSERT(!cfs_list_empty(&req->crq_pages));
         ENTRY;
 
         /* Take any page to use as a model. */
-        page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
+        page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
 
         for (i = 0; i < req->crq_nrobjs; ++i) {
-                list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+                cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
                         const struct cl_page_slice *scan;
                         const struct cl_object     *obj;
 
@@ -1611,7 +1614,7 @@ void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
 {
         ENTRY;
         cfs_waitq_init(&anchor->csi_waitq);
-        atomic_set(&anchor->csi_sync_nr, nrpages);
+        cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
         anchor->csi_sync_rc  = 0;
         EXIT;
 }
@@ -1633,23 +1636,23 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
         LASSERT(timeout >= 0);
 
         rc = l_wait_event(anchor->csi_waitq,
-                          atomic_read(&anchor->csi_sync_nr) == 0,
+                          cfs_atomic_read(&anchor->csi_sync_nr) == 0,
                           &lwi);
         if (rc < 0) {
                 CERROR("SYNC IO failed with error: %d, try to cancel "
                        "%d remaining pages\n",
-                       rc, atomic_read(&anchor->csi_sync_nr));
+                       rc, cfs_atomic_read(&anchor->csi_sync_nr));
 
                 (void)cl_io_cancel(env, io, queue);
 
                 lwi = (struct l_wait_info) { 0 };
                 (void)l_wait_event(anchor->csi_waitq,
-                                   atomic_read(&anchor->csi_sync_nr) == 0,
+                                   cfs_atomic_read(&anchor->csi_sync_nr) == 0,
                                    &lwi);
         } else {
                 rc = anchor->csi_sync_rc;
         }
-        LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
+        LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
         cl_page_list_assume(env, io, queue);
         POISON(anchor, 0x5a, sizeof *anchor);
         RETURN(rc);
@@ -1669,8 +1672,8 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
          * ->{prepare,commit}_write(). Completion is used to signal the end of
          * IO.
          */
-        LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
-        if (atomic_dec_and_test(&anchor->csi_sync_nr))
+        LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
+        if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
                 cfs_waitq_broadcast(&anchor->csi_waitq);
         EXIT;
 }
index ce219a2..3e8b004 100644
@@ -54,7 +54,7 @@
 #include "cl_internal.h"
 
 /** Lock class of cl_lock::cll_guard */
-static struct lock_class_key cl_lock_guard_class;
+static cfs_lock_class_key_t cl_lock_guard_class;
 static cfs_mem_cache_t *cl_lock_kmem;
 
 static struct lu_kmem_descr cl_lock_caches[] = {
@@ -80,7 +80,7 @@ static int cl_lock_invariant_trusted(const struct lu_env *env,
         return
                 cl_is_lock(lock) &&
                 ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
-                atomic_read(&lock->cll_ref) >= lock->cll_holds &&
+                cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
                 lock->cll_holds >= lock->cll_users &&
                 lock->cll_holds >= 0 &&
                 lock->cll_users >= 0 &&
@@ -97,7 +97,7 @@ static int cl_lock_invariant(const struct lu_env *env,
 {
         int result;
 
-        result = atomic_read(&lock->cll_ref) > 0 &&
+        result = cfs_atomic_read(&lock->cll_ref) > 0 &&
                 cl_lock_invariant_trusted(env, lock);
         if (!result && env != NULL)
                 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
@@ -134,8 +134,8 @@ static void cl_lock_trace0(int level, const struct lu_env *env,
         struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
         CDEBUG(level, "%s: %p@(%i %p %i %d %d %d %d %lx)"
                       "(%p/%d/%i) at %s():%d\n",
-               prefix, lock,
-               atomic_read(&lock->cll_ref), lock->cll_guarder, lock->cll_depth,
+               prefix, lock, cfs_atomic_read(&lock->cll_ref),
+               lock->cll_guarder, lock->cll_depth,
                lock->cll_state, lock->cll_error, lock->cll_holds,
                lock->cll_users, lock->cll_flags,
                env, h->coh_nesting, cl_lock_nr_mutexed(env),
@@ -147,7 +147,7 @@ static void cl_lock_trace0(int level, const struct lu_env *env,
 #define RETIP ((unsigned long)__builtin_return_address(0))
 
 #ifdef CONFIG_LOCKDEP
-static struct lock_class_key cl_lock_key;
+static cfs_lock_class_key_t cl_lock_key;
 
 static void cl_lock_lockdep_init(struct cl_lock *lock)
 {
@@ -198,7 +198,7 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
 {
         ENTRY;
         slice->cls_lock = lock;
-        list_add_tail(&slice->cls_linkage, &lock->cll_layers);
+        cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
         slice->cls_obj = obj;
         slice->cls_ops = ops;
         EXIT;
@@ -262,22 +262,22 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
 
         ENTRY;
         cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
-        might_sleep();
-        while (!list_empty(&lock->cll_layers)) {
+        cfs_might_sleep();
+        while (!cfs_list_empty(&lock->cll_layers)) {
                 struct cl_lock_slice *slice;
 
-                slice = list_entry(lock->cll_layers.next, struct cl_lock_slice,
-                                   cls_linkage);
-                list_del_init(lock->cll_layers.next);
+                slice = cfs_list_entry(lock->cll_layers.next,
+                                       struct cl_lock_slice, cls_linkage);
+                cfs_list_del_init(lock->cll_layers.next);
                 slice->cls_ops->clo_fini(env, slice);
         }
-        atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
-        atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
+        cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
+        cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
         lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
         cl_object_put(env, obj);
         lu_ref_fini(&lock->cll_reference);
         lu_ref_fini(&lock->cll_holders);
-        mutex_destroy(&lock->cll_guard);
+        cfs_mutex_destroy(&lock->cll_guard);
         OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
         EXIT;
 }
@@ -305,14 +305,14 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
         site = cl_object_site(obj);
 
         CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
-               atomic_read(&lock->cll_ref), lock, RETIP);
+               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
 
-        if (atomic_dec_and_test(&lock->cll_ref)) {
+        if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
                 if (lock->cll_state == CLS_FREEING) {
-                        LASSERT(list_empty(&lock->cll_linkage));
+                        LASSERT(cfs_list_empty(&lock->cll_linkage));
                         cl_lock_free(env, lock);
                 }
-                atomic_dec(&site->cs_locks.cs_busy);
+                cfs_atomic_dec(&site->cs_locks.cs_busy);
         }
         EXIT;
 }
@@ -330,8 +330,8 @@ void cl_lock_get(struct cl_lock *lock)
 {
         LINVRNT(cl_lock_invariant(NULL, lock));
         CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
-               atomic_read(&lock->cll_ref), lock, RETIP);
-        atomic_inc(&lock->cll_ref);
+               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
+        cfs_atomic_inc(&lock->cll_ref);
 }
 EXPORT_SYMBOL(cl_lock_get);
 
@@ -350,9 +350,9 @@ void cl_lock_get_trust(struct cl_lock *lock)
 
         LASSERT(cl_is_lock(lock));
         CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
-               atomic_read(&lock->cll_ref), lock, RETIP);
-        if (atomic_inc_return(&lock->cll_ref) == 1)
-                atomic_inc(&site->cs_locks.cs_busy);
+               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
+        if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
+                cfs_atomic_inc(&site->cs_locks.cs_busy);
 }
 EXPORT_SYMBOL(cl_lock_get_trust);
 
@@ -383,7 +383,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
         ENTRY;
         OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
         if (lock != NULL) {
-                atomic_set(&lock->cll_ref, 1);
+                cfs_atomic_set(&lock->cll_ref, 1);
                 lock->cll_descr = *descr;
                 lock->cll_state = CLS_NEW;
                 cl_object_get(obj);
@@ -394,15 +394,16 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
                 lu_ref_init(&lock->cll_reference);
                 lu_ref_init(&lock->cll_holders);
-                mutex_init(&lock->cll_guard);
-                lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
+                cfs_mutex_init(&lock->cll_guard);
+                cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
                 cfs_waitq_init(&lock->cll_wq);
                 head = obj->co_lu.lo_header;
-                atomic_inc(&site->cs_locks_state[CLS_NEW]);
-                atomic_inc(&site->cs_locks.cs_total);
-                atomic_inc(&site->cs_locks.cs_created);
+                cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
+                cfs_atomic_inc(&site->cs_locks.cs_total);
+                cfs_atomic_inc(&site->cs_locks.cs_created);
                 cl_lock_lockdep_init(lock);
-                list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
+                cfs_list_for_each_entry(obj, &head->loh_layers,
+                                        co_lu.lo_linkage) {
                         int err;
 
                         err = obj->co_ops->coo_lock_init(env, obj, lock, io);
@@ -482,7 +483,7 @@ static int cl_lock_fits_into(const struct lu_env *env,
 
         LINVRNT(cl_lock_invariant_trusted(env, lock));
         ENTRY;
-        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_fits_into != NULL &&
                     !slice->cls_ops->clo_fits_into(env, slice, need, io))
                         RETURN(0);
@@ -504,8 +505,8 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
         head = cl_object_header(obj);
         site = cl_object_site(obj);
         LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
-        atomic_inc(&site->cs_locks.cs_lookup);
-        list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+        cfs_atomic_inc(&site->cs_locks.cs_lookup);
+        cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                 int matched;
 
                 LASSERT(cl_is_lock(lock));
@@ -519,7 +520,7 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
                        matched);
                 if (matched) {
                         cl_lock_get_trust(lock);
-                        atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
+                        cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
                         RETURN(lock);
                 }
         }
@@ -551,23 +552,24 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
         head = cl_object_header(obj);
         site = cl_object_site(obj);
 
-        spin_lock(&head->coh_lock_guard);
+        cfs_spin_lock(&head->coh_lock_guard);
         lock = cl_lock_lookup(env, obj, io, need);
-        spin_unlock(&head->coh_lock_guard);
+        cfs_spin_unlock(&head->coh_lock_guard);
 
         if (lock == NULL) {
                 lock = cl_lock_alloc(env, obj, io, need);
                 if (!IS_ERR(lock)) {
                         struct cl_lock *ghost;
 
-                        spin_lock(&head->coh_lock_guard);
+                        cfs_spin_lock(&head->coh_lock_guard);
                         ghost = cl_lock_lookup(env, obj, io, need);
                         if (ghost == NULL) {
-                                list_add_tail(&lock->cll_linkage, &head->coh_locks);
-                                spin_unlock(&head->coh_lock_guard);
-                                atomic_inc(&site->cs_locks.cs_busy);
+                                cfs_list_add_tail(&lock->cll_linkage,
+                                                  &head->coh_locks);
+                                cfs_spin_unlock(&head->coh_lock_guard);
+                                cfs_atomic_inc(&site->cs_locks.cs_busy);
                         } else {
-                                spin_unlock(&head->coh_lock_guard);
+                                cfs_spin_unlock(&head->coh_lock_guard);
                                 /*
                                  * Other threads can acquire references to the
                                  * top-lock through its sub-locks. Hence, it
@@ -598,9 +600,9 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
         obj  = need->cld_obj;
         head = cl_object_header(obj);
 
-        spin_lock(&head->coh_lock_guard);
+        cfs_spin_lock(&head->coh_lock_guard);
         lock = cl_lock_lookup(env, obj, io, need);
-        spin_unlock(&head->coh_lock_guard);
+        cfs_spin_unlock(&head->coh_lock_guard);
 
         if (lock == NULL)
                 return NULL;
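
Note: cl_lock_peek() above is the lookup-only path; cl_lock_find() in the preceding hunk adds the optimistic-allocation idiom: look up under coh_lock_guard, drop the spinlock to allocate (allocation may sleep), then re-take it and look up again in case a racing thread inserted a "ghost" lock first. In outline, with every identifier below a placeholder rather than an API from this patch:

    cfs_spin_lock(&head->guard);
    obj = lookup(head, key);                  /* fast path, under lock */
    cfs_spin_unlock(&head->guard);
    if (obj == NULL) {
            obj = alloc_object(key);          /* may sleep: no lock held */
            cfs_spin_lock(&head->guard);
            ghost = lookup(head, key);        /* re-check for a racer */
            if (ghost == NULL)
                    cfs_list_add_tail(&obj->linkage, &head->locks);
            cfs_spin_unlock(&head->guard);
            if (ghost != NULL) {
                    free_object(obj);         /* lost the race */
                    obj = ghost;
            }
    }
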
@@ -644,7 +646,7 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
         LINVRNT(cl_lock_invariant_trusted(NULL, lock));
         ENTRY;
 
-        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
                         RETURN(slice);
         }
@@ -693,7 +695,7 @@ void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
                 info = cl_env_info(env);
                 for (i = 0; i < hdr->coh_nesting; ++i)
                         LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
-                mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
+                cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
                 lock->cll_guarder = cfs_current();
                 LINVRNT(lock->cll_depth == 0);
         }
@@ -723,7 +725,7 @@ int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
         if (lock->cll_guarder == cfs_current()) {
                 LINVRNT(lock->cll_depth > 0);
                 cl_lock_mutex_tail(env, lock);
-        } else if (mutex_trylock(&lock->cll_guard)) {
+        } else if (cfs_mutex_trylock(&lock->cll_guard)) {
                 LINVRNT(lock->cll_depth == 0);
                 lock->cll_guarder = cfs_current();
                 cl_lock_mutex_tail(env, lock);
@@ -757,7 +759,7 @@ void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
         counters->ctc_nr_locks_locked--;
         if (--lock->cll_depth == 0) {
                 lock->cll_guarder = NULL;
-                mutex_unlock(&lock->cll_guard);
+                cfs_mutex_unlock(&lock->cll_guard);
         }
 }
 EXPORT_SYMBOL(cl_lock_mutex_put);
@@ -801,8 +803,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
                 const struct cl_lock_slice *slice;
 
                 lock->cll_flags |= CLF_CANCELLED;
-                list_for_each_entry_reverse(slice, &lock->cll_layers,
-                                            cls_linkage) {
+                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
+                                                cls_linkage) {
                         if (slice->cls_ops->clo_cancel != NULL)
                                 slice->cls_ops->clo_cancel(env, slice);
                 }
@@ -825,16 +827,16 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
 
                 head = cl_object_header(lock->cll_descr.cld_obj);
 
-                spin_lock(&head->coh_lock_guard);
-                list_del_init(&lock->cll_linkage);
+                cfs_spin_lock(&head->coh_lock_guard);
+                cfs_list_del_init(&lock->cll_linkage);
 
-                spin_unlock(&head->coh_lock_guard);
+                cfs_spin_unlock(&head->coh_lock_guard);
                 /*
                  * From now on, no new references to this lock can be acquired
                  * by cl_lock_lookup().
                  */
-                list_for_each_entry_reverse(slice, &lock->cll_layers,
-                                            cls_linkage) {
+                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
+                                                cls_linkage) {
                         if (slice->cls_ops->clo_delete != NULL)
                                 slice->cls_ops->clo_delete(env, slice);
                 }
@@ -959,14 +961,14 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
         if (result == 0) {
                 cfs_waitlink_init(&waiter);
                 cfs_waitq_add(&lock->cll_wq, &waiter);
-                set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                 cl_lock_mutex_put(env, lock);
 
                 LASSERT(cl_lock_nr_mutexed(env) == 0);
                 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
 
                 cl_lock_mutex_get(env, lock);
-                set_current_state(CFS_TASK_RUNNING);
+                cfs_set_current_state(CFS_TASK_RUNNING);
                 cfs_waitq_del(&lock->cll_wq, &waiter);
                 result = cfs_signal_pending() ? -EINTR : 0;
         }
@@ -983,7 +985,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
         LINVRNT(cl_lock_is_mutexed(lock));
         LINVRNT(cl_lock_invariant(env, lock));
 
-        list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
                 if (slice->cls_ops->clo_state != NULL)
                         slice->cls_ops->clo_state(env, slice, state);
         cfs_waitq_broadcast(&lock->cll_wq);
@@ -1031,8 +1033,8 @@ void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
                 lock->cll_state == CLS_INTRANSIT);
 
         if (lock->cll_state != state) {
-                atomic_dec(&site->cs_locks_state[lock->cll_state]);
-                atomic_inc(&site->cs_locks_state[state]);
+                cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
+                cfs_atomic_inc(&site->cs_locks_state[state]);
 
                 cl_lock_state_signal(env, lock, state);
                 lock->cll_state = state;
@@ -1054,8 +1056,8 @@ static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
                 LASSERT(lock->cll_state == CLS_INTRANSIT);
 
                 result = -ENOSYS;
-                list_for_each_entry_reverse(slice, &lock->cll_layers,
-                                            cls_linkage) {
+                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
+                                                cls_linkage) {
                         if (slice->cls_ops->clo_unuse != NULL) {
                                 result = slice->cls_ops->clo_unuse(env, slice);
                                 if (result != 0)
@@ -1089,7 +1091,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
 
         result = -ENOSYS;
         state = cl_lock_intransit(env, lock);
-        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_use != NULL) {
                         result = slice->cls_ops->clo_use(env, slice);
                         if (result != 0)
@@ -1142,7 +1144,7 @@ static int cl_enqueue_kick(const struct lu_env *env,
 
         ENTRY;
         result = -ENOSYS;
-        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_enqueue != NULL) {
                         result = slice->cls_ops->clo_enqueue(env,
                                                              slice, io, flags);
@@ -1421,7 +1423,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
                         break;
 
                 result = -ENOSYS;
-                list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+                cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                         if (slice->cls_ops->clo_wait != NULL) {
                                 result = slice->cls_ops->clo_wait(env, slice);
                                 if (result != 0)
@@ -1495,7 +1497,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
         LINVRNT(cl_lock_invariant(env, lock));
 
         pound = 0;
-        list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+        cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_weigh != NULL) {
                         ounce = slice->cls_ops->clo_weigh(env, slice);
                         pound += ounce;
@@ -1532,7 +1534,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
         LINVRNT(cl_lock_is_mutexed(lock));
         LINVRNT(cl_lock_invariant(env, lock));
 
-        list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+        cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_modify != NULL) {
                         result = slice->cls_ops->clo_modify(env, slice, desc);
                         if (result != 0)
@@ -1546,9 +1548,9 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
          * now. If locks were indexed according to their extent and/or mode,
          * that index would have to be updated here.
          */
-        spin_lock(&hdr->coh_lock_guard);
+        cfs_spin_lock(&hdr->coh_lock_guard);
         lock->cll_descr = *desc;
-        spin_unlock(&hdr->coh_lock_guard);
+        cfs_spin_unlock(&hdr->coh_lock_guard);
         RETURN(0);
 }
 EXPORT_SYMBOL(cl_lock_modify);
@@ -1594,7 +1596,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
 
         result = cl_lock_enclosure(env, lock, closure);
         if (result == 0) {
-                list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+                cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                         if (slice->cls_ops->clo_closure != NULL) {
                                 result = slice->cls_ops->clo_closure(env, slice,
                                                                      closure);
@@ -1627,10 +1629,10 @@ int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
                  * If lock->cll_inclosure is not empty, lock is already in
                  * this closure.
                  */
-                if (list_empty(&lock->cll_inclosure)) {
+                if (cfs_list_empty(&lock->cll_inclosure)) {
                         cl_lock_get_trust(lock);
                         lu_ref_add(&lock->cll_reference, "closure", closure);
-                        list_add(&lock->cll_inclosure, &closure->clc_list);
+                        cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
                         closure->clc_nr++;
                 } else
                         cl_lock_mutex_put(env, lock);
@@ -1664,8 +1666,9 @@ void cl_lock_disclosure(const struct lu_env *env,
         struct cl_lock *temp;
 
         cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
-        list_for_each_entry_safe(scan, temp, &closure->clc_list, cll_inclosure){
-                list_del_init(&scan->cll_inclosure);
+        cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
+                                     cll_inclosure) {
+                cfs_list_del_init(&scan->cll_inclosure);
                 cl_lock_mutex_put(env, scan);
                 lu_ref_del(&scan->cll_reference, "closure", closure);
                 cl_lock_put(env, scan);
@@ -1679,7 +1682,7 @@ EXPORT_SYMBOL(cl_lock_disclosure);
 void cl_lock_closure_fini(struct cl_lock_closure *closure)
 {
         LASSERT(closure->clc_nr == 0);
-        LASSERT(list_empty(&closure->clc_list));
+        LASSERT(cfs_list_empty(&closure->clc_list));
 }
 EXPORT_SYMBOL(cl_lock_closure_fini);
 
@@ -1798,10 +1801,10 @@ struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
         need->cld_start = need->cld_end = page->cp_index;
         need->cld_enq_flags = 0;
 
-        spin_lock(&head->coh_lock_guard);
+        cfs_spin_lock(&head->coh_lock_guard);
         /* It is fine to match any group lock since there could be only one
          * with a unique gid and it conflicts with all other lock modes too */
-        list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+        cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                 if (scan != except &&
                     (scan->cll_descr.cld_mode == CLM_GROUP ||
                     cl_lock_ext_match(&scan->cll_descr, need)) &&
@@ -1821,7 +1824,7 @@ struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
                         break;
                 }
         }
-        spin_unlock(&head->coh_lock_guard);
+        cfs_spin_unlock(&head->coh_lock_guard);
         RETURN(lock);
 }
 EXPORT_SYMBOL(cl_lock_at_page);
@@ -1870,8 +1873,8 @@ void cl_lock_page_list_fixup(const struct lu_env *env,
                         continue;
 
                 descr = &found->cll_descr;
-                list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
-                                              cp_batch) {
+                cfs_list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
+                                                  cp_batch) {
                         idx = page->cp_index;
                         if (descr->cld_start > idx || descr->cld_end < idx)
                                 break;
@@ -1988,12 +1991,12 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
         LASSERT(ergo(!cancel,
                      head->coh_tree.rnode == NULL && head->coh_pages == 0));
 
-        spin_lock(&head->coh_lock_guard);
-        while (!list_empty(&head->coh_locks)) {
+        cfs_spin_lock(&head->coh_lock_guard);
+        while (!cfs_list_empty(&head->coh_locks)) {
                 lock = container_of(head->coh_locks.next,
                                     struct cl_lock, cll_linkage);
                 cl_lock_get_trust(lock);
-                spin_unlock(&head->coh_lock_guard);
+                cfs_spin_unlock(&head->coh_lock_guard);
                 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
                 cl_lock_mutex_get(env, lock);
                 if (lock->cll_state < CLS_FREEING) {
@@ -2006,9 +2009,9 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
                 cl_lock_mutex_put(env, lock);
                 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
                 cl_lock_put(env, lock);
-                spin_lock(&head->coh_lock_guard);
+                cfs_spin_lock(&head->coh_lock_guard);
         }
-        spin_unlock(&head->coh_lock_guard);
+        cfs_spin_unlock(&head->coh_lock_guard);
         EXIT;
 }
 EXPORT_SYMBOL(cl_locks_prune);
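
cl_locks_prune() above is a textbook instance of a pattern repeated through this file: the list head is guarded by a spinlock, but per-entry work can block, so each iteration pins the entry, drops the guard, does the work, and re-takes the guard before looking at the list again. A minimal userspace analogue, with hypothetical node and cancel types and a mutex standing in for the spinlock:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical miniature of the cl_locks_prune() loop: unlink the head
 * under the guard, drop the guard for work that may sleep, then take
 * the guard back before examining the list again. */
struct node { struct node *next; };

static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void prune_all(void (*cancel)(struct node *))
{
        pthread_mutex_lock(&guard);
        while (head != NULL) {
                struct node *n = head;

                head = n->next;                 /* unlink under the guard */
                pthread_mutex_unlock(&guard);   /* cancel() may sleep */
                cancel(n);
                free(n);
                pthread_mutex_lock(&guard);     /* back for the next entry */
        }
        pthread_mutex_unlock(&guard);
}
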
@@ -2233,13 +2236,13 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
 {
         const struct cl_lock_slice *slice;
         (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
-                   lock, atomic_read(&lock->cll_ref),
+                   lock, cfs_atomic_read(&lock->cll_ref),
                    lock->cll_state, lock->cll_error, lock->cll_holds,
                    lock->cll_users, lock->cll_flags);
         cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
         (*printer)(env, cookie, " {\n");
 
-        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 (*printer)(env, cookie, "    %s@%p: ",
                            slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
                            slice);
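
The operations in this file (cl_wait_try(), cl_lock_weigh(), cl_lock_modify(), cl_lock_print()) all dispatch through the same shape of loop: walk lock->cll_layers and invoke an optional per-layer method, defaulting to -ENOSYS and stopping at the first non-zero result. A reduced model with hypothetical types:

#include <stddef.h>

#define ENOSYS_ERR (-38)        /* stand-in for -ENOSYS */

/* Reduced model of the per-layer dispatch used across cl_lock.c: walk
 * the slices, call the optional method, stop at the first non-zero
 * result.  If no layer implements the method, -ENOSYS is returned. */
struct slice;
struct slice_ops { int (*clo_wait)(const struct slice *); };
struct slice { const struct slice_ops *cls_ops; struct slice *next; };

static int lock_wait_try(const struct slice *layers)
{
        int result = ENOSYS_ERR;

        for (const struct slice *s = layers; s != NULL; s = s->next) {
                if (s->cls_ops->clo_wait != NULL) {
                        result = s->cls_ops->clo_wait(s);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
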
index 3d6ef8c..1971baa 100644 (file)
 static cfs_mem_cache_t *cl_env_kmem;
 
 /** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
+static cfs_lock_class_key_t cl_page_guard_class;
 /** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
+static cfs_lock_class_key_t cl_lock_guard_class;
 /** Lock class of cl_object_header::coh_attr_guard */
-static struct lock_class_key cl_attr_guard_class;
+static cfs_lock_class_key_t cl_attr_guard_class;
 
 /**
  * Initialize cl_object_header.
@@ -86,12 +86,12 @@ int cl_object_header_init(struct cl_object_header *h)
         ENTRY;
         result = lu_object_header_init(&h->coh_lu);
         if (result == 0) {
-                spin_lock_init(&h->coh_page_guard);
-                spin_lock_init(&h->coh_lock_guard);
-                spin_lock_init(&h->coh_attr_guard);
-                lockdep_set_class(&h->coh_attr_guard, &cl_page_guard_class);
-                lockdep_set_class(&h->coh_attr_guard, &cl_lock_guard_class);
-                lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
+                cfs_spin_lock_init(&h->coh_page_guard);
+                cfs_spin_lock_init(&h->coh_lock_guard);
+                cfs_spin_lock_init(&h->coh_attr_guard);
+                cfs_lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
+                cfs_lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
+                cfs_lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
                 h->coh_pages = 0;
                 /* XXX hard coded GFP_* mask. */
                 INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(cl_object_header_init);
  */
 void cl_object_header_fini(struct cl_object_header *h)
 {
-        LASSERT(list_empty(&h->coh_locks));
+        LASSERT(cfs_list_empty(&h->coh_locks));
         lu_object_header_fini(&h->coh_lu);
 }
 EXPORT_SYMBOL(cl_object_header_fini);
@@ -123,7 +123,7 @@ struct cl_object *cl_object_find(const struct lu_env *env,
                                  struct cl_device *cd, const struct lu_fid *fid,
                                  const struct cl_object_conf *c)
 {
-        might_sleep();
+        cfs_might_sleep();
         return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
 }
 EXPORT_SYMBOL(cl_object_find);
@@ -184,7 +184,7 @@ EXPORT_SYMBOL(cl_object_top);
  *
  * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
  */
-static spinlock_t *cl_object_attr_guard(struct cl_object *o)
+static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
 {
         return &cl_object_header(cl_object_top(o))->coh_attr_guard;
 }
@@ -198,7 +198,7 @@ static spinlock_t *cl_object_attr_guard(struct cl_object *o)
  */
 void cl_object_attr_lock(struct cl_object *o)
 {
-        spin_lock(cl_object_attr_guard(o));
+        cfs_spin_lock(cl_object_attr_guard(o));
 }
 EXPORT_SYMBOL(cl_object_attr_lock);
 
@@ -207,7 +207,7 @@ EXPORT_SYMBOL(cl_object_attr_lock);
  */
 void cl_object_attr_unlock(struct cl_object *o)
 {
-        spin_unlock(cl_object_attr_guard(o));
+        cfs_spin_unlock(cl_object_attr_guard(o));
 }
 EXPORT_SYMBOL(cl_object_attr_unlock);
 
@@ -229,7 +229,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
 
         top = obj->co_lu.lo_header;
         result = 0;
-        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+        cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                 if (obj->co_ops->coo_attr_get != NULL) {
                         result = obj->co_ops->coo_attr_get(env, obj, attr);
                         if (result != 0) {
@@ -261,7 +261,8 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
 
         top = obj->co_lu.lo_header;
         result = 0;
-        list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+        cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
+                                        co_lu.lo_linkage) {
                 if (obj->co_ops->coo_attr_set != NULL) {
                         result = obj->co_ops->coo_attr_set(env, obj, attr, v);
                         if (result != 0) {
@@ -292,7 +293,8 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
         ENTRY;
         top = obj->co_lu.lo_header;
         result = 0;
-        list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+        cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
+                                        co_lu.lo_linkage) {
                 if (obj->co_ops->coo_glimpse != NULL) {
                         result = obj->co_ops->coo_glimpse(env, obj, lvb);
                         if (result != 0)
@@ -320,7 +322,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
         ENTRY;
         top = obj->co_lu.lo_header;
         result = 0;
-        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+        cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                 if (obj->co_ops->coo_conf_set != NULL) {
                         result = obj->co_ops->coo_conf_set(env, obj, conf);
                         if (result != 0)
@@ -346,7 +348,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
         LASSERT(hdr->coh_tree.rnode == NULL);
         LASSERT(hdr->coh_pages == 0);
 
-        set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
+        cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
         /*
          * Destroy all locks. Object destruction (including cl_inode_fini())
          * cannot cancel the locks, because in the case of a local client,
@@ -378,9 +380,9 @@ int cl_object_has_locks(struct cl_object *obj)
         struct cl_object_header *head = cl_object_header(obj);
         int has;
 
-        spin_lock(&head->coh_lock_guard);
-        has = list_empty(&head->coh_locks);
-        spin_unlock(&head->coh_lock_guard);
+        cfs_spin_lock(&head->coh_lock_guard);
+        has = cfs_list_empty(&head->coh_locks);
+        cfs_spin_unlock(&head->coh_lock_guard);
 
         return (has == 0);
 }
@@ -389,10 +391,10 @@ EXPORT_SYMBOL(cl_object_has_locks);
 void cache_stats_init(struct cache_stats *cs, const char *name)
 {
         cs->cs_name = name;
-        atomic_set(&cs->cs_lookup, 0);
-        atomic_set(&cs->cs_hit,    0);
-        atomic_set(&cs->cs_total,  0);
-        atomic_set(&cs->cs_busy,   0);
+        cfs_atomic_set(&cs->cs_lookup, 0);
+        cfs_atomic_set(&cs->cs_hit,    0);
+        cfs_atomic_set(&cs->cs_total,  0);
+        cfs_atomic_set(&cs->cs_busy,   0);
 }
 
 int cache_stats_print(const struct cache_stats *cs,
@@ -410,11 +412,11 @@ int cache_stats_print(const struct cache_stats *cs,
         nob += snprintf(page + nob, count - nob,
                         "%5.5s: %6u %6u %6u %6u %6u",
                         cs->cs_name,
-                        atomic_read(&cs->cs_lookup),
-                        atomic_read(&cs->cs_hit),
-                        atomic_read(&cs->cs_total),
-                        atomic_read(&cs->cs_busy),
-                        atomic_read(&cs->cs_created));
+                        cfs_atomic_read(&cs->cs_lookup),
+                        cfs_atomic_read(&cs->cs_hit),
+                        cfs_atomic_read(&cs->cs_total),
+                        cfs_atomic_read(&cs->cs_busy),
+                        cfs_atomic_read(&cs->cs_created));
         return nob;
 }
 
@@ -434,9 +436,9 @@ int cl_site_init(struct cl_site *s, struct cl_device *d)
                 cache_stats_init(&s->cs_pages, "pages");
                 cache_stats_init(&s->cs_locks, "locks");
                 for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
-                        atomic_set(&s->cs_pages_state[0], 0);
+                        cfs_atomic_set(&s->cs_pages_state[i], 0);
                 for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
-                        atomic_set(&s->cs_locks_state[i], 0);
+                        cfs_atomic_set(&s->cs_locks_state[i], 0);
         }
         return result;
 }
@@ -453,11 +455,11 @@ EXPORT_SYMBOL(cl_site_fini);
 
 static struct cache_stats cl_env_stats = {
         .cs_name    = "envs",
-        .cs_created = ATOMIC_INIT(0),
-        .cs_lookup  = ATOMIC_INIT(0),
-        .cs_hit     = ATOMIC_INIT(0),
-        .cs_total   = ATOMIC_INIT(0),
-        .cs_busy    = ATOMIC_INIT(0)
+        .cs_created = CFS_ATOMIC_INIT(0),
+        .cs_lookup  = CFS_ATOMIC_INIT(0),
+        .cs_hit     = CFS_ATOMIC_INIT(0),
+        .cs_total   = CFS_ATOMIC_INIT(0),
+        .cs_busy    = CFS_ATOMIC_INIT(0)
 };
 
 /**
@@ -496,14 +498,14 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
         for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
                 nob += snprintf(page + nob, count - nob, "%s: %u ",
                                 pstate[i],
-                                atomic_read(&site->cs_pages_state[i]));
+                                cfs_atomic_read(&site->cs_pages_state[i]));
         nob += snprintf(page + nob, count - nob, "]\n");
         nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
         nob += snprintf(page + nob, count - nob, " [");
         for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
                 nob += snprintf(page + nob, count - nob, "%s: %u ",
                                 lstate[i],
-                                atomic_read(&site->cs_locks_state[i]));
+                                cfs_atomic_read(&site->cs_locks_state[i]));
         nob += snprintf(page + nob, count - nob, "]\n");
         nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
         nob += snprintf(page + nob, count - nob, "\n");
@@ -521,7 +523,7 @@ static CFS_LIST_HEAD(cl_envs);
 static unsigned cl_envs_cached_nr  = 0;
 static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
                                            * for now. */
-static spinlock_t cl_envs_guard = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t cl_envs_guard = CFS_SPIN_LOCK_UNLOCKED;
 
 struct cl_env {
         void             *ce_magic;
@@ -531,7 +533,7 @@ struct cl_env {
          * This allows cl_env to be entered into cl_env_hash which implements
          * the current thread -> client environment lookup.
          */
-        struct hlist_node ce_node;
+        cfs_hlist_node_t  ce_node;
         /**
          * Owner of the current cl_env, the key for cfs_hash.
          * At present the current thread pointer is stored.
@@ -541,7 +543,7 @@ struct cl_env {
          * Linkage into global list of all client environments. Used for
          * garbage collection.
          */
-        struct list_head  ce_linkage;
+        cfs_list_t        ce_linkage;
         /*
          *
          */
@@ -553,12 +555,12 @@ struct cl_env {
         void             *ce_debug;
 };
 
-#define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.counter)
+#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.counter)
 
 #define CL_ENV_DEC(counter)                                             \
         do {                                                            \
-                LASSERT(atomic_read(&cl_env_stats.counter) > 0);        \
-                atomic_dec(&cl_env_stats.counter);                      \
+                LASSERT(cfs_atomic_read(&cl_env_stats.counter) > 0);    \
+                cfs_atomic_dec(&cl_env_stats.counter);                  \
         } while (0)
 
 /*****************************************************************************
@@ -579,14 +581,14 @@ static unsigned cl_env_hops_hash(cfs_hash_t *lh, void *key, unsigned mask)
 #endif
 }
 
-static void *cl_env_hops_obj(struct hlist_node *hn)
+static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
 {
-        struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
+        struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
         LASSERT(cle->ce_magic == &cl_env_init0);
         return (void *)cle;
 }
 
-static int cl_env_hops_compare(void *key, struct hlist_node *hn)
+static int cl_env_hops_compare(void *key, cfs_hlist_node_t *hn)
 {
         struct cl_env *cle = cl_env_hops_obj(hn);
 
@@ -697,15 +699,15 @@ static struct lu_env *cl_env_obtain(void *debug)
         struct lu_env *env;
 
         ENTRY;
-        spin_lock(&cl_envs_guard);
-        LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+        cfs_spin_lock(&cl_envs_guard);
+        LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
         if (cl_envs_cached_nr > 0) {
                 int rc;
 
                 cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
-                list_del_init(&cle->ce_linkage);
+                cfs_list_del_init(&cle->ce_linkage);
                 cl_envs_cached_nr--;
-                spin_unlock(&cl_envs_guard);
+                cfs_spin_unlock(&cl_envs_guard);
 
                 env = &cle->ce_lu;
                 rc = lu_env_refill(env);
@@ -718,7 +720,7 @@ static struct lu_env *cl_env_obtain(void *debug)
                         env = ERR_PTR(rc);
                 }
         } else {
-                spin_unlock(&cl_envs_guard);
+                cfs_spin_unlock(&cl_envs_guard);
                 env = cl_env_new(0, debug);
         }
         RETURN(env);
@@ -823,19 +825,19 @@ unsigned cl_env_cache_purge(unsigned nr)
         struct cl_env *cle;
 
         ENTRY;
-        spin_lock(&cl_envs_guard);
-        for (; !list_empty(&cl_envs) && nr > 0; --nr) {
+        cfs_spin_lock(&cl_envs_guard);
+        for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
                 cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
-                list_del_init(&cle->ce_linkage);
+                cfs_list_del_init(&cle->ce_linkage);
                 LASSERT(cl_envs_cached_nr > 0);
                 cl_envs_cached_nr--;
-                spin_unlock(&cl_envs_guard);
+                cfs_spin_unlock(&cl_envs_guard);
 
                 cl_env_fini(cle);
-                spin_lock(&cl_envs_guard);
+                cfs_spin_lock(&cl_envs_guard);
         }
-        LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
-        spin_unlock(&cl_envs_guard);
+        LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+        cfs_spin_unlock(&cl_envs_guard);
         RETURN(nr);
 }
 EXPORT_SYMBOL(cl_env_cache_purge);
@@ -871,10 +873,10 @@ void cl_env_put(struct lu_env *env, int *refcheck)
                 if (cl_envs_cached_nr < cl_envs_cached_max &&
                     (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
                     (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
-                        spin_lock(&cl_envs_guard);
-                        list_add(&cle->ce_linkage, &cl_envs);
+                        cfs_spin_lock(&cl_envs_guard);
+                        cfs_list_add(&cle->ce_linkage, &cl_envs);
                         cl_envs_cached_nr++;
-                        spin_unlock(&cl_envs_guard);
+                        cfs_spin_unlock(&cl_envs_guard);
                 } else
                         cl_env_fini(cle);
         }
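
cl_env_put() above parks a reusable environment on the global cl_envs list (bounded by cl_envs_cached_max) rather than destroying it, and cl_env_obtain() pops from the same list. A compact userspace analogue of that bounded free-list, names hypothetical:

#include <pthread.h>
#include <stdlib.h>

/* Sketch of the cl_env cache: released environments go on a bounded
 * free list under a guard instead of being destroyed, so the next
 * cl_env_get() can reuse one cheaply. */
struct env { struct env *next; };

static pthread_mutex_t envs_guard = PTHREAD_MUTEX_INITIALIZER;
static struct env *envs;                /* stands in for cl_envs */
static unsigned cached_nr, cached_max = 128;

static void env_put(struct env *e)
{
        pthread_mutex_lock(&envs_guard);
        if (cached_nr < cached_max) {
                e->next = envs;         /* cache it for reuse */
                envs = e;
                cached_nr++;
                e = NULL;
        }
        pthread_mutex_unlock(&envs_guard);
        free(e);                        /* cache full: really destroy */
}
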
index 5ae367e..603184e 100644 (file)
@@ -122,8 +122,8 @@ static void cl_page_get_trust(struct cl_page *page)
         /*
          * Checkless version for trusted users.
          */
-        if (atomic_inc_return(&page->cp_ref) == 1)
-                atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
+        if (cfs_atomic_inc_return(&page->cp_ref) == 1)
+                cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
 }
 
 /**
@@ -141,14 +141,14 @@ cl_page_at_trusted(const struct cl_page *page,
 #ifdef INVARIANT_CHECK
         struct cl_object_header *ch = cl_object_header(page->cp_obj);
 
-        if (!atomic_read(&page->cp_ref))
+        if (!cfs_atomic_read(&page->cp_ref))
                 LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
 #endif
         ENTRY;
 
         page = cl_page_top_trusted((struct cl_page *)page);
         do {
-                list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                         if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                                 RETURN(slice);
                 }
@@ -208,7 +208,7 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
         hdr = cl_object_header(obj);
         pvec = cl_env_info(env)->clt_pvec;
         dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
-        spin_lock(&hdr->coh_page_guard);
+        cfs_spin_lock(&hdr->coh_page_guard);
         while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
                                             idx, CLT_PVEC_SIZE)) > 0) {
                 idx = pvec[nr - 1]->cp_index + 1;
@@ -254,7 +254,7 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                  * check that pages weren't truncated (cl_page_own() returns
                  * error in the latter case).
                  */
-                spin_unlock(&hdr->coh_page_guard);
+                cfs_spin_unlock(&hdr->coh_page_guard);
                 for (i = 0; i < j; ++i) {
                         page = pvec[i];
                         if (page_own(env, io, page) == 0)
@@ -263,11 +263,11 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                                    "page_list", cfs_current());
                         cl_page_put(env, page);
                 }
-                spin_lock(&hdr->coh_page_guard);
+                cfs_spin_lock(&hdr->coh_page_guard);
                 if (nr < CLT_PVEC_SIZE)
                         break;
         }
-        spin_unlock(&hdr->coh_page_guard);
+        cfs_spin_unlock(&hdr->coh_page_guard);
         EXIT;
 }
 EXPORT_SYMBOL(cl_page_gang_lookup);
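
cl_page_gang_lookup() combines the radix-tree batch API with the guard-dropping idiom: up to CLT_PVEC_SIZE pages are collected per pass under coh_page_guard, the guard is released while the batch is processed (page_own() may sleep), and the scan resumes after the last index seen. A userspace model with hypothetical types:

#include <pthread.h>

#define PVEC 14                 /* batch size, like CLT_PVEC_SIZE */

/* Model of the batched scan: fetch up to PVEC items per pass under
 * the guard, release the guard to process them, then resume from the
 * next index.  A short batch means the scan reached the end. */
struct table { pthread_mutex_t guard; int nitems; int items[1024]; };

/* pretend radix_tree_gang_lookup(): copy up to max items from idx */
static int gang_lookup(struct table *t, int *out, int idx, int max)
{
        int i;

        for (i = 0; i < max && idx + i < t->nitems; i++)
                out[i] = t->items[idx + i];
        return i;
}

static void gang_scan(struct table *t, void (*process)(int))
{
        int pvec[PVEC], idx = 0, nr, i;

        pthread_mutex_lock(&t->guard);
        while ((nr = gang_lookup(t, pvec, idx, PVEC)) > 0) {
                idx += nr;                      /* resume point */
                pthread_mutex_unlock(&t->guard);
                for (i = 0; i < nr; i++)
                        process(pvec[i]);       /* may block */
                pthread_mutex_lock(&t->guard);
                if (nr < PVEC)                  /* short batch: done */
                        break;
        }
        pthread_mutex_unlock(&t->guard);
}
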
@@ -278,24 +278,24 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
         struct cl_site   *site = cl_object_site(obj);
 
         PASSERT(env, page, cl_is_page(page));
-        PASSERT(env, page, list_empty(&page->cp_batch));
+        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
         PASSERT(env, page, page->cp_owner == NULL);
         PASSERT(env, page, page->cp_req == NULL);
         PASSERT(env, page, page->cp_parent == NULL);
         PASSERT(env, page, page->cp_state == CPS_FREEING);
 
         ENTRY;
-        might_sleep();
-        while (!list_empty(&page->cp_layers)) {
+        cfs_might_sleep();
+        while (!cfs_list_empty(&page->cp_layers)) {
                 struct cl_page_slice *slice;
 
-                slice = list_entry(page->cp_layers.next, struct cl_page_slice,
-                                   cpl_linkage);
-                list_del_init(page->cp_layers.next);
+                slice = cfs_list_entry(page->cp_layers.next,
+                                       struct cl_page_slice, cpl_linkage);
+                cfs_list_del_init(page->cp_layers.next);
                 slice->cpl_ops->cpo_fini(env, slice);
         }
-        atomic_dec(&site->cs_pages.cs_total);
-        atomic_dec(&site->cs_pages_state[page->cp_state]);
+        cfs_atomic_dec(&site->cs_pages.cs_total);
+        cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
         lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
         cl_object_put(env, obj);
         lu_ref_fini(&page->cp_reference);
@@ -328,7 +328,7 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
         result = +1;
         OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
         if (page != NULL) {
-                atomic_set(&page->cp_ref, 1);
+                cfs_atomic_set(&page->cp_ref, 1);
                 page->cp_obj = o;
                 cl_object_get(o);
                 page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
@@ -339,10 +339,11 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                 CFS_INIT_LIST_HEAD(&page->cp_layers);
                 CFS_INIT_LIST_HEAD(&page->cp_batch);
                 CFS_INIT_LIST_HEAD(&page->cp_flight);
-                mutex_init(&page->cp_mutex);
+                cfs_mutex_init(&page->cp_mutex);
                 lu_ref_init(&page->cp_reference);
                 head = o->co_lu.lo_header;
-                list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
+                cfs_list_for_each_entry(o, &head->loh_layers,
+                                        co_lu.lo_linkage) {
                         if (o->co_ops->coo_page_init != NULL) {
                                 err = o->co_ops->coo_page_init(env, o,
                                                                page, vmpage);
@@ -356,10 +357,10 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                         }
                 }
                 if (err == NULL) {
-                        atomic_inc(&site->cs_pages.cs_busy);
-                        atomic_inc(&site->cs_pages.cs_total);
-                        atomic_inc(&site->cs_pages_state[CPS_CACHED]);
-                        atomic_inc(&site->cs_pages.cs_created);
+                        cfs_atomic_inc(&site->cs_pages.cs_busy);
+                        cfs_atomic_inc(&site->cs_pages.cs_total);
+                        cfs_atomic_inc(&site->cs_pages_state[CPS_CACHED]);
+                        cfs_atomic_inc(&site->cs_pages.cs_created);
                         result = 0;
                 }
         } else
@@ -392,12 +393,12 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
         int err;
 
         LINVRNT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
-        might_sleep();
+        cfs_might_sleep();
 
         ENTRY;
 
         hdr = cl_object_header(o);
-        atomic_inc(&site->cs_pages.cs_lookup);
+        cfs_atomic_inc(&site->cs_pages.cs_lookup);
 
         CDEBUG(D_PAGE, "%lu@"DFID" %p %lu %i\n",
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
@@ -419,12 +420,12 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                              (void *)radix_tree_lookup(&hdr->coh_tree,
                                                        idx) == page));
         } else {
-                spin_lock(&hdr->coh_page_guard);
+                cfs_spin_lock(&hdr->coh_page_guard);
                 page = cl_page_lookup(hdr, idx);
-                spin_unlock(&hdr->coh_page_guard);
+                cfs_spin_unlock(&hdr->coh_page_guard);
         }
         if (page != NULL) {
-                atomic_inc(&site->cs_pages.cs_hit);
+                cfs_atomic_inc(&site->cs_pages.cs_hit);
                 RETURN(page);
         }
 
@@ -436,7 +437,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
          * XXX optimization: use radix_tree_preload() here, and change tree
          * gfp mask to GFP_KERNEL in cl_object_header_init().
          */
-        spin_lock(&hdr->coh_page_guard);
+        cfs_spin_lock(&hdr->coh_page_guard);
         err = radix_tree_insert(&hdr->coh_tree, idx, page);
         if (err != 0) {
                 ghost = page;
@@ -469,9 +470,9 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                                  * transient pages, so it is impossible to
                                  * have conflicting transient pages.
                                  */
-                                spin_unlock(&hdr->coh_page_guard);
+                                cfs_spin_unlock(&hdr->coh_page_guard);
                                 cl_page_put(env, page);
-                                spin_lock(&hdr->coh_page_guard);
+                                cfs_spin_lock(&hdr->coh_page_guard);
                                 page = ERR_PTR(-EBUSY);
                         }
                 }
@@ -483,10 +484,10 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                 }
                 hdr->coh_pages++;
         }
-        spin_unlock(&hdr->coh_page_guard);
+        cfs_spin_unlock(&hdr->coh_page_guard);
 
         if (unlikely(ghost != NULL)) {
-                atomic_dec(&site->cs_pages.cs_busy);
+                cfs_atomic_dec(&site->cs_pages.cs_busy);
                 cl_page_delete0(env, ghost, 0);
                 cl_page_free(env, ghost);
         }
@@ -528,7 +529,7 @@ static inline int cl_page_invariant(const struct cl_page *pg)
         child  = pg->cp_child;
         owner  = pg->cp_owner;
 
-        return atomic_read(&pg->cp_ref) > 0 &&
+        return cfs_atomic_read(&pg->cp_ref) > 0 &&
                 ergo(parent != NULL, parent->cp_child == pg) &&
                 ergo(child != NULL, child->cp_parent == pg) &&
                 ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
@@ -604,8 +605,8 @@ static void cl_page_state_set0(const struct lu_env *env,
                 PASSERT(env, page,
                         equi(state == CPS_OWNED, page->cp_owner != NULL));
 
-                atomic_dec(&site->cs_pages_state[page->cp_state]);
-                atomic_inc(&site->cs_pages_state[state]);
+                cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
+                cfs_atomic_inc(&site->cs_pages_state[state]);
                 cl_page_state_set_trust(page, state);
         }
         EXIT;
@@ -649,14 +650,15 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
         struct cl_object_header *hdr;
         struct cl_site *site = cl_object_site(page->cp_obj);
 
-        PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
+        PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
 
         ENTRY;
-        CL_PAGE_HEADER(D_TRACE, env, page, "%i\n", atomic_read(&page->cp_ref));
+        CL_PAGE_HEADER(D_TRACE, env, page, "%i\n",
+                       cfs_atomic_read(&page->cp_ref));
 
         hdr = cl_object_header(cl_object_top(page->cp_obj));
-        if (atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
-                atomic_dec(&site->cs_pages.cs_busy);
+        if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
+                cfs_atomic_dec(&site->cs_pages.cs_busy);
                 /* We're going to access the page w/o a reference, but it's
                  * ok because we have grabbed the lock coh_page_guard, which
                  * means nobody is able to free this page behind us.
@@ -666,11 +668,11 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
                         * inside the coh_page_guard. So if it gets here,
                         * it is REALLY the last reference to this page.
                          */
-                        spin_unlock(&hdr->coh_page_guard);
+                        cfs_spin_unlock(&hdr->coh_page_guard);
 
-                        LASSERT(atomic_read(&page->cp_ref) == 0);
+                        LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                         PASSERT(env, page, page->cp_owner == NULL);
-                        PASSERT(env, page, list_empty(&page->cp_batch));
+                        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
                         /*
                          * Page is no longer reachable by other threads. Tear
                          * it down.
@@ -680,7 +682,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
                         EXIT;
                         return;
                 }
-                spin_unlock(&hdr->coh_page_guard);
+                cfs_spin_unlock(&hdr->coh_page_guard);
         }
 
         EXIT;
@@ -700,7 +702,7 @@ cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
          */
         page = cl_page_top(page);
         do {
-                list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                         if (slice->cpl_ops->cpo_vmpage != NULL)
                                 RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
                 }
@@ -732,7 +734,7 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
          * can be rectified easily.
          */
         hdr = cl_object_header(cl_object_top(obj));
-        spin_lock(&hdr->coh_page_guard);
+        cfs_spin_lock(&hdr->coh_page_guard);
         for (page = (void *)vmpage->private;
              page != NULL; page = page->cp_child) {
                 if (cl_object_same(page->cp_obj, obj)) {
@@ -740,7 +742,7 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
                         break;
                 }
         }
-        spin_unlock(&hdr->coh_page_guard);
+        cfs_spin_unlock(&hdr->coh_page_guard);
         LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
         RETURN(page);
 }
@@ -792,8 +794,8 @@ EXPORT_SYMBOL(cl_page_at);
         __result = 0;                                                   \
         __page = cl_page_top(__page);                                   \
         do {                                                            \
-                list_for_each_entry(__scan, &__page->cp_layers,         \
-                                    cpl_linkage) {                      \
+                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
+                                        cpl_linkage) {                  \
                         __method = *(void **)((char *)__scan->cpl_ops + \
                                               __op);                    \
                         if (__method != NULL) {                         \
@@ -820,8 +822,8 @@ do {                                                                    \
                                                                         \
         __page = cl_page_top(__page);                                   \
         do {                                                            \
-                list_for_each_entry(__scan, &__page->cp_layers,         \
-                                    cpl_linkage) {                      \
+                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
+                                        cpl_linkage) {                  \
                         __method = *(void **)((char *)__scan->cpl_ops + \
                                               __op);                    \
                         if (__method != NULL)                           \
@@ -832,28 +834,28 @@ do {                                                                    \
         } while (__page != NULL);                                       \
 } while (0)
 
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
-do {                                                                    \
-        const struct lu_env        *__env  = (_env);                    \
-        struct cl_page             *__page = (_page);                   \
-        const struct cl_page_slice *__scan;                             \
-        ptrdiff_t                   __op   = (_op);                     \
-        void                      (*__method)_proto;                    \
-                                                                        \
-        /* get to the bottom page. */                                   \
-        while (__page->cp_child != NULL)                                \
-                __page = __page->cp_child;                              \
-        do {                                                            \
-                list_for_each_entry_reverse(__scan, &__page->cp_layers, \
-                                            cpl_linkage) {              \
-                        __method = *(void **)((char *)__scan->cpl_ops + \
-                                              __op);                    \
-                        if (__method != NULL)                           \
-                                (*__method)(__env, __scan,              \
-                                            ## __VA_ARGS__);            \
-                }                                                       \
-                __page = __page->cp_parent;                             \
-        } while (__page != NULL);                                       \
+#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)               \
+do {                                                                        \
+        const struct lu_env        *__env  = (_env);                        \
+        struct cl_page             *__page = (_page);                       \
+        const struct cl_page_slice *__scan;                                 \
+        ptrdiff_t                   __op   = (_op);                         \
+        void                      (*__method)_proto;                        \
+                                                                            \
+        /* get to the bottom page. */                                       \
+        while (__page->cp_child != NULL)                                    \
+                __page = __page->cp_child;                                  \
+        do {                                                                \
+                cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+                                                cpl_linkage) {              \
+                        __method = *(void **)((char *)__scan->cpl_ops +     \
+                                              __op);                        \
+                        if (__method != NULL)                               \
+                                (*__method)(__env, __scan,                  \
+                                            ## __VA_ARGS__);                \
+                }                                                           \
+                __page = __page->cp_parent;                                 \
+        } while (__page != NULL);                                           \
 } while (0)
 
 static int cl_page_invoke(const struct lu_env *env,
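
The CL_PAGE_INVOID/CL_PAGE_INVOID_REVERSE macros reformatted above select the per-layer method by its byte offset into the operations table (the ptrdiff_t _op argument), so one macro body can invoke any slot. A self-contained illustration of that offset dispatch, slot names hypothetical:

#include <stddef.h>
#include <stdio.h>

/* Offset-based dispatch as in the CL_PAGE_* macros: the caller passes
 * offsetof(struct ops, slot) and the dispatcher fetches whatever
 * function pointer lives at that offset. */
typedef void (*method_t)(void);

struct ops {
        method_t cpo_fini;
        method_t cpo_delete;
};

static void do_delete(void) { puts("delete"); }

static void invoke(const struct ops *o, ptrdiff_t op)
{
        method_t method = *(const method_t *)((const char *)o + op);

        if (method != NULL)
                method();
}

int main(void)
{
        struct ops o = { .cpo_delete = do_delete };

        invoke(&o, offsetof(struct ops, cpo_delete));   /* prints "delete" */
        return 0;
}
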
@@ -1167,12 +1169,12 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                 struct cl_object_header *hdr;
 
                 hdr = cl_object_header(tmp->cp_obj);
-                spin_lock(&hdr->coh_page_guard);
+                cfs_spin_lock(&hdr->coh_page_guard);
                 value = radix_tree_delete(&hdr->coh_tree, tmp->cp_index);
                 PASSERT(env, tmp, value == tmp);
                 PASSERT(env, tmp, hdr->coh_pages > 0);
                 hdr->coh_pages--;
-                spin_unlock(&hdr->coh_page_guard);
+                cfs_spin_unlock(&hdr->coh_page_guard);
         }
 
         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
@@ -1535,7 +1537,7 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
 {
         (*printer)(env, cookie,
                    "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
-                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
+                   pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
                    pg->cp_index, pg->cp_parent, pg->cp_child,
                    pg->cp_state, pg->cp_error, pg->cp_type,
                    pg->cp_owner, pg->cp_req, pg->cp_flags);
@@ -1616,7 +1618,7 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                        const struct cl_page_operations *ops)
 {
         ENTRY;
-        list_add_tail(&slice->cpl_linkage, &page->cp_layers);
+        cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
         slice->cpl_obj  = obj;
         slice->cpl_ops  = ops;
         slice->cpl_page = page;
index 50058a1..bf6fb32 100644 (file)
 
 #ifndef __KERNEL__
 /* liblustre workaround */
-atomic_t libcfs_kmemory = {0};
+cfs_atomic_t libcfs_kmemory = {0};
 #endif
 
 struct obd_device *obd_devs[MAX_OBD_DEVICES];
-struct list_head obd_types;
-spinlock_t obd_dev_lock = SPIN_LOCK_UNLOCKED;
+cfs_list_t obd_types;
+cfs_spinlock_t obd_dev_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 #ifndef __KERNEL__
 __u64 obd_max_pages = 0;
@@ -82,8 +82,8 @@ unsigned int at_history = 600;
 int at_early_margin = 5;
 int at_extra = 30;
 
-atomic_t obd_dirty_pages;
-atomic_t obd_dirty_transit_pages;
+cfs_atomic_t obd_dirty_pages;
+cfs_atomic_t obd_dirty_transit_pages;
 
 cfs_waitq_t obd_race_waitq;
 int obd_race_state;
@@ -191,7 +191,8 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
                 OBD_ALLOC(lcfg, data->ioc_plen1);
                 if (lcfg == NULL)
                         GOTO(out, err = -ENOMEM);
-                err = copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1);
+                err = cfs_copy_from_user(lcfg, data->ioc_pbuf1,
+                                         data->ioc_plen1);
                 if (!err)
                         err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1);
                 if (!err)
@@ -307,7 +308,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
                 snprintf(str, len - sizeof(*data), "%3d %s %s %s %s %d",
                          (int)index, status, obd->obd_type->typ_name,
                          obd->obd_name, obd->obd_uuid.uuid,
-                         atomic_read(&obd->obd_refcount));
+                         cfs_atomic_read(&obd->obd_refcount));
                 err = obd_ioctl_popdata((void *)arg, data, len);
 
                 GOTO(out, err = 0);
@@ -536,7 +537,7 @@ int obd_init_checks(void)
 #define obd_init_checks() do {} while(0)
 #endif
 
-extern spinlock_t obd_types_lock;
+extern cfs_spinlock_t obd_types_lock;
 extern int class_procfs_init(void);
 extern int class_procfs_clean(void);
 
@@ -558,7 +559,7 @@ int init_obdclass(void)
         LCONSOLE_INFO("        Lustre Version: "LUSTRE_VERSION_STRING"\n");
         LCONSOLE_INFO("        Build Version: "BUILD_VERSION"\n");
 
-        spin_lock_init(&obd_types_lock);
+        cfs_spin_lock_init(&obd_types_lock);
         cfs_waitq_init(&obd_race_waitq);
         obd_zombie_impexp_init();
 #ifdef LPROCFS
@@ -585,7 +586,7 @@ int init_obdclass(void)
         if (err)
                 return err;
 
-        spin_lock_init(&obd_dev_lock);
+        cfs_spin_lock_init(&obd_dev_lock);
         CFS_INIT_LIST_HEAD(&obd_types);
 
         err = cfs_psdev_register(&obd_psdev);
@@ -601,10 +602,10 @@ int init_obdclass(void)
         /* Default the dirty page cache cap to 1/2 of system memory.
          * For clients with less memory, a larger fraction is needed
          * for other purposes (mostly for BGL). */
-        if (num_physpages <= 512 << (20 - CFS_PAGE_SHIFT))
-                obd_max_dirty_pages = num_physpages / 4;
+        if (cfs_num_physpages <= 512 << (20 - CFS_PAGE_SHIFT))
+                obd_max_dirty_pages = cfs_num_physpages / 4;
         else
-                obd_max_dirty_pages = num_physpages / 2;
+                obd_max_dirty_pages = cfs_num_physpages / 2;
 
         err = obd_init_caches();
         if (err)
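
The init_obdclass() hunk carries the dirty-cache sizing policy through the rename unchanged: clients at or below 512 MiB may dirty a quarter of memory, larger clients half. The arithmetic in plain form (4 KiB pages are an assumption of this example):

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, an assumption for the example */

/* 512 << (20 - PAGE_SHIFT) is 512 MiB expressed in pages; at or below
 * that threshold only a quarter of memory may be dirty, otherwise
 * half. */
static unsigned long max_dirty_pages(unsigned long physpages)
{
        if (physpages <= 512UL << (20 - PAGE_SHIFT))
                return physpages / 4;
        return physpages / 2;
}

int main(void)
{
        printf("256 MiB box: %lu pages\n", max_dirty_pages(65536UL));
        printf("  1 GiB box: %lu pages\n", max_dirty_pages(262144UL));
        return 0;
}
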
index 23fa33a..cb7c8f8 100644 (file)
@@ -78,13 +78,13 @@ static struct lu_context_key dt_key = {
  */
 void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
 {
-        list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
+        cfs_list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
 }
 EXPORT_SYMBOL(dt_txn_callback_add);
 
 void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb)
 {
-        list_del_init(&cb->dtc_linkage);
+        cfs_list_del_init(&cb->dtc_linkage);
 }
 EXPORT_SYMBOL(dt_txn_callback_del);
 
@@ -95,7 +95,7 @@ int dt_txn_hook_start(const struct lu_env *env,
         struct dt_txn_callback *cb;
 
         result = 0;
-        list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+        cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
                 if (cb->dtc_txn_start == NULL ||
                     !(cb->dtc_tag & env->le_ctx.lc_tags))
                         continue;
@@ -114,7 +114,7 @@ int dt_txn_hook_stop(const struct lu_env *env, struct thandle *txn)
         int                     result;
 
         result = 0;
-        list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+        cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
                 if (cb->dtc_txn_stop == NULL ||
                     !(cb->dtc_tag & env->le_ctx.lc_tags))
                         continue;
@@ -133,7 +133,7 @@ int dt_txn_hook_commit(const struct lu_env *env, struct thandle *txn)
         int                     result;
 
         result = 0;
-        list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+        cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
                 if (cb->dtc_txn_commit == NULL ||
                     !(cb->dtc_tag & env->le_ctx.lc_tags))
                         continue;
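
The dt_txn_callback hunks show the hook protocol: users register a callback with dt_txn_callback_add(), and each dt_txn_hook_* pass walks dd_txn_callbacks, skipping entries whose dtc_tag does not intersect the environment's context tags. A reduced model with hypothetical types:

#include <stddef.h>

/* Reduced model of the dt_txn_callback machinery: hooks sit on a
 * per-device list; each hook pass runs only callbacks whose tag
 * intersects the context tags, stopping on the first error. */
struct txn_cb {
        struct txn_cb *dtc_next;
        unsigned       dtc_tag;
        int          (*dtc_txn_start)(void *cookie);
        void          *dtc_cookie;
};

static int txn_hook_start(struct txn_cb *cbs, unsigned ctx_tags)
{
        int result = 0;

        for (struct txn_cb *cb = cbs; cb != NULL; cb = cb->dtc_next) {
                if (cb->dtc_txn_start == NULL || !(cb->dtc_tag & ctx_tags))
                        continue;
                result = cb->dtc_txn_start(cb->dtc_cookie);
                if (result < 0)
                        break;
        }
        return result;
}
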
index 9c20af2..3ea2c47 100644 (file)
 #include <obd_class.h>
 #include <lprocfs_status.h>
 
-extern struct list_head obd_types;
-spinlock_t obd_types_lock;
+extern cfs_list_t obd_types;
+cfs_spinlock_t obd_types_lock;
 
 cfs_mem_cache_t *obd_device_cachep;
 cfs_mem_cache_t *obdo_cachep;
 EXPORT_SYMBOL(obdo_cachep);
 cfs_mem_cache_t *import_cachep;
 
-struct list_head  obd_zombie_imports;
-struct list_head  obd_zombie_exports;
-spinlock_t        obd_zombie_impexp_lock;
+cfs_list_t      obd_zombie_imports;
+cfs_list_t      obd_zombie_exports;
+cfs_spinlock_t  obd_zombie_impexp_lock;
 static void obd_zombie_impexp_notify(void);
 static void obd_zombie_export_add(struct obd_export *exp);
 static void obd_zombie_import_add(struct obd_import *imp);
@@ -98,18 +98,18 @@ static void obd_device_free(struct obd_device *obd)
 
 struct obd_type *class_search_type(const char *name)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct obd_type *type;
 
-        spin_lock(&obd_types_lock);
-        list_for_each(tmp, &obd_types) {
-                type = list_entry(tmp, struct obd_type, typ_chain);
+        cfs_spin_lock(&obd_types_lock);
+        cfs_list_for_each(tmp, &obd_types) {
+                type = cfs_list_entry(tmp, struct obd_type, typ_chain);
                 if (strcmp(type->typ_name, name) == 0) {
-                        spin_unlock(&obd_types_lock);
+                        cfs_spin_unlock(&obd_types_lock);
                         return type;
                 }
         }
-        spin_unlock(&obd_types_lock);
+        cfs_spin_unlock(&obd_types_lock);
         return NULL;
 }
 
@@ -120,7 +120,7 @@ struct obd_type *class_get_type(const char *name)
 #ifdef CONFIG_KMOD
         if (!type) {
                 const char *modname = name;
-                if (!request_module("%s", modname)) {
+                if (!cfs_request_module("%s", modname)) {
                         CDEBUG(D_INFO, "Loaded module '%s'\n", modname);
                         type = class_search_type(name);
                 } else {
@@ -130,10 +130,10 @@ struct obd_type *class_get_type(const char *name)
         }
 #endif
         if (type) {
-                spin_lock(&type->obd_type_lock);
+                cfs_spin_lock(&type->obd_type_lock);
                 type->typ_refcnt++;
-                try_module_get(type->typ_dt_ops->o_owner);
-                spin_unlock(&type->obd_type_lock);
+                cfs_try_module_get(type->typ_dt_ops->o_owner);
+                cfs_spin_unlock(&type->obd_type_lock);
         }
         return type;
 }
@@ -141,10 +141,10 @@ struct obd_type *class_get_type(const char *name)
 void class_put_type(struct obd_type *type)
 {
         LASSERT(type);
-        spin_lock(&type->obd_type_lock);
+        cfs_spin_lock(&type->obd_type_lock);
         type->typ_refcnt--;
-        module_put(type->typ_dt_ops->o_owner);
-        spin_unlock(&type->obd_type_lock);
+        cfs_module_put(type->typ_dt_ops->o_owner);
+        cfs_spin_unlock(&type->obd_type_lock);
 }
 
 #define CLASS_MAX_NAME 1024
@@ -184,7 +184,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
         if (md_ops)
                 *(type->typ_md_ops) = *md_ops;
         strcpy(type->typ_name, name);
-        spin_lock_init(&type->obd_type_lock);
+        cfs_spin_lock_init(&type->obd_type_lock);
 
 #ifdef LPROCFS
         type->typ_procroot = lprocfs_register(type->typ_name, proc_lustre_root,
@@ -202,9 +202,9 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
                         GOTO (failed, rc);
         }
 
-        spin_lock(&obd_types_lock);
-        list_add(&type->typ_chain, &obd_types);
-        spin_unlock(&obd_types_lock);
+        cfs_spin_lock(&obd_types_lock);
+        cfs_list_add(&type->typ_chain, &obd_types);
+        cfs_spin_unlock(&obd_types_lock);
 
         RETURN (0);
 
@@ -245,9 +245,9 @@ int class_unregister_type(const char *name)
         if (type->typ_lu)
                 lu_device_type_fini(type->typ_lu);
 
-        spin_lock(&obd_types_lock);
-        list_del(&type->typ_chain);
-        spin_unlock(&obd_types_lock);
+        cfs_spin_lock(&obd_types_lock);
+        cfs_list_del(&type->typ_chain);
+        cfs_spin_unlock(&obd_types_lock);
         OBD_FREE(type->typ_name, strlen(name) + 1);
         if (type->typ_dt_ops != NULL)
                 OBD_FREE_PTR(type->typ_dt_ops);
@@ -294,7 +294,7 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
         }
         LASSERT(newdev->obd_magic == OBD_DEVICE_MAGIC);
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
                 if (obd && obd->obd_name &&
@@ -324,7 +324,7 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
                         obd_devs[i] = result;
                 }
         }
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
 
         if (result == NULL && i >= class_devno_max()) {
                 CERROR("all %u OBD devices used, increase MAX_OBD_DEVICES\n",
@@ -355,9 +355,9 @@ void class_release_dev(struct obd_device *obd)
         CDEBUG(D_INFO, "Release obd device %s obd_type name =%s\n",
                obd->obd_name,obd->obd_type->typ_name);
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         obd_devs[obd->obd_minor] = NULL;
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
         obd_device_free(obd);
 
         class_put_type(obd_type);
@@ -370,7 +370,7 @@ int class_name2dev(const char *name)
         if (!name)
                 return -1;
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
                 if (obd && obd->obd_name && strcmp(name, obd->obd_name) == 0) {
@@ -378,13 +378,13 @@ int class_name2dev(const char *name)
                            out any references */
                         LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
                         if (obd->obd_attached) {
-                                spin_unlock(&obd_dev_lock);
+                                cfs_spin_unlock(&obd_dev_lock);
                                 return i;
                         }
                         break;
                 }
         }
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
 
         return -1;
 }
@@ -402,16 +402,16 @@ int class_uuid2dev(struct obd_uuid *uuid)
 {
         int i;
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
                 if (obd && obd_uuid_equals(uuid, &obd->obd_uuid)) {
                         LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
-                        spin_unlock(&obd_dev_lock);
+                        cfs_spin_unlock(&obd_dev_lock);
                         return i;
                 }
         }
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
 
         return -1;
 }
@@ -457,7 +457,7 @@ void class_obd_list(void)
         char *status;
         int i;
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
                 if (obd == NULL)
@@ -473,9 +473,9 @@ void class_obd_list(void)
                 LCONSOLE(D_CONFIG, "%3d %s %s %s %s %d\n",
                          i, status, obd->obd_type->typ_name,
                          obd->obd_name, obd->obd_uuid.uuid,
-                         atomic_read(&obd->obd_refcount));
+                         cfs_atomic_read(&obd->obd_refcount));
         }
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
         return;
 }
 
@@ -488,7 +488,7 @@ struct obd_device * class_find_client_obd(struct obd_uuid *tgt_uuid,
 {
         int i;
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
                 if (obd == NULL)
@@ -499,12 +499,12 @@ struct obd_device * class_find_client_obd(struct obd_uuid *tgt_uuid,
                                             &obd->u.cli.cl_target_uuid) &&
                             ((grp_uuid)? obd_uuid_equals(grp_uuid,
                                                          &obd->obd_uuid) : 1)) {
-                                spin_unlock(&obd_dev_lock);
+                                cfs_spin_unlock(&obd_dev_lock);
                                 return obd;
                         }
                 }
         }
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
 
         return NULL;
 }
@@ -524,7 +524,7 @@ struct obd_device * class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
         else
                 return NULL;
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         for (; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
                 if (obd == NULL)
@@ -532,11 +532,11 @@ struct obd_device * class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
                 if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
                         if (next != NULL)
                                 *next = i+1;
-                        spin_unlock(&obd_dev_lock);
+                        cfs_spin_unlock(&obd_dev_lock);
                         return obd;
                 }
         }
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
 
         return NULL;
 }
@@ -553,7 +553,7 @@ int class_notify_sptlrpc_conf(const char *fsname, int namelen)
 
         LASSERT(namelen > 0);
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 obd = class_num2obd(i);
 
@@ -572,15 +572,15 @@ int class_notify_sptlrpc_conf(const char *fsname, int namelen)
                         continue;
 
                 class_incref(obd, __FUNCTION__, obd);
-                spin_unlock(&obd_dev_lock);
+                cfs_spin_unlock(&obd_dev_lock);
                 rc2 = obd_set_info_async(obd->obd_self_export,
                                          sizeof(KEY_SPTLRPC_CONF),
                                          KEY_SPTLRPC_CONF, 0, NULL, NULL);
                 rc = rc ? rc : rc2;
                 class_decref(obd, __FUNCTION__, obd);
-                spin_lock(&obd_dev_lock);
+                cfs_spin_lock(&obd_dev_lock);
         }
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
         return rc;
 }
 EXPORT_SYMBOL(class_notify_sptlrpc_conf);
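
The class_notify_sptlrpc_conf() hunk above preserves a pattern this patch renames throughout: obd_dev_lock is a spinlock, so it must be dropped around the blocking obd_set_info_async() call, with class_incref()/class_decref() pinning the device while the lock is not held. A minimal userspace sketch of that lock-juggling pattern, using pthreads and C11 atomics instead of the cfs_ wrappers (all names below are illustrative, not the Lustre API):

/* Hold the spinlock only while scanning; pin the object, drop the
 * lock around the blocking call, then retake it before continuing. */
#include <pthread.h>
#include <stdatomic.h>

struct obj {
        atomic_int refcount;
        int        valid;
};

static pthread_spinlock_t table_lock;   /* init with pthread_spin_init() */
static struct obj *table[16];

extern void blocking_call(struct obj *o);  /* stands in for obd_set_info_async() */

void notify_all(void)
{
        int i;

        pthread_spin_lock(&table_lock);
        for (i = 0; i < 16; i++) {
                struct obj *o = table[i];

                if (o == NULL || !o->valid)
                        continue;

                atomic_fetch_add(&o->refcount, 1);  /* class_incref() */
                pthread_spin_unlock(&table_lock);   /* never block under a spinlock */
                blocking_call(o);
                atomic_fetch_sub(&o->refcount, 1);  /* class_decref() */
                pthread_spin_lock(&table_lock);     /* retake before resuming scan */
        }
        pthread_spin_unlock(&table_lock);
}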
@@ -712,7 +712,7 @@ static void class_export_destroy(struct obd_export *exp)
         struct obd_device *obd = exp->exp_obd;
         ENTRY;
 
-        LASSERT (atomic_read(&exp->exp_refcount) == 0);
+        LASSERT (cfs_atomic_read(&exp->exp_refcount) == 0);
 
         CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
                exp->exp_client_uuid.uuid, obd->obd_name);
@@ -723,10 +723,10 @@ static void class_export_destroy(struct obd_export *exp)
         if (exp->exp_connection)
                 ptlrpc_put_connection_superhack(exp->exp_connection);
 
-        LASSERT(list_empty(&exp->exp_outstanding_replies));
-        LASSERT(list_empty(&exp->exp_uncommitted_replies));
-        LASSERT(list_empty(&exp->exp_req_replay_queue));
-        LASSERT(list_empty(&exp->exp_queued_rpc));
+        LASSERT(cfs_list_empty(&exp->exp_outstanding_replies));
+        LASSERT(cfs_list_empty(&exp->exp_uncommitted_replies));
+        LASSERT(cfs_list_empty(&exp->exp_req_replay_queue));
+        LASSERT(cfs_list_empty(&exp->exp_queued_rpc));
         obd_destroy_export(exp);
         class_decref(obd, "export", exp);
 
@@ -741,9 +741,9 @@ static void export_handle_addref(void *export)
 
 struct obd_export *class_export_get(struct obd_export *exp)
 {
-        atomic_inc(&exp->exp_refcount);
+        cfs_atomic_inc(&exp->exp_refcount);
         CDEBUG(D_INFO, "GETting export %p : new refcount %d\n", exp,
-               atomic_read(&exp->exp_refcount));
+               cfs_atomic_read(&exp->exp_refcount));
         return exp;
 }
 EXPORT_SYMBOL(class_export_get);
@@ -752,12 +752,12 @@ void class_export_put(struct obd_export *exp)
 {
         LASSERT(exp != NULL);
         CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
-               atomic_read(&exp->exp_refcount) - 1);
-        LASSERT(atomic_read(&exp->exp_refcount) > 0);
-        LASSERT(atomic_read(&exp->exp_refcount) < 0x5a5a5a);
+               cfs_atomic_read(&exp->exp_refcount) - 1);
+        LASSERT(cfs_atomic_read(&exp->exp_refcount) > 0);
+        LASSERT(cfs_atomic_read(&exp->exp_refcount) < 0x5a5a5a);
 
-        if (atomic_dec_and_test(&exp->exp_refcount)) {
-                LASSERT(!list_empty(&exp->exp_obd_chain));
+        if (cfs_atomic_dec_and_test(&exp->exp_refcount)) {
+                LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
                 CDEBUG(D_IOCTL, "final put %p/%s\n",
                        exp, exp->exp_client_uuid.uuid);
                 obd_zombie_export_add(exp);
@@ -781,34 +781,34 @@ struct obd_export *class_new_export(struct obd_device *obd,
 
         export->exp_conn_cnt = 0;
         export->exp_lock_hash = NULL;
-        atomic_set(&export->exp_refcount, 2);
-        atomic_set(&export->exp_rpc_count, 0);
-        atomic_set(&export->exp_cb_count, 0);
-        atomic_set(&export->exp_locks_count, 0);
+        cfs_atomic_set(&export->exp_refcount, 2);
+        cfs_atomic_set(&export->exp_rpc_count, 0);
+        cfs_atomic_set(&export->exp_cb_count, 0);
+        cfs_atomic_set(&export->exp_locks_count, 0);
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
         CFS_INIT_LIST_HEAD(&export->exp_locks_list);
-        spin_lock_init(&export->exp_locks_list_guard);
+        cfs_spin_lock_init(&export->exp_locks_list_guard);
 #endif
-        atomic_set(&export->exp_replay_count, 0);
+        cfs_atomic_set(&export->exp_replay_count, 0);
         export->exp_obd = obd;
         CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies);
-        spin_lock_init(&export->exp_uncommitted_replies_lock);
+        cfs_spin_lock_init(&export->exp_uncommitted_replies_lock);
         CFS_INIT_LIST_HEAD(&export->exp_uncommitted_replies);
         CFS_INIT_LIST_HEAD(&export->exp_req_replay_queue);
         CFS_INIT_LIST_HEAD(&export->exp_handle.h_link);
         CFS_INIT_LIST_HEAD(&export->exp_queued_rpc);
         class_handle_hash(&export->exp_handle, export_handle_addref);
         export->exp_last_request_time = cfs_time_current_sec();
-        spin_lock_init(&export->exp_lock);
-        INIT_HLIST_NODE(&export->exp_uuid_hash);
-        INIT_HLIST_NODE(&export->exp_nid_hash);
+        cfs_spin_lock_init(&export->exp_lock);
+        CFS_INIT_HLIST_NODE(&export->exp_uuid_hash);
+        CFS_INIT_HLIST_NODE(&export->exp_nid_hash);
 
         export->exp_sp_peer = LUSTRE_SP_ANY;
         export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
         export->exp_client_uuid = *cluuid;
         obd_init_export(export);
 
-        spin_lock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
          /* shouldn't happen, but might race */
         if (obd->obd_stopping)
                 GOTO(exit_err, rc = -ENODEV);
@@ -824,17 +824,17 @@ struct obd_export *class_new_export(struct obd_device *obd,
         }
 
         class_incref(obd, "export", export);
-        list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
-        list_add_tail(&export->exp_obd_chain_timed,
-                      &export->exp_obd->obd_exports_timed);
+        cfs_list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
+        cfs_list_add_tail(&export->exp_obd_chain_timed,
+                          &export->exp_obd->obd_exports_timed);
         export->exp_obd->obd_num_exports++;
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
         RETURN(export);
 
 exit_err:
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
         class_handle_unhash(&export->exp_handle);
-        LASSERT(hlist_unhashed(&export->exp_uuid_hash));
+        LASSERT(cfs_hlist_unhashed(&export->exp_uuid_hash));
         obd_destroy_export(export);
         OBD_FREE_PTR(export);
         return ERR_PTR(rc);
@@ -845,17 +845,17 @@ void class_unlink_export(struct obd_export *exp)
 {
         class_handle_unhash(&exp->exp_handle);
 
-        spin_lock(&exp->exp_obd->obd_dev_lock);
+        cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
         /* delete the uuid-export hash item from the hash tables */
-        if (!hlist_unhashed(&exp->exp_uuid_hash))
+        if (!cfs_hlist_unhashed(&exp->exp_uuid_hash))
                 cfs_hash_del(exp->exp_obd->obd_uuid_hash,
                              &exp->exp_client_uuid,
                              &exp->exp_uuid_hash);
 
-        list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
-        list_del_init(&exp->exp_obd_chain_timed);
+        cfs_list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
+        cfs_list_del_init(&exp->exp_obd_chain_timed);
         exp->exp_obd->obd_num_exports--;
-        spin_unlock(&exp->exp_obd->obd_dev_lock);
+        cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
         class_export_put(exp);
 }
 EXPORT_SYMBOL(class_unlink_export);
@@ -868,16 +868,16 @@ void class_import_destroy(struct obd_import *imp)
         CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
                 imp->imp_obd->obd_name);
 
-        LASSERT(atomic_read(&imp->imp_refcount) == 0);
+        LASSERT(cfs_atomic_read(&imp->imp_refcount) == 0);
 
         ptlrpc_put_connection_superhack(imp->imp_connection);
 
-        while (!list_empty(&imp->imp_conn_list)) {
+        while (!cfs_list_empty(&imp->imp_conn_list)) {
                 struct obd_import_conn *imp_conn;
 
-                imp_conn = list_entry(imp->imp_conn_list.next,
-                                      struct obd_import_conn, oic_item);
-                list_del_init(&imp_conn->oic_item);
+                imp_conn = cfs_list_entry(imp->imp_conn_list.next,
+                                          struct obd_import_conn, oic_item);
+                cfs_list_del_init(&imp_conn->oic_item);
                 ptlrpc_put_connection_superhack(imp_conn->oic_conn);
                 OBD_FREE(imp_conn, sizeof(*imp_conn));
         }
@@ -895,11 +895,11 @@ static void import_handle_addref(void *import)
 
 struct obd_import *class_import_get(struct obd_import *import)
 {
-        LASSERT(atomic_read(&import->imp_refcount) >= 0);
-        LASSERT(atomic_read(&import->imp_refcount) < 0x5a5a5a);
-        atomic_inc(&import->imp_refcount);
+        LASSERT(cfs_atomic_read(&import->imp_refcount) >= 0);
+        LASSERT(cfs_atomic_read(&import->imp_refcount) < 0x5a5a5a);
+        cfs_atomic_inc(&import->imp_refcount);
         CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", import,
-               atomic_read(&import->imp_refcount),
+               cfs_atomic_read(&import->imp_refcount),
                import->imp_obd->obd_name);
         return import;
 }
@@ -909,15 +909,15 @@ void class_import_put(struct obd_import *imp)
 {
         ENTRY;
 
-        LASSERT(atomic_read(&imp->imp_refcount) > 0);
-        LASSERT(atomic_read(&imp->imp_refcount) < 0x5a5a5a);
-        LASSERT(list_empty(&imp->imp_zombie_chain));
+        LASSERT(cfs_atomic_read(&imp->imp_refcount) > 0);
+        LASSERT(cfs_atomic_read(&imp->imp_refcount) < 0x5a5a5a);
+        LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
 
         CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp,
-               atomic_read(&imp->imp_refcount) - 1,
+               cfs_atomic_read(&imp->imp_refcount) - 1,
                imp->imp_obd->obd_name);
 
-        if (atomic_dec_and_test(&imp->imp_refcount)) {
+        if (cfs_atomic_dec_and_test(&imp->imp_refcount)) {
                 CDEBUG(D_INFO, "final put import %p\n", imp);
                 obd_zombie_import_add(imp);
         }
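
class_export_put() and class_import_put() above never free the object inline: cfs_atomic_dec_and_test() detects the final reference and hands the object to a zombie list for a dedicated thread to destroy, since the caller may still hold locks. A minimal sketch of that dec-and-test handoff, assuming a hypothetical zombie_add() enqueue in place of obd_zombie_import_add():

#include <stdatomic.h>
#include <assert.h>

struct import {
        atomic_int refcount;
};

extern void zombie_add(struct import *imp);  /* assumed reaper enqueue */

void import_put(struct import *imp)
{
        int old = atomic_fetch_sub(&imp->refcount, 1);

        assert(old > 0);         /* the refcount must never go negative */
        if (old == 1)            /* we just dropped the last reference */
                zombie_add(imp); /* destruction happens on the reaper thread */
}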
@@ -950,18 +950,18 @@ struct obd_import *class_new_import(struct obd_device *obd)
         CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
         CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
         CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
-        spin_lock_init(&imp->imp_lock);
+        cfs_spin_lock_init(&imp->imp_lock);
         imp->imp_last_success_conn = 0;
         imp->imp_state = LUSTRE_IMP_NEW;
         imp->imp_obd = class_incref(obd, "import", imp);
-        sema_init(&imp->imp_sec_mutex, 1);
+        cfs_sema_init(&imp->imp_sec_mutex, 1);
         cfs_waitq_init(&imp->imp_recovery_waitq);
 
-        atomic_set(&imp->imp_refcount, 2);
-        atomic_set(&imp->imp_unregistering, 0);
-        atomic_set(&imp->imp_inflight, 0);
-        atomic_set(&imp->imp_replay_inflight, 0);
-        atomic_set(&imp->imp_inval_count, 0);
+        cfs_atomic_set(&imp->imp_refcount, 2);
+        cfs_atomic_set(&imp->imp_unregistering, 0);
+        cfs_atomic_set(&imp->imp_inflight, 0);
+        cfs_atomic_set(&imp->imp_replay_inflight, 0);
+        cfs_atomic_set(&imp->imp_inval_count, 0);
         CFS_INIT_LIST_HEAD(&imp->imp_conn_list);
         CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link);
         class_handle_hash(&imp->imp_handle, import_handle_addref);
@@ -982,9 +982,9 @@ void class_destroy_import(struct obd_import *import)
 
         class_handle_unhash(&import->imp_handle);
 
-        spin_lock(&import->imp_lock);
+        cfs_spin_lock(&import->imp_lock);
         import->imp_generation++;
-        spin_unlock(&import->imp_lock);
+        cfs_spin_unlock(&import->imp_lock);
         class_import_put(import);
 }
 EXPORT_SYMBOL(class_destroy_import);
@@ -993,7 +993,7 @@ EXPORT_SYMBOL(class_destroy_import);
 
 void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
 {
-        spin_lock(&exp->exp_locks_list_guard);
+        cfs_spin_lock(&exp->exp_locks_list_guard);
 
         LASSERT(lock->l_exp_refs_nr >= 0);
 
@@ -1003,18 +1003,18 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
                               exp, lock, lock->l_exp_refs_target);
         }
         if ((lock->l_exp_refs_nr ++) == 0) {
-                list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
+                cfs_list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
                 lock->l_exp_refs_target = exp;
         }
         CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
                lock, exp, lock->l_exp_refs_nr);
-        spin_unlock(&exp->exp_locks_list_guard);
+        cfs_spin_unlock(&exp->exp_locks_list_guard);
 }
 EXPORT_SYMBOL(__class_export_add_lock_ref);
 
 void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
 {
-        spin_lock(&exp->exp_locks_list_guard);
+        cfs_spin_lock(&exp->exp_locks_list_guard);
         LASSERT(lock->l_exp_refs_nr > 0);
         if (lock->l_exp_refs_target != exp) {
                 LCONSOLE_WARN("lock %p, "
@@ -1022,12 +1022,12 @@ void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
                               lock, lock->l_exp_refs_target, exp);
         }
         if (-- lock->l_exp_refs_nr == 0) {
-                list_del_init(&lock->l_exp_refs_link);
+                cfs_list_del_init(&lock->l_exp_refs_link);
                 lock->l_exp_refs_target = NULL;
         }
         CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
                lock, exp, lock->l_exp_refs_nr);
-        spin_unlock(&exp->exp_locks_list_guard);
+        cfs_spin_unlock(&exp->exp_locks_list_guard);
 }
 EXPORT_SYMBOL(__class_export_del_lock_ref);
 #endif
@@ -1063,33 +1063,33 @@ void class_export_recovery_cleanup(struct obd_export *exp)
 {
         struct obd_device *obd = exp->exp_obd;
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         if (exp->exp_delayed)
                 obd->obd_delayed_clients--;
         if (obd->obd_recovering && exp->exp_in_recovery) {
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
                 exp->exp_in_recovery = 0;
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
                 LASSERT(obd->obd_connected_clients);
                 obd->obd_connected_clients--;
         }
         /** Cleanup req replay fields */
         if (exp->exp_req_replay_needed) {
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
                 exp->exp_req_replay_needed = 0;
-                spin_unlock(&exp->exp_lock);
-                LASSERT(atomic_read(&obd->obd_req_replay_clients));
-                atomic_dec(&obd->obd_req_replay_clients);
+                cfs_spin_unlock(&exp->exp_lock);
+                LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients));
+                cfs_atomic_dec(&obd->obd_req_replay_clients);
         }
         /** Cleanup lock replay data */
         if (exp->exp_lock_replay_needed) {
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
                 exp->exp_lock_replay_needed = 0;
-                spin_unlock(&exp->exp_lock);
-                LASSERT(atomic_read(&obd->obd_lock_replay_clients));
-                atomic_dec(&obd->obd_lock_replay_clients);
+                cfs_spin_unlock(&exp->exp_lock);
+                LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients));
+                cfs_atomic_dec(&obd->obd_lock_replay_clients);
         }
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 }
 
 /* This function removes 1-3 references from the export:
@@ -1109,23 +1109,23 @@ int class_disconnect(struct obd_export *export)
                 RETURN(-EINVAL);
         }
 
-        spin_lock(&export->exp_lock);
+        cfs_spin_lock(&export->exp_lock);
         already_disconnected = export->exp_disconnected;
         export->exp_disconnected = 1;
-        spin_unlock(&export->exp_lock);
+        cfs_spin_unlock(&export->exp_lock);
 
         /* class_cleanup(), abort_recovery(), and class_fail_export()
          * all end up in here, and if any of them race we shouldn't
          * call extra class_export_puts(). */
         if (already_disconnected) {
-                LASSERT(hlist_unhashed(&export->exp_nid_hash));
+                LASSERT(cfs_hlist_unhashed(&export->exp_nid_hash));
                 GOTO(no_disconn, already_disconnected);
         }
 
         CDEBUG(D_IOCTL, "disconnect: cookie "LPX64"\n",
                export->exp_handle.h_cookie);
 
-        if (!hlist_unhashed(&export->exp_nid_hash))
+        if (!cfs_hlist_unhashed(&export->exp_nid_hash))
                 cfs_hash_del(export->exp_obd->obd_nid_hash,
                              &export->exp_connection->c_peer.nid,
                              &export->exp_nid_hash);
@@ -1142,16 +1142,16 @@ int class_connected_export(struct obd_export *exp)
 {
         if (exp) {
                 int connected;
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
                 connected = (exp->exp_conn_cnt > 0);
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
                 return connected;
         }
         return 0;
 }
 EXPORT_SYMBOL(class_connected_export);
 
-static void class_disconnect_export_list(struct list_head *list,
+static void class_disconnect_export_list(cfs_list_t *list,
                                          enum obd_option flags)
 {
         int rc;
@@ -1160,14 +1160,15 @@ static void class_disconnect_export_list(struct list_head *list,
 
         /* It's possible that an export may disconnect itself, but
          * nothing else will be added to this list. */
-        while (!list_empty(list)) {
-                exp = list_entry(list->next, struct obd_export, exp_obd_chain);
+        while (!cfs_list_empty(list)) {
+                exp = cfs_list_entry(list->next, struct obd_export,
+                                     exp_obd_chain);
                 /* needed so CDEBUG can safely be called after obd_disconnect */
                 class_export_get(exp);
 
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
                 exp->exp_flags = flags;
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
 
                 if (obd_uuid_equals(&exp->exp_client_uuid,
                                     &exp->exp_obd->obd_uuid)) {
@@ -1176,7 +1177,7 @@ static void class_disconnect_export_list(struct list_head *list,
                                exp);
                         /* Need to delete this now so we don't end up pointing
                          * to work_list later when this export is cleaned up. */
-                        list_del_init(&exp->exp_obd_chain);
+                        cfs_list_del_init(&exp->exp_obd_chain);
                         class_export_put(exp);
                         continue;
                 }
@@ -1198,17 +1199,17 @@ static void class_disconnect_export_list(struct list_head *list,
 
 void class_disconnect_exports(struct obd_device *obd)
 {
-        struct list_head work_list;
+        cfs_list_t work_list;
         ENTRY;
 
         /* Move all of the exports from obd_exports to a work list, en masse. */
         CFS_INIT_LIST_HEAD(&work_list);
-        spin_lock(&obd->obd_dev_lock);
-        list_splice_init(&obd->obd_exports, &work_list);
-        list_splice_init(&obd->obd_delayed_exports, &work_list);
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_list_splice_init(&obd->obd_exports, &work_list);
+        cfs_list_splice_init(&obd->obd_delayed_exports, &work_list);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
-        if (!list_empty(&work_list)) {
+        if (!cfs_list_empty(&work_list)) {
                 CDEBUG(D_HA, "OBD device %d (%p) has exports, "
                        "disconnecting them\n", obd->obd_minor, obd);
                 class_disconnect_export_list(&work_list,
@@ -1225,16 +1226,16 @@ EXPORT_SYMBOL(class_disconnect_exports);
 void class_disconnect_stale_exports(struct obd_device *obd,
                                     int (*test_export)(struct obd_export *))
 {
-        struct list_head work_list;
-        struct list_head *pos, *n;
+        cfs_list_t work_list;
+        cfs_list_t *pos, *n;
         struct obd_export *exp;
         int evicted = 0;
         ENTRY;
 
         CFS_INIT_LIST_HEAD(&work_list);
-        spin_lock(&obd->obd_dev_lock);
-        list_for_each_safe(pos, n, &obd->obd_exports) {
-                exp = list_entry(pos, struct obd_export, exp_obd_chain);
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_list_for_each_safe(pos, n, &obd->obd_exports) {
+                exp = cfs_list_entry(pos, struct obd_export, exp_obd_chain);
                 if (test_export(exp))
                         continue;
 
@@ -1243,7 +1244,7 @@ void class_disconnect_stale_exports(struct obd_device *obd,
                                     &exp->exp_obd->obd_uuid))
                         continue;
 
-                list_move(&exp->exp_obd_chain, &work_list);
+                cfs_list_move(&exp->exp_obd_chain, &work_list);
                 evicted++;
                 CDEBUG(D_ERROR, "%s: disconnect stale client %s@%s\n",
                        obd->obd_name, exp->exp_client_uuid.uuid,
@@ -1251,7 +1252,7 @@ void class_disconnect_stale_exports(struct obd_device *obd,
                        libcfs_nid2str(exp->exp_connection->c_peer.nid));
                 print_export_data(exp, "EVICTING", 0);
         }
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         if (evicted) {
                 CDEBUG(D_HA, "%s: disconnecting %d stale clients\n",
@@ -1268,10 +1269,10 @@ void class_fail_export(struct obd_export *exp)
 {
         int rc, already_failed;
 
-        spin_lock(&exp->exp_lock);
+        cfs_spin_lock(&exp->exp_lock);
         already_failed = exp->exp_failed;
         exp->exp_failed = 1;
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
 
         if (already_failed) {
                 CDEBUG(D_HA, "disconnecting dead export %p/%s; skipping\n",
@@ -1381,20 +1382,21 @@ static void print_export_data(struct obd_export *exp, const char *status,
         struct ptlrpc_reply_state *first_reply = NULL;
         int nreplies = 0;
 
-        spin_lock(&exp->exp_lock);
-        list_for_each_entry (rs, &exp->exp_outstanding_replies, rs_exp_list) {
+        cfs_spin_lock(&exp->exp_lock);
+        cfs_list_for_each_entry(rs, &exp->exp_outstanding_replies,
+                                rs_exp_list) {
                 if (nreplies == 0)
                         first_reply = rs;
                 nreplies++;
         }
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
 
         CDEBUG(D_HA, "%s: %s %p %s %s %d (%d %d %d) %d %d %d %d: %p %s "LPU64"\n",
                exp->exp_obd->obd_name, status, exp, exp->exp_client_uuid.uuid,
-               obd_export_nid2str(exp), atomic_read(&exp->exp_refcount),
-               atomic_read(&exp->exp_rpc_count),
-               atomic_read(&exp->exp_cb_count),
-               atomic_read(&exp->exp_locks_count),
+               obd_export_nid2str(exp), cfs_atomic_read(&exp->exp_refcount),
+               cfs_atomic_read(&exp->exp_rpc_count),
+               cfs_atomic_read(&exp->exp_cb_count),
+               cfs_atomic_read(&exp->exp_locks_count),
                exp->exp_disconnected, exp->exp_delayed, exp->exp_failed,
                nreplies, first_reply, nreplies > 3 ? "..." : "",
                exp->exp_last_committed);
@@ -1408,41 +1410,42 @@ void dump_exports(struct obd_device *obd, int locks)
 {
         struct obd_export *exp;
 
-        spin_lock(&obd->obd_dev_lock);
-        list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
                 print_export_data(exp, "ACTIVE", locks);
-        list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
+        cfs_list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
                 print_export_data(exp, "UNLINKED", locks);
-        list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
+        cfs_list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
                 print_export_data(exp, "DELAYED", locks);
-        spin_unlock(&obd->obd_dev_lock);
-        spin_lock(&obd_zombie_impexp_lock);
-        list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
+        cfs_spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd_zombie_impexp_lock);
+        cfs_list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
                 print_export_data(exp, "ZOMBIE", locks);
-        spin_unlock(&obd_zombie_impexp_lock);
+        cfs_spin_unlock(&obd_zombie_impexp_lock);
 }
 EXPORT_SYMBOL(dump_exports);
 
 void obd_exports_barrier(struct obd_device *obd)
 {
         int waited = 2;
-        LASSERT(list_empty(&obd->obd_exports));
-        spin_lock(&obd->obd_dev_lock);
-        while (!list_empty(&obd->obd_unlinked_exports)) {
-                spin_unlock(&obd->obd_dev_lock);
-                cfs_schedule_timeout(CFS_TASK_UNINT, cfs_time_seconds(waited));
+        LASSERT(cfs_list_empty(&obd->obd_exports));
+        cfs_spin_lock(&obd->obd_dev_lock);
+        while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
+                cfs_spin_unlock(&obd->obd_dev_lock);
+                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+                                                   cfs_time_seconds(waited));
                 if (waited > 5 && IS_PO2(waited)) {
                         LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
                                       "more than %d seconds. "
                                       "The obd refcount = %d. Is it stuck?\n",
                                       obd->obd_name, waited,
-                                      atomic_read(&obd->obd_refcount));
+                                      cfs_atomic_read(&obd->obd_refcount));
                         dump_exports(obd, 0);
                 }
                 waited *= 2;
-                spin_lock(&obd->obd_dev_lock);
+                cfs_spin_lock(&obd->obd_dev_lock);
         }
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 }
 EXPORT_SYMBOL(obd_exports_barrier);
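
obd_exports_barrier() above polls obd_unlinked_exports with an exponentially growing sleep, and once the wait passes five seconds it warns at power-of-two intervals so the log is not flooded. A userspace sketch of the same backoff loop, with list_is_empty() as a hypothetical stand-in for the real emptiness check:

#include <stdio.h>
#include <unistd.h>

#define IS_PO2(n) (((n) & ((n) - 1)) == 0)

extern int list_is_empty(void);  /* assumed condition being waited on */

void exports_barrier(void)
{
        int waited = 2;

        while (!list_is_empty()) {
                sleep(waited);                /* back off: 2, 4, 8, ... seconds */
                if (waited > 5 && IS_PO2(waited))
                        fprintf(stderr, "still waiting after %d seconds\n",
                                waited);
                waited *= 2;
        }
}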
 
@@ -1456,25 +1459,25 @@ void obd_zombie_impexp_cull(void)
         ENTRY;
 
         do {
-                spin_lock(&obd_zombie_impexp_lock);
+                cfs_spin_lock(&obd_zombie_impexp_lock);
 
                 import = NULL;
-                if (!list_empty(&obd_zombie_imports)) {
-                        import = list_entry(obd_zombie_imports.next,
-                                            struct obd_import,
-                                            imp_zombie_chain);
-                        list_del_init(&import->imp_zombie_chain);
+                if (!cfs_list_empty(&obd_zombie_imports)) {
+                        import = cfs_list_entry(obd_zombie_imports.next,
+                                                struct obd_import,
+                                                imp_zombie_chain);
+                        cfs_list_del_init(&import->imp_zombie_chain);
                 }
 
                 export = NULL;
-                if (!list_empty(&obd_zombie_exports)) {
-                        export = list_entry(obd_zombie_exports.next,
-                                            struct obd_export,
-                                            exp_obd_chain);
-                        list_del_init(&export->exp_obd_chain);
+                if (!cfs_list_empty(&obd_zombie_exports)) {
+                        export = cfs_list_entry(obd_zombie_exports.next,
+                                                struct obd_export,
+                                                exp_obd_chain);
+                        cfs_list_del_init(&export->exp_obd_chain);
                 }
 
-                spin_unlock(&obd_zombie_impexp_lock);
+                cfs_spin_unlock(&obd_zombie_impexp_lock);
 
                 if (import != NULL)
                         class_import_destroy(import);
@@ -1486,8 +1489,8 @@ void obd_zombie_impexp_cull(void)
         EXIT;
 }
 
-static struct completion        obd_zombie_start;
-static struct completion        obd_zombie_stop;
+static cfs_completion_t         obd_zombie_start;
+static cfs_completion_t         obd_zombie_stop;
 static unsigned long            obd_zombie_flags;
 static cfs_waitq_t              obd_zombie_waitq;
 static pid_t                    obd_zombie_pid;
@@ -1503,12 +1506,12 @@ static int obd_zombie_impexp_check(void *arg)
 {
         int rc;
 
-        spin_lock(&obd_zombie_impexp_lock);
-        rc = list_empty(&obd_zombie_imports) &&
-             list_empty(&obd_zombie_exports) &&
-             !test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+        cfs_spin_lock(&obd_zombie_impexp_lock);
+        rc = cfs_list_empty(&obd_zombie_imports) &&
+             cfs_list_empty(&obd_zombie_exports) &&
+             !cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
 
-        spin_unlock(&obd_zombie_impexp_lock);
+        cfs_spin_unlock(&obd_zombie_impexp_lock);
 
         RETURN(rc);
 }
@@ -1517,13 +1520,13 @@ static int obd_zombie_impexp_check(void *arg)
  * Add an export to the obd_zombie thread and notify it.
  */
 static void obd_zombie_export_add(struct obd_export *exp) {
-        spin_lock(&exp->exp_obd->obd_dev_lock);
-        LASSERT(!list_empty(&exp->exp_obd_chain));
-        list_del_init(&exp->exp_obd_chain);
-        spin_unlock(&exp->exp_obd->obd_dev_lock);
-        spin_lock(&obd_zombie_impexp_lock);
-        list_add(&exp->exp_obd_chain, &obd_zombie_exports);
-        spin_unlock(&obd_zombie_impexp_lock);
+        cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
+        LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
+        cfs_list_del_init(&exp->exp_obd_chain);
+        cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
+        cfs_spin_lock(&obd_zombie_impexp_lock);
+        cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
+        cfs_spin_unlock(&obd_zombie_impexp_lock);
 
         if (obd_zombie_impexp_notify != NULL)
                 obd_zombie_impexp_notify();
@@ -1534,10 +1537,10 @@ static void obd_zombie_export_add(struct obd_export *exp) {
  */
 static void obd_zombie_import_add(struct obd_import *imp) {
         LASSERT(imp->imp_sec == NULL);
-        spin_lock(&obd_zombie_impexp_lock);
-        LASSERT(list_empty(&imp->imp_zombie_chain));
-        list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
-        spin_unlock(&obd_zombie_impexp_lock);
+        cfs_spin_lock(&obd_zombie_impexp_lock);
+        LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
+        cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
+        cfs_spin_unlock(&obd_zombie_impexp_lock);
 
         if (obd_zombie_impexp_notify != NULL)
                 obd_zombie_impexp_notify();
@@ -1558,11 +1561,11 @@ static int obd_zombie_is_idle(void)
 {
         int rc;
 
-        LASSERT(!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
-        spin_lock(&obd_zombie_impexp_lock);
-        rc = list_empty(&obd_zombie_imports) &&
-             list_empty(&obd_zombie_exports);
-        spin_unlock(&obd_zombie_impexp_lock);
+        LASSERT(!cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
+        cfs_spin_lock(&obd_zombie_impexp_lock);
+        rc = cfs_list_empty(&obd_zombie_imports) &&
+             cfs_list_empty(&obd_zombie_exports);
+        cfs_spin_unlock(&obd_zombie_impexp_lock);
         return rc;
 }
 
@@ -1590,15 +1593,15 @@ static int obd_zombie_impexp_thread(void *unused)
         int rc;
 
         if ((rc = cfs_daemonize_ctxt("obd_zombid"))) {
-                complete(&obd_zombie_start);
+                cfs_complete(&obd_zombie_start);
                 RETURN(rc);
         }
 
-        complete(&obd_zombie_start);
+        cfs_complete(&obd_zombie_start);
 
         obd_zombie_pid = cfs_curproc_pid();
 
-        while(!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
+        while(!cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
                 struct l_wait_info lwi = { 0 };
 
                 l_wait_event(obd_zombie_waitq,
@@ -1612,14 +1615,14 @@ static int obd_zombie_impexp_thread(void *unused)
                 cfs_waitq_signal(&obd_zombie_waitq);
         }
 
-        complete(&obd_zombie_stop);
+        cfs_complete(&obd_zombie_stop);
 
         RETURN(0);
 }
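
The zombie thread above sleeps until work arrives or OBD_ZOMBIE_STOP is set, culls the queued imports and exports, then signals obd_zombie_waitq so barrier callers can re-check idleness. A condition-variable analogue of that loop; have_work(), stop_requested(), and cull() are hypothetical stand-ins for the list checks, the stop bit, and obd_zombie_impexp_cull():

#include <pthread.h>

static pthread_mutex_t zombie_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  zombie_cv  = PTHREAD_COND_INITIALIZER;

extern int  have_work(void);       /* non-empty zombie lists */
extern int  stop_requested(void);  /* OBD_ZOMBIE_STOP equivalent */
extern void cull(void);            /* destroy everything queued so far */

void *zombie_thread(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&zombie_mtx);
        while (!stop_requested()) {
                while (!have_work() && !stop_requested())
                        pthread_cond_wait(&zombie_cv, &zombie_mtx);
                pthread_mutex_unlock(&zombie_mtx);
                cull();                             /* destroy outside the lock */
                pthread_cond_broadcast(&zombie_cv); /* wake idle-barrier waiters */
                pthread_mutex_lock(&zombie_mtx);
        }
        pthread_mutex_unlock(&zombie_mtx);
        return NULL;
}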
 
 #else /* ! KERNEL */
 
-static atomic_t zombie_recur = ATOMIC_INIT(0);
+static cfs_atomic_t zombie_recur = CFS_ATOMIC_INIT(0);
 static void *obd_zombie_impexp_work_cb;
 static void *obd_zombie_impexp_idle_cb;
 
@@ -1627,11 +1630,11 @@ int obd_zombie_impexp_kill(void *arg)
 {
         int rc = 0;
 
-        if (atomic_inc_return(&zombie_recur) == 1) {
+        if (cfs_atomic_inc_return(&zombie_recur) == 1) {
                 obd_zombie_impexp_cull();
                 rc = 1;
         }
-        atomic_dec(&zombie_recur);
+        cfs_atomic_dec(&zombie_recur);
         return rc;
 }
 
@@ -1646,9 +1649,9 @@ int obd_zombie_impexp_init(void)
 
         CFS_INIT_LIST_HEAD(&obd_zombie_imports);
         CFS_INIT_LIST_HEAD(&obd_zombie_exports);
-        spin_lock_init(&obd_zombie_impexp_lock);
-        init_completion(&obd_zombie_start);
-        init_completion(&obd_zombie_stop);
+        cfs_spin_lock_init(&obd_zombie_impexp_lock);
+        cfs_init_completion(&obd_zombie_start);
+        cfs_init_completion(&obd_zombie_stop);
         cfs_waitq_init(&obd_zombie_waitq);
         obd_zombie_pid = 0;
 
@@ -1657,7 +1660,7 @@ int obd_zombie_impexp_init(void)
         if (rc < 0)
                 RETURN(rc);
 
-        wait_for_completion(&obd_zombie_start);
+        cfs_wait_for_completion(&obd_zombie_start);
 #else
 
         obd_zombie_impexp_work_cb =
@@ -1676,10 +1679,10 @@ int obd_zombie_impexp_init(void)
  */
 void obd_zombie_impexp_stop(void)
 {
-        set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+        cfs_set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
         obd_zombie_impexp_notify();
 #ifdef __KERNEL__
-        wait_for_completion(&obd_zombie_stop);
+        cfs_wait_for_completion(&obd_zombie_stop);
 #else
         liblustre_deregister_wait_callback(obd_zombie_impexp_work_cb);
         liblustre_deregister_idle_callback(obd_zombie_impexp_idle_cb);
index 89a00b9..1ca3b4f 100644
 #include <obd_support.h>
 
 #define lustre_get_group_info(group_info) do {             \
-        atomic_inc(&(group_info)->usage);                  \
+        cfs_atomic_inc(&(group_info)->usage);              \
 } while (0)
 
 #define lustre_put_group_info(group_info) do {             \
-        if (atomic_dec_and_test(&(group_info)->usage))     \
-                groups_free(group_info);                   \
+        if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
+                cfs_groups_free(group_info);               \
 } while (0)
 
 /*
  * groups_search() is copied from linux kernel!
  * A simple bsearch.
  */
-static int lustre_groups_search(struct group_info *group_info, gid_t grp)
+static int lustre_groups_search(cfs_group_info_t *group_info,
+                                gid_t grp)
 {
         int left, right;
 
@@ -85,7 +86,7 @@ static int lustre_groups_search(struct group_info *group_info, gid_t grp)
         return 0;
 }
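
As its comment notes, lustre_groups_search() is a plain binary search copied from the kernel. A self-contained sketch of the same search over a flat sorted array (the kernel version walks group_info's block array, which is elided here):

int groups_search(const unsigned int *groups, int ngroups, unsigned int grp)
{
        int left = 0, right = ngroups;

        while (left < right) {
                int mid = (left + right) / 2;

                if (grp > groups[mid])
                        left = mid + 1;     /* search the upper half */
                else if (grp < groups[mid])
                        right = mid;        /* search the lower half */
                else
                        return 1;           /* gid is a member */
        }
        return 0;                           /* not found */
}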
 
-void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist)
+void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist)
 {
         int i;
         int count = ginfo->ngroups;
@@ -104,7 +105,7 @@ EXPORT_SYMBOL(lustre_groups_from_list);
 
 /* groups_sort() is copied from linux kernel! */
 /* a simple shell-metzner sort */
-void lustre_groups_sort(struct group_info *group_info)
+void lustre_groups_sort(cfs_group_info_t *group_info)
 {
         int base, max, stride;
         int gidsetsize = group_info->ngroups;
@@ -139,7 +140,7 @@ int lustre_in_group_p(struct md_ucred *mu, gid_t grp)
         int rc = 1;
 
         if (grp != mu->mu_fsgid) {
-                struct group_info *group_info = NULL;
+                cfs_group_info_t *group_info = NULL;
 
                 if (mu->mu_ginfo || !mu->mu_identity ||
                     mu->mu_valid == UCRED_OLD)
@@ -164,10 +165,10 @@ int lustre_in_group_p(struct md_ucred *mu, gid_t grp)
 EXPORT_SYMBOL(lustre_in_group_p);
 
 struct lustre_idmap_entry {
-        struct list_head lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
-        struct list_head lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
-        struct list_head lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
-        struct list_head lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
+        cfs_list_t       lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
+        cfs_list_t       lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
+        cfs_list_t       lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
+        cfs_list_t       lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
         uid_t            lie_rmt_uid;      /* remote uid */
         uid_t            lie_lcl_uid;      /* local uid */
         gid_t            lie_rmt_gid;      /* remote gid */
@@ -203,14 +204,14 @@ struct lustre_idmap_entry *idmap_entry_alloc(uid_t rmt_uid, uid_t lcl_uid,
 
 static void idmap_entry_free(struct lustre_idmap_entry *e)
 {
-        if (!list_empty(&e->lie_rmt_uid_hash))
-                list_del(&e->lie_rmt_uid_hash);
-        if (!list_empty(&e->lie_lcl_uid_hash))
-                list_del(&e->lie_lcl_uid_hash);
-        if (!list_empty(&e->lie_rmt_gid_hash))
-                list_del(&e->lie_rmt_gid_hash);
-        if (!list_empty(&e->lie_lcl_gid_hash))
-                list_del(&e->lie_lcl_gid_hash);
+        if (!cfs_list_empty(&e->lie_rmt_uid_hash))
+                cfs_list_del(&e->lie_rmt_uid_hash);
+        if (!cfs_list_empty(&e->lie_lcl_uid_hash))
+                cfs_list_del(&e->lie_lcl_uid_hash);
+        if (!cfs_list_empty(&e->lie_rmt_gid_hash))
+                cfs_list_del(&e->lie_rmt_gid_hash);
+        if (!cfs_list_empty(&e->lie_lcl_gid_hash))
+                cfs_list_del(&e->lie_lcl_gid_hash);
         OBD_FREE_PTR(e);
 }
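
Each lustre_idmap_entry above sits on four intrusive hash chains at once (remote/local uid and gid), so idmap_entry_free() unlinks whichever chains the entry actually joined, with cfs_list_empty() guarding entries that were only partially inserted. A minimal two-chain version using a hand-rolled circular list (not the libcfs types):

#include <stdlib.h>

struct list_head {
        struct list_head *next, *prev;
};

static void list_init(struct list_head *h)        { h->next = h->prev = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_del_init(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        list_init(e);  /* self-linked, so list_empty() is true afterwards */
}

struct idmap_entry {
        struct list_head uid_hash;  /* chain keyed by uid */
        struct list_head gid_hash;  /* chain keyed by gid */
        unsigned int     uid, gid;
};

void idmap_entry_free(struct idmap_entry *e)
{
        if (!list_empty(&e->uid_hash))
                list_del_init(&e->uid_hash);
        if (!list_empty(&e->gid_hash))
                list_del_init(&e->gid_hash);
        free(e);
}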
 
@@ -225,11 +226,11 @@ struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t,
                                               uid_t rmt_uid, uid_t lcl_uid,
                                               gid_t rmt_gid, gid_t lcl_gid)
 {
-        struct list_head *head;
+        cfs_list_t *head;
         struct lustre_idmap_entry *e;
 
         head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)];
-        list_for_each_entry(e, head, lie_rmt_uid_hash)
+        cfs_list_for_each_entry(e, head, lie_rmt_uid_hash)
                 if (e->lie_rmt_uid == rmt_uid) {
                         if (e->lie_lcl_uid == lcl_uid) {
                                 if (e->lie_rmt_gid == rmt_gid &&
@@ -246,7 +247,7 @@ struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t,
                 }
 
         head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)];
-        list_for_each_entry(e, head, lie_rmt_gid_hash)
+        cfs_list_for_each_entry(e, head, lie_rmt_gid_hash)
                 if (e->lie_rmt_gid == rmt_gid) {
                         if (e->lie_lcl_gid == lcl_gid) {
                                 if (unlikely(e->lie_rmt_uid == rmt_uid &&
@@ -266,17 +267,18 @@ struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t,
         return NULL;
 }
 
-static __u32 idmap_lookup_uid(struct list_head *hash, int reverse, __u32 uid)
+static __u32 idmap_lookup_uid(cfs_list_t *hash, int reverse,
+                              __u32 uid)
 {
-        struct list_head *head = &hash[lustre_idmap_hashfunc(uid)];
+        cfs_list_t *head = &hash[lustre_idmap_hashfunc(uid)];
         struct lustre_idmap_entry *e;
 
         if (!reverse) {
-                list_for_each_entry(e, head, lie_rmt_uid_hash)
+                cfs_list_for_each_entry(e, head, lie_rmt_uid_hash)
                         if (e->lie_rmt_uid == uid)
                                 return e->lie_lcl_uid;
         } else {
-                list_for_each_entry(e, head, lie_lcl_uid_hash)
+                cfs_list_for_each_entry(e, head, lie_lcl_uid_hash)
                         if (e->lie_lcl_uid == uid)
                                 return e->lie_rmt_uid;
         }
@@ -284,17 +286,17 @@ static __u32 idmap_lookup_uid(struct list_head *hash, int reverse, __u32 uid)
         return CFS_IDMAP_NOTFOUND;
 }
 
-static __u32 idmap_lookup_gid(struct list_head *hash, int reverse, __u32 gid)
+static __u32 idmap_lookup_gid(cfs_list_t *hash, int reverse, __u32 gid)
 {
-        struct list_head *head = &hash[lustre_idmap_hashfunc(gid)];
+        cfs_list_t *head = &hash[lustre_idmap_hashfunc(gid)];
         struct lustre_idmap_entry *e;
 
         if (!reverse) {
-                list_for_each_entry(e, head, lie_rmt_gid_hash)
+                cfs_list_for_each_entry(e, head, lie_rmt_gid_hash)
                         if (e->lie_rmt_gid == gid)
                                 return e->lie_lcl_gid;
         } else {
-                list_for_each_entry(e, head, lie_lcl_gid_hash)
+                cfs_list_for_each_entry(e, head, lie_lcl_gid_hash)
                         if (e->lie_lcl_gid == gid)
                                 return e->lie_rmt_gid;
         }
@@ -310,31 +312,31 @@ int lustre_idmap_add(struct lustre_idmap_table *t,
 
         LASSERT(t);
 
-        spin_lock(&t->lit_lock);
+        cfs_spin_lock(&t->lit_lock);
         e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
-        spin_unlock(&t->lit_lock);
+        cfs_spin_unlock(&t->lit_lock);
         if (!e0) {
                 e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
                 if (!e0)
                         return -ENOMEM;
 
-                spin_lock(&t->lit_lock);
+                cfs_spin_lock(&t->lit_lock);
                 e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
                 if (e1 == NULL) {
-                        list_add_tail(&e0->lie_rmt_uid_hash,
-                                      &t->lit_idmaps[RMT_UIDMAP_IDX]
-                                        [lustre_idmap_hashfunc(ruid)]);
-                        list_add_tail(&e0->lie_lcl_uid_hash,
-                                      &t->lit_idmaps[LCL_UIDMAP_IDX]
-                                        [lustre_idmap_hashfunc(luid)]);
-                        list_add_tail(&e0->lie_rmt_gid_hash,
-                                      &t->lit_idmaps[RMT_GIDMAP_IDX]
-                                        [lustre_idmap_hashfunc(rgid)]);
-                        list_add_tail(&e0->lie_lcl_gid_hash,
-                                      &t->lit_idmaps[LCL_GIDMAP_IDX]
-                                        [lustre_idmap_hashfunc(lgid)]);
-                } 
-                spin_unlock(&t->lit_lock);
+                        cfs_list_add_tail(&e0->lie_rmt_uid_hash,
+                                          &t->lit_idmaps[RMT_UIDMAP_IDX]
+                                          [lustre_idmap_hashfunc(ruid)]);
+                        cfs_list_add_tail(&e0->lie_lcl_uid_hash,
+                                          &t->lit_idmaps[LCL_UIDMAP_IDX]
+                                          [lustre_idmap_hashfunc(luid)]);
+                        cfs_list_add_tail(&e0->lie_rmt_gid_hash,
+                                          &t->lit_idmaps[RMT_GIDMAP_IDX]
+                                          [lustre_idmap_hashfunc(rgid)]);
+                        cfs_list_add_tail(&e0->lie_lcl_gid_hash,
+                                          &t->lit_idmaps[LCL_GIDMAP_IDX]
+                                          [lustre_idmap_hashfunc(lgid)]);
+                }
+                cfs_spin_unlock(&t->lit_lock);
                 if (e1 != NULL) {
                         idmap_entry_free(e0);
                         if (IS_ERR(e1))
@@ -357,13 +359,13 @@ int lustre_idmap_del(struct lustre_idmap_table *t,
 
         LASSERT(t);
 
-        spin_lock(&t->lit_lock);
+        cfs_spin_lock(&t->lit_lock);
         e = idmap_search_entry(t, ruid, luid, rgid, lgid);
         if (IS_ERR(e))
                 rc = PTR_ERR(e);
         else if (e)
                 idmap_entry_free(e);
-        spin_unlock(&t->lit_lock);
+        cfs_spin_unlock(&t->lit_lock);
 
         return rc;
 }
@@ -373,7 +375,7 @@ int lustre_idmap_lookup_uid(struct md_ucred *mu,
                             struct lustre_idmap_table *t,
                             int reverse, uid_t uid)
 {
-        struct list_head *hash;
+        cfs_list_t *hash;
 
         if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
                 if (!reverse) {
@@ -394,9 +396,9 @@ int lustre_idmap_lookup_uid(struct md_ucred *mu,
 
         hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];
 
-        spin_lock(&t->lit_lock);
+        cfs_spin_lock(&t->lit_lock);
         uid = idmap_lookup_uid(hash, reverse, uid);
-        spin_unlock(&t->lit_lock);
+        cfs_spin_unlock(&t->lit_lock);
 
         return uid;
 }
@@ -405,7 +407,7 @@ EXPORT_SYMBOL(lustre_idmap_lookup_uid);
 int lustre_idmap_lookup_gid(struct md_ucred *mu, struct lustre_idmap_table *t,
                             int reverse, gid_t gid)
 {
-        struct list_head *hash;
+        cfs_list_t *hash;
 
         if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
                 if (!reverse) {
@@ -426,9 +428,9 @@ int lustre_idmap_lookup_gid(struct md_ucred *mu, struct lustre_idmap_table *t,
 
         hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];
 
-        spin_lock(&t->lit_lock);
+        cfs_spin_lock(&t->lit_lock);
         gid = idmap_lookup_gid(hash, reverse, gid);
-        spin_unlock(&t->lit_lock);
+        cfs_spin_unlock(&t->lit_lock);
 
         return gid;
 }
@@ -443,10 +445,10 @@ struct lustre_idmap_table *lustre_idmap_init(void)
         if(unlikely(t == NULL))
                 return (ERR_PTR(-ENOMEM));
 
-        spin_lock_init(&t->lit_lock);
+        cfs_spin_lock_init(&t->lit_lock);
         for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
                 for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
-                        INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
+                        CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
 
         return t;
 }
@@ -454,20 +456,21 @@ EXPORT_SYMBOL(lustre_idmap_init);
 
 void lustre_idmap_fini(struct lustre_idmap_table *t)
 {
-        struct list_head *list;
+        cfs_list_t *list;
         struct lustre_idmap_entry *e;
         int i;
         LASSERT(t);
 
         list = t->lit_idmaps[RMT_UIDMAP_IDX];
-        spin_lock(&t->lit_lock);
+        cfs_spin_lock(&t->lit_lock);
         for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
-                while (!list_empty(&list[i])) {
-                        e = list_entry(list[i].next, struct lustre_idmap_entry,
-                                       lie_rmt_uid_hash);
+                while (!cfs_list_empty(&list[i])) {
+                        e = cfs_list_entry(list[i].next,
+                                           struct lustre_idmap_entry,
+                                           lie_rmt_uid_hash);
                         idmap_entry_free(e);
                 }
-        spin_unlock(&t->lit_lock);
+        cfs_spin_unlock(&t->lit_lock);
 
         OBD_FREE_PTR(t);
 }
index 5c7a38d..3480e20 100644
@@ -98,7 +98,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
         int offset = 0;
         ENTRY;
 
-        err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
+        err = cfs_copy_from_user(&hdr, (void *)arg, sizeof(hdr));
         if ( err )
                 RETURN(err);
 
@@ -130,7 +130,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
         *len = hdr.ioc_len;
         data = (struct obd_ioctl_data *)*buf;
 
-        err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
+        err = cfs_copy_from_user(*buf, (void *)arg, hdr.ioc_len);
         if ( err ) {
                 OBD_VFREE(*buf, hdr.ioc_len);
                 RETURN(err);
@@ -144,17 +144,17 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
 
         if (data->ioc_inllen1) {
                 data->ioc_inlbuf1 = &data->ioc_bulk[0];
-                offset += size_round(data->ioc_inllen1);
+                offset += cfs_size_round(data->ioc_inllen1);
         }
 
         if (data->ioc_inllen2) {
                 data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
-                offset += size_round(data->ioc_inllen2);
+                offset += cfs_size_round(data->ioc_inllen2);
         }
 
         if (data->ioc_inllen3) {
                 data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
-                offset += size_round(data->ioc_inllen3);
+                offset += cfs_size_round(data->ioc_inllen3);
         }
 
         if (data->ioc_inllen4) {
@@ -169,7 +169,7 @@ int obd_ioctl_popdata(void *arg, void *data, int len)
 {
         int err;
 
-        err = copy_to_user(arg, data, len);
+        err = cfs_copy_to_user(arg, data, len);
         if (err)
                 err = -EFAULT;
         return err;
@@ -288,7 +288,7 @@ static int obd_proc_read_health(char *page, char **start, off_t off,
         if (libcfs_catastrophe)
                 rc += snprintf(page + rc, count - rc, "LBUG\n");
 
-        spin_lock(&obd_dev_lock);
+        cfs_spin_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd;
 
@@ -301,7 +301,7 @@ static int obd_proc_read_health(char *page, char **start, off_t off,
                         continue;
 
                 class_incref(obd, __FUNCTION__, cfs_current());
-                spin_unlock(&obd_dev_lock);
+                cfs_spin_unlock(&obd_dev_lock);
 
                 if (obd_health_check(obd)) {
                         rc += snprintf(page + rc, count - rc,
@@ -309,9 +309,9 @@ static int obd_proc_read_health(char *page, char **start, off_t off,
                                        obd->obd_name);
                 }
                 class_decref(obd, __FUNCTION__, cfs_current());
-                spin_lock(&obd_dev_lock);
+                cfs_spin_lock(&obd_dev_lock);
         }
-        spin_unlock(&obd_dev_lock);
+        cfs_spin_unlock(&obd_dev_lock);
 
         if (rc == 0)
                 return snprintf(page, count, "healthy\n");
@@ -379,7 +379,7 @@ static int obd_device_list_seq_show(struct seq_file *p, void *v)
         return seq_printf(p, "%3d %s %s %s %s %d\n",
                           (int)index, status, obd->obd_type->typ_name,
                           obd->obd_name, obd->obd_uuid.uuid,
-                          atomic_read(&obd->obd_refcount));
+                          cfs_atomic_read(&obd->obd_refcount));
 }
 
 struct seq_operations obd_device_list_sops = {
index e85b5ab..5e5a5d8 100644
@@ -196,7 +196,7 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, obd_flag valid)
                 i_size_write(dst, src->o_size);
         /* optimum IO size */
         if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) {
-                dst->i_blkbits = ffs(src->o_blksize) - 1;
+                dst->i_blkbits = cfs_ffs(src->o_blksize) - 1;
 #ifdef HAVE_INODE_BLKSIZE
                 dst->i_blksize = src->o_blksize;
 #endif
@@ -244,7 +244,7 @@ void obdo_to_inode(struct inode *dst, struct obdo *src, obd_flag valid)
 
         }
         if (valid & OBD_MD_FLBLKSZ) {
-                dst->i_blkbits = ffs(src->o_blksize)-1;
+                dst->i_blkbits = cfs_ffs(src->o_blksize)-1;
 #ifdef HAVE_INODE_BLKSIZE
                 dst->i_blksize = src->o_blksize;
 #endif
index 86813fb..500deb5 100644
@@ -116,7 +116,7 @@ int LL_PROC_PROTO(proc_fail_loc)
 
         rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
         if (old_fail_loc != obd_fail_loc)
-                wake_up(&obd_race_waitq);
+                cfs_waitq_signal(&obd_race_waitq);
         return rc;
 }
 
@@ -147,7 +147,7 @@ int LL_PROC_PROTO(proc_memory_alloc)
         if (len > *lenp)
                 len = *lenp;
         buf[len] = '\0';
-        if (copy_to_user(buffer, buf, len))
+        if (cfs_copy_to_user(buffer, buf, len))
                 return -EFAULT;
         *lenp = len;
         *ppos += *lenp;
@@ -171,7 +171,7 @@ int LL_PROC_PROTO(proc_pages_alloc)
         if (len > *lenp)
                 len = *lenp;
         buf[len] = '\0';
-        if (copy_to_user(buffer, buf, len))
+        if (cfs_copy_to_user(buffer, buf, len))
                 return -EFAULT;
         *lenp = len;
         *ppos += *lenp;
@@ -195,7 +195,7 @@ int LL_PROC_PROTO(proc_mem_max)
         if (len > *lenp)
                 len = *lenp;
         buf[len] = '\0';
-        if (copy_to_user(buffer, buf, len))
+        if (cfs_copy_to_user(buffer, buf, len))
                 return -EFAULT;
         *lenp = len;
         *ppos += *lenp;
@@ -219,7 +219,7 @@ int LL_PROC_PROTO(proc_pages_max)
         if (len > *lenp)
                 len = *lenp;
         buf[len] = '\0';
-        if (copy_to_user(buffer, buf, len))
+        if (cfs_copy_to_user(buffer, buf, len))
                 return -EFAULT;
         *lenp = len;
         *ppos += *lenp;
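
The four proc handlers in this hunk all share one shape: format the value into a stack buffer, clamp the length to what the reader asked for, copy out with cfs_copy_to_user(), and advance *ppos. A userspace analogue of that read path, with memcpy standing in for the user-copy (which can fail and must be checked, as in the -EFAULT branches above):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

int proc_read_value(long value, char *buffer, size_t *lenp, off_t *ppos)
{
        char   buf[24];
        size_t len;

        len = (size_t)snprintf(buf, sizeof(buf), "%ld\n", value);
        if (len > *lenp)
                len = *lenp;          /* clamp to the reader's buffer */
        memcpy(buffer, buf, len);     /* cfs_copy_to_user() in the kernel */
        *lenp  = len;
        *ppos += len;
        return 0;
}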
@@ -239,13 +239,14 @@ int LL_PROC_PROTO(proc_max_dirty_pages_in_mb)
                 rc = lprocfs_write_frac_helper(buffer, *lenp,
                                                (unsigned int*)table->data,
                                                1 << (20 - CFS_PAGE_SHIFT));
-                /* Don't allow them to let dirty pages exceed 90% of system memory,
-                 * and set a hard minimum of 4MB. */
-                if (obd_max_dirty_pages > ((num_physpages / 10) * 9)) {
+                /* Don't allow them to let dirty pages exceed 90% of system
+                 * memory and set a hard minimum of 4MB. */
+                if (obd_max_dirty_pages > ((cfs_num_physpages / 10) * 9)) {
                         CERROR("Refusing to set max dirty pages to %u, which "
-                               "is more than 90%% of available RAM; setting to %lu\n",
-                               obd_max_dirty_pages, ((num_physpages / 10) * 9));
-                        obd_max_dirty_pages = ((num_physpages / 10) * 9);
+                               "is more than 90%% of available RAM; setting "
+                               "to %lu\n", obd_max_dirty_pages,
+                               ((cfs_num_physpages / 10) * 9));
+                        obd_max_dirty_pages = ((cfs_num_physpages / 10) * 9);
                 } else if (obd_max_dirty_pages < 4 << (20 - CFS_PAGE_SHIFT)) {
                         obd_max_dirty_pages = 4 << (20 - CFS_PAGE_SHIFT);
                 }
@@ -259,7 +260,7 @@ int LL_PROC_PROTO(proc_max_dirty_pages_in_mb)
                 if (len > *lenp)
                         len = *lenp;
                 buf[len] = '\0';
-                if (copy_to_user(buffer, buf, len))
+                if (cfs_copy_to_user(buffer, buf, len))
                         return -EFAULT;
                 *lenp = len;
         }
@@ -291,7 +292,7 @@ int LL_PROC_PROTO(proc_alloc_fail_rate)
                 if (len > *lenp)
                         len = *lenp;
                 buf[len] = '\0';
-                if (copy_to_user(buffer, buf, len))
+                if (cfs_copy_to_user(buffer, buf, len))
                         return -EFAULT;
                 *lenp = len;
         }
index 31d6ba7..14ed25d 100644
@@ -68,7 +68,7 @@ struct llog_handle *llog_alloc_handle(void)
         if (loghandle == NULL)
                 RETURN(ERR_PTR(-ENOMEM));
 
-        init_rwsem(&loghandle->lgh_lock);
+        cfs_init_rwsem(&loghandle->lgh_lock);
 
         RETURN(loghandle);
 }
@@ -83,9 +83,9 @@ void llog_free_handle(struct llog_handle *loghandle)
         if (!loghandle->lgh_hdr)
                 goto out;
         if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)
-                list_del_init(&loghandle->u.phd.phd_entry);
+                cfs_list_del_init(&loghandle->u.phd.phd_entry);
         if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
-                LASSERT(list_empty(&loghandle->u.chd.chd_head));
+                LASSERT(cfs_list_empty(&loghandle->u.chd.chd_head));
         OBD_FREE(loghandle->lgh_hdr, LLOG_CHUNK_SIZE);
 
  out:
@@ -237,7 +237,7 @@ static int llog_process_thread(void *arg)
         if (!buf) {
                 lpi->lpi_rc = -ENOMEM;
 #ifdef __KERNEL__
-                complete(&lpi->lpi_completion);
+                cfs_complete(&lpi->lpi_completion);
 #endif
                 return 0;
         }
@@ -349,7 +349,7 @@ static int llog_process_thread(void *arg)
                 OBD_FREE(buf, LLOG_CHUNK_SIZE);
         lpi->lpi_rc = rc;
 #ifdef __KERNEL__
-        complete(&lpi->lpi_completion);
+        cfs_complete(&lpi->lpi_completion);
 #endif
         return 0;
 }
@@ -372,14 +372,14 @@ int llog_process(struct llog_handle *loghandle, llog_cb_t cb,
         lpi->lpi_catdata   = catdata;
 
 #ifdef __KERNEL__
-        init_completion(&lpi->lpi_completion);
+        cfs_init_completion(&lpi->lpi_completion);
         rc = cfs_kernel_thread(llog_process_thread, lpi, CLONE_VM | CLONE_FILES);
         if (rc < 0) {
                 CERROR("cannot start thread: %d\n", rc);
                 OBD_FREE_PTR(lpi);
                 RETURN(rc);
         }
-        wait_for_completion(&lpi->lpi_completion);
+        cfs_wait_for_completion(&lpi->lpi_completion);
 #else
         llog_process_thread(lpi);
 #endif
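
llog_process() spawns llog_process_thread() as a kernel thread and blocks
until the worker signals completion; only the spelling of the primitives
changes here. A sketch of the assumed Linux mapping (illustrative, not
quoted from the tree):

        typedef struct completion cfs_completion_t;
        #define cfs_init_completion(c)      init_completion(c)
        #define cfs_complete(c)             complete(c)
        #define cfs_wait_for_completion(c)  wait_for_completion(c)

In the !__KERNEL__ branch the worker is simply called inline, so no
completion is needed there.
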
index fdc7248..de05bb6 100644 (file)
@@ -126,8 +126,9 @@ static struct llog_handle *llog_cat_new_log(struct llog_handle *cathandle)
 
         loghandle->lgh_hdr->llh_cat_idx = index;
         cathandle->u.chd.chd_current_log = loghandle;
-        LASSERT(list_empty(&loghandle->u.phd.phd_entry));
-        list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
+        LASSERT(cfs_list_empty(&loghandle->u.phd.phd_entry));
+        cfs_list_add_tail(&loghandle->u.phd.phd_entry,
+                          &cathandle->u.chd.chd_head);
 
 out_destroy:
         if (rc < 0)
@@ -152,8 +153,8 @@ int llog_cat_id2handle(struct llog_handle *cathandle, struct llog_handle **res,
         if (cathandle == NULL)
                 RETURN(-EBADF);
 
-        list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
-                            u.phd.phd_entry) {
+        cfs_list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
+                                u.phd.phd_entry) {
                 struct llog_logid *cgl = &loghandle->lgh_id;
                 if (cgl->lgl_oid == logid->lgl_oid) {
                         if (cgl->lgl_ogen != logid->lgl_ogen) {
@@ -174,8 +175,8 @@ int llog_cat_id2handle(struct llog_handle *cathandle, struct llog_handle **res,
         } else {
                 rc = llog_init_handle(loghandle, LLOG_F_IS_PLAIN, NULL);
                 if (!rc) {
-                        list_add(&loghandle->u.phd.phd_entry,
-                                 &cathandle->u.chd.chd_head);
+                        cfs_list_add(&loghandle->u.phd.phd_entry,
+                                     &cathandle->u.chd.chd_head);
                 }
         }
         if (!rc) {
@@ -196,8 +197,8 @@ int llog_cat_put(struct llog_handle *cathandle)
         int rc;
         ENTRY;
 
-        list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
-                                 u.phd.phd_entry) {
+        cfs_list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
+                                     u.phd.phd_entry) {
                 int err = llog_close(loghandle);
                 if (err)
                         CERROR("error closing loghandle\n");
@@ -231,47 +232,47 @@ static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
         struct llog_handle *loghandle = NULL;
         ENTRY;
 
-        down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
+        cfs_down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
         loghandle = cathandle->u.chd.chd_current_log;
         if (loghandle) {
                 struct llog_log_hdr *llh = loghandle->lgh_hdr;
-                down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+                cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
                 if (loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
-                        up_read(&cathandle->lgh_lock);
+                        cfs_up_read(&cathandle->lgh_lock);
                         RETURN(loghandle);
                 } else {
-                        up_write(&loghandle->lgh_lock);
+                        cfs_up_write(&loghandle->lgh_lock);
                 }
         }
         if (!create) {
                 if (loghandle)
-                        down_write(&loghandle->lgh_lock);
-                up_read(&cathandle->lgh_lock);
+                        cfs_down_write(&loghandle->lgh_lock);
+                cfs_up_read(&cathandle->lgh_lock);
                 RETURN(loghandle);
         }
-        up_read(&cathandle->lgh_lock);
+        cfs_up_read(&cathandle->lgh_lock);
 
         /* time to create new log */
 
         /* first, we have to make sure the state hasn't changed */
-        down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
+        cfs_down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
         loghandle = cathandle->u.chd.chd_current_log;
         if (loghandle) {
                 struct llog_log_hdr *llh = loghandle->lgh_hdr;
-                down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+                cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
                 if (loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
-                        up_write(&cathandle->lgh_lock);
+                        cfs_up_write(&cathandle->lgh_lock);
                         RETURN(loghandle);
                 } else {
-                        up_write(&loghandle->lgh_lock);
+                        cfs_up_write(&loghandle->lgh_lock);
                 }
         }
 
         CDEBUG(D_INODE, "creating new log\n");
         loghandle = llog_cat_new_log(cathandle);
         if (!IS_ERR(loghandle))
-                down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
-        up_write(&cathandle->lgh_lock);
+                cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+        cfs_up_write(&cathandle->lgh_lock);
         RETURN(loghandle);
 }
 
@@ -295,14 +296,14 @@ int llog_cat_add_rec(struct llog_handle *cathandle, struct llog_rec_hdr *rec,
         rc = llog_write_rec(loghandle, rec, reccookie, 1, buf, -1);
         if (rc < 0)
                 CERROR("llog_write_rec %d: lh=%p\n", rc, loghandle);
-        up_write(&loghandle->lgh_lock);
+        cfs_up_write(&loghandle->lgh_lock);
         if (rc == -ENOSPC) {
                 /* to create a new plain log */
                 loghandle = llog_cat_current_log(cathandle, 1);
                 if (IS_ERR(loghandle))
                         RETURN(PTR_ERR(loghandle));
                 rc = llog_write_rec(loghandle, rec, reccookie, 1, buf, -1);
-                up_write(&loghandle->lgh_lock);
+                cfs_up_write(&loghandle->lgh_lock);
         }
 
         RETURN(rc);
@@ -324,7 +325,7 @@ int llog_cat_cancel_records(struct llog_handle *cathandle, int count,
         int i, index, rc = 0;
         ENTRY;
 
-        down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
+        cfs_down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
         for (i = 0; i < count; i++, cookies++) {
                 struct llog_handle *loghandle;
                 struct llog_logid *lgl = &cookies->lgc_lgl;
@@ -335,9 +336,9 @@ int llog_cat_cancel_records(struct llog_handle *cathandle, int count,
                         break;
                 }
 
-                down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+                cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
                 rc = llog_cancel_rec(loghandle, cookies->lgc_index);
-                up_write(&loghandle->lgh_lock);
+                cfs_up_write(&loghandle->lgh_lock);
 
                 if (rc == 1) {          /* log has been destroyed */
                         index = loghandle->u.phd.phd_cookie.lgc_index;
@@ -354,7 +355,7 @@ int llog_cat_cancel_records(struct llog_handle *cathandle, int count,
                                        index, cathandle->lgh_id.lgl_oid);
                 }
         }
-        up_write(&cathandle->lgh_lock);
+        cfs_up_write(&cathandle->lgh_lock);
 
         RETURN(rc);
 }
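
llog_cat_current_log() and llog_cat_cancel_records() keep a strict lock
order: the catalog rwsem (subclass LLOGH_CAT) is always taken before any
plain-log rwsem (subclass LLOGH_LOG), and the _nested variants pass the
subclass to lockdep so the two-level nesting is not reported as a
self-deadlock. Assuming the cfs_ rwsem wrappers forward directly to the
kernel API, the sketch is:

        #define cfs_down_read_nested(sem, sub)   down_read_nested(sem, sub)
        #define cfs_down_write_nested(sem, sub)  down_write_nested(sem, sub)
        #define cfs_up_read(sem)                 up_read(sem)
        #define cfs_up_write(sem)                up_write(sem)
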
index d83733e..82433cb 100644 (file)
@@ -45,7 +45,7 @@ struct llog_process_info {
         void               *lpi_cbdata;
         void               *lpi_catdata;
         int                 lpi_rc;
-        struct completion   lpi_completion;
+        cfs_completion_t    lpi_completion;
 };
 
 int llog_cat_id2handle(struct llog_handle *cathandle, struct llog_handle **res,
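
Switching lpi_completion from struct completion to cfs_completion_t lets
llog_process_info build on ports without kernel completions. One plausible
userspace shape, purely illustrative (the type body below is hypothetical,
not taken from the tree):

        #ifndef __KERNEL__
        typedef struct {
                int done;   /* hypothetical: llog_process() runs the worker inline */
        } cfs_completion_t;
        #endif
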
index ca1877e..b7e1843 100644 (file)
@@ -103,9 +103,9 @@ static int llog_check_cb(struct llog_handle *handle, struct llog_rec_hdr *rec,
         if (ioc_data && (ioc_data->ioc_inllen1)) {
                 l = 0;
                 remains = ioc_data->ioc_inllen4 +
-                        size_round(ioc_data->ioc_inllen1) +
-                        size_round(ioc_data->ioc_inllen2) +
-                        size_round(ioc_data->ioc_inllen3);
+                        cfs_size_round(ioc_data->ioc_inllen1) +
+                        cfs_size_round(ioc_data->ioc_inllen2) +
+                        cfs_size_round(ioc_data->ioc_inllen3);
                 from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
                 if (*endp != '\0')
                         RETURN(-EINVAL);
@@ -193,9 +193,9 @@ static int llog_print_cb(struct llog_handle *handle, struct llog_rec_hdr *rec,
         if (ioc_data->ioc_inllen1) {
                 l = 0;
                 remains = ioc_data->ioc_inllen4 +
-                        size_round(ioc_data->ioc_inllen1) +
-                        size_round(ioc_data->ioc_inllen2) +
-                        size_round(ioc_data->ioc_inllen3);
+                        cfs_size_round(ioc_data->ioc_inllen1) +
+                        cfs_size_round(ioc_data->ioc_inllen2) +
+                        cfs_size_round(ioc_data->ioc_inllen3);
                 from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
                 if (*endp != '\0')
                         RETURN(-EINVAL);
@@ -244,7 +244,7 @@ static int llog_remove_log(struct llog_handle *cat, struct llog_logid *logid)
         int rc, index = 0;
 
         ENTRY;
-        down_write(&cat->lgh_lock);
+        cfs_down_write(&cat->lgh_lock);
         rc = llog_cat_id2handle(cat, &log, logid);
         if (rc) {
                 CDEBUG(D_IOCTL, "cannot find log #"LPX64"#"LPX64"#%08x\n",
@@ -263,7 +263,7 @@ static int llog_remove_log(struct llog_handle *cat, struct llog_logid *logid)
         rc = llog_cancel_rec(cat, index);
 out:
         llog_free_handle(log);
-        up_write(&cat->lgh_lock);
+        cfs_up_write(&cat->lgh_lock);
         RETURN(rc);
 
 }
@@ -314,7 +314,7 @@ int llog_ioctl(struct llog_ctxt *ctxt, int cmd, struct obd_ioctl_data *data)
         case OBD_IOC_LLOG_INFO: {
                 int l;
                 int remains = data->ioc_inllen2 +
-                        size_round(data->ioc_inllen1);
+                        cfs_size_round(data->ioc_inllen1);
                 char *out = data->ioc_bulk;
 
                 l = snprintf(out, remains,
@@ -364,9 +364,9 @@ int llog_ioctl(struct llog_ctxt *ctxt, int cmd, struct obd_ioctl_data *data)
                         GOTO(out_close, err = -EINVAL);
 
                 if (handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) {
-                        down_write(&handle->lgh_lock);
+                        cfs_down_write(&handle->lgh_lock);
                         err = llog_cancel_rec(handle, cookie.lgc_index);
-                        up_write(&handle->lgh_lock);
+                        cfs_up_write(&handle->lgh_lock);
                         GOTO(out_close, err);
                 }
 
@@ -437,7 +437,7 @@ int llog_catalog_list(struct obd_device *obd, int count,
         if (!idarray)
                 RETURN(-ENOMEM);
 
-        mutex_down(&obd->obd_olg.olg_cat_processing);
+        cfs_mutex_down(&obd->obd_olg.olg_cat_processing);
         rc = llog_get_cat_list(obd, name, 0, count, idarray);
         if (rc)
                 GOTO(out, rc);
@@ -458,7 +458,7 @@ int llog_catalog_list(struct obd_device *obd, int count,
         }
 out:
         /* release semaphore */
-        mutex_up(&obd->obd_olg.olg_cat_processing);
+        cfs_mutex_up(&obd->obd_olg.olg_cat_processing);
 
         OBD_VFREE(idarray, size);
         RETURN(rc);
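
The ioctl handlers compute the space left in the reply buffer by rounding
each inline buffer up to its aligned length. Assuming cfs_size_round keeps
the 8-byte alignment of the size_round it replaces, it amounts to:

        /* assumed: round val up to the next 8-byte boundary */
        static inline int cfs_size_round(int val)
        {
                return (val + 7) & (~0x7);
        }

cfs_mutex_down()/cfs_mutex_up() in llog_catalog_list() are likewise taken
to be the prefixed spellings of the semaphore-based mutex_down()/mutex_up()
pair they replace.
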
index c0fb21b..f945e90 100644 (file)
@@ -59,7 +59,7 @@ static struct llog_ctxt* llog_new_ctxt(struct obd_device *obd)
                 return NULL;
 
         ctxt->loc_obd = obd;
-        atomic_set(&ctxt->loc_refcount, 1);
+        cfs_atomic_set(&ctxt->loc_refcount, 1);
 
         return ctxt;
 }
@@ -84,20 +84,21 @@ int __llog_ctxt_put(struct llog_ctxt *ctxt)
         struct obd_device *obd;
         int rc = 0;
 
-        spin_lock(&olg->olg_lock);
-        if (!atomic_dec_and_test(&ctxt->loc_refcount)) {
-                spin_unlock(&olg->olg_lock);
+        cfs_spin_lock(&olg->olg_lock);
+        if (!cfs_atomic_dec_and_test(&ctxt->loc_refcount)) {
+                cfs_spin_unlock(&olg->olg_lock);
                 return rc;
         }
         olg->olg_ctxts[ctxt->loc_idx] = NULL;
-        spin_unlock(&olg->olg_lock);
+        cfs_spin_unlock(&olg->olg_lock);
 
         if (ctxt->loc_lcm)
                 lcm_put(ctxt->loc_lcm);
 
         obd = ctxt->loc_obd;
-        spin_lock(&obd->obd_dev_lock);
-        spin_unlock(&obd->obd_dev_lock); /* sync with llog ctxt user thread */
+        cfs_spin_lock(&obd->obd_dev_lock);
+        /* sync with llog ctxt user thread */
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         /* obd->obd_starting is needed to handle cleanup in the
          * error case while the obd is still starting up. */
@@ -111,7 +112,7 @@ int __llog_ctxt_put(struct llog_ctxt *ctxt)
                 rc = CTXTP(ctxt, cleanup)(ctxt);
 
         llog_ctxt_destroy(ctxt);
-        wake_up(&olg->olg_waitq);
+        cfs_waitq_signal(&olg->olg_waitq);
         return rc;
 }
 EXPORT_SYMBOL(__llog_ctxt_put);
@@ -135,8 +136,8 @@ int llog_cleanup(struct llog_ctxt *ctxt)
         /* 
          * Balance the ctxt get when calling llog_cleanup()
          */
-        LASSERT(atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
-        LASSERT(atomic_read(&ctxt->loc_refcount) > 1);
+        LASSERT(cfs_atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
+        LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 1);
         llog_ctxt_put(ctxt);
 
         /* 
@@ -176,7 +177,7 @@ int llog_setup_named(struct obd_device *obd,  struct obd_llog_group *olg,
         ctxt->loc_olg = olg;
         ctxt->loc_idx = index;
         ctxt->loc_logops = op;
-        sema_init(&ctxt->loc_sem, 1);
+        cfs_sema_init(&ctxt->loc_sem, 1);
         ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
         ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
 
@@ -402,9 +403,9 @@ int llog_obd_origin_cleanup(struct llog_ctxt *ctxt)
 
         cathandle = ctxt->loc_handle;
         if (cathandle) {
-                list_for_each_entry_safe(loghandle, n,
-                                         &cathandle->u.chd.chd_head,
-                                         u.phd.phd_entry) {
+                cfs_list_for_each_entry_safe(loghandle, n,
+                                             &cathandle->u.chd.chd_head,
+                                             u.phd.phd_entry) {
                         llh = loghandle->lgh_hdr;
                         if ((llh->llh_flags &
                                 LLOG_F_ZAP_WHEN_EMPTY) &&
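
__llog_ctxt_put() collects several primitive families in one place:
cfs_atomic_* for the refcount, cfs_spin_lock/cfs_spin_unlock for the group
lock, and cfs_waitq_signal where the old code used wake_up. Note the empty
lock/unlock pair on obd_dev_lock: it protects nothing by itself; it is a
barrier that waits out any thread still inside a ctxt-user critical
section. A sketch of the assumed Linux aliases:

        #define cfs_spin_lock(l)            spin_lock(l)
        #define cfs_spin_unlock(l)          spin_unlock(l)
        #define cfs_atomic_dec_and_test(a)  atomic_dec_and_test(a)
        #define cfs_waitq_signal(wq)        wake_up(wq)
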
index d059730..e2ec7ce 100644 (file)
@@ -58,7 +58,7 @@
 #define MAX_STRING_SIZE 128
 
 /* for bug 10866, global variable */
-DECLARE_RWSEM(_lprocfs_lock);
+CFS_DECLARE_RWSEM(_lprocfs_lock);
 EXPORT_SYMBOL(_lprocfs_lock);
 
 int lprocfs_seq_release(struct inode *inode, struct file *file)
@@ -199,7 +199,7 @@ static ssize_t lprocfs_fops_read(struct file *f, char __user *buf,
         }
 
         count = (rc < size) ? rc : size;
-        if (copy_to_user(buf, start, count)) {
+        if (cfs_copy_to_user(buf, start, count)) {
                 rc = -EFAULT;
                 goto out;
         }
@@ -235,7 +235,7 @@ int lprocfs_evict_client_open(struct inode *inode, struct file *f)
         struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
         struct obd_device *obd = dp->data;
 
-        atomic_inc(&obd->obd_evict_inprogress);
+        cfs_atomic_inc(&obd->obd_evict_inprogress);
 
         return 0;
 }
@@ -245,8 +245,8 @@ int lprocfs_evict_client_release(struct inode *inode, struct file *f)
         struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
         struct obd_device *obd = dp->data;
 
-        atomic_dec(&obd->obd_evict_inprogress);
-        wake_up(&obd->obd_evict_inprogress_waitq);
+        cfs_atomic_dec(&obd->obd_evict_inprogress);
+        cfs_waitq_signal(&obd->obd_evict_inprogress_waitq);
 
         return 0;
 }
@@ -427,7 +427,7 @@ int lprocfs_wr_uint(struct file *file, const char *buffer,
         unsigned long tmp;
 
         dummy[MAX_STRING_SIZE] = '\0';
-        if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+        if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE))
                 return -EFAULT;
 
         tmp = simple_strtoul(dummy, &end, 0);
@@ -449,16 +449,16 @@ int lprocfs_rd_u64(char *page, char **start, off_t off,
 int lprocfs_rd_atomic(char *page, char **start, off_t off,
                    int count, int *eof, void *data)
 {
-        atomic_t *atom = data;
+        cfs_atomic_t *atom = data;
         LASSERT(atom != NULL);
         *eof = 1;
-        return snprintf(page, count, "%d\n", atomic_read(atom));
+        return snprintf(page, count, "%d\n", cfs_atomic_read(atom));
 }
 
 int lprocfs_wr_atomic(struct file *file, const char *buffer,
                       unsigned long count, void *data)
 {
-        atomic_t *atm = data;
+        cfs_atomic_t *atm = data;
         int val = 0;
         int rc;
 
@@ -469,7 +469,7 @@ int lprocfs_wr_atomic(struct file *file, const char *buffer,
         if (val <= 0)
                 return -ERANGE;
 
-        atomic_set(atm, val);
+        cfs_atomic_set(atm, val);
         return count;
 }
 
@@ -509,7 +509,7 @@ int lprocfs_rd_blksize(char *page, char **start, off_t off, int count,
                        int *eof, void *data)
 {
         struct obd_statfs osfs;
-        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
                             OBD_STATFS_NODELAY);
         if (!rc) {
                 *eof = 1;
@@ -522,7 +522,7 @@ int lprocfs_rd_kbytestotal(char *page, char **start, off_t off, int count,
                            int *eof, void *data)
 {
         struct obd_statfs osfs;
-        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
                             OBD_STATFS_NODELAY);
         if (!rc) {
                 __u32 blk_size = osfs.os_bsize >> 10;
@@ -541,7 +541,7 @@ int lprocfs_rd_kbytesfree(char *page, char **start, off_t off, int count,
                           int *eof, void *data)
 {
         struct obd_statfs osfs;
-        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
                             OBD_STATFS_NODELAY);
         if (!rc) {
                 __u32 blk_size = osfs.os_bsize >> 10;
@@ -560,7 +560,7 @@ int lprocfs_rd_kbytesavail(char *page, char **start, off_t off, int count,
                            int *eof, void *data)
 {
         struct obd_statfs osfs;
-        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
                             OBD_STATFS_NODELAY);
         if (!rc) {
                 __u32 blk_size = osfs.os_bsize >> 10;
@@ -579,7 +579,7 @@ int lprocfs_rd_filestotal(char *page, char **start, off_t off, int count,
                           int *eof, void *data)
 {
         struct obd_statfs osfs;
-        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
                             OBD_STATFS_NODELAY);
         if (!rc) {
                 *eof = 1;
@@ -593,7 +593,7 @@ int lprocfs_rd_filesfree(char *page, char **start, off_t off, int count,
                          int *eof, void *data)
 {
         struct obd_statfs osfs;
-        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+        int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
                             OBD_STATFS_NODELAY);
         if (!rc) {
                 *eof = 1;
@@ -668,20 +668,23 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
         if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
                 num_cpu = 1;
         else
-                num_cpu = num_possible_cpus();
+                num_cpu = cfs_num_possible_cpus();
 
         for (i = 0; i < num_cpu; i++) {
                 percpu_cntr = &(stats->ls_percpu[i])->lp_cntr[idx];
 
                 do {
-                        centry = atomic_read(&percpu_cntr->lc_cntl.la_entry);
+                        centry =
+                             cfs_atomic_read(&percpu_cntr->lc_cntl.la_entry);
                         t.lc_count = percpu_cntr->lc_count;
                         t.lc_sum = percpu_cntr->lc_sum;
                         t.lc_min = percpu_cntr->lc_min;
                         t.lc_max = percpu_cntr->lc_max;
                         t.lc_sumsquare = percpu_cntr->lc_sumsquare;
-                } while (centry != atomic_read(&percpu_cntr->lc_cntl.la_entry) &&
-                         centry != atomic_read(&percpu_cntr->lc_cntl.la_exit));
+                } while (centry !=
+                         cfs_atomic_read(&percpu_cntr->lc_cntl.la_entry) &&
+                         centry !=
+                         cfs_atomic_read(&percpu_cntr->lc_cntl.la_exit));
                 cnt->lc_count += t.lc_count;
                 cnt->lc_sum += t.lc_sum;
                 if (t.lc_min < cnt->lc_min)
@@ -813,7 +816,7 @@ int lprocfs_rd_import(char *page, char **start, off_t off, int count,
                       "       in-progress_invalidations: %u\n",
                       imp->imp_conn_cnt,
                       imp->imp_generation,
-                      atomic_read(&imp->imp_inval_count));
+                      cfs_atomic_read(&imp->imp_inval_count));
 
         lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret);
         do_div(ret.lc_sum, ret.lc_count);
@@ -823,9 +826,9 @@ int lprocfs_rd_import(char *page, char **start, off_t off, int count,
                       "       unregistering: %u\n"
                       "       timeouts: %u\n"
                       "       avg_waittime: "LPU64" %s\n",
-                      atomic_read(&imp->imp_inflight),
-                      atomic_read(&imp->imp_unregistering),
-                      atomic_read(&imp->imp_timeouts),
+                      cfs_atomic_read(&imp->imp_inflight),
+                      cfs_atomic_read(&imp->imp_unregistering),
+                      cfs_atomic_read(&imp->imp_timeouts),
                       ret.lc_sum, ret.lc_units);
 
         k = 0;
@@ -931,7 +934,7 @@ int lprocfs_rd_quota_resend_count(char *page, char **start, off_t off,
         struct obd_device *obd = data;
 
         return snprintf(page, count, "%d\n",
-                        atomic_read(&obd->u.cli.cl_quota_resends));
+                        cfs_atomic_read(&obd->u.cli.cl_quota_resends));
 }
 
 int lprocfs_wr_quota_resend_count(struct file *file, const char *buffer,
@@ -944,7 +947,7 @@ int lprocfs_wr_quota_resend_count(struct file *file, const char *buffer,
         if (rc)
                 return rc;
 
-        atomic_set(&obd->u.cli.cl_quota_resends, val);
+        cfs_atomic_set(&obd->u.cli.cl_quota_resends, val);
 
         return count;
 }
@@ -1081,10 +1084,11 @@ static void lprocfs_free_client_stats(struct nid_stat *client_stat)
                client_stat->nid_proc, client_stat->nid_stats,
                client_stat->nid_brw_stats);
 
-        LASSERTF(atomic_read(&client_stat->nid_exp_ref_count) == 0,
-                 "count %d\n", atomic_read(&client_stat->nid_exp_ref_count));
+        LASSERTF(cfs_atomic_read(&client_stat->nid_exp_ref_count) == 0,
+                 "count %d\n",
+                 cfs_atomic_read(&client_stat->nid_exp_ref_count));
 
-        hlist_del_init(&client_stat->nid_hash);
+        cfs_hlist_del_init(&client_stat->nid_hash);
 
         if (client_stat->nid_proc)
                 lprocfs_remove(&client_stat->nid_proc);
@@ -1110,10 +1114,10 @@ void lprocfs_free_per_client_stats(struct obd_device *obd)
 
         /* we need an extra list because hash_exit is called too early */
         /* no locking needed because all clients have died */
-        while(!list_empty(&obd->obd_nid_stats)) {
-                stat = list_entry(obd->obd_nid_stats.next,
-                                  struct nid_stat, nid_list);
-                list_del_init(&stat->nid_list);
+        while(!cfs_list_empty(&obd->obd_nid_stats)) {
+                stat = cfs_list_entry(obd->obd_nid_stats.next,
+                                      struct nid_stat, nid_list);
+                cfs_list_del_init(&stat->nid_list);
                 lprocfs_free_client_stats(stat);
         }
 
@@ -1134,7 +1138,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
         if (flags & LPROCFS_STATS_FLAG_NOPERCPU)
                 num_cpu = 1;
         else
-                num_cpu = num_possible_cpus();
+                num_cpu = cfs_num_possible_cpus();
 
         OBD_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_cpu]));
         if (stats == NULL)
@@ -1142,7 +1146,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
 
         if (flags & LPROCFS_STATS_FLAG_NOPERCPU) {
                 stats->ls_flags = flags;
-                spin_lock_init(&stats->ls_lock);
+                cfs_spin_lock_init(&stats->ls_lock);
                 /* Use this lock only if there are no percpu areas */
         } else {
                 stats->ls_flags = 0;
@@ -1150,7 +1154,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
 
         percpusize = offsetof(struct lprocfs_percpu, lp_cntr[num]);
         if (num_cpu > 1)
-                percpusize = L1_CACHE_ALIGN(percpusize);
+                percpusize = CFS_L1_CACHE_ALIGN(percpusize);
 
         for (i = 0; i < num_cpu; i++) {
                 OBD_ALLOC(stats->ls_percpu[i], percpusize);
@@ -1186,11 +1190,11 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
         if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
                 num_cpu = 1;
         else
-                num_cpu = num_possible_cpus();
+                num_cpu = cfs_num_possible_cpus();
 
         percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);
         if (num_cpu > 1)
-                percpusize = L1_CACHE_ALIGN(percpusize);
+                percpusize = CFS_L1_CACHE_ALIGN(percpusize);
         for (i = 0; i < num_cpu; i++)
                 OBD_FREE(stats->ls_percpu[i], percpusize);
         OBD_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_cpu]));
@@ -1207,13 +1211,13 @@ void lprocfs_clear_stats(struct lprocfs_stats *stats)
         for (i = 0; i < num_cpu; i++) {
                 for (j = 0; j < stats->ls_num; j++) {
                         percpu_cntr = &(stats->ls_percpu[i])->lp_cntr[j];
-                        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
+                        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
                         percpu_cntr->lc_count = 0;
                         percpu_cntr->lc_sum = 0;
                         percpu_cntr->lc_min = LC_MIN_INIT;
                         percpu_cntr->lc_max = 0;
                         percpu_cntr->lc_sumsquare = 0;
-                        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
+                        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
                 }
         }
 
@@ -1261,7 +1265,7 @@ static int lprocfs_stats_seq_show(struct seq_file *p, void *v)
 
        if (cntr == &(stats->ls_percpu[0])->lp_cntr[0]) {
                struct timeval now;
-               do_gettimeofday(&now);
+               cfs_gettimeofday(&now);
                rc = seq_printf(p, "%-25s %lu.%lu secs.usecs\n",
                                "snapshot_time", now.tv_sec, now.tv_usec);
                if (rc < 0)
@@ -1704,13 +1708,13 @@ void lprocfs_nid_stats_clear_write_cb(void *obj, void *data)
         ENTRY;
         /* object has only hash + iterate_all references.
          * add/delete blocked by hash bucket lock */
-        CDEBUG(D_INFO,"refcnt %d\n", atomic_read(&stat->nid_exp_ref_count));
-        if (atomic_read(&stat->nid_exp_ref_count) == 2) {
-                hlist_del_init(&stat->nid_hash);
+        CDEBUG(D_INFO,"refcnt %d\n", cfs_atomic_read(&stat->nid_exp_ref_count));
+        if (cfs_atomic_read(&stat->nid_exp_ref_count) == 2) {
+                cfs_hlist_del_init(&stat->nid_hash);
                 nidstat_putref(stat);
-                spin_lock(&stat->nid_obd->obd_nid_lock);
-                list_move(&stat->nid_list, data);
-                spin_unlock(&stat->nid_obd->obd_nid_lock);
+                cfs_spin_lock(&stat->nid_obd->obd_nid_lock);
+                cfs_list_move(&stat->nid_list, data);
+                cfs_spin_unlock(&stat->nid_obd->obd_nid_lock);
                 EXIT;
                 return;
         }
@@ -1736,10 +1740,10 @@ int lprocfs_nid_stats_clear_write(struct file *file, const char *buffer,
         cfs_hash_for_each(obd->obd_nid_stats_hash,
                           lprocfs_nid_stats_clear_write_cb, &free_list);
 
-        while (!list_empty(&free_list)) {
-                client_stat = list_entry(free_list.next, struct nid_stat,
-                                         nid_list);
-                list_del_init(&client_stat->nid_list);
+        while (!cfs_list_empty(&free_list)) {
+                client_stat = cfs_list_entry(free_list.next, struct nid_stat,
+                                             nid_list);
+                cfs_list_del_init(&client_stat->nid_list);
                 lprocfs_free_client_stats(client_stat);
         }
 
@@ -1777,18 +1781,18 @@ int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
 
         new_stat->nid               = *nid;
         new_stat->nid_obd           = exp->exp_obd;
-        atomic_set(&new_stat->nid_exp_ref_count, 0);
+        cfs_atomic_set(&new_stat->nid_exp_ref_count, 0);
 
         old_stat = cfs_hash_findadd_unique(obd->obd_nid_stats_hash,
                                            nid, &new_stat->nid_hash);
         CDEBUG(D_INFO, "Found stats %p for nid %s - ref %d\n",
                old_stat, libcfs_nid2str(*nid),
-               atomic_read(&new_stat->nid_exp_ref_count));
+               cfs_atomic_read(&new_stat->nid_exp_ref_count));
 
         /* Return -EALREADY here so that we know that the /proc
          * entry has already been created */
         if (old_stat != new_stat) {
-                spin_lock(&obd->obd_nid_lock);
+                cfs_spin_lock(&obd->obd_nid_lock);
                 if (exp->exp_nid_stats != old_stat) {
                         if (exp->exp_nid_stats)
                                 nidstat_putref(exp->exp_nid_stats);
@@ -1799,7 +1803,7 @@ int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
                         nidstat_putref(old_stat);
                 }
 
-                spin_unlock(&obd->obd_nid_lock);
+                cfs_spin_unlock(&obd->obd_nid_lock);
 
                 GOTO(destroy_new, rc = -EALREADY);
         }
@@ -1835,9 +1839,9 @@ int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
         exp->exp_nid_stats = new_stat;
         *newnid = 1;
         /* protect against concurrent list adds; no locking needed on destroy */
-        spin_lock(&obd->obd_nid_lock);
-        list_add(&new_stat->nid_list, &obd->obd_nid_stats);
-        spin_unlock(&obd->obd_nid_lock);
+        cfs_spin_lock(&obd->obd_nid_lock);
+        cfs_list_add(&new_stat->nid_list, &obd->obd_nid_stats);
+        cfs_spin_unlock(&obd->obd_nid_lock);
 
         RETURN(rc);
 
@@ -1878,7 +1882,7 @@ int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
         if (count > (sizeof(kernbuf) - 1))
                 return -EINVAL;
 
-        if (copy_from_user(kernbuf, buffer, count))
+        if (cfs_copy_from_user(kernbuf, buffer, count))
                 return -EFAULT;
 
         kernbuf[count] = '\0';
@@ -1981,7 +1985,7 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
         if (count > (sizeof(kernbuf) - 1))
                 return -EINVAL;
 
-        if (copy_from_user(kernbuf, buffer, count))
+        if (cfs_copy_from_user(kernbuf, buffer, count))
                 return -EFAULT;
 
         kernbuf[count] = '\0';
@@ -2063,9 +2067,9 @@ void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
         if (value >= OBD_HIST_MAX)
                 value = OBD_HIST_MAX - 1;
 
-        spin_lock(&oh->oh_lock);
+        cfs_spin_lock(&oh->oh_lock);
         oh->oh_buckets[value]++;
-        spin_unlock(&oh->oh_lock);
+        cfs_spin_unlock(&oh->oh_lock);
 }
 EXPORT_SYMBOL(lprocfs_oh_tally);
 
@@ -2093,9 +2097,9 @@ EXPORT_SYMBOL(lprocfs_oh_sum);
 
 void lprocfs_oh_clear(struct obd_histogram *oh)
 {
-        spin_lock(&oh->oh_lock);
+        cfs_spin_lock(&oh->oh_lock);
         memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
-        spin_unlock(&oh->oh_lock);
+        cfs_spin_unlock(&oh->oh_lock);
 }
 EXPORT_SYMBOL(lprocfs_oh_clear);
 
@@ -2199,14 +2203,17 @@ int lprocfs_obd_rd_recovery_status(char *page, char **start, off_t off,
                 goto out;
         /* Number of clients that have completed recovery */
         if (lprocfs_obd_snprintf(&page, size, &len,"req_replay_clients: %d\n",
-                                 atomic_read(&obd->obd_req_replay_clients))<= 0)
+                                 cfs_atomic_read(&obd->obd_req_replay_clients))
+                <= 0)
                 goto out;
         if (lprocfs_obd_snprintf(&page, size, &len,"lock_repay_clients: %d\n",
-                                 atomic_read(&obd->obd_lock_replay_clients))<=0)
+                                 cfs_atomic_read(&obd->obd_lock_replay_clients))
+                <=0)
                 goto out;
         if (lprocfs_obd_snprintf(&page, size, &len,"completed_clients: %d\n",
                                  obd->obd_connected_clients -
-                                 atomic_read(&obd->obd_lock_replay_clients))<=0)
+                                 cfs_atomic_read(&obd->obd_lock_replay_clients))
+                <= 0)
                 goto out;
         if (lprocfs_obd_snprintf(&page, size, &len,"evicted_clients: %d\n",
                                  obd->obd_stale_clients) <= 0)
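
lprocfs_stats_collect() above reads per-cpu counters without a lock: a
writer bumps la_entry before an update and la_exit after it, so a reader
retries its snapshot until both counters still match the value it started
from, a hand-rolled seqlock. A simplified sketch of the pattern (the helper
name is illustrative; the retry condition here is the strict form):

        static void stats_snapshot(struct lprocfs_counter *c,
                                   struct lprocfs_counter *out)
        {
                int centry;

                do {
                        centry = cfs_atomic_read(&c->lc_cntl.la_entry);
                        *out = *c;      /* copy count/sum/min/max/sumsquare */
                } while (centry != cfs_atomic_read(&c->lc_cntl.la_entry) ||
                         centry != cfs_atomic_read(&c->lc_cntl.la_exit));
        }
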
index f7bdbcc..fe5ee83 100644 (file)
@@ -82,13 +82,14 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         site = o->lo_dev->ld_site;
         orig = o;
         kill_it = 0;
-        write_lock(&site->ls_guard);
-        if (atomic_dec_and_test(&top->loh_ref)) {
+        cfs_write_lock(&site->ls_guard);
+        if (cfs_atomic_dec_and_test(&top->loh_ref)) {
                 /*
                  * When last reference is released, iterate over object
                  * layers, and notify them that object is no longer busy.
                  */
-                list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+                cfs_list_for_each_entry_reverse(o, &top->loh_layers,
+                                                lo_linkage) {
                         if (o->lo_ops->loo_object_release != NULL)
                                 o->lo_ops->loo_object_release(env, o);
                 }
@@ -107,13 +108,13 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
                          * object lookup is possible and we can safely destroy
                          * object below.
                          */
-                        hlist_del_init(&top->loh_hash);
-                        list_del_init(&top->loh_lru);
+                        cfs_hlist_del_init(&top->loh_hash);
+                        cfs_list_del_init(&top->loh_lru);
                         -- site->ls_total;
                         kill_it = 1;
                 }
         }
-        write_unlock(&site->ls_guard);
+        cfs_write_unlock(&site->ls_guard);
         if (kill_it)
                 /*
                  * Object was already removed from hash and lru above, can
@@ -136,7 +137,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
 {
         struct lu_object *scan;
         struct lu_object *top;
-        struct list_head *layers;
+        cfs_list_t *layers;
         int clean;
         int result;
         ENTRY;
@@ -161,7 +162,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
                  * object slices are created.
                  */
                 clean = 1;
-                list_for_each_entry(scan, layers, lo_linkage) {
+                cfs_list_for_each_entry(scan, layers, lo_linkage) {
                         if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                 continue;
                         clean = 0;
@@ -175,7 +176,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
                 }
         } while (!clean);
 
-        list_for_each_entry_reverse(scan, layers, lo_linkage) {
+        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                 if (scan->lo_ops->loo_object_start != NULL) {
                         result = scan->lo_ops->loo_object_start(env, scan);
                         if (result != 0) {
@@ -194,17 +195,17 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
  */
 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
 {
-        struct list_head  splice;
-        struct lu_object *scan;
-        struct lu_site   *site;
-        struct list_head *layers;
+        cfs_list_t            splice;
+        struct lu_object     *scan;
+        struct lu_site       *site;
+        cfs_list_t           *layers;
 
         site   = o->lo_dev->ld_site;
         layers = &o->lo_header->loh_layers;
         /*
          * First call ->loo_object_delete() method to release all resources.
          */
-        list_for_each_entry_reverse(scan, layers, lo_linkage) {
+        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                 if (scan->lo_ops->loo_object_delete != NULL)
                         scan->lo_ops->loo_object_delete(env, scan);
         }
@@ -216,15 +217,15 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
          * top-level slice.
          */
         CFS_INIT_LIST_HEAD(&splice);
-        list_splice_init(layers, &splice);
-        while (!list_empty(&splice)) {
+        cfs_list_splice_init(layers, &splice);
+        while (!cfs_list_empty(&splice)) {
                 /*
                  * Free layers in bottom-to-top order, so that object header
                  * lives as long as possible and ->loo_object_free() methods
                  * can look at its contents.
                  */
                 o = container_of0(splice.prev, struct lu_object, lo_linkage);
-                list_del_init(&o->lo_linkage);
+                cfs_list_del_init(&o->lo_linkage);
                 LASSERT(o->lo_ops->loo_object_free != NULL);
                 o->lo_ops->loo_object_free(env, o);
         }
@@ -236,7 +237,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
  */
 int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
 {
-        struct list_head         dispose;
+        cfs_list_t               dispose;
         struct lu_object_header *h;
         struct lu_object_header *temp;
 
@@ -245,8 +246,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
          * Under LRU list lock, scan LRU list and move unreferenced objects to
          * the dispose list, removing them from LRU and hash table.
          */
-        write_lock(&s->ls_guard);
-        list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
+        cfs_write_lock(&s->ls_guard);
+        cfs_list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
                 /*
                  * Objects are sorted in lru order, and "busy" objects (ones
                  * with h->loh_ref > 0) naturally tend to live near hot end
@@ -259,21 +260,21 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                  */
                 if (nr-- == 0)
                         break;
-                if (atomic_read(&h->loh_ref) > 0)
+                if (cfs_atomic_read(&h->loh_ref) > 0)
                         continue;
-                hlist_del_init(&h->loh_hash);
-                list_move(&h->loh_lru, &dispose);
+                cfs_hlist_del_init(&h->loh_hash);
+                cfs_list_move(&h->loh_lru, &dispose);
                 s->ls_total --;
         }
-        write_unlock(&s->ls_guard);
+        cfs_write_unlock(&s->ls_guard);
         /*
          * Free everything on the dispose list. This is safe against races due
          * to the reasons described in lu_object_put().
          */
-        while (!list_empty(&dispose)) {
+        while (!cfs_list_empty(&dispose)) {
                 h = container_of0(dispose.next,
                                  struct lu_object_header, loh_lru);
-                list_del_init(&h->loh_lru);
+                cfs_list_del_init(&h->loh_lru);
                 lu_object_free(env, lu_object_top(h));
                 s->ls_stats.s_lru_purged ++;
         }
@@ -351,7 +352,7 @@ int lu_cdebug_printer(const struct lu_env *env,
         vsnprintf(key->lck_area + used,
                   ARRAY_SIZE(key->lck_area) - used, format, args);
         if (complete) {
-                if (cdebug_show(info->lpi_mask, info->lpi_subsys))
+                if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys))
                         libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
                                          (char *)info->lpi_file, info->lpi_fn,
                                          info->lpi_line, "%s", key->lck_area);
@@ -370,10 +371,11 @@ void lu_object_header_print(const struct lu_env *env, void *cookie,
                             const struct lu_object_header *hdr)
 {
         (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
-                   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
+                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
                    PFID(&hdr->loh_fid),
-                   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
-                   list_empty((struct list_head *)&hdr->loh_lru) ? "" : " lru",
+                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
+                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ?
+                   "" : " lru",
                    hdr->loh_attr & LOHA_EXISTS ? " exist":"");
 }
 EXPORT_SYMBOL(lu_object_header_print);
@@ -391,7 +393,7 @@ void lu_object_print(const struct lu_env *env, void *cookie,
         top = o->lo_header;
         lu_object_header_print(env, cookie, printer, top);
         (*printer)(env, cookie, "{ \n");
-        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                 depth = o->lo_depth + 4;
 
                 /*
@@ -415,7 +417,7 @@ int lu_object_invariant(const struct lu_object *o)
         struct lu_object_header *top;
 
         top = o->lo_header;
-        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                 if (o->lo_ops->loo_object_invariant != NULL &&
                     !o->lo_ops->loo_object_invariant(o))
                         return 0;
@@ -425,14 +427,14 @@ int lu_object_invariant(const struct lu_object *o)
 EXPORT_SYMBOL(lu_object_invariant);
 
 static struct lu_object *htable_lookup(struct lu_site *s,
-                                       const struct hlist_head *bucket,
+                                       const cfs_hlist_head_t *bucket,
                                        const struct lu_fid *f,
                                        cfs_waitlink_t *waiter)
 {
         struct lu_object_header *h;
-        struct hlist_node *scan;
+        cfs_hlist_node_t *scan;
 
-        hlist_for_each_entry(h, scan, bucket, loh_hash) {
+        cfs_hlist_for_each_entry(h, scan, bucket, loh_hash) {
                 s->ls_stats.s_cache_check ++;
                 if (likely(lu_fid_eq(&h->loh_fid, f))) {
                         if (unlikely(lu_object_is_dying(h))) {
@@ -445,12 +447,12 @@ static struct lu_object *htable_lookup(struct lu_site *s,
                                  */
                                 cfs_waitlink_init(waiter);
                                 cfs_waitq_add(&s->ls_marche_funebre, waiter);
-                                set_current_state(CFS_TASK_UNINT);
+                                cfs_set_current_state(CFS_TASK_UNINT);
                                 s->ls_stats.s_cache_death_race ++;
                                 return ERR_PTR(-EAGAIN);
                         }
                         /* bump reference count... */
-                        if (atomic_add_return(1, &h->loh_ref) == 1)
+                        if (cfs_atomic_add_return(1, &h->loh_ref) == 1)
                                 ++ s->ls_busy;
                         /* and move to the head of the LRU */
                         /*
@@ -470,7 +472,7 @@ static __u32 fid_hash(const struct lu_fid *f, int bits)
 {
         /* all objects with the same id and different versions will belong
          * to the same collision list. */
-        return hash_long(fid_flatten(f), bits);
+        return cfs_hash_long(fid_flatten(f), bits);
 }
 
 /**
@@ -495,10 +497,10 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                             const struct lu_object_conf *conf,
                                             cfs_waitlink_t *waiter)
 {
-        struct lu_site    *s;
-        struct lu_object  *o;
-        struct lu_object  *shadow;
-        struct hlist_head *bucket;
+        struct lu_site        *s;
+        struct lu_object      *o;
+        struct lu_object      *shadow;
+        cfs_hlist_head_t      *bucket;
 
         /*
          * This uses standard index maintenance protocol:
@@ -520,9 +522,9 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
         s = dev->ld_site;
         bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);
 
-        read_lock(&s->ls_guard);
+        cfs_read_lock(&s->ls_guard);
         o = htable_lookup(s, bucket, f, waiter);
-        read_unlock(&s->ls_guard);
+        cfs_read_unlock(&s->ls_guard);
 
         if (o != NULL)
                 return o;
@@ -537,18 +539,18 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
 
         LASSERT(lu_fid_eq(lu_object_fid(o), f));
 
-        write_lock(&s->ls_guard);
+        cfs_write_lock(&s->ls_guard);
         shadow = htable_lookup(s, bucket, f, waiter);
         if (likely(shadow == NULL)) {
-                hlist_add_head(&o->lo_header->loh_hash, bucket);
-                list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
+                cfs_hlist_add_head(&o->lo_header->loh_hash, bucket);
+                cfs_list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
                 ++ s->ls_busy;
                 ++ s->ls_total;
                 shadow = o;
                 o = NULL;
         } else
                 s->ls_stats.s_cache_race ++;
-        write_unlock(&s->ls_guard);
+        cfs_write_unlock(&s->ls_guard);
         if (o != NULL)
                 lu_object_free(env, o);
         return shadow;
@@ -617,14 +619,14 @@ int lu_device_type_init(struct lu_device_type *ldt)
         CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
         result = ldt->ldt_ops->ldto_init(ldt);
         if (result == 0)
-                list_add(&ldt->ldt_linkage, &lu_device_types);
+                cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
         return result;
 }
 EXPORT_SYMBOL(lu_device_type_init);
 
 void lu_device_type_fini(struct lu_device_type *ldt)
 {
-        list_del_init(&ldt->ldt_linkage);
+        cfs_list_del_init(&ldt->ldt_linkage);
         ldt->ldt_ops->ldto_fini(ldt);
 }
 EXPORT_SYMBOL(lu_device_type_fini);
@@ -633,7 +635,7 @@ void lu_types_stop(void)
 {
         struct lu_device_type *ldt;
 
-        list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
+        cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                 if (ldt->ldt_device_nr == 0)
                         ldt->ldt_ops->ldto_stop(ldt);
         }
@@ -644,7 +646,7 @@ EXPORT_SYMBOL(lu_types_stop);
  * Global list of all sites on this node
  */
 static CFS_LIST_HEAD(lu_sites);
-static DECLARE_MUTEX(lu_sites_guard);
+static CFS_DECLARE_MUTEX(lu_sites_guard);
 
 /**
  * Global environment used by site shrinker.
@@ -661,12 +663,12 @@ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
 
         for (i = 0; i < s->ls_hash_size; ++i) {
                 struct lu_object_header *h;
-                struct hlist_node       *scan;
+                cfs_hlist_node_t        *scan;
 
-                read_lock(&s->ls_guard);
-                hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
+                cfs_read_lock(&s->ls_guard);
+                cfs_hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
 
-                        if (!list_empty(&h->loh_layers)) {
+                        if (!cfs_list_empty(&h->loh_layers)) {
                                 const struct lu_object *obj;
 
                                 obj = lu_object_top(h);
@@ -674,7 +676,7 @@ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                         } else
                                 lu_object_header_print(env, cookie, printer, h);
                 }
-                read_unlock(&s->ls_guard);
+                cfs_read_unlock(&s->ls_guard);
         }
 }
 EXPORT_SYMBOL(lu_site_print);
@@ -698,7 +700,7 @@ static int lu_htable_order(void)
          *
          * The size of an lu_object is (arbitrarily) taken as 1K (with inode).
          */
-        cache_size = num_physpages;
+        cache_size = cfs_num_physpages;
 
 #if BITS_PER_LONG == 32
         /* limit hashtable size for lowmem systems to low RAM */
@@ -715,7 +717,7 @@ static int lu_htable_order(void)
         return bits;
 }
 
-static struct lock_class_key lu_site_guard_class;
+static cfs_lock_class_key_t lu_site_guard_class;
 
 /**
  * Initialize site \a s, with \a d as the top level device.
@@ -728,8 +730,8 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
         ENTRY;
 
         memset(s, 0, sizeof *s);
-        rwlock_init(&s->ls_guard);
-        lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
+        cfs_rwlock_init(&s->ls_guard);
+        cfs_lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
         CFS_INIT_LIST_HEAD(&s->ls_lru);
         CFS_INIT_LIST_HEAD(&s->ls_linkage);
         cfs_waitq_init(&s->ls_marche_funebre);
@@ -753,7 +755,7 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
         s->ls_hash_mask = size - 1;
 
         for (i = 0; i < size; i++)
-                INIT_HLIST_HEAD(&s->ls_hash[i]);
+                CFS_INIT_HLIST_HEAD(&s->ls_hash[i]);
 
         RETURN(0);
 }
@@ -764,17 +766,17 @@ EXPORT_SYMBOL(lu_site_init);
  */
 void lu_site_fini(struct lu_site *s)
 {
-        LASSERT(list_empty(&s->ls_lru));
+        LASSERT(cfs_list_empty(&s->ls_lru));
         LASSERT(s->ls_total == 0);
 
-        down(&lu_sites_guard);
-        list_del_init(&s->ls_linkage);
-        up(&lu_sites_guard);
+        cfs_down(&lu_sites_guard);
+        cfs_list_del_init(&s->ls_linkage);
+        cfs_up(&lu_sites_guard);
 
         if (s->ls_hash != NULL) {
                 int i;
                 for (i = 0; i < s->ls_hash_size; i++)
-                        LASSERT(hlist_empty(&s->ls_hash[i]));
+                        LASSERT(cfs_hlist_empty(&s->ls_hash[i]));
                 cfs_free_large(s->ls_hash);
                 s->ls_hash = NULL;
         }
@@ -793,11 +795,11 @@ EXPORT_SYMBOL(lu_site_fini);
 int lu_site_init_finish(struct lu_site *s)
 {
         int result;
-        down(&lu_sites_guard);
+        cfs_down(&lu_sites_guard);
         result = lu_context_refill(&lu_shrink_env.le_ctx);
         if (result == 0)
-                list_add(&s->ls_linkage, &lu_sites);
-        up(&lu_sites_guard);
+                cfs_list_add(&s->ls_linkage, &lu_sites);
+        cfs_up(&lu_sites_guard);
         return result;
 }
 EXPORT_SYMBOL(lu_site_init_finish);
@@ -807,7 +809,7 @@ EXPORT_SYMBOL(lu_site_init_finish);
  */
 void lu_device_get(struct lu_device *d)
 {
-        atomic_inc(&d->ld_ref);
+        cfs_atomic_inc(&d->ld_ref);
 }
 EXPORT_SYMBOL(lu_device_get);
 
@@ -816,8 +818,8 @@ EXPORT_SYMBOL(lu_device_get);
  */
 void lu_device_put(struct lu_device *d)
 {
-        LASSERT(atomic_read(&d->ld_ref) > 0);
-        atomic_dec(&d->ld_ref);
+        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
+        cfs_atomic_dec(&d->ld_ref);
 }
 EXPORT_SYMBOL(lu_device_put);
 
@@ -829,7 +831,7 @@ int lu_device_init(struct lu_device *d, struct lu_device_type *t)
         if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                 t->ldt_ops->ldto_start(t);
         memset(d, 0, sizeof *d);
-        atomic_set(&d->ld_ref, 0);
+        cfs_atomic_set(&d->ld_ref, 0);
         d->ld_type = t;
         lu_ref_init(&d->ld_reference);
         return 0;
@@ -850,8 +852,8 @@ void lu_device_fini(struct lu_device *d)
         }
 
         lu_ref_fini(&d->ld_reference);
-        LASSERTF(atomic_read(&d->ld_ref) == 0,
-                 "Refcount is %u\n", atomic_read(&d->ld_ref));
+        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
+                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
         LASSERT(t->ldt_device_nr > 0);
         if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                 t->ldt_ops->ldto_stop(t);
@@ -882,7 +884,7 @@ void lu_object_fini(struct lu_object *o)
 {
         struct lu_device *dev = o->lo_dev;
 
-        LASSERT(list_empty(&o->lo_linkage));
+        LASSERT(cfs_list_empty(&o->lo_linkage));
 
         if (dev != NULL) {
                 lu_ref_del_at(&dev->ld_reference,
@@ -901,7 +903,7 @@ EXPORT_SYMBOL(lu_object_fini);
  */
 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
 {
-        list_move(&o->lo_linkage, &h->loh_layers);
+        cfs_list_move(&o->lo_linkage, &h->loh_layers);
 }
 EXPORT_SYMBOL(lu_object_add_top);
 
@@ -913,7 +915,7 @@ EXPORT_SYMBOL(lu_object_add_top);
  */
 void lu_object_add(struct lu_object *before, struct lu_object *o)
 {
-        list_move(&o->lo_linkage, &before->lo_linkage);
+        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
 }
 EXPORT_SYMBOL(lu_object_add);
 
@@ -923,8 +925,8 @@ EXPORT_SYMBOL(lu_object_add);
 int lu_object_header_init(struct lu_object_header *h)
 {
         memset(h, 0, sizeof *h);
-        atomic_set(&h->loh_ref, 1);
-        INIT_HLIST_NODE(&h->loh_hash);
+        cfs_atomic_set(&h->loh_ref, 1);
+        CFS_INIT_HLIST_NODE(&h->loh_hash);
         CFS_INIT_LIST_HEAD(&h->loh_lru);
         CFS_INIT_LIST_HEAD(&h->loh_layers);
         lu_ref_init(&h->loh_reference);
@@ -937,9 +939,9 @@ EXPORT_SYMBOL(lu_object_header_init);
  */
 void lu_object_header_fini(struct lu_object_header *h)
 {
-        LASSERT(list_empty(&h->loh_layers));
-        LASSERT(list_empty(&h->loh_lru));
-        LASSERT(hlist_unhashed(&h->loh_hash));
+        LASSERT(cfs_list_empty(&h->loh_layers));
+        LASSERT(cfs_list_empty(&h->loh_lru));
+        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
         lu_ref_fini(&h->loh_reference);
 }
 EXPORT_SYMBOL(lu_object_header_fini);
@@ -953,7 +955,7 @@ struct lu_object *lu_object_locate(struct lu_object_header *h,
 {
         struct lu_object *o;
 
-        list_for_each_entry(o, &h->loh_layers, lo_linkage) {
+        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                 if (o->lo_dev->ld_type == dtype)
                         return o;
         }
@@ -986,7 +988,7 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
         /* purge again. */
         lu_site_purge(env, site, ~0);
 
-        if (!list_empty(&site->ls_lru) || site->ls_total != 0) {
+        if (!cfs_list_empty(&site->ls_lru) || site->ls_total != 0) {
                 /*
                  * Uh-oh, objects still exist.
                  */
@@ -1018,7 +1020,7 @@ enum {
 
 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
 
-static spinlock_t lu_keys_guard = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;
 
 /**
  * Global counter incremented whenever key is registered, unregistered,
@@ -1042,11 +1044,11 @@ int lu_context_key_register(struct lu_context_key *key)
         LASSERT(key->lct_owner != NULL);
 
         result = -ENFILE;
-        spin_lock(&lu_keys_guard);
+        cfs_spin_lock(&lu_keys_guard);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 if (lu_keys[i] == NULL) {
                         key->lct_index = i;
-                        atomic_set(&key->lct_used, 1);
+                        cfs_atomic_set(&key->lct_used, 1);
                         lu_keys[i] = key;
                         lu_ref_init(&key->lct_reference);
                         result = 0;
@@ -1054,7 +1056,7 @@ int lu_context_key_register(struct lu_context_key *key)
                         break;
                 }
         }
-        spin_unlock(&lu_keys_guard);
+        cfs_spin_unlock(&lu_keys_guard);
         return result;
 }
 EXPORT_SYMBOL(lu_context_key_register);
@@ -1067,15 +1069,15 @@ static void key_fini(struct lu_context *ctx, int index)
                 key = lu_keys[index];
                 LASSERT(key != NULL);
                 LASSERT(key->lct_fini != NULL);
-                LASSERT(atomic_read(&key->lct_used) > 1);
+                LASSERT(cfs_atomic_read(&key->lct_used) > 1);
 
                 key->lct_fini(ctx, key, ctx->lc_value[index]);
                 lu_ref_del(&key->lct_reference, "ctx", ctx);
-                atomic_dec(&key->lct_used);
+                cfs_atomic_dec(&key->lct_used);
                 LASSERT(key->lct_owner != NULL);
                 if (!(ctx->lc_tags & LCT_NOREF)) {
-                        LASSERT(module_refcount(key->lct_owner) > 0);
-                        module_put(key->lct_owner);
+                        LASSERT(cfs_module_refcount(key->lct_owner) > 0);
+                        cfs_module_put(key->lct_owner);
                 }
                 ctx->lc_value[index] = NULL;
         }
@@ -1086,22 +1088,23 @@ static void key_fini(struct lu_context *ctx, int index)
  */
 void lu_context_key_degister(struct lu_context_key *key)
 {
-        LASSERT(atomic_read(&key->lct_used) >= 1);
+        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
 
         lu_context_key_quiesce(key);
 
         ++key_set_version;
-        spin_lock(&lu_keys_guard);
+        cfs_spin_lock(&lu_keys_guard);
         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
         if (lu_keys[key->lct_index]) {
                 lu_keys[key->lct_index] = NULL;
                 lu_ref_fini(&key->lct_reference);
         }
-        spin_unlock(&lu_keys_guard);
+        cfs_spin_unlock(&lu_keys_guard);
 
-        LASSERTF(atomic_read(&key->lct_used) == 1, "key has instances: %d\n",
-                 atomic_read(&key->lct_used));
+        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
+                 "key has instances: %d\n",
+                 cfs_atomic_read(&key->lct_used));
 }
 EXPORT_SYMBOL(lu_context_key_degister);
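lu_keys_guard above also shows the static-initializer side of the spinlock rename: CFS_SPIN_LOCK_UNLOCKED replaces SPIN_LOCK_UNLOCKED so non-Linux ports can supply their own initializer. The assumed Linux mapping:

    /* Sketch only: assumed 1:1 aliases onto kernel spinlocks.
     * SPIN_LOCK_UNLOCKED existed in kernels of this era; later kernels
     * dropped it in favour of DEFINE_SPINLOCK()/spin_lock_init(). */
    #include <linux/spinlock.h>

    typedef spinlock_t cfs_spinlock_t;

    #define CFS_SPIN_LOCK_UNLOCKED   SPIN_LOCK_UNLOCKED
    #define cfs_spin_lock_init(l)    spin_lock_init(l)
    #define cfs_spin_lock(l)         spin_lock(l)
    #define cfs_spin_unlock(l)       spin_unlock(l)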
 
@@ -1223,10 +1226,11 @@ void lu_context_key_quiesce(struct lu_context_key *key)
                 /*
                  * XXX memory barrier has to go here.
                  */
-                spin_lock(&lu_keys_guard);
-                list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
+                cfs_spin_lock(&lu_keys_guard);
+                cfs_list_for_each_entry(ctx, &lu_context_remembered,
+                                        lc_remember)
                         key_fini(ctx, key->lct_index);
-                spin_unlock(&lu_keys_guard);
+                cfs_spin_unlock(&lu_keys_guard);
                 ++key_set_version;
         }
 }
@@ -1243,7 +1247,7 @@ static void keys_fini(struct lu_context *ctx)
 {
         int i;
 
-        spin_lock(&lu_keys_guard);
+        cfs_spin_lock(&lu_keys_guard);
         if (ctx->lc_value != NULL) {
                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
                         key_fini(ctx, i);
@@ -1251,7 +1255,7 @@ static void keys_fini(struct lu_context *ctx)
                          ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
                 ctx->lc_value = NULL;
         }
-        spin_unlock(&lu_keys_guard);
+        cfs_spin_unlock(&lu_keys_guard);
 }
 
 static int keys_fill(struct lu_context *ctx)
@@ -1280,9 +1284,9 @@ static int keys_fill(struct lu_context *ctx)
 
                         LASSERT(key->lct_owner != NULL);
                         if (!(ctx->lc_tags & LCT_NOREF))
-                                try_module_get(key->lct_owner);
+                                cfs_try_module_get(key->lct_owner);
                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
-                        atomic_inc(&key->lct_used);
+                        cfs_atomic_inc(&key->lct_used);
                         /*
                          * This is the only place in the code, where an
                          * element of ctx->lc_value[] array is set to non-NULL
@@ -1321,9 +1325,9 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
         ctx->lc_state = LCS_INITIALIZED;
         ctx->lc_tags = tags;
         if (tags & LCT_REMEMBER) {
-                spin_lock(&lu_keys_guard);
-                list_add(&ctx->lc_remember, &lu_context_remembered);
-                spin_unlock(&lu_keys_guard);
+                cfs_spin_lock(&lu_keys_guard);
+                cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+                cfs_spin_unlock(&lu_keys_guard);
         } else
                 CFS_INIT_LIST_HEAD(&ctx->lc_remember);
         return keys_init(ctx);
@@ -1338,9 +1342,9 @@ void lu_context_fini(struct lu_context *ctx)
         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
         ctx->lc_state = LCS_FINALIZED;
         keys_fini(ctx);
-        spin_lock(&lu_keys_guard);
-        list_del_init(&ctx->lc_remember);
-        spin_unlock(&lu_keys_guard);
+        cfs_spin_lock(&lu_keys_guard);
+        cfs_list_del_init(&ctx->lc_remember);
+        cfs_spin_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_fini);
 
@@ -1421,7 +1425,7 @@ int lu_env_refill(struct lu_env *env)
 }
 EXPORT_SYMBOL(lu_env_refill);
 
-static struct shrinker *lu_site_shrinker = NULL;
+static struct cfs_shrinker *lu_site_shrinker = NULL;
 
 #ifdef __KERNEL__
 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
@@ -1438,24 +1442,24 @@ static int lu_cache_shrink(int nr, unsigned int gfp_mask)
                 CDEBUG(D_INODE, "Shrink %d objects\n", nr);
         }
 
-        down(&lu_sites_guard);
-        list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+        cfs_down(&lu_sites_guard);
+        cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                 if (nr != 0) {
                         remain = lu_site_purge(&lu_shrink_env, s, remain);
                         /*
                          * Move just shrunk site to the tail of site list to
                          * assure shrinking fairness.
                          */
-                        list_move_tail(&s->ls_linkage, &splice);
+                        cfs_list_move_tail(&s->ls_linkage, &splice);
                 }
-                read_lock(&s->ls_guard);
+                cfs_read_lock(&s->ls_guard);
                 cached += s->ls_total - s->ls_busy;
-                read_unlock(&s->ls_guard);
+                cfs_read_unlock(&s->ls_guard);
                 if (nr && remain <= 0)
                         break;
         }
-        list_splice(&splice, lu_sites.prev);
-        up(&lu_sites_guard);
+        cfs_list_splice(&splice, lu_sites.prev);
+        cfs_up(&lu_sites_guard);
 
         cached = (cached / 100) * sysctl_vfs_cache_pressure;
         if (nr == 0)
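lu_cache_shrink() is registered through the renamed shrinker hooks below; cfs_set_shrinker()/cfs_remove_shrinker() hide the kernel's cache-shrinker API, which changed shape several times across kernel versions. A sketch assuming a kernel that still exports the legacy set_shrinker() entry points (newer kernels use register_shrinker() and need a small adapter instead):

    /* Sketch only: assumed mapping onto the legacy shrinker API. */
    #include <linux/mm.h>

    #define cfs_shrinker                 shrinker
    #define CFS_DEFAULT_SEEKS            DEFAULT_SEEKS
    #define cfs_set_shrinker(seeks, cb)  set_shrinker(seeks, cb)
    #define cfs_remove_shrinker(s)       remove_shrinker(s)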
@@ -1503,7 +1507,7 @@ void lu_context_keys_dump(void)
                         CERROR("[%i]: %p %x (%p,%p,%p) %i %i \"%s\"@%p\n",
                                i, key, key->lct_tags,
                                key->lct_init, key->lct_fini, key->lct_exit,
-                               key->lct_index, atomic_read(&key->lct_used),
+                               key->lct_index, cfs_atomic_read(&key->lct_used),
                                key->lct_owner ? key->lct_owner->name : "",
                                key->lct_owner);
                         lu_ref_print(&key->lct_reference);
@@ -1551,9 +1555,9 @@ int lu_global_init(void)
          * conservatively. This should not be too bad, because this
          * environment is global.
          */
-        down(&lu_sites_guard);
+        cfs_down(&lu_sites_guard);
         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
-        up(&lu_sites_guard);
+        cfs_up(&lu_sites_guard);
         if (result != 0)
                 return result;
 
@@ -1562,7 +1566,7 @@ int lu_global_init(void)
          * inode, one for ea. Unfortunately setting this high value results in
          * lu_object/inode cache consuming all the memory.
          */
-        lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
+        lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
         if (lu_site_shrinker == NULL)
                 return -ENOMEM;
 
@@ -1597,7 +1601,7 @@ void lu_global_fini(void)
 #endif
         lu_time_global_fini();
         if (lu_site_shrinker != NULL) {
-                remove_shrinker(lu_site_shrinker);
+                cfs_remove_shrinker(lu_site_shrinker);
                 lu_site_shrinker = NULL;
         }
 
@@ -1607,9 +1611,9 @@ void lu_global_fini(void)
          * Tear shrinker environment down _after_ de-registering
          * lu_global_key, because the latter has a value in the former.
          */
-        down(&lu_sites_guard);
+        cfs_down(&lu_sites_guard);
         lu_env_fini(&lu_shrink_env);
-        up(&lu_sites_guard);
+        cfs_up(&lu_sites_guard);
 
         lu_ref_global_fini();
 }
@@ -1634,7 +1638,7 @@ int lu_site_stats_print(const struct lu_site *s, char *page, int count)
          * an estimation anyway.
          */
         for (i = 0, populated = 0; i < s->ls_hash_size; i++)
-                populated += !hlist_empty(&s->ls_hash[i]);
+                populated += !cfs_hlist_empty(&s->ls_hash[i]);
 
         return snprintf(page, count, "%d %d %d/%d %d %d %d %d %d %d %d\n",
                         s->ls_total,
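The site hash-table walk above also picks up the hlist renames. Assumed Linux aliases for the hash-list types and helpers used by this patch:

    /* Sketch only: assumed 1:1 aliases onto the kernel hlist API. */
    #include <linux/list.h>

    typedef struct hlist_head cfs_hlist_head_t;
    typedef struct hlist_node cfs_hlist_node_t;

    #define CFS_INIT_HLIST_NODE(node)           INIT_HLIST_NODE(node)
    #define cfs_hlist_empty(head)               hlist_empty(head)
    #define cfs_hlist_unhashed(node)            hlist_unhashed(node)
    #define cfs_hlist_entry(ptr, type, member)  hlist_entry(ptr, type, member)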
index a75a61a..01d31b6 100644
                                                         \
           if (unlikely(!(expr))) {                      \
                   lu_ref_print(__ref);                  \
-                  spin_unlock(&__ref->lf_guard);        \
+                  cfs_spin_unlock(&__ref->lf_guard);    \
                   lu_ref_print_all();                   \
-                  spin_lock(&__ref->lf_guard);          \
+                  cfs_spin_lock(&__ref->lf_guard);      \
                   LASSERT(0);                           \
           }                                             \
   } while (0)
 
 struct lu_ref_link {
         struct lu_ref    *ll_ref;
-        struct list_head  ll_linkage;
+        cfs_list_t        ll_linkage;
         const char       *ll_scope;
         const void       *ll_source;
 };
@@ -101,9 +101,9 @@ static struct lu_kmem_descr lu_ref_caches[] = {
  * Protected by lu_ref_refs_guard.
  */
 static CFS_LIST_HEAD(lu_ref_refs);
-static spinlock_t lu_ref_refs_guard;
+static cfs_spinlock_t lu_ref_refs_guard;
 static struct lu_ref lu_ref_marker = {
-        .lf_guard   = SPIN_LOCK_UNLOCKED,
+        .lf_guard   = CFS_SPIN_LOCK_UNLOCKED,
         .lf_list    = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
         .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
 };
@@ -114,7 +114,7 @@ void lu_ref_print(const struct lu_ref *ref)
 
         CERROR("lu_ref: %p %d %d %s:%d\n",
                ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
-        list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+        cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
                 CERROR("     link: %s %p\n", link->ll_scope, link->ll_source);
         }
 }
@@ -129,16 +129,16 @@ void lu_ref_print_all(void)
 {
         struct lu_ref *ref;
 
-        spin_lock(&lu_ref_refs_guard);
-        list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
+        cfs_spin_lock(&lu_ref_refs_guard);
+        cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
                 if (lu_ref_is_marker(ref))
                         continue;
 
-                spin_lock(&ref->lf_guard);
+                cfs_spin_lock(&ref->lf_guard);
                 lu_ref_print(ref);
-                spin_unlock(&ref->lf_guard);
+                cfs_spin_unlock(&ref->lf_guard);
         }
-        spin_unlock(&lu_ref_refs_guard);
+        cfs_spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_print_all);
 
@@ -147,21 +147,21 @@ void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
         ref->lf_refs = 0;
         ref->lf_func = func;
         ref->lf_line = line;
-        spin_lock_init(&ref->lf_guard);
+        cfs_spin_lock_init(&ref->lf_guard);
         CFS_INIT_LIST_HEAD(&ref->lf_list);
-        spin_lock(&lu_ref_refs_guard);
-        list_add(&ref->lf_linkage, &lu_ref_refs);
-        spin_unlock(&lu_ref_refs_guard);
+        cfs_spin_lock(&lu_ref_refs_guard);
+        cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
+        cfs_spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_init_loc);
 
 void lu_ref_fini(struct lu_ref *ref)
 {
-        REFASSERT(ref, list_empty(&ref->lf_list));
+        REFASSERT(ref, cfs_list_empty(&ref->lf_list));
         REFASSERT(ref, ref->lf_refs == 0);
-        spin_lock(&lu_ref_refs_guard);
-        list_del_init(&ref->lf_linkage);
-        spin_unlock(&lu_ref_refs_guard);
+        cfs_spin_lock(&lu_ref_refs_guard);
+        cfs_list_del_init(&ref->lf_linkage);
+        cfs_spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_fini);
 
@@ -179,17 +179,17 @@ static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
                         link->ll_ref    = ref;
                         link->ll_scope  = scope;
                         link->ll_source = source;
-                        spin_lock(&ref->lf_guard);
-                        list_add_tail(&link->ll_linkage, &ref->lf_list);
+                        cfs_spin_lock(&ref->lf_guard);
+                        cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
                         ref->lf_refs++;
-                        spin_unlock(&ref->lf_guard);
+                        cfs_spin_unlock(&ref->lf_guard);
                 }
         }
 
         if (link == NULL) {
-                spin_lock(&ref->lf_guard);
+                cfs_spin_lock(&ref->lf_guard);
                 ref->lf_failed++;
-                spin_unlock(&ref->lf_guard);
+                cfs_spin_unlock(&ref->lf_guard);
                 link = ERR_PTR(-ENOMEM);
         }
 
@@ -199,7 +199,7 @@ static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
 struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
                                const void *source)
 {
-        might_sleep();
+        cfs_might_sleep();
         return lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source);
 }
 EXPORT_SYMBOL(lu_ref_add);
@@ -235,7 +235,7 @@ static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
         unsigned            iterations;
 
         iterations = 0;
-        list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+        cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
                 ++iterations;
                 if (lu_ref_link_eq(link, scope, source)) {
                         if (iterations > lu_ref_chain_max_length) {
@@ -253,17 +253,17 @@ void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
 {
         struct lu_ref_link *link;
 
-        spin_lock(&ref->lf_guard);
+        cfs_spin_lock(&ref->lf_guard);
         link = lu_ref_find(ref, scope, source);
         if (link != NULL) {
-                list_del(&link->ll_linkage);
+                cfs_list_del(&link->ll_linkage);
                 ref->lf_refs--;
-                spin_unlock(&ref->lf_guard);
+                cfs_spin_unlock(&ref->lf_guard);
                 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
         } else {
                 REFASSERT(ref, ref->lf_failed > 0);
                 ref->lf_failed--;
-                spin_unlock(&ref->lf_guard);
+                cfs_spin_unlock(&ref->lf_guard);
         }
 }
 EXPORT_SYMBOL(lu_ref_del);
@@ -272,7 +272,7 @@ void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
                    const char *scope,
                    const void *source0, const void *source1)
 {
-        spin_lock(&ref->lf_guard);
+        cfs_spin_lock(&ref->lf_guard);
         if (link != ERR_PTR(-ENOMEM)) {
                 REFASSERT(ref, link->ll_ref == ref);
                 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
@@ -280,7 +280,7 @@ void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
         } else {
                 REFASSERT(ref, ref->lf_failed > 0);
         }
-        spin_unlock(&ref->lf_guard);
+        cfs_spin_unlock(&ref->lf_guard);
 }
 EXPORT_SYMBOL(lu_ref_set_at);
 
@@ -288,18 +288,18 @@ void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
                    const char *scope, const void *source)
 {
         if (link != ERR_PTR(-ENOMEM)) {
-                spin_lock(&ref->lf_guard);
+                cfs_spin_lock(&ref->lf_guard);
                 REFASSERT(ref, link->ll_ref == ref);
                 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
-                list_del(&link->ll_linkage);
+                cfs_list_del(&link->ll_linkage);
                 ref->lf_refs--;
-                spin_unlock(&ref->lf_guard);
+                cfs_spin_unlock(&ref->lf_guard);
                 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
         } else {
-                spin_lock(&ref->lf_guard);
+                cfs_spin_lock(&ref->lf_guard);
                 REFASSERT(ref, ref->lf_failed > 0);
                 ref->lf_failed--;
-                spin_unlock(&ref->lf_guard);
+                cfs_spin_unlock(&ref->lf_guard);
         }
 }
 EXPORT_SYMBOL(lu_ref_del_at);
@@ -310,10 +310,10 @@ static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
 {
         struct lu_ref *ref = seq->private;
 
-        spin_lock(&lu_ref_refs_guard);
-        if (list_empty(&ref->lf_linkage))
+        cfs_spin_lock(&lu_ref_refs_guard);
+        if (cfs_list_empty(&ref->lf_linkage))
                 ref = NULL;
-        spin_unlock(&lu_ref_refs_guard);
+        cfs_spin_unlock(&lu_ref_refs_guard);
 
         return ref;
 }
@@ -324,17 +324,17 @@ static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
         struct lu_ref *next;
 
         LASSERT(seq->private == p);
-        LASSERT(!list_empty(&ref->lf_linkage));
+        LASSERT(!cfs_list_empty(&ref->lf_linkage));
 
-        spin_lock(&lu_ref_refs_guard);
-        next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+        cfs_spin_lock(&lu_ref_refs_guard);
+        next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
         if (&next->lf_linkage == &lu_ref_refs) {
                 p = NULL;
         } else {
                 (*pos)++;
-                list_move(&ref->lf_linkage, &next->lf_linkage);
+                cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
         }
-        spin_unlock(&lu_ref_refs_guard);
+        cfs_spin_unlock(&lu_ref_refs_guard);
         return p;
 }
 
@@ -349,16 +349,16 @@ static int lu_ref_seq_show(struct seq_file *seq, void *p)
         struct lu_ref *ref  = p;
         struct lu_ref *next; 
 
-        spin_lock(&lu_ref_refs_guard);
-        next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+        cfs_spin_lock(&lu_ref_refs_guard);
+        next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
         if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
-                spin_unlock(&lu_ref_refs_guard);
+                cfs_spin_unlock(&lu_ref_refs_guard);
                 return 0;
         }
 
         /* print the entry */
 
-        spin_lock(&next->lf_guard);
+        cfs_spin_lock(&next->lf_guard);
         seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
                    next, next->lf_refs, next->lf_failed,
                    next->lf_func, next->lf_line);
@@ -368,12 +368,12 @@ static int lu_ref_seq_show(struct seq_file *seq, void *p)
                 struct lu_ref_link *link;
                 int i = 0;
 
-                list_for_each_entry(link, &next->lf_list, ll_linkage)
+                cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
                         seq_printf(seq, "  #%d link: %s %p\n",
                                    i++, link->ll_scope, link->ll_source);
         }
-        spin_unlock(&next->lf_guard);
-        spin_unlock(&lu_ref_refs_guard);
+        cfs_spin_unlock(&next->lf_guard);
+        cfs_spin_unlock(&lu_ref_refs_guard);
 
         return 0;
 }
@@ -392,12 +392,12 @@ static int lu_ref_seq_open(struct inode *inode, struct file *file)
 
         result = seq_open(file, &lu_ref_seq_ops);
         if (result == 0) {
-                spin_lock(&lu_ref_refs_guard);
-                if (!list_empty(&marker->lf_linkage))
+                cfs_spin_lock(&lu_ref_refs_guard);
+                if (!cfs_list_empty(&marker->lf_linkage))
                         result = -EAGAIN;
                 else
-                        list_add(&marker->lf_linkage, &lu_ref_refs);
-                spin_unlock(&lu_ref_refs_guard);
+                        cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
+                cfs_spin_unlock(&lu_ref_refs_guard);
 
                 if (result == 0) {
                         struct seq_file *f = file->private_data;
@@ -414,9 +414,9 @@ static int lu_ref_seq_release(struct inode *inode, struct file *file)
 {
         struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
 
-        spin_lock(&lu_ref_refs_guard);
-        list_del_init(&ref->lf_linkage);
-        spin_unlock(&lu_ref_refs_guard);
+        cfs_spin_lock(&lu_ref_refs_guard);
+        cfs_list_del_init(&ref->lf_linkage);
+        cfs_spin_unlock(&lu_ref_refs_guard);
 
         return seq_release(inode, file);
 }
@@ -439,7 +439,7 @@ int lu_ref_global_init(void)
                "lu_ref tracking is enabled. Performance isn't.\n");
 
 
-        spin_lock_init(&lu_ref_refs_guard);
+        cfs_spin_lock_init(&lu_ref_refs_guard);
         result = lu_kmem_init(lu_ref_caches);
 
 #if defined(__KERNEL__) && defined(LPROCFS)
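lu_ref_add() above, like key_fini()/keys_fill() in lu_object.c earlier, switches to the renamed module-reference and scheduling helpers. Assumed Linux mapping:

    /* Sketch only: assumed 1:1 aliases onto kernel module helpers. */
    #include <linux/module.h>
    #include <linux/kernel.h>

    #define cfs_module_refcount(m)  module_refcount(m)
    #define cfs_try_module_get(m)   try_module_get(m)
    #define cfs_module_put(m)       module_put(m)
    #define cfs_might_sleep()       might_sleep()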
index 26513cf..342d334 100644
@@ -175,7 +175,7 @@ unsigned long long lu_time_stamp_get(void)
        struct timeval now;
        unsigned long long ret;
 
-       do_gettimeofday(&now);
+       cfs_gettimeofday(&now);
        ret = now.tv_sec;
        ret *= 1000000;
        ret += now.tv_usec;
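lu_time_stamp_get() above builds a microsecond timestamp from the renamed time helper: seconds scaled by 10^6 plus the microsecond remainder. cfs_gettimeofday() is assumed to alias the classic kernel call:

    /* Sketch only: assumed alias; do_gettimeofday() was the standard
     * wall-clock read in kernels of this era. */
    #include <linux/time.h>

    #define cfs_gettimeofday(tv)  do_gettimeofday(tv)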
index b8b07ba..abd436d 100644
 #include <lustre_lib.h>
 
 #if !defined(HAVE_RCU) || !defined(__KERNEL__)
-# define list_add_rcu            list_add
-# define list_del_rcu            list_del
-# define list_for_each_rcu       list_for_each
-# define list_for_each_safe_rcu  list_for_each_safe
-# define rcu_read_lock()         spin_lock(&bucket->lock)
-# define rcu_read_unlock()       spin_unlock(&bucket->lock)
+# define list_add_rcu            cfs_list_add
+# define list_del_rcu            cfs_list_del
+# define list_for_each_rcu       cfs_list_for_each
+# define list_for_each_safe_rcu  cfs_list_for_each_safe
+# define list_for_each_entry_rcu cfs_list_for_each_entry
+# define rcu_read_lock()         cfs_spin_lock(&bucket->lock)
+# define rcu_read_unlock()       cfs_spin_unlock(&bucket->lock)
 #endif /* ifndef HAVE_RCU */
 
 static __u64 handle_base;
 #define HANDLE_INCR 7
-static spinlock_t handle_base_lock;
+static cfs_spinlock_t handle_base_lock;
 
 static struct handle_bucket {
-        spinlock_t lock;
-        struct list_head head;
+        cfs_spinlock_t lock;
+        cfs_list_t head;
 } *handle_hash;
 
-static atomic_t handle_count = ATOMIC_INIT(0);
+static cfs_atomic_t handle_count = CFS_ATOMIC_INIT(0);
 
 #ifdef __arch_um__
 /* For unknown reason, UML uses kmalloc rather than vmalloc to allocate
@@ -89,13 +90,13 @@ void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
         ENTRY;
 
         LASSERT(h != NULL);
-        LASSERT(list_empty(&h->h_link));
+        LASSERT(cfs_list_empty(&h->h_link));
 
         /*
          * This is fast, but simplistic cookie generation algorithm, it will
          * need a re-do at some point in the future for security.
          */
-        spin_lock(&handle_base_lock);
+        cfs_spin_lock(&handle_base_lock);
         handle_base += HANDLE_INCR;
 
         h->h_cookie = handle_base;
@@ -108,17 +109,17 @@ void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
                 CWARN("The universe has been exhausted: cookie wrap-around.\n");
                 handle_base += HANDLE_INCR;
         }
-        spin_unlock(&handle_base_lock);
+        cfs_spin_unlock(&handle_base_lock);
  
-        atomic_inc(&handle_count);
+        cfs_atomic_inc(&handle_count);
         h->h_addref = cb;
-        spin_lock_init(&h->h_lock);
+        cfs_spin_lock_init(&h->h_lock);
 
         bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
-        spin_lock(&bucket->lock);
+        cfs_spin_lock(&bucket->lock);
         list_add_rcu(&h->h_link, &bucket->head);
         h->h_in = 1;
-        spin_unlock(&bucket->lock);
+        cfs_spin_unlock(&bucket->lock);
 
         CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
                h, h->h_cookie);
@@ -127,7 +128,7 @@ void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
 
 static void class_handle_unhash_nolock(struct portals_handle *h)
 {
-        if (list_empty(&h->h_link)) {
+        if (cfs_list_empty(&h->h_link)) {
                 CERROR("removing an already-removed handle ("LPX64")\n",
                        h->h_cookie);
                 return;
@@ -136,13 +137,13 @@ static void class_handle_unhash_nolock(struct portals_handle *h)
         CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
                h, h->h_cookie);
 
-        spin_lock(&h->h_lock);
+        cfs_spin_lock(&h->h_lock);
         if (h->h_in == 0) {
-                spin_unlock(&h->h_lock);
+                cfs_spin_unlock(&h->h_lock);
                 return;
         }
         h->h_in = 0;
-        spin_unlock(&h->h_lock);
+        cfs_spin_unlock(&h->h_lock);
         list_del_rcu(&h->h_link);
 }
 
@@ -151,11 +152,11 @@ void class_handle_unhash(struct portals_handle *h)
         struct handle_bucket *bucket;
         bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
 
-        spin_lock(&bucket->lock);
+        cfs_spin_lock(&bucket->lock);
         class_handle_unhash_nolock(h);
-        spin_unlock(&bucket->lock);
+        cfs_spin_unlock(&bucket->lock);
 
-        atomic_dec(&handle_count);
+        cfs_atomic_dec(&handle_count);
 }
 
 void class_handle_hash_back(struct portals_handle *h)
@@ -165,11 +166,11 @@ void class_handle_hash_back(struct portals_handle *h)
 
         bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
 
-        atomic_inc(&handle_count);
-        spin_lock(&bucket->lock);
+        cfs_atomic_inc(&handle_count);
+        cfs_spin_lock(&bucket->lock);
         list_add_rcu(&h->h_link, &bucket->head);
         h->h_in = 1;
-        spin_unlock(&bucket->lock);
+        cfs_spin_unlock(&bucket->lock);
 
         EXIT;
 }
@@ -192,12 +193,12 @@ void *class_handle2object(__u64 cookie)
                 if (h->h_cookie != cookie)
                         continue;
 
-                spin_lock(&h->h_lock);
+                cfs_spin_lock(&h->h_lock);
                 if (likely(h->h_in != 0)) {
                         h->h_addref(h);
                         retval = h;
                 }
-                spin_unlock(&h->h_lock);
+                cfs_spin_unlock(&h->h_lock);
                 break;
         }
         rcu_read_unlock();
@@ -205,7 +206,7 @@ void *class_handle2object(__u64 cookie)
         RETURN(retval);
 }
 
-void class_handle_free_cb(struct rcu_head *rcu)
+void class_handle_free_cb(cfs_rcu_head_t *rcu)
 {
         struct portals_handle *h = RCU2HANDLE(rcu);
         if (h->h_free_cb) {
@@ -229,16 +230,16 @@ int class_handle_init(void)
         if (handle_hash == NULL)
                 return -ENOMEM;
 
-        spin_lock_init(&handle_base_lock);
+        cfs_spin_lock_init(&handle_base_lock);
         for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
              bucket--) {
                 CFS_INIT_LIST_HEAD(&bucket->head);
-                spin_lock_init(&bucket->lock);
+                cfs_spin_lock_init(&bucket->lock);
         }
 
         /** bug 21430: add randomness to the initial base */
         ll_get_random_bytes(seed, sizeof(seed));
-        do_gettimeofday(&tv);
+        cfs_gettimeofday(&tv);
         ll_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
 
         ll_get_random_bytes(&handle_base, sizeof(handle_base));
@@ -254,14 +255,14 @@ static void cleanup_all_handles(void)
         for (i = 0; i < HANDLE_HASH_SIZE; i++) {
                 struct portals_handle *h;
 
-                spin_lock(&handle_hash[i].lock);
+                cfs_spin_lock(&handle_hash[i].lock);
                 list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
                         CERROR("force clean handle "LPX64" addr %p addref %p\n",
                                h->h_cookie, h, h->h_addref);
 
                         class_handle_unhash_nolock(h);
                 }
-                spin_unlock(&handle_hash[i].lock);
+                cfs_spin_unlock(&handle_hash[i].lock);
         }
 }
 
@@ -270,7 +271,7 @@ void class_handle_cleanup(void)
         int count;
         LASSERT(handle_hash != NULL);
 
-        count = atomic_read(&handle_count);
+        count = cfs_atomic_read(&handle_count);
         if (count != 0) {
                 CERROR("handle_count at cleanup: %d\n", count);
                 cleanup_all_handles();
@@ -279,6 +280,6 @@ void class_handle_cleanup(void)
         OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
         handle_hash = NULL;
 
-        if (atomic_read(&handle_count))
-                CERROR("leaked %d handles\n", atomic_read(&handle_count));
+        if (cfs_atomic_read(&handle_count))
+                CERROR("leaked %d handles\n", cfs_atomic_read(&handle_count));
 }
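When RCU is unavailable (!HAVE_RCU, or userspace builds), the fallback macros at the top of this file map the RCU list walks onto plain cfs_list operations and make rcu_read_lock()/rcu_read_unlock() expand to taking the per-bucket spinlock; note this only compiles inside functions where a local variable named bucket is in scope, which every caller in this file guarantees. With real RCU, the callback argument type is assumed to alias the native one:

    /* Sketch only: assumed alias when the kernel provides RCU. */
    #include <linux/rcupdate.h>

    typedef struct rcu_head cfs_rcu_head_t;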
index 5be9788..d60d026 100644
 #include <lprocfs_status.h>
 
 struct uuid_nid_data {
-        struct list_head un_list;
+        cfs_list_t       un_list;
         lnet_nid_t       un_nid;
         char            *un_uuid;
         int              un_count;  /* nid/uuid pair refcount */
 };
 
 /* FIXME: This should probably become more elegant than a global linked list */
-static struct list_head g_uuid_list;
-static spinlock_t       g_uuid_lock;
+static cfs_list_t           g_uuid_list;
+static cfs_spinlock_t       g_uuid_lock;
 
 void class_init_uuidlist(void)
 {
         CFS_INIT_LIST_HEAD(&g_uuid_list);
-        spin_lock_init(&g_uuid_lock);
+        cfs_spin_lock_init(&g_uuid_lock);
 }
 
 void class_exit_uuidlist(void)
@@ -72,24 +72,24 @@ void class_exit_uuidlist(void)
 
 int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
-        spin_lock (&g_uuid_lock);
+        cfs_spin_lock (&g_uuid_lock);
 
-        list_for_each(tmp, &g_uuid_list) {
+        cfs_list_for_each(tmp, &g_uuid_list) {
                 struct uuid_nid_data *data =
-                        list_entry(tmp, struct uuid_nid_data, un_list);
+                        cfs_list_entry(tmp, struct uuid_nid_data, un_list);
 
                 if (!strcmp(data->un_uuid, uuid) &&
                     index-- == 0) {
                         *peer_nid = data->un_nid;
 
-                        spin_unlock (&g_uuid_lock);
+                        cfs_spin_unlock (&g_uuid_lock);
                         return 0;
                 }
         }
 
-        spin_unlock (&g_uuid_lock);
+        cfs_spin_unlock (&g_uuid_lock);
         return -ENOENT;
 }
 
@@ -120,9 +120,9 @@ int class_add_uuid(const char *uuid, __u64 nid)
         data->un_nid = nid;
         data->un_count = 1;
 
-        spin_lock (&g_uuid_lock);
+        cfs_spin_lock (&g_uuid_lock);
 
-        list_for_each_entry(entry, &g_uuid_list, un_list) {
+        cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
                 if (entry->un_nid == nid && 
                     (strcmp(entry->un_uuid, uuid) == 0)) {
                         found++;
@@ -131,8 +131,8 @@ int class_add_uuid(const char *uuid, __u64 nid)
                 }
         }
         if (!found) 
-                list_add(&data->un_list, &g_uuid_list);
-        spin_unlock (&g_uuid_lock);
+                cfs_list_add(&data->un_list, &g_uuid_list);
+        cfs_spin_unlock (&g_uuid_lock);
 
         if (found) {
                 CDEBUG(D_INFO, "found uuid %s %s cnt=%d\n", uuid, 
@@ -152,23 +152,23 @@ int class_del_uuid(const char *uuid)
         struct uuid_nid_data *data;
         int found = 0;
 
-        spin_lock (&g_uuid_lock);
+        cfs_spin_lock (&g_uuid_lock);
         if (uuid == NULL) {
-                list_splice_init(&g_uuid_list, &deathrow);
+                cfs_list_splice_init(&g_uuid_list, &deathrow);
                 found = 1;
         } else {
-                list_for_each_entry(data, &g_uuid_list, un_list) {
+                cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
                         if (strcmp(data->un_uuid, uuid))
                                 continue;
                         --data->un_count;
                         LASSERT(data->un_count >= 0);
                         if (data->un_count == 0)
-                                list_move(&data->un_list, &deathrow);
+                                cfs_list_move(&data->un_list, &deathrow);
                         found = 1;
                         break;
                 }
         }
-        spin_unlock (&g_uuid_lock);
+        cfs_spin_unlock (&g_uuid_lock);
 
         if (!found) {
                 if (uuid)
@@ -176,9 +176,10 @@ int class_del_uuid(const char *uuid)
                 return -EINVAL;
         }
 
-        while (!list_empty(&deathrow)) {
-                data = list_entry(deathrow.next, struct uuid_nid_data, un_list);
-                list_del(&data->un_list);
+        while (!cfs_list_empty(&deathrow)) {
+                data = cfs_list_entry(deathrow.next, struct uuid_nid_data,
+                                      un_list);
+                cfs_list_del(&data->un_list);
 
                 CDEBUG(D_INFO, "del uuid %s %s\n", data->un_uuid,
                        libcfs_nid2str(data->un_nid));
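The uuid list code above is the recurring "spinlock-guarded global list" pattern of this patch: take the guard, walk the list, drop the guard before returning. A hypothetical, condensed usage example under the cfs_ names (the demo_* identifiers are illustrative only, and the cfs_ aliases are the ones sketched earlier):

    /* Hypothetical usage sketch; not part of Lustre. Assumes the cfs_
     * list/spinlock aliases sketched above. */
    struct demo_item {
            cfs_list_t di_list;
            int        di_key;
    };

    static cfs_list_t     demo_list;
    static cfs_spinlock_t demo_lock;

    static void demo_init(void)
    {
            CFS_INIT_LIST_HEAD(&demo_list);
            cfs_spin_lock_init(&demo_lock);
    }

    static struct demo_item *demo_find(int key)
    {
            struct demo_item *item;

            cfs_spin_lock(&demo_lock);
            cfs_list_for_each_entry(item, &demo_list, di_list) {
                    if (item->di_key == key) {
                            cfs_spin_unlock(&demo_lock);
                            return item; /* caller must manage lifetime */
                    }
            }
            cfs_spin_unlock(&demo_lock);
            return NULL;
    }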
index e12841e..6581e3d 100644
 
 
 /** List head to hold list of objects to be created. */
-static struct list_head llo_lobj_list;
+static cfs_list_t llo_lobj_list;
 
 /** Lock to protect list manipulations */
-static struct mutex     llo_lock;
+static cfs_mutex_t     llo_lock;
 
 /**
  * Structure used to maintain state of path parsing.
@@ -378,18 +378,18 @@ EXPORT_SYMBOL(llo_store_create);
 
 void llo_local_obj_register(struct lu_local_obj_desc *llod)
 {
-        mutex_lock(&llo_lock);
-        list_add_tail(&llod->llod_linkage, &llo_lobj_list);
-        mutex_unlock(&llo_lock);
+        cfs_mutex_lock(&llo_lock);
+        cfs_list_add_tail(&llod->llod_linkage, &llo_lobj_list);
+        cfs_mutex_unlock(&llo_lock);
 }
 
 EXPORT_SYMBOL(llo_local_obj_register);
 
 void llo_local_obj_unregister(struct lu_local_obj_desc *llod)
 {
-        mutex_lock(&llo_lock);
-        list_del(&llod->llod_linkage);
-        mutex_unlock(&llo_lock);
+        cfs_mutex_lock(&llo_lock);
+        cfs_list_del(&llod->llod_linkage);
+        cfs_mutex_unlock(&llo_lock);
 }
 
 EXPORT_SYMBOL(llo_local_obj_unregister);
@@ -410,9 +410,9 @@ int llo_local_objects_setup(const struct lu_env *env,
         int rc = 0;
 
         fid = &info->lti_cfid;
-        mutex_lock(&llo_lock);
+        cfs_mutex_lock(&llo_lock);
 
-        list_for_each_entry(scan, &llo_lobj_list, llod_linkage) {
+        cfs_list_for_each_entry(scan, &llo_lobj_list, llod_linkage) {
                 lu_local_obj_fid(fid, scan->llod_oid);
                 dir = "";
                 if (scan->llod_dir)
@@ -439,7 +439,7 @@ int llo_local_objects_setup(const struct lu_env *env,
         }
 
 out:
-        mutex_unlock(&llo_lock);
+        cfs_mutex_unlock(&llo_lock);
         return rc;
 }
 
@@ -450,7 +450,7 @@ int llo_global_init(void)
         int result;
 
         CFS_INIT_LIST_HEAD(&llo_lobj_list);
-        mutex_init(&llo_lock);
+        cfs_mutex_init(&llo_lock);
 
         LU_CONTEXT_KEY_INIT(&llod_key);
         result = lu_context_key_register(&llod_key);
@@ -460,5 +460,5 @@ int llo_global_init(void)
 void llo_global_fini(void)
 {
         lu_context_key_degister(&llod_key);
-        LASSERT(list_empty(&llo_lobj_list));
+        LASSERT(cfs_list_empty(&llo_lobj_list));
 }
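llo_lock above is a true sleeping mutex (cfs_mutex_t), distinct from the semaphore-backed locks elsewhere in this patch. Assumed Linux mapping:

    /* Sketch only: assumed 1:1 aliases onto the kernel mutex API. */
    #include <linux/mutex.h>

    typedef struct mutex cfs_mutex_t;

    #define cfs_mutex_init(m)    mutex_init(m)
    #define cfs_mutex_lock(m)    mutex_lock(m)
    #define cfs_mutex_unlock(m)  mutex_unlock(m)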
index ae04e35..44d94dd 100644
@@ -249,7 +249,7 @@ int class_attach(struct lustre_cfg *lcfg)
         LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0,
                  "%p obd_name %s != %s\n", obd, obd->obd_name, name);
 
-        rwlock_init(&obd->obd_pool_lock);
+        cfs_rwlock_init(&obd->obd_pool_lock);
         obd->obd_pool_limit = 0;
         obd->obd_pool_slv = 0;
 
@@ -258,19 +258,19 @@ int class_attach(struct lustre_cfg *lcfg)
         CFS_INIT_LIST_HEAD(&obd->obd_delayed_exports);
         CFS_INIT_LIST_HEAD(&obd->obd_exports_timed);
         CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
-        spin_lock_init(&obd->obd_nid_lock);
-        spin_lock_init(&obd->obd_dev_lock);
-        sema_init(&obd->obd_dev_sem, 1);
-        spin_lock_init(&obd->obd_osfs_lock);
+        cfs_spin_lock_init(&obd->obd_nid_lock);
+        cfs_spin_lock_init(&obd->obd_dev_lock);
+        cfs_sema_init(&obd->obd_dev_sem, 1);
+        cfs_spin_lock_init(&obd->obd_osfs_lock);
         /* obd->obd_osfs_age must be set to a value in the distant
          * past to guarantee a fresh statfs is fetched on mount. */
         obd->obd_osfs_age = cfs_time_shift_64(-1000);
 
         /* XXX belongs in setup not attach  */
-        init_rwsem(&obd->obd_observer_link_sem);
+        cfs_init_rwsem(&obd->obd_observer_link_sem);
         /* recovery data */
         cfs_init_timer(&obd->obd_recovery_timer);
-        spin_lock_init(&obd->obd_processing_task_lock);
+        cfs_spin_lock_init(&obd->obd_processing_task_lock);
         cfs_waitq_init(&obd->obd_next_transno_waitq);
         cfs_waitq_init(&obd->obd_evict_inprogress_waitq);
         CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
@@ -296,15 +296,15 @@ int class_attach(struct lustre_cfg *lcfg)
         }
 
         /* Detach drops this */
-        spin_lock(&obd->obd_dev_lock);
-        atomic_set(&obd->obd_refcount, 1);
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_atomic_set(&obd->obd_refcount, 1);
+        cfs_spin_unlock(&obd->obd_dev_lock);
         lu_ref_init(&obd->obd_reference);
         lu_ref_add(&obd->obd_reference, "attach", obd);
 
         obd->obd_attached = 1;
         CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n",
-               obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
+               obd->obd_minor, typename, cfs_atomic_read(&obd->obd_refcount));
         RETURN(0);
  out:
         if (obd != NULL) {
@@ -340,9 +340,9 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         }
 
         /* is someone else setting us up right now? (attach inits spinlock) */
-        spin_lock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
         if (obd->obd_starting) {
-                spin_unlock(&obd->obd_dev_lock);
+                cfs_spin_unlock(&obd->obd_dev_lock);
                 CERROR("Device %d setup in progress (type %s)\n",
                        obd->obd_minor, obd->obd_type->typ_name);
                 RETURN(-EEXIST);
@@ -353,7 +353,7 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         obd->obd_uuid_hash = NULL;
         obd->obd_nid_hash = NULL;
         obd->obd_nid_stats_hash = NULL;
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         /* create an uuid-export lustre hash */
         obd->obd_uuid_hash = cfs_hash_create("UUID_HASH",
@@ -384,7 +384,7 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
                 GOTO(err_hash, err = PTR_ERR(exp));
 
         obd->obd_self_export = exp;
-        list_del_init(&exp->exp_obd_chain_timed);
+        cfs_list_del_init(&exp->exp_obd_chain_timed);
         class_export_put(exp);
 
         err = obd_setup(obd, lcfg);
@@ -393,10 +393,10 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 
         obd->obd_set_up = 1;
 
-        spin_lock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
         /* cleanup drops this */
         class_incref(obd, "setup", obd);
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n",
                obd->obd_name, obd->obd_uuid.uuid);
@@ -434,14 +434,14 @@ int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
                 RETURN(-EBUSY);
         }
 
-        spin_lock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
         if (!obd->obd_attached) {
-                spin_unlock(&obd->obd_dev_lock);
+                cfs_spin_unlock(&obd->obd_dev_lock);
                 CERROR("OBD device %d not attached\n", obd->obd_minor);
                 RETURN(-ENODEV);
         }
         obd->obd_attached = 0;
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n",
                obd->obd_name, obd->obd_uuid.uuid);
@@ -467,15 +467,15 @@ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
                 RETURN(-ENODEV);
         }
 
-        spin_lock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
         if (obd->obd_stopping) {
-                spin_unlock(&obd->obd_dev_lock);
+                cfs_spin_unlock(&obd->obd_dev_lock);
                 CERROR("OBD %d already stopping\n", obd->obd_minor);
                 RETURN(-ENODEV);
         }
         /* Leave this on forever */
         obd->obd_stopping = 1;
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
         if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) {
                 for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++)
@@ -508,12 +508,12 @@ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
 
         /* The three references that should be remaining are the
          * obd_self_export and the attach and setup references. */
-        if (atomic_read(&obd->obd_refcount) > 3) {
+        if (cfs_atomic_read(&obd->obd_refcount) > 3) {
                /* refcount - 3 might be the number of real exports
                    (excluding self export). But class_incref is called
                    by other things as well, so don't count on it. */
                 CDEBUG(D_IOCTL, "%s: forcing exports to disconnect: %d\n",
-                       obd->obd_name, atomic_read(&obd->obd_refcount) - 3);
+                       obd->obd_name, cfs_atomic_read(&obd->obd_refcount) - 3);
                 dump_exports(obd, 0);
                 class_disconnect_exports(obd);
         }
@@ -550,9 +550,9 @@ struct obd_device *class_incref(struct obd_device *obd,
                                 const char *scope, const void *source)
 {
         lu_ref_add_atomic(&obd->obd_reference, scope, source);
-        atomic_inc(&obd->obd_refcount);
+        cfs_atomic_inc(&obd->obd_refcount);
         CDEBUG(D_INFO, "incref %s (%p) now %d\n", obd->obd_name, obd,
-               atomic_read(&obd->obd_refcount));
+               cfs_atomic_read(&obd->obd_refcount));
 
         return obd;
 }
@@ -562,10 +562,10 @@ void class_decref(struct obd_device *obd, const char *scope, const void *source)
         int err;
         int refs;
 
-        spin_lock(&obd->obd_dev_lock);
-        atomic_dec(&obd->obd_refcount);
-        refs = atomic_read(&obd->obd_refcount);
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_atomic_dec(&obd->obd_refcount);
+        refs = cfs_atomic_read(&obd->obd_refcount);
+        cfs_spin_unlock(&obd->obd_dev_lock);
         lu_ref_del(&obd->obd_reference, scope, source);
 
         CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
@@ -574,9 +574,9 @@ void class_decref(struct obd_device *obd, const char *scope, const void *source)
                 /* All exports have been destroyed; there should
                    be no more in-progress ops by this point.*/
 
-                spin_lock(&obd->obd_self_export->exp_lock);
+                cfs_spin_lock(&obd->obd_self_export->exp_lock);
                 obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
-                spin_unlock(&obd->obd_self_export->exp_lock);
+                cfs_spin_unlock(&obd->obd_self_export->exp_lock);
 
                 /* note that we'll recurse into class_decref again */
                 class_unlink_export(obd->obd_self_export);
@@ -671,7 +671,7 @@ struct lustre_profile *class_get_profile(const char * prof)
         struct lustre_profile *lprof;
 
         ENTRY;
-        list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
+        cfs_list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
                 if (!strcmp(lprof->lp_profile, prof)) {
                         RETURN(lprof);
                 }
@@ -713,7 +713,7 @@ int class_add_profile(int proflen, char *prof, int osclen, char *osc,
                 memcpy(lprof->lp_md, mdc, mdclen);
         }
 
-        list_add(&lprof->lp_list, &lustre_profile_list);
+        cfs_list_add(&lprof->lp_list, &lustre_profile_list);
         RETURN(err);
 
 out:
@@ -736,7 +736,7 @@ void class_del_profile(const char *prof)
 
         lprof = class_get_profile(prof);
         if (lprof) {
-                list_del(&lprof->lp_list);
+                cfs_list_del(&lprof->lp_list);
                 OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
                 OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
                 if (lprof->lp_md)
@@ -752,8 +752,8 @@ void class_del_profiles(void)
         struct lustre_profile *lprof, *n;
         ENTRY;
 
-        list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
-                list_del(&lprof->lp_list);
+        cfs_list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
+                cfs_list_del(&lprof->lp_list);
                 OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
                 OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
                 if (lprof->lp_md)
@@ -1431,11 +1431,11 @@ uuid_hash(cfs_hash_t *hs,  void *key, unsigned mask)
 }
 
 static void *
-uuid_key(struct hlist_node *hnode)
+uuid_key(cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
-        exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
 
         RETURN(&exp->exp_client_uuid);
 }
@@ -1445,34 +1445,34 @@ uuid_key(struct hlist_node *hnode)
  *       state with this function
  */
 static int
-uuid_compare(void *key, struct hlist_node *hnode)
+uuid_compare(void *key, cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
         LASSERT(key);
-        exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
 
         RETURN(obd_uuid_equals((struct obd_uuid *)key,&exp->exp_client_uuid) &&
                !exp->exp_failed);
 }
 
 static void *
-uuid_export_get(struct hlist_node *hnode)
+uuid_export_get(cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
-        exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
         class_export_get(exp);
 
         RETURN(exp);
 }
 
 static void *
-uuid_export_put(struct hlist_node *hnode)
+uuid_export_put(cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
-        exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
         class_export_put(exp);
 
         RETURN(exp);
@@ -1498,11 +1498,11 @@ nid_hash(cfs_hash_t *hs,  void *key, unsigned mask)
 }
 
 static void *
-nid_key(struct hlist_node *hnode)
+nid_key(cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
-        exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
 
         RETURN(&exp->exp_connection->c_peer.nid);
 }
@@ -1512,34 +1512,34 @@ nid_key(struct hlist_node *hnode)
  *       state with this function
  */
 static int
-nid_compare(void *key, struct hlist_node *hnode)
+nid_compare(void *key, cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
         LASSERT(key);
-        exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
 
         RETURN(exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key &&
                !exp->exp_failed);
 }
 
 static void *
-nid_export_get(struct hlist_node *hnode)
+nid_export_get(cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
-        exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
         class_export_get(exp);
 
         RETURN(exp);
 }
 
 static void *
-nid_export_put(struct hlist_node *hnode)
+nid_export_put(cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
-        exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
         class_export_put(exp);
 
         RETURN(exp);
@@ -1559,38 +1559,38 @@ static cfs_hash_ops_t nid_hash_ops = {
  */
 
 static void *
-nidstats_key(struct hlist_node *hnode)
+nidstats_key(cfs_hlist_node_t *hnode)
 {
         struct nid_stat *ns;
 
-        ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+        ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
 
         RETURN(&ns->nid);
 }
 
 static int
-nidstats_compare(void *key, struct hlist_node *hnode)
+nidstats_compare(void *key, cfs_hlist_node_t *hnode)
 {
         RETURN(*(lnet_nid_t *)nidstats_key(hnode) == *(lnet_nid_t *)key);
 }
 
 static void *
-nidstats_get(struct hlist_node *hnode)
+nidstats_get(cfs_hlist_node_t *hnode)
 {
         struct nid_stat *ns;
 
-        ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+        ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
         nidstat_getref(ns);
 
         RETURN(ns);
 }
 
 static void *
-nidstats_put(struct hlist_node *hnode)
+nidstats_put(cfs_hlist_node_t *hnode)
 {
         struct nid_stat *ns;
 
-        ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+        ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
         nidstat_putref(ns);
 
         RETURN(ns);
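class_attach() earlier in this file also renames the reader-writer primitives: cfs_rwlock_* for spinning rwlocks (obd_pool_lock) and cfs_init_rwsem() for sleeping read-write semaphores (obd_observer_link_sem). Assumed Linux mapping:

    /* Sketch only: assumed 1:1 aliases onto kernel rwlock/rwsem APIs. */
    #include <linux/spinlock.h>
    #include <linux/rwsem.h>

    typedef rwlock_t cfs_rwlock_t;

    #define cfs_rwlock_init(l)    rwlock_init(l)
    #define cfs_read_lock(l)      read_lock(l)
    #define cfs_read_unlock(l)    read_unlock(l)
    #define cfs_write_lock(l)     write_lock(l)
    #define cfs_write_unlock(l)   write_unlock(l)

    #define cfs_init_rwsem(sem)   init_rwsem(sem)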
index de4a406..61d934b 100644
@@ -61,17 +61,18 @@ static void (*kill_super_cb)(struct super_block *sb) = NULL;
 
 /*********** mount lookup *********/
 
-DECLARE_MUTEX(lustre_mount_info_lock);
+CFS_DECLARE_MUTEX(lustre_mount_info_lock);
 static CFS_LIST_HEAD(server_mount_info_list);
 
 static struct lustre_mount_info *server_find_mount(const char *name)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct lustre_mount_info *lmi;
         ENTRY;
 
-        list_for_each(tmp, &server_mount_info_list) {
-                lmi = list_entry(tmp, struct lustre_mount_info, lmi_list_chain);
+        cfs_list_for_each(tmp, &server_mount_info_list) {
+                lmi = cfs_list_entry(tmp, struct lustre_mount_info,
+                                     lmi_list_chain);
                 if (strcmp(name, lmi->lmi_name) == 0)
                         RETURN(lmi);
         }
@@ -101,10 +102,10 @@ static int server_register_mount(const char *name, struct super_block *sb,
         }
         strcpy(name_cp, name);
 
-        down(&lustre_mount_info_lock);
+        cfs_down(&lustre_mount_info_lock);
 
         if (server_find_mount(name)) {
-                up(&lustre_mount_info_lock);
+                cfs_up(&lustre_mount_info_lock);
                 OBD_FREE(lmi, sizeof(*lmi));
                 OBD_FREE(name_cp, strlen(name) + 1);
                 CERROR("Already registered %s\n", name);
@@ -113,12 +114,12 @@ static int server_register_mount(const char *name, struct super_block *sb,
         lmi->lmi_name = name_cp;
         lmi->lmi_sb = sb;
         lmi->lmi_mnt = mnt;
-        list_add(&lmi->lmi_list_chain, &server_mount_info_list);
+        cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
 
-        up(&lustre_mount_info_lock);
+        cfs_up(&lustre_mount_info_lock);
 
         CDEBUG(D_MOUNT, "reg_mnt %p from %s, vfscount=%d\n",
-               lmi->lmi_mnt, name, atomic_read(&lmi->lmi_mnt->mnt_count));
+               lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
 
         RETURN(0);
 }
@@ -129,21 +130,21 @@ static int server_deregister_mount(const char *name)
         struct lustre_mount_info *lmi;
         ENTRY;
 
-        down(&lustre_mount_info_lock);
+        cfs_down(&lustre_mount_info_lock);
         lmi = server_find_mount(name);
         if (!lmi) {
-                up(&lustre_mount_info_lock);
+                cfs_up(&lustre_mount_info_lock);
                 CERROR("%s not registered\n", name);
                 RETURN(-ENOENT);
         }
 
         CDEBUG(D_MOUNT, "dereg_mnt %p from %s, vfscount=%d\n",
-               lmi->lmi_mnt, name, atomic_read(&lmi->lmi_mnt->mnt_count));
+               lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
 
         OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
-        list_del(&lmi->lmi_list_chain);
+        cfs_list_del(&lmi->lmi_list_chain);
         OBD_FREE(lmi, sizeof(*lmi));
-        up(&lustre_mount_info_lock);
+        cfs_up(&lustre_mount_info_lock);
 
         RETURN(0);
 }
@@ -157,20 +158,20 @@ struct lustre_mount_info *server_get_mount(const char *name)
         struct lustre_sb_info *lsi;
         ENTRY;
 
-        down(&lustre_mount_info_lock);
+        cfs_down(&lustre_mount_info_lock);
         lmi = server_find_mount(name);
-        up(&lustre_mount_info_lock);
+        cfs_up(&lustre_mount_info_lock);
         if (!lmi) {
                 CERROR("Can't find mount for %s\n", name);
                 RETURN(NULL);
         }
         lsi = s2lsi(lmi->lmi_sb);
         mntget(lmi->lmi_mnt);
-        atomic_inc(&lsi->lsi_mounts);
+        cfs_atomic_inc(&lsi->lsi_mounts);
 
         CDEBUG(D_MOUNT, "get_mnt %p from %s, refs=%d, vfscount=%d\n",
-               lmi->lmi_mnt, name, atomic_read(&lsi->lsi_mounts),
-               atomic_read(&lmi->lmi_mnt->mnt_count));
+               lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts),
+               cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
 
         RETURN(lmi);
 }
@@ -185,9 +186,9 @@ struct lustre_mount_info *server_get_mount_2(const char *name)
         struct lustre_mount_info *lmi;
         ENTRY;
 
-        down(&lustre_mount_info_lock);
+        cfs_down(&lustre_mount_info_lock);
         lmi = server_find_mount(name);
-        up(&lustre_mount_info_lock);
+        cfs_up(&lustre_mount_info_lock);
         if (!lmi)
                 CERROR("Can't find mount for %s\n", name);
 
@@ -197,9 +198,9 @@ struct lustre_mount_info *server_get_mount_2(const char *name)
 static void unlock_mntput(struct vfsmount *mnt)
 {
         if (kernel_locked()) {
-                unlock_kernel();
+                cfs_unlock_kernel();
                 mntput(mnt);
-                lock_kernel();
+                cfs_lock_kernel();
         } else {
                 mntput(mnt);
         }
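
unlock_mntput() drops the big kernel lock, if held, around the final mntput(); only the BKL calls gain the prefix here, while the kernel_locked() predicate keeps its native name in this hunk. The assumed Linux mapping (see linux-lock.h):

/* Illustrative only -- BKL wrappers as direct aliases. */
#include <linux/smp_lock.h>

#define cfs_lock_kernel()    lock_kernel()
#define cfs_unlock_kernel()  unlock_kernel()
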
@@ -218,9 +219,9 @@ int server_put_mount(const char *name, struct vfsmount *mnt)
         /* This might be the last one, can't deref after this */
         unlock_mntput(mnt);
 
-        down(&lustre_mount_info_lock);
+        cfs_down(&lustre_mount_info_lock);
         lmi = server_find_mount(name);
-        up(&lustre_mount_info_lock);
+        cfs_up(&lustre_mount_info_lock);
         if (!lmi) {
                 CERROR("Can't find mount for %s\n", name);
                 RETURN(-ENOENT);
@@ -229,7 +230,7 @@ int server_put_mount(const char *name, struct vfsmount *mnt)
         LASSERT(lmi->lmi_mnt == mnt);
 
         CDEBUG(D_MOUNT, "put_mnt %p from %s, refs=%d, vfscount=%d\n",
-               lmi->lmi_mnt, name, atomic_read(&lsi->lsi_mounts), count);
+               lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts), count);
 
         if (lustre_put_lsi(lmi->lmi_sb)) {
                 CDEBUG(D_MOUNT, "Last put of mnt %p from %s, vfscount=%d\n",
@@ -557,7 +558,7 @@ static int server_stop_mgs(struct super_block *sb)
         RETURN(rc);
 }
 
-DECLARE_MUTEX(mgc_start_lock);
+CFS_DECLARE_MUTEX(mgc_start_lock);
 
 /** Set up a mgc obd to process startup logs
  *
@@ -619,7 +620,7 @@ static int lustre_start_mgc(struct super_block *sb)
 
         mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
 
-        mutex_down(&mgc_start_lock);
+        cfs_mutex_down(&mgc_start_lock);
 
         obd = class_name2obd(mgcname);
         if (obd && !obd->obd_stopping) {
@@ -630,7 +631,7 @@ static int lustre_start_mgc(struct super_block *sb)
                         GOTO(out_free, rc);
 
                 /* Re-using an existing MGC */
-                atomic_inc(&obd->u.cli.cl_mgc_refcount);
+                cfs_atomic_inc(&obd->u.cli.cl_mgc_refcount);
 
                 recov_bk = 0;
                 /* If we are restarting the MGS, don't try to keep the MGC's
@@ -749,7 +750,7 @@ static int lustre_start_mgc(struct super_block *sb)
 
         /* Keep a refcount of servers/clients who started with "mount",
            so we know when we can get rid of the mgc. */
-        atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
+        cfs_atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
 
         /* Try all connections, but only once. */
         recov_bk = 1;
@@ -781,7 +782,7 @@ out:
            to the same mgc.*/
         lsi->lsi_mgc = obd;
 out_free:
-        mutex_up(&mgc_start_lock);
+        cfs_mutex_up(&mgc_start_lock);
 
         if (mgcname)
                 OBD_FREE(mgcname, len);
@@ -805,13 +806,13 @@ static int lustre_stop_mgc(struct super_block *sb)
                 RETURN(-ENOENT);
         lsi->lsi_mgc = NULL;
 
-        mutex_down(&mgc_start_lock);
-        LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
-        if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
+        cfs_mutex_down(&mgc_start_lock);
+        LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
+        if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
                 /* This is not fatal, every client that stops
                    will call in here. */
                 CDEBUG(D_MOUNT, "mgc still has %d references.\n",
-                       atomic_read(&obd->u.cli.cl_mgc_refcount));
+                       cfs_atomic_read(&obd->u.cli.cl_mgc_refcount));
                 GOTO(out, rc = -EBUSY);
         }
 
@@ -857,7 +858,7 @@ out:
                 OBD_FREE(niduuid, len);
 
         /* class_import_put will get rid of the additional connections */
-        mutex_up(&mgc_start_lock);
+        cfs_mutex_up(&mgc_start_lock);
         RETURN(rc);
 }
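
mgc_start_lock above uses the "mutex" flavor of the semaphore API: CFS_DECLARE_MUTEX declares a count-1 semaphore, and cfs_mutex_down()/cfs_mutex_up() take and release it. A sketch of the assumed Linux shims (see linux-lock.h), on kernels of this era where DECLARE_MUTEX/init_MUTEX still built semaphores:

/* Illustrative only -- 'mutex' here means a binary semaphore. */
#include <linux/semaphore.h>

#define CFS_DECLARE_MUTEX(name)  DECLARE_MUTEX(name)
#define cfs_init_mutex(sem)      init_MUTEX(sem)
#define cfs_mutex_down(sem)      down(sem)
#define cfs_mutex_up(sem)        up(sem)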
 
@@ -895,7 +896,7 @@ static int server_mgc_clear_fs(struct obd_device *mgc)
         RETURN(rc);
 }
 
-DECLARE_MUTEX(server_start_lock);
+CFS_DECLARE_MUTEX(server_start_lock);
 
 /* Stop MDS/OSS if nobody is using them */
 static int server_stop_servers(int lddflags, int lsiflags)
@@ -905,7 +906,7 @@ static int server_stop_servers(int lddflags, int lsiflags)
         int rc = 0;
         ENTRY;
 
-        mutex_down(&server_start_lock);
+        cfs_mutex_down(&server_start_lock);
 
         /* Either an MDT or an OST or neither  */
         /* if this was an MDT, and there are no more MDT's, clean up the MDS */
@@ -929,7 +930,7 @@ static int server_stop_servers(int lddflags, int lsiflags)
                         rc = err;
         }
 
-        mutex_up(&server_start_lock);
+        cfs_mutex_up(&server_start_lock);
 
         RETURN(rc);
 }
@@ -1072,7 +1073,7 @@ static int server_start_targets(struct super_block *sb, struct vfsmount *mnt)
         /* If we're an MDT, make sure the global MDS is running */
         if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_MDT) {
                 /* make sure the MDS is started */
-                mutex_down(&server_start_lock);
+                cfs_mutex_down(&server_start_lock);
                 obd = class_name2obd(LUSTRE_MDS_OBDNAME);
                 if (!obd) {
                         rc = lustre_start_simple(LUSTRE_MDS_OBDNAME,
@@ -1081,19 +1082,19 @@ static int server_start_targets(struct super_block *sb, struct vfsmount *mnt)
                                                  LUSTRE_MDS_OBDNAME"_uuid",
                                                  0, 0);
                         if (rc) {
-                                mutex_up(&server_start_lock);
+                                cfs_mutex_up(&server_start_lock);
                                 CERROR("failed to start MDS: %d\n", rc);
                                 RETURN(rc);
                         }
                 }
-                mutex_up(&server_start_lock);
+                cfs_mutex_up(&server_start_lock);
         }
 #endif
 
         /* If we're an OST, make sure the global OSS is running */
         if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_OST) {
                 /* make sure OSS is started */
-                mutex_down(&server_start_lock);
+                cfs_mutex_down(&server_start_lock);
                 obd = class_name2obd(LUSTRE_OSS_OBDNAME);
                 if (!obd) {
                         rc = lustre_start_simple(LUSTRE_OSS_OBDNAME,
@@ -1101,12 +1102,12 @@ static int server_start_targets(struct super_block *sb, struct vfsmount *mnt)
                                                  LUSTRE_OSS_OBDNAME"_uuid",
                                                  0, 0);
                         if (rc) {
-                                mutex_up(&server_start_lock);
+                                cfs_mutex_up(&server_start_lock);
                                 CERROR("failed to start OSS: %d\n", rc);
                                 RETURN(rc);
                         }
                 }
-                mutex_up(&server_start_lock);
+                cfs_mutex_up(&server_start_lock);
         }
 
         /* Set the mgc fs to our server disk.  This allows the MGC
@@ -1200,7 +1201,7 @@ struct lustre_sb_info *lustre_init_lsi(struct super_block *sb)
         lsi->lsi_lmd->lmd_exclude_count = 0;
         s2lsi_nocast(sb) = lsi;
         /* we take 1 extra ref for our setup */
-        atomic_set(&lsi->lsi_mounts, 1);
+        cfs_atomic_set(&lsi->lsi_mounts, 1);
 
         /* Default umount style */
         lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
@@ -1217,7 +1218,7 @@ static int lustre_free_lsi(struct super_block *sb)
         CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
 
         /* someone didn't call server_put_mount. */
-        LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
+        LASSERT(cfs_atomic_read(&lsi->lsi_mounts) == 0);
 
         if (lsi->lsi_ldd != NULL)
                 OBD_FREE(lsi->lsi_ldd, sizeof(*lsi->lsi_ldd));
@@ -1258,8 +1259,8 @@ static int lustre_put_lsi(struct super_block *sb)
 
         LASSERT(lsi != NULL);
 
-        CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
-        if (atomic_dec_and_test(&lsi->lsi_mounts)) {
+        CDEBUG(D_MOUNT, "put %p %d\n", sb, cfs_atomic_read(&lsi->lsi_mounts));
+        if (cfs_atomic_dec_and_test(&lsi->lsi_mounts)) {
                 lustre_free_lsi(sb);
                 RETURN(1);
         }
@@ -1385,7 +1386,7 @@ static void server_wait_finished(struct vfsmount *mnt)
 
        cfs_waitq_init(&waitq);
 
-       while (cfs_atomic_read(&mnt->mnt_count) > 1) {
+       while (atomic_read(&mnt->mnt_count) > 1) {
                if (waited && (waited % 30 == 0))
                        LCONSOLE_WARN("Mount still busy with %d refs after "
                                       "%d secs.\n",
@@ -1396,7 +1397,7 @@ static void server_wait_finished(struct vfsmount *mnt)
                blocked = l_w_e_set_sigs(sigmask(SIGKILL));
                cfs_waitq_wait_event_interruptible_timeout(
                        waitq,
-                       (cfs_atomic_read(&mnt->mnt_count) == 1),
+                       (atomic_read(&mnt->mnt_count) == 1),
                        cfs_time_seconds(3),
                        rc);
                cfs_block_sigs(blocked);
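
The lsi_mounts reference count moves to the cfs_atomic_* accessors, while server_wait_finished() goes the other way and reverts to the native atomic_read(), presumably because mnt_count belongs to the kernel's struct vfsmount rather than to Lustre. On Linux either spelling compiles, since cfs_atomic_t is assumed to be the kernel type itself (see linux-types.h, new in this change):

/* Illustrative only -- the Linux cfs_atomic layer as pass-throughs. */
#include <asm/atomic.h>

typedef atomic_t cfs_atomic_t;

#define cfs_atomic_read(a)          atomic_read(a)
#define cfs_atomic_set(a, v)        atomic_set(a, v)
#define cfs_atomic_inc(a)           atomic_inc(a)
#define cfs_atomic_dec(a)           atomic_dec(a)
#define cfs_atomic_add(v, a)        atomic_add(v, a)
#define cfs_atomic_sub(v, a)        atomic_sub(v, a)
#define cfs_atomic_dec_and_test(a)  atomic_dec_and_test(a)
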
@@ -1530,10 +1531,10 @@ static void server_umount_begin(struct super_block *sb)
 }
 
 #ifndef HAVE_STATFS_DENTRY_PARAM
-static int server_statfs (struct super_block *sb, struct kstatfs *buf)
+static int server_statfs (struct super_block *sb, cfs_kstatfs_t *buf)
 {
 #else
-static int server_statfs (struct dentry *dentry, struct kstatfs *buf)
+static int server_statfs (struct dentry *dentry, cfs_kstatfs_t *buf)
 {
         struct super_block *sb = dentry->d_sb;
 #endif
@@ -1571,7 +1572,7 @@ static struct super_operations server_ops =
         .statfs         = server_statfs,
 };
 
-#define log2(n) ffz(~(n))
+#define log2(n) cfs_ffz(~(n))
 #define LUSTRE_SUPER_MAGIC 0x0BD00BD1
 
 static int server_fill_super_common(struct super_block *sb)
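
The local log2() macro is built on ffz(), "find first zero bit": ffz(~n) locates the first zero of the complement, i.e. the lowest set bit of n, which equals log2(n) whenever n is a power of two. Only the bit operation is renamed; the assumed mapping (see linux-bitops.h, new in this change):

/* Illustrative only: cfs_ffz is the kernel's ffz on Linux. */
#include <linux/bitops.h>

#define cfs_ffz(x)  ffz(x)

/* Example: log2(8) == cfs_ffz(~8UL) == 3, since ~8 = ...11110111. */
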
@@ -2026,7 +2027,7 @@ int lustre_fill_super(struct super_block *sb, void *data, int silent)
          * Disable lockdep during mount, because mount locking patterns are
          * `special'.
          */
-        lockdep_off();
+        cfs_lockdep_off();
 
         /* Figure out the lmd from the mount options */
         if (lmd_parse((char *)data, lmd)) {
@@ -2074,7 +2075,7 @@ out:
                 CDEBUG(D_SUPER, "Mount %s complete\n",
                        lmd->lmd_dev);
         }
-        lockdep_on();
+        cfs_lockdep_on();
         return rc;
 }
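
lustre_fill_super() brackets the mount path with lockdep_off()/lockdep_on() to silence the dependency checker for the current task; the prefixed aliases are assumed to be straight pass-throughs (lockdep.h already supplies no-op stubs on kernels without CONFIG_LOCKDEP):

/* Illustrative only -- lockdep wrappers (see linux-lock.h). */
#include <linux/lockdep.h>

#define cfs_lockdep_off()  lockdep_off()
#define cfs_lockdep_on()   lockdep_on()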
 
index 153048a..774f0ba 100644
@@ -163,7 +163,8 @@ void obdo_from_iattr(struct obdo *oa, struct iattr *attr, unsigned int ia_valid)
         if (ia_valid & ATTR_MODE) {
                 oa->o_mode = attr->ia_mode;
                 oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
-                if (!in_group_p(oa->o_gid) && !cfs_capable(CFS_CAP_FSETID))
+                if (!cfs_curproc_is_in_groups(oa->o_gid) &&
+                    !cfs_capable(CFS_CAP_FSETID))
                         oa->o_mode &= ~S_ISGID;
         }
         if (ia_valid & ATTR_UID) {
@@ -211,7 +212,8 @@ void iattr_from_obdo(struct iattr *attr, struct obdo *oa, obd_flag valid)
         if (valid & OBD_MD_FLMODE) {
                 attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
                 attr->ia_valid |= ATTR_MODE;
-                if (!in_group_p(oa->o_gid) && !cfs_capable(CFS_CAP_FSETID))
+                if (!cfs_curproc_is_in_groups(oa->o_gid) &&
+                    !cfs_capable(CFS_CAP_FSETID))
                         attr->ia_mode &= ~S_ISGID;
         }
         if (valid & OBD_MD_FLUID) {
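
in_group_p() checks whether the current task's group set includes the given gid and is Linux-specific, so the portable name spells out what it does. The assumed Linux shim, with the userspace and winnt ports expected to supply their own implementations:

/* Illustrative only -- group-membership check for the current process. */
#include <linux/sched.h>

#define cfs_curproc_is_in_groups(gid)  in_group_p(gid)
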
index 6196380..d7689ee 100644
@@ -54,7 +54,7 @@
 #include <obd_support.h>
 #include <obd_class.h>
 
-void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs)
+void statfs_pack(struct obd_statfs *osfs, cfs_kstatfs_t *sfs)
 {
         memset(osfs, 0, sizeof(*osfs));
         osfs->os_type = sfs->f_type;
@@ -67,7 +67,7 @@ void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs)
         osfs->os_namelen = sfs->f_namelen;
 }
 
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs)
+void statfs_unpack(cfs_kstatfs_t *sfs, struct obd_statfs *osfs)
 {
         memset(sfs, 0, sizeof(*sfs));
         sfs->f_type = osfs->os_type;
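
struct kstatfs is the kernel-internal statfs representation, so its portable alias is a typedef rather than a wrapper; the userspace build would map cfs_kstatfs_t onto its own statfs structure instead. The assumed Linux definition (see linux-fs.h):

/* Illustrative only. */
#include <linux/statfs.h>

typedef struct kstatfs cfs_kstatfs_t;
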
index 58b308c..72fb655 100644
@@ -110,9 +110,9 @@ static int echo_destroy_export(struct obd_export *exp)
 {
         obd_id id;
 
-        spin_lock(&obddev->u.echo.eo_lock);
+        cfs_spin_lock(&obddev->u.echo.eo_lock);
         id = ++obddev->u.echo.eo_lastino;
-        spin_unlock(&obddev->u.echo.eo_lock);
+        cfs_spin_unlock(&obddev->u.echo.eo_lock);
 
         return id;
 }
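
Spinlocks follow the same recipe as the semaphores earlier: one typedef plus one alias per operation, including the _bh variants used further down in this patch. The assumed Linux mapping (see linux-lock.h):

/* Illustrative only -- spinlock wrappers as pass-throughs. */
#include <linux/spinlock.h>

typedef spinlock_t cfs_spinlock_t;

#define cfs_spin_lock_init(l)  spin_lock_init(l)
#define cfs_spin_lock(l)       spin_lock(l)
#define cfs_spin_unlock(l)     spin_unlock(l)
#define cfs_spin_lock_bh(l)    spin_lock_bh(l)
#define cfs_spin_unlock_bh(l)  spin_unlock_bh(l)
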
@@ -431,7 +431,7 @@ int echo_preprw(int cmd, struct obd_export *export, struct obdo *oa,
                 }
         }
 
-        atomic_add(*pages, &obd->u.echo.eo_prep);
+        cfs_atomic_add(*pages, &obd->u.echo.eo_prep);
 
         if (cmd & OBD_BRW_READ)
                 lprocfs_counter_add(obd->obd_stats, LPROC_ECHO_READ_BYTES,
@@ -441,7 +441,7 @@ int echo_preprw(int cmd, struct obd_export *export, struct obdo *oa,
                                     tot_bytes);
 
         CDEBUG(D_PAGE, "%d pages allocated after prep\n",
-               atomic_read(&obd->u.echo.eo_prep));
+               cfs_atomic_read(&obd->u.echo.eo_prep));
 
         RETURN(0);
 
@@ -458,7 +458,7 @@ preprw_cleanup:
                  * lose the extra ref gained above */
                 OBD_PAGE_FREE(res[i].page);
                 res[i].page = NULL;
-                atomic_dec(&obd->u.echo.eo_prep);
+                cfs_atomic_dec(&obd->u.echo.eo_prep);
         }
 
         return rc;
@@ -518,14 +518,14 @@ int echo_commitrw(int cmd, struct obd_export *export, struct obdo *oa,
 
         }
 
-        atomic_sub(pgs, &obd->u.echo.eo_prep);
+        cfs_atomic_sub(pgs, &obd->u.echo.eo_prep);
 
         CDEBUG(D_PAGE, "%d pages remain after commit\n",
-               atomic_read(&obd->u.echo.eo_prep));
+               cfs_atomic_read(&obd->u.echo.eo_prep));
         RETURN(rc);
 
 commitrw_cleanup:
-        atomic_sub(pgs, &obd->u.echo.eo_prep);
+        cfs_atomic_sub(pgs, &obd->u.echo.eo_prep);
 
         CERROR("cleaning up %d pages (%d obdos)\n",
                niocount - pgs - 1, objcount);
@@ -538,7 +538,7 @@ commitrw_cleanup:
 
                 /* NB see comment above regarding persistent pages */
                 OBD_PAGE_FREE(page);
-                atomic_dec(&obd->u.echo.eo_prep);
+                cfs_atomic_dec(&obd->u.echo.eo_prep);
         }
         return rc;
 }
@@ -552,7 +552,7 @@ static int echo_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         char                       ns_name[48];
         ENTRY;
 
-        spin_lock_init(&obd->u.echo.eo_lock);
+        cfs_spin_lock_init(&obd->u.echo.eo_lock);
         obd->u.echo.eo_lastino = ECHO_INIT_OBJID;
 
         sprintf(ns_name, "echotgt-%s", obd->obd_uuid.uuid);
@@ -594,16 +594,16 @@ static int echo_cleanup(struct obd_device *obd)
         lprocfs_obd_cleanup(obd);
         lprocfs_free_obd_stats(obd);
 
-        ldlm_lock_decref (&obd->u.echo.eo_nl_lock, LCK_NL);
+        ldlm_lock_decref(&obd->u.echo.eo_nl_lock, LCK_NL);
 
         /* XXX Bug 3413; wait for a bit to ensure the BL callback has
          * happened before calling ldlm_namespace_free() */
-        cfs_schedule_timeout (CFS_TASK_UNINT, cfs_time_seconds(1));
+        cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT, cfs_time_seconds(1));
 
         ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
         obd->obd_namespace = NULL;
 
-        leaked = atomic_read(&obd->u.echo.eo_prep);
+        leaked = cfs_atomic_read(&obd->u.echo.eo_prep);
         if (leaked != 0)
                 CERROR("%d prep/commitrw pages leaked\n", leaked);
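
Alongside the mechanical renames, cfs_schedule_timeout(state, timeout) becomes cfs_schedule_timeout_and_set_state(), which makes the task-state side effect visible at the call site. A sketch of what the Linux implementation is assumed to boil down to (see linux-prim.c; the exact signature may differ):

/* Illustrative only. */
#include <linux/sched.h>

void cfs_schedule_timeout_and_set_state(long state, long timeout)
{
        /* e.g. CFS_TASK_UNINT maps to TASK_UNINTERRUPTIBLE */
        set_current_state(state);
        schedule_timeout(timeout);  /* sleep for `timeout' jiffies */
}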
 
index 3f54f2e..2109aa8 100644
@@ -69,9 +69,9 @@ struct echo_object {
         struct cl_object_header eo_hdr;
 
         struct echo_device     *eo_dev;
-        struct list_head        eo_obj_chain;
+        cfs_list_t              eo_obj_chain;
         struct lov_stripe_md   *eo_lsm;
-        atomic_t                eo_npages;
+        cfs_atomic_t            eo_npages;
         int                     eo_deleted;
 };
 
@@ -87,10 +87,10 @@ struct echo_page {
 
 struct echo_lock {
         struct cl_lock_slice   el_cl;
-        struct list_head       el_chain;
+        cfs_list_t             el_chain;
         struct echo_object    *el_object;
         __u64                  el_cookie;
-        atomic_t               el_refcount;
+        cfs_atomic_t           el_refcount;
 };
 
 struct echo_io {
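
Structure definitions change spelling as well: cfs_list_t replaces struct list_head and cfs_atomic_t replaces atomic_t in the echo headers, so the same declarations build on the winnt and userspace ports. The assumed Linux mapping (see libcfs/list.h) is a typedef plus one alias per helper:

/* Illustrative only -- list wrappers as pass-throughs. */
#include <linux/list.h>

typedef struct list_head cfs_list_t;

#define CFS_INIT_LIST_HEAD(head)        INIT_LIST_HEAD(head)
#define cfs_list_empty(head)            list_empty(head)
#define cfs_list_add(item, head)        list_add(item, head)
#define cfs_list_add_tail(item, head)   list_add_tail(item, head)
#define cfs_list_del(item)              list_del(item)
#define cfs_list_del_init(item)         list_del_init(item)
#define cfs_list_entry(ptr, type, mbr)  list_entry(ptr, type, mbr)
#define cfs_list_for_each(pos, head)    list_for_each(pos, head)
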
@@ -285,7 +285,7 @@ static void echo_page_fini(const struct lu_env *env,
         cfs_page_t *vmpage      = ep->ep_vmpage;
         ENTRY;
 
-        atomic_dec(&eco->eo_npages);
+        cfs_atomic_dec(&eco->eo_npages);
         page_cache_release(vmpage);
         OBD_SLAB_FREE_PTR(ep, echo_page_kmem);
         EXIT;
@@ -339,7 +339,7 @@ static void echo_lock_fini(const struct lu_env *env,
 {
         struct echo_lock *ecl = cl2echo_lock(slice);
 
-        LASSERT(list_empty(&ecl->el_chain));
+        LASSERT(cfs_list_empty(&ecl->el_chain));
         OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
 }
 
@@ -348,7 +348,7 @@ static void echo_lock_delete(const struct lu_env *env,
 {
         struct echo_lock *ecl      = cl2echo_lock(slice);
 
-        LASSERT(list_empty(&ecl->el_chain));
+        LASSERT(cfs_list_empty(&ecl->el_chain));
 }
 
 static int echo_lock_fits_into(const struct lu_env *env,
@@ -386,7 +386,7 @@ static struct cl_page *echo_page_init(const struct lu_env *env,
                 ep->ep_vmpage = vmpage;
                 page_cache_get(vmpage);
                 cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
-                atomic_inc(&eco->eo_npages);
+                cfs_atomic_inc(&eco->eo_npages);
         }
         RETURN(ERR_PTR(ep ? 0 : -ENOMEM));
 }
@@ -409,7 +409,7 @@ static int echo_lock_init(const struct lu_env *env,
                 cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
                 el->el_object = cl2echo_obj(obj);
                 CFS_INIT_LIST_HEAD(&el->el_chain);
-                atomic_set(&el->el_refcount, 0);
+                cfs_atomic_set(&el->el_refcount, 0);
         }
         RETURN(el == NULL ? -ENOMEM : 0);
 }
@@ -459,14 +459,14 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
         LASSERT(econf->eoc_md);
         eco->eo_lsm = *econf->eoc_md;
         eco->eo_dev = ed;
-        atomic_set(&eco->eo_npages, 0);
+        cfs_atomic_set(&eco->eo_npages, 0);
 
         /* clear the lsm pointer so that it won't get freed. */
         *econf->eoc_md = NULL;
 
-        spin_lock(&ec->ec_lock);
-        list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
-        spin_unlock(&ec->ec_lock);
+        cfs_spin_lock(&ec->ec_lock);
+        cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+        cfs_spin_unlock(&ec->ec_lock);
 
         RETURN(0);
 }
@@ -478,11 +478,11 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
         struct lov_stripe_md *lsm  = eco->eo_lsm;
         ENTRY;
 
-        LASSERT(atomic_read(&eco->eo_npages) == 0);
+        LASSERT(cfs_atomic_read(&eco->eo_npages) == 0);
 
-        spin_lock(&ec->ec_lock);
-        list_del_init(&eco->eo_obj_chain);
-        spin_unlock(&ec->ec_lock);
+        cfs_spin_lock(&ec->ec_lock);
+        cfs_list_del_init(&eco->eo_obj_chain);
+        cfs_spin_unlock(&ec->ec_lock);
 
         lu_object_fini(obj);
         lu_object_header_fini(obj->lo_header);
@@ -794,25 +794,26 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
         CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n", ed, next);
 
         /* destroy locks */
-        spin_lock(&ec->ec_lock);
-        while (!list_empty(&ec->ec_locks)) {
-                struct echo_lock *ecl = list_entry(ec->ec_locks.next,
-                                                   struct echo_lock, el_chain);
+        cfs_spin_lock(&ec->ec_lock);
+        while (!cfs_list_empty(&ec->ec_locks)) {
+                struct echo_lock *ecl = cfs_list_entry(ec->ec_locks.next,
+                                                       struct echo_lock,
+                                                       el_chain);
                 int still_used = 0;
 
-                if (atomic_dec_and_test(&ecl->el_refcount))
-                        list_del_init(&ecl->el_chain);
+                if (cfs_atomic_dec_and_test(&ecl->el_refcount))
+                        cfs_list_del_init(&ecl->el_chain);
                 else
                         still_used = 1;
-                spin_unlock(&ec->ec_lock);
+                cfs_spin_unlock(&ec->ec_lock);
 
                 CERROR("echo client: pending lock %p refs %d\n",
-                       ecl, atomic_read(&ecl->el_refcount));
+                       ecl, cfs_atomic_read(&ecl->el_refcount));
 
                 echo_lock_release(env, ecl, still_used);
-                spin_lock(&ec->ec_lock);
+                cfs_spin_lock(&ec->ec_lock);
         }
-        spin_unlock(&ec->ec_lock);
+        cfs_spin_unlock(&ec->ec_lock);
 
         LASSERT(ed->ed_site);
         lu_site_purge(env, &ed->ed_site->cs_lu, -1);
@@ -822,26 +823,28 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
          * all of cached objects. Anyway, probably the echo device is being
          * parallelly accessed.
          */
-        spin_lock(&ec->ec_lock);
-        list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+        cfs_spin_lock(&ec->ec_lock);
+        cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
                 eco->eo_deleted = 1;
-        spin_unlock(&ec->ec_lock);
+        cfs_spin_unlock(&ec->ec_lock);
 
         /* purge again */
         lu_site_purge(env, &ed->ed_site->cs_lu, -1);
 
-        CDEBUG(D_INFO, "Waiting for the reference of echo object to be dropped\n");
+        CDEBUG(D_INFO,
+               "Waiting for the reference of echo object to be dropped\n");
 
         /* Wait for the last reference to be dropped. */
-        spin_lock(&ec->ec_lock);
-        while (!list_empty(&ec->ec_objects)) {
-                spin_unlock(&ec->ec_lock);
+        cfs_spin_lock(&ec->ec_lock);
+        while (!cfs_list_empty(&ec->ec_objects)) {
+                cfs_spin_unlock(&ec->ec_lock);
                 CERROR("echo_client still has objects at cleanup time, "
                        "wait for 1 second\n");
-                cfs_schedule_timeout(CFS_TASK_UNINT, cfs_time_seconds(1));
-                spin_lock(&ec->ec_lock);
+                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+                                                   cfs_time_seconds(1));
+                cfs_spin_lock(&ec->ec_lock);
         }
-        spin_unlock(&ec->ec_lock);
+        cfs_spin_unlock(&ec->ec_lock);
 
         CDEBUG(D_INFO, "No object exists, exiting...\n");
 
@@ -965,7 +968,7 @@ static int cl_echo_object_put(struct echo_object *eco)
         if (eco->eo_deleted) {
                 struct lu_object_header *loh = obj->co_lu.lo_header;
                 LASSERT(&eco->eo_hdr == luh2coh(loh));
-                set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
+                cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
                 cl_object_prune(env, obj);
         }
 
@@ -1006,14 +1009,14 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
                 rc = cl_wait(env, lck);
                 if (rc == 0) {
                         el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
-                        spin_lock(&ec->ec_lock);
-                        if (list_empty(&el->el_chain)) {
-                                list_add(&el->el_chain, &ec->ec_locks);
+                        cfs_spin_lock(&ec->ec_lock);
+                        if (cfs_list_empty(&el->el_chain)) {
+                                cfs_list_add(&el->el_chain, &ec->ec_locks);
                                 el->el_cookie = ++ec->ec_unique;
                         }
-                        atomic_inc(&el->el_refcount);
+                        cfs_atomic_inc(&el->el_refcount);
                         *cookie = el->el_cookie;
-                        spin_unlock(&ec->ec_lock);
+                        cfs_spin_unlock(&ec->ec_lock);
                 } else
                         cl_lock_release(env, lck, "ec enqueue", cfs_current());
         }
@@ -1056,25 +1059,25 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
 {
         struct echo_client_obd *ec = ed->ed_ec;
         struct echo_lock       *ecl = NULL;
-        struct list_head       *el;
+        cfs_list_t             *el;
         int found = 0, still_used = 0;
         ENTRY;
 
         LASSERT(ec != NULL);
-        spin_lock (&ec->ec_lock);
-        list_for_each (el, &ec->ec_locks) {
-                ecl = list_entry (el, struct echo_lock, el_chain);
+        cfs_spin_lock (&ec->ec_lock);
+        cfs_list_for_each (el, &ec->ec_locks) {
+                ecl = cfs_list_entry (el, struct echo_lock, el_chain);
                 CDEBUG(D_INFO, "ecl: %p, cookie: %llx\n", ecl, ecl->el_cookie);
                 found = (ecl->el_cookie == cookie);
                 if (found) {
-                        if (atomic_dec_and_test(&ecl->el_refcount))
-                                list_del_init(&ecl->el_chain);
+                        if (cfs_atomic_dec_and_test(&ecl->el_refcount))
+                                cfs_list_del_init(&ecl->el_chain);
                         else
                                 still_used = 1;
                         break;
                 }
         }
-        spin_unlock (&ec->ec_lock);
+        cfs_spin_unlock (&ec->ec_lock);
 
         if (!found)
                 RETURN(-ENOENT);
@@ -1225,12 +1228,12 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
         if (nob > ulsm_nob)
                 return (-EINVAL);
 
-        if (copy_to_user (ulsm, lsm, sizeof(ulsm)))
+        if (cfs_copy_to_user (ulsm, lsm, sizeof(ulsm)))
                 return (-EFAULT);
 
         for (i = 0; i < lsm->lsm_stripe_count; i++) {
-                if (copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
-                                  sizeof(lsm->lsm_oinfo[0])))
+                if (cfs_copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
+                                      sizeof(lsm->lsm_oinfo[0])))
                         return (-EFAULT);
         }
         return 0;
@@ -1246,7 +1249,7 @@ echo_copyin_lsm (struct echo_device *ed, struct lov_stripe_md *lsm,
         if (ulsm_nob < sizeof (*lsm))
                 return (-EINVAL);
 
-        if (copy_from_user (lsm, ulsm, sizeof (*lsm)))
+        if (cfs_copy_from_user (lsm, ulsm, sizeof (*lsm)))
                 return (-EFAULT);
 
         if (lsm->lsm_stripe_count > ec->ec_nstripes ||
@@ -1257,9 +1260,10 @@ echo_copyin_lsm (struct echo_device *ed, struct lov_stripe_md *lsm,
 
 
         for (i = 0; i < lsm->lsm_stripe_count; i++) {
-                if (copy_from_user(lsm->lsm_oinfo[i],
-                                   ((struct lov_stripe_md *)ulsm)->lsm_oinfo[i],
-                                   sizeof(lsm->lsm_oinfo[0])))
+                if (cfs_copy_from_user(lsm->lsm_oinfo[i],
+                                       ((struct lov_stripe_md *)ulsm)-> \
+                                       lsm_oinfo[i],
+                                       sizeof(lsm->lsm_oinfo[0])))
                         return (-EFAULT);
         }
         return (0);
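
User-memory copies gain the prefix too; the assumed Linux mapping (see linux-mem.h) is again one-to-one. Note that the pre-existing sizeof(ulsm) in echo_copyout_lsm() above, which copies only pointer-size bytes rather than sizeof(*ulsm), is carried over unchanged by the rename.

/* Illustrative only -- user-copy wrappers. */
#include <asm/uaccess.h>

#define cfs_copy_to_user(to, from, n)    copy_to_user(to, from, n)
#define cfs_copy_from_user(to, from, n)  copy_from_user(to, from, n)
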
@@ -1789,7 +1793,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp,
         int                     i;
         ENTRY;
 
-        unlock_kernel();
+        cfs_unlock_kernel();
 
         memset(&dummy_oti, 0, sizeof(dummy_oti));
 
@@ -1917,7 +1921,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp,
                 ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
         }
 
-        lock_kernel();
+        cfs_lock_kernel();
 
         return rc;
 }
@@ -1943,7 +1947,7 @@ static int echo_client_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
                 RETURN(-EINVAL);
         }
 
-        spin_lock_init (&ec->ec_lock);
+        cfs_spin_lock_init (&ec->ec_lock);
         CFS_INIT_LIST_HEAD (&ec->ec_objects);
         CFS_INIT_LIST_HEAD (&ec->ec_locks);
         ec->ec_unique = 0;
@@ -1964,9 +1968,9 @@ static int echo_client_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
         rc = obd_connect(NULL, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
         if (rc == 0) {
                 /* Turn off pinger because it connects to tgt obd directly. */
-                spin_lock(&tgt->obd_dev_lock);
-                list_del_init(&ec->ec_exp->exp_obd_chain_timed);
-                spin_unlock(&tgt->obd_dev_lock);
+                cfs_spin_lock(&tgt->obd_dev_lock);
+                cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+                cfs_spin_unlock(&tgt->obd_dev_lock);
         }
 
         OBD_FREE(ocd, sizeof(*ocd));
@@ -1986,12 +1990,12 @@ static int echo_client_cleanup(struct obd_device *obddev)
         int rc;
         ENTRY;
 
-        if (!list_empty(&obddev->obd_exports)) {
+        if (!cfs_list_empty(&obddev->obd_exports)) {
                 CERROR("still has clients!\n");
                 RETURN(-EBUSY);
         }
 
-        LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ec->ec_exp->exp_refcount) > 0);
         rc = obd_disconnect(ec->ec_exp);
         if (rc != 0)
                 CERROR("fail to disconnect device: %d\n", rc);
@@ -2034,10 +2038,10 @@ static int echo_client_disconnect(struct obd_export *exp)
         ec = &obd->u.echo_client;
 
         /* no more contention on export's lock list */
-        while (!list_empty (&exp->exp_ec_data.eced_locks)) {
-                ecl = list_entry (exp->exp_ec_data.eced_locks.next,
-                                  struct ec_lock, ecl_exp_chain);
-                list_del (&ecl->ecl_exp_chain);
+        while (!cfs_list_empty (&exp->exp_ec_data.eced_locks)) {
+                ecl = cfs_list_entry (exp->exp_ec_data.eced_locks.next,
+                                      struct ec_lock, ecl_exp_chain);
+                cfs_list_del (&ecl->ecl_exp_chain);
 
                 rc = obd_cancel(ec->ec_exp, ecl->ecl_object->eco_lsm,
                                  ecl->ecl_mode, &ecl->ecl_lock_handle);
index d5a4837..1d062b4 100644
@@ -107,9 +107,9 @@ int filter_version_get_check(struct obd_export *exp,
             oti->oti_pre_version != curr_version) {
                 CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
                        oti->oti_pre_version, curr_version);
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
                 exp->exp_vbr_failed = 1;
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
                 RETURN (-EOVERFLOW);
         }
         oti->oti_pre_version = curr_version;
@@ -134,12 +134,12 @@ int filter_finish_transno(struct obd_export *exp, struct inode *inode,
         if (!exp->exp_obd->obd_replayable || oti == NULL)
                 RETURN(rc);
 
-        mutex_down(&fed->fed_lcd_lock);
+        cfs_mutex_down(&fed->fed_lcd_lock);
         lcd = fed->fed_lcd;
         /* if the export has already been disconnected, we have no last_rcvd slot,
          * update server data with latest transno then */
         if (lcd == NULL) {
-                mutex_up(&fed->fed_lcd_lock);
+                cfs_mutex_up(&fed->fed_lcd_lock);
                 CWARN("commit transaction for disconnected client %s: rc %d\n",
                       exp->exp_client_uuid.uuid, rc);
                 err = filter_update_server_data(exp->exp_obd,
@@ -149,7 +149,7 @@ int filter_finish_transno(struct obd_export *exp, struct inode *inode,
         }
 
         /* we don't allocate new transnos for replayed requests */
-        spin_lock(&filter->fo_translock);
+        cfs_spin_lock(&filter->fo_translock);
         if (oti->oti_transno == 0) {
                 last_rcvd = le64_to_cpu(filter->fo_fsd->lsd_last_transno) + 1;
                 filter->fo_fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
@@ -165,7 +165,7 @@ int filter_finish_transno(struct obd_export *exp, struct inode *inode,
         lcd->lcd_last_transno = cpu_to_le64(last_rcvd);
         lcd->lcd_pre_versions[0] = cpu_to_le64(oti->oti_pre_version);
         lcd->lcd_last_xid = cpu_to_le64(oti->oti_xid);
-        spin_unlock(&filter->fo_translock);
+        cfs_spin_unlock(&filter->fo_translock);
 
         if (inode)
                 fsfilt_set_version(exp->exp_obd, inode, last_rcvd);
@@ -198,7 +198,7 @@ int filter_finish_transno(struct obd_export *exp, struct inode *inode,
 
         CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
                last_rcvd, lcd->lcd_uuid, fed->fed_lr_idx, err);
-        mutex_up(&fed->fed_lcd_lock);
+        cfs_mutex_up(&fed->fed_lcd_lock);
         RETURN(rc);
 }
 
@@ -216,7 +216,7 @@ static void init_brw_stats(struct brw_stats *brw_stats)
 {
         int i;
         for (i = 0; i < BRW_LAST; i++)
-                spin_lock_init(&brw_stats->hist[i].oh_lock);
+                cfs_spin_lock_init(&brw_stats->hist[i].oh_lock);
 }
 
 static int lprocfs_init_rw_stats(struct obd_device *obd,
@@ -326,20 +326,20 @@ static int filter_client_add(struct obd_device *obd, struct obd_export *exp,
          * there's no need for extra complication here
          */
         if (new_client) {
-                cl_idx = find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
+                cl_idx = cfs_find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
         repeat:
                 if (cl_idx >= LR_MAX_CLIENTS) {
                         CERROR("no room for %u client - fix LR_MAX_CLIENTS\n",
                                cl_idx);
                         RETURN(-EOVERFLOW);
                 }
-                if (test_and_set_bit(cl_idx, bitmap)) {
-                        cl_idx = find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
-                                                    cl_idx);
+                if (cfs_test_and_set_bit(cl_idx, bitmap)) {
+                        cl_idx = cfs_find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
+                                                        cl_idx);
                         goto repeat;
                 }
         } else {
-                if (test_and_set_bit(cl_idx, bitmap)) {
+                if (cfs_test_and_set_bit(cl_idx, bitmap)) {
                         CERROR("FILTER client %d: bit already set in bitmap!\n",
                                cl_idx);
                         LBUG();
@@ -349,7 +349,7 @@ static int filter_client_add(struct obd_device *obd, struct obd_export *exp,
         fed->fed_lr_idx = cl_idx;
         fed->fed_lr_off = le32_to_cpu(filter->fo_fsd->lsd_client_start) +
                 cl_idx * le16_to_cpu(filter->fo_fsd->lsd_client_size);
-        init_mutex(&fed->fed_lcd_lock);
+        cfs_init_mutex(&fed->fed_lcd_lock);
         LASSERTF(fed->fed_lr_off > 0, "fed_lr_off = %llu\n", fed->fed_lr_off);
 
         CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
@@ -380,9 +380,9 @@ static int filter_client_add(struct obd_device *obd, struct obd_export *exp,
                                                    target_client_add_cb,
                                                    class_export_cb_get(exp));
                         if (rc == 0) {
-                                spin_lock(&exp->exp_lock);
+                                cfs_spin_lock(&exp->exp_lock);
                                 exp->exp_need_sync = 1;
-                                spin_unlock(&exp->exp_lock);
+                                cfs_spin_unlock(&exp->exp_lock);
                         }
                         rc = fsfilt_write_record(obd, filter->fo_rcvd_filp,
                                                  fed->fed_lcd,
@@ -440,7 +440,7 @@ static int filter_client_free(struct obd_export *exp)
 
         /* Clear the bit _after_ zeroing out the client so we don't
            race with filter_client_add and zero out new clients.*/
-        if (!test_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
+        if (!cfs_test_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
                 CERROR("FILTER client %u: bit already clear in bitmap!!\n",
                        fed->fed_lr_idx);
                 LBUG();
@@ -452,11 +452,11 @@ static int filter_client_free(struct obd_export *exp)
          * be in server data or in client data in case of failure */
         filter_update_server_data(obd, filter->fo_rcvd_filp, filter->fo_fsd);
 
-        mutex_down(&fed->fed_lcd_lock);
+        cfs_mutex_down(&fed->fed_lcd_lock);
         rc = fsfilt_write_record(obd, filter->fo_rcvd_filp, &zero_lcd,
                                  sizeof(zero_lcd), &off, 0);
         fed->fed_lcd = NULL;
-        mutex_up(&fed->fed_lcd_lock);
+        cfs_mutex_up(&fed->fed_lcd_lock);
         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
 
         CDEBUG(rc == 0 ? D_INFO : D_ERROR,
@@ -464,7 +464,8 @@ static int filter_client_free(struct obd_export *exp)
                lcd->lcd_uuid, fed->fed_lr_idx, fed->fed_lr_off,
                LAST_RCVD, rc);
 
-        if (!test_and_clear_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
+        if (!cfs_test_and_clear_bit(fed->fed_lr_idx,
+                                    filter->fo_last_rcvd_slots)) {
                 CERROR("FILTER client %u: bit already clear in bitmap!!\n",
                        fed->fed_lr_idx);
                 LBUG();
@@ -472,9 +473,9 @@ static int filter_client_free(struct obd_export *exp)
         OBD_FREE_PTR(lcd);
         RETURN(0);
 free:
-        mutex_down(&fed->fed_lcd_lock);
+        cfs_mutex_down(&fed->fed_lcd_lock);
         fed->fed_lcd = NULL;
-        mutex_up(&fed->fed_lcd_lock);
+        cfs_mutex_up(&fed->fed_lcd_lock);
         OBD_FREE_PTR(lcd);
 
         return 0;
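
filter_client_add()/filter_client_free() manage last_rcvd slots through a bitmap, and every bit operation now goes through the prefixed names. The assumed Linux mapping (linux-bitops.h is new in this change):

/* Illustrative only -- bitmap wrappers as pass-throughs. */
#include <linux/bitops.h>

#define cfs_test_bit(nr, addr)            test_bit(nr, addr)
#define cfs_set_bit(nr, addr)             set_bit(nr, addr)
#define cfs_test_and_set_bit(nr, addr)    test_and_set_bit(nr, addr)
#define cfs_test_and_clear_bit(nr, addr)  test_and_clear_bit(nr, addr)
#define cfs_find_first_zero_bit(a, sz)    find_first_zero_bit(a, sz)
#define cfs_find_next_zero_bit(a, sz, o)  find_next_zero_bit(a, sz, o)
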
@@ -489,7 +490,7 @@ static inline void filter_fmd_put_nolock(struct filter_export_data *fed,
                 /* XXX when we have persistent reservations and the handle
                  * is stored herein we need to drop it here. */
                 fed->fed_mod_count--;
-                list_del(&fmd->fmd_list);
+                cfs_list_del(&fmd->fmd_list);
                 OBD_SLAB_FREE(fmd, ll_fmd_cachep, sizeof(*fmd));
         }
 }
@@ -503,9 +504,9 @@ void filter_fmd_put(struct obd_export *exp, struct filter_mod_data *fmd)
                 return;
 
         fed = &exp->exp_filter_data;
-        spin_lock(&fed->fed_lock);
+        cfs_spin_lock(&fed->fed_lock);
         filter_fmd_put_nolock(fed, fmd); /* caller reference */
-        spin_unlock(&fed->fed_lock);
+        cfs_spin_unlock(&fed->fed_lock);
 }
 
 /* expire entries from the end of the list if there are too many
@@ -516,25 +517,25 @@ static void filter_fmd_expire_nolock(struct filter_obd *filter,
 {
         struct filter_mod_data *fmd, *tmp;
 
-        list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
+        cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
                 if (fmd == keep)
                         break;
 
-                if (time_before(jiffies, fmd->fmd_expire) &&
+                if (cfs_time_before(jiffies, fmd->fmd_expire) &&
                     fed->fed_mod_count < filter->fo_fmd_max_num)
                         break;
 
-                list_del_init(&fmd->fmd_list);
+                cfs_list_del_init(&fmd->fmd_list);
                 filter_fmd_put_nolock(fed, fmd); /* list reference */
         }
 }
 
 void filter_fmd_expire(struct obd_export *exp)
 {
-        spin_lock(&exp->exp_filter_data.fed_lock);
+        cfs_spin_lock(&exp->exp_filter_data.fed_lock);
         filter_fmd_expire_nolock(&exp->exp_obd->u.filter,
                                  &exp->exp_filter_data, NULL);
-        spin_unlock(&exp->exp_filter_data.fed_lock);
+        cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
 }
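
The fmd expiry check compares raw jiffies values, so time_before() becomes cfs_time_before() to keep the wrap-safe comparison behind the portable name. The assumed Linux mapping (see linux-time.h):

/* Illustrative only -- jiffies-based time helpers. */
#include <linux/jiffies.h>

#define cfs_time_before(a, b)  time_before(a, b)
#define cfs_time_seconds(s)    ((s) * HZ)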
 
 /* find specified objid, group in export fmd list.
@@ -547,11 +548,11 @@ static struct filter_mod_data *filter_fmd_find_nolock(struct filter_obd *filter,
 
         LASSERT_SPIN_LOCKED(&fed->fed_lock);
 
-        list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
+        cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
                 if (fmd->fmd_id == objid && fmd->fmd_gr == group) {
                         found = fmd;
-                        list_del(&fmd->fmd_list);
-                        list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
+                        cfs_list_del(&fmd->fmd_list);
+                        cfs_list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
                         fmd->fmd_expire = jiffies + filter->fo_fmd_max_age;
                         break;
                 }
@@ -568,12 +569,12 @@ struct filter_mod_data *filter_fmd_find(struct obd_export *exp,
 {
         struct filter_mod_data *fmd;
 
-        spin_lock(&exp->exp_filter_data.fed_lock);
+        cfs_spin_lock(&exp->exp_filter_data.fed_lock);
         fmd = filter_fmd_find_nolock(&exp->exp_obd->u.filter,
                                      &exp->exp_filter_data, objid, group);
         if (fmd)
                 fmd->fmd_refcount++;    /* caller reference */
-        spin_unlock(&exp->exp_filter_data.fed_lock);
+        cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
 
         return fmd;
 }
@@ -591,11 +592,12 @@ struct filter_mod_data *filter_fmd_get(struct obd_export *exp,
 
         OBD_SLAB_ALLOC_PTR_GFP(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO);
 
-        spin_lock(&fed->fed_lock);
+        cfs_spin_lock(&fed->fed_lock);
         found = filter_fmd_find_nolock(&exp->exp_obd->u.filter,fed,objid,group);
         if (fmd_new) {
                 if (found == NULL) {
-                        list_add_tail(&fmd_new->fmd_list, &fed->fed_mod_list);
+                        cfs_list_add_tail(&fmd_new->fmd_list,
+                                          &fed->fed_mod_list);
                         fmd_new->fmd_id = objid;
                         fmd_new->fmd_gr = group;
                         fmd_new->fmd_refcount++;   /* list reference */
@@ -611,7 +613,7 @@ struct filter_mod_data *filter_fmd_get(struct obd_export *exp,
                         exp->exp_obd->u.filter.fo_fmd_max_age;
         }
 
-        spin_unlock(&fed->fed_lock);
+        cfs_spin_unlock(&fed->fed_lock);
 
         return found;
 }
@@ -625,13 +627,13 @@ static void filter_fmd_drop(struct obd_export *exp, obd_id objid, obd_gr group)
 {
         struct filter_mod_data *found = NULL;
 
-        spin_lock(&exp->exp_filter_data.fed_lock);
+        cfs_spin_lock(&exp->exp_filter_data.fed_lock);
         found = filter_fmd_find_nolock(&exp->exp_filter_data, objid, group);
         if (found) {
-                list_del_init(&found->fmd_list);
+                cfs_list_del_init(&found->fmd_list);
                 filter_fmd_put_nolock(&exp->exp_filter_data, found);
         }
-        spin_unlock(&exp->exp_filter_data.fed_lock);
+        cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
 }
 #else
 #define filter_fmd_drop(exp, objid, group)
@@ -643,22 +645,22 @@ static void filter_fmd_cleanup(struct obd_export *exp)
         struct filter_export_data *fed = &exp->exp_filter_data;
         struct filter_mod_data *fmd = NULL, *tmp;
 
-        spin_lock(&fed->fed_lock);
-        list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
-                list_del_init(&fmd->fmd_list);
+        cfs_spin_lock(&fed->fed_lock);
+        cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
+                cfs_list_del_init(&fmd->fmd_list);
                 filter_fmd_put_nolock(fed, fmd);
         }
-        spin_unlock(&fed->fed_lock);
+        cfs_spin_unlock(&fed->fed_lock);
 }
 
 static int filter_init_export(struct obd_export *exp)
 {
-        spin_lock_init(&exp->exp_filter_data.fed_lock);
+        cfs_spin_lock_init(&exp->exp_filter_data.fed_lock);
         CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
 
-        spin_lock(&exp->exp_lock);
+        cfs_spin_lock(&exp->exp_lock);
         exp->exp_connecting = 1;
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
 
         return ldlm_init_export(exp);
 }
@@ -896,13 +898,13 @@ static int filter_init_server_data(struct obd_device *obd, struct file * filp)
 
                         /* VBR: set export last committed */
                         exp->exp_last_committed = last_rcvd;
-                        spin_lock(&exp->exp_lock);
+                        cfs_spin_lock(&exp->exp_lock);
                         exp->exp_connecting = 0;
                         exp->exp_in_recovery = 0;
-                        spin_unlock(&exp->exp_lock);
-                        spin_lock_bh(&obd->obd_processing_task_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
+                        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                         obd->obd_max_recoverable_clients++;
-                        spin_unlock_bh(&obd->obd_processing_task_lock);
+                        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                         lcd = NULL;
                         class_export_put(exp);
                 }
@@ -1204,14 +1206,14 @@ static int filter_read_groups(struct obd_device *obd, int last_group,
         struct filter_obd *filter = &obd->u.filter;
         int old_count, group, rc = 0;
 
-        down(&filter->fo_init_lock);
+        cfs_down(&filter->fo_init_lock);
         old_count = filter->fo_group_count;
         for (group = old_count; group <= last_group; group++) {
                 rc = filter_read_group_internal(obd, group, create);
                 if (rc != 0)
                         break;
         }
-        up(&filter->fo_init_lock);
+        cfs_up(&filter->fo_init_lock);
         return rc;
 }
 
@@ -1412,9 +1414,9 @@ static void filter_set_last_id(struct filter_obd *filter,
         LASSERT(filter->fo_fsd != NULL);
         LASSERT(group <= filter->fo_group_count);
 
-        spin_lock(&filter->fo_objidlock);
+        cfs_spin_lock(&filter->fo_objidlock);
         filter->fo_last_objids[group] = id;
-        spin_unlock(&filter->fo_objidlock);
+        cfs_spin_unlock(&filter->fo_objidlock);
 }
 
 obd_id filter_last_id(struct filter_obd *filter, obd_gr group)
@@ -1424,9 +1426,9 @@ obd_id filter_last_id(struct filter_obd *filter, obd_gr group)
         LASSERT(group <= filter->fo_group_count);
 
         /* FIXME: object groups */
-        spin_lock(&filter->fo_objidlock);
+        cfs_spin_lock(&filter->fo_objidlock);
         id = filter->fo_last_objids[group];
-        spin_unlock(&filter->fo_objidlock);
+        cfs_spin_unlock(&filter->fo_objidlock);
         return id;
 }
 
@@ -1665,7 +1667,7 @@ static enum interval_iter filter_intent_cb(struct interval_node *n,
         if (interval_high(n) <= size)
                 return INTERVAL_ITER_STOP;
 
-        list_for_each_entry(lck, &node->li_group, l_sl_policy) {
+        cfs_list_for_each_entry(lck, &node->li_group, l_sl_policy) {
                 /* Don't send glimpse ASTs to liblustre clients.
                  * They aren't listening for them, and they do
                  * entirely synchronous I/O anyways. */
@@ -1742,13 +1744,14 @@ static int filter_intent_policy(struct ldlm_namespace *ns,
 
         /* FIXME: we should change the policy function slightly, to not make
          * this list at all, since we just turn around and free it */
-        while (!list_empty(&rpc_list)) {
+        while (!cfs_list_empty(&rpc_list)) {
                 struct ldlm_lock *wlock =
-                        list_entry(rpc_list.next, struct ldlm_lock, l_cp_ast);
+                        cfs_list_entry(rpc_list.next, struct ldlm_lock,
+                                       l_cp_ast);
                 LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
                 LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
                 lock->l_flags &= ~LDLM_FL_CP_REQD;
-                list_del_init(&wlock->l_cp_ast);
+                cfs_list_del_init(&wlock->l_cp_ast);
                 LDLM_LOCK_RELEASE(wlock);
         }
 
@@ -1917,10 +1920,10 @@ static int filter_adapt_sptlrpc_conf(struct obd_device *obd, int initial)
 
         sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
 
-        write_lock(&filter->fo_sptlrpc_lock);
+        cfs_write_lock(&filter->fo_sptlrpc_lock);
         sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
         filter->fo_sptlrpc_rset = tmp_rset;
-        write_unlock(&filter->fo_sptlrpc_lock);
+        cfs_write_unlock(&filter->fo_sptlrpc_lock);
 
         return 0;
 }
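
Reader-writer locks round out the locking family: filter_adapt_sptlrpc_conf() swaps the rule set under fo_sptlrpc_lock with the prefixed write-side operations, and filter_common_setup() below initializes the lock with cfs_rwlock_init(). The assumed Linux mapping (see linux-lock.h):

/* Illustrative only -- rwlock wrappers as pass-throughs. */
#include <linux/spinlock.h>

typedef rwlock_t cfs_rwlock_t;

#define cfs_rwlock_init(l)   rwlock_init(l)
#define cfs_read_lock(l)     read_lock(l)
#define cfs_read_unlock(l)   read_unlock(l)
#define cfs_write_lock(l)    write_lock(l)
#define cfs_write_unlock(l)  write_unlock(l)
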
@@ -2055,16 +2058,16 @@ int filter_common_setup(struct obd_device *obd, struct lustre_cfg* lcfg,
         obd->obd_lvfs_ctxt.fs = get_ds();
         obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
 
-        init_mutex(&filter->fo_init_lock);
+        cfs_init_mutex(&filter->fo_init_lock);
         filter->fo_committed_group = 0;
         filter->fo_destroys_in_progress = 0;
         for (i = 0; i < 32; i++)
-                sema_init(&filter->fo_create_locks[i], 1);
+                cfs_sema_init(&filter->fo_create_locks[i], 1);
 
-        spin_lock_init(&filter->fo_translock);
-        spin_lock_init(&filter->fo_objidlock);
+        cfs_spin_lock_init(&filter->fo_translock);
+        cfs_spin_lock_init(&filter->fo_objidlock);
         CFS_INIT_LIST_HEAD(&filter->fo_export_list);
-        sema_init(&filter->fo_alloc_lock, 1);
+        cfs_sema_init(&filter->fo_alloc_lock, 1);
         init_brw_stats(&filter->fo_filter_stats);
         filter->fo_read_cache = 1; /* enable read-only cache by default */
         filter->fo_writethrough_cache = 1; /* enable writethrough cache */
@@ -2077,7 +2080,7 @@ int filter_common_setup(struct obd_device *obd, struct lustre_cfg* lcfg,
                 GOTO(err_ops, rc);
 
         CFS_INIT_LIST_HEAD(&filter->fo_llog_list);
-        spin_lock_init(&filter->fo_llog_list_lock);
+        cfs_spin_lock_init(&filter->fo_llog_list_lock);
 
         filter->fo_fl_oss_capa = 1;
 
@@ -2104,7 +2107,7 @@ int filter_common_setup(struct obd_device *obd, struct lustre_cfg* lcfg,
                 GOTO(err_post, rc);
         }
 
-        rwlock_init(&filter->fo_sptlrpc_lock);
+        cfs_rwlock_init(&filter->fo_sptlrpc_lock);
         sptlrpc_rule_set_init(&filter->fo_sptlrpc_rset);
         /* do this after llog being initialized */
         filter_adapt_sptlrpc_conf(obd, 1);
@@ -2399,20 +2402,20 @@ static int filter_llog_finish(struct obd_device *obd, int count)
                  * This is safe to do, as llog is already synchronized
                  * and its import may go.
                  */
-                mutex_down(&ctxt->loc_sem);
+                cfs_mutex_down(&ctxt->loc_sem);
                 if (ctxt->loc_imp) {
                         class_import_put(ctxt->loc_imp);
                         ctxt->loc_imp = NULL;
                 }
-                mutex_up(&ctxt->loc_sem);
+                cfs_mutex_up(&ctxt->loc_sem);
                 llog_ctxt_put(ctxt);
         }
 
         if (filter->fo_lcm) {
-                mutex_down(&ctxt->loc_sem);
+                cfs_mutex_down(&ctxt->loc_sem);
                 llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
                 filter->fo_lcm = NULL;
-                mutex_up(&ctxt->loc_sem);
+                cfs_mutex_up(&ctxt->loc_sem);
         }
         RETURN(filter_olg_fini(&obd->obd_olg));
 }
@@ -2426,7 +2429,7 @@ filter_find_olg_internal(struct filter_obd *filter, int group)
         struct obd_llog_group *olg;
 
         LASSERT_SPIN_LOCKED(&filter->fo_llog_list_lock);
-        list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
+        cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
                 if (olg->olg_group == group)
                         RETURN(olg);
         }
@@ -2446,9 +2449,9 @@ struct obd_llog_group *filter_find_olg(struct obd_device *obd, int group)
         if (group == FILTER_GROUP_LLOG)
                 RETURN(&obd->obd_olg);
 
-        spin_lock(&filter->fo_llog_list_lock);
+        cfs_spin_lock(&filter->fo_llog_list_lock);
         olg = filter_find_olg_internal(filter, group);
-        spin_unlock(&filter->fo_llog_list_lock);
+        cfs_spin_unlock(&filter->fo_llog_list_lock);
 
         RETURN(olg);
 }
@@ -2468,7 +2471,7 @@ struct obd_llog_group *filter_find_create_olg(struct obd_device *obd, int group)
         if (group == FILTER_GROUP_LLOG)
                 RETURN(&obd->obd_olg);
 
-        spin_lock(&filter->fo_llog_list_lock);
+        cfs_spin_lock(&filter->fo_llog_list_lock);
         olg = filter_find_olg_internal(filter, group);
         if (olg) {
                 if (olg->olg_initializing) {
@@ -2482,28 +2485,28 @@ struct obd_llog_group *filter_find_create_olg(struct obd_device *obd, int group)
                GOTO(out_unlock, olg = ERR_PTR(-ENOMEM));
 
         llog_group_init(olg, group);
-        list_add(&olg->olg_list, &filter->fo_llog_list);
+        cfs_list_add(&olg->olg_list, &filter->fo_llog_list);
         olg->olg_initializing = 1;
-        spin_unlock(&filter->fo_llog_list_lock);
+        cfs_spin_unlock(&filter->fo_llog_list_lock);
 
         rc = obd_llog_init(obd, olg, obd, NULL);
         if (rc) {
-               spin_lock(&filter->fo_llog_list_lock);
-               list_del(&olg->olg_list);
-               spin_unlock(&filter->fo_llog_list_lock);
+               cfs_spin_lock(&filter->fo_llog_list_lock);
+               cfs_list_del(&olg->olg_list);
+               cfs_spin_unlock(&filter->fo_llog_list_lock);
                OBD_FREE_PTR(olg);
                GOTO(out, olg = ERR_PTR(-ENOMEM));
         }
-        spin_lock(&filter->fo_llog_list_lock);
+        cfs_spin_lock(&filter->fo_llog_list_lock);
         olg->olg_initializing = 0;
-        spin_unlock(&filter->fo_llog_list_lock);
+        cfs_spin_unlock(&filter->fo_llog_list_lock);
         CDEBUG(D_OTHER, "%s: new llog group %u (0x%p)\n",
               obd->obd_name, group, olg);
 out:
         RETURN(olg);
 
 out_unlock:
-        spin_unlock(&filter->fo_llog_list_lock);
+        cfs_spin_unlock(&filter->fo_llog_list_lock);
         GOTO(out, olg);
 }
 
@@ -2536,9 +2539,9 @@ static int filter_llog_connect(struct obd_export *exp,
               obd->obd_name, body->lgdc_logid.lgl_oid,
               body->lgdc_logid.lgl_ogr, body->lgdc_logid.lgl_ogen);
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         obd->u.filter.fo_mds_ost_sync = 1;
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         rc = llog_connect(ctxt, &body->lgdc_logid,
                           &body->lgdc_gen, NULL);
         llog_ctxt_put(ctxt);
@@ -2553,7 +2556,7 @@ static int filter_llog_preclean(struct obd_device *obd)
 {
         struct obd_llog_group *olg, *tmp;
         struct filter_obd *filter;
-        struct list_head  remove_list;
+        cfs_list_t  remove_list;
         int rc = 0;
         ENTRY;
 
@@ -2564,17 +2567,17 @@ static int filter_llog_preclean(struct obd_device *obd)
         filter = &obd->u.filter;
         CFS_INIT_LIST_HEAD(&remove_list);
 
-        spin_lock(&filter->fo_llog_list_lock);
-        while (!list_empty(&filter->fo_llog_list)) {
-                olg = list_entry(filter->fo_llog_list.next,
-                                 struct obd_llog_group, olg_list);
-                list_del(&olg->olg_list);
-                list_add(&olg->olg_list, &remove_list);
+        cfs_spin_lock(&filter->fo_llog_list_lock);
+        while (!cfs_list_empty(&filter->fo_llog_list)) {
+                olg = cfs_list_entry(filter->fo_llog_list.next,
+                                     struct obd_llog_group, olg_list);
+                cfs_list_del(&olg->olg_list);
+                cfs_list_add(&olg->olg_list, &remove_list);
         }
-        spin_unlock(&filter->fo_llog_list_lock);
+        cfs_spin_unlock(&filter->fo_llog_list_lock);
 
-        list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
-                list_del_init(&olg->olg_list);
+        cfs_list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
+                cfs_list_del_init(&olg->olg_list);
                 rc = filter_olg_fini(olg);
                 if (rc)
                         CERROR("failed to cleanup llogging subsystem for %u\n",
@@ -2684,12 +2687,12 @@ static int filter_connect_internal(struct obd_export *exp,
                 struct filter_obd *filter = &exp->exp_obd->u.filter;
                 obd_size left, want;
 
-                spin_lock(&exp->exp_obd->obd_osfs_lock);
+                cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
                 left = filter_grant_space_left(exp);
                 want = data->ocd_grant;
                 filter_grant(exp, fed->fed_grant, want, left, (reconnect == 0));
                 data->ocd_grant = fed->fed_grant;
-                spin_unlock(&exp->exp_obd->obd_osfs_lock);
+                cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
 
                 CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %d want: "
                        LPU64" left: "LPU64"\n", exp->exp_obd->obd_name,
@@ -2868,7 +2871,7 @@ static void filter_grant_sanity_check(struct obd_device *obd, const char *func)
         obd_size tot_dirty = 0, tot_pending = 0, tot_granted = 0;
         obd_size fo_tot_dirty, fo_tot_pending, fo_tot_granted;
 
-        if (list_empty(&obd->obd_exports))
+        if (cfs_list_empty(&obd->obd_exports))
                 return;
 
         /* We don't want to do this for large machines that do lots of
@@ -2876,9 +2879,9 @@ static void filter_grant_sanity_check(struct obd_device *obd, const char *func)
         if (obd->obd_num_exports > 100)
                 return;
 
-        spin_lock(&obd->obd_osfs_lock);
-        spin_lock(&obd->obd_dev_lock);
-        list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+        cfs_spin_lock(&obd->obd_osfs_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
+        cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
                 int error = 0;
                 fed = &exp->exp_filter_data;
                 if (fed->fed_grant < 0 || fed->fed_pending < 0 ||
@@ -2909,8 +2912,8 @@ static void filter_grant_sanity_check(struct obd_device *obd, const char *func)
         fo_tot_granted = obd->u.filter.fo_tot_granted;
         fo_tot_pending = obd->u.filter.fo_tot_pending;
         fo_tot_dirty = obd->u.filter.fo_tot_dirty;
-        spin_unlock(&obd->obd_dev_lock);
-        spin_unlock(&obd->obd_osfs_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_osfs_lock);
 
        /* Do these assertions outside the spinlocks so we don't kill the system */
         if (tot_granted != fo_tot_granted)
@@ -2943,7 +2946,7 @@ static void filter_grant_discard(struct obd_export *exp)
         struct filter_obd *filter = &obd->u.filter;
         struct filter_export_data *fed = &exp->exp_filter_data;
 
-        spin_lock(&obd->obd_osfs_lock);
+        cfs_spin_lock(&obd->obd_osfs_lock);
         LASSERTF(filter->fo_tot_granted >= fed->fed_grant,
                  "%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
                  obd->obd_name, filter->fo_tot_granted,
@@ -2962,7 +2965,7 @@ static void filter_grant_discard(struct obd_export *exp)
         fed->fed_dirty = 0;
         fed->fed_grant = 0;
 
-        spin_unlock(&obd->obd_osfs_lock);
+        cfs_spin_unlock(&obd->obd_osfs_lock);
 }
 
 static int filter_destroy_export(struct obd_export *exp)
@@ -3018,8 +3021,8 @@ static void filter_sync_llogs(struct obd_device *obd, struct obd_export *dexp)
                 /* look for group with min. number, but > worked */
                 olg_min = NULL;
                 group = 1 << 30;
-                spin_lock(&filter->fo_llog_list_lock);
-                list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
+                cfs_spin_lock(&filter->fo_llog_list_lock);
+                cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
                         if (olg->olg_group <= worked) {
                                 /* this group is already synced */
                                 continue;
@@ -3032,7 +3035,7 @@ static void filter_sync_llogs(struct obd_device *obd, struct obd_export *dexp)
                         olg_min = olg;
                         group = olg->olg_group;
                 }
-                spin_unlock(&filter->fo_llog_list_lock);
+                cfs_spin_unlock(&filter->fo_llog_list_lock);
 
                 if (olg_min == NULL)
                         break;
@@ -3542,7 +3545,7 @@ static int filter_destroy_precreated(struct obd_export *exp, struct obdo *oa,
         doa.o_gr = oa->o_gr;
         doa.o_mode = S_IFREG;
 
-        if (!test_bit(doa.o_gr, &filter->fo_destroys_in_progress)) {
+        if (!cfs_test_bit(doa.o_gr, &filter->fo_destroys_in_progress)) {
                 CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
                        exp->exp_obd->obd_name, doa.o_gr);
                 RETURN(0);
@@ -3582,7 +3585,7 @@ static int filter_destroy_precreated(struct obd_export *exp, struct obdo *oa,
                 oa->o_id = last;
                 rc = 0;
         }
-        clear_bit(doa.o_gr, &filter->fo_destroys_in_progress);
+        cfs_clear_bit(doa.o_gr, &filter->fo_destroys_in_progress);
 
         RETURN(rc);
 }
@@ -3608,12 +3611,12 @@ static int filter_handle_precreate(struct obd_export *exp, struct obdo *oa,
                         RETURN(0);
                 }
                 /* This causes inflight precreates to abort and drop lock */
-                set_bit(group, &filter->fo_destroys_in_progress);
-                down(&filter->fo_create_locks[group]);
-                if (!test_bit(group, &filter->fo_destroys_in_progress)) {
+                cfs_set_bit(group, &filter->fo_destroys_in_progress);
+                cfs_down(&filter->fo_create_locks[group]);
+                if (!cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
                         CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
                                exp->exp_obd->obd_name, group);
-                        up(&filter->fo_create_locks[group]);
+                        cfs_up(&filter->fo_create_locks[group]);
                         RETURN(0);
                 }
                 diff = oa->o_id - last;
@@ -3635,10 +3638,10 @@ static int filter_handle_precreate(struct obd_export *exp, struct obdo *oa,
                         GOTO(out, rc);
                 } else {
                         /* XXX: Used by MDS for the first time! */
-                        clear_bit(group, &filter->fo_destroys_in_progress);
+                        cfs_clear_bit(group, &filter->fo_destroys_in_progress);
                 }
         } else {
-                down(&filter->fo_create_locks[group]);
+                cfs_down(&filter->fo_create_locks[group]);
                 if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
                         CERROR("%s: dropping old precreate request\n",
                                obd->obd_name);
@@ -3667,7 +3670,7 @@ static int filter_handle_precreate(struct obd_export *exp, struct obdo *oa,
         /* else diff == 0 */
         GOTO(out, rc = 0);
 out:
-        up(&filter->fo_create_locks[group]);
+        cfs_up(&filter->fo_create_locks[group]);
         return rc;
 }
 
@@ -3682,10 +3685,10 @@ static int filter_statfs(struct obd_device *obd, struct obd_statfs *osfs,
        /* at least try to account for cached pages.  it's still racy and
          * might be under-reporting if clients haven't announced their
          * caches with brw recently */
-        spin_lock(&obd->obd_osfs_lock);
+        cfs_spin_lock(&obd->obd_osfs_lock);
         rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
         memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
-        spin_unlock(&obd->obd_osfs_lock);
+        cfs_spin_unlock(&obd->obd_osfs_lock);
 
         CDEBUG(D_SUPER | D_CACHE, "blocks cached "LPU64" granted "LPU64
                " pending "LPU64" free "LPU64" avail "LPU64"\n",
@@ -3752,11 +3755,11 @@ static __u64 filter_calc_free_inodes(struct obd_device *obd)
         int rc;
         __u64 os_ffree = -1;
 
-        spin_lock(&obd->obd_osfs_lock);
+        cfs_spin_lock(&obd->obd_osfs_lock);
         rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, cfs_time_shift_64(1));
         if (rc == 0)
                 os_ffree = obd->obd_osfs.os_ffree;
-        spin_unlock(&obd->obd_osfs_lock);
+        cfs_spin_unlock(&obd->obd_osfs_lock);
 
         return os_ffree;
 }
@@ -3798,7 +3801,8 @@ static int filter_precreate(struct obd_device *obd, struct obdo *oa,
                 OBD_ALLOC(osfs, sizeof(*osfs));
                 if (osfs == NULL)
                         RETURN(-ENOMEM);
-                rc = filter_statfs(obd, osfs, cfs_time_current_64() - HZ, 0);
+                rc = filter_statfs(obd, osfs, cfs_time_current_64() - CFS_HZ,
+                                   0);
                 if (rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
                         CDEBUG(D_RPCTRACE,"%s: not enough space for create "
                                LPU64"\n", obd->obd_name, osfs->os_bavail <<
@@ -3817,7 +3821,7 @@ static int filter_precreate(struct obd_device *obd, struct obdo *oa,
         for (i = 0; i < *num && err == 0; i++) {
                 int cleanup_phase = 0;
 
-                if (test_bit(group, &filter->fo_destroys_in_progress)) {
+                if (cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
                         CWARN("%s: create aborted by destroy\n",
                               obd->obd_name);
                         rc = -EAGAIN;
@@ -3934,7 +3938,7 @@ set_last_id:
 
                 if (rc)
                         break;
-                if (time_after(jiffies, enough_time)) {
+                if (cfs_time_after(jiffies, enough_time)) {
                         CDEBUG(D_RPCTRACE,
                                "%s: precreate slow - want %d got %d \n",
                                obd->obd_name, *num, i);
@@ -4003,9 +4007,9 @@ static int filter_create(struct obd_export *exp, struct obdo *oa,
                         rc = -EINVAL;
                 } else {
                         diff = 1;
-                        down(&filter->fo_create_locks[oa->o_gr]);
+                        cfs_down(&filter->fo_create_locks[oa->o_gr]);
                         rc = filter_precreate(obd, oa, oa->o_gr, &diff);
-                        up(&filter->fo_create_locks[oa->o_gr]);
+                        cfs_up(&filter->fo_create_locks[oa->o_gr]);
                 }
         } else {
                 rc = filter_handle_precreate(exp, oa, oa->o_gr, oti);
@@ -4403,9 +4407,9 @@ static int filter_set_grant_shrink(struct obd_export *exp,
                                    struct ost_body *body)
 {
         /* handle shrink grant */
-        spin_lock(&exp->exp_obd->obd_osfs_lock);
+        cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
         filter_grant_incoming(exp, &body->oa);
-        spin_unlock(&exp->exp_obd->obd_osfs_lock);
+        cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
 
         RETURN(0);
 
@@ -4655,7 +4659,7 @@ static int __init obdfilter_init(void)
 
         lprocfs_filter_init_vars(&lvars);
 
-        request_module("%s", "lquota");
+        cfs_request_module("%s", "lquota");
         OBD_ALLOC(obdfilter_created_scratchpad,
                   OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
                   sizeof(*obdfilter_created_scratchpad));
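
The obdfilter changes above are mechanical: every locking and bit-manipulation call gains the cfs_ prefix while keeping its call sites and semantics. As a minimal sketch, assuming the Linux port keeps a one-to-one mapping onto native kernel primitives (non-Linux ports supply their own definitions):

    /* Hedged sketch of the assumed Linux expansions; not the verbatim
     * libcfs headers. */
    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    typedef spinlock_t cfs_spinlock_t;

    #define cfs_spin_lock_init(lock)  spin_lock_init(lock)
    #define cfs_spin_lock(lock)       spin_lock(lock)
    #define cfs_spin_unlock(lock)     spin_unlock(lock)
    #define cfs_spin_lock_bh(lock)    spin_lock_bh(lock)
    #define cfs_spin_unlock_bh(lock)  spin_unlock_bh(lock)

    #define cfs_test_bit(nr, addr)    test_bit(nr, addr)
    #define cfs_set_bit(nr, addr)     set_bit(nr, addr)
    #define cfs_clear_bit(nr, addr)   clear_bit(nr, addr)

With aliases of this shape the rename carries no behavioural risk on Linux: only the exported names change, keeping libcfs identifiers uniformly prefixed.
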
index a93f115..ed35959 100644
@@ -62,8 +62,8 @@ int filter_update_capa_key(struct obd_device *obd, struct lustre_capa_key *new)
         struct filter_capa_key *k, *keys[2] = { NULL, NULL };
         int i;
 
-        spin_lock(&capa_lock);
-        list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
+        cfs_spin_lock(&capa_lock);
+        cfs_list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
                 if (k->k_key.lk_mdsid != new->lk_mdsid)
                         continue;
 
@@ -75,7 +75,7 @@ int filter_update_capa_key(struct obd_device *obd, struct lustre_capa_key *new)
                         keys[0] = k;
                 }
         }
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         for (i = 0; i < 2; i++) {
                 if (!keys[i])
@@ -85,9 +85,9 @@ int filter_update_capa_key(struct obd_device *obd, struct lustre_capa_key *new)
                /* maybe because of recovery or other reasons, MDS sent
                  * the old capability key again.
                  */
-                spin_lock(&capa_lock);
+                cfs_spin_lock(&capa_lock);
                 keys[i]->k_key = *new;
-                spin_unlock(&capa_lock);
+                cfs_spin_unlock(&capa_lock);
 
                 RETURN(0);
         }
@@ -102,11 +102,11 @@ int filter_update_capa_key(struct obd_device *obd, struct lustre_capa_key *new)
                 CFS_INIT_LIST_HEAD(&k->k_list);
         }
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         k->k_key = *new;
-        if (list_empty(&k->k_list))
-                list_add(&k->k_list, &filter->fo_capa_keys);
-        spin_unlock(&capa_lock);
+        if (cfs_list_empty(&k->k_list))
+                cfs_list_add(&k->k_list, &filter->fo_capa_keys);
+        cfs_spin_unlock(&capa_lock);
 
         DEBUG_CAPA_KEY(D_SEC, new, "new");
         RETURN(0);
@@ -162,12 +162,12 @@ int filter_auth_capa(struct obd_export *exp, struct lu_fid *fid, obd_gr group,
 
         oc = capa_lookup(filter->fo_capa_hash, capa, 0);
         if (oc) {
-                spin_lock(&oc->c_lock);
+                cfs_spin_lock(&oc->c_lock);
                 if (capa_is_expired(oc)) {
                         DEBUG_CAPA(D_ERROR, capa, "expired");
                         rc = -ESTALE;
                 }
-                spin_unlock(&oc->c_lock);
+                cfs_spin_unlock(&oc->c_lock);
 
                 capa_put(oc);
                 RETURN(rc);
@@ -178,8 +178,8 @@ int filter_auth_capa(struct obd_export *exp, struct lu_fid *fid, obd_gr group,
                 RETURN(-ESTALE);
         }
 
-        spin_lock(&capa_lock);
-        list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
+        cfs_spin_lock(&capa_lock);
+        cfs_list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
                 if (k->k_key.lk_mdsid == mdsid) {
                         keys_ready = 1;
                         if (k->k_key.lk_keyid == capa_keyid(capa)) {
@@ -189,7 +189,7 @@ int filter_auth_capa(struct obd_export *exp, struct lu_fid *fid, obd_gr group,
                         }
                 }
         }
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         if (!keys_ready) {
                 CDEBUG(D_SEC, "MDS hasn't propagated capability keys yet, "
@@ -251,15 +251,15 @@ int filter_capa_fixoa(struct obd_export *exp, struct obdo *oa, obd_gr group,
                 struct filter_capa_key *k;
                 int found = 0;
 
-                spin_lock(&capa_lock);
-                list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
+                cfs_spin_lock(&capa_lock);
+                cfs_list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
                         if (k->k_key.lk_mdsid == mdsid &&
                             k->k_key.lk_keyid == capa_keyid(capa)) {
                                 found = 1;
                                 break;
                         }
                 }
-                spin_unlock(&capa_lock);
+                cfs_spin_unlock(&capa_lock);
 
                 if (found) {
                         union {
@@ -295,10 +295,10 @@ void filter_free_capa_keys(struct filter_obd *filter)
 {
         struct filter_capa_key *key, *n;
 
-        spin_lock(&capa_lock);
-        list_for_each_entry_safe(key, n, &filter->fo_capa_keys, k_list) {
-                list_del_init(&key->k_list);
+        cfs_spin_lock(&capa_lock);
+        cfs_list_for_each_entry_safe(key, n, &filter->fo_capa_keys, k_list) {
+                cfs_list_del_init(&key->k_list);
                 OBD_FREE(key, sizeof(*key));
         }
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 }
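
filter_update_capa_key() and friends walk fo_capa_keys under capa_lock, now through the renamed list API. A minimal sketch of the assumed Linux aliases behind it:

    #include <linux/list.h>

    typedef struct list_head cfs_list_t;

    #define CFS_INIT_LIST_HEAD(head)           INIT_LIST_HEAD(head)
    #define cfs_list_add(item, head)           list_add(item, head)
    #define cfs_list_del(item)                 list_del(item)
    #define cfs_list_del_init(item)            list_del_init(item)
    #define cfs_list_empty(head)               list_empty(head)
    #define cfs_list_entry(ptr, type, member)  list_entry(ptr, type, member)
    #define cfs_list_for_each_entry(p, head, member) \
            list_for_each_entry(p, head, member)
    #define cfs_list_for_each_entry_safe(p, n, head, member) \
            list_for_each_entry_safe(p, n, head, member)

The _safe variant matters in filter_free_capa_keys() above, which deletes each entry while iterating.
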
index 748c8b4..d3df199 100644
@@ -71,12 +71,13 @@ extern struct file_operations filter_per_nid_stats_fops;
 
 /* per-client-per-object persistent state (LRU) */
 struct filter_mod_data {
-        struct list_head fmd_list;      /* linked to fed_mod_list */
-        __u64            fmd_id;        /* object being written to */
-        __u64            fmd_gr;        /* group being written to */
-        __u64            fmd_mactime_xid;/* xid highest {m,a,c}time setattr */
-        unsigned long    fmd_expire;    /* jiffies when it should expire */
-        int              fmd_refcount;  /* reference counter - list holds 1 */
+        cfs_list_t       fmd_list;       /* linked to fed_mod_list */
+        __u64            fmd_id;         /* object being written to */
+        __u64            fmd_gr;         /* group being written to */
+        __u64            fmd_mactime_xid; /* xid highest {m,a,c}time
+                                            * setattr */
+        unsigned long    fmd_expire;      /* jiffies when it should expire */
+        int              fmd_refcount;    /* reference counter, list holds 1 */
 };
 
 #ifdef HAVE_BGL_SUPPORT
@@ -85,7 +86,7 @@ struct filter_mod_data {
 #define FILTER_FMD_MAX_NUM_DEFAULT  32
 #endif
 /* Client cache seconds */
-#define FILTER_FMD_MAX_AGE_DEFAULT ((obd_timeout + 10) * HZ)
+#define FILTER_FMD_MAX_AGE_DEFAULT ((obd_timeout + 10) * CFS_HZ)
 
 #ifndef HAVE_PAGE_CONSTANT
 #define mapping_cap_page_constant_write(mapping) 0
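
FILTER_FMD_MAX_AGE_DEFAULT switches from the raw Linux HZ to CFS_HZ so the seconds-to-ticks arithmetic also builds on ports without a kernel tick macro. A sketch of the intent; the Linux branch presumably stays HZ, and the userspace value here is purely an assumed illustration:

    #ifdef __KERNEL__
    # include <linux/jiffies.h>
    # define CFS_HZ HZ     /* Linux: native tick rate */
    #else
    # define CFS_HZ 100    /* hypothetical fallback for userspace ports */
    #endif

    /* hypothetical restatement of the default FMD age:
     * (obd_timeout + 10) seconds, expressed in ticks */
    #define DEMO_FMD_MAX_AGE(obd_timeout) (((obd_timeout) + 10) * CFS_HZ)
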
index 368842e..7a2700f 100644
@@ -132,7 +132,7 @@ void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
                 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
                        obd->obd_name, exp->exp_client_uuid.uuid, exp,
                        fed->fed_dirty, fed->fed_pending, fed->fed_grant);
-                spin_unlock(&obd->obd_osfs_lock);
+                cfs_spin_unlock(&obd->obd_osfs_lock);
                 LBUG();
         }
         EXIT;
@@ -152,10 +152,11 @@ obd_size filter_grant_space_left(struct obd_export *exp)
 
         LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
 
-        if (cfs_time_before_64(obd->obd_osfs_age, cfs_time_current_64() - HZ)) {
+        if (cfs_time_before_64(obd->obd_osfs_age,
+                               cfs_time_current_64() - CFS_HZ)) {
 restat:
                 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb,
-                                   cfs_time_current_64() + HZ);
+                                   cfs_time_current_64() + CFS_HZ);
                 if (rc) /* N.B. statfs can't really fail */
                         RETURN(0);
                 statfs_done = 1;
@@ -241,7 +242,7 @@ long filter_grant(struct obd_export *exp, obd_size current_grant,
                                        "current"LPU64"\n",
                                        obd->obd_name, exp->exp_client_uuid.uuid,
                                        exp, fed->fed_grant, want,current_grant);
-                                spin_unlock(&obd->obd_osfs_lock);
+                                cfs_spin_unlock(&obd->obd_osfs_lock);
                                 LBUG();
                         }
                 }
@@ -399,12 +400,12 @@ static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
                 RETURN(rc);
 
         if (oa && oa->o_valid & OBD_MD_FLGRANT) {
-                spin_lock(&obd->obd_osfs_lock);
+                cfs_spin_lock(&obd->obd_osfs_lock);
                 filter_grant_incoming(exp, oa);
 
                 if (!(oa->o_flags & OBD_FL_SHRINK_GRANT))
                         oa->o_grant = 0;
-                spin_unlock(&obd->obd_osfs_lock);
+                cfs_spin_unlock(&obd->obd_osfs_lock);
         }
 
         iobuf = filter_iobuf_get(&obd->u.filter, oti);
@@ -431,7 +432,7 @@ static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
         fsfilt_check_slow(obd, now, "preprw_read setup");
 
         /* find pages for all segments, fill array with them */
-        do_gettimeofday(&start);
+        cfs_gettimeofday(&start);
         for (i = 0, lnb = res; i < *npages; i++, lnb++) {
 
                 lnb->dentry = dentry;
@@ -463,7 +464,7 @@ static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
                 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_MISS, 1);
                 filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
         }
-        do_gettimeofday(&end);
+        cfs_gettimeofday(&end);
         timediff = cfs_timeval_sub(&end, &start, NULL);
         lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
 
@@ -622,7 +623,7 @@ static int filter_grant_check(struct obd_export *exp, struct obdo *oa,
                 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
                        exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                        fed->fed_dirty, fed->fed_pending, fed->fed_grant);
-                spin_unlock(&exp->exp_obd->obd_osfs_lock);
+                cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
                 LBUG();
         }
         return rc;
@@ -709,7 +710,7 @@ static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
         fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_gr);
 
         LASSERT(oa != NULL);
-        spin_lock(&obd->obd_osfs_lock);
+        cfs_spin_lock(&obd->obd_osfs_lock);
         filter_grant_incoming(exp, oa);
         if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
                 oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
@@ -733,7 +734,7 @@ static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
                 oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
                                            left, 1);
 
-        spin_unlock(&obd->obd_osfs_lock);
+        cfs_spin_unlock(&obd->obd_osfs_lock);
         filter_fmd_put(exp, fmd);
 
         if (rc)
@@ -748,7 +749,7 @@ static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
          * multiple writes or single truncate. */
         down_read(&dentry->d_inode->i_alloc_sem);
 
-        do_gettimeofday(&start);
+        cfs_gettimeofday(&start);
         for (i = 0, lnb = res; i < *npages; i++, lnb++) {
 
                 /* We still set up for ungranted pages so that granted pages
@@ -807,7 +808,7 @@ static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
                 if (lnb->rc == 0)
                         tot_bytes += lnb->len;
         }
-        do_gettimeofday(&end);
+        cfs_gettimeofday(&end);
         timediff = cfs_timeval_sub(&end, &start, NULL);
         lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
 
@@ -847,10 +848,10 @@ cleanup:
         case 1:
                 filter_iobuf_put(&obd->u.filter, iobuf, oti);
         case 0:
-                spin_lock(&obd->obd_osfs_lock);
+                cfs_spin_lock(&obd->obd_osfs_lock);
                 if (oa)
                         filter_grant_incoming(exp, oa);
-                spin_unlock(&obd->obd_osfs_lock);
+                cfs_spin_unlock(&obd->obd_osfs_lock);
                 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                 break;
         default:;
@@ -930,7 +931,7 @@ void filter_grant_commit(struct obd_export *exp, int niocount,
         unsigned long pending = 0;
         int i;
 
-        spin_lock(&exp->exp_obd->obd_osfs_lock);
+        cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
         for (i = 0, lnb = res; i < niocount; i++, lnb++)
                 pending += lnb->lnb_grant_used;
 
@@ -950,7 +951,7 @@ void filter_grant_commit(struct obd_export *exp, int niocount,
                  filter->fo_tot_pending, pending);
         filter->fo_tot_pending -= pending;
 
-        spin_unlock(&exp->exp_obd->obd_osfs_lock);
+        cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
 }
 
 int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
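
Both preprw paths time their page-assembly loops and feed the microsecond delta into the LPROC_FILTER_GET_PAGE counter; only the clock call is renamed. A reduced sketch of the idiom, assuming cfs_gettimeofday() expands to do_gettimeofday() on Linux and reusing the cfs_timeval_sub() helper already called above (its exact signature is assumed here):

    #include <linux/time.h>

    #define cfs_gettimeofday(tv) do_gettimeofday(tv)  /* assumed mapping */

    /* declared by libcfs; returns the difference in microseconds */
    extern long cfs_timeval_sub(struct timeval *large, struct timeval *small,
                                struct timeval *result);

    /* demo helper: microsecond cost of a region, as the preprw loops do */
    static long demo_time_region(void (*body)(void *), void *arg)
    {
            struct timeval start, end;

            cfs_gettimeofday(&start);
            body(arg);
            cfs_gettimeofday(&end);
            return cfs_timeval_sub(&end, &start, NULL);
    }
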
index 72e0d65..7f860c1 100644
 /* 512byte block min */
 #define MAX_BLOCKS_PER_PAGE (CFS_PAGE_SIZE / 512)
 struct filter_iobuf {
-        atomic_t          dr_numreqs;  /* number of reqs being processed */
-        wait_queue_head_t dr_wait;
-        int               dr_max_pages;
-        int               dr_npages;
-        int               dr_error;
-        struct page     **dr_pages;
-        unsigned long    *dr_blocks;
-        unsigned int      dr_ignore_quota:1;
+        cfs_atomic_t       dr_numreqs;  /* number of reqs being processed */
+        cfs_waitq_t        dr_wait;
+        int                dr_max_pages;
+        int                dr_npages;
+        int                dr_error;
+        struct page      **dr_pages;
+        unsigned long     *dr_blocks;
+        unsigned int       dr_ignore_quota:1;
         struct filter_obd *dr_filter;
 };
 
@@ -74,34 +74,36 @@ static void record_start_io(struct filter_iobuf *iobuf, int rw, int size,
 {
         struct filter_obd *filter = iobuf->dr_filter;
 
-        atomic_inc(&iobuf->dr_numreqs);
+        cfs_atomic_inc(&iobuf->dr_numreqs);
 
         if (rw == OBD_BRW_READ) {
-                atomic_inc(&filter->fo_r_in_flight);
+                cfs_atomic_inc(&filter->fo_r_in_flight);
                 lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_R_RPC_HIST],
-                                 atomic_read(&filter->fo_r_in_flight));
+                                 cfs_atomic_read(&filter->fo_r_in_flight));
                 lprocfs_oh_tally_log2(&filter->
                                        fo_filter_stats.hist[BRW_R_DISK_IOSIZE],
                                       size);
                 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                         lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
-                                          hist[BRW_R_RPC_HIST],
-                                         atomic_read(&filter->fo_r_in_flight));
+                                         hist[BRW_R_RPC_HIST],
+                                         cfs_atomic_read(&filter->
+                                                         fo_r_in_flight));
                         lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                          nid_brw_stats->hist[BRW_R_DISK_IOSIZE],
                                               size);
                 }
         } else {
-                atomic_inc(&filter->fo_w_in_flight);
+                cfs_atomic_inc(&filter->fo_w_in_flight);
                 lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_W_RPC_HIST],
-                                 atomic_read(&filter->fo_w_in_flight));
+                                 cfs_atomic_read(&filter->fo_w_in_flight));
                 lprocfs_oh_tally_log2(&filter->
                                        fo_filter_stats.hist[BRW_W_DISK_IOSIZE],
                                       size);
                 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                         lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                           hist[BRW_W_RPC_HIST],
-                                         atomic_read(&filter->fo_r_in_flight));
+                                         cfs_atomic_read(&filter->
+                                                         fo_r_in_flight));
                         lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                         nid_brw_stats->hist[BRW_W_DISK_IOSIZE],
                                               size);
@@ -117,12 +119,12 @@ static void record_finish_io(struct filter_iobuf *iobuf, int rw, int rc)
          * DO NOT record procfs stats here!!! */
 
         if (rw == OBD_BRW_READ)
-                atomic_dec(&filter->fo_r_in_flight);
+                cfs_atomic_dec(&filter->fo_r_in_flight);
         else
-                atomic_dec(&filter->fo_w_in_flight);
+                cfs_atomic_dec(&filter->fo_w_in_flight);
 
-        if (atomic_dec_and_test(&iobuf->dr_numreqs))
-                wake_up(&iobuf->dr_wait);
+        if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
+                cfs_waitq_signal(&iobuf->dr_wait);
 }
 
 static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
@@ -150,13 +152,13 @@ static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
                        "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
                        "bi_private: %p\n", bio->bi_next, bio->bi_flags,
                        bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
-                       bio->bi_end_io, atomic_read(&bio->bi_cnt),
+                       bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
                        bio->bi_private);
                 return 0;
         }
 
        /* the check is outside of the loop for performance reasons -bzzz */
-        if (!test_bit(BIO_RW, &bio->bi_rw)) {
+        if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
                 bio_for_each_segment(bvl, bio, i) {
                         if (likely(error == 0))
                                 SetPageUptodate(bvl->bv_page);
@@ -218,8 +220,8 @@ struct filter_iobuf *filter_alloc_iobuf(struct filter_obd *filter,
                 goto failed_2;
 
         iobuf->dr_filter = filter;
-        init_waitqueue_head(&iobuf->dr_wait);
-        atomic_set(&iobuf->dr_numreqs, 0);
+        cfs_waitq_init(&iobuf->dr_wait);
+        cfs_atomic_set(&iobuf->dr_numreqs, 0);
         iobuf->dr_max_pages = num_pages;
         iobuf->dr_npages = 0;
         iobuf->dr_error = 0;
@@ -239,7 +241,7 @@ static void filter_clear_iobuf(struct filter_iobuf *iobuf)
 {
         iobuf->dr_npages = 0;
         iobuf->dr_error = 0;
-        atomic_set(&iobuf->dr_numreqs, 0);
+        cfs_atomic_set(&iobuf->dr_numreqs, 0);
 }
 
 void filter_free_iobuf(struct filter_iobuf *iobuf)
@@ -414,7 +416,8 @@ int filter_do_bio(struct obd_export *exp, struct inode *inode,
         }
 
  out:
-        wait_event(iobuf->dr_wait, atomic_read(&iobuf->dr_numreqs) == 0);
+        cfs_wait_event(iobuf->dr_wait,
+                       cfs_atomic_read(&iobuf->dr_numreqs) == 0);
 
         if (rw == OBD_BRW_READ) {
                 lprocfs_oh_tally(&obd->u.filter.fo_filter_stats.
@@ -461,7 +464,7 @@ int filter_direct_io(int rw, struct dentry *dchild, struct filter_iobuf *iobuf,
         struct inode *inode = dchild->d_inode;
         int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
         int rc, rc2, create;
-        struct semaphore *sem;
+        cfs_semaphore_t *sem;
         ENTRY;
 
         LASSERTF(iobuf->dr_npages <= iobuf->dr_max_pages, "%d,%d\n",
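
The direct-I/O code above keeps its completion idiom across the rename: every submitted bio bumps dr_numreqs, the final completion signals dr_wait, and filter_do_bio() sleeps until the counter drains. (Note that the write branch of record_start_io() still tallies fo_r_in_flight into the per-nid BRW_W_RPC_HIST histogram; the rename preserves that pre-existing line verbatim.) A hypothetical reduced form, using the assumed Linux expansions of the wrappers:

    #include <linux/wait.h>
    #include <asm/atomic.h>

    typedef atomic_t          cfs_atomic_t;
    typedef wait_queue_head_t cfs_waitq_t;

    #define cfs_waitq_init(wq)          init_waitqueue_head(wq)
    #define cfs_waitq_signal(wq)        wake_up(wq)
    #define cfs_wait_event(wq, cond)    wait_event(wq, cond)
    #define cfs_atomic_inc(a)           atomic_inc(a)
    #define cfs_atomic_read(a)          atomic_read(a)
    #define cfs_atomic_dec_and_test(a)  atomic_dec_and_test(a)

    struct demo_iobuf {
            cfs_atomic_t dr_numreqs;    /* bios still in flight */
            cfs_waitq_t  dr_wait;       /* submitter sleeps here */
    };

    static void demo_complete_one(struct demo_iobuf *iobuf)
    {
            /* only the last completion wakes the waiter */
            if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
                    cfs_waitq_signal(&iobuf->dr_wait);
    }

    static void demo_wait_for_drain(struct demo_iobuf *iobuf)
    {
            cfs_wait_event(iobuf->dr_wait,
                           cfs_atomic_read(&iobuf->dr_numreqs) == 0);
    }
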
index a8caad3..e32bea2 100644
@@ -259,9 +259,9 @@ int filter_recov_log_mds_ost_cb(struct llog_handle *llh,
                 RETURN(LLOG_PROC_BREAK);
 
         if (rec == NULL) {
-                spin_lock_bh(&ctxt->loc_obd->obd_processing_task_lock);
+                cfs_spin_lock_bh(&ctxt->loc_obd->obd_processing_task_lock);
                 ctxt->loc_obd->u.filter.fo_mds_ost_sync = 0;
-                spin_unlock_bh(&ctxt->loc_obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&ctxt->loc_obd->obd_processing_task_lock);
                 RETURN(0);
         }
 
index c66055e..f3df18a 100644
@@ -131,7 +131,7 @@ static int filter_lvbo_update(struct ldlm_resource *res,
 
         LASSERT(res);
 
-        down(&res->lr_lvb_sem);
+        cfs_down(&res->lr_lvb_sem);
         lvb = res->lr_lvb_data;
         if (lvb == NULL) {
                 CERROR("No lvb when running lvbo_update!\n");
@@ -224,7 +224,7 @@ out_dentry:
         f_dput(dentry);
 
 out:
-        up(&res->lr_lvb_sem);
+        cfs_up(&res->lr_lvb_sem);
         return rc;
 }
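
filter_lvbo_update() serializes on lr_lvb_sem, now through the renamed semaphore calls. A minimal sketch, assuming the usual one-to-one Linux mapping:

    #include <linux/semaphore.h>  /* asm/semaphore.h on older kernels */

    typedef struct semaphore cfs_semaphore_t;

    #define cfs_down(sem)  down(sem)   /* may sleep, not interruptible */
    #define cfs_up(sem)    up(sem)

The function body is otherwise unchanged: cfs_down() on entry, cfs_up() on both exit paths.
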
 
index e18d6e9..59b79e1 100644
@@ -183,7 +183,8 @@ int lprocfs_filter_rd_fmd_max_age(char *page, char **start, off_t off,
         struct obd_device *obd = data;
         int rc;
 
-        rc = snprintf(page, count, "%u\n", obd->u.filter.fo_fmd_max_age / HZ);
+        rc = snprintf(page, count, "%u\n",
+                      obd->u.filter.fo_fmd_max_age / CFS_HZ);
         return rc;
 }
 
@@ -201,7 +202,7 @@ int lprocfs_filter_wr_fmd_max_age(struct file *file, const char *buffer,
         if (val > 65536 || val < 1)
                 return -EINVAL;
 
-        obd->u.filter.fo_fmd_max_age = val * HZ;
+        obd->u.filter.fo_fmd_max_age = val * CFS_HZ;
         return count;
 }
 
@@ -299,9 +300,9 @@ static int lprocfs_filter_wr_cache(struct file *file, const char *buffer,
         if (rc)
                 return rc;
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         obd->u.filter.fo_read_cache = val;
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         return count;
 }
 
@@ -326,9 +327,9 @@ static int lprocfs_filter_wr_wcache(struct file *file, const char *buffer,
         if (rc)
                 return rc;
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         obd->u.filter.fo_writethrough_cache = val;
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         return count;
 }
 
@@ -359,9 +360,9 @@ int lprocfs_filter_wr_degraded(struct file *file, const char *buffer,
         if (rc)
                 return rc;
 
-        spin_lock(&obd->obd_osfs_lock);
+        cfs_spin_lock(&obd->obd_osfs_lock);
         obd->u.filter.fo_raid_degraded = !!val;
-        spin_unlock(&obd->obd_osfs_lock);
+        cfs_spin_unlock(&obd->obd_osfs_lock);
         return count;
 }
 
@@ -508,7 +509,7 @@ static void brw_stats_show(struct seq_file *seq, struct brw_stats *brw_stats)
         struct timeval now;
 
         /* this sampling races with updates */
-        do_gettimeofday(&now);
+        cfs_gettimeofday(&now);
         seq_printf(seq, "snapshot_time:         %lu.%lu (secs.usecs)\n",
                    now.tv_sec, now.tv_usec);
 
@@ -534,7 +535,7 @@ static void brw_stats_show(struct seq_file *seq, struct brw_stats *brw_stats)
 
         {
                 char title[24];
-                sprintf(title, "I/O time (1/%ds)", HZ);
+                sprintf(title, "I/O time (1/%ds)", CFS_HZ);
                 display_brw_stats(seq, title, "ios",
                                   &brw_stats->hist[BRW_R_IO_TIME],
                                   &brw_stats->hist[BRW_W_IO_TIME], 1);
index 4fe16bc..9140a58 100644
@@ -187,7 +187,7 @@ static int osc_wr_max_dirty_mb(struct file *file, const char *buffer,
 
         if (pages_number < 0 ||
             pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - CFS_PAGE_SHIFT) ||
-            pages_number > num_physpages / 4) /* 1/4 of RAM */
+            pages_number > cfs_num_physpages / 4) /* 1/4 of RAM */
                 return -ERANGE;
 
         client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -499,7 +499,7 @@ static int osc_wd_checksum_type(struct file *file, const char *buffer,
 
         if (count > sizeof(kernbuf) - 1)
                 return -EINVAL;
-        if (copy_from_user(kernbuf, buffer, count))
+        if (cfs_copy_from_user(kernbuf, buffer, count))
                 return -EFAULT;
         if (count > 0 && kernbuf[count - 1] == '\n')
                 kernbuf[count - 1] = '\0';
@@ -522,7 +522,8 @@ static int osc_rd_resend_count(char *page, char **start, off_t off, int count,
 {
         struct obd_device *obd = data;
 
-        return snprintf(page, count, "%u\n", atomic_read(&obd->u.cli.cl_resends));
+        return snprintf(page, count, "%u\n",
+                        cfs_atomic_read(&obd->u.cli.cl_resends));
 }
 
 static int osc_wr_resend_count(struct file *file, const char *buffer,
@@ -538,7 +539,7 @@ static int osc_wr_resend_count(struct file *file, const char *buffer,
         if (val < 0)
                return -EINVAL;
 
-        atomic_set(&obd->u.cli.cl_resends, val);
+        cfs_atomic_set(&obd->u.cli.cl_resends, val);
 
         return count;
 }
@@ -586,7 +587,7 @@ static int osc_rd_destroys_in_flight(char *page, char **start, off_t off,
 {
         struct obd_device *obd = data;
         return snprintf(page, count, "%u\n",
-                        atomic_read(&obd->u.cli.cl_destroy_in_flight));
+                        cfs_atomic_read(&obd->u.cli.cl_destroy_in_flight));
 }
 
 static struct lprocfs_vars lprocfs_osc_obd_vars[] = {
@@ -650,7 +651,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
         int i;
 
-        do_gettimeofday(&now);
+        cfs_gettimeofday(&now);
 
         client_obd_list_lock(&cli->cl_loi_list_lock);
 
@@ -764,7 +765,7 @@ static int osc_stats_seq_show(struct seq_file *seq, void *v)
         struct obd_device *dev = seq->private;
         struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
 
-        do_gettimeofday(&now);
+        cfs_gettimeofday(&now);
 
         seq_printf(seq, "snapshot_time:         %lu.%lu (secs.usecs)\n",
                    now.tv_sec, now.tv_usec);
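
osc_wd_checksum_type() pulls the user buffer in through the renamed copy helper. A sketch of that bounded-copy pattern (demo_parse_user_buf is hypothetical; the macro expansion is the assumed Linux one):

    #include <linux/uaccess.h>

    #define cfs_copy_from_user(to, from, n) copy_from_user(to, from, n)

    static int demo_parse_user_buf(const char *buffer, unsigned long count)
    {
            char kernbuf[16];

            if (count > sizeof(kernbuf) - 1)
                    return -EINVAL;
            if (cfs_copy_from_user(kernbuf, buffer, count))
                    return -EFAULT;
            kernbuf[count] = '\0';
            if (count > 0 && kernbuf[count - 1] == '\n')
                    kernbuf[count - 1] = '\0';
            return 0;
    }
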
index 7f91fb1..fb5f74d 100644
@@ -73,7 +73,7 @@ struct osc_io {
         struct obdo        oi_oa;
         struct osc_punch_cbargs {
                 int               opc_rc;
-                struct completion opc_sync;
+                cfs_completion_t  opc_sync;
         } oi_punch_cbarg;
 };
 
@@ -114,17 +114,17 @@ struct osc_object {
          */
         struct cl_io       oo_debug_io;
         /** Serialization object for osc_object::oo_debug_io. */
-        struct mutex       oo_debug_mutex;
+        cfs_mutex_t        oo_debug_mutex;
 #endif
         /**
          * List of pages in transfer.
          */
-        struct list_head   oo_inflight[CRT_NR];
+        cfs_list_t         oo_inflight[CRT_NR];
         /**
          * Lock, protecting ccc_object::cob_inflight, because a seat-belt is
          * locked during take-off and landing.
          */
-        spinlock_t         oo_seatbelt;
+        cfs_spinlock_t     oo_seatbelt;
 };
 
 /*
@@ -290,7 +290,7 @@ struct osc_page {
          * Linkage into a per-osc_object list of pages in flight. For
          * debugging.
          */
-        struct list_head      ops_inflight;
+        cfs_list_t            ops_inflight;
         /**
          * Thread that submitted this page for transfer. For debugging.
          */
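
The osc structures swap their embedded lock, list, and completion types for cfs_ equivalents; on Linux the layout is unchanged if the new types are plain aliases, which is the assumption sketched here:

    #include <linux/mutex.h>
    #include <linux/completion.h>

    typedef struct mutex      cfs_mutex_t;
    typedef struct completion cfs_completion_t;

    #define cfs_init_completion(c)      init_completion(c)
    #define cfs_complete(c)             complete(c)
    #define cfs_wait_for_completion(c)  wait_for_completion(c)

The completion trio carries the osc punch path later in this commit: trunc_start initializes opc_sync, the upcall completes it, and trunc_end waits on it.
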
index 99d76f9..cbe8d5d 100644
@@ -90,7 +90,7 @@ static int osc_interpret_create(const struct lu_env *env,
         oscc = req->rq_async_args.pointer_arg[0];
         LASSERT(oscc && (oscc->oscc_obd != LP_POISON));
 
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         oscc->oscc_flags &= ~OSCC_FLAG_CREATING;
         switch (rc) {
         case 0: {
@@ -119,7 +119,7 @@ static int osc_interpret_create(const struct lu_env *env,
                         }
                         oscc->oscc_last_id = body->oa.o_id;
                 }
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 break;
         }
         case -EROFS:
@@ -133,7 +133,7 @@ static int osc_interpret_create(const struct lu_env *env,
                                 oscc->oscc_grow_count = OST_MIN_PRECREATE;
                         }
                 }
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 DEBUG_REQ(D_INODE, req, "OST out of space, flagging");
                 break;
         case -EIO: {
@@ -141,7 +141,7 @@ static int osc_interpret_create(const struct lu_env *env,
                  * of filter (see filter_handle_precreate for detail)*/
                 if (body && body->oa.o_id > oscc->oscc_last_id)
                         oscc->oscc_last_id = body->oa.o_id;
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 break;
         }
         case -EINTR:
@@ -154,13 +154,13 @@ static int osc_interpret_create(const struct lu_env *env,
                  * IMP_DISCONN event */
                 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
                 /* oscc->oscc_grow_count = OST_MIN_PRECREATE; */
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 break;
         }
         default: {
                 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
                 oscc->oscc_grow_count = OST_MIN_PRECREATE;
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 DEBUG_REQ(D_ERROR, req,
                           "Unknown rc %d from async create: failing oscc", rc);
                 ptlrpc_fail_import(req->rq_import,
@@ -171,9 +171,9 @@ static int osc_interpret_create(const struct lu_env *env,
         CDEBUG(D_HA, "preallocated through id "LPU64" (next to use "LPU64")\n",
                oscc->oscc_last_id, oscc->oscc_next_id);
 
-        spin_lock(&oscc->oscc_lock);
-        list_for_each_entry_safe(fake_req, pos,
-                                 &oscc->oscc_wait_create_list, rq_list) {
+        cfs_spin_lock(&oscc->oscc_lock);
+        cfs_list_for_each_entry_safe(fake_req, pos,
+                                     &oscc->oscc_wait_create_list, rq_list) {
                 if (handle_async_create(fake_req, rc)  == -EAGAIN) {
                         oscc_internal_create(oscc);
                        /* sending the request should never fail because
@@ -181,7 +181,7 @@ static int osc_interpret_create(const struct lu_env *env,
                         GOTO(exit_wakeup, rc);
                 }
         }
-        spin_unlock(&oscc->oscc_lock);
+        cfs_spin_unlock(&oscc->oscc_lock);
 
 exit_wakeup:
         cfs_waitq_signal(&oscc->oscc_waitq);
@@ -198,7 +198,7 @@ static int oscc_internal_create(struct osc_creator *oscc)
 
         if ((oscc->oscc_flags & OSCC_FLAG_RECOVERING) ||
             (oscc->oscc_flags & OSCC_FLAG_DEGRADED)) {
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 RETURN(0);
         }
 
@@ -213,7 +213,7 @@ static int oscc_internal_create(struct osc_creator *oscc)
         }
 
         if (oscc->oscc_flags & OSCC_FLAG_CREATING) {
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 RETURN(0);
         }
 
@@ -221,15 +221,15 @@ static int oscc_internal_create(struct osc_creator *oscc)
                 oscc->oscc_grow_count = oscc->oscc_max_grow_count / 2;
 
         oscc->oscc_flags |= OSCC_FLAG_CREATING;
-        spin_unlock(&oscc->oscc_lock);
+        cfs_spin_unlock(&oscc->oscc_lock);
 
         request = ptlrpc_request_alloc_pack(oscc->oscc_obd->u.cli.cl_import,
                                             &RQF_OST_CREATE,
                                             LUSTRE_OST_VERSION, OST_CREATE);
         if (request == NULL) {
-                spin_lock(&oscc->oscc_lock);
+                cfs_spin_lock(&oscc->oscc_lock);
                 oscc->oscc_flags &= ~OSCC_FLAG_CREATING;
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 RETURN(-ENOMEM);
         }
 
@@ -237,13 +237,13 @@ static int oscc_internal_create(struct osc_creator *oscc)
         ptlrpc_at_set_req_timeout(request);
         body = req_capsule_client_get(&request->rq_pill, &RMF_OST_BODY);
 
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         body->oa.o_id = oscc->oscc_last_id + oscc->oscc_grow_count;
         body->oa.o_gr = oscc->oscc_oa.o_gr;
         LASSERT_MDS_GROUP(body->oa.o_gr);
         body->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
         request->rq_async_args.space[0] = oscc->oscc_grow_count;
-        spin_unlock(&oscc->oscc_lock);
+        cfs_spin_unlock(&oscc->oscc_lock);
         CDEBUG(D_RPCTRACE, "prealloc through id "LPU64" (last seen "LPU64")\n",
                body->oa.o_id, oscc->oscc_last_id);
 
@@ -269,9 +269,9 @@ static int oscc_has_objects(struct osc_creator *oscc, int count)
 {
         int have_objs;
 
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         have_objs = oscc_has_objects_nolock(oscc, count);
-        spin_unlock(&oscc->oscc_lock);
+        cfs_spin_unlock(&oscc->oscc_lock);
 
         return have_objs;
 }
@@ -283,7 +283,7 @@ static int oscc_wait_for_objects(struct osc_creator *oscc, int count)
 
         ost_unusable = oscc->oscc_obd->u.cli.cl_import->imp_invalid;
 
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         ost_unusable |= (OSCC_FLAG_NOSPC | OSCC_FLAG_RDONLY |
                          OSCC_FLAG_EXITING) & oscc->oscc_flags;
         have_objs = oscc_has_objects_nolock(oscc, count);
@@ -292,7 +292,7 @@ static int oscc_wait_for_objects(struct osc_creator *oscc, int count)
                /* oscc_internal_create() releases the lock itself */
                 have_objs = oscc_internal_create(oscc);
         else
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
 
         return have_objs || ost_unusable;
 }
@@ -319,9 +319,9 @@ static int oscc_in_sync(struct osc_creator *oscc)
 {
         int sync;
 
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         sync = oscc->oscc_flags & OSCC_FLAG_SYNC_IN_PROGRESS;
-        spin_unlock(&oscc->oscc_lock);
+        cfs_spin_unlock(&oscc->oscc_lock);
 
         return sync;
 }
@@ -344,28 +344,28 @@ int osc_precreate(struct obd_export *exp)
                 RETURN(1000);
 
         /* Handle critical states first */
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         if (oscc->oscc_flags & OSCC_FLAG_NOSPC ||
             oscc->oscc_flags & OSCC_FLAG_RDONLY ||
             oscc->oscc_flags & OSCC_FLAG_EXITING) {
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 RETURN(1000);
         }
 
         if (oscc->oscc_flags & OSCC_FLAG_RECOVERING ||
             oscc->oscc_flags & OSCC_FLAG_DEGRADED) {
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 RETURN(2);
         }
 
         if (oscc_has_objects_nolock(oscc, oscc->oscc_grow_count / 2)) {
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 RETURN(0);
         }
 
         if ((oscc->oscc_flags & OSCC_FLAG_SYNC_IN_PROGRESS) ||
             (oscc->oscc_flags & OSCC_FLAG_CREATING)) {
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 RETURN(1);
         }
 
@@ -426,15 +426,16 @@ out_wake:
 }
 
 static int async_create_interpret(const struct lu_env *env,
-                                  struct ptlrpc_request *req, void *data, int rc)
+                                  struct ptlrpc_request *req, void *data,
+                                  int rc)
 {
         struct osc_create_async_args *args = ptlrpc_req_async_args(req);
         struct osc_creator    *oscc = args->rq_oscc;
         int ret;
 
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         ret = handle_async_create(req, rc);
-        spin_unlock(&oscc->oscc_lock);
+        cfs_spin_unlock(&oscc->oscc_lock);
 
         return ret;
 }
@@ -479,7 +480,7 @@ int osc_create_async(struct obd_export *exp, struct obd_info *oinfo,
         args->rq_lsm   = *ea;
         args->rq_oinfo = oinfo;
 
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         /* try fast path */
         rc = handle_async_create(fake_req, 0);
         if (rc == -EAGAIN) {
@@ -487,12 +488,12 @@ int osc_create_async(struct obd_export *exp, struct obd_info *oinfo,
                /* we do not have objects - try waiting */
                 is_add = ptlrpcd_add_req(fake_req, PSCOPE_OTHER);
                 if (!is_add)
-                        list_add(&fake_req->rq_list,
-                                 &oscc->oscc_wait_create_list);
+                        cfs_list_add(&fake_req->rq_list,
+                                     &oscc->oscc_wait_create_list);
                 else
                         rc = is_add;
         }
-        spin_unlock(&oscc->oscc_lock);
+        cfs_spin_unlock(&oscc->oscc_lock);
 
         if (rc != -EAGAIN)
                /* need to free the request if an error was hit or
                 * objects were already allocated */
         /* this is the special case where create removes orphans */
         if (oa->o_valid & OBD_MD_FLFLAGS &&
             oa->o_flags == OBD_FL_DELORPHAN) {
-                spin_lock(&oscc->oscc_lock);
+                cfs_spin_lock(&oscc->oscc_lock);
                 if (oscc->oscc_flags & OSCC_FLAG_SYNC_IN_PROGRESS) {
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
                         RETURN(-EBUSY);
                 }
                 if (!(oscc->oscc_flags & OSCC_FLAG_RECOVERING)) {
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
                         RETURN(0);
                 }
 
@@ -543,7 +544,7 @@ int osc_create(struct obd_export *exp, struct obdo *oa,
                /* setting the LOW flag prevents extra growth of the
                 * precreate size and enforces the last assigned size */
                 oscc->oscc_flags |= OSCC_FLAG_LOW;
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
                 CDEBUG(D_HA, "%s: oscc recovery started - delete to "LPU64"\n",
                        oscc->oscc_obd->obd_name, oscc->oscc_next_id - 1);
 
@@ -555,7 +556,7 @@ int osc_create(struct obd_export *exp, struct obdo *oa,
 
                 rc = osc_real_create(exp, oa, ea, NULL);
 
-                spin_lock(&oscc->oscc_lock);
+                cfs_spin_lock(&oscc->oscc_lock);
                 oscc->oscc_flags &= ~OSCC_FLAG_SYNC_IN_PROGRESS;
                 if (rc == 0 || rc == -ENOSPC) {
                         struct obd_connect_data *ocd;
@@ -584,7 +585,7 @@ int osc_create(struct obd_export *exp, struct obdo *oa,
                 }
 
                 cfs_waitq_signal(&oscc->oscc_waitq);
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
 
                 if (rc < 0)
                         RETURN(rc);
@@ -607,31 +608,31 @@ int osc_create(struct obd_export *exp, struct obdo *oa,
                         CDEBUG(D_HA,"%s: error create %d\n",
                                oscc->oscc_obd->obd_name, rc);
 
-                spin_lock(&oscc->oscc_lock);
+                cfs_spin_lock(&oscc->oscc_lock);
 
                /* woken up, but recovery has not finished */
                 if ((oscc->oscc_obd->u.cli.cl_import->imp_invalid) ||
                     (oscc->oscc_flags & OSCC_FLAG_RECOVERING)) {
                         rc = -EIO;
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
                         break;
                 }
 
                 if (oscc->oscc_flags & OSCC_FLAG_NOSPC) {
                         rc = -ENOSPC;
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
                         break;
                 }
 
                 if (oscc->oscc_flags & OSCC_FLAG_RDONLY) {
                         rc = -EROFS;
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
                         break;
                 }
 
                 // Should we report -EIO error ?
                 if (oscc->oscc_flags & OSCC_FLAG_EXITING) {
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
                         break;
                 }
 
@@ -641,14 +642,14 @@ int osc_create(struct obd_export *exp, struct obdo *oa,
                         lsm->lsm_object_id = oscc->oscc_next_id;
                         *ea = lsm;
                         oscc->oscc_next_id++;
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
 
                         CDEBUG(D_RPCTRACE, "%s: set oscc_next_id = "LPU64"\n",
                                exp->exp_obd->obd_name, oscc->oscc_next_id);
                         break;
                 }
 
-                spin_unlock(&oscc->oscc_lock);
+                cfs_spin_unlock(&oscc->oscc_lock);
         }
 
         if (rc == 0) {
@@ -676,7 +677,7 @@ void oscc_init(struct obd_device *obd)
         memset(oscc, 0, sizeof(*oscc));
 
         cfs_waitq_init(&oscc->oscc_waitq);
-        spin_lock_init(&oscc->oscc_lock);
+        cfs_spin_lock_init(&oscc->oscc_lock);
         oscc->oscc_obd = obd;
         oscc->oscc_grow_count = OST_MIN_PRECREATE;
         oscc->oscc_max_grow_count = OST_MAX_PRECREATE;
@@ -697,8 +698,8 @@ void oscc_fini(struct obd_device *obd)
         ENTRY;
 
 
-        spin_lock(&oscc->oscc_lock);
+        cfs_spin_lock(&oscc->oscc_lock);
         oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
         oscc->oscc_flags |= OSCC_FLAG_EXITING;
-        spin_unlock(&oscc->oscc_lock);
+        cfs_spin_unlock(&oscc->oscc_lock);
 }
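
osc_create() and osc_precreate() repeatedly take oscc_lock just to test bits in oscc_flags and drop it again. As a hypothetical convenience (not part of this patch), the recurring test collapses to:

    /* hypothetical helper; the patch keeps the open-coded form */
    static inline int oscc_flags_test(struct osc_creator *oscc, int mask)
    {
            int hit;

            cfs_spin_lock(&oscc->oscc_lock);
            hit = !!(oscc->oscc_flags & mask);
            cfs_spin_unlock(&oscc->oscc_lock);
            return hit;
    }

e.g. oscc_flags_test(oscc, OSCC_FLAG_NOSPC | OSCC_FLAG_RDONLY). The open-coded form survives here because several sites do more than one test under the same hold of the lock.
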
index 8d63452..819ed7f 100644
@@ -92,7 +92,7 @@ struct lu_kmem_descr osc_caches[] = {
         }
 };
 
-struct lock_class_key osc_ast_guard_class;
+cfs_lock_class_key_t osc_ast_guard_class;
 
 /*****************************************************************************
  *
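
osc_ast_guard_class is a lockdep class key, renamed like everything else; presumably a plain alias on Linux:

    #include <linux/lockdep.h>

    /* assumed alias; ports without lockdep would define a stub type */
    typedef struct lock_class_key cfs_lock_class_key_t;
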
index 90c2590..0fbf8d0 100644
@@ -63,9 +63,9 @@ struct osc_async_page {
         unsigned short          oap_cmd;
         unsigned short          oap_interrupted:1;
 
-        struct list_head        oap_pending_item;
-        struct list_head        oap_urgent_item;
-        struct list_head        oap_rpc_item;
+        cfs_list_t              oap_pending_item;
+        cfs_list_t              oap_urgent_item;
+        cfs_list_t              oap_rpc_item;
 
         obd_off                 oap_obj_off;
         unsigned                oap_page_off;
@@ -79,9 +79,9 @@ struct osc_async_page {
 
         const struct obd_async_page_ops *oap_caller_ops;
         void                    *oap_caller_data;
-        struct list_head         oap_page_list;
+        cfs_list_t               oap_page_list;
         struct ldlm_lock        *oap_ldlm_lock;
-        spinlock_t               oap_lock;
+        cfs_spinlock_t           oap_lock;
 };
 
 #define oap_page        oap_brw_page.pg
@@ -89,9 +89,9 @@ struct osc_async_page {
 #define oap_brw_flags   oap_brw_page.flag
 
 struct osc_cache_waiter {
-        struct list_head        ocw_entry;
+        cfs_list_t              ocw_entry;
         cfs_waitq_t             ocw_waitq;
-        struct osc_async_page   *ocw_oap;
+        struct osc_async_page  *ocw_oap;
         int                     ocw_rc;
 };
 
@@ -168,7 +168,7 @@ int osc_enter_cache_try(const struct lu_env *env,
                         struct osc_async_page *oap, int transient);
 
 struct cl_page *osc_oap2cl_page(struct osc_async_page *oap);
-extern spinlock_t osc_ast_guard;
+extern cfs_spinlock_t osc_ast_guard;
 
 int osc_cleanup(struct obd_device *obd);
 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
@@ -194,8 +194,8 @@ static inline int osc_recoverable_error(int rc)
 /* return 1 if osc should be resend request */
 static inline int osc_should_resend(int resend, struct client_obd *cli)
 {
-        return atomic_read(&cli->cl_resends) ?
-               atomic_read(&cli->cl_resends) > resend : 1;
+        return cfs_atomic_read(&cli->cl_resends) ?
+               cfs_atomic_read(&cli->cl_resends) > resend : 1;
 }
 
 #ifndef min_t
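
osc_should_resend() packs its policy into one ternary: a cl_resends of zero means "resend without limit", otherwise resends are allowed while the attempt count is below the limit. An equivalent, more explicit rewrite for illustration (the demo name is hypothetical):

    static inline int demo_should_resend(int resend, struct client_obd *cli)
    {
            int limit = cfs_atomic_read(&cli->cl_resends);

            return limit == 0 || resend < limit;
    }
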
index c9ccf7d..6190433 100644
@@ -152,16 +152,16 @@ static int osc_io_submit(const struct lu_env *env,
                 exp = osc_export(osc);
 
                 if (priority > CRP_NORMAL) {
-                        spin_lock(&oap->oap_lock);
+                        cfs_spin_lock(&oap->oap_lock);
                         oap->oap_async_flags |= ASYNC_HP;
-                        spin_unlock(&oap->oap_lock);
+                        cfs_spin_unlock(&oap->oap_lock);
                 }
                 /*
                  * This can be checked without cli->cl_loi_list_lock, because
                  * ->oap_*_item are always manipulated when the page is owned.
                  */
-                if (!list_empty(&oap->oap_urgent_item) ||
-                    !list_empty(&oap->oap_rpc_item)) {
+                if (!cfs_list_empty(&oap->oap_urgent_item) ||
+                    !cfs_list_empty(&oap->oap_rpc_item)) {
                         result = -EBUSY;
                         break;
                 }
@@ -177,7 +177,7 @@ static int osc_io_submit(const struct lu_env *env,
                 result = cl_page_prep(env, io, page, crt);
                 if (result == 0) {
                         cl_page_list_move(qout, qin, page);
-                        if (list_empty(&oap->oap_pending_item)) {
+                        if (cfs_list_empty(&oap->oap_pending_item)) {
                                 osc_io_submit_page(env, cl2osc_io(env, ios),
                                                    opg, crt);
                         } else {
@@ -380,7 +380,7 @@ static int osc_punch_upcall(void *a, int rc)
         struct osc_punch_cbargs *args = a;
 
         args->opc_rc = rc;
-        complete(&args->opc_sync);
+        cfs_complete(&args->opc_sync);
         return 0;
 }
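
osc_punch_upcall() signals a completion that osc_io_trunc_end() below waits on, turning the asynchronous punch RPC into a synchronous call for the truncate path. The completion wrappers being renamed here presumably alias the kernel API one-to-one on Linux; a sketch under that assumption (the cfs_completion_t name is illustrative):

typedef struct completion cfs_completion_t;

#define cfs_init_completion(c)      init_completion(c)
#define cfs_complete(c)             complete(c)
#define cfs_wait_for_completion(c)  wait_for_completion(c)
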
 
@@ -422,8 +422,9 @@ static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
         cl_page_list_disown(env, io, list);
         cl_page_list_fini(env, list);
 
-        spin_lock(&obj->oo_seatbelt);
-        list_for_each_entry(cp, &obj->oo_inflight[CRT_WRITE], ops_inflight) {
+        cfs_spin_lock(&obj->oo_seatbelt);
+        cfs_list_for_each_entry(cp, &obj->oo_inflight[CRT_WRITE],
+                                ops_inflight) {
                 page = cp->ops_cl.cpl_page;
                 if (page->cp_index >= start + partial) {
                         cfs_task_t *submitter;
@@ -437,7 +438,7 @@ static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
                         libcfs_debug_dumpstack(submitter);
                 }
         }
-        spin_unlock(&obj->oo_seatbelt);
+        cfs_spin_unlock(&obj->oo_seatbelt);
 }
 #else /* __KERNEL__ */
 # define osc_trunc_check(env, io, oio, size) do {;} while (0)
@@ -488,7 +489,7 @@ static int osc_io_trunc_start(const struct lu_env *env,
                 oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
 
                 capa = io->u.ci_truncate.tr_capa;
-                init_completion(&cbargs->opc_sync);
+                cfs_init_completion(&cbargs->opc_sync);
                 result = osc_punch_base(osc_export(cl2osc(obj)), oa, capa,
                                         osc_punch_upcall, cbargs, PTLRPCD_SET);
         }
@@ -504,7 +505,7 @@ static void osc_io_trunc_end(const struct lu_env *env,
         struct obdo             *oa     = &oio->oi_oa;
         int result;
 
-        wait_for_completion(&cbargs->opc_sync);
+        cfs_wait_for_completion(&cbargs->opc_sync);
 
         result = io->ci_result = cbargs->opc_rc;
         if (result == 0) {
@@ -631,7 +632,7 @@ static void osc_req_attr_set(const struct lu_env *env,
         }
         if (flags & OBD_MD_FLHANDLE) {
                 clerq = slice->crs_req;
-                LASSERT(!list_empty(&clerq->crq_pages));
+                LASSERT(!cfs_list_empty(&clerq->crq_pages));
                 apage = container_of(clerq->crq_pages.next,
                                      struct cl_page, cp_flight);
                 opg = osc_cl_page_osc(apage);
@@ -642,7 +643,8 @@ static void osc_req_attr_set(const struct lu_env *env,
                         struct cl_lock          *scan;
 
                         head = cl_object_header(apage->cp_obj);
-                        list_for_each_entry(scan, &head->coh_locks, cll_linkage)
+                        cfs_list_for_each_entry(scan, &head->coh_locks,
+                                                cll_linkage)
                                 CL_LOCK_DEBUG(D_ERROR, env, scan,
                                               "no cover page!\n");
                         CL_PAGE_DEBUG(D_ERROR, env, apage,
diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index 8cb79b1..c6e7942 100644
@@ -135,10 +135,10 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
 {
         struct ldlm_lock *dlmlock;
 
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         dlmlock = olck->ols_lock;
         if (dlmlock == NULL) {
-                spin_unlock(&osc_ast_guard);
+                cfs_spin_unlock(&osc_ast_guard);
                 return;
         }
 
@@ -147,7 +147,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
          * call to osc_lock_detach() */
         dlmlock->l_ast_data = NULL;
         olck->ols_handle.cookie = 0ULL;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         lock_res_and_lock(dlmlock);
         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
@@ -276,14 +276,14 @@ static int osc_enq2ldlm_flags(__u32 enqflags)
  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
  * pointers. Initialized in osc_init().
  */
-spinlock_t osc_ast_guard;
+cfs_spinlock_t osc_ast_guard;
 
 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
 {
         struct osc_lock *olck;
 
         lock_res_and_lock(dlm_lock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         olck = dlm_lock->l_ast_data;
         if (olck != NULL) {
                 struct cl_lock *lock = olck->ols_cl.cls_lock;
@@ -303,7 +303,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
                 } else
                         olck = NULL;
         }
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
         unlock_res_and_lock(dlm_lock);
         return olck;
 }
@@ -451,11 +451,11 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
         LASSERT(dlmlock != NULL);
 
         lock_res_and_lock(dlmlock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         LASSERT(dlmlock->l_ast_data == olck);
         LASSERT(olck->ols_lock == NULL);
         olck->ols_lock = dlmlock;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         /*
          * Lock might be not yet granted. In this case, completion ast
@@ -515,11 +515,11 @@ static int osc_lock_upcall(void *cookie, int errcode)
                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
                         if (dlmlock != NULL) {
                                 lock_res_and_lock(dlmlock);
-                                spin_lock(&osc_ast_guard);
+                                cfs_spin_lock(&osc_ast_guard);
                                 LASSERT(olck->ols_lock == NULL);
                                 dlmlock->l_ast_data = NULL;
                                 olck->ols_handle.cookie = 0ULL;
-                                spin_unlock(&osc_ast_guard);
+                                cfs_spin_unlock(&osc_ast_guard);
                                 unlock_res_and_lock(dlmlock);
                                 LDLM_LOCK_PUT(dlmlock);
                         }
@@ -890,7 +890,7 @@ static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
         unsigned long            weight;
         ENTRY;
 
-        might_sleep();
+        cfs_might_sleep();
         /*
          * osc_ldlm_weigh_ast has a complex context since it might be called
          * because of lock canceling, or from user's input. We have to make
@@ -1020,13 +1020,6 @@ static int osc_lock_compatible(const struct osc_lock *qing,
         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
 }
 
-#ifndef list_for_each_entry_continue 
-#define list_for_each_entry_continue(pos, head, member)                 \
-        for (pos = list_entry(pos->member.next, typeof(*pos), member);  \
-             prefetch(pos->member.next), &pos->member != (head);        \
-             pos = list_entry(pos->member.next, typeof(*pos), member))
-#endif
-
 /**
  * Cancel all conflicting locks and wait for them to be destroyed.
  *
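
The private fallback deleted above is no longer needed because libcfs now ships the iterator under the cfs_ name on every platform. Its definition is expected to mirror the macro removed here (sketch based on the deleted body; whether the prefetch() hint survives in the portable copy is an open assumption):

#define cfs_list_for_each_entry_continue(pos, head, member)             \
        for (pos = cfs_list_entry(pos->member.next, typeof(*pos),      \
                                  member);                              \
             &pos->member != (head);                                    \
             pos = cfs_list_entry(pos->member.next, typeof(*pos),       \
                                  member))
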
@@ -1058,8 +1051,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
         if (olck->ols_glimpse)
                 return 0;
 
-        spin_lock(&hdr->coh_lock_guard);
-        list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
+        cfs_spin_lock(&hdr->coh_lock_guard);
+        cfs_list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
                 struct cl_lock_descr *cld = &scan->cll_descr;
                 const struct osc_lock *scan_ols;
 
@@ -1101,7 +1094,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
                 conflict = scan;
                 break;
         }
-        spin_unlock(&hdr->coh_lock_guard);
+        cfs_spin_unlock(&hdr->coh_lock_guard);
 
         if (conflict) {
                 CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
@@ -1154,8 +1147,8 @@ static int osc_deadlock_is_possible(const struct lu_env *env,
         head = cl_object_header(obj);
 
         result = 0;
-        spin_lock(&head->coh_lock_guard);
-        list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+        cfs_spin_lock(&head->coh_lock_guard);
+        cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                 if (scan != lock) {
                         struct osc_lock *oscan;
 
@@ -1167,7 +1160,7 @@ static int osc_deadlock_is_possible(const struct lu_env *env,
                         }
                 }
         }
-        spin_unlock(&head->coh_lock_guard);
+        cfs_spin_unlock(&head->coh_lock_guard);
         RETURN(result);
 }
 
@@ -1413,7 +1406,7 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                 plist = &osc_env_info(env)->oti_plist;
                 cl_page_list_init(plist);
 
-                mutex_lock(&oob->oo_debug_mutex);
+                cfs_mutex_lock(&oob->oo_debug_mutex);
 
                 io->ci_obj = cl_object_top(obj);
                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
@@ -1429,7 +1422,7 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                 cl_page_list_disown(env, io, plist);
                 cl_page_list_fini(env, plist);
                 cl_io_fini(env, io);
-                mutex_unlock(&oob->oo_debug_mutex);
+                cfs_mutex_unlock(&oob->oo_debug_mutex);
                 cl_env_nested_put(&nest, env);
         } else
                 result = 0;
diff --git a/lustre/osc/osc_object.c b/lustre/osc/osc_object.c
index 42fc154..84efcb5 100644
@@ -78,9 +78,9 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
 
         osc->oo_oinfo = cconf->u.coc_oinfo;
 #ifdef INVARIANT_CHECK
-        mutex_init(&osc->oo_debug_mutex);
+        cfs_mutex_init(&osc->oo_debug_mutex);
 #endif
-        spin_lock_init(&osc->oo_seatbelt);
+        cfs_spin_lock_init(&osc->oo_seatbelt);
         for (i = 0; i < CRT_NR; ++i)
                 CFS_INIT_LIST_HEAD(&osc->oo_inflight[i]);
         return 0;
@@ -92,7 +92,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
         int i;
 
         for (i = 0; i < CRT_NR; ++i)
-                LASSERT(list_empty(&osc->oo_inflight[i]));
+                LASSERT(cfs_list_empty(&osc->oo_inflight[i]));
 
         lu_object_fini(obj);
         OBD_SLAB_FREE_PTR(osc, osc_object_kmem);
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index 38581f5..56c3106 100644
@@ -67,7 +67,7 @@ static int osc_page_is_dlocked(const struct lu_env *env,
         ldlm_mode_t             dlmmode;
         int                     flags;
 
-        might_sleep();
+        cfs_might_sleep();
 
         info = osc_env_info(env);
         resname = &info->oti_resname;
@@ -121,8 +121,8 @@ static int osc_page_protected(const struct lu_env *env,
                 descr->cld_mode = mode;
                 descr->cld_start = page->cp_index;
                 descr->cld_end   = page->cp_index;
-                spin_lock(&hdr->coh_lock_guard);
-                list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
+                cfs_spin_lock(&hdr->coh_lock_guard);
+                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                         /*
                          * Lock-less sub-lock has to be either in HELD state
                          * (when io is actively going on), or in CACHED state,
@@ -139,7 +139,7 @@ static int osc_page_protected(const struct lu_env *env,
                                 break;
                         }
                 }
-                spin_unlock(&hdr->coh_lock_guard);
+                cfs_spin_unlock(&hdr->coh_lock_guard);
         }
         return result;
 }
@@ -200,10 +200,10 @@ static void osc_page_transfer_add(const struct lu_env *env,
         LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));
 
         obj = cl2osc(opg->ops_cl.cpl_obj);
-        spin_lock(&obj->oo_seatbelt);
-        list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+        cfs_spin_lock(&obj->oo_seatbelt);
+        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
         opg->ops_submitter = cfs_current();
-        spin_unlock(&obj->oo_seatbelt);
+        cfs_spin_unlock(&obj->oo_seatbelt);
 }
 
 static int osc_page_cache_add(const struct lu_env *env,
@@ -276,9 +276,9 @@ static int osc_page_fail(const struct lu_env *env,
 }
 
 
-static const char *osc_list(struct list_head *head)
+static const char *osc_list(cfs_list_t *head)
 {
-        return list_empty(head) ? "-" : "+";
+        return cfs_list_empty(head) ? "-" : "+";
 }
 
 static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
@@ -363,9 +363,9 @@ static void osc_page_delete(const struct lu_env *env,
                               "Trying to teardown failed: %d\n", rc);
                 LASSERT(0);
         }
-        spin_lock(&obj->oo_seatbelt);
-        list_del_init(&opg->ops_inflight);
-        spin_unlock(&obj->oo_seatbelt);
+        cfs_spin_lock(&obj->oo_seatbelt);
+        cfs_list_del_init(&opg->ops_inflight);
+        cfs_spin_unlock(&obj->oo_seatbelt);
         EXIT;
 }
 
@@ -379,9 +379,9 @@ void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
 
         opg->ops_from = from;
         opg->ops_to   = to;
-        spin_lock(&oap->oap_lock);
+        cfs_spin_lock(&oap->oap_lock);
         oap->oap_async_flags |= ASYNC_COUNT_STABLE;
-        spin_unlock(&oap->oap_lock);
+        cfs_spin_unlock(&oap->oap_lock);
 }
 
 static int osc_page_cancel(const struct lu_env *env,
@@ -499,19 +499,19 @@ static int osc_completion(const struct lu_env *env,
         LASSERT(page->cp_req == NULL);
 
         /* As the transfer for this page is being done, clear the flags */
-        spin_lock(&oap->oap_lock);
+        cfs_spin_lock(&oap->oap_lock);
         oap->oap_async_flags = 0;
-        spin_unlock(&oap->oap_lock);
+        cfs_spin_unlock(&oap->oap_lock);
 
         crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
         /* Clear opg->ops_transfer_pinned before VM lock is released. */
         opg->ops_transfer_pinned = 0;
 
-        spin_lock(&obj->oo_seatbelt);
+        cfs_spin_lock(&obj->oo_seatbelt);
         LASSERT(opg->ops_submitter != NULL);
-        LASSERT(!list_empty(&opg->ops_inflight));
-        list_del_init(&opg->ops_inflight);
-        spin_unlock(&obj->oo_seatbelt);
+        LASSERT(!cfs_list_empty(&opg->ops_inflight));
+        cfs_list_del_init(&opg->ops_inflight);
+        cfs_spin_unlock(&obj->oo_seatbelt);
 
         opg->ops_submit_time = 0;
 
@@ -625,9 +625,9 @@ void osc_io_submit_page(const struct lu_env *env,
         else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                 osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);
 
-        spin_lock(&oap->oap_lock);
+        cfs_spin_lock(&oap->oap_lock);
         oap->oap_async_flags |= OSC_FLAGS | flags;
-        spin_unlock(&oap->oap_lock);
+        cfs_spin_unlock(&oap->oap_lock);
 
         osc_oap_to_pending(oap);
         osc_page_transfer_get(opg, "transfer\0imm");
diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c
index cd7fac6..32394f7 100644
@@ -627,8 +627,8 @@ static int osc_sync(struct obd_export *exp, struct obdo *oa,
  * @objid. Found locks are added into @cancels list. Returns the number of
  * locks added to @cancels list. */
 static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
-                                   struct list_head *cancels, ldlm_mode_t mode,
-                                   int lock_flags)
+                                   cfs_list_t *cancels,
+                                   ldlm_mode_t mode, int lock_flags)
 {
         struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
         struct ldlm_res_id res_id;
@@ -655,19 +655,19 @@ static int osc_destroy_interpret(const struct lu_env *env,
 {
         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
 
-        atomic_dec(&cli->cl_destroy_in_flight);
+        cfs_atomic_dec(&cli->cl_destroy_in_flight);
         cfs_waitq_signal(&cli->cl_destroy_waitq);
         return 0;
 }
 
 static int osc_can_send_destroy(struct client_obd *cli)
 {
-        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
+        if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
             cli->cl_max_rpcs_in_flight) {
                 /* The destroy request can be sent */
                 return 1;
         }
-        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
+        if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
             cli->cl_max_rpcs_in_flight) {
                 /*
                  * The counter has been modified between the two atomic
@@ -768,14 +768,15 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                 CERROR("dirty %lu - %lu > dirty_max %lu\n",
                        cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
                 oa->o_undirty = 0;
-        } else if (atomic_read(&obd_dirty_pages) -
-                   atomic_read(&obd_dirty_transit_pages) > obd_max_dirty_pages + 1){
-                /* The atomic_read() allowing the atomic_inc() are not covered
-                 * by a lock thus they may safely race and trip this CERROR()
-                 * unless we add in a small fudge factor (+1). */
+        } else if (cfs_atomic_read(&obd_dirty_pages) -
+                   cfs_atomic_read(&obd_dirty_transit_pages) >
+                   obd_max_dirty_pages + 1){
+                /* The cfs_atomic_read() allowing the cfs_atomic_inc() are
+                 * not covered by a lock thus they may safely race and trip
+                 * this CERROR() unless we add in a small fudge factor (+1). */
                 CERROR("dirty %d - %d > system dirty_max %d\n",
-                       atomic_read(&obd_dirty_pages),
-                       atomic_read(&obd_dirty_transit_pages),
+                       cfs_atomic_read(&obd_dirty_pages),
+                       cfs_atomic_read(&obd_dirty_transit_pages),
                        obd_max_dirty_pages);
                 oa->o_undirty = 0;
         } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
@@ -810,7 +811,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
 {
         LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
         LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
-        atomic_inc(&obd_dirty_pages);
+        cfs_atomic_inc(&obd_dirty_pages);
         cli->cl_dirty += CFS_PAGE_SIZE;
         cli->cl_avail_grant -= CFS_PAGE_SIZE;
         pga->flag |= OBD_BRW_FROM_GRANT;
@@ -835,11 +836,11 @@ static void osc_release_write_grant(struct client_obd *cli,
         }
 
         pga->flag &= ~OBD_BRW_FROM_GRANT;
-        atomic_dec(&obd_dirty_pages);
+        cfs_atomic_dec(&obd_dirty_pages);
         cli->cl_dirty -= CFS_PAGE_SIZE;
         if (pga->flag & OBD_BRW_NOCACHE) {
                 pga->flag &= ~OBD_BRW_NOCACHE;
-                atomic_dec(&obd_dirty_transit_pages);
+                cfs_atomic_dec(&obd_dirty_transit_pages);
                 cli->cl_dirty_transit -= CFS_PAGE_SIZE;
         }
         if (!sent) {
@@ -873,14 +874,15 @@ static unsigned long rpcs_in_flight(struct client_obd *cli)
 /* caller must hold loi_list_lock */
 void osc_wake_cache_waiters(struct client_obd *cli)
 {
-        struct list_head *l, *tmp;
+        cfs_list_t *l, *tmp;
         struct osc_cache_waiter *ocw;
 
         ENTRY;
-        list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                 /* if we can't dirty more, we must wait until some is written */
                 if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
-                   (atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) {
+                   (cfs_atomic_read(&obd_dirty_pages) + 1 >
+                    obd_max_dirty_pages)) {
                         CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
                                "osc max %ld, sys max %d\n", cli->cl_dirty,
                                cli->cl_dirty_max, obd_max_dirty_pages);
@@ -895,8 +897,8 @@ void osc_wake_cache_waiters(struct client_obd *cli)
                         return;
                 }
 
-                ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
-                list_del_init(&ocw->ocw_entry);
+                ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry);
+                cfs_list_del_init(&ocw->ocw_entry);
                 if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
                         /* no more RPCs in flight to return grant, do sync IO */
                         ocw->ocw_rc = -EDQUOT;
@@ -1039,7 +1041,8 @@ static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
 {
         struct client_obd *client;
 
-        list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
+        cfs_list_for_each_entry(client, &item->ti_obd_list,
+                                cl_grant_shrink_list) {
                 if (osc_should_shrink_grant(client))
                         osc_shrink_grant(client);
         }
@@ -1093,7 +1096,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
         LASSERT(cli->cl_avail_grant >= 0);
 
         if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
-            list_empty(&cli->cl_grant_shrink_list))
+            cfs_list_empty(&cli->cl_grant_shrink_list))
                 osc_add_shrink_grant(cli);
 }
 
@@ -1696,7 +1699,7 @@ int osc_brw_redo_request(struct ptlrpc_request *request,
 
         client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
 
-        list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
+        cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
                 if (oap->oap_request != NULL) {
                         LASSERTF(request == oap->oap_request,
                                  "request %p != oap_request %p\n",
@@ -1718,10 +1721,10 @@ int osc_brw_redo_request(struct ptlrpc_request *request,
         new_aa = ptlrpc_req_async_args(new_req);
 
         CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
-        list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
+        cfs_list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
 
-        list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
+        cfs_list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
                 if (oap->oap_request) {
                         ptlrpc_req_finished(oap->oap_request);
                         oap->oap_request = ptlrpc_request_addref(new_req);
@@ -1927,7 +1930,7 @@ static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
          * queued.  this is our cheap solution for good batching in the case
          * where writepage marks some random page in the middle of the file
          * as urgent because of, say, memory pressure */
-        if (!list_empty(&lop->lop_urgent)) {
+        if (!cfs_list_empty(&lop->lop_urgent)) {
                 CDEBUG(D_CACHE, "urgent request forcing RPC\n");
                 RETURN(1);
         }
@@ -1937,7 +1940,7 @@ static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
                 /* trigger a write rpc stream as long as there are dirtiers
                  * waiting for space.  as they're waiting, they're not going to
                  * create more pages to coalesce with what's waiting.. */
-                if (!list_empty(&cli->cl_cache_waiters)) {
+                if (!cfs_list_empty(&cli->cl_cache_waiters)) {
                         CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
                         RETURN(1);
                 }
@@ -1958,10 +1961,10 @@ static int lop_makes_hprpc(struct loi_oap_pages *lop)
         struct osc_async_page *oap;
         ENTRY;
 
-        if (list_empty(&lop->lop_urgent))
+        if (cfs_list_empty(&lop->lop_urgent))
                 RETURN(0);
 
-        oap = list_entry(lop->lop_urgent.next,
+        oap = cfs_list_entry(lop->lop_urgent.next,
                          struct osc_async_page, oap_urgent_item);
 
         if (oap->oap_async_flags & ASYNC_HP) {
@@ -1972,13 +1975,13 @@ static int lop_makes_hprpc(struct loi_oap_pages *lop)
         RETURN(0);
 }
 
-static void on_list(struct list_head *item, struct list_head *list,
+static void on_list(cfs_list_t *item, cfs_list_t *list,
                     int should_be_on)
 {
-        if (list_empty(item) && should_be_on)
-                list_add_tail(item, list);
-        else if (!list_empty(item) && !should_be_on)
-                list_del_init(item);
+        if (cfs_list_empty(item) && should_be_on)
+                cfs_list_add_tail(item, list);
+        else if (!cfs_list_empty(item) && !should_be_on)
+                cfs_list_del_init(item);
 }
 
 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
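
The trailing comment introduces loi_list_maint(), the main consumer of on_list(). on_list() is an idempotence helper: it adds item to list when should_be_on is true and the item is unlinked, removes it when should_be_on is false and the item is linked, and otherwise does nothing, so callers simply restate the desired membership. An illustrative (hypothetical) call in that spirit:

/* Hypothetical usage sketch -- the argument expressions are illustrative. */
on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
        lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
        lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
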
@@ -2043,9 +2046,9 @@ int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap)
          * page completion may be called only if ->cpo_prep() method was
          * executed by osc_io_submit(), that also adds page the to pending list
          */
-        if (!list_empty(&oap->oap_pending_item)) {
-                list_del_init(&oap->oap_pending_item);
-                list_del_init(&oap->oap_urgent_item);
+        if (!cfs_list_empty(&oap->oap_pending_item)) {
+                cfs_list_del_init(&oap->oap_pending_item);
+                cfs_list_del_init(&oap->oap_urgent_item);
 
                 loi = oap->oap_loi;
                 lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
@@ -2092,10 +2095,10 @@ void osc_oap_to_pending(struct osc_async_page *oap)
                 lop = &oap->oap_loi->loi_read_lop;
 
         if (oap->oap_async_flags & ASYNC_HP)
-                list_add(&oap->oap_urgent_item, &lop->lop_urgent);
+                cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
         else if (oap->oap_async_flags & ASYNC_URGENT)
-                list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
-        list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
+                cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
+        cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
         lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
 }
 
@@ -2114,9 +2117,9 @@ static void osc_ap_completion(const struct lu_env *env,
                 oap->oap_request = NULL;
         }
 
-        spin_lock(&oap->oap_lock);
+        cfs_spin_lock(&oap->oap_lock);
         oap->oap_async_flags = 0;
-        spin_unlock(&oap->oap_lock);
+        cfs_spin_unlock(&oap->oap_lock);
         oap->oap_interrupted = 0;
 
         if (oap->oap_cmd & OBD_BRW_WRITE) {
@@ -2182,13 +2185,14 @@ static int brw_interpret(const struct lu_env *env,
         else
                 cli->cl_r_in_flight--;
 
-        async = list_empty(&aa->aa_oaps);
+        async = cfs_list_empty(&aa->aa_oaps);
         if (!async) { /* from osc_send_oap_rpc() */
                 struct osc_async_page *oap, *tmp;
                 /* the caller may re-use the oap after the completion call so
                  * we need to clean it up a little */
-                list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
-                        list_del_init(&oap->oap_rpc_item);
+                cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps,
+                                             oap_rpc_item) {
+                        cfs_list_del_init(&oap->oap_rpc_item);
                         osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
                 }
                 OBDO_FREE(aa->aa_oa);
@@ -2211,7 +2215,7 @@ static int brw_interpret(const struct lu_env *env,
 
 static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
                                             struct client_obd *cli,
-                                            struct list_head *rpc_list,
+                                            cfs_list_t *rpc_list,
                                             int page_count, int cmd)
 {
         struct ptlrpc_request *req;
@@ -2230,7 +2234,7 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
         int i, rc;
 
         ENTRY;
-        LASSERT(!list_empty(rpc_list));
+        LASSERT(!cfs_list_empty(rpc_list));
 
         memset(&crattr, 0, sizeof crattr);
         OBD_ALLOC(pga, sizeof(*pga) * page_count);
@@ -2242,7 +2246,7 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
                 GOTO(out, req = ERR_PTR(-ENOMEM));
 
         i = 0;
-        list_for_each_entry(oap, rpc_list, oap_rpc_item) {
+        cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) {
                 struct cl_page *page = osc_oap2cl_page(oap);
                 if (ops == NULL) {
                         ops = oap->oap_caller_ops;
@@ -2299,7 +2303,7 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
         aa = ptlrpc_req_async_args(req);
         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
-        list_splice(rpc_list, &aa->aa_oaps);
+        cfs_list_splice(rpc_list, &aa->aa_oaps);
         CFS_INIT_LIST_HEAD(rpc_list);
         aa->aa_clerq = clerq;
 out:
@@ -2312,8 +2316,8 @@ out:
                 /* this should happen rarely and is pretty bad, it makes the
                  * pending list not follow the dirty order */
                 client_obd_list_lock(&cli->cl_loi_list_lock);
-                list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
-                        list_del_init(&oap->oap_rpc_item);
+                cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
+                        cfs_list_del_init(&oap->oap_rpc_item);
 
                         /* queued sync pages can be torn down while the pages
                          * were between the pending list and the rpc */
@@ -2361,21 +2365,21 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
         /* ASYNC_HP pages first. At present, when the lock the pages is
          * to be canceled, the pages covered by the lock will be sent out
          * with ASYNC_HP. We have to send out them as soon as possible. */
-        list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
+        cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
                 if (oap->oap_async_flags & ASYNC_HP) 
-                        list_move(&oap->oap_pending_item, &tmp_list);
+                        cfs_list_move(&oap->oap_pending_item, &tmp_list);
                 else
-                        list_move_tail(&oap->oap_pending_item, &tmp_list);
+                        cfs_list_move_tail(&oap->oap_pending_item, &tmp_list);
                 if (++page_count >= cli->cl_max_pages_per_rpc)
                         break;
         }
 
-        list_splice(&tmp_list, &lop->lop_pending);
+        cfs_list_splice(&tmp_list, &lop->lop_pending);
         page_count = 0;
 
         /* first we find the pages we're allowed to work with */
-        list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
-                                 oap_pending_item) {
+        cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
+                                     oap_pending_item) {
                 ops = oap->oap_caller_ops;
 
                 LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
@@ -2431,15 +2435,15 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
                         case -EINTR:
                                 /* the io isn't needed.. tell the checks
                                  * below to complete the rpc with EINTR */
-                                spin_lock(&oap->oap_lock);
+                                cfs_spin_lock(&oap->oap_lock);
                                 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
-                                spin_unlock(&oap->oap_lock);
+                                cfs_spin_unlock(&oap->oap_lock);
                                 oap->oap_count = -EINTR;
                                 break;
                         case 0:
-                                spin_lock(&oap->oap_lock);
+                                cfs_spin_lock(&oap->oap_lock);
                                 oap->oap_async_flags |= ASYNC_READY;
-                                spin_unlock(&oap->oap_lock);
+                                cfs_spin_unlock(&oap->oap_lock);
                                 break;
                         default:
                                 LASSERTF(0, "oap %p page %p returned %d "
@@ -2473,9 +2477,9 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
 #endif
 
                 /* take the page out of our book-keeping */
-                list_del_init(&oap->oap_pending_item);
+                cfs_list_del_init(&oap->oap_pending_item);
                 lop_update_pending(cli, lop, cmd, -1);
-                list_del_init(&oap->oap_urgent_item);
+                cfs_list_del_init(&oap->oap_urgent_item);
 
                 if (page_count == 0)
                         starting_offset = (oap->oap_obj_off+oap->oap_page_off) &
@@ -2497,7 +2501,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
                 }
 
                 /* now put the page back in our accounting */
-                list_add_tail(&oap->oap_rpc_item, &rpc_list);
+                cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
                 if (page_count == 0)
                         srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
                 if (++page_count >= cli->cl_max_pages_per_rpc)
@@ -2535,7 +2539,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
 
         req = osc_build_req(env, cli, &rpc_list, page_count, cmd);
         if (IS_ERR(req)) {
-                LASSERT(list_empty(&rpc_list));
+                LASSERT(cfs_list_empty(&rpc_list));
                 loi_list_maint(cli, loi);
                 RETURN(PTR_ERR(req));
         }
@@ -2566,7 +2570,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
         /* queued sync pages can be torn down while the pages
          * were between the pending list and the rpc */
         tmp = NULL;
-        list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
+        cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
                 /* only one oap gets a request reference */
                 if (tmp == NULL)
                         tmp = oap;
@@ -2589,12 +2593,12 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
 
 #define LOI_DEBUG(LOI, STR, args...)                                     \
         CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR,           \
-               !list_empty(&(LOI)->loi_ready_item) ||                    \
-               !list_empty(&(LOI)->loi_hp_ready_item),                   \
+               !cfs_list_empty(&(LOI)->loi_ready_item) ||                \
+               !cfs_list_empty(&(LOI)->loi_hp_ready_item),               \
                (LOI)->loi_write_lop.lop_num_pending,                     \
-               !list_empty(&(LOI)->loi_write_lop.lop_urgent),            \
+               !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent),        \
                (LOI)->loi_read_lop.lop_num_pending,                      \
-               !list_empty(&(LOI)->loi_read_lop.lop_urgent),             \
+               !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent),         \
                args)                                                     \
 
 /* This is called by osc_check_rpcs() to find which objects have pages that
@@ -2606,31 +2610,32 @@ struct lov_oinfo *osc_next_loi(struct client_obd *cli)
         /* First return objects that have blocked locks so that they
          * will be flushed quickly and other clients can get the lock,
          * then objects which have pages ready to be stuffed into RPCs */
-        if (!list_empty(&cli->cl_loi_hp_ready_list))
-                RETURN(list_entry(cli->cl_loi_hp_ready_list.next,
-                                  struct lov_oinfo, loi_hp_ready_item));
-        if (!list_empty(&cli->cl_loi_ready_list))
-                RETURN(list_entry(cli->cl_loi_ready_list.next,
-                                  struct lov_oinfo, loi_ready_item));
+        if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
+                RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next,
+                                      struct lov_oinfo, loi_hp_ready_item));
+        if (!cfs_list_empty(&cli->cl_loi_ready_list))
+                RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
+                                      struct lov_oinfo, loi_ready_item));
 
         /* then if we have cache waiters, return all objects with queued
          * writes.  This is especially important when many small files
          * have filled up the cache and not been fired into rpcs because
          * they don't pass the nr_pending/object threshold */
-        if (!list_empty(&cli->cl_cache_waiters) &&
-            !list_empty(&cli->cl_loi_write_list))
-                RETURN(list_entry(cli->cl_loi_write_list.next,
-                                  struct lov_oinfo, loi_write_item));
+        if (!cfs_list_empty(&cli->cl_cache_waiters) &&
+            !cfs_list_empty(&cli->cl_loi_write_list))
+                RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
+                                      struct lov_oinfo, loi_write_item));
 
         /* then return all queued objects when we have an invalid import
          * so that they get flushed */
         if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
-                if (!list_empty(&cli->cl_loi_write_list))
-                        RETURN(list_entry(cli->cl_loi_write_list.next,
-                                          struct lov_oinfo, loi_write_item));
-                if (!list_empty(&cli->cl_loi_read_list))
-                        RETURN(list_entry(cli->cl_loi_read_list.next,
-                                          struct lov_oinfo, loi_read_item));
+                if (!cfs_list_empty(&cli->cl_loi_write_list))
+                        RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
+                                              struct lov_oinfo,
+                                              loi_write_item));
+                if (!cfs_list_empty(&cli->cl_loi_read_list))
+                        RETURN(cfs_list_entry(cli->cl_loi_read_list.next,
+                                              struct lov_oinfo, loi_read_item));
         }
         RETURN(NULL);
 }
@@ -2640,15 +2645,15 @@ static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi)
         struct osc_async_page *oap;
         int hprpc = 0;
 
-        if (!list_empty(&loi->loi_write_lop.lop_urgent)) {
-                oap = list_entry(loi->loi_write_lop.lop_urgent.next,
-                                 struct osc_async_page, oap_urgent_item);
+        if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) {
+                oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next,
+                                     struct osc_async_page, oap_urgent_item);
                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
         }
 
-        if (!hprpc && !list_empty(&loi->loi_read_lop.lop_urgent)) {
-                oap = list_entry(loi->loi_read_lop.lop_urgent.next,
-                                 struct osc_async_page, oap_urgent_item);
+        if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) {
+                oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next,
+                                     struct osc_async_page, oap_urgent_item);
                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
         }
 
@@ -2718,14 +2723,14 @@ void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
 
                 /* attempt some inter-object balancing by issuing rpcs
                  * for each object in turn */
-                if (!list_empty(&loi->loi_hp_ready_item))
-                        list_del_init(&loi->loi_hp_ready_item);
-                if (!list_empty(&loi->loi_ready_item))
-                        list_del_init(&loi->loi_ready_item);
-                if (!list_empty(&loi->loi_write_item))
-                        list_del_init(&loi->loi_write_item);
-                if (!list_empty(&loi->loi_read_item))
-                        list_del_init(&loi->loi_read_item);
+                if (!cfs_list_empty(&loi->loi_hp_ready_item))
+                        cfs_list_del_init(&loi->loi_hp_ready_item);
+                if (!cfs_list_empty(&loi->loi_ready_item))
+                        cfs_list_del_init(&loi->loi_ready_item);
+                if (!cfs_list_empty(&loi->loi_write_item))
+                        cfs_list_del_init(&loi->loi_write_item);
+                if (!cfs_list_empty(&loi->loi_read_item))
+                        cfs_list_del_init(&loi->loi_read_item);
 
                 loi_list_maint(cli, loi);
 
@@ -2753,7 +2758,7 @@ static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
         int rc;
         ENTRY;
         client_obd_list_lock(&cli->cl_loi_list_lock);
-        rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
+        rc = cfs_list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
         client_obd_list_unlock(&cli->cl_loi_list_lock);
         RETURN(rc);
 };
@@ -2773,7 +2778,7 @@ int osc_enter_cache_try(const struct lu_env *env,
                 osc_consume_write_grant(cli, &oap->oap_brw_page);
                 if (transient) {
                         cli->cl_dirty_transit += CFS_PAGE_SIZE;
-                        atomic_inc(&obd_dirty_transit_pages);
+                        cfs_atomic_inc(&obd_dirty_transit_pages);
                         oap->oap_brw_flags |= OBD_BRW_NOCACHE;
                 }
         }
@@ -2792,7 +2797,7 @@ static int osc_enter_cache(const struct lu_env *env,
         ENTRY;
 
         CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
-               "grant: %lu\n", cli->cl_dirty, atomic_read(&obd_dirty_pages),
+               "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages),
                cli->cl_dirty_max, obd_max_dirty_pages,
                cli->cl_lost_grant, cli->cl_avail_grant);
 
@@ -2804,7 +2809,7 @@ static int osc_enter_cache(const struct lu_env *env,
 
         /* Hopefully normal case - cache space and write credits available */
         if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
-            atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
+            cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
             osc_enter_cache_try(env, cli, loi, oap, 0))
                 RETURN(0);
 
@@ -2812,7 +2817,7 @@ static int osc_enter_cache(const struct lu_env *env,
          * is a little silly as this object may not have any pending but
          * other objects sure might. */
         if (cli->cl_w_in_flight) {
-                list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
+                cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
                 cfs_waitq_init(&ocw.ocw_waitq);
                 ocw.ocw_oap = oap;
                 ocw.ocw_rc = 0;
@@ -2825,8 +2830,8 @@ static int osc_enter_cache(const struct lu_env *env,
                 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
 
                 client_obd_list_lock(&cli->cl_loi_list_lock);
-                if (!list_empty(&ocw.ocw_entry)) {
-                        list_del(&ocw.ocw_entry);
+                if (!cfs_list_empty(&ocw.ocw_entry)) {
+                        cfs_list_del(&ocw.ocw_entry);
                         RETURN(-EINTR);
                 }
                 RETURN(ocw.ocw_rc);
@@ -2847,7 +2852,7 @@ int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
         ENTRY;
 
         if (!page)
-                return size_round(sizeof(*oap));
+                return cfs_size_round(sizeof(*oap));
 
         oap = *res;
         oap->oap_magic = OAP_MAGIC;
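
When called without a page, osc_prep_async_page() just reports how much space the caller must reserve for an oap, now via cfs_size_round(). Lustre's size_round() has historically rounded up to an 8-byte boundary, so the libcfs version is presumably equivalent to:

/* Assumed definition -- 8-byte round-up, matching the old size_round(). */
static inline int cfs_size_round(int val)
{
        return (val + 7) & (~0x7);
}
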
@@ -2870,7 +2875,7 @@ int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
         CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
         CFS_INIT_LIST_HEAD(&oap->oap_page_list);
 
-        spin_lock_init(&oap->oap_lock);
+        cfs_spin_lock_init(&oap->oap_lock);
         CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
         RETURN(0);
 }
@@ -2901,9 +2906,9 @@ int osc_queue_async_io(const struct lu_env *env,
         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
                 RETURN(-EIO);
 
-        if (!list_empty(&oap->oap_pending_item) ||
-            !list_empty(&oap->oap_urgent_item) ||
-            !list_empty(&oap->oap_rpc_item))
+        if (!cfs_list_empty(&oap->oap_pending_item) ||
+            !cfs_list_empty(&oap->oap_urgent_item) ||
+            !cfs_list_empty(&oap->oap_rpc_item))
                 RETURN(-EBUSY);
 
         /* check if the file's owner/group is over quota */
@@ -2940,9 +2945,9 @@ int osc_queue_async_io(const struct lu_env *env,
         /* Give a hint to OST that requests are coming from kswapd - bug19529 */
         if (libcfs_memory_pressure_get())
                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
-        spin_lock(&oap->oap_lock);
+        cfs_spin_lock(&oap->oap_lock);
         oap->oap_async_flags = async_flags;
-        spin_unlock(&oap->oap_lock);
+        cfs_spin_unlock(&oap->oap_lock);
 
         if (cmd & OBD_BRW_WRITE) {
                 rc = osc_enter_cache(env, cli, loi, oap);
@@ -2975,7 +2980,7 @@ int osc_set_async_flags_base(struct client_obd *cli,
         int flags = 0;
         ENTRY;
 
-        LASSERT(!list_empty(&oap->oap_pending_item));
+        LASSERT(!cfs_list_empty(&oap->oap_pending_item));
 
         if (oap->oap_cmd & OBD_BRW_WRITE) {
                 lop = &loi->loi_write_lop;
@@ -2990,17 +2995,18 @@ int osc_set_async_flags_base(struct client_obd *cli,
                 flags |= ASYNC_READY;
 
         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
-            list_empty(&oap->oap_rpc_item)) {
+            cfs_list_empty(&oap->oap_rpc_item)) {
                 if (oap->oap_async_flags & ASYNC_HP)
-                        list_add(&oap->oap_urgent_item, &lop->lop_urgent);
+                        cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
                 else
-                        list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
+                        cfs_list_add_tail(&oap->oap_urgent_item,
+                                          &lop->lop_urgent);
                 flags |= ASYNC_URGENT;
                 loi_list_maint(cli, loi);
         }
-        spin_lock(&oap->oap_lock);
+        cfs_spin_lock(&oap->oap_lock);
         oap->oap_async_flags |= flags;
-        spin_unlock(&oap->oap_lock);
+        cfs_spin_unlock(&oap->oap_lock);
 
         LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
                         oap->oap_async_flags);
@@ -3032,20 +3038,20 @@ int osc_teardown_async_page(struct obd_export *exp,
 
         client_obd_list_lock(&cli->cl_loi_list_lock);
 
-        if (!list_empty(&oap->oap_rpc_item))
+        if (!cfs_list_empty(&oap->oap_rpc_item))
                 GOTO(out, rc = -EBUSY);
 
         osc_exit_cache(cli, oap, 0);
         osc_wake_cache_waiters(cli);
 
-        if (!list_empty(&oap->oap_urgent_item)) {
-                list_del_init(&oap->oap_urgent_item);
-                spin_lock(&oap->oap_lock);
+        if (!cfs_list_empty(&oap->oap_urgent_item)) {
+                cfs_list_del_init(&oap->oap_urgent_item);
+                cfs_spin_lock(&oap->oap_lock);
                 oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
-                spin_unlock(&oap->oap_lock);
+                cfs_spin_unlock(&oap->oap_lock);
         }
-        if (!list_empty(&oap->oap_pending_item)) {
-                list_del_init(&oap->oap_pending_item);
+        if (!cfs_list_empty(&oap->oap_pending_item)) {
+                cfs_list_del_init(&oap->oap_pending_item);
                 lop_update_pending(cli, lop, oap->oap_cmd, -1);
         }
         loi_list_maint(cli, loi);
@@ -3068,10 +3074,10 @@ static void osc_set_lock_data_with_check(struct ldlm_lock *lock,
         LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
 
         lock_res_and_lock(lock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         LASSERT(lock->l_ast_data == NULL || lock->l_ast_data == data);
         lock->l_ast_data = data;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
         unlock_res_and_lock(lock);
 }
 
@@ -3480,7 +3486,7 @@ static int osc_statfs_interpret(const struct lu_env *env,
 
         /* Reinitialize the RDONLY and DEGRADED flags at the client
          * on each statfs, so they don't stay set permanently. */
-        spin_lock(&cli->cl_oscc.oscc_lock);
+        cfs_spin_lock(&cli->cl_oscc.oscc_lock);
 
         if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
@@ -3514,7 +3520,7 @@ static int osc_statfs_interpret(const struct lu_env *env,
                 (msfs->os_ffree > 64) && (msfs->os_bavail > (used << 1))))
                         cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_NOSPC;
 
-        spin_unlock(&cli->cl_oscc.oscc_lock);
+        cfs_spin_unlock(&cli->cl_oscc.oscc_lock);
 
         *aa->aa_oi->oi_osfs = *msfs;
 out:
@@ -3575,10 +3581,10 @@ static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
 
         /* Since the request might also come from lprocfs, we need to
          * sync this with client_disconnect_export (bug 15684) */
-        down_read(&obd->u.cli.cl_sem);
+        cfs_down_read(&obd->u.cli.cl_sem);
         if (obd->u.cli.cl_import)
                 imp = class_import_get(obd->u.cli.cl_import);
-        up_read(&obd->u.cli.cl_sem);
+        cfs_up_read(&obd->u.cli.cl_sem);
         if (!imp)
                 RETURN(-ENODEV);
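
cl_sem is a read-write semaphore, taken shared here and exclusively in osc_precleanup() further down. The rwsem wrappers again look like straight aliases on Linux (assumed):

typedef struct rw_semaphore cfs_rw_semaphore_t;

#define cfs_down_read(sem)   down_read(sem)
#define cfs_up_read(sem)     up_read(sem)
#define cfs_down_write(sem)  down_write(sem)
#define cfs_up_write(sem)    up_write(sem)
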
 
@@ -3647,7 +3653,7 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
         /* we only need the header part from user space to get lmm_magic and
          * lmm_stripe_count, (the header part is common to v1 and v3) */
         lum_size = sizeof(struct lov_user_md_v1);
-        if (copy_from_user(&lum, lump, lum_size))
+        if (cfs_copy_from_user(&lum, lump, lum_size))
                 RETURN(-EFAULT);
 
         if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
@@ -3681,7 +3687,7 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
         lumk->lmm_object_gr = lsm->lsm_object_gr;
         lumk->lmm_stripe_count = 1;
 
-        if (copy_to_user(lump, lumk, lum_size))
+        if (cfs_copy_to_user(lump, lumk, lum_size))
                 rc = -EFAULT;
 
         if (lumk != &lum)
@@ -3699,7 +3705,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
         int err = 0;
         ENTRY;
 
-        if (!try_module_get(THIS_MODULE)) {
+        if (!cfs_try_module_get(THIS_MODULE)) {
                 CERROR("Can't get module. Is it alive?");
                 return -EINVAL;
         }
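
Module reference counting and user-space copies in osc_iocontrol() go through the same rename. A plausible Linux mapping (assumption; the real definitions sit in libcfs' Linux headers):

#define cfs_try_module_get(mod)          try_module_get(mod)
#define cfs_module_put(mod)              module_put(mod)
#define cfs_copy_from_user(to, from, n)  copy_from_user(to, from, n)
#define cfs_copy_to_user(to, from, n)    copy_to_user(to, from, n)
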
@@ -3737,7 +3743,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 
                 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
 
-                err = copy_to_user((void *)uarg, buf, len);
+                err = cfs_copy_to_user((void *)uarg, buf, len);
                 if (err)
                         err = -EFAULT;
                 obd_ioctl_freedata(buf, len);
@@ -3774,7 +3780,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                 GOTO(out, err = -ENOTTY);
         }
 out:
-        module_put(THIS_MODULE);
+        cfs_module_put(THIS_MODULE);
         return err;
 }
 
@@ -3889,10 +3895,10 @@ static int osc_setinfo_mds_connect_import(struct obd_import *imp)
                 /* XXX return an error? skip setting below flags? */
         }
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         imp->imp_server_timeout = 1;
         imp->imp_pingable = 1;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
 
         RETURN(rc);
@@ -3936,11 +3942,11 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
 
                 /* avoid race between allocate new object and set next id
                  * from ll_sync thread */
-                spin_lock(&oscc->oscc_lock);
+                cfs_spin_lock(&oscc->oscc_lock);
                 new_val = *((obd_id*)val) + 1;
                 if (new_val > oscc->oscc_next_id)
                         oscc->oscc_next_id = new_val;
-                spin_unlock(&oscc->oscc_lock);                        
+                cfs_spin_unlock(&oscc->oscc_lock);
                 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
                        exp->exp_obd->obd_name,
                        obd->u.cli.cl_oscc.oscc_next_id);
@@ -3951,9 +3957,9 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
         if (KEY_IS(KEY_INIT_RECOV)) {
                 if (vallen != sizeof(int))
                         RETURN(-EINVAL);
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_initial_recov = *(int *)val;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
                        exp->exp_obd->obd_name,
                        imp->imp_initial_recov);
@@ -4095,7 +4101,7 @@ static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
 
         LASSERT(olg == &obd->obd_olg);
 
-        mutex_down(&olg->olg_cat_processing);
+        cfs_mutex_down(&olg->olg_cat_processing);
         rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
         if (rc) {
                 CERROR("rc: %d\n", rc);
@@ -4119,7 +4125,7 @@ static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
         }
 
  out:
-        mutex_up(&olg->olg_cat_processing);
+        cfs_mutex_up(&olg->olg_cat_processing);
 
         return rc;
 }
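
cfs_mutex_down()/cfs_mutex_up() preserve Lustre's old semaphore-as-mutex idiom for olg_cat_processing, and cfs_sema_init() shows up below in osc_setup(). On Linux these presumably collapse to the classic semaphore calls (sketch, assumed):

typedef struct semaphore cfs_semaphore_t;

#define cfs_sema_init(sem, val)  sema_init(sem, val)
#define cfs_mutex_down(sem)      down(sem)  /* the old mutex_down() */
#define cfs_mutex_up(sem)        up(sem)    /* the old mutex_up()   */
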
@@ -4230,9 +4236,9 @@ static int osc_import_event(struct obd_device *obd,
                 if (imp->imp_server_timeout) {
                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
 
-                        spin_lock(&oscc->oscc_lock);
+                        cfs_spin_lock(&oscc->oscc_lock);
                         oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
                 }
                 cli = &obd->u.cli;
                 client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -4271,9 +4277,9 @@ static int osc_import_event(struct obd_device *obd,
                 if (imp->imp_server_timeout) {
                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
 
-                        spin_lock(&oscc->oscc_lock);
+                        cfs_spin_lock(&oscc->oscc_lock);
                         oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
-                        spin_unlock(&oscc->oscc_lock);
+                        cfs_spin_unlock(&oscc->oscc_lock);
                 }
                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
                 break;
@@ -4335,7 +4341,7 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
                                             ptlrpc_add_rqs_to_pool);
 
                 CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
-                sema_init(&cli->cl_grant_sem, 1);
+                cfs_sema_init(&cli->cl_grant_sem, 1);
         }
 
         RETURN(rc);
@@ -4353,9 +4359,9 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
                 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
                 ptlrpc_deactivate_import(imp);
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_pingable = 0;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 break;
         }
         case OBD_CLEANUP_EXPORTS: {
@@ -4363,7 +4369,7 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                    client import will not have been cleaned. */
                 if (obd->u.cli.cl_import) {
                         struct obd_import *imp;
-                        down_write(&obd->u.cli.cl_sem);
+                        cfs_down_write(&obd->u.cli.cl_sem);
                         imp = obd->u.cli.cl_import;
                         CDEBUG(D_CONFIG, "%s: client import never connected\n",
                                obd->obd_name);
@@ -4373,7 +4379,7 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                                 imp->imp_rq_pool = NULL;
                         }
                         class_destroy_import(imp);
-                        up_write(&obd->u.cli.cl_sem);
+                        cfs_up_write(&obd->u.cli.cl_sem);
                         obd->u.cli.cl_import = NULL;
                 }
                 rc = obd_llog_finish(obd, 0);
@@ -4464,9 +4470,9 @@ struct obd_ops osc_obd_ops = {
         .o_process_config       = osc_process_config,
 };
 
-extern struct lu_kmem_descr  osc_caches[];
-extern spinlock_t            osc_ast_guard;
-extern struct lock_class_key osc_ast_guard_class;
+extern struct lu_kmem_descr osc_caches[];
+extern cfs_spinlock_t       osc_ast_guard;
+extern cfs_lock_class_key_t osc_ast_guard_class;
 
 int __init osc_init(void)
 {
@@ -4483,7 +4489,7 @@ int __init osc_init(void)
 
         lprocfs_osc_init_vars(&lvars);
 
-        request_module("lquota");
+        cfs_request_module("lquota");
         quota_interface = PORTAL_SYMBOL_GET(osc_quota_interface);
         lquota_init(quota_interface);
         init_obd_quota_ops(quota_interface, &osc_obd_ops);
@@ -4497,8 +4503,8 @@ int __init osc_init(void)
                 RETURN(rc);
         }
 
-        spin_lock_init(&osc_ast_guard);
-        lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+        cfs_spin_lock_init(&osc_ast_guard);
+        cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
 
         osc_mds_ost_orig_logops = llog_lvfs_ops;
         osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
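
A note on the osc.c hunks above: they are a mechanical rename of native Linux primitives (spin_lock, mutex_down, sema_init, request_module, module_put, lockdep_set_class) to cfs_-prefixed libcfs wrappers. On Linux these wrappers are presumably thin aliases over the native API, so the rename is behavior-preserving there; the indirection only pays off on the non-Linux ports, where the same names can expand to platform-specific implementations. A minimal sketch of such a mapping, assuming one-to-one macro aliases (the real definitions live in the libcfs Linux headers and may differ):

    /* Hypothetical Linux-side mapping for the cfs_ wrappers. */
    #include <linux/spinlock.h>
    #include <linux/semaphore.h>
    #include <linux/module.h>
    #include <linux/kmod.h>

    typedef spinlock_t cfs_spinlock_t;

    #define cfs_spin_lock_init(l)      spin_lock_init(l)
    #define cfs_spin_lock(l)           spin_lock(l)
    #define cfs_spin_unlock(l)         spin_unlock(l)
    #define cfs_sema_init(s, val)      sema_init(s, val)
    #define cfs_module_put(m)          module_put(m)
    #define cfs_request_module(mod)    request_module(mod)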
index b21cac3..18b9539 100644
@@ -97,18 +97,18 @@ struct osd_object {
         /**
          * to protect index ops.
          */
-        struct rw_semaphore    oo_ext_idx_sem;
-        struct rw_semaphore    oo_sem;
+        cfs_rw_semaphore_t     oo_ext_idx_sem;
+        cfs_rw_semaphore_t     oo_sem;
         struct osd_directory  *oo_dir;
         /** protects inode attributes. */
-        spinlock_t             oo_guard;
+        cfs_spinlock_t         oo_guard;
         /**
          * Following two members are used to indicate the presence of dot and
          * dotdot in the given directory. This is required for interop mode
          * (b11826).
          */
-        int oo_compat_dot_created;
-        int oo_compat_dotdot_created;
+        int                    oo_compat_dot_created;
+        int                    oo_compat_dotdot_created;
 
         const struct lu_env   *oo_owner;
 #ifdef CONFIG_LOCKDEP
@@ -286,9 +286,9 @@ static struct lu_object *osd_object_alloc(const struct lu_env *env,
                         mo->oo_dt.do_ops = &osd_obj_ops;
 
                 l->lo_ops = &osd_lu_obj_ops;
-                init_rwsem(&mo->oo_sem);
-                init_rwsem(&mo->oo_ext_idx_sem);
-                spin_lock_init(&mo->oo_guard);
+                cfs_init_rwsem(&mo->oo_sem);
+                cfs_init_rwsem(&mo->oo_ext_idx_sem);
+                cfs_spin_lock_init(&mo->oo_guard);
                 return l;
         } else
                 return NULL;
@@ -710,7 +710,7 @@ static void osd_object_release(const struct lu_env *env,
 
         LASSERT(!lu_object_is_dying(l->lo_header));
         if (o->oo_inode != NULL && osd_inode_unlinked(o->oo_inode))
-                set_bit(LU_OBJECT_HEARD_BANSHEE, &l->lo_header->loh_flags);
+                cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &l->lo_header->loh_flags);
 }
 
 /*
@@ -737,13 +737,13 @@ static int osd_object_print(const struct lu_env *env, void *cookie,
  * Concurrency: shouldn't matter.
  */
 int osd_statfs(const struct lu_env *env, struct dt_device *d,
-               struct kstatfs *sfs)
+               cfs_kstatfs_t *sfs)
 {
         struct osd_device *osd = osd_dt_dev(d);
         struct super_block *sb = osd_sb(osd);
         int result = 0;
 
-        spin_lock(&osd->od_osfs_lock);
+        cfs_spin_lock(&osd->od_osfs_lock);
         /* cache 1 second */
         if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
                 result = ll_do_statfs(sb, &osd->od_kstatfs);
@@ -753,7 +753,7 @@ int osd_statfs(const struct lu_env *env, struct dt_device *d,
 
         if (likely(result == 0))
                 *sfs = osd->od_kstatfs;
-        spin_unlock(&osd->od_osfs_lock);
+        cfs_spin_unlock(&osd->od_osfs_lock);
 
         return result;
 }
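
osd_statfs above caches the statfs result for one second under od_osfs_lock: the expensive refresh runs only when od_osfs_age predates cfs_time_shift_64(-1); otherwise the cached od_kstatfs is copied out. The same time-bounded cache, sketched with plain Linux primitives (cached_statfs and expensive_statfs are illustrative names, not Lustre API; note the refresh must not sleep, since it runs under a spinlock):

    #include <linux/fs.h>
    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/statfs.h>

    static u64 cache_age;                   /* jiffies64 of last refresh */
    static struct kstatfs cached;
    static DEFINE_SPINLOCK(cache_lock);

    int cached_statfs(struct super_block *sb, struct kstatfs *out)
    {
            int rc = 0;

            spin_lock(&cache_lock);
            if (time_before64(cache_age, get_jiffies_64() - HZ)) {
                    rc = expensive_statfs(sb, &cached); /* hypothetical */
                    if (rc == 0)
                            cache_age = get_jiffies_64();
            }
            if (rc == 0)
                    *out = cached;                      /* serve cache */
            spin_unlock(&cache_lock);
            return rc;
    }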
@@ -1031,7 +1031,7 @@ static void osd_object_read_lock(const struct lu_env *env,
         LINVRNT(osd_invariant(obj));
 
         LASSERT(obj->oo_owner != env);
-        down_read_nested(&obj->oo_sem, role);
+        cfs_down_read_nested(&obj->oo_sem, role);
 
         LASSERT(obj->oo_owner == NULL);
         oti->oti_r_locks++;
@@ -1046,7 +1046,7 @@ static void osd_object_write_lock(const struct lu_env *env,
         LINVRNT(osd_invariant(obj));
 
         LASSERT(obj->oo_owner != env);
-        down_write_nested(&obj->oo_sem, role);
+        cfs_down_write_nested(&obj->oo_sem, role);
 
         LASSERT(obj->oo_owner == NULL);
         obj->oo_owner = env;
@@ -1063,7 +1063,7 @@ static void osd_object_read_unlock(const struct lu_env *env,
 
         LASSERT(oti->oti_r_locks > 0);
         oti->oti_r_locks--;
-        up_read(&obj->oo_sem);
+        cfs_up_read(&obj->oo_sem);
 }
 
 static void osd_object_write_unlock(const struct lu_env *env,
@@ -1078,7 +1078,7 @@ static void osd_object_write_unlock(const struct lu_env *env,
         LASSERT(oti->oti_w_locks > 0);
         oti->oti_w_locks--;
         obj->oo_owner = NULL;
-        up_write(&obj->oo_sem);
+        cfs_up_write(&obj->oo_sem);
 }
 
 static int osd_object_write_locked(const struct lu_env *env,
@@ -1117,14 +1117,14 @@ static int capa_is_sane(const struct lu_env *env,
                 RETURN(-ESTALE);
         }
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         for (i = 0; i < 2; i++) {
                 if (keys[i].lk_keyid == capa->lc_keyid) {
                         oti->oti_capa_key = keys[i];
                         break;
                 }
         }
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         if (i == 2) {
                 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
@@ -1239,9 +1239,9 @@ static int osd_attr_get(const struct lu_env *env,
         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
                 return -EACCES;
 
-        spin_lock(&obj->oo_guard);
+        cfs_spin_lock(&obj->oo_guard);
         osd_inode_getattr(env, obj->oo_inode, attr);
-        spin_unlock(&obj->oo_guard);
+        cfs_spin_unlock(&obj->oo_guard);
         return 0;
 }
 
@@ -1331,9 +1331,9 @@ static int osd_attr_set(const struct lu_env *env,
         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
                 return -EACCES;
 
-        spin_lock(&obj->oo_guard);
+        cfs_spin_lock(&obj->oo_guard);
         rc = osd_inode_setattr(env, obj->oo_inode, attr);
-        spin_unlock(&obj->oo_guard);
+        cfs_spin_unlock(&obj->oo_guard);
 
         if (!rc)
                 mark_inode_dirty(obj->oo_inode);
@@ -1380,7 +1380,7 @@ static struct dentry * osd_child_dentry_get(const struct lu_env *env,
 
 
 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
-                      umode_t mode,
+                      cfs_umode_t mode,
                       struct dt_allocation_hint *hint,
                       struct thandle *th)
 {
@@ -1522,7 +1522,7 @@ static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
                      struct dt_object_format *dof,
                      struct thandle *th)
 {
-        umode_t mode = attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX);
+        cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX);
         int result;
 
         LINVRNT(osd_invariant(obj));
@@ -1575,7 +1575,7 @@ static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
 
 
 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
-                        struct dt_object *parent, umode_t child_mode)
+                        struct dt_object *parent, cfs_umode_t child_mode)
 {
         LASSERT(ah);
 
@@ -1687,9 +1687,9 @@ static int __osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
         rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
                                    buf->lb_len, fs_flags);
         /* ctime should not be updated with server-side time. */
-        spin_lock(&obj->oo_guard);
+        cfs_spin_lock(&obj->oo_guard);
         inode->i_ctime = *t;
-        spin_unlock(&obj->oo_guard);
+        cfs_spin_unlock(&obj->oo_guard);
         mark_inode_dirty(inode);
         return rc;
 }
@@ -1874,10 +1874,10 @@ static void osd_object_ref_add(const struct lu_env *env,
         LASSERT(osd_write_locked(env, obj));
         LASSERT(th != NULL);
 
-        spin_lock(&obj->oo_guard);
+        cfs_spin_lock(&obj->oo_guard);
         LASSERT(inode->i_nlink < LDISKFS_LINK_MAX);
         inode->i_nlink++;
-        spin_unlock(&obj->oo_guard);
+        cfs_spin_unlock(&obj->oo_guard);
         mark_inode_dirty(inode);
         LINVRNT(osd_invariant(obj));
 }
@@ -1897,10 +1897,10 @@ static void osd_object_ref_del(const struct lu_env *env,
         LASSERT(osd_write_locked(env, obj));
         LASSERT(th != NULL);
 
-        spin_lock(&obj->oo_guard);
+        cfs_spin_lock(&obj->oo_guard);
         LASSERT(inode->i_nlink > 0);
         inode->i_nlink--;
-        spin_unlock(&obj->oo_guard);
+        cfs_spin_unlock(&obj->oo_guard);
         mark_inode_dirty(inode);
         LINVRNT(osd_invariant(obj));
 }
@@ -1997,9 +1997,9 @@ static int osd_xattr_del(const struct lu_env *env,
         *t = inode->i_ctime;
         rc = inode->i_op->removexattr(dentry, name);
         /* ctime should not be updated with server-side time. */
-        spin_lock(&obj->oo_guard);
+        cfs_spin_lock(&obj->oo_guard);
         inode->i_ctime = *t;
-        spin_unlock(&obj->oo_guard);
+        cfs_spin_unlock(&obj->oo_guard);
         mark_inode_dirty(inode);
         return rc;
 }
@@ -2046,9 +2046,9 @@ static struct obd_capa *osd_capa_get(const struct lu_env *env,
                 __u32 d[4], s[4];
 
                 s[0] = obj->oo_inode->i_uid;
-                get_random_bytes(&(s[1]), sizeof(__u32));
+                ll_get_random_bytes(&(s[1]), sizeof(__u32));
                 s[2] = obj->oo_inode->i_gid;
-                get_random_bytes(&(s[3]), sizeof(__u32));
+                ll_get_random_bytes(&(s[3]), sizeof(__u32));
                 rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
                 if (unlikely(rc))
                         RETURN(ERR_PTR(rc));
@@ -2074,9 +2074,9 @@ static struct obd_capa *osd_capa_get(const struct lu_env *env,
                 RETURN(oc);
         }
 
-        spin_lock(&capa_lock);
+        cfs_spin_lock(&capa_lock);
         *key = dev->od_capa_keys[1];
-        spin_unlock(&capa_lock);
+        cfs_spin_unlock(&capa_lock);
 
         capa->lc_keyid = key->lk_keyid;
         capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
@@ -2234,7 +2234,7 @@ static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
                 OBD_ALLOC_PTR(dir);
                 if (dir != NULL) {
 
-                        spin_lock(&obj->oo_guard);
+                        cfs_spin_lock(&obj->oo_guard);
                         if (obj->oo_dir == NULL)
                                 obj->oo_dir = dir;
                         else
@@ -2242,12 +2242,12 @@ static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
                                  * Concurrent thread allocated container data.
                                  */
                                 OBD_FREE_PTR(dir);
-                        spin_unlock(&obj->oo_guard);
+                        cfs_spin_unlock(&obj->oo_guard);
                         /*
                          * Now, that we have container data, serialize its
                          * initialization.
                          */
-                        down_write(&obj->oo_ext_idx_sem);
+                        cfs_down_write(&obj->oo_ext_idx_sem);
                         /*
                          * recheck under lock.
                          */
@@ -2255,7 +2255,7 @@ static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
                                 result = osd_iam_container_init(env, obj, dir);
                         else
                                 result = 0;
-                        up_write(&obj->oo_ext_idx_sem);
+                        cfs_up_write(&obj->oo_ext_idx_sem);
                 } else
                         result = -ENOMEM;
         } else
@@ -2479,7 +2479,7 @@ static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
         dentry = osd_child_dentry_get(env, obj,
                                       (char *)key, strlen((char *)key));
 
-        down_write(&obj->oo_ext_idx_sem);
+        cfs_down_write(&obj->oo_ext_idx_sem);
         bh = ll_ldiskfs_find_entry(dir, dentry, &de);
         if (bh) {
                 struct osd_thread_info *oti = osd_oti_get(env);
@@ -2491,16 +2491,16 @@ static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
                 rc = ldiskfs_delete_entry(oh->ot_handle,
                                 dir, de, bh);
                 /* xtime should not be updated with server-side time. */
-                spin_lock(&obj->oo_guard);
+                cfs_spin_lock(&obj->oo_guard);
                 dir->i_ctime = *ctime;
                 dir->i_mtime = *mtime;
-                spin_unlock(&obj->oo_guard);
+                cfs_spin_unlock(&obj->oo_guard);
                 mark_inode_dirty(dir);
                 brelse(bh);
         } else
                 rc = -ENOENT;
 
-        up_write(&obj->oo_ext_idx_sem);
+        cfs_up_write(&obj->oo_ext_idx_sem);
         LASSERT(osd_invariant(obj));
         RETURN(rc);
 }
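
In osd_index_ea_delete above, the whole directory-entry removal is serialized by the oo_ext_idx_sem read-write semaphore, while the short fix-up that puts the client-supplied ctime/mtime back (so server-side time never leaks into the inode) nests the oo_guard spinlock inside it. A sketch of that nesting with generic names (remove_entry is a hypothetical helper; the locking primitives are the real kernel ones):

    #include <linux/fs.h>
    #include <linux/rwsem.h>
    #include <linux/spinlock.h>

    static int delete_entry_keep_xtime(struct rw_semaphore *idx_sem,
                                       spinlock_t *guard, struct inode *dir,
                                       struct timespec ctime,
                                       struct timespec mtime)
    {
            int rc;

            down_write(idx_sem);            /* sleeping lock: dir update */
            rc = remove_entry(dir);         /* hypothetical helper */
            spin_lock(guard);               /* short window: attrs only */
            dir->i_ctime = ctime;           /* restore client-side times */
            dir->i_mtime = mtime;
            spin_unlock(guard);
            mark_inode_dirty(dir);
            up_write(idx_sem);
            return rc;
    }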
@@ -2752,7 +2752,7 @@ static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
         dentry = osd_child_dentry_get(env, obj,
                                       (char *)key, strlen((char *)key));
 
-        down_read(&obj->oo_ext_idx_sem);
+        cfs_down_read(&obj->oo_ext_idx_sem);
         bh = ll_ldiskfs_find_entry(dir, dentry, &de);
         if (bh) {
                 ino = le32_to_cpu(de->inode);
@@ -2761,7 +2761,7 @@ static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
         } else
                 rc = -ENOENT;
 
-        up_read(&obj->oo_ext_idx_sem);
+        cfs_up_read(&obj->oo_ext_idx_sem);
         RETURN (rc);
 }
 
@@ -2870,18 +2870,18 @@ static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
                 else
                         current->cap_effective &= ~CFS_CAP_SYS_RESOURCE_MASK;
 #endif
-                down_write(&obj->oo_ext_idx_sem);
+                cfs_down_write(&obj->oo_ext_idx_sem);
                 rc = osd_ea_add_rec(env, obj, child, name, th);
-                up_write(&obj->oo_ext_idx_sem);
+                cfs_up_write(&obj->oo_ext_idx_sem);
 #ifdef HAVE_QUOTA_SUPPORT
                 current->cap_effective = save;
 #endif
                 osd_object_put(env, child);
                 /* xtime should not be updated with server-side time. */
-                spin_lock(&obj->oo_guard);
+                cfs_spin_lock(&obj->oo_guard);
                 inode->i_ctime = *ctime;
                 inode->i_mtime = *mtime;
-                spin_unlock(&obj->oo_guard);
+                cfs_spin_unlock(&obj->oo_guard);
                 mark_inode_dirty(inode);
         } else {
                 rc = PTR_ERR(child);
@@ -3265,7 +3265,7 @@ static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
         memcpy(ent->oied_name, name, namelen);
 
         it->oie_rd_dirent++;
-        it->oie_dirent = (void *) ent + size_round(sizeof(*ent) + namelen);
+        it->oie_dirent = (void *) ent + cfs_size_round(sizeof(*ent) + namelen);
         RETURN(0);
 }
 
@@ -3289,11 +3289,11 @@ static int osd_ldiskfs_it_fill(const struct dt_it *di)
         it->oie_dirent = it->oie_buf;
         it->oie_rd_dirent = 0;
 
-        down_read(&obj->oo_ext_idx_sem);
+        cfs_down_read(&obj->oo_ext_idx_sem);
         result = inode->i_fop->readdir(&it->oie_file, it,
                                        (filldir_t) osd_ldiskfs_filldir);
 
-        up_read(&obj->oo_ext_idx_sem);
+        cfs_up_read(&obj->oo_ext_idx_sem);
 
         if (it->oie_rd_dirent == 0) {
                 result = -EIO;
@@ -3324,9 +3324,10 @@ static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
         ENTRY;
 
         if (it->oie_it_dirent < it->oie_rd_dirent) {
-                it->oie_dirent = (void *) it->oie_dirent +
-                                 size_round(sizeof(struct osd_it_ea_dirent) +
-                                            it->oie_dirent->oied_namelen);
+                it->oie_dirent =
+                        (void *) it->oie_dirent +
+                        cfs_size_round(sizeof(struct osd_it_ea_dirent) +
+                                       it->oie_dirent->oied_namelen);
                 it->oie_it_dirent++;
                 RETURN(0);
         } else {
@@ -3655,7 +3656,7 @@ static struct lu_device *osd_device_alloc(const struct lu_env *env,
                         l = osd2lu_dev(o);
                         l->ld_ops = &osd_lu_ops;
                         o->od_dt_dev.dd_ops = &osd_dt_ops;
-                        spin_lock_init(&o->od_osfs_lock);
+                        cfs_spin_lock_init(&o->od_osfs_lock);
                         o->od_osfs_age = cfs_time_shift_64(-1000);
                         o->od_capa_hash = init_capa_hash();
                         if (o->od_capa_hash == NULL) {
index bd55ff1..ab036a2 100644
  *
  * No locking. Callers synchronize.
  */
-static LIST_HEAD(iam_formats);
+static CFS_LIST_HEAD(iam_formats);
 
 void iam_format_register(struct iam_format *fmt)
 {
-        list_add(&fmt->if_linkage, &iam_formats);
+        cfs_list_add(&fmt->if_linkage, &iam_formats);
 }
 EXPORT_SYMBOL(iam_format_register);
 
@@ -155,7 +155,7 @@ static int iam_format_guess(struct iam_container *c)
         }
 
         result = -ENOENT;
-        list_for_each_entry(fmt, &iam_formats, if_linkage) {
+        cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
                 result = fmt->if_guess(c);
                 if (result == 0)
                         break;
@@ -172,7 +172,7 @@ int iam_container_init(struct iam_container *c,
         memset(c, 0, sizeof *c);
         c->ic_descr  = descr;
         c->ic_object = inode;
-        init_rwsem(&c->ic_sem);
+        cfs_init_rwsem(&c->ic_sem);
         return 0;
 }
 EXPORT_SYMBOL(iam_container_init);
@@ -364,7 +364,7 @@ static int iam_leaf_load(struct iam_path *path)
         block = path->ip_frame->leaf;
         if (block == 0) {
                 /* XXX bug 11027 */
-                printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
+                printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
                        (long unsigned)path->ip_frame->leaf,
                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
                        path->ip_frames[0].bh, path->ip_frames[1].bh,
@@ -581,22 +581,22 @@ static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
 
 void iam_container_write_lock(struct iam_container *ic)
 {
-        down_write(&ic->ic_sem);
+        cfs_down_write(&ic->ic_sem);
 }
 
 void iam_container_write_unlock(struct iam_container *ic)
 {
-        up_write(&ic->ic_sem);
+        cfs_up_write(&ic->ic_sem);
 }
 
 void iam_container_read_lock(struct iam_container *ic)
 {
-        down_read(&ic->ic_sem);
+        cfs_down_read(&ic->ic_sem);
 }
 
 void iam_container_read_unlock(struct iam_container *ic)
 {
-        up_read(&ic->ic_sem);
+        cfs_up_read(&ic->ic_sem);
 }
 
 /*
index 546a686..2c71983 100644
@@ -447,15 +447,15 @@ struct iam_container {
          * Underlying flat file. IO against this object is issued to
          * read/write nodes.
          */
-        struct inode     *ic_object;
+        struct inode      *ic_object;
         /*
          * container flavor.
          */
-        struct iam_descr *ic_descr;
+        struct iam_descr  *ic_descr;
         /*
          * read-write lock protecting index consistency.
          */
-        struct rw_semaphore ic_sem;
+        cfs_rw_semaphore_t ic_sem;
 };
 
 /*
@@ -1010,9 +1010,9 @@ static inline void iam_lock_bh(struct buffer_head volatile *bh)
 {
         DX_DEVAL(iam_lock_stats.dls_bh_lock++);
 #ifdef CONFIG_SMP
-        while (test_and_set_bit(BH_DXLock, &bh->b_state)) {
+        while (cfs_test_and_set_bit(BH_DXLock, &bh->b_state)) {
                 DX_DEVAL(iam_lock_stats.dls_bh_busy++);
-                while (test_bit(BH_DXLock, &bh->b_state))
+                while (cfs_test_bit(BH_DXLock, &bh->b_state))
                         cpu_relax();
         }
 #endif
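
iam_lock_bh above is an open-coded bit spinlock on a buffer_head flag, using the test-and-test-and-set idiom: the atomic cfs_test_and_set_bit acquires the lock, and the inner cfs_test_bit loop with cpu_relax() lets waiters spin on plain reads instead of repeated atomic operations; under !CONFIG_SMP the whole loop compiles away. A generic sketch (the unlock side needs release ordering, which plain clear_bit does not guarantee by itself):

    #include <linux/bitops.h>
    #include <asm/processor.h>      /* cpu_relax() */

    /* Test-and-test-and-set bit spinlock, as in iam_lock_bh. */
    static inline void bit_lock(int bit, unsigned long *word)
    {
            while (test_and_set_bit(bit, word)) {   /* atomic acquire */
                    while (test_bit(bit, word))     /* cheap read spin */
                            cpu_relax();
            }
    }

    static inline void bit_unlock(int bit, unsigned long *word)
    {
            smp_mb__before_clear_bit();             /* release ordering */
            clear_bit(bit, word);
    }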
@@ -1065,7 +1065,7 @@ struct iam_format {
         /*
          * Linkage into global list of container formats.
          */
-        struct list_head if_linkage;
+        cfs_list_t if_linkage;
 };
 
 void iam_format_register(struct iam_format *fmt);
index e206bc2..defa760 100644
@@ -246,7 +246,7 @@ static void l_print(struct iam_leaf *leaf, struct iam_lentry *entry)
         char h[3];
 
         area = (char *)entry;
-        printk(KERN_EMERG "[");
+        printk(CFS_KERN_EMERG "[");
         for (i = iam_lfix_key_size(leaf); i > 0; --i, ++area)
                 printk("%s", hex(*area, h));
         printk("]-(");
@@ -263,7 +263,7 @@ static void lfix_print(struct iam_leaf *leaf)
 
         entry = leaf->il_entries;
         count = lentry_count_get(leaf);
-        printk(KERN_EMERG "lfix: %p %p %d\n", leaf, leaf->il_at, count);
+        printk(CFS_KERN_EMERG "lfix: %p %p %d\n", leaf, leaf->il_at, count);
         for (i = 0; i < count; ++i, entry = iam_lfix_shift(leaf, entry, 1))
                 l_print(leaf, entry);
 }
index fa40aae..e3a4224 100644
@@ -291,7 +291,7 @@ void n_print(const struct iam_leaf *l)
 {
         struct lvar_leaf_entry *scan;
 
-        printk(KERN_EMERG "used: %d\n", h_used(n_head(l)));
+        printk(CFS_KERN_EMERG "used: %d\n", h_used(n_head(l)));
         for (scan = n_start(l); scan < n_end(l); scan = e_next(l, scan))
                 e_print(scan);
 }
index 58c2464..b3da83a 100644
@@ -118,7 +118,7 @@ struct osd_device {
         unsigned long             od_capa_timeout;
         __u32                     od_capa_alg;
         struct lustre_capa_key   *od_capa_keys;
-        struct hlist_head        *od_capa_hash;
+        cfs_hlist_head_t         *od_capa_hash;
 
         cfs_proc_dir_entry_t     *od_proc_entry;
         struct lprocfs_stats     *od_stats;
@@ -126,8 +126,8 @@ struct osd_device {
          * statfs optimization: we cache a bit.
          */
         cfs_time_t                od_osfs_age;
-        struct kstatfs            od_kstatfs;
-        spinlock_t                od_osfs_lock;
+        cfs_kstatfs_t             od_kstatfs;
+        cfs_spinlock_t            od_osfs_lock;
 
         /**
          * The following flag indicates, if it is interop mode or not.
@@ -269,7 +269,7 @@ void osd_lprocfs_time_end(const struct lu_env *env,
                           struct osd_device *osd, int op);
 #endif
 int osd_statfs(const struct lu_env *env, struct dt_device *dev,
-               struct kstatfs *sfs);
+               cfs_kstatfs_t *sfs);
 
 /*
  * Invariants, assertions.
index a66a034..a0ac44c 100644
@@ -81,7 +81,7 @@ struct oi_descr {
 };
 
 /** to serialize concurrent OI index initialization */
-static struct mutex oi_init_lock;
+static cfs_mutex_t oi_init_lock;
 
 static struct dt_index_features oi_feat = {
         .dif_flags       = DT_IND_UPDATE,
@@ -139,7 +139,7 @@ int osd_oi_init(struct osd_thread_info *info,
         int i;
 
         env = info->oti_env;
-        mutex_lock(&oi_init_lock);
+        cfs_mutex_lock(&oi_init_lock);
         memset(oi, 0, sizeof *oi);
 retry:
         for (i = rc = 0; i < OSD_OI_FID_NR && rc == 0; ++i) {
@@ -173,7 +173,7 @@ retry:
         if (rc != 0)
                 osd_oi_fini(info, oi);
 
-        mutex_unlock(&oi_init_lock);
+        cfs_mutex_unlock(&oi_init_lock);
         return rc;
 }
 
@@ -274,6 +274,6 @@ int osd_oi_delete(struct osd_thread_info *info,
 
 int osd_oi_mod_init()
 {
-        mutex_init(&oi_init_lock);
+        cfs_mutex_init(&oi_init_lock);
         return 0;
 }
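
The osd_oi.c hunks above serialize OI index initialization with a single static cfs_mutex_t: osd_oi_mod_init() initializes it once at module load (so it is valid before any osd_oi_init() caller can take it), and osd_oi_init() holds it across the whole setup/retry sequence. A sleeping mutex rather than a spinlock, because the guarded section can block. The shape of the pattern, with hypothetical names:

    #include <linux/mutex.h>

    static struct mutex init_lock;

    int mod_init(void)                  /* module load: runs first */
    {
            mutex_init(&init_lock);
            return 0;
    }

    int oi_init(void)
    {
            int rc;

            mutex_lock(&init_lock);     /* may sleep */
            rc = setup_indices();       /* hypothetical; can block on I/O */
            mutex_unlock(&init_lock);
            return rc;
    }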
index 9cb7559..c8473da 100644
@@ -257,7 +257,7 @@ static int ost_statfs(struct ptlrpc_request *req)
         osfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
 
         req->rq_status = obd_statfs(req->rq_export->exp_obd, osfs,
-                                    cfs_time_current_64() - HZ, 0);
+                                    cfs_time_current_64() - CFS_HZ, 0);
         if (req->rq_status != 0)
                 CERROR("ost: statfs failed: rc %d\n", req->rq_status);
 
@@ -644,10 +644,10 @@ static int ost_brw_read(struct ptlrpc_request *req, struct obd_trans_info *oti)
 
         /* Check if there is eviction in progress, and if so, wait for it to
          * finish */
-        if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
+        if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
                 lwi = LWI_INTR(NULL, NULL); // We do not care how long it takes
                 rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
-                        !atomic_read(&exp->exp_obd->obd_evict_inprogress),
+                        !cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress),
                         &lwi);
         }
         if (exp->exp_failed)
@@ -772,13 +772,13 @@ static int ost_brw_read(struct ptlrpc_request *req, struct obd_trans_info *oti)
         if (rc == 0) {
                 /* Check if there is eviction in progress, and if so, wait for
                  * it to finish */
-                if (unlikely(atomic_read(&exp->exp_obd->
-                                                obd_evict_inprogress))) {
+                if (unlikely(cfs_atomic_read(&exp->exp_obd->
+                                             obd_evict_inprogress))) {
                         lwi = LWI_INTR(NULL, NULL);
                         rc = l_wait_event(exp->exp_obd->
-                                                obd_evict_inprogress_waitq,
-                                          !atomic_read(&exp->exp_obd->
-                                                        obd_evict_inprogress),
+                                          obd_evict_inprogress_waitq,
+                                          !cfs_atomic_read(&exp->exp_obd->
+                                          obd_evict_inprogress),
                                           &lwi);
                 }
                 /* Check if client was evicted or tried to reconnect already */
@@ -916,10 +916,10 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti)
 
         /* Check if there is eviction in progress, and if so, wait for it to
          * finish */
-        if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
+        if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
                 lwi = LWI_INTR(NULL, NULL); // We do not care how long it takes
                 rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
-                        !atomic_read(&exp->exp_obd->obd_evict_inprogress),
+                        !cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress),
                         &lwi);
         }
         if (exp->exp_failed)
@@ -1420,9 +1420,9 @@ do {                                                                    \
         reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |          \
                                       OBD_CONNECT_RMT_CLIENT_FORCE |    \
                                       OBD_CONNECT_OSS_CAPA);            \
-        spin_lock(&exp->exp_lock);                                      \
+        cfs_spin_lock(&exp->exp_lock);                                  \
         exp->exp_connect_flags = reply->ocd_connect_flags;              \
-        spin_unlock(&exp->exp_lock);                                    \
+        cfs_spin_unlock(&exp->exp_lock);                                \
 } while (0)
 
 static int ost_init_sec_level(struct ptlrpc_request *req)
@@ -1519,9 +1519,9 @@ static int ost_init_sec_level(struct ptlrpc_request *req)
                         if (!filter->fo_fl_oss_capa)
                                 reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
 
-                        spin_lock(&exp->exp_lock);
+                        cfs_spin_lock(&exp->exp_lock);
                         exp->exp_connect_flags = reply->ocd_connect_flags;
-                        spin_unlock(&exp->exp_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
                 }
                 break;
         default:
@@ -1554,14 +1554,14 @@ static int ost_connect_check_sptlrpc(struct ptlrpc_request *req)
         }
 
         if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
-                read_lock(&filter->fo_sptlrpc_lock);
+                cfs_read_lock(&filter->fo_sptlrpc_lock);
                 sptlrpc_target_choose_flavor(&filter->fo_sptlrpc_rset,
                                              req->rq_sp_from,
                                              req->rq_peer.nid,
                                              &flvr);
-                read_unlock(&filter->fo_sptlrpc_lock);
+                cfs_read_unlock(&filter->fo_sptlrpc_lock);
 
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
 
                 exp->exp_sp_peer = req->rq_sp_from;
                 exp->exp_flvr = flvr;
@@ -1575,7 +1575,7 @@ static int ost_connect_check_sptlrpc(struct ptlrpc_request *req)
                         rc = -EACCES;
                 }
 
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
         } else {
                 if (exp->exp_sp_peer != req->rq_sp_from) {
                         CERROR("RPC source %s doesn't match %s\n",
@@ -2003,9 +2003,9 @@ int ost_handle(struct ptlrpc_request *req)
                 obd = req->rq_export->exp_obd;
 
                 /* Check for aborted recovery. */
-                spin_lock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_lock_bh(&obd->obd_processing_task_lock);
                 recovering = obd->obd_recovering;
-                spin_unlock_bh(&obd->obd_processing_task_lock);
+                cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
                 if (recovering) {
                         rc = ost_filter_recovery_request(req, obd,
                                                          &should_process);
@@ -2310,14 +2310,14 @@ static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
         int rc;
         ENTRY;
 
-        rc = cleanup_group_info();
+        rc = cfs_cleanup_group_info();
         if (rc)
                 RETURN(rc);
 
         lprocfs_ost_init_vars(&lvars);
         lprocfs_obd_setup(obd, lvars.obd_vars);
 
-        sema_init(&ost->ost_health_sem, 1);
+        cfs_sema_init(&ost->ost_health_sem, 1);
 
         if (oss_num_threads) {
                 /* If oss_num_threads is set, it is the min and the max. */
@@ -2328,7 +2328,8 @@ static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
                 oss_max_threads = oss_min_threads = oss_num_threads;
         } else {
                 /* Base min threads on memory and cpus */
-                oss_min_threads = num_possible_cpus() * CFS_NUM_CACHEPAGES >>
+                oss_min_threads =
+                        cfs_num_possible_cpus() * CFS_NUM_CACHEPAGES >>
                         (27 - CFS_PAGE_SHIFT);
                 if (oss_min_threads < OSS_THREADS_MIN)
                         oss_min_threads = OSS_THREADS_MIN;
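
For scale: assuming CFS_NUM_CACHEPAGES counts cacheable pages and CFS_PAGE_SHIFT is 12 (4 KiB pages), the shift above is by 27 - 12 = 15, so the heuristic computes ncpus * npages / 2^15; multiplying through by the 2^12-byte page size, that is ncpus * mem_bytes / 2^27, i.e. roughly one minimum OSS thread per CPU per 128 MiB of cacheable memory, clamped below by OSS_THREADS_MIN.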
@@ -2430,20 +2431,20 @@ static int ost_cleanup(struct obd_device *obd)
 
         ping_evictor_stop();
 
-        spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
         if (obd->obd_recovering) {
                 target_cancel_recovery_timer(obd);
                 obd->obd_recovering = 0;
         }
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 
-        down(&ost->ost_health_sem);
+        cfs_down(&ost->ost_health_sem);
         ptlrpc_unregister_service(ost->ost_service);
         ptlrpc_unregister_service(ost->ost_create_service);
         ptlrpc_unregister_service(ost->ost_io_service);
         ost->ost_service = NULL;
         ost->ost_create_service = NULL;
-        up(&ost->ost_health_sem);
+        cfs_up(&ost->ost_health_sem);
 
         lprocfs_obd_cleanup(obd);
 
@@ -2455,11 +2456,11 @@ static int ost_health_check(struct obd_device *obd)
         struct ost_obd *ost = &obd->u.ost;
         int rc = 0;
 
-        down(&ost->ost_health_sem);
+        cfs_down(&ost->ost_health_sem);
         rc |= ptlrpc_service_health_check(ost->ost_service);
         rc |= ptlrpc_service_health_check(ost->ost_create_service);
         rc |= ptlrpc_service_health_check(ost->ost_io_service);
-        up(&ost->ost_health_sem);
+        cfs_up(&ost->ost_health_sem);
 
         /*
          * health_check to return 0 on healthy
index f0d2f87..a26f440 100644
@@ -90,7 +90,7 @@ static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal
         if (!desc)
                 return NULL;
 
-        spin_lock_init(&desc->bd_lock);
+        cfs_spin_lock_init(&desc->bd_lock);
         cfs_waitq_init(&desc->bd_waitq);
         desc->bd_max_iov = npages;
         desc->bd_iov_count = 0;
@@ -302,11 +302,11 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
         ENTRY;
 
         req->rq_early = 0;
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 
         rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
         if (rc) {
-                spin_lock(&req->rq_lock);
+                cfs_spin_lock(&req->rq_lock);
                 RETURN(rc);
         }
 
@@ -321,7 +321,7 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
 
         sptlrpc_cli_finish_early_reply(early_req);
 
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
 
         if (rc == 0) {
                 /* Adjust the local timeout for this req */
@@ -346,21 +346,21 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
 
 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
 {
-        struct list_head *l, *tmp;
+        cfs_list_t *l, *tmp;
         struct ptlrpc_request *req;
 
         LASSERT(pool != NULL);
 
-        spin_lock(&pool->prp_lock);
-        list_for_each_safe(l, tmp, &pool->prp_req_list) {
-                req = list_entry(l, struct ptlrpc_request, rq_list);
-                list_del(&req->rq_list);
+        cfs_spin_lock(&pool->prp_lock);
+        cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
+                req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
+                cfs_list_del(&req->rq_list);
                 LASSERT(req->rq_reqbuf);
                 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
                 OBD_FREE(req->rq_reqbuf, pool->prp_rq_size);
                 OBD_FREE(req, sizeof(*req));
         }
-        spin_unlock(&pool->prp_lock);
+        cfs_spin_unlock(&pool->prp_lock);
         OBD_FREE(pool, sizeof(*pool));
 }
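
ptlrpc_free_rq_pool above tears the pool down with cfs_list_for_each_safe, the iterator variant that caches the successor pointer so the current node can be unlinked and freed mid-walk; the plain iterator would dereference freed memory on the next step. The idiom with bare Linux list primitives (struct node and pool_drain are illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct node {
            struct list_head link;
    };

    static void pool_drain(struct list_head *head)
    {
            struct list_head *pos, *tmp;    /* tmp caches pos->next */

            list_for_each_safe(pos, tmp, head) {
                    struct node *n = list_entry(pos, struct node, link);

                    list_del(&n->link);
                    kfree(n);               /* safe: tmp already saved */
            }
    }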
 
@@ -372,17 +372,18 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
         while (size < pool->prp_rq_size + SPTLRPC_MAX_PAYLOAD)
                 size <<= 1;
 
-        LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size,
+        LASSERTF(cfs_list_empty(&pool->prp_req_list) ||
+                 size == pool->prp_rq_size,
                  "Trying to change pool size with nonempty pool "
                  "from %d to %d bytes\n", pool->prp_rq_size, size);
 
-        spin_lock(&pool->prp_lock);
+        cfs_spin_lock(&pool->prp_lock);
         pool->prp_rq_size = size;
         for (i = 0; i < num_rq; i++) {
                 struct ptlrpc_request *req;
                 struct lustre_msg *msg;
 
-                spin_unlock(&pool->prp_lock);
+                cfs_spin_unlock(&pool->prp_lock);
                 OBD_ALLOC(req, sizeof(struct ptlrpc_request));
                 if (!req)
                         return;
@@ -394,10 +395,10 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
                 req->rq_reqbuf = msg;
                 req->rq_reqbuf_len = size;
                 req->rq_pool = pool;
-                spin_lock(&pool->prp_lock);
-                list_add_tail(&req->rq_list, &pool->prp_req_list);
+                cfs_spin_lock(&pool->prp_lock);
+                cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
         }
-        spin_unlock(&pool->prp_lock);
+        cfs_spin_unlock(&pool->prp_lock);
         return;
 }
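
In ptlrpc_add_rqs_to_pool above, prp_lock is a spinlock but OBD_ALLOC can sleep, so the loop drops the lock before every allocation and re-takes it only to link the new request into prp_req_list; on allocation failure it simply returns with whatever was added so far. The idiom in bare kernel terms (pool_grow and struct entry are illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct entry {
            struct list_head link;
    };

    static void pool_grow(spinlock_t *lock, struct list_head *head, int n)
    {
            int i;

            spin_lock(lock);
            for (i = 0; i < n; i++) {
                    struct entry *e;

                    spin_unlock(lock);      /* GFP_KERNEL may sleep */
                    e = kmalloc(sizeof(*e), GFP_KERNEL);
                    if (e == NULL)
                            return;         /* partial fill tolerated */
                    spin_lock(lock);
                    list_add_tail(&e->link, head);
            }
            spin_unlock(lock);
    }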
 
@@ -414,14 +415,14 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
         /* Request next power of two for the allocation, because internally
            kernel would do exactly this */
 
-        spin_lock_init(&pool->prp_lock);
+        cfs_spin_lock_init(&pool->prp_lock);
         CFS_INIT_LIST_HEAD(&pool->prp_req_list);
         pool->prp_rq_size = msgsize;
         pool->prp_populate = populate_pool;
 
         populate_pool(pool, num_rq);
 
-        if (list_empty(&pool->prp_req_list)) {
+        if (cfs_list_empty(&pool->prp_req_list)) {
                 /* have not allocated a single request for the pool */
                 OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
                 pool = NULL;
@@ -438,21 +439,21 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
         if (!pool)
                 return NULL;
 
-        spin_lock(&pool->prp_lock);
+        cfs_spin_lock(&pool->prp_lock);
 
         /* See if we have anything in a pool, and bail out if nothing,
          * in writeout path, where this matters, this is safe to do, because
          * nothing is lost in this case, and when some in-flight requests
          * complete, this code will be called again. */
-        if (unlikely(list_empty(&pool->prp_req_list))) {
-                spin_unlock(&pool->prp_lock);
+        if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+                cfs_spin_unlock(&pool->prp_lock);
                 return NULL;
         }
 
-        request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
-                             rq_list);
-        list_del_init(&request->rq_list);
-        spin_unlock(&pool->prp_lock);
+        request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+                                 rq_list);
+        cfs_list_del_init(&request->rq_list);
+        cfs_spin_unlock(&pool->prp_lock);
 
         LASSERT(request->rq_reqbuf);
         LASSERT(request->rq_pool);
@@ -470,11 +471,11 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
 {
         struct ptlrpc_request_pool *pool = request->rq_pool;
 
-        spin_lock(&pool->prp_lock);
-        LASSERT(list_empty(&request->rq_list));
+        cfs_spin_lock(&pool->prp_lock);
+        LASSERT(cfs_list_empty(&request->rq_list));
         LASSERT(!request->rq_receiving_reply);
-        list_add_tail(&request->rq_list, &pool->prp_req_list);
-        spin_unlock(&pool->prp_lock);
+        cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+        cfs_spin_unlock(&pool->prp_lock);
 }
 
 static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
@@ -523,7 +524,7 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 
         ptlrpc_at_set_req_timeout(request);
 
-        spin_lock_init(&request->rq_lock);
+        cfs_spin_lock_init(&request->rq_lock);
         CFS_INIT_LIST_HEAD(&request->rq_list);
         CFS_INIT_LIST_HEAD(&request->rq_timed_list);
         CFS_INIT_LIST_HEAD(&request->rq_replay_list);
@@ -533,7 +534,7 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
         CFS_INIT_LIST_HEAD(&request->rq_exp_list);
         cfs_waitq_init(&request->rq_reply_waitq);
         request->rq_xid = ptlrpc_next_xid();
-        atomic_set(&request->rq_refcount, 1);
+        cfs_atomic_set(&request->rq_refcount, 1);
 
         lustre_msg_set_opc(request->rq_reqmsg, opcode);
 
@@ -707,7 +708,7 @@ struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
         request->rq_no_delay = request->rq_no_resend = 1;
         request->rq_fake = 1;
 
-        spin_lock_init(&request->rq_lock);
+        cfs_spin_lock_init(&request->rq_lock);
         CFS_INIT_LIST_HEAD(&request->rq_list);
         CFS_INIT_LIST_HEAD(&request->rq_replay_list);
         CFS_INIT_LIST_HEAD(&request->rq_set_chain);
@@ -716,7 +717,7 @@ struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
         cfs_waitq_init(&request->rq_reply_waitq);
 
         request->rq_xid = ptlrpc_next_xid();
-        atomic_set(&request->rq_refcount, 1);
+        cfs_atomic_set(&request->rq_refcount, 1);
 
         RETURN(request);
 }
@@ -732,7 +733,7 @@ void ptlrpc_fakereq_finished(struct ptlrpc_request *req)
         }
 
         ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
-        list_del_init(&req->rq_list);
+        cfs_list_del_init(&req->rq_list);
 }
 
 
@@ -747,7 +748,7 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void)
         CFS_INIT_LIST_HEAD(&set->set_requests);
         cfs_waitq_init(&set->set_waitq);
         set->set_remaining = 0;
-        spin_lock_init(&set->set_new_req_lock);
+        cfs_spin_lock_init(&set->set_new_req_lock);
         CFS_INIT_LIST_HEAD(&set->set_new_requests);
         CFS_INIT_LIST_HEAD(&set->set_cblist);
 
@@ -757,8 +758,8 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void)
 /* Finish with this set; opposite of prep_set. */
 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
 {
-        struct list_head *tmp;
-        struct list_head *next;
+        cfs_list_t       *tmp;
+        cfs_list_t       *next;
         int               expected_phase;
         int               n = 0;
         ENTRY;
@@ -766,9 +767,10 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
         /* Requests on the set should either all be completed, or all be new */
         expected_phase = (set->set_remaining == 0) ?
                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
-        list_for_each (tmp, &set->set_requests) {
+        cfs_list_for_each (tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+                        cfs_list_entry(tmp, struct ptlrpc_request,
+                                       rq_set_chain);
 
                 LASSERT(req->rq_phase == expected_phase);
                 n++;
@@ -777,10 +779,11 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
         LASSERTF(set->set_remaining == 0 || set->set_remaining == n, "%d / %d\n",
                  set->set_remaining, n);
 
-        list_for_each_safe(tmp, next, &set->set_requests) {
+        cfs_list_for_each_safe(tmp, next, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
-                list_del_init(&req->rq_set_chain);
+                        cfs_list_entry(tmp, struct ptlrpc_request,
+                                       rq_set_chain);
+                cfs_list_del_init(&req->rq_set_chain);
 
                 LASSERT(req->rq_phase == expected_phase);
 
@@ -810,7 +813,7 @@ int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
 
         cbdata->psc_interpret = fn;
         cbdata->psc_data = data;
-        list_add_tail(&cbdata->psc_item, &set->set_cblist);
+        cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist);
 
         RETURN(0);
 }
@@ -819,7 +822,7 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                         struct ptlrpc_request *req)
 {
         /* The set takes over the caller's request reference */
-        list_add_tail(&req->rq_set_chain, &set->set_requests);
+        cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
         req->rq_set = set;
         set->set_remaining++;
 }
@@ -837,16 +840,16 @@ int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
          * Let caller know that we stopped and will not handle this request.
          * It needs to take care itself of request.
          */
-        if (test_bit(LIOD_STOP, &pc->pc_flags))
+        if (cfs_test_bit(LIOD_STOP, &pc->pc_flags))
                 return -EALREADY;
 
-        spin_lock(&set->set_new_req_lock);
+        cfs_spin_lock(&set->set_new_req_lock);
         /*
          * The set takes over the caller's request reference.
          */
-        list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+        cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
         req->rq_set = set;
-        spin_unlock(&set->set_new_req_lock);
+        cfs_spin_unlock(&set->set_new_req_lock);
 
         cfs_waitq_signal(&set->set_waitq);
         return 0;
@@ -883,7 +886,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
         } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
                    imp->imp_state == LUSTRE_IMP_CONNECTING) {
                 /* allow CONNECT even if import is invalid */ ;
-                if (atomic_read(&imp->imp_inval_count) != 0) {
+                if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                         *status = -EIO;
                 }
@@ -902,7 +905,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
                 *status = -EIO;
         } else if (req->rq_send_state != imp->imp_state) {
                 /* invalidate in progress - any requests should be drop */
-                if (atomic_read(&imp->imp_inval_count) != 0) {
+                if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                         *status = -EIO;
                 } else if (imp->imp_dlm_fake || req->rq_no_delay) {
@@ -915,7 +918,6 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
         RETURN(delay);
 }
 
-
 /* Conditionally suppress specific console messages */
 static int ptlrpc_console_allow(struct ptlrpc_request *req)
 {
@@ -1046,7 +1048,7 @@ static int after_reply(struct ptlrpc_request *req)
         if (rc)
                 RETURN(rc);
 
-        do_gettimeofday(&work_start);
+        cfs_gettimeofday(&work_start);
         timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
         if (obd->obd_svc_stats != NULL) {
                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
@@ -1101,7 +1103,7 @@ static int after_reply(struct ptlrpc_request *req)
         }
 
         if (imp->imp_replayable) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 /*
                  * No point in adding already-committed requests to the replay
                  * list, we will just remove them immediately. b=9829
@@ -1114,9 +1116,9 @@ static int after_reply(struct ptlrpc_request *req)
                         ptlrpc_save_versions(req);
                         ptlrpc_retain_replayable_request(req, imp);
                 } else if (req->rq_commit_cb != NULL) {
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
                         req->rq_commit_cb(req);
-                        spin_lock(&imp->imp_lock);
+                        cfs_spin_lock(&imp->imp_lock);
                 }
 
                 /*
@@ -1127,7 +1129,7 @@ static int after_reply(struct ptlrpc_request *req)
                                 lustre_msg_get_last_committed(req->rq_repmsg);
                 }
                 ptlrpc_free_committed(imp);
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         }
 
         RETURN(rc);
@@ -1146,37 +1148,37 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
 
         imp = req->rq_import;
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
 
         req->rq_import_generation = imp->imp_generation;
 
         if (ptlrpc_import_delay_req(imp, req, &rc)) {
-                spin_lock(&req->rq_lock);
+                cfs_spin_lock(&req->rq_lock);
                 req->rq_waiting = 1;
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
 
                 DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
                           "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
                           ptlrpc_import_state_name(req->rq_send_state),
                           ptlrpc_import_state_name(imp->imp_state));
-                LASSERT(list_empty(&req->rq_list));
-                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
-                atomic_inc(&req->rq_import->imp_inflight);
-                spin_unlock(&imp->imp_lock);
+                LASSERT(cfs_list_empty(&req->rq_list));
+                cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+                cfs_atomic_inc(&req->rq_import->imp_inflight);
+                cfs_spin_unlock(&imp->imp_lock);
                 RETURN(0);
         }
 
         if (rc != 0) {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 req->rq_status = rc;
                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
                 RETURN(rc);
         }
 
-        LASSERT(list_empty(&req->rq_list));
-        list_add_tail(&req->rq_list, &imp->imp_sending_list);
-        atomic_inc(&req->rq_import->imp_inflight);
-        spin_unlock(&imp->imp_lock);
+        LASSERT(cfs_list_empty(&req->rq_list));
+        cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
+        cfs_atomic_inc(&req->rq_import->imp_inflight);
+        cfs_spin_unlock(&imp->imp_lock);
 
         lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
 
@@ -1210,16 +1212,17 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 /* this sends any unsent RPCs in @set and returns TRUE if all are sent */
 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         int force_timer_recalc = 0;
         ENTRY;
 
         if (set->set_remaining == 0)
                 RETURN(1);
 
-        list_for_each(tmp, &set->set_requests) {
+        cfs_list_for_each(tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+                        cfs_list_entry(tmp, struct ptlrpc_request,
+                                       rq_set_chain);
                 struct obd_import *imp = req->rq_import;
                 int rc = 0;
 
@@ -1328,13 +1331,15 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                 if (!ptlrpc_unregister_reply(req, 1))
                                         continue;
 
-                                spin_lock(&imp->imp_lock);
+                                cfs_spin_lock(&imp->imp_lock);
                                 if (ptlrpc_import_delay_req(imp, req, &status)){
                                         /* put on delay list - only if we wait
                                          * recovery finished - before send */
-                                        list_del_init(&req->rq_list);
-                                        list_add_tail(&req->rq_list, &imp->imp_delayed_list);
-                                        spin_unlock(&imp->imp_lock);
+                                        cfs_list_del_init(&req->rq_list);
+                                        cfs_list_add_tail(&req->rq_list,
+                                                          &imp-> \
+                                                          imp_delayed_list);
+                                        cfs_spin_unlock(&imp->imp_lock);
                                         continue;
                                 }
 
@@ -1342,22 +1347,22 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                         req->rq_status = status;
                                         ptlrpc_rqphase_move(req,
                                                 RQ_PHASE_INTERPRET);
-                                        spin_unlock(&imp->imp_lock);
+                                        cfs_spin_unlock(&imp->imp_lock);
                                         GOTO(interpret, req->rq_status);
                                 }
                                 if (req->rq_no_resend && !req->rq_wait_ctx) {
                                         req->rq_status = -ENOTCONN;
                                         ptlrpc_rqphase_move(req,
                                                 RQ_PHASE_INTERPRET);
-                                        spin_unlock(&imp->imp_lock);
+                                        cfs_spin_unlock(&imp->imp_lock);
                                         GOTO(interpret, req->rq_status);
                                 }
 
-                                list_del_init(&req->rq_list);
-                                list_add_tail(&req->rq_list,
+                                cfs_list_del_init(&req->rq_list);
+                                cfs_list_add_tail(&req->rq_list,
                                               &imp->imp_sending_list);
 
-                                spin_unlock(&imp->imp_lock);
+                                cfs_spin_unlock(&imp->imp_lock);
 
                                 req->rq_waiting = 0;
 
@@ -1410,27 +1415,27 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                 force_timer_recalc = 1;
                         }
 
-                        spin_lock(&req->rq_lock);
+                        cfs_spin_lock(&req->rq_lock);
 
                         if (ptlrpc_client_early(req)) {
                                 ptlrpc_at_recv_early_reply(req);
-                                spin_unlock(&req->rq_lock);
+                                cfs_spin_unlock(&req->rq_lock);
                                 continue;
                         }
 
                         /* Still waiting for a reply? */
                         if (ptlrpc_client_recv(req)) {
-                                spin_unlock(&req->rq_lock);
+                                cfs_spin_unlock(&req->rq_lock);
                                 continue;
                         }
 
                         /* Did we actually receive a reply? */
                         if (!ptlrpc_client_replied(req)) {
-                                spin_unlock(&req->rq_lock);
+                                cfs_spin_unlock(&req->rq_lock);
                                 continue;
                         }
 
-                        spin_unlock(&req->rq_lock);
+                        cfs_spin_unlock(&req->rq_lock);
 
                         req->rq_status = after_reply(req);
                         if (req->rq_resend)
@@ -1491,16 +1496,16 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                        libcfs_nid2str(imp->imp_connection->c_peer.nid),
                        req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : -1);
 
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                /* The request may no longer be on the sending or delayed
                 * list. This can happen when it is marked erroneous because
                 * ptlrpc_import_delay_req(req, status) finds it impossible
                 * to allow sending this RPC and returns *status != 0. */
-                if (!list_empty(&req->rq_list)) {
-                        list_del_init(&req->rq_list);
-                        atomic_dec(&imp->imp_inflight);
+                if (!cfs_list_empty(&req->rq_list)) {
+                        cfs_list_del_init(&req->rq_list);
+                        cfs_atomic_dec(&imp->imp_inflight);
                 }
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
 
                 set->set_remaining--;
                 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
@@ -1517,9 +1522,9 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
         int rc = 0;
         ENTRY;
 
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         req->rq_timedout = 1;
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 
         DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req, 
                   "Request x"LPU64" sent from %s to NID %s "CFS_DURATION_T"s "
@@ -1547,7 +1552,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
         if (req->rq_fake)
                RETURN(1);
 
-        atomic_inc(&imp->imp_timeouts);
+        cfs_atomic_inc(&imp->imp_timeouts);
 
         /* The DLM server doesn't want recovery run on its imports. */
         if (imp->imp_dlm_fake)
@@ -1561,10 +1566,10 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
                           ptlrpc_import_state_name(req->rq_send_state),
                           ptlrpc_import_state_name(imp->imp_state));
-                spin_lock(&req->rq_lock);
+                cfs_spin_lock(&req->rq_lock);
                 req->rq_status = -ETIMEDOUT;
                 req->rq_err = 1;
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
                 RETURN(1);
         }
 
@@ -1583,7 +1588,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
 int ptlrpc_expired_set(void *data)
 {
         struct ptlrpc_request_set *set = data;
-        struct list_head          *tmp;
+        cfs_list_t                *tmp;
         time_t                     now = cfs_time_current_sec();
         ENTRY;
 
@@ -1592,9 +1597,10 @@ int ptlrpc_expired_set(void *data)
         /*
          * A timeout expired. See which reqs it applies to...
          */
-        list_for_each (tmp, &set->set_requests) {
+        cfs_list_for_each (tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+                        cfs_list_entry(tmp, struct ptlrpc_request,
+                                       rq_set_chain);
 
                 /* don't expire request waiting for context */
                 if (req->rq_wait_ctx)
@@ -1625,22 +1631,23 @@ int ptlrpc_expired_set(void *data)
 
 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
 {
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         req->rq_intr = 1;
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 }
 
 void ptlrpc_interrupted_set(void *data)
 {
         struct ptlrpc_request_set *set = data;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
         LASSERT(set != NULL);
         CERROR("INTERRUPTED SET %p\n", set);
 
-        list_for_each(tmp, &set->set_requests) {
+        cfs_list_for_each(tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+                        cfs_list_entry(tmp, struct ptlrpc_request,
+                                       rq_set_chain);
 
                 if (req->rq_phase != RQ_PHASE_RPC &&
                     req->rq_phase != RQ_PHASE_UNREGISTERING)
@@ -1655,7 +1662,7 @@ void ptlrpc_interrupted_set(void *data)
  */
 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
 {
-        struct list_head      *tmp;
+        cfs_list_t            *tmp;
         time_t                 now = cfs_time_current_sec();
         int                    timeout = 0;
         struct ptlrpc_request *req;
@@ -1664,8 +1671,8 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
 
         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
 
-        list_for_each(tmp, &set->set_requests) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+        cfs_list_for_each(tmp, &set->set_requests) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
 
                 /*
                  * Request in-flight?
@@ -1702,17 +1709,17 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
 
 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
 {
-        struct list_head      *tmp;
+        cfs_list_t            *tmp;
         struct ptlrpc_request *req;
         struct l_wait_info     lwi;
         int                    rc, timeout;
         ENTRY;
 
-        if (list_empty(&set->set_requests))
+        if (cfs_list_empty(&set->set_requests))
                 RETURN(0);
 
-        list_for_each(tmp, &set->set_requests) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+        cfs_list_for_each(tmp, &set->set_requests) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                 if (req->rq_phase == RQ_PHASE_NEW)
                         (void)ptlrpc_send_new_req(req);
         }
@@ -1760,8 +1767,8 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
         LASSERT(set->set_remaining == 0);
 
         rc = 0;
-        list_for_each(tmp, &set->set_requests) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+        cfs_list_for_each(tmp, &set->set_requests) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
 
                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
                 if (req->rq_status != 0)
@@ -1776,9 +1783,9 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                 struct ptlrpc_set_cbdata *cbdata, *n;
                 int err;
 
-                list_for_each_entry_safe(cbdata, n,
+                cfs_list_for_each_entry_safe(cbdata, n,
                                          &set->set_cblist, psc_item) {
-                        list_del_init(&cbdata->psc_item);
+                        cfs_list_del_init(&cbdata->psc_item);
                         err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
                         if (err && !rc)
                                 rc = err;
@@ -1799,9 +1806,9 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
 
         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
         LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
-        LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
-        LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
-        LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
+        LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request);
+        LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request);
+        LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request);
         LASSERTF(!request->rq_replay, "req %p\n", request);
         LASSERT(request->rq_cli_ctx || request->rq_fake);
 
@@ -1811,14 +1818,14 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
         if (request->rq_import != NULL) {
                 if (!locked)
-                        spin_lock(&request->rq_import->imp_lock);
-                list_del_init(&request->rq_replay_list);
+                        cfs_spin_lock(&request->rq_import->imp_lock);
+                cfs_list_del_init(&request->rq_replay_list);
                 if (!locked)
-                        spin_unlock(&request->rq_import->imp_lock);
+                        cfs_spin_unlock(&request->rq_import->imp_lock);
         }
-        LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
+        LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
 
-        if (atomic_read(&request->rq_refcount) != 0) {
+        if (cfs_atomic_read(&request->rq_refcount) != 0) {
                 DEBUG_REQ(D_ERROR, request,
                           "freeing request with nonzero refcount");
                 LBUG();
@@ -1871,9 +1878,9 @@ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
         }
 
         DEBUG_REQ(D_INFO, request, "refcount now %u",
-                  atomic_read(&request->rq_refcount) - 1);
+                  cfs_atomic_read(&request->rq_refcount) - 1);
 
-        if (atomic_dec_and_test(&request->rq_refcount)) {
+        if (cfs_atomic_dec_and_test(&request->rq_refcount)) {
                 __ptlrpc_free_req(request, locked);
                 RETURN(1);
         }
@@ -1906,7 +1913,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
         /*
          * Might sleep.
          */
-        LASSERT(!in_interrupt());
+        LASSERT(!cfs_in_interrupt());
 
         /*
          * Let's set up a deadline for reply unlink.
@@ -1973,7 +1980,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
 /* caller must hold imp->imp_lock */
 void ptlrpc_free_committed(struct obd_import *imp)
 {
-        struct list_head *tmp, *saved;
+        cfs_list_t *tmp, *saved;
         struct ptlrpc_request *req;
         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
         ENTRY;
@@ -1996,8 +2003,9 @@ void ptlrpc_free_committed(struct obd_import *imp)
         imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
         imp->imp_last_generation_checked = imp->imp_generation;
 
-        list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+        cfs_list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request,
+                                     rq_replay_list);
 
                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
                 LASSERT(req != last_req);
@@ -2026,12 +2034,12 @@ void ptlrpc_free_committed(struct obd_import *imp)
                 DEBUG_REQ(D_RPCTRACE, req, "commit (last_committed "LPU64")",
                           imp->imp_peer_committed_transno);
 free_req:
-                spin_lock(&req->rq_lock);
+                cfs_spin_lock(&req->rq_lock);
                 req->rq_replay = 0;
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
                 if (req->rq_commit_cb != NULL)
                         req->rq_commit_cb(req);
-                list_del_init(&req->rq_replay_list);
+                cfs_list_del_init(&req->rq_replay_list);
                 __ptlrpc_req_finished(req, 1);
         }
 
@@ -2052,7 +2060,7 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
         lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
         req->rq_status = -EAGAIN;
 
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         req->rq_resend = 1;
         req->rq_net_err = 0;
         req->rq_timedout = 0;
@@ -2065,7 +2073,7 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
                        old_xid, req->rq_xid);
         }
         ptlrpc_client_wake_req(req);
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 }
 
 /* XXX: this function and rq_status are currently unused */
@@ -2074,24 +2082,24 @@ void ptlrpc_restart_req(struct ptlrpc_request *req)
         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
         req->rq_status = -ERESTARTSYS;
 
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         req->rq_restart = 1;
         req->rq_timedout = 0;
         ptlrpc_client_wake_req(req);
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 }
 
 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
 {
         ENTRY;
-        atomic_inc(&req->rq_refcount);
+        cfs_atomic_inc(&req->rq_refcount);
         RETURN(req);
 }
 
 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                       struct obd_import *imp)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
         LASSERT_SPIN_LOCKED(&imp->imp_lock);
 
@@ -2105,7 +2113,7 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
 
         /* don't re-add requests that have been replayed */
-        if (!list_empty(&req->rq_replay_list))
+        if (!cfs_list_empty(&req->rq_replay_list))
                 return;
 
         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
@@ -2113,9 +2121,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
         LASSERT(imp->imp_replayable);
         /* Balanced in ptlrpc_free_committed, usually. */
         ptlrpc_request_addref(req);
-        list_for_each_prev(tmp, &imp->imp_replay_list) {
+        cfs_list_for_each_prev(tmp, &imp->imp_replay_list) {
                 struct ptlrpc_request *iter =
-                        list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+                        cfs_list_entry(tmp, struct ptlrpc_request,
+                                       rq_replay_list);
 
                 /* We may have duplicate transnos if we create and then
                  * open a file, or for closes retained if to match creating
@@ -2132,11 +2141,11 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                 continue;
                 }
 
-                list_add(&req->rq_replay_list, &iter->rq_replay_list);
+                cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list);
                 return;
         }
 
-        list_add(&req->rq_replay_list, &imp->imp_replay_list);
+        cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
 }
 
 int ptlrpc_queue_wait(struct ptlrpc_request *req)
@@ -2179,7 +2188,7 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
         struct obd_import *imp = req->rq_import;
 
         ENTRY;
-        atomic_dec(&imp->imp_replay_inflight);
+        cfs_atomic_dec(&imp->imp_replay_inflight);
 
         if (!ptlrpc_client_replied(req)) {
                 CERROR("request replay timed out, restarting recovery\n");
@@ -2195,10 +2204,10 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
                 /** replay was failed due to version mismatch */
                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_vbr_failed = 1;
                 imp->imp_no_lock_replay = 1;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         } else {
                 /** The transno had better not change over replay. */
                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
@@ -2209,12 +2218,12 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
                          lustre_msg_get_transno(req->rq_repmsg));
         }
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         /** if replay is done by version then a gap occurred on the
          * server, so do not trust the locks */
         if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
                 imp->imp_no_lock_replay = 1;
         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         LASSERT(imp->imp_last_replay_transno);
 
         DEBUG_REQ(D_HA, req, "got rep");
@@ -2238,10 +2247,10 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
          * imp_last_replay_transno shouldn't be set to 0 anyway
          */
         if (req->rq_transno > 0) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 LASSERT(req->rq_transno <= imp->imp_last_replay_transno);
                 imp->imp_last_replay_transno = req->rq_transno;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         } else
                 CERROR("Transno is 0 during replay!\n");
         /* continue with recovery */
@@ -2283,7 +2292,7 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
 
         DEBUG_REQ(D_HA, req, "REPLAY");
 
-        atomic_inc(&req->rq_import->imp_replay_inflight);
+        cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
 
         ptlrpcd_add_req(req, PSCOPE_OTHER);
@@ -2292,47 +2301,47 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
 
 void ptlrpc_abort_inflight(struct obd_import *imp)
 {
-        struct list_head *tmp, *n;
+        cfs_list_t *tmp, *n;
         ENTRY;
 
         /* Make sure that no new requests get processed for this import.
          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
          * this flag and then putting requests on sending_list or delayed_list.
          */
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
 
         /* XXX locking?  Maybe we should remove each request with the list
          * locked?  Also, how do we know if the requests on the list are
          * being freed at this time?
          */
-        list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+        cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_list);
+                        cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
 
                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
 
-                spin_lock (&req->rq_lock);
+                cfs_spin_lock (&req->rq_lock);
                 if (req->rq_import_generation < imp->imp_generation) {
                         req->rq_err = 1;
                         req->rq_status = -EINTR;
                         ptlrpc_client_wake_req(req);
                 }
-                spin_unlock (&req->rq_lock);
+                cfs_spin_unlock (&req->rq_lock);
         }
 
-        list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+        cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_list);
+                        cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
 
                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
 
-                spin_lock (&req->rq_lock);
+                cfs_spin_lock (&req->rq_lock);
                 if (req->rq_import_generation < imp->imp_generation) {
                         req->rq_err = 1;
                         req->rq_status = -EINTR;
                         ptlrpc_client_wake_req(req);
                 }
-                spin_unlock (&req->rq_lock);
+                cfs_spin_unlock (&req->rq_lock);
         }
 
         /* Last chance to free reqs left on the replay list, but we
@@ -2340,36 +2349,37 @@ void ptlrpc_abort_inflight(struct obd_import *imp)
         if (imp->imp_replayable)
                 ptlrpc_free_committed(imp);
 
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         EXIT;
 }
 
 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
 
         LASSERT(set != NULL);
 
-        list_for_each_safe(pos, tmp, &set->set_requests) {
+        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(pos, struct ptlrpc_request, rq_set_chain);
+                        cfs_list_entry(pos, struct ptlrpc_request,
+                                       rq_set_chain);
 
-                spin_lock(&req->rq_lock);
+                cfs_spin_lock(&req->rq_lock);
                 if (req->rq_phase != RQ_PHASE_RPC) {
-                        spin_unlock(&req->rq_lock);
+                        cfs_spin_unlock(&req->rq_lock);
                         continue;
                 }
 
                 req->rq_err = 1;
                 req->rq_status = -EINTR;
                 ptlrpc_client_wake_req(req);
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
         }
 }
 
 static __u64 ptlrpc_last_xid;
-static spinlock_t ptlrpc_last_xid_lock;
+static cfs_spinlock_t ptlrpc_last_xid_lock;
 
 /* Initialize the XID for the node.  This is common among all requests on
  * this node, and only requires the property that it is monotonically
@@ -2390,7 +2400,7 @@ void ptlrpc_init_xid(void)
 {
         time_t now = cfs_time_current_sec();
 
-        spin_lock_init(&ptlrpc_last_xid_lock);
+        cfs_spin_lock_init(&ptlrpc_last_xid_lock);
         if (now < YEAR_2004) {
                 ll_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
                 ptlrpc_last_xid >>= 2;
@@ -2403,9 +2413,9 @@ void ptlrpc_init_xid(void)
 __u64 ptlrpc_next_xid(void)
 {
         __u64 tmp;
-        spin_lock(&ptlrpc_last_xid_lock);
+        cfs_spin_lock(&ptlrpc_last_xid_lock);
         tmp = ++ptlrpc_last_xid;
-        spin_unlock(&ptlrpc_last_xid_lock);
+        cfs_spin_unlock(&ptlrpc_last_xid_lock);
         return tmp;
 }
 
@@ -2414,9 +2424,9 @@ __u64 ptlrpc_sample_next_xid(void)
 #if BITS_PER_LONG == 32
         /* need to avoid possible word tearing on 32-bit systems */
         __u64 tmp;
-        spin_lock(&ptlrpc_last_xid_lock);
+        cfs_spin_lock(&ptlrpc_last_xid_lock);
         tmp = ptlrpc_last_xid + 1;
-        spin_unlock(&ptlrpc_last_xid_lock);
+        cfs_spin_unlock(&ptlrpc_last_xid_lock);
         return tmp;
 #else
         /* No need to lock, since the returned value is racy anyway */
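
The XID logic above needs just two properties: the counter is monotonic
across restarts (hence the clock-based seeding guarded by the YEAR_2004
check), and 32-bit CPUs never see a torn read or write of the 64-bit
value (hence the spinlock, per the BITS_PER_LONG == 32 comment). A
minimal userspace sketch of the same contract - the names and the exact
seeding here are illustrative, not the ptlrpc symbols:

    #include <pthread.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t last_xid;
    static pthread_mutex_t xid_lock = PTHREAD_MUTEX_INITIALIZER;

    void xid_init(void)
    {
            /* clock seeding keeps XIDs increasing across restarts;
             * << 20 leaves ~1M XIDs/sec of headroom before a reseed
             * after restart could collide with old XIDs */
            last_xid = (uint64_t)time(NULL) << 20;
    }

    uint64_t xid_next(void)
    {
            uint64_t xid;

            /* the lock both serializes increments and prevents torn
             * 64-bit accesses on 32-bit machines */
            pthread_mutex_lock(&xid_lock);
            xid = ++last_xid;
            pthread_mutex_unlock(&xid_lock);
            return xid;
    }
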
index a4dac74..a41b202 100644 (file)
@@ -65,8 +65,8 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
 
         conn->c_peer = peer;
         conn->c_self = self;
-        INIT_HLIST_NODE(&conn->c_hash);
-        atomic_set(&conn->c_refcount, 1);
+        CFS_INIT_HLIST_NODE(&conn->c_hash);
+        cfs_atomic_set(&conn->c_refcount, 1);
         if (uuid)
                 obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);
 
@@ -84,7 +84,7 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
         EXIT;
 out:
         CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
-               conn, atomic_read(&conn->c_refcount),
+               conn, cfs_atomic_read(&conn->c_refcount),
                libcfs_nid2str(conn->c_peer.nid));
         return conn;
 }
@@ -97,7 +97,7 @@ int ptlrpc_connection_put(struct ptlrpc_connection *conn)
         if (!conn)
                 RETURN(rc);
 
-        LASSERT(!hlist_unhashed(&conn->c_hash));
+        LASSERT(!cfs_hlist_unhashed(&conn->c_hash));
 
         /*
          * We do not remove connection from hashtable and
@@ -115,11 +115,11 @@ int ptlrpc_connection_put(struct ptlrpc_connection *conn)
          * when ptlrpc_connection_fini()->lh_exit->conn_exit()
          * path is called.
          */
-        if (atomic_dec_return(&conn->c_refcount) == 1)
+        if (cfs_atomic_dec_return(&conn->c_refcount) == 1)
                 rc = 1;
 
         CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
-               conn, atomic_read(&conn->c_refcount),
+               conn, cfs_atomic_read(&conn->c_refcount),
                libcfs_nid2str(conn->c_peer.nid));
 
         RETURN(rc);
@@ -130,9 +130,9 @@ ptlrpc_connection_addref(struct ptlrpc_connection *conn)
 {
         ENTRY;
 
-        atomic_inc(&conn->c_refcount);
+        cfs_atomic_inc(&conn->c_refcount);
         CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
-               conn, atomic_read(&conn->c_refcount),
+               conn, cfs_atomic_read(&conn->c_refcount),
                libcfs_nid2str(conn->c_peer.nid));
 
         RETURN(conn);
@@ -168,63 +168,63 @@ conn_hashfn(cfs_hash_t *hs,  void *key, unsigned mask)
 }
 
 static int
-conn_compare(void *key, struct hlist_node *hnode)
+conn_compare(void *key, cfs_hlist_node_t *hnode)
 {
         struct ptlrpc_connection *conn;
         lnet_process_id_t *conn_key;
 
         LASSERT(key != NULL);
         conn_key = (lnet_process_id_t*)key;
-        conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+        conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
 
         return conn_key->nid == conn->c_peer.nid &&
                conn_key->pid == conn->c_peer.pid;
 }
 
 static void *
-conn_key(struct hlist_node *hnode)
+conn_key(cfs_hlist_node_t *hnode)
 {
         struct ptlrpc_connection *conn;
-        conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+        conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
         return &conn->c_peer;
 }
 
 static void *
-conn_get(struct hlist_node *hnode)
+conn_get(cfs_hlist_node_t *hnode)
 {
         struct ptlrpc_connection *conn;
 
-        conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
-        atomic_inc(&conn->c_refcount);
+        conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+        cfs_atomic_inc(&conn->c_refcount);
 
         return conn;
 }
 
 static void *
-conn_put(struct hlist_node *hnode)
+conn_put(cfs_hlist_node_t *hnode)
 {
         struct ptlrpc_connection *conn;
 
-        conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
-        atomic_dec(&conn->c_refcount);
+        conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+        cfs_atomic_dec(&conn->c_refcount);
 
         return conn;
 }
 
 static void
-conn_exit(struct hlist_node *hnode)
+conn_exit(cfs_hlist_node_t *hnode)
 {
         struct ptlrpc_connection *conn;
 
-        conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+        conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
         /*
          * Nothing should be left. The connection user has put it, and
          * the connection was also deleted from the table by this time,
          * so we should have 0 refs.
          */
-        LASSERTF(atomic_read(&conn->c_refcount) == 0,
+        LASSERTF(cfs_atomic_read(&conn->c_refcount) == 0,
                  "Busy connection with %d refs\n",
-                 atomic_read(&conn->c_refcount));
+                 cfs_atomic_read(&conn->c_refcount));
         OBD_FREE_PTR(conn);
 }
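
One subtlety in the refcounting above: the hash table itself holds a
permanent reference, so ptlrpc_connection_put() reports "last reference
dropped" when the count falls to 1, not 0 (the table's reference goes
away later, on the conn_exit() path). A self-contained sketch of that
convention - demo names, not the ptlrpc API:

    #include <stdatomic.h>
    #include <stdio.h>

    struct demo_conn {
            atomic_int refcount;        /* starts at 1: the table's ref */
    };

    /* returns 1 when the caller dropped the last *user* reference,
     * i.e. only the table's reference remains */
    int demo_conn_put(struct demo_conn *c)
    {
            /* fetch_sub returns the old value; old 2 => new 1 */
            return atomic_fetch_sub(&c->refcount, 1) == 2;
    }

    int main(void)
    {
            struct demo_conn c = { 2 };        /* table ref + one user */
            printf("%d\n", demo_conn_put(&c)); /* prints 1 */
            return 0;
    }
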
 
index 27f0fec..018dc62 100644 (file)
@@ -75,9 +75,9 @@ void request_out_callback(lnet_event_t *ev)
                 /* Failed send: make it seem like the reply timed out, just
                  * like failing sends in client.c does currently...  */
 
-                spin_lock(&req->rq_lock);
+                cfs_spin_lock(&req->rq_lock);
                 req->rq_net_err = 1;
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
 
                 ptlrpc_client_wake_req(req);
         }
@@ -106,7 +106,7 @@ void reply_in_callback(lnet_event_t *ev)
            for adaptive timeouts' early reply. */
         LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
 
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
 
         req->rq_receiving_reply = 0;
         req->rq_early = 0;
@@ -170,7 +170,7 @@ out_wake:
         /* NB don't unlock till after wakeup; req can disappear under us
          * since we don't have our own ref */
         ptlrpc_client_wake_req(req);
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
         EXIT;
 }
 
@@ -194,7 +194,7 @@ void client_bulk_callback (lnet_event_t *ev)
                "event type %d, status %d, desc %p\n",
                ev->type, ev->status, desc);
 
-        spin_lock(&desc->bd_lock);
+        cfs_spin_lock(&desc->bd_lock);
 
         LASSERT(desc->bd_network_rw);
         desc->bd_network_rw = 0;
@@ -213,7 +213,7 @@ void client_bulk_callback (lnet_event_t *ev)
          * otherwise */
         ptlrpc_client_wake_req(desc->bd_req);
 
-        spin_unlock(&desc->bd_lock);
+        cfs_spin_unlock(&desc->bd_lock);
         EXIT;
 }
 
@@ -269,7 +269,7 @@ void request_in_callback(lnet_event_t *ev)
         req->rq_reqbuf = ev->md.start + ev->offset;
         if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                 req->rq_reqdata_len = ev->mlength;
-        do_gettimeofday(&req->rq_arrival_time);
+        cfs_gettimeofday(&req->rq_arrival_time);
         req->rq_peer = ev->initiator;
         req->rq_self = ev->target.nid;
         req->rq_rqbd = rqbd;
@@ -277,19 +277,19 @@ void request_in_callback(lnet_event_t *ev)
 #ifdef CRAY_XT3
         req->rq_uid = ev->uid;
 #endif
-        spin_lock_init(&req->rq_lock);
+        cfs_spin_lock_init(&req->rq_lock);
         CFS_INIT_LIST_HEAD(&req->rq_timed_list);
-        atomic_set(&req->rq_refcount, 1);
+        cfs_atomic_set(&req->rq_refcount, 1);
         if (ev->type == LNET_EVENT_PUT)
                 CDEBUG(D_RPCTRACE, "incoming req@%p x"LPU64" msgsize %u\n",
                        req, req->rq_xid, ev->mlength);
 
         CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
 
-        spin_lock(&service->srv_lock);
+        cfs_spin_lock(&service->srv_lock);
 
         req->rq_history_seq = service->srv_request_seq++;
-        list_add_tail(&req->rq_history_list, &service->srv_request_history);
+        cfs_list_add_tail(&req->rq_history_list, &service->srv_request_history);
 
         if (ev->unlinked) {
                 service->srv_nrqbd_receiving--;
@@ -310,14 +310,14 @@ void request_in_callback(lnet_event_t *ev)
                 rqbd->rqbd_refcount++;
         }
 
-        list_add_tail(&req->rq_list, &service->srv_req_in_queue);
+        cfs_list_add_tail(&req->rq_list, &service->srv_req_in_queue);
         service->srv_n_queued_reqs++;
 
         /* NB everything can disappear under us once the request
          * has been queued and we unlock, so do the wake now... */
         cfs_waitq_signal(&service->srv_waitq);
 
-        spin_unlock(&service->srv_lock);
+        cfs_spin_unlock(&service->srv_lock);
         EXIT;
 }
 
@@ -340,7 +340,7 @@ void reply_out_callback(lnet_event_t *ev)
                  * net's ref on 'rs' */
                 LASSERT (ev->unlinked);
                 ptlrpc_rs_decref(rs);
-                atomic_dec (&svc->srv_outstanding_replies);
+                cfs_atomic_dec (&svc->srv_outstanding_replies);
                 EXIT;
                 return;
         }
@@ -350,14 +350,14 @@ void reply_out_callback(lnet_event_t *ev)
         if (ev->unlinked) {
                 /* Last network callback. The net's ref on 'rs' stays put
                  * until ptlrpc_handle_rs() is done with it */
-                spin_lock(&svc->srv_lock);
-                spin_lock(&rs->rs_lock);
+                cfs_spin_lock(&svc->srv_lock);
+                cfs_spin_lock(&rs->rs_lock);
                 rs->rs_on_net = 0;
                 if (!rs->rs_no_ack ||
                     rs->rs_transno <= rs->rs_export->exp_obd->obd_last_committed)
                         ptlrpc_schedule_difficult_reply (rs);
-                spin_unlock(&rs->rs_lock);
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&rs->rs_lock);
+                cfs_spin_unlock(&svc->srv_lock);
         }
 
         EXIT;
@@ -383,7 +383,7 @@ void server_bulk_callback (lnet_event_t *ev)
                "event type %d, status %d, desc %p\n",
                ev->type, ev->status, desc);
 
-        spin_lock(&desc->bd_lock);
+        cfs_spin_lock(&desc->bd_lock);
 
         if ((ev->type == LNET_EVENT_ACK ||
              ev->type == LNET_EVENT_REPLY) &&
@@ -402,7 +402,7 @@ void server_bulk_callback (lnet_event_t *ev)
                 cfs_waitq_signal(&desc->bd_waitq);
         }
 
-        spin_unlock(&desc->bd_lock);
+        cfs_spin_unlock(&desc->bd_lock);
         EXIT;
 }
 
@@ -569,7 +569,7 @@ CFS_LIST_HEAD(liblustre_idle_callbacks);
 void *liblustre_services_callback;
 
 void *
-liblustre_register_waitidle_callback (struct list_head *callback_list,
+liblustre_register_waitidle_callback (cfs_list_t *callback_list,
                                       const char *name,
                                       int (*fn)(void *arg), void *arg)
 {
@@ -581,7 +581,7 @@ liblustre_register_waitidle_callback (struct list_head *callback_list,
         llwc->llwc_name = name;
         llwc->llwc_fn = fn;
         llwc->llwc_arg = arg;
-        list_add_tail(&llwc->llwc_list, callback_list);
+        cfs_list_add_tail(&llwc->llwc_list, callback_list);
 
         return (llwc);
 }
@@ -591,7 +591,7 @@ liblustre_deregister_waitidle_callback (void *opaque)
 {
         struct liblustre_wait_callback *llwc = opaque;
 
-        list_del(&llwc->llwc_list);
+        cfs_list_del(&llwc->llwc_list);
         OBD_FREE(llwc, sizeof(*llwc));
 }
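
The liblustre wait machinery in this file is a plain list of callbacks:
registration returns the list node as an opaque handle (as in
liblustre_register_waitidle_callback() above), and the event loop later
polls every entry, OR-ing the results. A simplified, self-contained
sketch of the idiom - singly linked for brevity, with deregistration
(unlink and free) omitted:

    #include <stdlib.h>

    struct cb_node {
            struct cb_node *next;
            int (*fn)(void *arg);
            void *arg;
    };

    static struct cb_node *cb_list;

    /* register: the node itself is the opaque handle */
    void *cb_register(int (*fn)(void *), void *arg)
    {
            struct cb_node *n = malloc(sizeof(*n));

            if (n == NULL)
                    return NULL;
            n->fn = fn;
            n->arg = arg;
            n->next = cb_list;
            cb_list = n;
            return n;
    }

    /* poll: give every registered callback a chance */
    int cb_poll_all(void)
    {
            struct cb_node *n;
            int found = 0;

            for (n = cb_list; n != NULL; n = n->next)
                    if (n->fn(n->arg))
                            found = 1;
            return found;
    }
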
 
@@ -653,7 +653,7 @@ int liblustre_waiting = 0;
 int
 liblustre_wait_event (int timeout)
 {
-        struct list_head               *tmp;
+        cfs_list_t                     *tmp;
         struct liblustre_wait_callback *llwc;
         int                             found_something = 0;
 
@@ -666,9 +666,10 @@ liblustre_wait_event (int timeout)
                         found_something = 1;
 
                 /* Give all registered callbacks a bite at the cherry */
-                list_for_each(tmp, &liblustre_wait_callbacks) {
-                        llwc = list_entry(tmp, struct liblustre_wait_callback,
-                                          llwc_list);
+                cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
+                        llwc = cfs_list_entry(tmp,
+                                              struct liblustre_wait_callback,
+                                              llwc_list);
 
                         if (llwc->llwc_fn(llwc->llwc_arg))
                                 found_something = 1;
@@ -693,7 +694,7 @@ liblustre_wait_idle(void)
 {
         static int recursed = 0;
 
-        struct list_head               *tmp;
+        cfs_list_t                     *tmp;
         struct liblustre_wait_callback *llwc;
         int                             idle = 0;
 
@@ -705,9 +706,10 @@ liblustre_wait_idle(void)
 
                 idle = 1;
 
-                list_for_each(tmp, &liblustre_idle_callbacks) {
-                        llwc = list_entry(tmp, struct liblustre_wait_callback,
-                                          llwc_list);
+                cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
+                        llwc = cfs_list_entry(tmp,
+                                              struct liblustre_wait_callback,
+                                              llwc_list);
 
                         if (!llwc->llwc_fn(llwc->llwc_arg)) {
                                 idle = 0;
@@ -735,7 +737,7 @@ int ptlrpc_init_portals(void)
                 liblustre_register_wait_callback("liblustre_check_services",
                                                  &liblustre_check_services,
                                                  NULL);
-        init_completion_module(liblustre_wait_event);
+        cfs_init_completion_module(liblustre_wait_event);
 #endif
         rc = ptlrpcd_addref();
         if (rc == 0)
index 1bc9e10..1983143 100644 (file)
@@ -101,11 +101,11 @@ struct subflavor_desc {
 
 /* Each mechanism is described by the following struct: */
 struct gss_api_mech {
-        struct list_head        gm_list;
-        struct module          *gm_owner;
+        cfs_list_t              gm_list;
+        cfs_module_t           *gm_owner;
         char                   *gm_name;
         rawobj_t                gm_oid;
-        atomic_t                gm_count;
+        cfs_atomic_t            gm_count;
         struct gss_api_ops     *gm_ops;
         int                     gm_sf_num;
         struct subflavor_desc  *gm_sfs;
index 370004c..28a28b7 100644 (file)
@@ -138,11 +138,11 @@ int ctx_init_pack_request(struct obd_import *imp,
         /* 4. now the token */
         LASSERT(size >= (sizeof(__u32) + token_size));
         *p++ = cpu_to_le32(((__u32) token_size));
-        if (copy_from_user(p, token, token_size)) {
+        if (cfs_copy_from_user(p, token, token_size)) {
                 CERROR("can't copy token\n");
                 return -EFAULT;
         }
-        size -= sizeof(__u32) + size_round4(token_size);
+        size -= sizeof(__u32) + cfs_size_round4(token_size);
 
         req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
                                                 msg->lm_buflens[offset] - size, 0);
@@ -173,8 +173,8 @@ int ctx_init_parse_reply(struct lustre_msg *msg, int swabbed,
                 return -EPROTO;
         }
 
-        if (outlen < (4 + 2) * 4 + size_round4(ghdr->gh_handle.len) +
-                     size_round4(msg->lm_buflens[2])) {
+        if (outlen < (4 + 2) * 4 + cfs_size_round4(ghdr->gh_handle.len) +
+                     cfs_size_round4(msg->lm_buflens[2])) {
                 CERROR("output buffer size %ld too small\n", outlen);
                 return -EFAULT;
         }
@@ -182,16 +182,16 @@ int ctx_init_parse_reply(struct lustre_msg *msg, int swabbed,
         status = 0;
         effective = 0;
 
-        if (copy_to_user(outbuf, &status, 4))
+        if (cfs_copy_to_user(outbuf, &status, 4))
                 return -EFAULT;
         outbuf += 4;
-        if (copy_to_user(outbuf, &ghdr->gh_major, 4))
+        if (cfs_copy_to_user(outbuf, &ghdr->gh_major, 4))
                 return -EFAULT;
         outbuf += 4;
-        if (copy_to_user(outbuf, &ghdr->gh_minor, 4))
+        if (cfs_copy_to_user(outbuf, &ghdr->gh_minor, 4))
                 return -EFAULT;
         outbuf += 4;
-        if (copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
+        if (cfs_copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
                 return -EFAULT;
         outbuf += 4;
         effective += 4 * 4;
@@ -199,10 +199,10 @@ int ctx_init_parse_reply(struct lustre_msg *msg, int swabbed,
         /* handle */
         obj_len = ghdr->gh_handle.len;
         round_len = (obj_len + 3) & ~ 3;
-        if (copy_to_user(outbuf, &obj_len, 4))
+        if (cfs_copy_to_user(outbuf, &obj_len, 4))
                 return -EFAULT;
         outbuf += 4;
-        if (copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
+        if (cfs_copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
                 return -EFAULT;
         outbuf += round_len;
         effective += 4 + round_len;
@@ -210,10 +210,10 @@ int ctx_init_parse_reply(struct lustre_msg *msg, int swabbed,
         /* out token */
         obj_len = msg->lm_buflens[2];
         round_len = (obj_len + 3) & ~ 3;
-        if (copy_to_user(outbuf, &obj_len, 4))
+        if (cfs_copy_to_user(outbuf, &obj_len, 4))
                 return -EFAULT;
         outbuf += 4;
-        if (copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
+        if (cfs_copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
                 return -EFAULT;
         outbuf += round_len;
         effective += 4 + round_len;
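
The reply parsing above pads every object to a 4-byte boundary with
round_len = (obj_len + 3) & ~3, the same rounding the cfs_size_round4()
calls earlier in this file's diff appear to rely on. A worked example
of the rounding:

    #include <assert.h>
    #include <stddef.h>

    static inline size_t round4(size_t len)
    {
            return (len + 3) & ~(size_t)3;  /* next multiple of 4 */
    }

    int main(void)
    {
            assert(round4(0) == 0);
            assert(round4(1) == 4);
            assert(round4(4) == 4);
            assert(round4(5) == 8);   /* a 5-byte token occupies 8 */
            return 0;
    }
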
@@ -252,7 +252,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
                        "version\n", count, (unsigned long) sizeof(param));
                 RETURN(-EINVAL);
         }
-        if (copy_from_user(&param, buffer, sizeof(param))) {
+        if (cfs_copy_from_user(&param, buffer, sizeof(param))) {
                 CERROR("failed copy data from lgssd\n");
                 RETURN(-EFAULT);
         }
@@ -280,10 +280,10 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
                 RETURN(-EINVAL);
         }
 
-        spin_lock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
         if (obd->obd_stopping) {
                 CERROR("obd %s has stopped\n", obdname);
-                spin_unlock(&obd->obd_dev_lock);
+                cfs_spin_unlock(&obd->obd_dev_lock);
                 RETURN(-EINVAL);
         }
 
@@ -291,18 +291,18 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
             strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
             strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
                 CERROR("obd %s is not a client device\n", obdname);
-                spin_unlock(&obd->obd_dev_lock);
+                cfs_spin_unlock(&obd->obd_dev_lock);
                 RETURN(-EINVAL);
         }
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 
-        down_read(&obd->u.cli.cl_sem);
+        cfs_down_read(&obd->u.cli.cl_sem);
         if (obd->u.cli.cl_import == NULL) {
                 CERROR("obd %s: import has gone\n", obd->obd_name);
                 RETURN(-EINVAL);
         }
         imp = class_import_get(obd->u.cli.cl_import);
-        up_read(&obd->u.cli.cl_sem);
+        cfs_up_read(&obd->u.cli.cl_sem);
 
         if (imp->imp_deactive) {
                 CERROR("import has been deactivated\n");
@@ -367,7 +367,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
         param.reply_length = lsize;
 
 out_copy:
-        if (copy_to_user(buffer, &param, sizeof(param)))
+        if (cfs_copy_to_user(buffer, &param, sizeof(param)))
                 rc = -EFAULT;
         else
                 rc = 0;
@@ -386,7 +386,7 @@ int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
         int                      rc;
         ENTRY;
 
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
         if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
                 CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
@@ -395,7 +395,7 @@ int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
                 RETURN(0);
         }
 
-        might_sleep();
+        cfs_might_sleep();
 
         CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
               sec_is_reverse(ctx->cc_sec) ?
index e09f444..5b51491 100644 (file)
@@ -207,7 +207,7 @@ static inline __u64 gss_handle_to_u64(rawobj_t *handle)
                                          GSS_SEQ_WIN_MAIN / 4)
 
 struct gss_svc_seq_data {
-        spinlock_t              ssd_lock;
+        cfs_spinlock_t          ssd_lock;
         /*
          * highest sequence number seen so far, for main and back window
          */
@@ -264,7 +264,7 @@ struct gss_cli_ctx {
         __u32                   gc_flavor;
         __u32                   gc_proc;
         __u32                   gc_win;
-        atomic_t                gc_seq;
+        cfs_atomic_t            gc_seq;
         rawobj_t                gc_handle;
         struct gss_ctx         *gc_mechctx;
         /* handle for the buddy svc ctx */
@@ -280,14 +280,14 @@ struct gss_cli_ctx_keyring {
 struct gss_sec {
         struct ptlrpc_sec       gs_base;
         struct gss_api_mech    *gs_mech;
-        spinlock_t              gs_lock;
+        cfs_spinlock_t          gs_lock;
         __u64                   gs_rvs_hdl;
 };
 
 struct gss_sec_pipefs {
         struct gss_sec          gsp_base;
         int                     gsp_chash_size;  /* must be 2^n */
-        struct hlist_head       gsp_chash[0];
+        cfs_hlist_head_t        gsp_chash[0];
 };
 
 /*
@@ -300,7 +300,7 @@ struct gss_sec_keyring {
         /*
          * all contexts listed here. access is protected by sec spinlock.
          */
-        struct hlist_head       gsk_clist;
+        cfs_hlist_head_t        gsk_clist;
         /*
          * specially point to root ctx (only one at a time). access is
          * protected by sec spinlock.
@@ -309,10 +309,10 @@ struct gss_sec_keyring {
         /*
          * specially serialize upcalls for root context.
          */
-        struct mutex            gsk_root_uc_lock;
+        cfs_mutex_t             gsk_root_uc_lock;
 
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        struct mutex            gsk_uc_lock;        /* serialize upcalls */
+        cfs_mutex_t             gsk_uc_lock;        /* serialize upcalls */
 #endif
 };
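
gss_svc_seq_data above is the standard GSS anti-replay structure: the
highest sequence number seen so far plus a sliding window of recently
seen numbers (split into main and back windows here). A simplified,
self-contained sketch of how such a window check works - one 64-bit
bitmap stands in for the real main/back pair, so this is not the actual
gss_svc_seq_data logic:

    #include <stdint.h>

    #define WIN 64

    struct seq_win {
            uint32_t max;   /* highest sequence number accepted */
            uint64_t seen;  /* bit i set => (max - i) was seen */
    };

    /* returns 1 to accept 'seq', 0 to drop it (replay or too old) */
    int seq_check(struct seq_win *w, uint32_t seq)
    {
            if (seq > w->max) {
                    uint32_t shift = seq - w->max;

                    /* window slides forward; guard shifts >= 64,
                     * which would be undefined behaviour in C */
                    w->seen = (shift >= WIN) ? 0 : w->seen << shift;
                    w->seen |= 1;   /* bit 0 now stands for 'seq' */
                    w->max = seq;
                    return 1;
            }
            if (w->max - seq >= WIN)
                    return 0;       /* fell off the back of the window */
            if (w->seen & (1ULL << (w->max - seq)))
                    return 0;       /* duplicate: replay */
            w->seen |= 1ULL << (w->max - seq);
            return 1;
    }
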
 
index adfebcf..465e714 100644 (file)
@@ -128,14 +128,14 @@ static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
 static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
 {
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        mutex_lock(&gsec_kr->gsk_uc_lock);
+        cfs_mutex_lock(&gsec_kr->gsk_uc_lock);
 #endif
 }
 
 static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
 {
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        mutex_unlock(&gsec_kr->gsk_uc_lock);
+        cfs_mutex_unlock(&gsec_kr->gsk_uc_lock);
 #endif
 }
 
@@ -166,7 +166,7 @@ void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
         LASSERT(timer);
 
         CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
-        timeout = timeout * HZ + cfs_time_current();
+        timeout = timeout * CFS_HZ + cfs_time_current();
 
         init_timer(timer);
         timer->expires = timeout;
@@ -224,8 +224,8 @@ struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
         }
 
         ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
-        clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
-        atomic_inc(&ctx->cc_refcount); /* for the caller */
+        cfs_clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
+        cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */
 
         return ctx;
 }
@@ -239,9 +239,9 @@ static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
 
         /* at this time the association with key has been broken. */
         LASSERT(sec);
-        LASSERT(atomic_read(&sec->ps_refcount) > 0);
-        LASSERT(atomic_read(&sec->ps_nctx) > 0);
-        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
         LASSERT(gctx_kr->gck_key == NULL);
 
         ctx_clear_timer_kr(ctx);
@@ -252,7 +252,7 @@ static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
 
         OBD_FREE_PTR(gctx_kr);
 
-        atomic_dec(&sec->ps_nctx);
+        cfs_atomic_dec(&sec->ps_nctx);
         sptlrpc_sec_put(sec);
 }
 
@@ -261,16 +261,16 @@ static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
         if (sync) {
                 ctx_destroy_kr(ctx);
         } else {
-                atomic_inc(&ctx->cc_refcount);
+                cfs_atomic_inc(&ctx->cc_refcount);
                 sptlrpc_gc_add_ctx(ctx);
         }
 }
 
 static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
 {
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
-        if (atomic_dec_and_test(&ctx->cc_refcount))
+        if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
                 ctx_release_kr(ctx, sync);
 }
 
@@ -288,16 +288,16 @@ static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
  *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
  */
 
-static inline void spin_lock_if(spinlock_t *lock, int condition)
+static inline void spin_lock_if(cfs_spinlock_t *lock, int condition)
 {
         if (condition)
-                spin_lock(lock);
+                cfs_spin_lock(lock);
 }
 
-static inline void spin_unlock_if(spinlock_t *lock, int condition)
+static inline void spin_unlock_if(cfs_spinlock_t *lock, int condition)
 {
         if (condition)
-                spin_unlock(lock);
+                cfs_spin_unlock(lock);
 }
 
 static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
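
spin_lock_if()/spin_unlock_if() above implement conditional locking:
callers pass !locked, so a caller that already holds the lock turns
both helpers into no-ops. A standalone illustration of the idiom, with
pthread stand-ins rather than the libcfs types:

    #include <pthread.h>

    static void lock_if(pthread_mutex_t *lock, int condition)
    {
            if (condition)
                    pthread_mutex_lock(lock);
    }

    static void unlock_if(pthread_mutex_t *lock, int condition)
    {
            if (condition)
                    pthread_mutex_unlock(lock);
    }

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

    /* 'locked' says whether the caller already holds demo_lock,
     * mirroring ctx_enlist_kr(..., int locked) above */
    void demo_enlist(int locked)
    {
            lock_if(&demo_lock, !locked);
            /* ... manipulate the shared list here ... */
            unlock_if(&demo_lock, !locked);
    }
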
@@ -305,14 +305,14 @@ static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
         struct ptlrpc_sec      *sec = ctx->cc_sec;
         struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
 
-        LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(!cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
         spin_lock_if(&sec->ps_lock, !locked);
 
-        atomic_inc(&ctx->cc_refcount);
-        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
-        hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+        cfs_atomic_inc(&ctx->cc_refcount);
+        cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+        cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
         if (is_root)
                 gsec_kr->gsk_root_ctx = ctx;
 
@@ -332,7 +332,7 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
         struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
 
         /* if hashed bit has gone, leave the job to somebody who is doing it */
-        if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
+        if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
                 return 0;
 
         /* drop ref inside spin lock to prevent race with other operations */
@@ -340,8 +340,8 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
 
         if (gsec_kr->gsk_root_ctx == ctx)
                 gsec_kr->gsk_root_ctx = NULL;
-        hlist_del_init(&ctx->cc_cache);
-        atomic_dec(&ctx->cc_refcount);
+        cfs_hlist_del_init(&ctx->cc_cache);
+        cfs_atomic_dec(&ctx->cc_refcount);
 
         spin_unlock_if(&sec->ps_lock, !locked);
 
@@ -354,14 +354,14 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
  */
 static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
         LASSERT(atomic_read(&key->usage) > 0);
         LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
         LASSERT(key->payload.data == NULL);
 
        /* at this time the context may or may not be in the list. */
         key_get(key);
-        atomic_inc(&ctx->cc_refcount);
+        cfs_atomic_inc(&ctx->cc_refcount);
         ctx2gctx_keyring(ctx)->gck_key = key;
         key->payload.data = ctx;
 }
@@ -373,7 +373,7 @@ static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
 static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
 {
         LASSERT(key->payload.data == ctx);
-        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
 
         /* must revoke the key, or others may treat it as newly created */
         key_revoke_locked(key);
@@ -444,14 +444,14 @@ static void kill_key_locked(struct key *key)
 /*
  * caller should hold one ref on contexts in freelist.
  */
-static void dispose_ctx_list_kr(struct hlist_head *freelist)
+static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
 {
-        struct hlist_node      *pos, *next;
+        cfs_hlist_node_t       *pos, *next;
         struct ptlrpc_cli_ctx  *ctx;
         struct gss_cli_ctx     *gctx;
 
-        hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
-                hlist_del_init(&ctx->cc_cache);
+        cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
+                cfs_hlist_del_init(&ctx->cc_cache);
 
                /* reverse ctx: update current seq to buddy svcctx if it exists.
                  * ideally this should be done at gss_cli_ctx_finalize(), but
@@ -465,7 +465,7 @@ static void dispose_ctx_list_kr(struct hlist_head *freelist)
                 if (!rawobj_empty(&gctx->gc_svc_handle) &&
                     sec_is_reverse(gctx->gc_base.cc_sec)) {
                         gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
-                                        (__u32) atomic_read(&gctx->gc_seq));
+                                        (__u32) cfs_atomic_read(&gctx->gc_seq));
                 }
 
                /* we need to wake up waiting reqs here. the context might
@@ -488,18 +488,19 @@ struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
         struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
         struct ptlrpc_cli_ctx   *ctx = NULL;
 
-        spin_lock(&sec->ps_lock);
+        cfs_spin_lock(&sec->ps_lock);
 
         ctx = gsec_kr->gsk_root_ctx;
 
         if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
-                struct hlist_node      *node;
+                cfs_hlist_node_t       *node;
                 struct ptlrpc_cli_ctx  *tmp;
 
                /* reverse ctx: search for the root ctx in the list and
                 * choose the one with the shortest expire time, which most
                 * probably has an established peer ctx at the client side. */
-                hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, cc_cache) {
+                cfs_hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist,
+                                         cc_cache) {
                         if (ctx == NULL || ctx->cc_expire == 0 ||
                             ctx->cc_expire > tmp->cc_expire) {
                                 ctx = tmp;
@@ -510,12 +511,12 @@ struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
         }
 
         if (ctx) {
-                LASSERT(atomic_read(&ctx->cc_refcount) > 0);
-                LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
-                atomic_inc(&ctx->cc_refcount);
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+                LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
+                cfs_atomic_inc(&ctx->cc_refcount);
         }
 
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 
         return ctx;
 }
@@ -528,19 +529,19 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
                                  struct key *key)
 {
         struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
-        struct hlist_node      *hnode;
+        cfs_hlist_node_t       *hnode;
         struct ptlrpc_cli_ctx  *ctx;
         cfs_time_t              now;
         ENTRY;
 
         LASSERT(sec_is_reverse(sec));
 
-        spin_lock(&sec->ps_lock);
+        cfs_spin_lock(&sec->ps_lock);
 
         now = cfs_time_current_sec();
 
         /* set all existing ctxs short expiry */
-        hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
+        cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
                 if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
                         ctx->cc_early_expire = 1;
                         ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
@@ -556,7 +557,7 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
         if (key)
                 bind_key_ctx(key, new_ctx);
 
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 }
 
 static void construct_key_desc(void *buf, int bufsize,
@@ -584,9 +585,9 @@ struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
 
         CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
         gsec_kr->gsk_root_ctx = NULL;
-        mutex_init(&gsec_kr->gsk_root_uc_lock);
+        cfs_mutex_init(&gsec_kr->gsk_root_uc_lock);
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        mutex_init(&gsec_kr->gsk_uc_lock);
+        cfs_mutex_init(&gsec_kr->gsk_uc_lock);
 #endif
 
         if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
@@ -614,7 +615,7 @@ void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
 
         CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
 
-        LASSERT(hlist_empty(&gsec_kr->gsk_clist));
+        LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist));
         LASSERT(gsec_kr->gsk_root_ctx == NULL);
 
         gss_sec_destroy_common(gsec);
@@ -712,7 +713,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
          * the root upcall lock, make sure nobody else has populated a new
          * root context since the last check. */
         if (is_root) {
-                mutex_lock(&gsec_kr->gsk_root_uc_lock);
+                cfs_mutex_lock(&gsec_kr->gsk_root_uc_lock);
 
                 ctx = sec_lookup_root_ctx_kr(sec);
                 if (ctx)
@@ -773,13 +774,13 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
         if (likely(key->payload.data != NULL)) {
                 ctx = key->payload.data;
 
-                LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 1);
                 LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
                 LASSERT(atomic_read(&key->usage) >= 2);
 
                 /* simply take a ref and return. it's upper layer's
                  * responsibility to detect & replace dead ctx. */
-                atomic_inc(&ctx->cc_refcount);
+                cfs_atomic_inc(&ctx->cc_refcount);
         } else {
                /* pre-initialization with a cli_ctx. this can't be done in
                 * key_instantiate() because we don't have enough information
@@ -810,7 +811,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
         key_put(key);
 out:
         if (is_root)
-                mutex_unlock(&gsec_kr->gsk_root_uc_lock);
+                cfs_mutex_unlock(&gsec_kr->gsk_root_uc_lock);
         RETURN(ctx);
 }
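
The gsk_root_uc_lock taken in the root path above is now a cfs_mutex; the wrapper is expected to be equally thin on Linux. A plausible sketch, assuming kernels that provide struct mutex (older libcfs fallbacks may build this on semaphores instead):

/* assumed mapping; actual libcfs definitions may differ */
#include <linux/mutex.h>

typedef struct mutex cfs_mutex_t;

#define cfs_mutex_init(m)       mutex_init(m)
#define cfs_mutex_lock(m)       mutex_lock(m)
#define cfs_mutex_unlock(m)     mutex_unlock(m)
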
 
@@ -819,8 +820,8 @@ void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
                             struct ptlrpc_cli_ctx *ctx,
                             int sync)
 {
-        LASSERT(atomic_read(&sec->ps_refcount) > 0);
-        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
         ctx_release_kr(ctx, sync);
 }
 
@@ -878,46 +879,46 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
                              int grace, int force)
 {
         struct gss_sec_keyring *gsec_kr;
-        struct hlist_head       freelist = CFS_HLIST_HEAD_INIT;
-        struct hlist_node      *pos, *next;
+        cfs_hlist_head_t        freelist = CFS_HLIST_HEAD_INIT;
+        cfs_hlist_node_t       *pos, *next;
         struct ptlrpc_cli_ctx  *ctx;
         ENTRY;
 
         gsec_kr = sec2gsec_keyring(sec);
 
-        spin_lock(&sec->ps_lock);
-        hlist_for_each_entry_safe(ctx, pos, next,
-                                  &gsec_kr->gsk_clist, cc_cache) {
-                LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        cfs_spin_lock(&sec->ps_lock);
+        cfs_hlist_for_each_entry_safe(ctx, pos, next,
+                                      &gsec_kr->gsk_clist, cc_cache) {
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
                 if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
                         continue;
 
                /* at this moment there are at least 2 base references:
                  * key association and in-list. */
-                if (atomic_read(&ctx->cc_refcount) > 2) {
+                if (cfs_atomic_read(&ctx->cc_refcount) > 2) {
                         if (!force)
                                 continue;
                         CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
                               ctx, ctx->cc_vcred.vc_uid,
                               sec2target_str(ctx->cc_sec),
-                              atomic_read(&ctx->cc_refcount) - 2);
+                              cfs_atomic_read(&ctx->cc_refcount) - 2);
                 }
 
-                set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+                cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
                 if (!grace)
-                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+                        cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
-                atomic_inc(&ctx->cc_refcount);
+                cfs_atomic_inc(&ctx->cc_refcount);
 
                 if (ctx_unlist_kr(ctx, 1)) {
-                        hlist_add_head(&ctx->cc_cache, &freelist);
+                        cfs_hlist_add_head(&ctx->cc_cache, &freelist);
                 } else {
-                        LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
-                        atomic_dec(&ctx->cc_refcount);
+                        LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
+                        cfs_atomic_dec(&ctx->cc_refcount);
                 }
         }
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 
         dispose_ctx_list_kr(&freelist);
         EXIT;
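
The ctx flag manipulation above (cfs_set_bit()/cfs_clear_bit()) comes from the new libcfs/include/libcfs/linux/linux-bitops.h; the assumed Linux mapping is again a set of direct aliases:

/* assumed mapping; actual libcfs definitions may differ */
#include <linux/bitops.h>

#define cfs_set_bit(nr, addr)             set_bit(nr, addr)
#define cfs_clear_bit(nr, addr)           clear_bit(nr, addr)
#define cfs_test_bit(nr, addr)            test_bit(nr, addr)
#define cfs_test_and_set_bit(nr, addr)    test_and_set_bit(nr, addr)
#define cfs_test_and_clear_bit(nr, addr)  test_and_clear_bit(nr, addr)
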
@@ -931,7 +932,8 @@ int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
         ENTRY;
 
         CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
-               sec, atomic_read(&sec->ps_refcount), atomic_read(&sec->ps_nctx),
+               sec, cfs_atomic_read(&sec->ps_refcount),
+               cfs_atomic_read(&sec->ps_nctx),
                uid, grace, force);
 
         if (uid != -1 && uid != 0)
@@ -946,29 +948,29 @@ static
 void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
 {
         struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
-        struct hlist_head       freelist = CFS_HLIST_HEAD_INIT;
-        struct hlist_node      *pos, *next;
+        cfs_hlist_head_t        freelist = CFS_HLIST_HEAD_INIT;
+        cfs_hlist_node_t       *pos, *next;
         struct ptlrpc_cli_ctx  *ctx;
         ENTRY;
 
         CWARN("running gc\n");
 
-        spin_lock(&sec->ps_lock);
-        hlist_for_each_entry_safe(ctx, pos, next,
-                                  &gsec_kr->gsk_clist, cc_cache) {
-                LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        cfs_spin_lock(&sec->ps_lock);
+        cfs_hlist_for_each_entry_safe(ctx, pos, next,
+                                      &gsec_kr->gsk_clist, cc_cache) {
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
-                atomic_inc(&ctx->cc_refcount);
+                cfs_atomic_inc(&ctx->cc_refcount);
 
                 if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
-                        hlist_add_head(&ctx->cc_cache, &freelist);
+                        cfs_hlist_add_head(&ctx->cc_cache, &freelist);
                         CWARN("unhashed ctx %p\n", ctx);
                 } else {
-                        LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
-                        atomic_dec(&ctx->cc_refcount);
+                        LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
+                        cfs_atomic_dec(&ctx->cc_refcount);
                 }
         }
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 
         dispose_ctx_list_kr(&freelist);
         EXIT;
@@ -979,14 +981,14 @@ static
 int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
 {
         struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
-        struct hlist_node      *pos, *next;
+        cfs_hlist_node_t       *pos, *next;
         struct ptlrpc_cli_ctx  *ctx;
         struct gss_cli_ctx     *gctx;
         time_t                  now = cfs_time_current_sec();
         ENTRY;
 
-        spin_lock(&sec->ps_lock);
-        hlist_for_each_entry_safe(ctx, pos, next,
+        cfs_spin_lock(&sec->ps_lock);
+        cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                   &gsec_kr->gsk_clist, cc_cache) {
                 struct key             *key;
                 char                    flags_str[40];
@@ -1008,11 +1010,11 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
                            "seq %d, win %u, key %08x(ref %d), "
                            "hdl "LPX64":"LPX64", mech: %s\n",
                            ctx, ctx->cc_vcred.vc_uid,
-                           atomic_read(&ctx->cc_refcount),
+                           cfs_atomic_read(&ctx->cc_refcount),
                            ctx->cc_expire,
                            ctx->cc_expire ?  ctx->cc_expire - now : 0,
                            flags_str,
-                           atomic_read(&gctx->gc_seq),
+                           cfs_atomic_read(&gctx->gc_seq),
                            gctx->gc_win,
                            key ? key->serial : 0,
                            key ? atomic_read(&key->usage) : 0,
@@ -1020,7 +1022,7 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
                            gss_handle_to_u64(&gctx->gc_svc_handle),
                            mech);
         }
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 
         RETURN(0);
 }
@@ -1039,7 +1041,7 @@ int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
 static
 int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
         LASSERT(ctx->cc_sec);
 
         if (cli_ctx_check_death(ctx)) {
@@ -1055,7 +1057,7 @@ int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
 static
 void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
 {
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
         LASSERT(ctx->cc_sec);
 
         cli_ctx_expire(ctx);
@@ -1231,9 +1233,9 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
          */
         LASSERT(cfs_current()->signal->session_keyring);
 
-        lockdep_off();
+        cfs_lockdep_off();
         rc = key_link(cfs_current()->signal->session_keyring, key);
-        lockdep_on();
+        cfs_lockdep_on();
         if (unlikely(rc)) {
                 CERROR("failed to link key %08x to keyring %08x: %d\n",
                        key->serial,
@@ -1280,7 +1282,7 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
                         RETURN(rc);
         }
 
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
         LASSERT(ctx->cc_sec);
 
         ctx_clear_timer_kr(ctx);
@@ -1358,7 +1360,7 @@ out:
                 cli_ctx_expire(ctx);
 
                 if (rc != -ERESTART)
-                        set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+                        cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
         }
 
         /* let user space think it's a success */
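
The key_link() call in gss_kt_instantiate() stays bracketed by cfs_lockdep_off()/cfs_lockdep_on(), giving the lockdep workaround a portable spelling. A sketch of one plausible definition; on builds without lockdep the pair can simply compile away:

/* assumed mapping; actual libcfs definitions may differ */
#ifdef __KERNEL__
# include <linux/lockdep.h>
# define cfs_lockdep_off()      lockdep_off()
# define cfs_lockdep_on()       lockdep_on()
#else
# define cfs_lockdep_off()      do { } while (0)
# define cfs_lockdep_on()       do { } while (0)
#endif
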
index 5bd9f08..788edb3 100644 (file)
@@ -77,7 +77,7 @@
 #include "gss_asn1.h"
 #include "gss_krb5.h"
 
-static spinlock_t krb5_seq_lock;
+static cfs_spinlock_t krb5_seq_lock;
 
 struct krb5_enctype {
         char           *ke_dispname;
@@ -777,9 +777,9 @@ static void fill_krb5_header(struct krb5_ctx *kctx,
         }
 
         khdr->kh_filler = 0xff;
-        spin_lock(&krb5_seq_lock);
+        cfs_spin_lock(&krb5_seq_lock);
         khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
-        spin_unlock(&krb5_seq_lock);
+        cfs_spin_unlock(&krb5_seq_lock);
 }
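
krb5_seq_lock becomes a cfs_spinlock_t, so the sequence-number critical section in fill_krb5_header() is unchanged on Linux, where the wrapper is expected to be a transparent typedef (real definitions presumably in libcfs/include/libcfs/linux/linux-lock.h):

/* assumed mapping; actual libcfs definitions may differ */
#include <linux/spinlock.h>

typedef spinlock_t cfs_spinlock_t;

#define CFS_SPIN_LOCK_UNLOCKED  SPIN_LOCK_UNLOCKED
#define cfs_spin_lock_init(l)   spin_lock_init(l)
#define cfs_spin_lock(l)        spin_lock(l)
#define cfs_spin_unlock(l)      spin_unlock(l)
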
 
 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
@@ -1248,7 +1248,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         fill_krb5_header(kctx, khdr, 1);
 
         /* generate confounder */
-        get_random_bytes(conf, ke->ke_conf_size);
+        ll_get_random_bytes(conf, ke->ke_conf_size);
 
        /* get encryption blocksize. note kc_keye might not be associated with
          * a tfm, currently only for arcfour-hmac */
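
ll_get_random_bytes() is the libcfs spelling of the kernel RNG call used here for the confounder; the assumed Linux mapping is a one-line alias:

/* assumed mapping; actual libcfs definition may differ */
#include <linux/random.h>

#define ll_get_random_bytes(buf, size)  get_random_bytes(buf, size)
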
@@ -1418,7 +1418,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         fill_krb5_header(kctx, khdr, 1);
 
         /* generate confounder */
-        get_random_bytes(conf, ke->ke_conf_size);
+        ll_get_random_bytes(conf, ke->ke_conf_size);
 
        /* get encryption blocksize. note kc_keye might not be associated with
          * a tfm, currently only for arcfour-hmac */
@@ -1828,7 +1828,7 @@ int __init init_kerberos_module(void)
 {
         int status;
 
-        spin_lock_init(&krb5_seq_lock);
+        cfs_spin_lock_init(&krb5_seq_lock);
 
         status = lgss_mech_register(&gss_kerberos_mech);
         if (status)
index f77b509..d0cf3ee 100644 (file)
 #include "gss_api.h"
 
 static CFS_LIST_HEAD(registered_mechs);
-static spinlock_t registered_mechs_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t registered_mechs_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 int lgss_mech_register(struct gss_api_mech *gm)
 {
-        spin_lock(&registered_mechs_lock);
-        list_add(&gm->gm_list, &registered_mechs);
-        spin_unlock(&registered_mechs_lock);
+        cfs_spin_lock(&registered_mechs_lock);
+        cfs_list_add(&gm->gm_list, &registered_mechs);
+        cfs_spin_unlock(&registered_mechs_lock);
         CWARN("Register %s mechanism\n", gm->gm_name);
         return 0;
 }
 
 void lgss_mech_unregister(struct gss_api_mech *gm)
 {
-        spin_lock(&registered_mechs_lock);
-        list_del(&gm->gm_list);
-        spin_unlock(&registered_mechs_lock);
+        cfs_spin_lock(&registered_mechs_lock);
+        cfs_list_del(&gm->gm_list);
+        cfs_spin_unlock(&registered_mechs_lock);
         CWARN("Unregister %s mechanism\n", gm->gm_name);
 }
 
 
 struct gss_api_mech *lgss_mech_get(struct gss_api_mech *gm)
 {
-        __module_get(gm->gm_owner);
+        __cfs_module_get(gm->gm_owner);
         return gm;
 }
 
@@ -99,16 +99,16 @@ struct gss_api_mech *lgss_name_to_mech(char *name)
 {
         struct gss_api_mech *pos, *gm = NULL;
 
-        spin_lock(&registered_mechs_lock);
-        list_for_each_entry(pos, &registered_mechs, gm_list) {
+        cfs_spin_lock(&registered_mechs_lock);
+        cfs_list_for_each_entry(pos, &registered_mechs, gm_list) {
                 if (0 == strcmp(name, pos->gm_name)) {
-                        if (!try_module_get(pos->gm_owner))
+                        if (!cfs_try_module_get(pos->gm_owner))
                                 continue;
                         gm = pos;
                         break;
                 }
         }
-        spin_unlock(&registered_mechs_lock);
+        cfs_spin_unlock(&registered_mechs_lock);
         return gm;
 
 }
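
The registered_mechs list moves to the cfs_list_* API; on Linux cfs_list_t is expected to alias struct list_head, so cfs_list_add()/cfs_list_del()/cfs_list_for_each_entry() cost nothing. A sketch of the assumed mapping:

/* assumed mapping; actual libcfs definitions may differ */
#include <linux/list.h>

typedef struct list_head cfs_list_t;

#define CFS_LIST_HEAD(name)       LIST_HEAD(name)
#define cfs_list_add(new, head)   list_add(new, head)
#define cfs_list_del(entry)       list_del(entry)
#define cfs_list_del_init(entry)  list_del_init(entry)
#define cfs_list_empty(head)      list_empty(head)
#define cfs_list_for_each_entry(pos, head, member) \
        list_for_each_entry(pos, head, member)
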
@@ -129,24 +129,24 @@ struct gss_api_mech *lgss_subflavor_to_mech(__u32 subflavor)
 {
         struct gss_api_mech *pos, *gm = NULL;
 
-        spin_lock(&registered_mechs_lock);
-        list_for_each_entry(pos, &registered_mechs, gm_list) {
-                if (!try_module_get(pos->gm_owner))
+        cfs_spin_lock(&registered_mechs_lock);
+        cfs_list_for_each_entry(pos, &registered_mechs, gm_list) {
+                if (!cfs_try_module_get(pos->gm_owner))
                         continue;
                 if (!mech_supports_subflavor(pos, subflavor)) {
-                        module_put(pos->gm_owner);
+                        cfs_module_put(pos->gm_owner);
                         continue;
                 }
                 gm = pos;
                 break;
         }
-        spin_unlock(&registered_mechs_lock);
+        cfs_spin_unlock(&registered_mechs_lock);
         return gm;
 }
 
 void lgss_mech_put(struct gss_api_mech *gm)
 {
-        module_put(gm->gm_owner);
+        cfs_module_put(gm->gm_owner);
 }
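
Module reference counting in the mech get/put paths follows the same pattern; a minimal sketch, assuming direct aliases over the kernel module API:

/* assumed mapping; actual libcfs definitions may differ */
#include <linux/module.h>

#define __cfs_module_get(m)     __module_get(m)
#define cfs_try_module_get(m)   try_module_get(m)
#define cfs_module_put(m)       module_put(m)
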
 
 /* The mech could probably be determined from the token instead, but it's just
index ade0f53..af9da9a 100644 (file)
@@ -127,36 +127,36 @@ void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
 
         OBD_FREE_PTR(gctx);
 
-        atomic_dec(&sec->ps_nctx);
+        cfs_atomic_dec(&sec->ps_nctx);
         sptlrpc_sec_put(sec);
 }
 
 static
-void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
+void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
 {
-        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
-        atomic_inc(&ctx->cc_refcount);
-        hlist_add_head(&ctx->cc_cache, hash);
+        cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+        cfs_atomic_inc(&ctx->cc_refcount);
+        cfs_hlist_add_head(&ctx->cc_cache, hash);
 }
 
 /*
  * caller must hold spinlock
  */
 static
-void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
+void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
 {
         LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
-        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
-        LASSERT(!hlist_unhashed(&ctx->cc_cache));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+        LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
 
-        clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+        cfs_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
 
-        if (atomic_dec_and_test(&ctx->cc_refcount)) {
-                __hlist_del(&ctx->cc_cache);
-                hlist_add_head(&ctx->cc_cache, freelist);
+        if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
+                __cfs_hlist_del(&ctx->cc_cache);
+                cfs_hlist_add_head(&ctx->cc_cache, freelist);
         } else {
-                hlist_del_init(&ctx->cc_cache);
+                cfs_hlist_del_init(&ctx->cc_cache);
         }
 }
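
ctx_unhash_pf() keys off cfs_atomic_dec_and_test() to decide whether the context moves to the freelist for destruction or is merely unhashed. The cfs_atomic_* family is expected to alias atomic_t directly on Linux; a sketch:

/* assumed mapping; actual libcfs definitions may differ */
#include <asm/atomic.h>

typedef atomic_t cfs_atomic_t;

#define CFS_ATOMIC_INIT(i)          ATOMIC_INIT(i)
#define cfs_atomic_read(v)          atomic_read(v)
#define cfs_atomic_set(v, i)        atomic_set(v, i)
#define cfs_atomic_inc(v)           atomic_inc(v)
#define cfs_atomic_inc_return(v)    atomic_inc_return(v)
#define cfs_atomic_dec(v)           atomic_dec(v)
#define cfs_atomic_dec_and_test(v)  atomic_dec_and_test(v)
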
 
@@ -164,7 +164,8 @@ void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
  * return 1 if the context is dead.
  */
 static
-int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
+int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
+                       cfs_hlist_head_t *freelist)
 {
         if (cli_ctx_check_death(ctx)) {
                 if (freelist)
@@ -177,11 +178,11 @@ int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
 
 static inline
 int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
-                              struct hlist_head *freelist)
+                              cfs_hlist_head_t *freelist)
 {
         LASSERT(ctx->cc_sec);
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
-        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
 
         return ctx_check_death_pf(ctx, freelist);
 }
@@ -197,17 +198,19 @@ int ctx_match_pf(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
 }
 
 static
-void ctx_list_destroy_pf(struct hlist_head *head)
+void ctx_list_destroy_pf(cfs_hlist_head_t *head)
 {
         struct ptlrpc_cli_ctx *ctx;
 
-        while (!hlist_empty(head)) {
-                ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_cache);
+        while (!cfs_hlist_empty(head)) {
+                ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
+                                      cc_cache);
 
-                LASSERT(atomic_read(&ctx->cc_refcount) == 0);
-                LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
+                LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT,
+                                     &ctx->cc_flags) == 0);
 
-                hlist_del_init(&ctx->cc_cache);
+                cfs_hlist_del_init(&ctx->cc_cache);
                 ctx_destroy_pf(ctx->cc_sec, ctx);
         }
 }
@@ -230,22 +233,22 @@ static
 void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
 {
         LASSERT(ctx->cc_sec);
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
         cli_ctx_expire(ctx);
 
-        spin_lock(&ctx->cc_sec->ps_lock);
+        cfs_spin_lock(&ctx->cc_sec->ps_lock);
 
-        if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
-                LASSERT(!hlist_unhashed(&ctx->cc_cache));
-                LASSERT(atomic_read(&ctx->cc_refcount) > 1);
+        if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
+                LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
 
-                hlist_del_init(&ctx->cc_cache);
-                if (atomic_dec_and_test(&ctx->cc_refcount))
+                cfs_hlist_del_init(&ctx->cc_cache);
+                if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
                         LBUG();
         }
 
-        spin_unlock(&ctx->cc_sec->ps_lock);
+        cfs_spin_unlock(&ctx->cc_sec->ps_lock);
 }
 
 /****************************************
@@ -264,7 +267,7 @@ void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
 {
         struct gss_sec_pipefs *gsec_pf;
         struct ptlrpc_cli_ctx *ctx;
-        struct hlist_node *pos, *next;
+        cfs_hlist_node_t      *pos, *next;
         CFS_HLIST_HEAD(freelist);
         unsigned int hash;
         ENTRY;
@@ -275,10 +278,10 @@ void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
                               (__u64) new->cc_vcred.vc_uid);
         LASSERT(hash < gsec_pf->gsp_chash_size);
 
-        spin_lock(&gsec->gs_base.ps_lock);
+        cfs_spin_lock(&gsec->gs_base.ps_lock);
 
-        hlist_for_each_entry_safe(ctx, pos, next,
-                                  &gsec_pf->gsp_chash[hash], cc_cache) {
+        cfs_hlist_for_each_entry_safe(ctx, pos, next,
+                                      &gsec_pf->gsp_chash[hash], cc_cache) {
                 if (!ctx_match_pf(ctx, &new->cc_vcred))
                         continue;
 
@@ -289,7 +292,7 @@ void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
 
         ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
 
-        spin_unlock(&gsec->gs_base.ps_lock);
+        cfs_spin_unlock(&gsec->gs_base.ps_lock);
 
         ctx_list_destroy_pf(&freelist);
         EXIT;
@@ -323,11 +326,11 @@ int gss_install_rvs_cli_ctx_pf(struct gss_sec *gsec,
 
 static
 void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
-                         struct hlist_head *freelist)
+                         cfs_hlist_head_t *freelist)
 {
         struct ptlrpc_sec       *sec;
         struct ptlrpc_cli_ctx   *ctx;
-        struct hlist_node       *pos, *next;
+        cfs_hlist_node_t        *pos, *next;
         int i;
         ENTRY;
 
@@ -336,8 +339,8 @@ void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
         CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);
 
         for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
-                hlist_for_each_entry_safe(ctx, pos, next,
-                                          &gsec_pf->gsp_chash[i], cc_cache)
+                cfs_hlist_for_each_entry_safe(ctx, pos, next,
+                                              &gsec_pf->gsp_chash[i], cc_cache)
                         ctx_check_death_locked_pf(ctx, freelist);
         }
 
@@ -363,7 +366,7 @@ struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
                 hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
 
         alloc_size = sizeof(*gsec_pf) +
-                     sizeof(struct hlist_head) * hash_size;
+                     sizeof(cfs_hlist_head_t) * hash_size;
 
         OBD_ALLOC(gsec_pf, alloc_size);
         if (!gsec_pf)
@@ -413,7 +416,7 @@ void gss_sec_destroy_pf(struct ptlrpc_sec *sec)
         gss_sec_destroy_common(gsec);
 
         OBD_FREE(gsec, sizeof(*gsec_pf) +
-                       sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
+                       sizeof(cfs_hlist_head_t) * gsec_pf->gsp_chash_size);
 }
 
 static
@@ -424,13 +427,13 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
         struct gss_sec         *gsec;
         struct gss_sec_pipefs  *gsec_pf;
         struct ptlrpc_cli_ctx  *ctx = NULL, *new = NULL;
-        struct hlist_head      *hash_head;
-        struct hlist_node      *pos, *next;
+        cfs_hlist_head_t       *hash_head;
+        cfs_hlist_node_t       *pos, *next;
         CFS_HLIST_HEAD(freelist);
         unsigned int            hash, gc = 0, found = 0;
         ENTRY;
 
-        might_sleep();
+        cfs_might_sleep();
 
         gsec = container_of(sec, struct gss_sec, gs_base);
         gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
@@ -441,7 +444,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
         LASSERT(hash < gsec_pf->gsp_chash_size);
 
 retry:
-        spin_lock(&sec->ps_lock);
+        cfs_spin_lock(&sec->ps_lock);
 
         /* gc_next == 0 means never do gc */
         if (remove_dead && sec->ps_gc_next &&
@@ -450,7 +453,7 @@ retry:
                 gc = 1;
         }
 
-        hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
+        cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
                 if (gc == 0 &&
                     ctx_check_death_locked_pf(ctx,
                                               remove_dead ? &freelist : NULL))
@@ -465,19 +468,19 @@ retry:
         if (found) {
                 if (new && new != ctx) {
                         /* lost the race, just free it */
-                        hlist_add_head(&new->cc_cache, &freelist);
+                        cfs_hlist_add_head(&new->cc_cache, &freelist);
                         new = NULL;
                 }
 
                 /* hot node, move to head */
                 if (hash_head->first != &ctx->cc_cache) {
-                        __hlist_del(&ctx->cc_cache);
-                        hlist_add_head(&ctx->cc_cache, hash_head);
+                        __cfs_hlist_del(&ctx->cc_cache);
+                        cfs_hlist_add_head(&ctx->cc_cache, hash_head);
                 }
         } else {
                 /* don't allocate for reverse sec */
                 if (sec_is_reverse(sec)) {
-                        spin_unlock(&sec->ps_lock);
+                        cfs_spin_unlock(&sec->ps_lock);
                         RETURN(NULL);
                 }
 
@@ -485,10 +488,11 @@ retry:
                         ctx_enhash_pf(new, hash_head);
                         ctx = new;
                 } else if (create) {
-                        spin_unlock(&sec->ps_lock);
+                        cfs_spin_unlock(&sec->ps_lock);
                         new = ctx_create_pf(sec, vcred);
                         if (new) {
-                                clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
+                                cfs_clear_bit(PTLRPC_CTX_NEW_BIT,
+                                              &new->cc_flags);
                                 goto retry;
                         }
                 } else
@@ -497,9 +501,9 @@ retry:
 
         /* hold a ref */
         if (ctx)
-                atomic_inc(&ctx->cc_refcount);
+                cfs_atomic_inc(&ctx->cc_refcount);
 
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 
         /* the allocator of the context must give the first push to refresh */
         if (new) {
@@ -516,13 +520,13 @@ void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec,
                             struct ptlrpc_cli_ctx *ctx,
                             int sync)
 {
-        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
-        LASSERT(hlist_unhashed(&ctx->cc_cache));
+        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+        LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
 
        /* if async is required, we must clear the UPTODATE bit to prevent extra
          * rpcs during destroy procedure. */
         if (!sync)
-                clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+                cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
         /* destroy this context */
         ctx_destroy_pf(sec, ctx);
@@ -546,7 +550,7 @@ int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
         struct gss_sec          *gsec;
         struct gss_sec_pipefs   *gsec_pf;
         struct ptlrpc_cli_ctx   *ctx;
-        struct hlist_node *pos, *next;
+        cfs_hlist_node_t        *pos, *next;
         CFS_HLIST_HEAD(freelist);
         int i, busy = 0;
         ENTRY;
@@ -556,35 +560,36 @@ int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
         gsec = container_of(sec, struct gss_sec, gs_base);
         gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
 
-        spin_lock(&sec->ps_lock);
+        cfs_spin_lock(&sec->ps_lock);
         for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
-                hlist_for_each_entry_safe(ctx, pos, next,
-                                          &gsec_pf->gsp_chash[i], cc_cache) {
-                        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+                cfs_hlist_for_each_entry_safe(ctx, pos, next,
+                                              &gsec_pf->gsp_chash[i],
+                                              cc_cache) {
+                        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
                         if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
                                 continue;
 
-                        if (atomic_read(&ctx->cc_refcount) > 1) {
+                        if (cfs_atomic_read(&ctx->cc_refcount) > 1) {
                                 busy++;
                                 if (!force)
                                         continue;
 
                                 CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
                                       "grace %d\n",
-                                      atomic_read(&ctx->cc_refcount),
+                                      cfs_atomic_read(&ctx->cc_refcount),
                                       ctx, ctx->cc_vcred.vc_uid,
                                       sec2target_str(ctx->cc_sec), grace);
                         }
                         ctx_unhash_pf(ctx, &freelist);
 
-                        set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+                        cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
                         if (!grace)
-                                clear_bit(PTLRPC_CTX_UPTODATE_BIT,
-                                          &ctx->cc_flags);
+                                cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT,
+                                              &ctx->cc_flags);
                 }
         }
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 
         ctx_list_destroy_pf(&freelist);
         RETURN(busy);
@@ -633,20 +638,20 @@ struct gss_upcall_msg_data {
 
 struct gss_upcall_msg {
         struct rpc_pipe_msg             gum_base;
-        atomic_t                        gum_refcount;
-        struct list_head                gum_list;
+        cfs_atomic_t                    gum_refcount;
+        cfs_list_t                      gum_list;
         __u32                           gum_mechidx;
         struct gss_sec                 *gum_gsec;
         struct gss_cli_ctx             *gum_gctx;
         struct gss_upcall_msg_data      gum_data;
 };
 
-static atomic_t upcall_seq = ATOMIC_INIT(0);
+static cfs_atomic_t upcall_seq = CFS_ATOMIC_INIT(0);
 
 static inline
 __u32 upcall_get_sequence(void)
 {
-        return (__u32) atomic_inc_return(&upcall_seq);
+        return (__u32) cfs_atomic_inc_return(&upcall_seq);
 }
 
 enum mech_idx_t {
@@ -664,20 +669,20 @@ __u32 mech_name2idx(const char *name)
 /* pipefs dentries for each mechanisms */
 static struct dentry *de_pipes[MECH_MAX] = { NULL, };
 /* all upcall messages linked here */
-static struct list_head upcall_lists[MECH_MAX];
+static cfs_list_t upcall_lists[MECH_MAX];
 /* and protected by this */
-static spinlock_t upcall_locks[MECH_MAX];
+static cfs_spinlock_t upcall_locks[MECH_MAX];
 
 static inline
 void upcall_list_lock(int idx)
 {
-        spin_lock(&upcall_locks[idx]);
+        cfs_spin_lock(&upcall_locks[idx]);
 }
 
 static inline
 void upcall_list_unlock(int idx)
 {
-        spin_unlock(&upcall_locks[idx]);
+        cfs_spin_unlock(&upcall_locks[idx]);
 }
 
 static
@@ -686,7 +691,7 @@ void upcall_msg_enlist(struct gss_upcall_msg *msg)
         __u32 idx = msg->gum_mechidx;
 
         upcall_list_lock(idx);
-        list_add(&msg->gum_list, &upcall_lists[idx]);
+        cfs_list_add(&msg->gum_list, &upcall_lists[idx]);
         upcall_list_unlock(idx);
 }
 
@@ -696,7 +701,7 @@ void upcall_msg_delist(struct gss_upcall_msg *msg)
         __u32 idx = msg->gum_mechidx;
 
         upcall_list_lock(idx);
-        list_del_init(&msg->gum_list);
+        cfs_list_del_init(&msg->gum_list);
         upcall_list_unlock(idx);
 }
 
@@ -708,9 +713,9 @@ static
 void gss_release_msg(struct gss_upcall_msg *gmsg)
 {
         ENTRY;
-        LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+        LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
 
-        if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
+        if (!cfs_atomic_dec_and_test(&gmsg->gum_refcount)) {
                 EXIT;
                 return;
         }
@@ -721,8 +726,8 @@ void gss_release_msg(struct gss_upcall_msg *gmsg)
                 gmsg->gum_gctx = NULL;
         }
 
-        LASSERT(list_empty(&gmsg->gum_list));
-        LASSERT(list_empty(&gmsg->gum_base.list));
+        LASSERT(cfs_list_empty(&gmsg->gum_list));
+        LASSERT(cfs_list_empty(&gmsg->gum_base.list));
         OBD_FREE_PTR(gmsg);
         EXIT;
 }
@@ -735,12 +740,12 @@ void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
         LASSERT(idx < MECH_MAX);
         LASSERT_SPIN_LOCKED(&upcall_locks[idx]);
 
-        if (list_empty(&gmsg->gum_list))
+        if (cfs_list_empty(&gmsg->gum_list))
                 return;
 
-        list_del_init(&gmsg->gum_list);
-        LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
-        atomic_dec(&gmsg->gum_refcount);
+        cfs_list_del_init(&gmsg->gum_list);
+        LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
+        cfs_atomic_dec(&gmsg->gum_refcount);
 }
 
 static
@@ -760,9 +765,9 @@ void gss_msg_fail_ctx(struct gss_upcall_msg *gmsg)
         if (gmsg->gum_gctx) {
                 struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
 
-                LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
                 sptlrpc_cli_ctx_expire(ctx);
-                set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+                cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
         }
 }
 
@@ -772,14 +777,14 @@ struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq)
         struct gss_upcall_msg *gmsg;
 
         upcall_list_lock(mechidx);
-        list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
+        cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
                 if (gmsg->gum_data.gum_seq != seq)
                         continue;
 
-                LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+                LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
                 LASSERT(gmsg->gum_mechidx == mechidx);
 
-                atomic_inc(&gmsg->gum_refcount);
+                cfs_atomic_inc(&gmsg->gum_refcount);
                 upcall_list_unlock(mechidx);
                 return gmsg;
         }
@@ -816,7 +821,7 @@ ssize_t gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
 
         if (mlen > buflen)
                 mlen = buflen;
-        left = copy_to_user(dst, data, mlen);
+        left = cfs_copy_to_user(dst, data, mlen);
         if (left < 0) {
                 msg->errno = left;
                 RETURN(left);
@@ -847,7 +852,7 @@ ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
         if (!buf)
                 RETURN(-ENOMEM);
 
-        if (copy_from_user(buf, src, mlen)) {
+        if (cfs_copy_from_user(buf, src, mlen)) {
                 CERROR("failed copy user space data\n");
                 GOTO(out_free, rc = -EFAULT);
         }
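
cfs_copy_to_user()/cfs_copy_from_user() wrap the user-space copy helpers used by the pipefs upcall/downcall paths; the assumed Linux mapping:

/* assumed mapping; actual libcfs definitions may differ */
#include <asm/uaccess.h>

#define cfs_copy_to_user(to, from, n)    copy_to_user(to, from, n)
#define cfs_copy_from_user(to, from, n)  copy_from_user(to, from, n)
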
@@ -875,7 +880,7 @@ ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
         gss_unhash_msg(gss_msg);
         gctx = gss_msg->gum_gctx;
         LASSERT(gctx);
-        LASSERT(atomic_read(&gctx->gc_base.cc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&gctx->gc_base.cc_refcount) > 0);
 
         /* timeout is not in use for now */
         if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
@@ -924,11 +929,11 @@ ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
                 ctx = &gctx->gc_base;
                 sptlrpc_cli_ctx_expire(ctx);
                 if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
-                        set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+                        cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
 
                 CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
                        ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
-                       test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
+                       cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
                        "fatal error" : "non-fatal");
         }
 
@@ -954,7 +959,7 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
         static cfs_time_t               ratelimit = 0;
         ENTRY;
 
-        LASSERT(list_empty(&msg->list));
+        LASSERT(cfs_list_empty(&msg->list));
 
         /* normally errno is >= 0 */
         if (msg->errno >= 0) {
@@ -964,14 +969,14 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
 
         gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
         gumd = &gmsg->gum_data;
-        LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+        LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
 
         CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
                "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
                gumd->gum_nid, (int) sizeof(gumd->gum_obd),
                gumd->gum_obd, msg->errno);
 
-        atomic_inc(&gmsg->gum_refcount);
+        cfs_atomic_inc(&gmsg->gum_refcount);
         gss_unhash_msg(gmsg);
         if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
                 cfs_time_t now = cfs_time_current_sec();
@@ -997,14 +1002,14 @@ void gss_pipe_release(struct inode *inode)
         LASSERT(idx < MECH_MAX);
 
         upcall_list_lock(idx);
-        while (!list_empty(&upcall_lists[idx])) {
+        while (!cfs_list_empty(&upcall_lists[idx])) {
                 struct gss_upcall_msg      *gmsg;
                 struct gss_upcall_msg_data *gumd;
 
-                gmsg = list_entry(upcall_lists[idx].next,
-                                  struct gss_upcall_msg, gum_list);
+                gmsg = cfs_list_entry(upcall_lists[idx].next,
+                                      struct gss_upcall_msg, gum_list);
                 gumd = &gmsg->gum_data;
-                LASSERT(list_empty(&gmsg->gum_base.list));
+                LASSERT(cfs_list_empty(&gmsg->gum_base.list));
 
                 CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
                        "nid "LPX64", obd %.*s\n", gmsg,
@@ -1013,7 +1018,7 @@ void gss_pipe_release(struct inode *inode)
                        gumd->gum_obd);
 
                 gmsg->gum_base.errno = -EPIPE;
-                atomic_inc(&gmsg->gum_refcount);
+                cfs_atomic_inc(&gmsg->gum_refcount);
                 gss_unhash_msg_nolock(gmsg);
 
                 gss_msg_fail_ctx(gmsg);
@@ -1046,7 +1051,7 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
         int                         rc = 0;
         ENTRY;
 
-        might_sleep();
+        cfs_might_sleep();
 
         LASSERT(ctx->cc_sec);
         LASSERT(ctx->cc_sec->ps_import);
@@ -1072,7 +1077,7 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
         gmsg->gum_base.errno = 0;
 
         /* init upcall msg */
-        atomic_set(&gmsg->gum_refcount, 1);
+        cfs_atomic_set(&gmsg->gum_refcount, 1);
         gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
         gmsg->gum_gsec = gsec;
         gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
@@ -1210,7 +1215,7 @@ int __init gss_init_pipefs_upcall(void)
 
         de_pipes[MECH_KRB5] = de;
         CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
-        spin_lock_init(&upcall_locks[MECH_KRB5]);
+        cfs_spin_lock_init(&upcall_locks[MECH_KRB5]);
 
         return 0;
 }
@@ -1221,7 +1226,7 @@ void __exit gss_exit_pipefs_upcall(void)
         __u32   i;
 
         for (i = 0; i < MECH_MAX; i++) {
-                LASSERT(list_empty(&upcall_lists[i]));
+                LASSERT(cfs_list_empty(&upcall_lists[i]));
 
                /* dput of the pipe dentry here might cause an lgssd oops. */
                 de_pipes[i] = NULL;
index d972b00..fe03be0 100644 (file)
@@ -122,7 +122,7 @@ int rawobj_serialize(rawobj_t *obj, __u32 **buf, __u32 *buflen)
         LASSERT(buf);
         LASSERT(buflen);
 
-        len = size_round4(obj->len);
+        len = cfs_size_round4(obj->len);
 
         if (*buflen < 4 + len) {
                 CERROR("buflen %u <  %u\n", *buflen, 4 + len);
@@ -157,7 +157,7 @@ static int __rawobj_extract(rawobj_t *obj, __u32 **buf, __u32 *buflen,
                 return 0;
         }
 
-        len = local ? obj->len : size_round4(obj->len);
+        len = local ? obj->len : cfs_size_round4(obj->len);
         if (*buflen < len) {
                 CERROR("buflen %u < %u\n", *buflen, len);
                 obj->len = 0;
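
cfs_size_round4() rounds a length up to the next 4-byte boundary for the on-wire rawobj encoding. A sketch of the assumed definition:

/* assumed definition; actual libcfs version may differ.
 * rounds up to a multiple of 4, e.g. 5 -> 8, 8 -> 8 */
static inline int cfs_size_round4(int val)
{
        return (val + 3) & (~0x3);
}
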
index ba9e808..9877c9f 100644 (file)
 
 #define GSS_SVC_UPCALL_TIMEOUT  (20)
 
-static spinlock_t __ctx_index_lock;
+static cfs_spinlock_t __ctx_index_lock;
 static __u64 __ctx_index;
 
 __u64 gss_get_next_ctx_index(void)
 {
         __u64 idx;
 
-        spin_lock(&__ctx_index_lock);
+        cfs_spin_lock(&__ctx_index_lock);
         idx = __ctx_index++;
-        spin_unlock(&__ctx_index_lock);
+        cfs_spin_unlock(&__ctx_index_lock);
 
         return idx;
 }
@@ -106,7 +106,7 @@ static inline unsigned long hash_mem(char *buf, int length, int bits)
                 len++;
 
                 if ((len & (BITS_PER_LONG/8-1)) == 0)
-                        hash = hash_long(hash^l, BITS_PER_LONG);
+                        hash = cfs_hash_long(hash^l, BITS_PER_LONG);
         } while (len);
 
         return hash >> (BITS_PER_LONG - bits);
@@ -124,7 +124,7 @@ struct rsi {
         struct cache_head       h;
         __u32                   lustre_svc;
         __u64                   nid;
-        wait_queue_head_t       waitq;
+        cfs_waitq_t             waitq;
         rawobj_t                in_handle, in_token;
         rawobj_t                out_handle, out_token;
         int                     major_status, minor_status;
@@ -193,7 +193,7 @@ static inline void __rsi_init(struct rsi *new, struct rsi *item)
 
         new->lustre_svc = item->lustre_svc;
         new->nid = item->nid;
-        init_waitqueue_head(&new->waitq);
+        cfs_waitq_init(&new->waitq);
 }
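
The rsi cache wait logic switches to the cfs_waitq_* primitives. One plausible Linux mapping is a thin layer over wait_queue_head_t (the real libcfs versions are small functions in libcfs/libcfs/linux/linux-prim.c and may differ in detail):

/* assumed mapping; actual libcfs definitions may differ */
#include <linux/wait.h>
#include <linux/sched.h>

typedef wait_queue_head_t cfs_waitq_t;
typedef wait_queue_t      cfs_waitlink_t;

#define cfs_waitq_init(wq)       init_waitqueue_head(wq)
#define cfs_waitlink_init(l)     init_waitqueue_entry(l, current)
#define cfs_waitq_add(wq, l)     add_wait_queue(wq, l)
#define cfs_waitq_del(wq, l)     remove_wait_queue(wq, l)
#define cfs_waitq_broadcast(wq)  wake_up_all(wq)
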
 
 static inline void __rsi_update(struct rsi *new, struct rsi *item)
@@ -338,7 +338,7 @@ static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
 out:
         rsi_free(&rsii);
         if (rsip) {
-                wake_up_all(&rsip->waitq);
+                cfs_waitq_broadcast(&rsip->waitq);
                 cache_put(&rsip->h, &rsi_cache);
         } else {
                 status = -ENOMEM;
@@ -355,7 +355,7 @@ static void rsi_put(struct cache_head *item, struct cache_detail *cd)
 {
         struct rsi *rsi = container_of(item, struct rsi, h);
 
-        LASSERT(atomic_read(&item->refcnt) > 0);
+        LASSERT(cfs_atomic_read(&item->refcnt) > 0);
 
         if (cache_put(item, cd)) {
                 LASSERT(item->next == NULL);
@@ -456,7 +456,7 @@ static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
 out:
         rsi_free(&rsii);
         if (rsip) {
-                wake_up_all(&rsip->waitq);
+                cfs_waitq_broadcast(&rsip->waitq);
                 rsi_put(&rsip->h, &rsi_cache);
         }
 
@@ -573,7 +573,7 @@ static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
         tmp->ctx.gsc_mechctx = NULL;
 
         memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
-        spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+        cfs_spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
 }
 
 #ifdef HAVE_SUNRPC_CACHE_V2
@@ -688,7 +688,7 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
                 goto out;
         if (rv == -ENOENT) {
                 CERROR("NOENT? set rsc entry negative\n");
-                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+                cfs_set_bit(CACHE_NEGATIVE, &rsci.h.flags);
         } else {
                 rawobj_t tmp_buf;
                 unsigned long ctx_expiry;
@@ -750,7 +750,7 @@ static void rsc_put(struct cache_head *item, struct cache_detail *cd)
 {
         struct rsc *rsci = container_of(item, struct rsc, h);
 
-        LASSERT(atomic_read(&item->refcnt) > 0);
+        LASSERT(cfs_atomic_read(&item->refcnt) > 0);
 
         if (cache_put(item, cd)) {
                 LASSERT(item->next == NULL);
@@ -835,7 +835,7 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
                 goto out;
         if (rv == -ENOENT) {
                 CERROR("NOENT? set rsc entry negative\n");
-                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+                cfs_set_bit(CACHE_NEGATIVE, &rsci.h.flags);
         } else {
                 struct gss_api_mech *gm;
                 rawobj_t tmp_buf;
@@ -960,7 +960,7 @@ static void rsc_flush(rsc_entry_match *match, long data)
         int n;
         ENTRY;
 
-        write_lock(&rsc_cache.hash_lock);
+        cfs_write_lock(&rsc_cache.hash_lock);
         for (n = 0; n < RSC_HASHMAX; n++) {
                 for (ch = &rsc_cache.hash_table[n]; *ch;) {
                         rscp = container_of(*ch, struct rsc, h);
@@ -974,12 +974,12 @@ static void rsc_flush(rsc_entry_match *match, long data)
                         *ch = (*ch)->next;
                         rscp->h.next = NULL;
                         cache_get(&rscp->h);
-                        set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+                        cfs_set_bit(CACHE_NEGATIVE, &rscp->h.flags);
                         COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
                         rsc_cache.entries--;
                 }
         }
-        write_unlock(&rsc_cache.hash_lock);
+        cfs_write_unlock(&rsc_cache.hash_lock);
         EXIT;
 }
 
@@ -1207,7 +1207,7 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
         struct ptlrpc_reply_state *rs;
         struct rsc                *rsci = NULL;
         struct rsi                *rsip = NULL, rsikey;
-        wait_queue_t               wait;
+        cfs_waitlink_t             wait;
         int                        replen = sizeof(struct ptlrpc_body);
         struct gss_rep_header     *rephdr;
         int                        first_check = 1;
@@ -1246,9 +1246,9 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
         }
 
         cache_get(&rsip->h); /* take an extra ref */
-        init_waitqueue_head(&rsip->waitq);
-        init_waitqueue_entry(&wait, current);
-        add_wait_queue(&rsip->waitq, &wait);
+        cfs_waitq_init(&rsip->waitq);
+        cfs_waitlink_init(&wait);
+        cfs_waitq_add(&rsip->waitq, &wait);
 
 cache_check:
         /* Note each time cache_check() will drop a reference if return
@@ -1263,13 +1263,14 @@ cache_check:
                         first_check = 0;
 
                         read_lock(&rsi_cache.hash_lock);
-                        valid = test_bit(CACHE_VALID, &rsip->h.flags);
+                        valid = cfs_test_bit(CACHE_VALID, &rsip->h.flags);
                         if (valid == 0)
-                                set_current_state(TASK_INTERRUPTIBLE);
+                                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                         read_unlock(&rsi_cache.hash_lock);
 
                         if (valid == 0)
-                                schedule_timeout(GSS_SVC_UPCALL_TIMEOUT * HZ);
+                                cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
+                                                     CFS_HZ);
 
                         cache_get(&rsip->h);
                         goto cache_check;
@@ -1289,7 +1290,7 @@ cache_check:
                 break;
         }
 
-        remove_wait_queue(&rsip->waitq, &wait);
+        cfs_waitq_del(&rsip->waitq, &wait);
         cache_put(&rsip->h, &rsi_cache);
 
         if (rc)
@@ -1325,7 +1326,7 @@ cache_check:
         }
 
         grctx->src_init = 1;
-        grctx->src_reserve_len = size_round4(rsip->out_token.len);
+        grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);
 
         rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
         if (rc) {
@@ -1369,7 +1370,7 @@ out:
         if (rsci) {
                 /* if anything went wrong, we don't keep the context too */
                 if (rc != SECSVC_OK)
-                        set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+                        cfs_set_bit(CACHE_NEGATIVE, &rsci->h.flags);
                 else
                         CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
                                gss_handle_to_u64(&rsci->handle));
@@ -1407,7 +1408,7 @@ void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
         struct rsc *rsc = container_of(ctx, struct rsc, ctx);
 
         /* can't be found */
-        set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+        cfs_set_bit(CACHE_NEGATIVE, &rsc->h.flags);
         /* to be removed at next scan */
         rsc->h.expiry_time = 1;
 }
@@ -1416,14 +1417,14 @@ int __init gss_init_svc_upcall(void)
 {
         int     i;
 
-        spin_lock_init(&__ctx_index_lock);
+        cfs_spin_lock_init(&__ctx_index_lock);
         /*
          * this helps reduce context index conflicts. after a server reboot,
          * conflicting requests from clients might be filtered out by the
          * initial sequence number check, leaving no chance to send an error
          * notification back to clients.
          */
-        get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+        ll_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
 
 
         cache_register(&rsi_cache);
@@ -1437,9 +1438,9 @@ int __init gss_init_svc_upcall(void)
         for (i = 0; i < 6; i++) {
                 if (atomic_read(&rsi_cache.readers) > 0)
                         break;
-                set_current_state(TASK_UNINTERRUPTIBLE);
-                LASSERT(HZ >= 4);
-                schedule_timeout(HZ / 4);
+                cfs_set_current_state(CFS_TASK_UNINT);
+                LASSERT(CFS_HZ >= 4);
+                cfs_schedule_timeout(CFS_HZ / 4);
         }
 
         if (atomic_read(&rsi_cache.readers) == 0)
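
The polling loop above also picks up the portable scheduling names. A sketch of the assumed Linux mapping (CFS_TASK_UNINT as the uninterruptible state is an assumption here, chosen to match the interruptible spelling used earlier in this file):

/* assumed mapping; actual libcfs definitions may differ */
#include <linux/sched.h>

#define CFS_HZ                        HZ
#define CFS_TASK_INTERRUPTIBLE        TASK_INTERRUPTIBLE
#define CFS_TASK_UNINT                TASK_UNINTERRUPTIBLE
#define cfs_set_current_state(state)  set_current_state(state)
#define cfs_schedule_timeout(t)       schedule_timeout(t)
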
index 489c870..e30c122 100644 (file)
@@ -70,26 +70,26 @@ static struct proc_dir_entry *gss_proc_lk = NULL;
  * statistic of "out-of-sequence-window"
  */
 static struct {
-        spinlock_t      oos_lock;
-        atomic_t        oos_cli_count;       /* client occurrence */
+        cfs_spinlock_t  oos_lock;
+        cfs_atomic_t    oos_cli_count;       /* client occurrence */
         int             oos_cli_behind;      /* client max seqs behind */
-        atomic_t        oos_svc_replay[3];   /* server replay detected */
-        atomic_t        oos_svc_pass[3];     /* server verified ok */
+        cfs_atomic_t    oos_svc_replay[3];   /* server replay detected */
+        cfs_atomic_t    oos_svc_pass[3];     /* server verified ok */
 } gss_stat_oos = {
-        .oos_cli_count  = ATOMIC_INIT(0),
+        .oos_cli_count  = CFS_ATOMIC_INIT(0),
         .oos_cli_behind = 0,
-        .oos_svc_replay = { ATOMIC_INIT(0), },
-        .oos_svc_pass   = { ATOMIC_INIT(0), },
+        .oos_svc_replay = { CFS_ATOMIC_INIT(0), },
+        .oos_svc_pass   = { CFS_ATOMIC_INIT(0), },
 };
 
 void gss_stat_oos_record_cli(int behind)
 {
-        atomic_inc(&gss_stat_oos.oos_cli_count);
+        cfs_atomic_inc(&gss_stat_oos.oos_cli_count);
 
-        spin_lock(&gss_stat_oos.oos_lock);
+        cfs_spin_lock(&gss_stat_oos.oos_lock);
         if (behind > gss_stat_oos.oos_cli_behind)
                 gss_stat_oos.oos_cli_behind = behind;
-        spin_unlock(&gss_stat_oos.oos_lock);
+        cfs_spin_unlock(&gss_stat_oos.oos_lock);
 }
 
 void gss_stat_oos_record_svc(int phase, int replay)
@@ -97,9 +97,9 @@ void gss_stat_oos_record_svc(int phase, int replay)
         LASSERT(phase >= 0 && phase <= 2);
 
         if (replay)
-                atomic_inc(&gss_stat_oos.oos_svc_replay[phase]);
+                cfs_atomic_inc(&gss_stat_oos.oos_svc_replay[phase]);
         else
-                atomic_inc(&gss_stat_oos.oos_svc_pass[phase]);
+                cfs_atomic_inc(&gss_stat_oos.oos_svc_pass[phase]);
 }
 
 static int gss_proc_read_oos(char *page, char **start, off_t off, int count,
@@ -121,12 +121,12 @@ static int gss_proc_read_oos(char *page, char **start, off_t off, int count,
                         "  phase 2:             %d\n",
                         GSS_SEQ_WIN_MAIN,
                         GSS_SEQ_WIN_BACK,
-                        atomic_read(&gss_stat_oos.oos_cli_count),
+                        cfs_atomic_read(&gss_stat_oos.oos_cli_count),
                         gss_stat_oos.oos_cli_behind,
-                        atomic_read(&gss_stat_oos.oos_svc_replay[0]),
-                        atomic_read(&gss_stat_oos.oos_svc_replay[1]),
-                        atomic_read(&gss_stat_oos.oos_svc_replay[2]),
-                        atomic_read(&gss_stat_oos.oos_svc_pass[2]));
+                        cfs_atomic_read(&gss_stat_oos.oos_svc_replay[0]),
+                        cfs_atomic_read(&gss_stat_oos.oos_svc_replay[1]),
+                        cfs_atomic_read(&gss_stat_oos.oos_svc_replay[2]),
+                        cfs_atomic_read(&gss_stat_oos.oos_svc_pass[2]));
 
         return written;
 }
@@ -202,7 +202,7 @@ int gss_init_lproc(void)
 {
         int     rc;
 
-        spin_lock_init(&gss_stat_oos.oos_lock);
+        cfs_spin_lock_init(&gss_stat_oos.oos_lock);
 
         gss_proc_root = lprocfs_register("gss", sptlrpc_proc_root,
                                          gss_lprocfs_vars, NULL);
index a6e5fbe..555ab73 100644 (file)
@@ -333,11 +333,11 @@ out_free:
 
 int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(atomic_read(&ctx->cc_refcount));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount));
 
-        if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+        if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
                 if (!ctx->cc_early_expire)
-                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+                        cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
                 CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
@@ -391,7 +391,7 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
          * someone else, in which case nobody will make further use
          * of it. We don't care; marking it UPTODATE still helps the
          * server-side context get destroyed cleanly. */
-        set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+        cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
         if (sec_is_reverse(ctx->cc_sec)) {
                 CWARN("server installed reverse ctx %p idx "LPX64", "
@@ -513,7 +513,7 @@ int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
                  */
                 switch (phase) {
                 case 0:
-                        if (test_bit(seq_num % win_size, window))
+                        if (cfs_test_bit(seq_num % win_size, window))
                                 goto replay;
                         break;
                 case 1:
@@ -545,7 +545,7 @@ int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
 {
         int rc = 0;
 
-        spin_lock(&ssd->ssd_lock);
+        cfs_spin_lock(&ssd->ssd_lock);
 
         if (set == 0) {
                 /*
@@ -579,7 +579,7 @@ int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
                         gss_stat_oos_record_svc(2, 0);
         }
 exit:
-        spin_unlock(&ssd->ssd_lock);
+        cfs_spin_unlock(&ssd->ssd_lock);
         return rc;
 }
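
The two functions above implement a sliding-window replay filter: a bitmap
records which recent sequence numbers have been seen, and anything that falls
behind the window is rejected outright. A self-contained sketch of the same
idea (not the libcfs bit helpers; a single zero-initialized 64-bit word as
the window, for brevity):

    #include <stdbool.h>
    #include <stdint.h>

    #define WIN_SIZE 64u

    struct seq_window {                 /* zero-initialize before use */
            uint64_t     bits;          /* bit i set => (max_seq - i) seen */
            unsigned int max_seq;       /* highest sequence accepted so far */
    };

    /* Record seq and return true if fresh; false on replay or too old. */
    static bool win_check_and_set(struct seq_window *w, unsigned int seq)
    {
            if (seq > w->max_seq) {                     /* slide forward */
                    unsigned int shift = seq - w->max_seq;

                    w->bits = shift >= WIN_SIZE ? 0 : w->bits << shift;
                    w->bits |= 1;                       /* bit 0 = newest */
                    w->max_seq = seq;
                    return true;
            }
            if (w->max_seq - seq >= WIN_SIZE)
                    return false;                       /* behind the window */
            if (w->bits & ((uint64_t)1 << (w->max_seq - seq)))
                    return false;                       /* replay */
            w->bits |= (uint64_t)1 << (w->max_seq - seq);
            return true;
    }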
 
@@ -670,7 +670,7 @@ int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
                 flags |= LUSTRE_GSS_PACK_USER;
 
 redo:
-        seq = atomic_inc_return(&gctx->gc_seq);
+        seq = cfs_atomic_inc_return(&gctx->gc_seq);
 
         rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
                           ctx->cc_sec->ps_part,
@@ -687,8 +687,8 @@ redo:
          *
          * Note: null mode doesn't check the sequence number. */
         if (svc != SPTLRPC_SVC_NULL &&
-            atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
-                int behind = atomic_read(&gctx->gc_seq) - seq;
+            cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+                int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
 
                 gss_stat_oos_record_cli(behind);
                 CWARN("req %p: %u behind, retry signing\n", req, behind);
@@ -944,7 +944,7 @@ int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                 ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
 
 redo:
-        ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+        ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
 
         /* buffer objects */
         hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
@@ -963,14 +963,14 @@ redo:
         LASSERT(token.len <= buflens[1]);
 
         /* see explain in gss_cli_ctx_sign() */
-        if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+        if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
                      GSS_SEQ_REPACK_THRESHOLD)) {
-                int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+                int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
 
                 gss_stat_oos_record_cli(behind);
                 CWARN("req %p: %u behind, retry sealing\n", req, behind);
 
-                ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+                ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
                 goto redo;
         }
 
@@ -1121,18 +1121,18 @@ int gss_sec_create_common(struct gss_sec *gsec,
                 return -EOPNOTSUPP;
         }
 
-        spin_lock_init(&gsec->gs_lock);
+        cfs_spin_lock_init(&gsec->gs_lock);
         gsec->gs_rvs_hdl = 0ULL;
 
         /* initialize upper ptlrpc_sec */
         sec = &gsec->gs_base;
         sec->ps_policy = policy;
-        atomic_set(&sec->ps_refcount, 0);
-        atomic_set(&sec->ps_nctx, 0);
+        cfs_atomic_set(&sec->ps_refcount, 0);
+        cfs_atomic_set(&sec->ps_nctx, 0);
         sec->ps_id = sptlrpc_get_next_secid();
         sec->ps_flvr = *sf;
         sec->ps_import = class_import_get(imp);
-        spin_lock_init(&sec->ps_lock);
+        cfs_spin_lock_init(&sec->ps_lock);
         CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
 
         if (!svcctx) {
@@ -1158,8 +1158,8 @@ void gss_sec_destroy_common(struct gss_sec *gsec)
         ENTRY;
 
         LASSERT(sec->ps_import);
-        LASSERT(atomic_read(&sec->ps_refcount) == 0);
-        LASSERT(atomic_read(&sec->ps_nctx) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
 
         if (gsec->gs_mech) {
                 lgss_mech_put(gsec->gs_mech);
@@ -1187,23 +1187,23 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
         struct gss_cli_ctx    *gctx = ctx2gctx(ctx);
 
         gctx->gc_win = 0;
-        atomic_set(&gctx->gc_seq, 0);
+        cfs_atomic_set(&gctx->gc_seq, 0);
 
         CFS_INIT_HLIST_NODE(&ctx->cc_cache);
-        atomic_set(&ctx->cc_refcount, 0);
+        cfs_atomic_set(&ctx->cc_refcount, 0);
         ctx->cc_sec = sec;
         ctx->cc_ops = ctxops;
         ctx->cc_expire = 0;
         ctx->cc_flags = PTLRPC_CTX_NEW;
         ctx->cc_vcred = *vcred;
-        spin_lock_init(&ctx->cc_lock);
+        cfs_spin_lock_init(&ctx->cc_lock);
         CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
         CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
 
        /* take a ref on the owning sec, balanced when the ctx is destroyed */
-        atomic_inc(&sec->ps_refcount);
+        cfs_atomic_inc(&sec->ps_refcount);
        /* statistics only */
-        atomic_inc(&sec->ps_nctx);
+        cfs_atomic_inc(&sec->ps_nctx);
 
         CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
                sec->ps_policy->sp_name, ctx->cc_sec,
@@ -1221,8 +1221,8 @@ int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
 {
         struct gss_cli_ctx *gctx = ctx2gctx(ctx);
 
-        LASSERT(atomic_read(&sec->ps_nctx) > 0);
-        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
         LASSERT(ctx->cc_sec == sec);
 
         /*
@@ -1239,12 +1239,12 @@ int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
                 * asynchronous and is finished by request_out_callback(), so
                 * we take a reference; whoever finally drops the refcount to
                 * 0 is responsible for the rest of the destruction. */
-                atomic_inc(&ctx->cc_refcount);
+                cfs_atomic_inc(&ctx->cc_refcount);
 
                 gss_do_ctx_fini_rpc(gctx);
                 gss_cli_ctx_finalize(gctx);
 
-                if (!atomic_dec_and_test(&ctx->cc_refcount))
+                if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
                         return 1;
         }
 
@@ -1879,16 +1879,16 @@ void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
 static inline
 void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
 {
-        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
-        atomic_inc(&grctx->src_base.sc_refcount);
+        LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+        cfs_atomic_inc(&grctx->src_base.sc_refcount);
 }
 
 static inline
 void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
 {
-        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
 
-        if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+        if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
                 gss_svc_reqctx_free(grctx);
 }
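
The addref/decref pair above is the usual last-one-frees refcount shape:
cfs_atomic_dec_and_test() reports the transition to zero exactly once, so
exactly one caller runs the teardown. The same pattern in portable C11, with
a made-up context type:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct reqctx {
            atomic_int refcount;        /* starts at 1 for the creator */
    };

    static void reqctx_addref(struct reqctx *c)
    {
            atomic_fetch_add(&c->refcount, 1);
    }

    static void reqctx_decref(struct reqctx *c)
    {
            /* fetch_sub returns the old value: 1 means we were last */
            if (atomic_fetch_sub(&c->refcount, 1) == 1)
                    free(c);
    }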
 
@@ -2383,7 +2383,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
                 RETURN(SECSVC_DROP);
 
         grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
-        atomic_set(&grctx->src_base.sc_refcount, 1);
+        cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
         req->rq_svc_ctx = &grctx->src_base;
         gw = &grctx->src_wirectx;
 
@@ -2774,7 +2774,7 @@ void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
 
 void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
 {
-        LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+        LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
         gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
 }
 
@@ -2800,7 +2800,7 @@ int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
          * each reverse root ctx will record its latest sequence number on its
         * buddy svcctx before being destroyed, so we continue to use it here.
          */
-        atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+        cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
 
         if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
                 CERROR("failed to dup svc handle\n");
index e6cde5c..6e3c8ce 100644 (file)
@@ -84,9 +84,9 @@ do {                                                                           \
 
 #define IMPORT_SET_STATE(imp, state)            \
 do {                                            \
-        spin_lock(&imp->imp_lock);              \
+        cfs_spin_lock(&imp->imp_lock);          \
         IMPORT_SET_STATE_NOLOCK(imp, state);    \
-        spin_unlock(&imp->imp_lock);            \
+        cfs_spin_unlock(&imp->imp_lock);        \
 } while(0)
 
 
@@ -102,12 +102,12 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
  * though. */
 int ptlrpc_init_import(struct obd_import *imp)
 {
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
 
         imp->imp_generation++;
         imp->imp_state =  LUSTRE_IMP_NEW;
 
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         return 0;
 }
@@ -144,7 +144,7 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
 {
         int rc = 0;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
 
         if (imp->imp_state == LUSTRE_IMP_FULL &&
             (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
@@ -170,7 +170,7 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
                 }
                 ptlrpc_deactivate_timeouts(imp);
                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
 
                 if (obd_dump_on_timeout)
                         libcfs_debug_dumplog();
@@ -178,7 +178,7 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
                 rc = 1;
         } else {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
                        imp->imp_client->cli_name, imp,
                        (imp->imp_state == LUSTRE_IMP_FULL &&
@@ -199,7 +199,7 @@ static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
         CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
         imp->imp_invalid = 1;
         imp->imp_generation++;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         ptlrpc_abort_inflight(imp);
         obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
@@ -213,7 +213,7 @@ static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
  */
 void ptlrpc_deactivate_import(struct obd_import *imp)
 {
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         ptlrpc_deactivate_and_unlock_import(imp);
 }
 
@@ -244,16 +244,16 @@ ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
 static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
 {
         time_t now = cfs_time_current_sec();
-        struct list_head *tmp, *n;
+        cfs_list_t *tmp, *n;
         struct ptlrpc_request *req;
         unsigned int timeout = 0;
 
-        spin_lock(&imp->imp_lock);
-        list_for_each_safe(tmp, n, &imp->imp_sending_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_list);
+        cfs_spin_lock(&imp->imp_lock);
+        cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
                 timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
         }
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         return timeout;
 }
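
The cfs_list_t conversion is mechanical: cfs_list_for_each_safe() walks an
intrusive doubly linked list while tolerating removal of the current node,
and cfs_list_entry() recovers the enclosing structure from the embedded link.
Roughly, assuming the classic container_of construction (sketch names, not
the libcfs definitions):

    #include <stddef.h>

    typedef struct list_sketch {
            struct list_sketch *next, *prev;
    } list_sketch_t;

    /* enclosing object from a pointer to its embedded link member */
    #define entry_sketch(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* removal-safe walk: n caches pos->next before the body runs */
    #define for_each_safe_sketch(pos, n, head) \
            for ((pos) = (head)->next, (n) = (pos)->next; \
                 (pos) != (head); (pos) = (n), (n) = (pos)->next)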
 
@@ -265,13 +265,13 @@ static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
  */
 void ptlrpc_invalidate_import(struct obd_import *imp)
 {
-        struct list_head *tmp, *n;
+        cfs_list_t *tmp, *n;
         struct ptlrpc_request *req;
         struct l_wait_info lwi;
         unsigned int timeout;
         int rc;
 
-        atomic_inc(&imp->imp_inval_count);
+        cfs_atomic_inc(&imp->imp_inval_count);
 
         /*
          * If this is an invalid MGC connection, then don't bother
@@ -315,16 +315,18 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
                         (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
                         NULL, NULL);
                 rc = l_wait_event(imp->imp_recovery_waitq,
-                                (atomic_read(&imp->imp_inflight) == 0), &lwi);
+                                  (cfs_atomic_read(&imp->imp_inflight) == 0),
+                                  &lwi);
                 if (rc) {
                         const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
 
                         CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
-                               cli_tgt, rc, atomic_read(&imp->imp_inflight));
+                               cli_tgt, rc,
+                               cfs_atomic_read(&imp->imp_inflight));
 
-                        spin_lock(&imp->imp_lock);
-                        if (atomic_read(&imp->imp_inflight) == 0) {
-                                int count = atomic_read(&imp->imp_unregistering);
+                        cfs_spin_lock(&imp->imp_lock);
+                        if (cfs_atomic_read(&imp->imp_inflight) == 0) {
+                                int count = cfs_atomic_read(&imp->imp_unregistering);
 
                                 /* We know that "unregistering" rpcs can only
                                  * survive in the sending or delayed lists (they
@@ -340,19 +342,19 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
                                  * this point. */
                                 rc = 0;
                         } else {
-                                list_for_each_safe(tmp, n,
-                                                   &imp->imp_sending_list) {
-                                        req = list_entry(tmp,
-                                                         struct ptlrpc_request,
-                                                         rq_list);
+                                cfs_list_for_each_safe(tmp, n,
+                                                       &imp->imp_sending_list) {
+                                        req = cfs_list_entry(tmp,
+                                                             struct ptlrpc_request,
+                                                             rq_list);
                                         DEBUG_REQ(D_ERROR, req,
                                                   "still on sending list");
                                 }
-                                list_for_each_safe(tmp, n,
-                                                   &imp->imp_delayed_list) {
-                                        req = list_entry(tmp,
-                                                         struct ptlrpc_request,
-                                                         rq_list);
+                                cfs_list_for_each_safe(tmp, n,
+                                                       &imp->imp_delayed_list) {
+                                        req = cfs_list_entry(tmp,
+                                                             struct ptlrpc_request,
+                                                             rq_list);
                                         DEBUG_REQ(D_ERROR, req,
                                                   "still on delayed list");
                                 }
@@ -361,9 +363,10 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
                                        "Network is sluggish? Waiting them "
                                        "to error out.\n", cli_tgt,
                                        ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
-                                       atomic_read(&imp->imp_unregistering));
+                                       cfs_atomic_read(&imp->
+                                                       imp_unregistering));
                         }
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
                   }
         } while (rc != 0);
 
@@ -371,12 +374,12 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
         * Let's additionally check that no new rpcs were added to the
         * import while it was in "invalidate" state.
          */
-        LASSERT(atomic_read(&imp->imp_inflight) == 0);
+        LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
 out:
         obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
         sptlrpc_import_flush_all_ctx(imp);
 
-        atomic_dec(&imp->imp_inval_count);
+        cfs_atomic_dec(&imp->imp_inval_count);
         cfs_waitq_broadcast(&imp->imp_recovery_waitq);
 }
 
@@ -385,10 +388,10 @@ void ptlrpc_activate_import(struct obd_import *imp)
 {
         struct obd_device *obd = imp->imp_obd;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         imp->imp_invalid = 0;
         ptlrpc_activate_timeouts(imp);
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
 }
 
@@ -411,9 +414,9 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
                 CDEBUG(D_HA, "%s: waking up pinger\n",
                        obd2cli_tgt(imp->imp_obd));
 
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_force_verify = 1;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
 
                 ptlrpc_pinger_wake_up();
         }
@@ -428,15 +431,15 @@ int ptlrpc_reconnect_import(struct obd_import *imp)
         /* Do a fresh connect next time by zeroing the handle */
         ptlrpc_disconnect_import(imp, 1);
         /* Wait for all invalidate calls to finish */
-        if (atomic_read(&imp->imp_inval_count) > 0) {
+        if (cfs_atomic_read(&imp->imp_inval_count) > 0) {
                 int rc;
                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
                 rc = l_wait_event(imp->imp_recovery_waitq,
-                                  (atomic_read(&imp->imp_inval_count) == 0),
+                                  (cfs_atomic_read(&imp->imp_inval_count) == 0),
                                   &lwi);
                 if (rc)
                         CERROR("Interrupted, inval=%d\n",
-                               atomic_read(&imp->imp_inval_count));
+                               cfs_atomic_read(&imp->imp_inval_count));
         }
 
         /* Allow reconnect attempts */
@@ -457,16 +460,16 @@ static int import_select_connection(struct obd_import *imp)
         int tried_all = 1;
         ENTRY;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
 
-        if (list_empty(&imp->imp_conn_list)) {
+        if (cfs_list_empty(&imp->imp_conn_list)) {
                 CERROR("%s: no connections available\n",
                         imp->imp_obd->obd_name);
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 RETURN(-EINVAL);
         }
 
-        list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+        cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
                 CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
                        imp->imp_obd->obd_name,
                        libcfs_nid2str(conn->oic_conn->c_peer.nid),
@@ -551,7 +554,7 @@ static int import_select_connection(struct obd_import *imp)
                imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
                libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
 
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         RETURN(0);
 }
@@ -562,12 +565,12 @@ static int import_select_connection(struct obd_import *imp)
 static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
 {
         struct ptlrpc_request *req;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
-        if (list_empty(&imp->imp_replay_list))
+        if (cfs_list_empty(&imp->imp_replay_list))
                 return 0;
         tmp = imp->imp_replay_list.next;
-        req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+        req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
         *transno = req->rq_transno;
         if (req->rq_transno == 0) {
                 DEBUG_REQ(D_ERROR, req, "zero transno in replay");
@@ -593,17 +596,17 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid)
         int rc;
         ENTRY;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         if (imp->imp_state == LUSTRE_IMP_CLOSED) {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CERROR("can't connect to a closed import\n");
                 RETURN(-EINVAL);
         } else if (imp->imp_state == LUSTRE_IMP_FULL) {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CERROR("already connected\n");
                 RETURN(0);
         } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 CERROR("already connecting\n");
                 RETURN(-EALREADY);
         }
@@ -620,7 +623,7 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid)
 
         set_transno = ptlrpc_first_transno(imp,
                                            &imp->imp_connect_data.ocd_transno);
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         if (new_uuid) {
                 struct obd_uuid uuid;
@@ -650,9 +653,9 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid)
                 if (imp->imp_recon_bk) {
                         CDEBUG(D_HA, "Last reconnection attempt (%d) for %s\n",
                                imp->imp_conn_cnt, obd2cli_tgt(imp->imp_obd));
-                        spin_lock(&imp->imp_lock);
+                        cfs_spin_lock(&imp->imp_lock);
                         imp->imp_last_recon = 1;
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
                 }
         }
 
@@ -715,9 +718,9 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid)
         aa->pcaa_initial_connect = initial_connect;
 
         if (aa->pcaa_initial_connect) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_replayable = 1;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 lustre_msg_add_op_flags(request->rq_reqmsg,
                                         MSG_CONNECT_INITIAL);
         }
@@ -747,14 +750,14 @@ static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
 
         ENTRY;
 
-        spin_lock(&imp->imp_lock);
-        if (list_empty(&imp->imp_conn_list))
+        cfs_spin_lock(&imp->imp_lock);
+        if (cfs_list_empty(&imp->imp_conn_list))
                 GOTO(unlock, 0);
 
 #ifdef __KERNEL__
-        imp_conn = list_entry(imp->imp_conn_list.prev,
-                              struct obd_import_conn,
-                              oic_item);
+        imp_conn = cfs_list_entry(imp->imp_conn_list.prev,
+                                  struct obd_import_conn,
+                                  oic_item);
 
         /* XXX: When the failover node is the primary node, it is possible
          * to have two identical connections in imp_conn_list. We must
@@ -771,7 +774,7 @@ static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
 #endif
 
  unlock:
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         if (wake_pinger)
                 ptlrpc_pinger_wake_up();
@@ -797,9 +800,9 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
         int msg_flags;
         ENTRY;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         if (imp->imp_state == LUSTRE_IMP_CLOSED) {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 RETURN(0);
         }
 
@@ -807,7 +810,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
                /* if this is a reconnect to a busy export, there is no need
                 * to select a new target for connecting */
                 imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 GOTO(out, rc);
         }
 
@@ -822,12 +825,12 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
         if (aa->pcaa_initial_connect) {
                 if (msg_flags & MSG_CONNECT_REPLAYABLE) {
                         imp->imp_replayable = 1;
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
                         CDEBUG(D_HA, "connected to replayable target: %s\n",
                                obd2cli_tgt(imp->imp_obd));
                 } else {
                         imp->imp_replayable = 0;
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
                 }
 
                /* if applicable, adjust imp->imp_msg_magic here
@@ -850,7 +853,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
 
                 GOTO(finish, rc = 0);
         } else {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         }
 
         /* Determine what recovery state to move the import to. */
@@ -911,9 +914,9 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
                                imp->imp_obd->obd_name,
                                obd2cli_tgt(imp->imp_obd));
 
-                        spin_lock(&imp->imp_lock);
+                        cfs_spin_lock(&imp->imp_lock);
                         imp->imp_resend_replay = 1;
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
 
                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
                 } else {
@@ -970,14 +973,15 @@ finish:
                 ocd = req_capsule_server_sized_get(&request->rq_pill,
                                                    &RMF_CONNECT_DATA, ret);
 
-                spin_lock(&imp->imp_lock);
-                list_del(&imp->imp_conn_current->oic_item);
-                list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
+                cfs_spin_lock(&imp->imp_lock);
+                cfs_list_del(&imp->imp_conn_current->oic_item);
+                cfs_list_add(&imp->imp_conn_current->oic_item,
+                             &imp->imp_conn_list);
                 imp->imp_last_success_conn =
                         imp->imp_conn_current->oic_last_attempt;
 
                 if (ocd == NULL) {
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
                         CERROR("Wrong connect data from server\n");
                         rc = -EPROTO;
                         GOTO(out, rc);
@@ -986,7 +990,7 @@ finish:
                 imp->imp_connect_data = *ocd;
 
                 exp = class_conn2export(&imp->imp_dlm_handle);
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
 
                /* check that the server granted a subset of the flags we asked for. */
                 LASSERTF((ocd->ocd_connect_flags &
@@ -1114,12 +1118,12 @@ finish:
 out:
         if (rc != 0) {
                 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 if (aa->pcaa_initial_connect && !imp->imp_initial_recov &&
                     (request->rq_import_generation == imp->imp_generation))
                         ptlrpc_deactivate_and_unlock_import(imp);
                 else
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
 
                 if ((imp->imp_recon_bk && imp->imp_last_recon) ||
                     (rc == -EACCES)) {
@@ -1170,9 +1174,9 @@ out:
                        (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
         }
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         imp->imp_last_recon = 0;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         cfs_waitq_broadcast(&imp->imp_recovery_waitq);
         RETURN(rc);
@@ -1183,7 +1187,7 @@ static int completed_replay_interpret(const struct lu_env *env,
                                       void * data, int rc)
 {
         ENTRY;
-        atomic_dec(&req->rq_import->imp_replay_inflight);
+        cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
         if (req->rq_status == 0 &&
             !req->rq_import->imp_vbr_failed) {
                 ptlrpc_import_recovery_state_machine(req->rq_import);
@@ -1192,9 +1196,9 @@ static int completed_replay_interpret(const struct lu_env *env,
                         CDEBUG(D_WARNING,
                                "%s: version recovery fails, reconnecting\n",
                                req->rq_import->imp_obd->obd_name);
-                        spin_lock(&req->rq_import->imp_lock);
+                        cfs_spin_lock(&req->rq_import->imp_lock);
                         req->rq_import->imp_vbr_failed = 0;
-                        spin_unlock(&req->rq_import->imp_lock);
+                        cfs_spin_unlock(&req->rq_import->imp_lock);
                 } else {
                         CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
                                      "reconnecting\n",
@@ -1212,13 +1216,13 @@ static int signal_completed_replay(struct obd_import *imp)
         struct ptlrpc_request *req;
         ENTRY;
 
-        LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
-        atomic_inc(&imp->imp_replay_inflight);
+        LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
+        cfs_atomic_inc(&imp->imp_replay_inflight);
 
         req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
                                         OBD_PING);
         if (req == NULL) {
-                atomic_dec(&imp->imp_replay_inflight);
+                cfs_atomic_dec(&imp->imp_replay_inflight);
                 RETURN(-ENOMEM);
         }
 
@@ -1312,7 +1316,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
                        obd2cli_tgt(imp->imp_obd));
                 rc = ptlrpc_replay_next(imp, &inflight);
                 if (inflight == 0 &&
-                    atomic_read(&imp->imp_replay_inflight) == 0) {
+                    cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
                         rc = ldlm_replay_locks(imp);
                         if (rc)
@@ -1322,7 +1326,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
         }
 
         if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
-                if (atomic_read(&imp->imp_replay_inflight) == 0) {
+                if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
                         rc = signal_completed_replay(imp);
                         if (rc)
@@ -1332,7 +1336,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
         }
 
         if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
-                if (atomic_read(&imp->imp_replay_inflight) == 0) {
+                if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
                         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
                 }
         }
@@ -1414,11 +1418,11 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
 
         }
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         if (imp->imp_state != LUSTRE_IMP_FULL)
                 GOTO(out, 0);
 
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
                                         LUSTRE_OBD_VERSION, rq_opc);
@@ -1448,7 +1452,7 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
         }
 
 set_state:
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
 out:
         if (noclose)
                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
@@ -1457,7 +1461,7 @@ out:
         memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
         /* Try all connections in the future - bz 12758 */
         imp->imp_last_recon = 0;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         RETURN(rc);
 }
@@ -1466,10 +1470,10 @@ void ptlrpc_cleanup_imp(struct obd_import *imp)
 {
         ENTRY;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
         imp->imp_generation++;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         ptlrpc_abort_inflight(imp);
 
         EXIT;
@@ -1498,7 +1502,7 @@ int at_add(struct adaptive_timeout *at, unsigned int val)
                    drop to 0, and because 0 could mean an error */
                 return 0;
 
-        spin_lock(&at->at_lock);
+        cfs_spin_lock(&at->at_lock);
 
         if (unlikely(at->at_binstart == 0)) {
                 /* Special case to remove default from history */
@@ -1554,7 +1558,7 @@ int at_add(struct adaptive_timeout *at, unsigned int val)
         /* if we changed, report the old value */
         old = (at->at_current != old) ? old : 0;
 
-        spin_unlock(&at->at_lock);
+        cfs_spin_unlock(&at->at_lock);
         return old;
 }
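
at_add() feeds the adaptive-timeout estimator: the worst value seen in each
recent time bin is remembered, and the running estimate is the maximum over
all bins, so a single slow reply raises the timeout and later ages out. A toy
version with made-up bin count and width (the real constants and locking live
elsewhere in ptlrpc):

    #include <string.h>

    #define AT_BINS  4
    #define BIN_SECS 150

    struct at_sketch {
            unsigned int hist[AT_BINS]; /* worst value per bin */
            long         binstart;      /* start time of hist[0] */
    };

    static unsigned int at_update(struct at_sketch *at, long now,
                                  unsigned int val)
    {
            unsigned int cur = 0;
            int i;

            if (now - at->binstart >= (long)AT_BINS * BIN_SECS) {
                    memset(at->hist, 0, sizeof(at->hist)); /* all stale */
                    at->binstart = now;
            }
            while (now - at->binstart >= BIN_SECS) {       /* rotate */
                    for (i = AT_BINS - 1; i > 0; i--)
                            at->hist[i] = at->hist[i - 1];
                    at->hist[0] = 0;
                    at->binstart += BIN_SECS;
            }
            if (val > at->hist[0])
                    at->hist[0] = val;
            for (i = 0; i < AT_BINS; i++)                  /* max over bins */
                    if (at->hist[i] > cur)
                            cur = at->hist[i];
            return cur;
    }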
 
@@ -1573,7 +1577,7 @@ int import_at_get_index(struct obd_import *imp, int portal)
         }
 
         /* Not found in list, add it under a lock */
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
 
         /* Check unused under lock */
         for (; i < IMP_AT_MAX_PORTALS; i++) {
@@ -1589,6 +1593,6 @@ int import_at_get_index(struct obd_import *imp, int portal)
 
         at->iat_portal[i] = portal;
 out:
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         return i;
 }
index 0f7461f..6ca074b 100644 (file)
@@ -1897,7 +1897,8 @@ int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
 
         for (; i < fmt->rf_fields[loc].nr; ++i)
                 if (fmt->rf_fields[loc].d[i]->rmf_size != -1)
-                        size += size_round(fmt->rf_fields[loc].d[i]->rmf_size);
+                        size += cfs_size_round(fmt->rf_fields[loc].d[i]->
+                                               rmf_size);
         return size;
 }
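
cfs_size_round() (like the old size_round()) rounds a length up to the next
8-byte boundary so that every segment of a packed message starts aligned.
Assuming the conventional definition:

    /* assumed shape of the macro; the canonical one lives in libcfs */
    #define cfs_size_round_sketch(x)  (((x) + 7) & ~0x7)

    /* cfs_size_round_sketch(1) == 8, (8) == 8, (9) == 16, (13) == 16 */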
 
index bded4c3..17f7142 100644 (file)
@@ -58,7 +58,7 @@
 #include <libcfs/list.h>
 
 #define LLOG_CLIENT_ENTRY(ctxt, imp) do {                             \
-        mutex_down(&ctxt->loc_sem);                                   \
+        cfs_mutex_down(&ctxt->loc_sem);                               \
         if (ctxt->loc_imp) {                                          \
                 imp = class_import_get(ctxt->loc_imp);                \
         } else {                                                      \
                        "but I'll try again next time.  Not fatal.\n", \
                        ctxt->loc_idx);                                \
                 imp = NULL;                                           \
-                mutex_up(&ctxt->loc_sem);                             \
+                cfs_mutex_up(&ctxt->loc_sem);                         \
                 return (-EINVAL);                                     \
         }                                                             \
-        mutex_up(&ctxt->loc_sem);                                     \
+        cfs_mutex_up(&ctxt->loc_sem);                                 \
 } while(0)
 
 #define LLOG_CLIENT_EXIT(ctxt, imp) do {                              \
-        mutex_down(&ctxt->loc_sem);                                   \
+        cfs_mutex_down(&ctxt->loc_sem);                               \
         if (ctxt->loc_imp != imp)                                     \
                 CWARN("loc_imp has changed from %p to %p\n",          \
                        ctxt->loc_imp, imp);                           \
         class_import_put(imp);                                        \
-        mutex_up(&ctxt->loc_sem);                                     \
+        cfs_mutex_up(&ctxt->loc_sem);                                 \
 } while(0)
 
 /* This is a callback from the llog_* functions.
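
cfs_mutex_down()/cfs_mutex_up() are the portable spellings of the
semaphore-as-mutex idiom these macros rely on. On a Linux build one would
expect them to reduce to the plain kernel semaphore calls, roughly:

    #include <linux/semaphore.h>

    typedef struct semaphore cfs_semaphore_t;

    #define cfs_mutex_down(s)  down(s)   /* may sleep */
    #define cfs_mutex_up(s)    up(s)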
index 00bdd58..8be6eed 100644 (file)
@@ -76,7 +76,7 @@ int llog_origin_connect(struct llog_ctxt *ctxt,
 
         ENTRY;
 
-        if (list_empty(&ctxt->loc_handle->u.chd.chd_head)) {
+        if (cfs_list_empty(&ctxt->loc_handle->u.chd.chd_head)) {
                 CDEBUG(D_HA, "there is no record related to ctxt %p\n", ctxt);
                 RETURN(0);
         }
@@ -161,7 +161,7 @@ int llog_receptor_accept(struct llog_ctxt *ctxt, struct obd_import *imp)
         ENTRY;
 
         LASSERT(ctxt);
-        mutex_down(&ctxt->loc_sem);
+        cfs_mutex_down(&ctxt->loc_sem);
         if (ctxt->loc_imp != imp) {
                 if (ctxt->loc_imp) {
                         CWARN("changing the import %p - %p\n",
@@ -170,7 +170,7 @@ int llog_receptor_accept(struct llog_ctxt *ctxt, struct obd_import *imp)
                 }
                 ctxt->loc_imp = class_import_get(imp);
         }
-        mutex_up(&ctxt->loc_sem);
+        cfs_mutex_up(&ctxt->loc_sem);
         RETURN(0);
 }
 EXPORT_SYMBOL(llog_receptor_accept);
@@ -194,13 +194,13 @@ int llog_initiator_connect(struct llog_ctxt *ctxt)
         new_imp = ctxt->loc_obd->u.cli.cl_import;
         LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
                  "%p - %p\n", ctxt->loc_imp, new_imp);
-        mutex_down(&ctxt->loc_sem);
+        cfs_mutex_down(&ctxt->loc_sem);
         if (ctxt->loc_imp != new_imp) {
                 if (ctxt->loc_imp)
                         class_import_put(ctxt->loc_imp);
                 ctxt->loc_imp = class_import_get(new_imp);
         }
-        mutex_up(&ctxt->loc_sem);
+        cfs_mutex_up(&ctxt->loc_sem);
         RETURN(0);
 }
 EXPORT_SYMBOL(llog_initiator_connect);
index b81b32d..7a25f55 100644 (file)
@@ -605,7 +605,7 @@ static int llog_catinfo_deletions(struct obd_device *obd, char *buf,
         if (!idarray)
                 GOTO(release_ctxt, rc = -ENOMEM);
 
-        mutex_down(&obd->obd_olg.olg_cat_processing);
+        cfs_mutex_down(&obd->obd_olg.olg_cat_processing);
         rc = llog_get_cat_list(obd, name, 0, count, idarray);
         if (rc)
                 GOTO(out_free, rc);
@@ -651,7 +651,7 @@ static int llog_catinfo_deletions(struct obd_device *obd, char *buf,
 out_pop:
         pop_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
 out_free:
-        mutex_up(&obd->obd_olg.olg_cat_processing);
+        cfs_mutex_up(&obd->obd_olg.olg_cat_processing);
         OBD_VFREE(idarray, size);
 release_ctxt:
         llog_ctxt_put(ctxt);
index 7334696..8b0d281 100644 (file)
@@ -284,12 +284,12 @@ ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,
          * hose a kernel by allowing the request history to grow too
          * far. */
         bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
-        if (val > num_physpages/(2 * bufpages))
+        if (val > cfs_num_physpages/(2 * bufpages))
                 return -ERANGE;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         svc->srv_max_history_rqbds = val;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         return count;
 }
@@ -320,9 +320,9 @@ ptlrpc_lprocfs_wr_threads_min(struct file *file, const char *buffer,
         if (val > svc->srv_threads_max)
                 return -ERANGE;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         svc->srv_threads_min = val;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         return count;
 }
@@ -362,9 +362,9 @@ ptlrpc_lprocfs_wr_threads_max(struct file *file, const char *buffer,
         if (val < svc->srv_threads_min)
                 return -ERANGE;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         svc->srv_threads_max = val;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         return count;
 }
@@ -379,7 +379,7 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc,
                                     struct ptlrpc_srh_iterator *srhi,
                                     __u64 seq)
 {
-        struct list_head      *e;
+        cfs_list_t            *e;
         struct ptlrpc_request *req;
 
         if (srhi->srhi_req != NULL &&
@@ -392,7 +392,7 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc,
                  * be near the head), we shouldn't have to do long
                  * re-scans */
                 LASSERT (srhi->srhi_seq == srhi->srhi_req->rq_history_seq);
-                LASSERT (!list_empty(&svc->srv_request_history));
+                LASSERT (!cfs_list_empty(&svc->srv_request_history));
                 e = &srhi->srhi_req->rq_history_list;
         } else {
                 /* search from start */
@@ -400,7 +400,7 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc,
         }
 
         while (e != &svc->srv_request_history) {
-                req = list_entry(e, struct ptlrpc_request, rq_history_list);
+                req = cfs_list_entry(e, struct ptlrpc_request, rq_history_list);
 
                 if (req->rq_history_seq >= seq) {
                         srhi->srhi_seq = req->rq_history_seq;
@@ -427,9 +427,9 @@ ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
         srhi->srhi_seq = 0;
         srhi->srhi_req = NULL;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         if (rc == 0) {
                 *pos = srhi->srhi_seq;
@@ -457,9 +457,9 @@ ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
         struct ptlrpc_srh_iterator  *srhi = iter;
         int                          rc;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos + 1);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         if (rc != 0) {
                 OBD_FREE(srhi, sizeof(*srhi));
@@ -507,7 +507,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
         struct ptlrpc_request      *req;
         int                         rc;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
 
         rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, srhi->srhi_seq);
 
@@ -533,7 +533,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
                         svc->srv_request_history_print_fn(s, srhi->srhi_req);
         }
 
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         return rc;
 }
@@ -611,9 +611,9 @@ static int ptlrpc_lprocfs_wr_hp_ratio(struct file *file, const char *buffer,
         if (val < 0)
                 return -ERANGE;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         svc->srv_hpreq_ratio = val;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
         return count;
 }
 
index b1a4f31..8be8f00 100644 (file)
@@ -169,7 +169,7 @@ void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
         struct l_wait_info       lwi;
         int                      rc;
 
-        LASSERT(!in_interrupt());               /* might sleep */
+        LASSERT(!cfs_in_interrupt());           /* might sleep */
 
         if (!ptlrpc_server_bulk_active(desc))   /* completed or */
                 return;                         /* never started */
@@ -284,7 +284,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
         int                      rc;
         ENTRY;
 
-        LASSERT(!in_interrupt());     /* might sleep */
+        LASSERT(!cfs_in_interrupt());     /* might sleep */
 
        /* Let's set up a deadline for the reply unlink. */
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
@@ -435,7 +435,7 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
                 CERROR("not replying on NULL connection\n"); /* bug 9635 */
                 return -ENOTCONN;
         }
-        atomic_inc (&svc->srv_outstanding_replies);
+        cfs_atomic_inc (&svc->srv_outstanding_replies);
         ptlrpc_rs_addref(rs);                   /* +1 ref for the network */
 
         rc = sptlrpc_svc_wrap_reply(req);
@@ -451,7 +451,7 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
                            req->rq_xid, req->rq_reply_off);
 out:
         if (unlikely(rc != 0)) {
-                atomic_dec (&svc->srv_outstanding_replies);
+                cfs_atomic_dec (&svc->srv_outstanding_replies);
                 ptlrpc_req_drop_rs(req);
         }
         ptlrpc_connection_put(conn);
@@ -574,7 +574,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                 }
         }
 
-        spin_lock(&request->rq_lock);
+        cfs_spin_lock(&request->rq_lock);
         /* If the MD attach succeeds, there _will_ be a reply_in callback */
         request->rq_receiving_reply = !noreply;
         /* We are responsible for unlinking the reply buffer */
@@ -587,7 +587,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
         request->rq_resend = 0;
         request->rq_restart = 0;
         request->rq_reply_truncate = 0;
-        spin_unlock(&request->rq_lock);
+        cfs_spin_unlock(&request->rq_lock);
 
         if (!noreply) {
                 reply_md.start     = request->rq_repbuf;
@@ -608,10 +608,10 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                 if (rc != 0) {
                         CERROR("LNetMDAttach failed: %d\n", rc);
                         LASSERT (rc == -ENOMEM);
-                        spin_lock(&request->rq_lock);
+                        cfs_spin_lock(&request->rq_lock);
                         /* ...but the MD attach didn't succeed... */
                         request->rq_receiving_reply = 0;
-                        spin_unlock(&request->rq_lock);
+                        cfs_spin_unlock(&request->rq_lock);
                         GOTO(cleanup_me, rc = -ENOMEM);
                 }
 
@@ -625,11 +625,11 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
         ptlrpc_request_addref(request);
         if (obd->obd_svc_stats != NULL)
                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
-                                atomic_read(&request->rq_import->imp_inflight));
+                        cfs_atomic_read(&request->rq_import->imp_inflight));
 
         OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
 
-        do_gettimeofday(&request->rq_arrival_time);
+        cfs_gettimeofday(&request->rq_arrival_time);
         request->rq_sent = cfs_time_current_sec();
         /* We give the server rq_timeout secs to process the req, and
            add the network latency for our local timeout. */
index c871388..14b2d31 100644 (file)
@@ -57,7 +57,8 @@
 
 static inline int lustre_msg_hdr_size_v2(int count)
 {
-        return size_round(offsetof(struct lustre_msg_v2, lm_buflens[count]));
+        return cfs_size_round(offsetof(struct lustre_msg_v2,
+                                       lm_buflens[count]));
 }
 
 int lustre_msg_hdr_size(__u32 magic, int count)
@@ -130,7 +131,7 @@ int lustre_msg_size_v2(int count, __u32 *lengths)
 
         size = lustre_msg_hdr_size_v2(count);
         for (i = 0; i < count; i++)
-                size += size_round(lengths[i]);
+                size += cfs_size_round(lengths[i]);
 
         return size;
 }
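
So a v2 message is the rounded header followed by each buffer rounded up
individually. A worked example using the cfs_size_round_sketch() macro from
the earlier note, and assuming a 32-byte fixed header before lm_buflens[]
(the exact size is dictated by struct lustre_msg_v2):

    static int msg_size_example(void)
    {
            unsigned int lens[2] = { 13, 40 };
            int size = cfs_size_round_sketch(32 + 2 * 4); /* header: 40 */

            size += cfs_size_round_sketch(lens[0]);       /* 13 -> 16 */
            size += cfs_size_round_sketch(lens[1]);       /* 40 -> 40 */
            return size;                                  /* 96 total */
    }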
@@ -245,20 +246,20 @@ int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
 
 #if RS_DEBUG
 CFS_LIST_HEAD(ptlrpc_rs_debug_lru);
-spinlock_t ptlrpc_rs_debug_lock;
+cfs_spinlock_t ptlrpc_rs_debug_lock;
 
 #define PTLRPC_RS_DEBUG_LRU_ADD(rs)                                     \
 do {                                                                    \
-        spin_lock(&ptlrpc_rs_debug_lock);                               \
-        list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru);      \
-        spin_unlock(&ptlrpc_rs_debug_lock);                             \
+        cfs_spin_lock(&ptlrpc_rs_debug_lock);                           \
+        cfs_list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru);  \
+        cfs_spin_unlock(&ptlrpc_rs_debug_lock);                         \
 } while (0)
 
 #define PTLRPC_RS_DEBUG_LRU_DEL(rs)             \
 do {                                            \
-        spin_lock(&ptlrpc_rs_debug_lock);       \
-        list_del(&(rs)->rs_debug_list);         \
-        spin_unlock(&ptlrpc_rs_debug_lock);     \
+        cfs_spin_lock(&ptlrpc_rs_debug_lock);   \
+        cfs_list_del(&(rs)->rs_debug_list);     \
+        cfs_spin_unlock(&ptlrpc_rs_debug_lock); \
 } while (0)
 #else
 # define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
@@ -269,26 +270,27 @@ struct ptlrpc_reply_state *lustre_get_emerg_rs(struct ptlrpc_service *svc)
 {
         struct ptlrpc_reply_state *rs = NULL;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         /* See if we have anything in a pool, and wait if nothing */
-        while (list_empty(&svc->srv_free_rs_list)) {
+        while (cfs_list_empty(&svc->srv_free_rs_list)) {
                 struct l_wait_info lwi;
                 int rc;
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
                /* If we cannot get anything for a long time, we had better
                   bail out instead of waiting forever */
                 lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
                 rc = l_wait_event(svc->srv_free_rs_waitq,
-                                  !list_empty(&svc->srv_free_rs_list), &lwi);
+                                  !cfs_list_empty(&svc->srv_free_rs_list),
+                                  &lwi);
                 if (rc)
                         goto out;
-                spin_lock(&svc->srv_lock);
+                cfs_spin_lock(&svc->srv_lock);
         }
 
-        rs = list_entry(svc->srv_free_rs_list.next, struct ptlrpc_reply_state,
-                        rs_list);
-        list_del(&rs->rs_list);
-        spin_unlock(&svc->srv_lock);
+        rs = cfs_list_entry(svc->srv_free_rs_list.next,
+                            struct ptlrpc_reply_state, rs_list);
+        cfs_list_del(&rs->rs_list);
+        cfs_spin_unlock(&svc->srv_lock);
         LASSERT(rs);
         memset(rs, 0, svc->srv_max_reply_size);
         rs->rs_service = svc;
@@ -303,9 +305,9 @@ void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
 
         LASSERT(svc);
 
-        spin_lock(&svc->srv_lock);
-        list_add(&rs->rs_list, &svc->srv_free_rs_list);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
+        cfs_list_add(&rs->rs_list, &svc->srv_free_rs_list);
+        cfs_spin_unlock(&svc->srv_lock);
         cfs_waitq_signal(&svc->srv_free_rs_waitq);
 }
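
lustre_get_emerg_rs()/lustre_put_emerg_rs() manage a small blocking pool of
preallocated reply states for when normal allocation cannot be relied on: a
getter sleeps until someone returns a buffer (the real code also bails out
after a 10-second wait). The same shape in user space with pthreads, names
illustrative:

    #include <pthread.h>

    struct rs_pool {
            pthread_mutex_t lock;
            pthread_cond_t  nonempty;
            void           *free[8];     /* preallocated buffers */
            int             nfree;
    };

    static void *rs_get(struct rs_pool *p)
    {
            void *rs;

            pthread_mutex_lock(&p->lock);
            while (p->nfree == 0)                /* wait for a put */
                    pthread_cond_wait(&p->nonempty, &p->lock);
            rs = p->free[--p->nfree];
            pthread_mutex_unlock(&p->lock);
            return rs;
    }

    static void rs_put(struct rs_pool *p, void *rs)
    {
            pthread_mutex_lock(&p->lock);
            p->free[p->nfree++] = rs;
            pthread_cond_signal(&p->nonempty);   /* wake one getter */
            pthread_mutex_unlock(&p->lock);
    }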
 
@@ -327,14 +329,14 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
                 RETURN(rc);
 
         rs = req->rq_reply_state;
-        atomic_set(&rs->rs_refcount, 1);        /* 1 ref for rq_reply_state */
+        cfs_atomic_set(&rs->rs_refcount, 1);    /* 1 ref for rq_reply_state */
         rs->rs_cb_id.cbid_fn = reply_out_callback;
         rs->rs_cb_id.cbid_arg = rs;
         rs->rs_service = req->rq_rqbd->rqbd_service;
         CFS_INIT_LIST_HEAD(&rs->rs_exp_list);
         CFS_INIT_LIST_HEAD(&rs->rs_obd_list);
         CFS_INIT_LIST_HEAD(&rs->rs_list);
-        spin_lock_init(&rs->rs_lock);
+        cfs_spin_lock_init(&rs->rs_lock);
 
         req->rq_replen = msg_len;
         req->rq_reply_state = rs;
@@ -408,7 +410,7 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size)
 
         offset = lustre_msg_hdr_size_v2(bufcount);
         for (i = 0; i < n; i++)
-                offset += size_round(m->lm_buflens[i]);
+                offset += cfs_size_round(m->lm_buflens[i]);
 
         return (char *)m + offset;
 }
@@ -440,7 +442,7 @@ int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, int segment,
         if (move_data && msg->lm_bufcount > segment + 1) {
                 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
                 for (n = segment + 1; n < msg->lm_bufcount; n++)
-                        tail_len += size_round(msg->lm_buflens[n]);
+                        tail_len += cfs_size_round(msg->lm_buflens[n]);
         }
 
         msg->lm_buflens[segment] = newlen;
@@ -486,14 +488,14 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
 {
         PTLRPC_RS_DEBUG_LRU_DEL(rs);
 
-        LASSERT (atomic_read(&rs->rs_refcount) == 0);
+        LASSERT (cfs_atomic_read(&rs->rs_refcount) == 0);
         LASSERT (!rs->rs_difficult || rs->rs_handled);
         LASSERT (!rs->rs_on_net);
         LASSERT (!rs->rs_scheduled);
         LASSERT (rs->rs_export == NULL);
         LASSERT (rs->rs_nlocks == 0);
-        LASSERT (list_empty(&rs->rs_exp_list));
-        LASSERT (list_empty(&rs->rs_obd_list));
+        LASSERT (cfs_list_empty(&rs->rs_exp_list));
+        LASSERT (cfs_list_empty(&rs->rs_obd_list));
 
         sptlrpc_svc_free_rs(rs);
 }
@@ -534,7 +536,7 @@ static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
         for (i = 0; i < m->lm_bufcount; i++) {
                 if (swabbed)
                         __swab32s(&m->lm_buflens[i]);
-                required_len += size_round(m->lm_buflens[i]);
+                required_len += cfs_size_round(m->lm_buflens[i]);
         }
 
         if (len < required_len) {
@@ -2161,8 +2163,10 @@ void _debug_req(struct ptlrpc_request *req, __u32 mask,
                            (char *)req->rq_export->exp_connection->c_remote_uuid.uuid : "<?>",
                            req->rq_request_portal, req->rq_reply_portal,
                            req->rq_reqlen, req->rq_replen,
-                           req->rq_early_count, req->rq_timedout, req->rq_deadline,
-                           atomic_read(&req->rq_refcount), DEBUG_REQ_FLAGS(req),
+                           req->rq_early_count, req->rq_timedout,
+                           req->rq_deadline,
+                           cfs_atomic_read(&req->rq_refcount),
+                           DEBUG_REQ_FLAGS(req),
                            req->rq_reqmsg && req_ptlrpc_body_swabbed(req) ?
                            lustre_msg_get_flags(req->rq_reqmsg) : -1,
                            req->rq_repmsg && rep_ptlrpc_body_swabbed(req) ?
index 391ddcb..fdc172f 100644
@@ -48,9 +48,9 @@
 #include <obd_class.h>
 #include "ptlrpc_internal.h"
 
-struct semaphore pinger_sem;
+cfs_semaphore_t pinger_sem;
 static CFS_LIST_HEAD(pinger_imports);
-static struct list_head timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
+static cfs_list_t timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
 struct ptlrpc_request *
 ptlrpc_prep_ping(struct obd_import *imp)
 {
@@ -138,7 +138,7 @@ static inline int ptlrpc_next_reconnect(struct obd_import *imp)
                 return cfs_time_shift(obd_timeout);
 }
 
-static atomic_t suspend_timeouts = ATOMIC_INIT(0);
+static cfs_atomic_t suspend_timeouts = CFS_ATOMIC_INIT(0);
 static cfs_time_t suspend_wakeup_time = 0;
 
 cfs_duration_t pinger_check_timeout(cfs_time_t time)
@@ -147,21 +147,21 @@ cfs_duration_t pinger_check_timeout(cfs_time_t time)
         cfs_time_t timeout = PING_INTERVAL;
 
         /* The timeout list is an increasing-order sorted list */
-        mutex_down(&pinger_sem);
-        list_for_each_entry(item, &timeout_list, ti_chain) {
+        cfs_mutex_down(&pinger_sem);
+        cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
                 int ti_timeout = item->ti_timeout;
                 if (timeout > ti_timeout)
                         timeout = ti_timeout;
                 break;
         }
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
 
         return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
                                          cfs_time_current());
 }
 
 #ifdef __KERNEL__
-static wait_queue_head_t suspend_timeouts_waitq;
+static cfs_waitq_t suspend_timeouts_waitq;
 #endif
 
 cfs_time_t ptlrpc_suspend_wakeup_time(void)
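
For reference, the next-wake computation in pinger_check_timeout above composes as (this_ping + timeout seconds) - now, built from cfs_time_add and cfs_time_sub. The same arithmetic in plain integers, with the tick rate assumed:

#include <stdio.h>

int main(void)
{
        long hz        = 100;    /* assumed tick rate (CFS_HZ)      */
        long this_ping = 1000;   /* cfs_time_t when the round began */
        long timeout   = 25;     /* shortest timeout, in seconds    */
        long now       = 1600;

        /* cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
         *              cfs_time_current()) */
        long sleep_ticks = (this_ping + timeout * hz) - now;

        printf("sleep for %ld ticks\n", sleep_ticks);   /* 1900 */
        return 0;
}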
@@ -176,8 +176,9 @@ void ptlrpc_deactivate_timeouts(struct obd_import *imp)
         if (imp->imp_no_timeout)
                 return;
         imp->imp_no_timeout = 1;
-        atomic_inc(&suspend_timeouts);
-        CDEBUG(D_HA|D_WARNING, "deactivate timeouts %u\n", atomic_read(&suspend_timeouts));
+        cfs_atomic_inc(&suspend_timeouts);
+        CDEBUG(D_HA|D_WARNING, "deactivate timeouts %u\n",
+               cfs_atomic_read(&suspend_timeouts));
 #endif
 }
 
@@ -188,18 +189,19 @@ void ptlrpc_activate_timeouts(struct obd_import *imp)
         if (!imp->imp_no_timeout)
                 return;
         imp->imp_no_timeout = 0;
-        LASSERT(atomic_read(&suspend_timeouts) > 0);
-        if (atomic_dec_and_test(&suspend_timeouts)) {
+        LASSERT(cfs_atomic_read(&suspend_timeouts) > 0);
+        if (cfs_atomic_dec_and_test(&suspend_timeouts)) {
                 suspend_wakeup_time = cfs_time_current();
-                wake_up(&suspend_timeouts_waitq);
+                cfs_waitq_signal(&suspend_timeouts_waitq);
         }
-        CDEBUG(D_HA|D_WARNING, "activate timeouts %u\n", atomic_read(&suspend_timeouts));
+        CDEBUG(D_HA|D_WARNING, "activate timeouts %u\n",
+               cfs_atomic_read(&suspend_timeouts));
 #endif
 }
 
 int ptlrpc_check_suspend(void)
 {
-        if (atomic_read(&suspend_timeouts))
+        if (cfs_atomic_read(&suspend_timeouts))
                 return 1;
         return 0;
 }
@@ -208,12 +210,12 @@ int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req)
 {
         struct l_wait_info lwi;
 
-        if (atomic_read(&suspend_timeouts)) {
+        if (cfs_atomic_read(&suspend_timeouts)) {
                 DEBUG_REQ(D_NET, req, "-- suspend %d regular timeout",
-                          atomic_read(&suspend_timeouts));
+                          cfs_atomic_read(&suspend_timeouts));
                 lwi = LWI_INTR(NULL, NULL);
                 l_wait_event(suspend_timeouts_waitq,
-                             atomic_read(&suspend_timeouts) == 0, &lwi);
+                             cfs_atomic_read(&suspend_timeouts) == 0, &lwi);
                 DEBUG_REQ(D_NET, req, "-- recharge regular timeout");
                 return 1;
         }
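
ptlrpc_deactivate_timeouts, ptlrpc_activate_timeouts and the wait above together form a counted gate: any number of holders may suspend timeouts, and waiters sleep until the count drains back to zero. A pthreads rendition of the same shape, with a mutex and condvar standing in for cfs_atomic_t plus cfs_waitq_t:

#include <pthread.h>

static int suspend_count;                       /* suspend_timeouts */
static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gate_wake = PTHREAD_COND_INITIALIZER;

void deactivate_timeouts(void)                  /* take the gate    */
{
        pthread_mutex_lock(&gate_lock);
        suspend_count++;
        pthread_mutex_unlock(&gate_lock);
}

void activate_timeouts(void)                    /* release the gate */
{
        pthread_mutex_lock(&gate_lock);
        if (--suspend_count == 0)               /* last holder wakes */
                pthread_cond_broadcast(&gate_wake);
        pthread_mutex_unlock(&gate_lock);
}

void wait_until_resumed(void)                   /* the waiter side  */
{
        pthread_mutex_lock(&gate_lock);
        while (suspend_count > 0)
                pthread_cond_wait(&gate_wake, &gate_lock);
        pthread_mutex_unlock(&gate_lock);
}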
@@ -227,12 +229,12 @@ static void ptlrpc_pinger_process_import(struct obd_import *imp,
 {
         int force, level;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         level = imp->imp_state;
         force = imp->imp_force_verify;
         if (force)
                 imp->imp_force_verify = 0;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA,
                "level %s/%u force %u deactive %u pingable %u\n",
@@ -277,16 +279,16 @@ static int ptlrpc_pinger_main(void *arg)
                 struct l_wait_info lwi;
                 cfs_duration_t time_to_next_wake;
                 struct timeout_item *item;
-                struct list_head *iter;
+                cfs_list_t *iter;
 
-                mutex_down(&pinger_sem);
-                list_for_each_entry(item, &timeout_list, ti_chain) {
+                cfs_mutex_down(&pinger_sem);
+                cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
                         item->ti_cb(item, item->ti_cb_data);
                 }
-                list_for_each(iter, &pinger_imports) {
+                cfs_list_for_each(iter, &pinger_imports) {
                         struct obd_import *imp =
-                                list_entry(iter, struct obd_import,
-                                           imp_pinger_chain);
+                                cfs_list_entry(iter, struct obd_import,
+                                               imp_pinger_chain);
 
                         ptlrpc_pinger_process_import(imp, this_ping);
                         /* obd_timeout might have changed */
@@ -296,7 +298,7 @@ static int ptlrpc_pinger_main(void *arg)
                                                         cfs_time_seconds(PING_INTERVAL))))
                                 ptlrpc_update_next_ping(imp);
                 }
-                mutex_up(&pinger_sem);
+                cfs_mutex_up(&pinger_sem);
                 /* update memory usage info */
                 obd_update_maxusage();
 
@@ -389,10 +391,10 @@ int ptlrpc_stop_pinger(void)
                 RETURN(-EALREADY);
 
         ptlrpc_pinger_remove_timeouts();
-        mutex_down(&pinger_sem);
+        cfs_mutex_down(&pinger_sem);
         pinger_thread->t_flags = SVC_STOPPING;
         cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
 
         l_wait_event(pinger_thread->t_ctl_waitq,
                      (pinger_thread->t_flags & SVC_STOPPED), &lwi);
@@ -410,21 +412,21 @@ void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
 int ptlrpc_pinger_add_import(struct obd_import *imp)
 {
         ENTRY;
-        if (!list_empty(&imp->imp_pinger_chain))
+        if (!cfs_list_empty(&imp->imp_pinger_chain))
                 RETURN(-EALREADY);
 
-        mutex_down(&pinger_sem);
+        cfs_mutex_down(&pinger_sem);
         CDEBUG(D_HA, "adding pingable import %s->%s\n",
                imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
         /* if we add to pinger we want recovery on this import */
         imp->imp_obd->obd_no_recov = 0;
         ptlrpc_update_next_ping(imp);
         /* XXX sort, blah blah */
-        list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
+        cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
         class_import_get(imp);
 
         ptlrpc_pinger_wake_up();
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
 
         RETURN(0);
 }
@@ -432,17 +434,17 @@ int ptlrpc_pinger_add_import(struct obd_import *imp)
 int ptlrpc_pinger_del_import(struct obd_import *imp)
 {
         ENTRY;
-        if (list_empty(&imp->imp_pinger_chain))
+        if (cfs_list_empty(&imp->imp_pinger_chain))
                 RETURN(-ENOENT);
 
-        mutex_down(&pinger_sem);
-        list_del_init(&imp->imp_pinger_chain);
+        cfs_mutex_down(&pinger_sem);
+        cfs_list_del_init(&imp->imp_pinger_chain);
         CDEBUG(D_HA, "removing pingable import %s->%s\n",
                imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
         /* if we remove from pinger we don't want recovery on this import */
         imp->imp_obd->obd_no_recov = 1;
         class_import_put(imp);
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
         RETURN(0);
 }
 
@@ -481,19 +483,19 @@ ptlrpc_pinger_register_timeout(int time, enum timeout_event event,
 
         LASSERT_SEM_LOCKED(&pinger_sem);
 
-        list_for_each_entry(item, &timeout_list, ti_chain)
+        cfs_list_for_each_entry(item, &timeout_list, ti_chain)
                 if (item->ti_event == event)
                         goto out;
 
         item = ptlrpc_new_timeout(time, event, cb, data);
         if (item) {
-                list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
+                cfs_list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
                         if (tmp->ti_timeout < time) {
-                                list_add(&item->ti_chain, &tmp->ti_chain);
+                                cfs_list_add(&item->ti_chain, &tmp->ti_chain);
                                 goto out;
                         }
                 }
-                list_add(&item->ti_chain, &timeout_list);
+                cfs_list_add(&item->ti_chain, &timeout_list);
         }
 out:
         return item;
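
The reverse walk above is an ordered insert: scan from the tail for the first entry with a smaller timeout and link the new item right after it, so the list stays sorted ascending, matching the increasing-order comment earlier. The kernel code uses a doubly linked cfs_list_t; the equivalent forward scan over a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct titem {
        int timeout;
        struct titem *next;
};

static struct titem *insert_sorted(struct titem *head, int timeout)
{
        struct titem **pp = &head;
        struct titem *it = malloc(sizeof(*it));

        if (!it)
                return head;
        it->timeout = timeout;
        while (*pp && (*pp)->timeout < timeout) /* first >= slot */
                pp = &(*pp)->next;
        it->next = *pp;
        *pp = it;
        return head;
}

int main(void)
{
        struct titem *head = NULL, *next;

        head = insert_sorted(head, 30);
        head = insert_sorted(head, 10);
        head = insert_sorted(head, 20);
        for (; head; head = next) {             /* prints 10 20 30 */
                printf("%d\n", head->timeout);
                next = head->next;
                free(head);
        }
        return 0;
}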
@@ -504,46 +506,46 @@ out:
  */
 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
                               timeout_cb_t cb, void *data,
-                              struct list_head *obd_list)
+                              cfs_list_t *obd_list)
 {
         struct timeout_item *ti;
 
-        mutex_down(&pinger_sem);
+        cfs_mutex_down(&pinger_sem);
         ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
         if (!ti) {
-                mutex_up(&pinger_sem);
+                cfs_mutex_up(&pinger_sem);
                 return (-EINVAL);
         }
-        list_add(obd_list, &ti->ti_obd_list);
-        mutex_up(&pinger_sem);
+        cfs_list_add(obd_list, &ti->ti_obd_list);
+        cfs_mutex_up(&pinger_sem);
         return 0;
 }
 
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
+int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
                               enum timeout_event event)
 {
         struct timeout_item *ti = NULL, *item;
 
-        if (list_empty(obd_list))
+        if (cfs_list_empty(obd_list))
                 return 0;
-        mutex_down(&pinger_sem);
-        list_del_init(obd_list);
+        cfs_mutex_down(&pinger_sem);
+        cfs_list_del_init(obd_list);
         /**
          * If there are no obd attached to the timeout event
          * list, remove this timeout event from the pinger
          */
-        list_for_each_entry(item, &timeout_list, ti_chain) {
+        cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
                 if (item->ti_event == event) {
                         ti = item;
                         break;
                 }
         }
         LASSERTF(ti != NULL, "ti is NULL!\n");
-        if (list_empty(&ti->ti_obd_list)) {
-                list_del(&ti->ti_chain);
+        if (cfs_list_empty(&ti->ti_obd_list)) {
+                cfs_list_del(&ti->ti_chain);
                 OBD_FREE_PTR(ti);
         }
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
         return 0;
 }
 
@@ -551,13 +553,13 @@ int ptlrpc_pinger_remove_timeouts(void)
 {
         struct timeout_item *item, *tmp;
 
-        mutex_down(&pinger_sem);
-        list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
-                LASSERT(list_empty(&item->ti_obd_list));
-                list_del(&item->ti_chain);
+        cfs_mutex_down(&pinger_sem);
+        cfs_list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
+                LASSERT(cfs_list_empty(&item->ti_obd_list));
+                cfs_list_del(&item->ti_chain);
                 OBD_FREE_PTR(item);
         }
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
         return 0;
 }
 
@@ -575,29 +577,29 @@ void ptlrpc_pinger_wake_up()
 
 static int               pet_refcount = 0;
 static int               pet_state;
-static wait_queue_head_t pet_waitq;
+static cfs_waitq_t       pet_waitq;
 CFS_LIST_HEAD(pet_list);
-static spinlock_t        pet_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t    pet_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 int ping_evictor_wake(struct obd_export *exp)
 {
         struct obd_device *obd;
 
-        spin_lock(&pet_lock);
+        cfs_spin_lock(&pet_lock);
         if (pet_state != PET_READY) {
                 /* eventually the new obd will call here again. */
-                spin_unlock(&pet_lock);
+                cfs_spin_unlock(&pet_lock);
                 return 1;
         }
 
         obd = class_exp2obd(exp);
-        if (list_empty(&obd->obd_evict_list)) {
+        if (cfs_list_empty(&obd->obd_evict_list)) {
                 class_incref(obd, __FUNCTION__, cfs_current());
-                list_add(&obd->obd_evict_list, &pet_list);
+                cfs_list_add(&obd->obd_evict_list, &pet_list);
         }
-        spin_unlock(&pet_lock);
+        cfs_spin_unlock(&pet_lock);
 
-        wake_up(&pet_waitq);
+        cfs_waitq_signal(&pet_waitq);
         return 0;
 }
 
@@ -614,20 +616,20 @@ static int ping_evictor_main(void *arg)
         CDEBUG(D_HA, "Starting Ping Evictor\n");
         pet_state = PET_READY;
         while (1) {
-                l_wait_event(pet_waitq, (!list_empty(&pet_list)) ||
+                l_wait_event(pet_waitq, (!cfs_list_empty(&pet_list)) ||
                              (pet_state == PET_TERMINATE), &lwi);
 
                 /* loop until all obds have been removed */
-                if ((pet_state == PET_TERMINATE) && list_empty(&pet_list))
+                if ((pet_state == PET_TERMINATE) && cfs_list_empty(&pet_list))
                         break;
 
                 /* we only get here if pet_exp != NULL, and the end of this
                 * loop is the only place that sets it NULL again, so the lock
                  * is not strictly necessary. */
-                spin_lock(&pet_lock);
-                obd = list_entry(pet_list.next, struct obd_device,
-                                 obd_evict_list);
-                spin_unlock(&pet_lock);
+                cfs_spin_lock(&pet_lock);
+                obd = cfs_list_entry(pet_list.next, struct obd_device,
+                                     obd_evict_list);
+                cfs_spin_unlock(&pet_lock);
 
                 expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
 
@@ -638,13 +640,14 @@ static int ping_evictor_main(void *arg)
                  * the obd lock (class_unlink_export), which means we can't
                  * lose the last ref on the export.  If they've already been
                  * removed from the list, we won't find them here. */
-                spin_lock(&obd->obd_dev_lock);
-                while (!list_empty(&obd->obd_exports_timed)) {
-                        exp = list_entry(obd->obd_exports_timed.next,
-                                         struct obd_export,exp_obd_chain_timed);
+                cfs_spin_lock(&obd->obd_dev_lock);
+                while (!cfs_list_empty(&obd->obd_exports_timed)) {
+                        exp = cfs_list_entry(obd->obd_exports_timed.next,
+                                             struct obd_export,
+                                             exp_obd_chain_timed);
                         if (expire_time > exp->exp_last_request_time) {
                                 class_export_get(exp);
-                                spin_unlock(&obd->obd_dev_lock);
+                                cfs_spin_unlock(&obd->obd_dev_lock);
                                  LCONSOLE_WARN("%s: haven't heard from client %s"
                                               " (at %s) in %ld seconds. I think"
                                               " it's dead, and I am evicting"
@@ -662,17 +665,17 @@ static int ping_evictor_main(void *arg)
                                        exp->exp_last_request_time);
                                 class_fail_export(exp);
                                 class_export_put(exp);
-                                spin_lock(&obd->obd_dev_lock);
+                                cfs_spin_lock(&obd->obd_dev_lock);
                         } else {
                                 /* List is sorted, so everyone below is ok */
                                 break;
                         }
                 }
-                spin_unlock(&obd->obd_dev_lock);
+                cfs_spin_unlock(&obd->obd_dev_lock);
 
-                spin_lock(&pet_lock);
-                list_del_init(&obd->obd_evict_list);
-                spin_unlock(&pet_lock);
+                cfs_spin_lock(&pet_lock);
+                cfs_list_del_init(&obd->obd_evict_list);
+                cfs_spin_unlock(&pet_lock);
 
                 class_decref(obd, __FUNCTION__, cfs_current());
         }
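
The eviction loop is the standard lock-juggling idiom: pin the entry (class_export_get), drop obd_dev_lock for the expensive eviction work, then retake it and rescan; because the timed list is sorted, the scan stops at the first non-expired export. A condensed pthreads sketch of that shape, with simplified types rather than the Lustre structures:

#include <pthread.h>
#include <stdio.h>

struct exp {
        struct exp *next;
        long last_request;                      /* sorted, oldest 1st */
};

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct exp *timed;

static void evict(struct exp *e)                /* heavy, lock free  */
{
        printf("evicting export last=%ld\n", e->last_request);
}

void scan_and_expire(long expire_time)
{
        pthread_mutex_lock(&dev_lock);
        while (timed && timed->last_request < expire_time) {
                struct exp *e = timed;

                timed = e->next;                /* unlink while held;
                                                 * the real code pins e
                                                 * with a ref instead  */
                pthread_mutex_unlock(&dev_lock);/* drop for the work  */
                evict(e);
                pthread_mutex_lock(&dev_lock);  /* retake and rescan  */
        }
        pthread_mutex_unlock(&dev_lock);        /* the rest are newer */
}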
@@ -688,7 +691,7 @@ void ping_evictor_start(void)
         if (++pet_refcount > 1)
                 return;
 
-        init_waitqueue_head(&pet_waitq);
+        cfs_waitq_init(&pet_waitq);
 
         rc = cfs_kernel_thread(ping_evictor_main, NULL, CLONE_VM | CLONE_FILES);
         if (rc < 0) {
@@ -704,7 +707,7 @@ void ping_evictor_stop(void)
                 return;
 
         pet_state = PET_TERMINATE;
-        wake_up(&pet_waitq);
+        cfs_waitq_signal(&pet_waitq);
 }
 EXPORT_SYMBOL(ping_evictor_stop);
 #else /* !__KERNEL__ */
@@ -726,7 +729,7 @@ static int pinger_check_rpcs(void *arg)
         cfs_time_t curtime = cfs_time_current();
         struct ptlrpc_request *req;
         struct ptlrpc_request_set *set;
-        struct list_head *iter;
+        cfs_list_t *iter;
         struct obd_import *imp;
         struct pinger_data *pd = &pinger_args;
         int rc;
@@ -740,7 +743,7 @@ static int pinger_check_rpcs(void *arg)
         }
 
         /* have we reached ping point? */
-        if (!pd->pd_set && time_before(curtime, pd->pd_next_ping)) {
+        if (!pd->pd_set && cfs_time_before(curtime, pd->pd_next_ping)) {
                 pd->pd_recursion--;
                 return 0;
         }
@@ -759,19 +762,19 @@ static int pinger_check_rpcs(void *arg)
         set = pd->pd_set;
 
         /* add rpcs into set */
-        mutex_down(&pinger_sem);
-        list_for_each(iter, &pinger_imports) {
-                struct obd_import *imp =
-                        list_entry(iter, struct obd_import, imp_pinger_chain);
+        cfs_mutex_down(&pinger_sem);
+        cfs_list_for_each(iter, &pinger_imports) {
+                struct obd_import *imp = cfs_list_entry(iter, struct obd_import,
+                                                        imp_pinger_chain);
                 int generation, level;
 
                 if (cfs_time_aftereq(pd->pd_this_ping,
                                      imp->imp_next_ping - 5 * CFS_TICK)) {
                         /* Add a ping. */
-                        spin_lock(&imp->imp_lock);
+                        cfs_spin_lock(&imp->imp_lock);
                         generation = imp->imp_generation;
                         level = imp->imp_state;
-                        spin_unlock(&imp->imp_lock);
+                        cfs_spin_unlock(&imp->imp_lock);
 
                         if (level != LUSTRE_IMP_FULL) {
                                 CDEBUG(D_HA,
@@ -803,16 +806,16 @@ static int pinger_check_rpcs(void *arg)
                 }
         }
         pd->pd_this_ping = curtime;
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
 
         /* Might be empty, that's OK. */
         if (set->set_remaining == 0)
                 CDEBUG(D_RPCTRACE, "nothing to ping\n");
 
-        list_for_each(iter, &set->set_requests) {
+        cfs_list_for_each(iter, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(iter, struct ptlrpc_request,
-                                   rq_set_chain);
+                        cfs_list_entry(iter, struct ptlrpc_request,
+                                       rq_set_chain);
                 DEBUG_REQ(D_RPCTRACE, req, "pinging %s->%s",
                           req->rq_import->imp_obd->obd_uuid.uuid,
                           obd2cli_tgt(req->rq_import->imp_obd));
@@ -831,10 +834,10 @@ do_check_set:
         }
 
         /* Expire all the requests that didn't come back. */
-        mutex_down(&pinger_sem);
-        list_for_each(iter, &set->set_requests) {
-                req = list_entry(iter, struct ptlrpc_request,
-                                 rq_set_chain);
+        cfs_mutex_down(&pinger_sem);
+        cfs_list_for_each(iter, &set->set_requests) {
+                req = cfs_list_entry(iter, struct ptlrpc_request,
+                                     rq_set_chain);
 
                 if (req->rq_phase == RQ_PHASE_COMPLETE)
                         continue;
@@ -849,15 +852,15 @@ do_check_set:
                  * phase and take care of inflights. */
                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
                 imp = req->rq_import;
-                spin_lock(&imp->imp_lock);
-                if (!list_empty(&req->rq_list)) {
-                        list_del_init(&req->rq_list);
-                        atomic_dec(&imp->imp_inflight);
+                cfs_spin_lock(&imp->imp_lock);
+                if (!cfs_list_empty(&req->rq_list)) {
+                        cfs_list_del_init(&req->rq_list);
+                        cfs_atomic_dec(&imp->imp_inflight);
                 }
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 set->set_remaining--;
         }
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
 
         ptlrpc_set_destroy(set);
         pd->pd_set = NULL;
@@ -898,26 +901,26 @@ int ptlrpc_stop_pinger(void)
 void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
 {
 #ifdef ENABLE_PINGER
-        mutex_down(&pinger_sem);
+        cfs_mutex_down(&pinger_sem);
         ptlrpc_update_next_ping(imp);
         if (pinger_args.pd_set == NULL &&
-            time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
+            cfs_time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
                 CDEBUG(D_HA, "set next ping to "CFS_TIME_T"(cur "CFS_TIME_T")\n",
                         imp->imp_next_ping, cfs_time_current());
                 pinger_args.pd_next_ping = imp->imp_next_ping;
         }
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
 #endif
 }
 
 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
                               timeout_cb_t cb, void *data,
-                              struct list_head *obd_list)
+                              cfs_list_t *obd_list)
 {
         return 0;
 }
 
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
+int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
                               enum timeout_event event)
 {
         return 0;
@@ -926,17 +929,17 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list,
 int ptlrpc_pinger_add_import(struct obd_import *imp)
 {
         ENTRY;
-        if (!list_empty(&imp->imp_pinger_chain))
+        if (!cfs_list_empty(&imp->imp_pinger_chain))
                 RETURN(-EALREADY);
 
         CDEBUG(D_HA, "adding pingable import %s->%s\n",
                imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
         ptlrpc_pinger_sending_on_import(imp);
 
-        mutex_down(&pinger_sem);
-        list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
+        cfs_mutex_down(&pinger_sem);
+        cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
         class_import_get(imp);
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
 
         RETURN(0);
 }
@@ -944,15 +947,15 @@ int ptlrpc_pinger_add_import(struct obd_import *imp)
 int ptlrpc_pinger_del_import(struct obd_import *imp)
 {
         ENTRY;
-        if (list_empty(&imp->imp_pinger_chain))
+        if (cfs_list_empty(&imp->imp_pinger_chain))
                 RETURN(-ENOENT);
 
-        mutex_down(&pinger_sem);
-        list_del_init(&imp->imp_pinger_chain);
+        cfs_mutex_down(&pinger_sem);
+        cfs_list_del_init(&imp->imp_pinger_chain);
         CDEBUG(D_HA, "removing pingable import %s->%s\n",
                imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
         class_import_put(imp);
-        mutex_up(&pinger_sem);
+        cfs_mutex_up(&pinger_sem);
         RETURN(0);
 }
 
@@ -962,7 +965,7 @@ void ptlrpc_pinger_wake_up()
         /* XXX force pinger to run, if needed */
         struct obd_import *imp;
         ENTRY;
-        list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) {
+        cfs_list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) {
                 CDEBUG(D_RPCTRACE, "checking import %s->%s\n",
                        imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
 #ifdef ENABLE_LIBLUSTRE_RECOVERY
index b5a9e8e..a460271 100644
 
 #include "ptlrpc_internal.h"
 
-extern spinlock_t ptlrpc_last_xid_lock;
-extern spinlock_t ptlrpc_rs_debug_lock;
-extern spinlock_t ptlrpc_all_services_lock;
-extern struct semaphore pinger_sem;
-extern struct semaphore ptlrpcd_sem;
+extern cfs_spinlock_t ptlrpc_last_xid_lock;
+extern cfs_spinlock_t ptlrpc_rs_debug_lock;
+extern cfs_spinlock_t ptlrpc_all_services_lock;
+extern cfs_semaphore_t pinger_sem;
+extern cfs_semaphore_t ptlrpcd_sem;
 
 __init int ptlrpc_init(void)
 {
@@ -62,10 +62,10 @@ __init int ptlrpc_init(void)
         ENTRY;
 
         lustre_assert_wire_constants();
-        spin_lock_init(&ptlrpc_rs_debug_lock);
-        spin_lock_init(&ptlrpc_all_services_lock);
-        init_mutex(&pinger_sem);
-        init_mutex(&ptlrpcd_sem);
+        cfs_spin_lock_init(&ptlrpc_rs_debug_lock);
+        cfs_spin_lock_init(&ptlrpc_all_services_lock);
+        cfs_init_mutex(&pinger_sem);
+        cfs_init_mutex(&ptlrpcd_sem);
         ptlrpc_init_xid();
 
         rc = req_layout_init();
index a9f3f9d..42e3716 100644
@@ -90,7 +90,7 @@ static struct ptlrpcd_scope_ctl ptlrpcd_scopes[PSCOPE_NR] = {
         }
 };
 
-struct semaphore ptlrpcd_sem;
+cfs_semaphore_t ptlrpcd_sem;
 static int ptlrpcd_users = 0;
 
 void ptlrpcd_wake(struct ptlrpc_request *req)
@@ -108,14 +108,15 @@ void ptlrpcd_wake(struct ptlrpc_request *req)
  */
 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
 
-        list_for_each_safe(pos, tmp, &set->set_requests) {
+        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(pos, struct ptlrpc_request, rq_set_chain);
+                        cfs_list_entry(pos, struct ptlrpc_request,
+                                       rq_set_chain);
 
                 LASSERT(req->rq_phase == RQ_PHASE_NEW);
-                list_del_init(&req->rq_set_chain);
+                cfs_list_del_init(&req->rq_set_chain);
                 req->rq_set = NULL;
                 ptlrpcd_add_req(req, PSCOPE_OTHER);
                 set->set_remaining--;
@@ -159,22 +160,22 @@ int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope)
 
 static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         struct ptlrpc_request *req;
         int rc = 0;
         ENTRY;
 
-        spin_lock(&pc->pc_set->set_new_req_lock);
-        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
-                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
-                list_del_init(&req->rq_set_chain);
+        cfs_spin_lock(&pc->pc_set->set_new_req_lock);
+        cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
+                req = cfs_list_entry(pos, struct ptlrpc_request, rq_set_chain);
+                cfs_list_del_init(&req->rq_set_chain);
                 ptlrpc_set_add_req(pc->pc_set, req);
                 /*
                  * Need to calculate its timeout.
                  */
                 rc = 1;
         }
-        spin_unlock(&pc->pc_set->set_new_req_lock);
+        cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
 
         if (pc->pc_set->set_remaining) {
                 rc = rc | ptlrpc_check_set(env, pc->pc_set);
@@ -183,13 +184,13 @@ static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
                  * XXX: our set never completes, so we prune the completed
                  * reqs after each iteration. boy could this be smarter.
                  */
-                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
-                        req = list_entry(pos, struct ptlrpc_request,
+                cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
+                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                          rq_set_chain);
                         if (req->rq_phase != RQ_PHASE_COMPLETE)
                                 continue;
 
-                        list_del_init(&req->rq_set_chain);
+                        cfs_list_del_init(&req->rq_set_chain);
                         req->rq_set = NULL;
                         ptlrpc_req_finished (req);
                 }
@@ -199,9 +200,9 @@ static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
                 /*
                  * If new requests have been added, make sure to wake up.
                  */
-                spin_lock(&pc->pc_set->set_new_req_lock);
-                rc = !list_empty(&pc->pc_set->set_new_requests);
-                spin_unlock(&pc->pc_set->set_new_req_lock);
+                cfs_spin_lock(&pc->pc_set->set_new_req_lock);
+                rc = !cfs_list_empty(&pc->pc_set->set_new_requests);
+                cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
         }
 
         RETURN(rc);
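
ptlrpcd_check is a produce/drain split: other threads queue requests onto set_new_requests under set_new_req_lock, and the daemon splices them into its private set before doing any RPC work, so the lock is never held across processing. The drain step in a pthreads sketch:

#include <pthread.h>
#include <stddef.h>

struct req {
        struct req *next;
};

static pthread_mutex_t new_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *new_reqs;                    /* producers add here */
static struct req *working;                     /* daemon-private set */

/* Returns nonzero if anything was moved, like ptlrpcd_check's rc. */
int drain_new(void)
{
        struct req *batch;

        pthread_mutex_lock(&new_lock);          /* set_new_req_lock  */
        batch = new_reqs;
        new_reqs = NULL;
        pthread_mutex_unlock(&new_lock);

        if (!batch)
                return 0;
        while (batch) {                         /* append privately  */
                struct req *r = batch;

                batch = r->next;
                r->next = working;
                working = r;
        }
        return 1;
}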
@@ -231,7 +232,7 @@ static int ptlrpcd(void *arg)
                                      LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
         }
 
-        complete(&pc->pc_starting);
+        cfs_complete(&pc->pc_starting);
 
         if (rc != 0)
                 RETURN(rc);
@@ -275,8 +276,8 @@ static int ptlrpcd(void *arg)
                 /*
                  * Abort inflight rpcs for forced stop case.
                  */
-                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
-                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
+                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
+                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
                                 ptlrpc_abort_set(pc->pc_set);
                         exit++;
                 }
@@ -290,14 +291,14 @@ static int ptlrpcd(void *arg)
         /*
          * Wait for inflight requests to drain.
          */
-        if (!list_empty(&pc->pc_set->set_requests))
+        if (!cfs_list_empty(&pc->pc_set->set_requests))
                 ptlrpc_set_wait(pc->pc_set);
         lu_context_fini(&env.le_ctx);
-        complete(&pc->pc_finishing);
+        cfs_complete(&pc->pc_finishing);
 
-        clear_bit(LIOD_START, &pc->pc_flags);
-        clear_bit(LIOD_STOP, &pc->pc_flags);
-        clear_bit(LIOD_FORCE, &pc->pc_flags);
+        cfs_clear_bit(LIOD_START, &pc->pc_flags);
+        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
+        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
         return 0;
 }
 
@@ -324,7 +325,7 @@ int ptlrpcd_check_async_rpcs(void *arg)
                         /*
                          * XXX: send replay requests.
                          */
-                        if (test_bit(LIOD_RECOVERY, &pc->pc_flags))
+                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                 rc = ptlrpcd_check(&pc->pc_env, pc);
                 }
         }
@@ -337,7 +338,7 @@ int ptlrpcd_idle(void *arg)
 {
         struct ptlrpcd_ctl *pc = arg;
 
-        return (list_empty(&pc->pc_set->set_new_requests) &&
+        return (cfs_list_empty(&pc->pc_set->set_new_requests) &&
                 pc->pc_set->set_remaining == 0);
 }
 
@@ -351,15 +352,15 @@ int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
         /*
          * Do not allow starting a second thread for one pc.
          */
-        if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+        if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                 CERROR("Starting second thread (%s) for same pc %p\n",
                        name, pc);
                 RETURN(-EALREADY);
         }
 
-        init_completion(&pc->pc_starting);
-        init_completion(&pc->pc_finishing);
-        spin_lock_init(&pc->pc_lock);
+        cfs_init_completion(&pc->pc_starting);
+        cfs_init_completion(&pc->pc_finishing);
+        cfs_spin_lock_init(&pc->pc_lock);
         strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
         pc->pc_set = ptlrpc_prep_set();
         if (pc->pc_set == NULL)
@@ -383,7 +384,7 @@ int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
                 GOTO(out, rc);
         }
         rc = 0;
-        wait_for_completion(&pc->pc_starting);
+        cfs_wait_for_completion(&pc->pc_starting);
 #else
         pc->pc_wait_callback =
                 liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
@@ -394,23 +395,23 @@ int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
 #endif
 out:
         if (rc)
-                clear_bit(LIOD_START, &pc->pc_flags);
+                cfs_clear_bit(LIOD_START, &pc->pc_flags);
         RETURN(rc);
 }
 
 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
 {
-        if (!test_bit(LIOD_START, &pc->pc_flags)) {
+        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
                 CERROR("Thread for pc %p was not started\n", pc);
                 return;
         }
 
-        set_bit(LIOD_STOP, &pc->pc_flags);
+        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
         if (force)
-                set_bit(LIOD_FORCE, &pc->pc_flags);
+                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
         cfs_waitq_signal(&pc->pc_set->set_waitq);
 #ifdef __KERNEL__
-        wait_for_completion(&pc->pc_finishing);
+        cfs_wait_for_completion(&pc->pc_finishing);
 #else
         liblustre_deregister_wait_callback(pc->pc_wait_callback);
         liblustre_deregister_idle_callback(pc->pc_idle_callback);
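
ptlrpcd_stop is the flag-plus-completion shutdown handshake: publish LIOD_STOP (and LIOD_FORCE when aborting inflight RPCs), wake the daemon, then block on pc_finishing until it has drained. The same handshake in userspace, assuming C11 atomics for the flag word:

#include <pthread.h>
#include <stdatomic.h>

enum { LIOD_STOP = 1, LIOD_FORCE = 2 };         /* bit flags */

static atomic_int flags;
static pthread_mutex_t fin_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  fin_wake = PTHREAD_COND_INITIALIZER;
static int finished;

void daemon_exit(void)                  /* thread side: cfs_complete */
{
        pthread_mutex_lock(&fin_lock);
        finished = 1;
        pthread_cond_signal(&fin_wake);
        pthread_mutex_unlock(&fin_lock);
}

void daemon_stop(int force)             /* ptlrpcd_stop's shape */
{
        atomic_fetch_or(&flags, LIOD_STOP);
        if (force)
                atomic_fetch_or(&flags, LIOD_FORCE);
        /* (the real code also signals the set's wait queue here) */
        pthread_mutex_lock(&fin_lock);
        while (!finished)               /* cfs_wait_for_completion */
                pthread_cond_wait(&fin_wake, &fin_lock);
        pthread_mutex_unlock(&fin_lock);
}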
@@ -432,7 +433,7 @@ void ptlrpcd_fini(void)
 
                         pc = &ptlrpcd_scopes[i].pscope_thread[j].pt_ctl;
 
-                        if (test_bit(LIOD_START, &pc->pc_flags))
+                        if (cfs_test_bit(LIOD_START, &pc->pc_flags))
                                 ptlrpcd_stop(pc, 0);
                 }
         }
@@ -446,7 +447,7 @@ int ptlrpcd_addref(void)
         int j;
         ENTRY;
 
-        mutex_down(&ptlrpcd_sem);
+        cfs_mutex_down(&ptlrpcd_sem);
         if (++ptlrpcd_users == 1) {
                 for (i = 0; rc == 0 && i < PSCOPE_NR; ++i) {
                         for (j = 0; rc == 0 && j < PT_NR; ++j) {
@@ -456,7 +457,7 @@ int ptlrpcd_addref(void)
                                 pt = &ptlrpcd_scopes[i].pscope_thread[j];
                                 pc = &pt->pt_ctl;
                                 if (j == PT_RECOVERY)
-                                        set_bit(LIOD_RECOVERY, &pc->pc_flags);
+                                        cfs_set_bit(LIOD_RECOVERY, &pc->pc_flags);
                                 rc = ptlrpcd_start(pt->pt_name, pc);
                         }
                 }
@@ -465,14 +466,14 @@ int ptlrpcd_addref(void)
                         ptlrpcd_fini();
                 }
         }
-        mutex_up(&ptlrpcd_sem);
+        cfs_mutex_up(&ptlrpcd_sem);
         RETURN(rc);
 }
 
 void ptlrpcd_decref(void)
 {
-        mutex_down(&ptlrpcd_sem);
+        cfs_mutex_down(&ptlrpcd_sem);
         if (--ptlrpcd_users == 0)
                 ptlrpcd_fini();
-        mutex_up(&ptlrpcd_sem);
+        cfs_mutex_up(&ptlrpcd_sem);
 }
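
ptlrpcd_addref/ptlrpcd_decref is a refcounted singleton: the first user starts the shared daemons, the last stops them, all under one semaphore so a starter and a stopper can never interleave. A minimal pthreads equivalent; service_start/service_stop are placeholder names for the ptlrpcd_start loop and ptlrpcd_fini:

#include <pthread.h>

static int users;
static pthread_mutex_t users_lock = PTHREAD_MUTEX_INITIALIZER;

static void service_start(void) { /* ptlrpcd_start() per scope */ }
static void service_stop(void)  { /* ptlrpcd_fini()            */ }

int addref(void)
{
        pthread_mutex_lock(&users_lock);
        if (++users == 1)                       /* first user starts */
                service_start();
        pthread_mutex_unlock(&users_lock);
        return 0;
}

void decref(void)
{
        pthread_mutex_lock(&users_lock);
        if (--users == 0)                       /* last user stops   */
                service_stop();
        pthread_mutex_unlock(&users_lock);
}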
index 45163a4..3af66cf 100644
@@ -67,7 +67,7 @@
 #include <lustre_log.h>
 #include "ptlrpc_internal.h"
 
-static atomic_t                   llcd_count = ATOMIC_INIT(0);
+static cfs_atomic_t               llcd_count = CFS_ATOMIC_INIT(0);
 static cfs_mem_cache_t           *llcd_cache = NULL;
 
 #ifdef __KERNEL__
@@ -112,15 +112,15 @@ static struct llog_canceld_ctxt *llcd_alloc(struct llog_commit_master *lcm)
         llcd->llcd_cookiebytes = 0;
         llcd->llcd_size = size;
 
-        spin_lock(&lcm->lcm_lock);
+        cfs_spin_lock(&lcm->lcm_lock);
         llcd->llcd_lcm = lcm;
-        atomic_inc(&lcm->lcm_count);
-        list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
-        spin_unlock(&lcm->lcm_lock);
-        atomic_inc(&llcd_count);
+        cfs_atomic_inc(&lcm->lcm_count);
+        cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
+        cfs_spin_unlock(&lcm->lcm_lock);
+        cfs_atomic_inc(&llcd_count);
 
         CDEBUG(D_RPCTRACE, "Alloc llcd %p on lcm %p (%d)\n",
-               llcd, lcm, atomic_read(&lcm->lcm_count));
+               llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
 
         return llcd;
 }
@@ -134,23 +134,23 @@ static void llcd_free(struct llog_canceld_ctxt *llcd)
         int size;
 
         if (lcm) {
-                if (atomic_read(&lcm->lcm_count) == 0) {
+                if (cfs_atomic_read(&lcm->lcm_count) == 0) {
                         CERROR("Invalid llcd free %p\n", llcd);
                         llcd_print(llcd, __FUNCTION__, __LINE__);
                         LBUG();
                 }
-                spin_lock(&lcm->lcm_lock);
-                LASSERT(!list_empty(&llcd->llcd_list));
-                list_del_init(&llcd->llcd_list);
-                atomic_dec(&lcm->lcm_count);
-                spin_unlock(&lcm->lcm_lock);
+                cfs_spin_lock(&lcm->lcm_lock);
+                LASSERT(!cfs_list_empty(&llcd->llcd_list));
+                cfs_list_del_init(&llcd->llcd_list);
+                cfs_atomic_dec(&lcm->lcm_count);
+                cfs_spin_unlock(&lcm->lcm_lock);
 
                 CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
-                       llcd, lcm, atomic_read(&lcm->lcm_count));
+                       llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
         }
 
-        LASSERT(atomic_read(&llcd_count) > 0);
-        atomic_dec(&llcd_count);
+        LASSERT(cfs_atomic_read(&llcd_count) > 0);
+        cfs_atomic_dec(&llcd_count);
 
         size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
             llcd->llcd_size;
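
The size recomputation here exists because the cancel context is allocated with its cookie buffer inline: the allocation is offsetof(..., llcd_cookies) + llcd_size, and OBD_FREE needs that same size back. A self-contained sketch of the inline-buffer pattern with illustrative types:

#include <stdlib.h>
#include <stddef.h>

struct cancel_ctx {
        int  used;                              /* llcd_cookiebytes */
        int  size;                              /* llcd_size        */
        char cookies[];                         /* llcd_cookies     */
};

static struct cancel_ctx *ctx_alloc(int size)
{
        struct cancel_ctx *c;

        c = malloc(offsetof(struct cancel_ctx, cookies) + size);
        if (c) {
                c->used = 0;
                c->size = size;
        }
        return c;
}

static void ctx_free(struct cancel_ctx *c)
{
        /* the kernel code recomputes offsetof(...) + c->size here
         * because its allocator wants the original size back */
        free(c);
}

int main(void)
{
        struct cancel_ctx *c = ctx_alloc(4096);

        ctx_free(c);
        return 0;
}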
@@ -227,7 +227,7 @@ static int llcd_send(struct llog_canceld_ctxt *llcd)
          * Check if we're in exit stage. Do not send llcd in
          * this case.
          */
-        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+        if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                 GOTO(exit, rc = -ENODEV);
 
         CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd);
@@ -415,7 +415,7 @@ void llog_recov_thread_stop(struct llog_commit_master *lcm, int force)
          * Let all know that we're stopping. This will also make
          * llcd_send() refuse any new llcds.
          */
-        set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
+        cfs_set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
 
         /*
          * Stop processing thread. No new rpcs will be accepted for
@@ -428,20 +428,20 @@ void llog_recov_thread_stop(struct llog_commit_master *lcm, int force)
          * those forgotten in sync may still be attached to ctxt. Let's
          * print them.
          */
-        if (atomic_read(&lcm->lcm_count) != 0) {
+        if (cfs_atomic_read(&lcm->lcm_count) != 0) {
                 struct llog_canceld_ctxt *llcd;
-                struct list_head         *tmp;
+                cfs_list_t               *tmp;
 
                 CERROR("Busy llcds found (%d) on lcm %p\n",
-                       atomic_read(&lcm->lcm_count), lcm);
+                       cfs_atomic_read(&lcm->lcm_count), lcm);
 
-                spin_lock(&lcm->lcm_lock);
-                list_for_each(tmp, &lcm->lcm_llcds) {
-                        llcd = list_entry(tmp, struct llog_canceld_ctxt,
-                                          llcd_list);
+                cfs_spin_lock(&lcm->lcm_lock);
+                cfs_list_for_each(tmp, &lcm->lcm_llcds) {
+                        llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt,
+                                              llcd_list);
                         llcd_print(llcd, __FUNCTION__, __LINE__);
                 }
-                spin_unlock(&lcm->lcm_lock);
+                cfs_spin_unlock(&lcm->lcm_lock);
 
                 /*
                  * No point to go further with busy llcds at this point
@@ -478,9 +478,9 @@ struct llog_commit_master *llog_recov_thread_init(char *name)
         snprintf(lcm->lcm_name, sizeof(lcm->lcm_name),
                  "lcm_%s", name);
 
-        atomic_set(&lcm->lcm_count, 0);
-        atomic_set(&lcm->lcm_refcount, 1);
-        spin_lock_init(&lcm->lcm_lock);
+        cfs_atomic_set(&lcm->lcm_count, 0);
+        cfs_atomic_set(&lcm->lcm_refcount, 1);
+        cfs_spin_lock_init(&lcm->lcm_lock);
         CFS_INIT_LIST_HEAD(&lcm->lcm_llcds);
         rc = llog_recov_thread_start(lcm);
         if (rc) {
@@ -565,10 +565,10 @@ int llog_obd_repl_connect(struct llog_ctxt *ctxt,
         /*
          * Start recovery in separate thread.
          */
-        mutex_down(&ctxt->loc_sem);
+        cfs_mutex_down(&ctxt->loc_sem);
         ctxt->loc_gen = *gen;
         rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid);
-        mutex_up(&ctxt->loc_sem);
+        cfs_mutex_up(&ctxt->loc_sem);
 
         RETURN(rc);
 }
@@ -590,7 +590,7 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
 
         LASSERT(ctxt != NULL);
 
-        mutex_down(&ctxt->loc_sem);
+        cfs_mutex_down(&ctxt->loc_sem);
         if (!ctxt->loc_lcm) {
                 CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
                 GOTO(out, rc = -ENODEV);
@@ -607,7 +607,7 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                 GOTO(out, rc = -ENODEV);
         }
 
-        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
+        if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
                 CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n",
                        ctxt);
                 GOTO(out, rc = -ENODEV);
@@ -627,7 +627,7 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                          * Allocation is successful, let's check for stop
                          * flag again to fall back as soon as possible.
                          */
-                        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+                        if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                                 GOTO(out, rc = -ENODEV);
                 }
 
@@ -646,7 +646,7 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                          * Allocation is successful, let's check for stop
                          * flag again to fall back as soon as possible.
                          */
-                        if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+                        if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                                 GOTO(out, rc = -ENODEV);
                 }
 
@@ -671,7 +671,7 @@ int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
 out:
         if (rc)
                 llcd_put(ctxt);
-        mutex_up(&ctxt->loc_sem);
+        cfs_mutex_up(&ctxt->loc_sem);
         return rc;
 }
 EXPORT_SYMBOL(llog_obd_repl_cancel);
@@ -684,7 +684,7 @@ int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp)
         /*
          * Flush any remaining llcd.
          */
-        mutex_down(&ctxt->loc_sem);
+        cfs_mutex_down(&ctxt->loc_sem);
         if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
                 /*
                  * This is ost->mds connection, we can't be sure that mds
@@ -692,7 +692,7 @@ int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp)
                  */
                 CDEBUG(D_RPCTRACE, "Kill cached llcd\n");
                 llcd_put(ctxt);
-                mutex_up(&ctxt->loc_sem);
+                cfs_mutex_up(&ctxt->loc_sem);
         } else {
                 /*
                  * This is either llog_sync() from generic llog code or sync
@@ -700,7 +700,7 @@ int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp)
                  * llcds to the target with waiting for completion.
                  */
                 CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
-                mutex_up(&ctxt->loc_sem);
+                cfs_mutex_up(&ctxt->loc_sem);
                 rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
         }
         RETURN(rc);
@@ -749,9 +749,9 @@ void llog_recov_fini(void)
                  * In 2.6.22 cfs_mem_cache_destroy() will not return error
                  * for busy resources. Let's check it another way.
                  */
-                LASSERTF(atomic_read(&llcd_count) == 0,
+                LASSERTF(cfs_atomic_read(&llcd_count) == 0,
                          "Can't destroy llcd cache! Number of "
-                         "busy llcds: %d\n", atomic_read(&llcd_count));
+                         "busy llcds: %d\n", cfs_atomic_read(&llcd_count));
                 cfs_mem_cache_destroy(llcd_cache);
                 llcd_cache = NULL;
         }
index 1ca69ff..79c3d3a 100644
@@ -73,7 +73,7 @@ void ptlrpc_initiate_recovery(struct obd_import *imp)
 int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
 {
         int rc = 0;
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         struct ptlrpc_request *req = NULL;
         __u64 last_transno;
         ENTRY;
@@ -83,11 +83,11 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
         /* It might have committed some after we last spoke, so make sure we
          * get rid of them now.
          */
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         imp->imp_last_transno_checked = 0;
         ptlrpc_free_committed(imp);
         last_transno = imp->imp_last_replay_transno;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
                imp, obd2cli_tgt(imp->imp_obd),
@@ -108,8 +108,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
         * imp_lock is being held by ptlrpc_replay, but it's not; it's
          * just a little race...
          */
-        list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+        cfs_list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request,
+                                     rq_replay_list);
 
                 /* If we need to resend the last sent transno (because a
                    reconnect has occurred), then stop on the matching
@@ -125,9 +126,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
                 req = NULL;
         }
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         imp->imp_resend_replay = 0;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         if (req != NULL) {
                 rc = ptlrpc_replay_req(req);
@@ -153,37 +154,38 @@ int ptlrpc_resend(struct obd_import *imp)
          */
         /* Well... what if lctl recover is called twice at the same time?
          */
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         if (imp->imp_state != LUSTRE_IMP_RECOVER) {
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
                 RETURN(-1);
         }
 
-        list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
+        cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
+                                     rq_list) {
                 LASSERTF((long)req > CFS_PAGE_SIZE && req != LP_POISON,
                          "req %p bad\n", req);
                 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
                 if (!req->rq_no_resend)
                         ptlrpc_resend_req(req);
         }
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         RETURN(0);
 }
 
 void ptlrpc_wake_delayed(struct obd_import *imp)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         struct ptlrpc_request *req;
 
-        spin_lock(&imp->imp_lock);
-        list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_list);
+        cfs_spin_lock(&imp->imp_lock);
+        cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
 
                 DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
                 ptlrpc_client_wake_req(req);
         }
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 }
 
 void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
@@ -212,10 +214,10 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
 
         /* Wait for recovery to complete and resend. If evicted, then
            this request will be errored out later. */
-        spin_lock(&failed_req->rq_lock);
+        cfs_spin_lock(&failed_req->rq_lock);
         if (!failed_req->rq_no_resend)
                 failed_req->rq_resend = 1;
-        spin_unlock(&failed_req->rq_lock);
+        cfs_spin_unlock(&failed_req->rq_lock);
 
         EXIT;
 }
@@ -243,9 +245,9 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
 
                 /* set before invalidate to avoid messages about imp_inval
                  * set without imp_deactive in ptlrpc_import_delay_req */
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 imp->imp_deactive = 1;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
 
                 ptlrpc_invalidate_import(imp);
         }
@@ -266,19 +268,19 @@ int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid)
         int rc;
         ENTRY;
 
-        spin_lock(&imp->imp_lock);
-        if (atomic_read(&imp->imp_inval_count)) {
-                spin_unlock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
+        if (cfs_atomic_read(&imp->imp_inval_count)) {
+                cfs_spin_unlock(&imp->imp_lock);
                 RETURN(-EINVAL);
         }
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         /* force import to be disconnected. */
         ptlrpc_set_import_discon(imp, 0);
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         imp->imp_deactive = 0;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         rc = ptlrpc_recover_import_no_retry(imp, new_uuid);
 
@@ -288,12 +290,12 @@ int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid)
 int ptlrpc_import_in_recovery(struct obd_import *imp)
 {
         int in_recovery = 1;
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         if (imp->imp_state == LUSTRE_IMP_FULL ||
             imp->imp_state == LUSTRE_IMP_CLOSED ||
             imp->imp_state == LUSTRE_IMP_DISCON)
                 in_recovery = 0;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
         return in_recovery;
 }
 
@@ -306,11 +308,11 @@ static int ptlrpc_recover_import_no_retry(struct obd_import *imp,
         ENTRY;
 
         /* Check if reconnect is already in progress */
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         if (imp->imp_state != LUSTRE_IMP_DISCON) {
                 in_recovery = 1;
         }
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         if (in_recovery == 1)
                 RETURN(-EALREADY);
index 6926fc9..55bd19d 100644
@@ -66,7 +66,7 @@
  * policy registers                            *
  ***********************************************/
 
-static rwlock_t policy_lock;
+static cfs_rwlock_t policy_lock;
 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
         NULL,
 };
@@ -82,13 +82,13 @@ int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
         if (number >= SPTLRPC_POLICY_MAX)
                 return -EINVAL;
 
-        write_lock(&policy_lock);
+        cfs_write_lock(&policy_lock);
         if (unlikely(policies[number])) {
-                write_unlock(&policy_lock);
+                cfs_write_unlock(&policy_lock);
                 return -EALREADY;
         }
         policies[number] = policy;
-        write_unlock(&policy_lock);
+        cfs_write_unlock(&policy_lock);
 
         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
         return 0;
@@ -101,16 +101,16 @@ int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
 
         LASSERT(number < SPTLRPC_POLICY_MAX);
 
-        write_lock(&policy_lock);
+        cfs_write_lock(&policy_lock);
         if (unlikely(policies[number] == NULL)) {
-                write_unlock(&policy_lock);
+                cfs_write_unlock(&policy_lock);
                 CERROR("%s: already unregistered\n", policy->sp_name);
                 return -EINVAL;
         }
 
         LASSERT(policies[number] == policy);
         policies[number] = NULL;
-        write_unlock(&policy_lock);
+        cfs_write_unlock(&policy_lock);
 
         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
         return 0;
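
The policy table is a classic rwlock-protected registry: writers claim or clear a slot under the write lock, while readers (shown in the next hunk) take the read lock to look a policy up and pin its module. A pthreads sketch of the writer side, with an assumed table size:

#include <pthread.h>

#define POLICY_MAX 16                           /* assumed size */

static void *policies[POLICY_MAX];
static pthread_rwlock_t policy_lock = PTHREAD_RWLOCK_INITIALIZER;

int register_policy(unsigned n, void *p)
{
        int rc = 0;

        if (n >= POLICY_MAX)
                return -1;                      /* -EINVAL   */
        pthread_rwlock_wrlock(&policy_lock);
        if (policies[n])
                rc = -2;                        /* -EALREADY */
        else
                policies[n] = p;
        pthread_rwlock_unlock(&policy_lock);
        return rc;
}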
@@ -120,8 +120,8 @@ EXPORT_SYMBOL(sptlrpc_unregister_policy);
 static
 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
 {
-        static DECLARE_MUTEX(load_mutex);
-        static atomic_t           loaded = ATOMIC_INIT(0);
+        static CFS_DECLARE_MUTEX(load_mutex);
+        static cfs_atomic_t       loaded = CFS_ATOMIC_INIT(0);
         struct ptlrpc_sec_policy *policy;
         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
         __u16                     flag = 0;
@@ -130,29 +130,29 @@ struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
                 return NULL;
 
         while (1) {
-                read_lock(&policy_lock);
+                cfs_read_lock(&policy_lock);
                 policy = policies[number];
-                if (policy && !try_module_get(policy->sp_owner))
+                if (policy && !cfs_try_module_get(policy->sp_owner))
                         policy = NULL;
                 if (policy == NULL)
-                        flag = atomic_read(&loaded);
-                read_unlock(&policy_lock);
+                        flag = cfs_atomic_read(&loaded);
+                cfs_read_unlock(&policy_lock);
 
                 if (policy != NULL || flag != 0 ||
                     number != SPTLRPC_POLICY_GSS)
                         break;
 
                 /* try to load gss module, once */
-                mutex_down(&load_mutex);
-                if (atomic_read(&loaded) == 0) {
-                        if (request_module("ptlrpc_gss") == 0)
+                cfs_mutex_down(&load_mutex);
+                if (cfs_atomic_read(&loaded) == 0) {
+                        if (cfs_request_module("ptlrpc_gss") == 0)
                                 CWARN("module ptlrpc_gss loaded on demand\n");
                         else
                                 CERROR("Unable to load module ptlrpc_gss\n");
 
-                        atomic_set(&loaded, 1);
+                        cfs_atomic_set(&loaded, 1);
                 }
-                mutex_up(&load_mutex);
+                cfs_mutex_up(&load_mutex);
         }
 
         return policy;
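
The while (1) retry above wraps a load-once guard: a private mutex plus a loaded flag ensure the gss module is requested at most once however many threads race on a missing policy, and the flag is set even on failure so nobody retries. The idiom in pthreads form; load_module is a hypothetical stand-in for cfs_request_module:

#include <pthread.h>

static pthread_mutex_t load_mutex = PTHREAD_MUTEX_INITIALIZER;
static int loaded;

static int load_module(const char *name)        /* hypothetical */
{
        (void)name;
        return 0;
}

int try_load_gss(void)
{
        int rc = 0;

        pthread_mutex_lock(&load_mutex);
        if (!loaded) {
                rc = load_module("ptlrpc_gss");
                loaded = 1;                     /* even on failure */
        }
        pthread_mutex_unlock(&load_mutex);
        return rc;
}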
@@ -287,8 +287,8 @@ struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
 
 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
-        atomic_inc(&ctx->cc_refcount);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+        cfs_atomic_inc(&ctx->cc_refcount);
         return ctx;
 }
 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
@@ -298,9 +298,9 @@ void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
         struct ptlrpc_sec *sec = ctx->cc_sec;
 
         LASSERT(sec);
-        LASSERT(atomic_read(&ctx->cc_refcount));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount));
 
-        if (!atomic_dec_and_test(&ctx->cc_refcount))
+        if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
                 return;
 
         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
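
The get/put pair above is the cfs_atomic_t refcount idiom this rename touches throughout: get asserts the object is still live before bumping the count, put hands the actual release to a callback once the count reaches zero. A generic sketch (struct and function names hypothetical):

    struct my_obj {
            cfs_atomic_t o_refcount;
    };

    static void my_obj_free(struct my_obj *obj);    /* hypothetical release */

    static struct my_obj *my_obj_get(struct my_obj *obj)
    {
            /* taking a ref on a dead object is a bug, not a tolerable race */
            LASSERT(cfs_atomic_read(&obj->o_refcount) > 0);
            cfs_atomic_inc(&obj->o_refcount);
            return obj;
    }

    static void my_obj_put(struct my_obj *obj)
    {
            if (cfs_atomic_dec_and_test(&obj->o_refcount))
                    my_obj_free(obj);               /* last reference gone */
    }
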
@@ -322,12 +322,13 @@ void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
 {
         struct ptlrpc_request *req, *next;
 
-        spin_lock(&ctx->cc_lock);
-        list_for_each_entry_safe(req, next, &ctx->cc_req_list, rq_ctx_chain) {
-                list_del_init(&req->rq_ctx_chain);
+        cfs_spin_lock(&ctx->cc_lock);
+        cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
+                                     rq_ctx_chain) {
+                cfs_list_del_init(&req->rq_ctx_chain);
                 ptlrpc_client_wake_req(req);
         }
-        spin_unlock(&ctx->cc_lock);
+        cfs_spin_unlock(&ctx->cc_lock);
 }
 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
 
@@ -345,13 +346,13 @@ static int import_sec_check_expire(struct obd_import *imp)
 {
         int     adapt = 0;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         if (imp->imp_sec_expire &&
             imp->imp_sec_expire < cfs_time_current_sec()) {
                 adapt = 1;
                 imp->imp_sec_expire = 0;
         }
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         if (!adapt)
                 return 0;
@@ -427,10 +428,10 @@ void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
         /* the request might be asked to release its context early, while it
          * is still on the context waiting list.
          */
-        if (!list_empty(&req->rq_ctx_chain)) {
-                spin_lock(&req->rq_cli_ctx->cc_lock);
-                list_del_init(&req->rq_ctx_chain);
-                spin_unlock(&req->rq_cli_ctx->cc_lock);
+        if (!cfs_list_empty(&req->rq_ctx_chain)) {
+                cfs_spin_lock(&req->rq_cli_ctx->cc_lock);
+                cfs_list_del_init(&req->rq_ctx_chain);
+                cfs_spin_unlock(&req->rq_cli_ctx->cc_lock);
         }
 
         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
@@ -528,14 +529,15 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
         LASSERT(newctx);
 
         if (unlikely(newctx == oldctx && 
-                     test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
+                     cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
                 /*
                  * still got the old dead ctx; usually it means the system is too busy
                  */
                 CWARN("ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
                       newctx, newctx->cc_flags);
 
-                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, HZ);
+                cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+                                                   CFS_HZ);
         } else {
                 /*
                  * it's possible newctx == oldctx if we're switching
@@ -591,18 +593,18 @@ void ctx_refresh_interrupt(void *data)
 {
         struct ptlrpc_request *req = data;
 
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         req->rq_intr = 1;
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 }
 
 static
 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
 {
-        spin_lock(&ctx->cc_lock);
-        if (!list_empty(&req->rq_ctx_chain))
-                list_del_init(&req->rq_ctx_chain);
-        spin_unlock(&ctx->cc_lock);
+        cfs_spin_lock(&ctx->cc_lock);
+        if (!cfs_list_empty(&req->rq_ctx_chain))
+                cfs_list_del_init(&req->rq_ctx_chain);
+        cfs_spin_unlock(&ctx->cc_lock);
 }
 
 /*
@@ -652,11 +654,11 @@ again:
         if (cli_ctx_is_eternal(ctx))
                 RETURN(0);
 
-        if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
+        if (unlikely(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
                 LASSERT(ctx->cc_ops->refresh);
                 ctx->cc_ops->refresh(ctx);
         }
-        LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
+        LASSERT(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
 
         LASSERT(ctx->cc_ops->validate);
         if (ctx->cc_ops->validate(ctx) == 0) {
@@ -664,7 +666,7 @@ again:
                 RETURN(0);
         }
 
-        if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
+        if (unlikely(cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
                 req->rq_err = 1;
                 req_off_ctx_list(req, ctx);
                 RETURN(-EPERM);
@@ -694,14 +696,14 @@ again:
          *  2. The current context was never refreshed; then we are fine,
          *     because we never actually sent a request with the old context.
          */
-        if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
+        if (cfs_test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
             unlikely(req->rq_reqmsg) &&
             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
                 req_off_ctx_list(req, ctx);
                 RETURN(0);
         }
 
-        if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
+        if (unlikely(cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
                 req_off_ctx_list(req, ctx);
                 /*
                  * don't switch ctx if import was deactivated
@@ -727,24 +729,24 @@ again:
         /* Now we're sure this context is in the middle of an upcall; add
          * ourselves to the waiting list.
          */
-        spin_lock(&ctx->cc_lock);
-        if (list_empty(&req->rq_ctx_chain))
-                list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
-        spin_unlock(&ctx->cc_lock);
+        cfs_spin_lock(&ctx->cc_lock);
+        if (cfs_list_empty(&req->rq_ctx_chain))
+                cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
+        cfs_spin_unlock(&ctx->cc_lock);
 
         if (timeout < 0)
                 RETURN(-EWOULDBLOCK);
 
         /* Clear any flags that may be present from previous sends */
         LASSERT(req->rq_receiving_reply == 0);
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         req->rq_err = 0;
         req->rq_timedout = 0;
         req->rq_resend = 0;
         req->rq_restart = 0;
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 
-        lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
+        lwi = LWI_TIMEOUT_INTR(timeout * CFS_HZ, ctx_refresh_timeout,
                                ctx_refresh_interrupt, req);
         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
 
@@ -811,9 +813,9 @@ void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
 
         sec = req->rq_cli_ctx->cc_sec;
 
-        spin_lock(&sec->ps_lock);
+        cfs_spin_lock(&sec->ps_lock);
         req->rq_flvr = sec->ps_flvr;
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 
         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
          * destruction rpc */
@@ -860,7 +862,7 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
         int rc;
         ENTRY;
 
-        might_sleep();
+        cfs_might_sleep();
 
         sec = sptlrpc_import_sec_ref(imp);
         ctx = get_my_ctx(sec);
@@ -884,8 +886,8 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
         if (!req)
                 RETURN(-ENOMEM);
 
-        spin_lock_init(&req->rq_lock);
-        atomic_set(&req->rq_refcount, 10000);
+        cfs_spin_lock_init(&req->rq_lock);
+        cfs_atomic_set(&req->rq_refcount, 10000);
         CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
         cfs_waitq_init(&req->rq_reply_waitq);
         req->rq_import = imp;
@@ -893,7 +895,7 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
         req->rq_cli_ctx = ctx;
 
         rc = sptlrpc_req_refresh_ctx(req, 0);
-        LASSERT(list_empty(&req->rq_ctx_chain));
+        LASSERT(cfs_list_empty(&req->rq_ctx_chain));
         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
         OBD_FREE_PTR(req);
 
@@ -1063,10 +1065,10 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
                 GOTO(err_req, rc = -ENOMEM);
 
         /* sanity checks and copy data out; do it inside the spinlock */
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
 
         if (req->rq_replied) {
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
                 GOTO(err_buf, rc = -EALREADY);
         }
 
@@ -1076,7 +1078,7 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
 
         if (req->rq_reply_off != 0) {
                 CERROR("early reply with offset %u\n", req->rq_reply_off);
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
                 GOTO(err_buf, rc = -EPROTO);
         }
 
@@ -1084,21 +1086,21 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
                 /* even if another early reply arrived, the size should be the same */
                 CERROR("data size has changed from %u to %u\n",
                        early_size, req->rq_nob_received);
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
                 GOTO(err_buf, rc = -EINVAL);
         }
 
         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
                 CERROR("early reply length %d too small\n",
                        req->rq_nob_received);
-                spin_unlock(&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
                 GOTO(err_buf, rc = -EALREADY);
         }
 
         memcpy(early_buf, req->rq_repbuf, early_size);
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
 
-        spin_lock_init(&early_req->rq_lock);
+        cfs_spin_lock_init(&early_req->rq_lock);
         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
         early_req->rq_flvr = req->rq_flvr;
         early_req->rq_repbuf = early_buf;
@@ -1145,11 +1147,11 @@ void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
 /*
  * "fixed" sec (e.g. null) use sec_id < 0
  */
-static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
+static cfs_atomic_t sptlrpc_sec_id = CFS_ATOMIC_INIT(1);
 
 int sptlrpc_get_next_secid(void)
 {
-        return atomic_inc_return(&sptlrpc_sec_id);
+        return cfs_atomic_inc_return(&sptlrpc_sec_id);
 }
 EXPORT_SYMBOL(sptlrpc_get_next_secid);
 
@@ -1172,8 +1174,8 @@ static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
 {
         struct ptlrpc_sec_policy *policy = sec->ps_policy;
 
-        LASSERT(atomic_read(&sec->ps_refcount) == 0);
-        LASSERT(atomic_read(&sec->ps_nctx) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
         LASSERT(policy->sp_cops->destroy_sec);
 
         CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
@@ -1190,7 +1192,7 @@ EXPORT_SYMBOL(sptlrpc_sec_destroy);
 
 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
 {
-        LASSERT(atomic_read(&sec->ps_refcount) > 0);
+        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
 
         if (sec->ps_policy->sp_cops->kill_sec) {
                 sec->ps_policy->sp_cops->kill_sec(sec);
@@ -1202,8 +1204,8 @@ static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
 {
         if (sec) {
-                LASSERT(atomic_read(&sec->ps_refcount) > 0);
-                atomic_inc(&sec->ps_refcount);
+                LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+                cfs_atomic_inc(&sec->ps_refcount);
         }
 
         return sec;
@@ -1213,10 +1215,10 @@ EXPORT_SYMBOL(sptlrpc_sec_get);
 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
 {
         if (sec) {
-                LASSERT(atomic_read(&sec->ps_refcount) > 0);
+                LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
 
-                if (atomic_dec_and_test(&sec->ps_refcount)) {
-                        LASSERT(atomic_read(&sec->ps_nctx) == 0);
+                if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
+                        LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
 
                         sptlrpc_gc_del_sec(sec);
                         sec_cop_destroy_sec(sec);
@@ -1266,7 +1268,7 @@ struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
 
         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
         if (sec) {
-                atomic_inc(&sec->ps_refcount);
+                cfs_atomic_inc(&sec->ps_refcount);
 
                 sec->ps_part = sp;
 
@@ -1283,9 +1285,9 @@ struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
 {
         struct ptlrpc_sec *sec;
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         sec = sptlrpc_sec_get(imp->imp_sec);
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         return sec;
 }
@@ -1296,12 +1298,12 @@ static void sptlrpc_import_sec_install(struct obd_import *imp,
 {
         struct ptlrpc_sec *old_sec;
 
-        LASSERT(atomic_read(&sec->ps_refcount) > 0);
+        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
 
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);
         old_sec = imp->imp_sec;
         imp->imp_sec = sec;
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
 
         if (old_sec) {
                 sptlrpc_sec_kill(old_sec);
@@ -1336,9 +1338,9 @@ static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
                       sptlrpc_secflags2str(sf->sf_flags,
                                            str2, sizeof(str2)));
 
-        spin_lock(&sec->ps_lock);
+        cfs_spin_lock(&sec->ps_lock);
         flavor_copy(&sec->ps_flvr, sf);
-        spin_unlock(&sec->ps_lock);
+        cfs_spin_unlock(&sec->ps_lock);
 }
 
 /*
@@ -1357,7 +1359,7 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
         int                         rc = 0;
         ENTRY;
 
-        might_sleep();
+        cfs_might_sleep();
 
         if (imp == NULL)
                 RETURN(0);
@@ -1418,7 +1420,7 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
                       sptlrpc_flavor2name(&sf, str, sizeof(str)));
         }
 
-        mutex_down(&imp->imp_sec_mutex);
+        cfs_mutex_down(&imp->imp_sec_mutex);
 
         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
         if (newsec) {
@@ -1430,7 +1432,7 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
                 rc = -EPERM;
         }
 
-        mutex_up(&imp->imp_sec_mutex);
+        cfs_mutex_up(&imp->imp_sec_mutex);
 out:
         sptlrpc_sec_put(sec);
         RETURN(rc);
@@ -1492,7 +1494,7 @@ int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
         int rc;
 
         LASSERT(ctx);
-        LASSERT(atomic_read(&ctx->cc_refcount));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount));
         LASSERT(ctx->cc_sec);
         LASSERT(ctx->cc_sec->ps_policy);
         LASSERT(req->rq_reqmsg == NULL);
@@ -1517,7 +1519,7 @@ void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
         struct ptlrpc_sec_policy *policy;
 
         LASSERT(ctx);
-        LASSERT(atomic_read(&ctx->cc_refcount));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount));
         LASSERT(ctx->cc_sec);
         LASSERT(ctx->cc_sec->ps_policy);
 
@@ -1608,7 +1610,7 @@ int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
         ENTRY;
 
         LASSERT(ctx);
-        LASSERT(atomic_read(&ctx->cc_refcount));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount));
         LASSERT(ctx->cc_sec);
         LASSERT(ctx->cc_sec->ps_policy);
 
@@ -1626,7 +1628,7 @@ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
         ENTRY;
 
         LASSERT(ctx);
-        LASSERT(atomic_read(&ctx->cc_refcount));
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount));
         LASSERT(ctx->cc_sec);
         LASSERT(ctx->cc_sec->ps_policy);
 
@@ -1699,7 +1701,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
         if (req->rq_ctx_fini)
                 return 0;
 
-        spin_lock(&exp->exp_lock);
+        cfs_spin_lock(&exp->exp_lock);
 
         /* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
          * the first req with the new flavor, then treat it as current flavor,
@@ -1728,7 +1730,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                 if (req->rq_auth_gss &&
                     !(req->rq_ctx_init && (req->rq_auth_usr_root ||
                                            req->rq_auth_usr_mdt))) {
-                        spin_unlock(&exp->exp_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d)\n",
                                req->rq_auth_gss, req->rq_ctx_init,
                                req->rq_auth_usr_root, req->rq_auth_usr_mdt);
@@ -1736,7 +1738,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                 }
 
                 exp->exp_flvr_adapt = 0;
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
 
                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                 req->rq_svc_ctx, &flavor);
@@ -1749,7 +1751,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                  * gss root ctx init */
                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt)) {
-                        spin_unlock(&exp->exp_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
                         return 0;
                 }
 
@@ -1758,7 +1760,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                  * shortly, and let _this_ rpc pass through */
                 if (exp->exp_flvr_changed) {
                         LASSERT(exp->exp_flvr_adapt);
-                        spin_unlock(&exp->exp_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
                         return 0;
                 }
 
@@ -1769,7 +1771,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                                exp->exp_flvr_old[0].sf_rpc,
                                exp->exp_flvr_old[1].sf_rpc);
                         flavor = exp->exp_flvr;
-                        spin_unlock(&exp->exp_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
 
                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                         req->rq_svc_ctx,
@@ -1779,7 +1781,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
                                exp->exp_flvr_old[0].sf_rpc,
                                exp->exp_flvr_old[1].sf_rpc);
-                        spin_unlock(&exp->exp_lock);
+                        cfs_spin_unlock(&exp->exp_lock);
 
                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
                                                            req->rq_svc_ctx);
@@ -1796,7 +1798,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                                        exp->exp_flvr_old[1].sf_rpc,
                                        exp->exp_flvr_expire[0] -
                                                 cfs_time_current_sec());
-                                spin_unlock(&exp->exp_lock);
+                                cfs_spin_unlock(&exp->exp_lock);
                                 return 0;
                         }
                 } else {
@@ -1821,7 +1823,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                                        exp->exp_flvr_old[1].sf_rpc,
                                        exp->exp_flvr_expire[1] -
                                                 cfs_time_current_sec());
-                                spin_unlock(&exp->exp_lock);
+                                cfs_spin_unlock(&exp->exp_lock);
                                 return 0;
                         }
                 } else {
@@ -1838,7 +1840,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                        exp->exp_flvr_old[1].sf_rpc);
         }
 
-        spin_unlock(&exp->exp_lock);
+        cfs_spin_unlock(&exp->exp_lock);
 
         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u) with "
               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
@@ -1866,16 +1868,16 @@ void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
 
         LASSERT(obd);
 
-        spin_lock(&obd->obd_dev_lock);
+        cfs_spin_lock(&obd->obd_dev_lock);
 
-        list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+        cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
                 if (exp->exp_connection == NULL)
                         continue;
 
                 /* note: if this export's flavor has just been updated
                  * (exp_flvr_changed == 1), this will override the
                  * previous one. */
-                spin_lock(&exp->exp_lock);
+                cfs_spin_lock(&exp->exp_lock);
                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
                                              exp->exp_connection->c_peer.nid,
                                              &new_flvr);
@@ -1891,10 +1893,10 @@ void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
                                exp->exp_flvr.sf_rpc,
                                exp->exp_flvr_old[1].sf_rpc);
                 }
-                spin_unlock(&exp->exp_lock);
+                cfs_spin_unlock(&exp->exp_lock);
         }
 
-        spin_unlock(&obd->obd_dev_lock);
+        cfs_spin_unlock(&obd->obd_dev_lock);
 }
 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
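
The walk above nests a per-export spinlock inside the device-wide one: obd_dev_lock pins the export list for the iteration while exp_lock covers each export's flavor fields. Reduced to a sketch (function name hypothetical):

    static void my_walk_exports(struct obd_device *obd)
    {
            struct obd_export *exp;

            cfs_spin_lock(&obd->obd_dev_lock);         /* pins the list */
            cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
                    cfs_spin_lock(&exp->exp_lock);     /* per-export state */
                    /* flavor update would go here */
                    cfs_spin_unlock(&exp->exp_lock);
            }
            cfs_spin_unlock(&obd->obd_dev_lock);
    }
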
 
@@ -2077,8 +2079,8 @@ void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
         if (ctx == NULL)
                 return;
 
-        LASSERT(atomic_read(&ctx->sc_refcount) > 0);
-        atomic_inc(&ctx->sc_refcount);
+        LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
+        cfs_atomic_inc(&ctx->sc_refcount);
 }
 
 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
@@ -2088,8 +2090,8 @@ void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
         if (ctx == NULL)
                 return;
 
-        LASSERT(atomic_read(&ctx->sc_refcount) > 0);
-        if (atomic_dec_and_test(&ctx->sc_refcount)) {
+        LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
                 if (ctx->sc_policy->sp_sops->free_ctx)
                         ctx->sc_policy->sp_sops->free_ctx(ctx);
         }
@@ -2103,7 +2105,7 @@ void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
         if (ctx == NULL)
                 return;
 
-        LASSERT(atomic_read(&ctx->sc_refcount) > 0);
+        LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
         if (ctx->sc_policy->sp_sops->invalidate_ctx)
                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
 }
@@ -2388,7 +2390,7 @@ int __init sptlrpc_init(void)
 {
         int rc;
 
-        rwlock_init(&policy_lock);
+        cfs_rwlock_init(&policy_lock);
 
         rc = sptlrpc_gc_init();
         if (rc)
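
policy_lock, initialized here, is the reader/writer lock behind the earlier hunks in this file: registration and unregistration take it exclusively, while the hot wire-flavor lookup only shares it. A sketch of the write side, with hypothetical names standing in for policies[]/policy_lock:

    static cfs_rwlock_t my_lock;      /* cfs_rwlock_init() at module init */
    static struct ptlrpc_sec_policy *my_table[SPTLRPC_POLICY_MAX];

    static int my_register(__u16 number, struct ptlrpc_sec_policy *policy)
    {
            cfs_write_lock(&my_lock);
            if (my_table[number] != NULL) {
                    cfs_write_unlock(&my_lock);
                    return -EALREADY;                  /* slot already taken */
            }
            my_table[number] = policy;
            cfs_write_unlock(&my_lock);
            return 0;
    }
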
index a131099..1d9d8de 100644 (file)
@@ -107,7 +107,7 @@ static struct ptlrpc_enc_page_pool {
         /*
          * in-pool pages bookkeeping
          */
-        spinlock_t       epp_lock;        /* protect following fields */
+        cfs_spinlock_t   epp_lock;        /* protect following fields */
         unsigned long    epp_total_pages; /* total pages in pools */
         unsigned long    epp_free_pages;  /* current pages available */
 
@@ -132,8 +132,8 @@ static struct ptlrpc_enc_page_pool {
 /*
  * memory shrinker
  */
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
-static struct shrinker *pools_shrinker = NULL;
+const int pools_shrinker_seeks = CFS_DEFAULT_SEEKS;
+static struct cfs_shrinker *pools_shrinker = NULL;
 
 
 /*
@@ -144,7 +144,7 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
 {
         int     rc;
 
-        spin_lock(&page_pools.epp_lock);
+        cfs_spin_lock(&page_pools.epp_lock);
 
         rc = snprintf(page, count,
                       "physical pages:          %lu\n"
@@ -166,7 +166,7 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                       "max waitqueue depth:     %u\n"
                       "max wait time:           "CFS_TIME_T"/%u\n"
                       ,
-                      num_physpages,
+                      cfs_num_physpages,
                       PAGES_PER_POOL,
                       page_pools.epp_max_pages,
                       page_pools.epp_max_pools,
@@ -183,10 +183,10 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                       page_pools.epp_st_missings,
                       page_pools.epp_st_lowfree,
                       page_pools.epp_st_max_wqlen,
-                      page_pools.epp_st_max_wait, HZ
+                      page_pools.epp_st_max_wait, CFS_HZ
                      );
 
-        spin_unlock(&page_pools.epp_lock);
+        cfs_spin_unlock(&page_pools.epp_lock);
         return rc;
 }
 
@@ -242,7 +242,7 @@ static void enc_pools_release_free_pages(long npages)
 static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
 {
         if (unlikely(nr_to_scan != 0)) {
-                spin_lock(&page_pools.epp_lock);
+                cfs_spin_lock(&page_pools.epp_lock);
                 nr_to_scan = min(nr_to_scan, (int) page_pools.epp_free_pages -
                                              PTLRPC_MAX_BRW_PAGES);
                 if (nr_to_scan > 0) {
@@ -253,7 +253,7 @@ static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
                         page_pools.epp_st_shrinks++;
                         page_pools.epp_last_shrink = cfs_time_current_sec();
                 }
-                spin_unlock(&page_pools.epp_lock);
+                cfs_spin_unlock(&page_pools.epp_lock);
         }
 
         /*
@@ -262,9 +262,9 @@ static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
          */
         if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
                      CACHE_QUIESCENT_PERIOD)) {
-                spin_lock(&page_pools.epp_lock);
+                cfs_spin_lock(&page_pools.epp_lock);
                 page_pools.epp_idle_idx = IDLE_IDX_MAX;
-                spin_unlock(&page_pools.epp_lock);
+                cfs_spin_unlock(&page_pools.epp_lock);
         }
 
         LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
@@ -320,7 +320,7 @@ static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
         LASSERT(npages_to_npools(npages) == npools);
         LASSERT(page_pools.epp_growing);
 
-        spin_lock(&page_pools.epp_lock);
+        cfs_spin_lock(&page_pools.epp_lock);
 
         /*
          * (1) fill all the free slots of current pools.
@@ -387,12 +387,12 @@ static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
         CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
                page_pools.epp_total_pages);
 
-        spin_unlock(&page_pools.epp_lock);
+        cfs_spin_unlock(&page_pools.epp_lock);
 }
 
 static int enc_pools_add_pages(int npages)
 {
-        static DECLARE_MUTEX(sem_add_pages);
+        static CFS_DECLARE_MUTEX(sem_add_pages);
         cfs_page_t   ***pools;
         int             npools, alloced = 0;
         int             i, j, rc = -ENOMEM;
@@ -400,7 +400,7 @@ static int enc_pools_add_pages(int npages)
         if (npages < PTLRPC_MAX_BRW_PAGES)
                 npages = PTLRPC_MAX_BRW_PAGES;
 
-        down(&sem_add_pages);
+        cfs_down(&sem_add_pages);
 
         if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
                 npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
@@ -442,7 +442,7 @@ out:
                 CERROR("Failed to allocate %d enc pages\n", npages);
         }
 
-        up(&sem_add_pages);
+        cfs_up(&sem_add_pages);
         return rc;
 }
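
enc_pools_add_pages() above shows the page pools' two-tier locking: a static semaphore serializes the sleepable, allocation-heavy grow path, while the epp_lock spinlock only guards the bookkeeping done in enc_pools_insert(). A reduced sketch (function name hypothetical, allocation elided):

    static int my_add_pages(int npages)
    {
            static CFS_DECLARE_MUTEX(sem_add_pages);
            int rc = -ENOMEM;

            cfs_down(&sem_add_pages);    /* one grower at a time, may sleep */
            /* pool allocation happens here, outside any spinlock; only the
             * final insert takes the epp_lock spinlock */
            cfs_up(&sem_add_pages);
            return rc;
    }
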
 
@@ -513,7 +513,7 @@ int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
         if (desc->bd_enc_iov == NULL)
                 return -ENOMEM;
 
-        spin_lock(&page_pools.epp_lock);
+        cfs_spin_lock(&page_pools.epp_lock);
 
         page_pools.epp_st_access++;
 again:
@@ -529,9 +529,9 @@ again:
                 if (enc_pools_should_grow(desc->bd_iov_count, now)) {
                         page_pools.epp_growing = 1;
 
-                        spin_unlock(&page_pools.epp_lock);
+                        cfs_spin_unlock(&page_pools.epp_lock);
                         enc_pools_add_pages(page_pools.epp_pages_short / 2);
-                        spin_lock(&page_pools.epp_lock);
+                        cfs_spin_lock(&page_pools.epp_lock);
 
                         page_pools.epp_growing = 0;
 
@@ -542,15 +542,15 @@ again:
                                 page_pools.epp_st_max_wqlen =
                                                 page_pools.epp_waitqlen;
 
-                        set_current_state(CFS_TASK_UNINT);
+                        cfs_set_current_state(CFS_TASK_UNINT);
                         cfs_waitlink_init(&waitlink);
                         cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
 
-                        spin_unlock(&page_pools.epp_lock);
+                        cfs_spin_unlock(&page_pools.epp_lock);
                         cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
                         cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
                         LASSERT(page_pools.epp_waitqlen > 0);
-                        spin_lock(&page_pools.epp_lock);
+                        cfs_spin_lock(&page_pools.epp_lock);
                         page_pools.epp_waitqlen--;
                 }
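
The wait above is the lost-wakeup-safe sleep pattern: the task state is set and the waitlink enqueued before the spinlock is dropped, so a wakeup arriving in the gap cannot be missed. Isolated as a sketch, assuming the libcfs cfs_waitq_t/cfs_waitlink_t types (function name hypothetical):

    static void my_sleep(cfs_spinlock_t *lock, cfs_waitq_t *waitq)
    {
            cfs_waitlink_t waitlink;

            /* mark ourselves sleeping *before* dropping the lock */
            cfs_set_current_state(CFS_TASK_UNINT);
            cfs_waitlink_init(&waitlink);
            cfs_waitq_add(waitq, &waitlink);

            cfs_spin_unlock(lock);
            cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
            cfs_waitq_del(waitq, &waitlink);
            cfs_spin_lock(lock);
    }
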
 
@@ -602,7 +602,7 @@ again:
 
         page_pools.epp_last_access = cfs_time_current_sec();
 
-        spin_unlock(&page_pools.epp_lock);
+        cfs_spin_unlock(&page_pools.epp_lock);
         return 0;
 }
 EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
@@ -617,7 +617,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 
         LASSERT(desc->bd_iov_count > 0);
 
-        spin_lock(&page_pools.epp_lock);
+        cfs_spin_lock(&page_pools.epp_lock);
 
         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
         g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
@@ -644,7 +644,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 
         enc_pools_wakeup();
 
-        spin_unlock(&page_pools.epp_lock);
+        cfs_spin_unlock(&page_pools.epp_lock);
 
         OBD_FREE(desc->bd_enc_iov,
                  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
@@ -661,21 +661,21 @@ int sptlrpc_enc_pool_add_user(void)
 {
         int     need_grow = 0;
 
-        spin_lock(&page_pools.epp_lock);
+        cfs_spin_lock(&page_pools.epp_lock);
         if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
                 page_pools.epp_growing = 1;
                 need_grow = 1;
         }
-        spin_unlock(&page_pools.epp_lock);
+        cfs_spin_unlock(&page_pools.epp_lock);
 
         if (need_grow) {
                 enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
                                     PTLRPC_MAX_BRW_PAGES);
 
-                spin_lock(&page_pools.epp_lock);
+                cfs_spin_lock(&page_pools.epp_lock);
                 page_pools.epp_growing = 0;
                 enc_pools_wakeup();
-                spin_unlock(&page_pools.epp_lock);
+                cfs_spin_unlock(&page_pools.epp_lock);
         }
         return 0;
 }
@@ -714,7 +714,7 @@ int sptlrpc_enc_pool_init(void)
          * maximum capacity is 1/8 of total physical memory.
          * is the 1/8 a good number?
          */
-        page_pools.epp_max_pages = num_physpages / 8;
+        page_pools.epp_max_pages = cfs_num_physpages / 8;
         page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
 
         cfs_waitq_init(&page_pools.epp_waitq);
@@ -727,7 +727,7 @@ int sptlrpc_enc_pool_init(void)
         page_pools.epp_last_shrink = cfs_time_current_sec();
         page_pools.epp_last_access = cfs_time_current_sec();
 
-        spin_lock_init(&page_pools.epp_lock);
+        cfs_spin_lock_init(&page_pools.epp_lock);
         page_pools.epp_total_pages = 0;
         page_pools.epp_free_pages = 0;
 
@@ -745,7 +745,8 @@ int sptlrpc_enc_pool_init(void)
         if (page_pools.epp_pools == NULL)
                 return -ENOMEM;
 
-        pools_shrinker = set_shrinker(pools_shrinker_seeks, enc_pools_shrink);
+        pools_shrinker = cfs_set_shrinker(pools_shrinker_seeks,
+                                          enc_pools_shrink);
         if (pools_shrinker == NULL) {
                 enc_pools_free();
                 return -ENOMEM;
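
The registration above moves to the cfs_shrinker wrapper; the lifecycle stays symmetric, and the callback keeps the (nr_to_scan, gfp_mask) signature of enc_pools_shrink(). Sketch (names hypothetical):

    static struct cfs_shrinker *my_shrinker;

    static int my_shrink(int nr_to_scan, unsigned int gfp_mask);

    static int my_init(void)
    {
            my_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, my_shrink);
            return my_shrinker == NULL ? -ENOMEM : 0;
    }

    static void my_fini(void)
    {
            cfs_remove_shrinker(my_shrinker);    /* must precede unload */
    }
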
@@ -762,7 +763,7 @@ void sptlrpc_enc_pool_fini(void)
         LASSERT(page_pools.epp_pools);
         LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
 
-        remove_shrinker(pools_shrinker);
+        cfs_remove_shrinker(pools_shrinker);
 
         npools = npages_to_npools(page_pools.epp_total_pages);
         cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
@@ -778,7 +779,7 @@ void sptlrpc_enc_pool_fini(void)
                       page_pools.epp_st_grow_fails,
                       page_pools.epp_st_shrinks, page_pools.epp_st_access,
                       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
-                      page_pools.epp_st_max_wait, HZ);
+                      page_pools.epp_st_max_wait, CFS_HZ);
         }
 }
 
index 946f922..9550687 100644 (file)
@@ -270,7 +270,7 @@ int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
         struct sptlrpc_rule *rules;
         int nslot;
 
-        might_sleep();
+        cfs_might_sleep();
 
         if (rset->srs_nrule < rset->srs_nslot)
                 return 0; 
@@ -328,7 +328,7 @@ int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *rset,
         int                       spec_dir, spec_net;
         int                       rc, n, match = 0;
 
-        might_sleep();
+        cfs_might_sleep();
 
         spec_net = rule_spec_net(rule);
         spec_dir = rule_spec_dir(rule);
@@ -468,7 +468,7 @@ static int sptlrpc_rule_set_extract(struct sptlrpc_rule_set *gen,
         struct sptlrpc_rule     *rule;
         int                      i, n, rc;
 
-        might_sleep();
+        cfs_might_sleep();
 
         /* merge general rules firstly, then target-specific rules */
         for (i = 0; i < 2; i++) {
@@ -503,22 +503,22 @@ static int sptlrpc_rule_set_extract(struct sptlrpc_rule_set *gen,
  **********************************/
 
 struct sptlrpc_conf_tgt {
-        struct list_head        sct_list;
+        cfs_list_t              sct_list;
         char                    sct_name[MAX_OBD_NAME];
         struct sptlrpc_rule_set sct_rset;
 };
 
 struct sptlrpc_conf {
-        struct list_head        sc_list;
+        cfs_list_t              sc_list;
         char                    sc_fsname[MTI_NAME_MAXLEN];
         unsigned int            sc_modified;  /* modified during updating */
         unsigned int            sc_updated:1, /* updated copy from MGS */
                                 sc_local:1;   /* local copy from target */
         struct sptlrpc_rule_set sc_rset;      /* fs general rules */
-        struct list_head        sc_tgts;      /* target-specific rules */
+        cfs_list_t              sc_tgts;      /* target-specific rules */
 };
 
-static struct mutex sptlrpc_conf_lock;
+static cfs_mutex_t sptlrpc_conf_lock;
 static CFS_LIST_HEAD(sptlrpc_confs);
 
 static inline int is_hex(char c)
@@ -558,13 +558,13 @@ static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf)
 
         sptlrpc_rule_set_free(&conf->sc_rset);
 
-        list_for_each_entry_safe(conf_tgt, conf_tgt_next,
-                                 &conf->sc_tgts, sct_list) {
+        cfs_list_for_each_entry_safe(conf_tgt, conf_tgt_next,
+                                     &conf->sc_tgts, sct_list) {
                 sptlrpc_rule_set_free(&conf_tgt->sct_rset);
-                list_del(&conf_tgt->sct_list);
+                cfs_list_del(&conf_tgt->sct_list);
                 OBD_FREE_PTR(conf_tgt);
         }
-        LASSERT(list_empty(&conf->sc_tgts));
+        LASSERT(cfs_list_empty(&conf->sc_tgts));
 
         conf->sc_updated = 0;
         conf->sc_local = 0;
@@ -575,7 +575,7 @@ static void sptlrpc_conf_free(struct sptlrpc_conf *conf)
         CDEBUG(D_SEC, "free sptlrpc conf %s\n", conf->sc_fsname);
 
         sptlrpc_conf_free_rsets(conf);
-        list_del(&conf->sc_list);
+        cfs_list_del(&conf->sc_list);
         OBD_FREE_PTR(conf);
 }
 
@@ -586,7 +586,7 @@ struct sptlrpc_conf_tgt *sptlrpc_conf_get_tgt(struct sptlrpc_conf *conf,
 {
         struct sptlrpc_conf_tgt *conf_tgt;
 
-        list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
+        cfs_list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
                 if (strcmp(conf_tgt->sct_name, name) == 0)
                         return conf_tgt;
         }
@@ -598,7 +598,7 @@ struct sptlrpc_conf_tgt *sptlrpc_conf_get_tgt(struct sptlrpc_conf *conf,
         if (conf_tgt) {
                 strncpy(conf_tgt->sct_name, name, sizeof(conf_tgt->sct_name));
                 sptlrpc_rule_set_init(&conf_tgt->sct_rset);
-                list_add(&conf_tgt->sct_list, &conf->sc_tgts);
+                cfs_list_add(&conf_tgt->sct_list, &conf->sc_tgts);
         }
 
         return conf_tgt;
@@ -610,7 +610,7 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname,
 {
         struct sptlrpc_conf *conf;
 
-        list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
+        cfs_list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
                 if (strcmp(conf->sc_fsname, fsname) == 0)
                         return conf;
         }
@@ -625,7 +625,7 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname,
         strcpy(conf->sc_fsname, fsname);
         sptlrpc_rule_set_init(&conf->sc_rset);
         CFS_INIT_LIST_HEAD(&conf->sc_tgts);
-        list_add(&conf->sc_list, &sptlrpc_confs);
+        cfs_list_add(&conf->sc_list, &sptlrpc_confs);
 
         CDEBUG(D_SEC, "create sptlrpc conf %s\n", conf->sc_fsname);
         return conf;
@@ -699,7 +699,7 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
         if (conf == NULL) {
                 target2fsname(target, fsname, sizeof(fsname));
 
-                mutex_lock(&sptlrpc_conf_lock);
+                cfs_mutex_lock(&sptlrpc_conf_lock);
                 conf = sptlrpc_conf_get(fsname, 0);
                 if (conf == NULL) {
                         CERROR("can't find conf\n");
@@ -707,9 +707,9 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
                 } else {
                         rc = sptlrpc_conf_merge_rule(conf, target, &rule);
                 }
-                mutex_unlock(&sptlrpc_conf_lock);
+                cfs_mutex_unlock(&sptlrpc_conf_lock);
         } else {
-                LASSERT(mutex_is_locked(&sptlrpc_conf_lock));
+                LASSERT(cfs_mutex_is_locked(&sptlrpc_conf_lock));
                 rc = sptlrpc_conf_merge_rule(conf, target, &rule);
         }
 
@@ -751,7 +751,7 @@ void sptlrpc_conf_log_update_begin(const char *logname)
         if (logname2fsname(logname, fsname, sizeof(fsname)))
                 return;
 
-        mutex_lock(&sptlrpc_conf_lock);
+        cfs_mutex_lock(&sptlrpc_conf_lock);
 
         conf = sptlrpc_conf_get(fsname, 0);
         if (conf && conf->sc_local) {
@@ -760,7 +760,7 @@ void sptlrpc_conf_log_update_begin(const char *logname)
         }
         conf->sc_modified = 0;
 
-        mutex_unlock(&sptlrpc_conf_lock);
+        cfs_mutex_unlock(&sptlrpc_conf_lock);
 }
 EXPORT_SYMBOL(sptlrpc_conf_log_update_begin);
 
@@ -775,7 +775,7 @@ void sptlrpc_conf_log_update_end(const char *logname)
         if (logname2fsname(logname, fsname, sizeof(fsname)))
                 return;
 
-        mutex_lock(&sptlrpc_conf_lock);
+        cfs_mutex_lock(&sptlrpc_conf_lock);
 
         conf = sptlrpc_conf_get(fsname, 0);
         if (conf) {
@@ -789,7 +789,7 @@ void sptlrpc_conf_log_update_end(const char *logname)
                 conf->sc_updated = 1;
         }
 
-        mutex_unlock(&sptlrpc_conf_lock);
+        cfs_mutex_unlock(&sptlrpc_conf_lock);
 }
 EXPORT_SYMBOL(sptlrpc_conf_log_update_end);
 
@@ -801,9 +801,9 @@ void sptlrpc_conf_log_start(const char *logname)
         if (logname2fsname(logname, fsname, sizeof(fsname)))
                 return;
 
-        mutex_lock(&sptlrpc_conf_lock);
+        cfs_mutex_lock(&sptlrpc_conf_lock);
         conf = sptlrpc_conf_get(fsname, 1);
-        mutex_unlock(&sptlrpc_conf_lock);
+        cfs_mutex_unlock(&sptlrpc_conf_lock);
 }
 EXPORT_SYMBOL(sptlrpc_conf_log_start);
 
@@ -815,11 +815,11 @@ void sptlrpc_conf_log_stop(const char *logname)
         if (logname2fsname(logname, fsname, sizeof(fsname)))
                 return;
 
-        mutex_lock(&sptlrpc_conf_lock);
+        cfs_mutex_lock(&sptlrpc_conf_lock);
         conf = sptlrpc_conf_get(fsname, 0);
         if (conf)
                 sptlrpc_conf_free(conf);
-        mutex_unlock(&sptlrpc_conf_lock);
+        cfs_mutex_unlock(&sptlrpc_conf_lock);
 }
 EXPORT_SYMBOL(sptlrpc_conf_log_stop);
 
@@ -861,7 +861,7 @@ void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
 
         target2fsname(target->uuid, name, sizeof(name));
 
-        mutex_lock(&sptlrpc_conf_lock);
+        cfs_mutex_lock(&sptlrpc_conf_lock);
 
         conf = sptlrpc_conf_get(name, 0);
         if (conf == NULL)
@@ -883,7 +883,7 @@ void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
 
         rc = sptlrpc_rule_set_choose(&conf->sc_rset, from, to, nid, sf);
 out:
-        mutex_unlock(&sptlrpc_conf_lock);
+        cfs_mutex_unlock(&sptlrpc_conf_lock);
 
         if (rc == 0)
                 get_default_flavor(sf);
@@ -921,18 +921,18 @@ void sptlrpc_conf_client_adapt(struct obd_device *obd)
         CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
 
         /* serialize with connect/disconnect import */
-        down_read(&obd->u.cli.cl_sem);
+        cfs_down_read(&obd->u.cli.cl_sem);
 
         imp = obd->u.cli.cl_import;
         if (imp) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 if (imp->imp_sec)
                         imp->imp_sec_expire = cfs_time_current_sec() +
                                               SEC_ADAPT_DELAY;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         }
 
-        up_read(&obd->u.cli.cl_sem);
+        cfs_up_read(&obd->u.cli.cl_sem);
         EXIT;
 }
 EXPORT_SYMBOL(sptlrpc_conf_client_adapt);
@@ -1002,7 +1002,7 @@ static int sptlrpc_record_rules(struct llog_handle *llh,
 
         sptlrpc_record_rule_set(llh, conf->sc_fsname, &conf->sc_rset);
 
-        list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
+        cfs_list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
                 sptlrpc_record_rule_set(llh, conf_tgt->sct_name,
                                         &conf_tgt->sct_rset);
         }
@@ -1197,7 +1197,7 @@ int sptlrpc_conf_target_get_rules(struct obd_device *obd,
 
         target2fsname(obd->obd_uuid.uuid, fsname, sizeof(fsname));
 
-        mutex_lock(&sptlrpc_conf_lock);
+        cfs_mutex_lock(&sptlrpc_conf_lock);
 
         conf = sptlrpc_conf_get(fsname, 0);
         if (conf == NULL) {
@@ -1234,14 +1234,14 @@ int sptlrpc_conf_target_get_rules(struct obd_device *obd,
                                       conf_tgt ? &conf_tgt->sct_rset: NULL,
                                       LUSTRE_SP_ANY, sp_dst, rset);
 out:
-        mutex_unlock(&sptlrpc_conf_lock);
+        cfs_mutex_unlock(&sptlrpc_conf_lock);
         RETURN(rc);
 }
 EXPORT_SYMBOL(sptlrpc_conf_target_get_rules);
 
 int  sptlrpc_conf_init(void)
 {
-        mutex_init(&sptlrpc_conf_lock);
+        cfs_mutex_init(&sptlrpc_conf_lock);
         return 0;
 }
 
@@ -1249,10 +1249,10 @@ void sptlrpc_conf_fini(void)
 {
         struct sptlrpc_conf  *conf, *conf_next;
 
-        mutex_lock(&sptlrpc_conf_lock);
-        list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
+        cfs_mutex_lock(&sptlrpc_conf_lock);
+        cfs_list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
                 sptlrpc_conf_free(conf);
         }
-        LASSERT(list_empty(&sptlrpc_confs));
-        mutex_unlock(&sptlrpc_conf_lock);
+        LASSERT(cfs_list_empty(&sptlrpc_confs));
+        cfs_mutex_unlock(&sptlrpc_conf_lock);
 }
index a5245e9..344711c 100644 (file)
 
 #ifdef __KERNEL__
 
-static struct mutex sec_gc_mutex;
+static cfs_mutex_t sec_gc_mutex;
 static CFS_LIST_HEAD(sec_gc_list);
-static spinlock_t sec_gc_list_lock;
+static cfs_spinlock_t sec_gc_list_lock;
 
 static CFS_LIST_HEAD(sec_gc_ctx_list);
-static spinlock_t sec_gc_ctx_list_lock;
+static cfs_spinlock_t sec_gc_ctx_list_lock;
 
 static struct ptlrpc_thread sec_gc_thread;
-static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
+static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
 
 
 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
 {
         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
         LASSERT(sec->ps_gc_interval > 0);
-        LASSERT(list_empty(&sec->ps_gc_list));
+        LASSERT(cfs_list_empty(&sec->ps_gc_list));
 
         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
 
-        spin_lock(&sec_gc_list_lock);
-        list_add_tail(&sec_gc_list, &sec->ps_gc_list);
-        spin_unlock(&sec_gc_list_lock);
+        cfs_spin_lock(&sec_gc_list_lock);
+        cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
+        cfs_spin_unlock(&sec_gc_list_lock);
 
         CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
@@ -87,23 +87,23 @@ EXPORT_SYMBOL(sptlrpc_gc_add_sec);
 
 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
 {
-        if (list_empty(&sec->ps_gc_list))
+        if (cfs_list_empty(&sec->ps_gc_list))
                 return;
 
-        might_sleep();
+        cfs_might_sleep();
 
         /* signal before list_del to make iteration in gc thread safe */
-        atomic_inc(&sec_gc_wait_del);
+        cfs_atomic_inc(&sec_gc_wait_del);
 
-        spin_lock(&sec_gc_list_lock);
-        list_del_init(&sec->ps_gc_list);
-        spin_unlock(&sec_gc_list_lock);
+        cfs_spin_lock(&sec_gc_list_lock);
+        cfs_list_del_init(&sec->ps_gc_list);
+        cfs_spin_unlock(&sec_gc_list_lock);
 
         /* barrier */
-        mutex_lock(&sec_gc_mutex);
-        mutex_unlock(&sec_gc_mutex);
+        cfs_mutex_lock(&sec_gc_mutex);
+        cfs_mutex_unlock(&sec_gc_mutex);
 
-        atomic_dec(&sec_gc_wait_del);
+        cfs_atomic_dec(&sec_gc_wait_del);
 
         CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
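
sptlrpc_gc_del_sec() above combines two tricks: the atomic sec_gc_wait_del counter asks the GC loop to restart its pass, and the empty lock/unlock of sec_gc_mutex is a barrier that returns only after the GC thread has dropped the mutex, i.e. is no longer looking at the entry just unlinked. Sketch (function name hypothetical):

    static void my_del(cfs_list_t *entry)
    {
            cfs_atomic_inc(&sec_gc_wait_del);    /* ask GC to back off */

            cfs_spin_lock(&sec_gc_list_lock);
            cfs_list_del_init(entry);
            cfs_spin_unlock(&sec_gc_list_lock);

            cfs_mutex_lock(&sec_gc_mutex);       /* barrier: the current */
            cfs_mutex_unlock(&sec_gc_mutex);     /* GC pass has finished */

            cfs_atomic_dec(&sec_gc_wait_del);
    }
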
@@ -111,13 +111,13 @@ EXPORT_SYMBOL(sptlrpc_gc_del_sec);
 
 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(list_empty(&ctx->cc_gc_chain));
+        LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
 
         CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
                ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
-        spin_lock(&sec_gc_ctx_list_lock);
-        list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
-        spin_unlock(&sec_gc_ctx_list_lock);
+        cfs_spin_lock(&sec_gc_ctx_list_lock);
+        cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+        cfs_spin_unlock(&sec_gc_ctx_list_lock);
 
         sec_gc_thread.t_flags |= SVC_SIGNAL;
         cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
@@ -128,24 +128,24 @@ static void sec_process_ctx_list(void)
 {
         struct ptlrpc_cli_ctx *ctx;
 
-        spin_lock(&sec_gc_ctx_list_lock);
+        cfs_spin_lock(&sec_gc_ctx_list_lock);
 
-        while (!list_empty(&sec_gc_ctx_list)) {
-                ctx = list_entry(sec_gc_ctx_list.next,
-                                 struct ptlrpc_cli_ctx, cc_gc_chain);
-                list_del_init(&ctx->cc_gc_chain);
-                spin_unlock(&sec_gc_ctx_list_lock);
+        while (!cfs_list_empty(&sec_gc_ctx_list)) {
+                ctx = cfs_list_entry(sec_gc_ctx_list.next,
+                                     struct ptlrpc_cli_ctx, cc_gc_chain);
+                cfs_list_del_init(&ctx->cc_gc_chain);
+                cfs_spin_unlock(&sec_gc_ctx_list_lock);
 
                 LASSERT(ctx->cc_sec);
-                LASSERT(atomic_read(&ctx->cc_refcount) == 1);
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
                 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
                        ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
                 sptlrpc_cli_ctx_put(ctx, 1);
 
-                spin_lock(&sec_gc_ctx_list_lock);
+                cfs_spin_lock(&sec_gc_ctx_list_lock);
         }
 
-        spin_unlock(&sec_gc_ctx_list_lock);
+        cfs_spin_unlock(&sec_gc_ctx_list_lock);
 }
 
 static void sec_do_gc(struct ptlrpc_sec *sec)
@@ -190,24 +190,24 @@ again:
                  * to trace each sec in order of expiry time.
                  * another issue: we wake up at a fixed interval instead of
                  * according to each sec's expiry time */
-                mutex_lock(&sec_gc_mutex);
-                list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+                cfs_mutex_lock(&sec_gc_mutex);
+                cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                         /* if someone is waiting to be deleted, let it
                          * proceed as soon as possible. */
-                        if (atomic_read(&sec_gc_wait_del)) {
+                        if (cfs_atomic_read(&sec_gc_wait_del)) {
                                 CWARN("deletion pending, start over\n");
-                                mutex_unlock(&sec_gc_mutex);
+                                cfs_mutex_unlock(&sec_gc_mutex);
                                 goto again;
                         }
 
                         sec_do_gc(sec);
                 }
-                mutex_unlock(&sec_gc_mutex);
+                cfs_mutex_unlock(&sec_gc_mutex);
 
                 /* check ctx list again before sleep */
                 sec_process_ctx_list();
 
-                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
+                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
                 l_wait_event(thread->t_ctl_waitq,
                              thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
                              &lwi);
@@ -228,9 +228,9 @@ int sptlrpc_gc_init(void)
         struct l_wait_info lwi = { 0 };
         int                rc;
 
-        mutex_init(&sec_gc_mutex);
-        spin_lock_init(&sec_gc_list_lock);
-        spin_lock_init(&sec_gc_ctx_list_lock);
+        cfs_mutex_init(&sec_gc_mutex);
+        cfs_spin_lock_init(&sec_gc_list_lock);
+        cfs_spin_lock_init(&sec_gc_ctx_list_lock);
 
         /* initialize thread control */
         memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
index 5a6fae9..e7d40ad 100644 (file)
@@ -110,8 +110,9 @@ static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq, "flags:         %s\n",
                    sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
         seq_printf(seq, "id:            %d\n", sec->ps_id);
-        seq_printf(seq, "refcount:      %d\n", atomic_read(&sec->ps_refcount));
-        seq_printf(seq, "nctx:          %d\n", atomic_read(&sec->ps_nctx));
+        seq_printf(seq, "refcount:      %d\n",
+                   cfs_atomic_read(&sec->ps_refcount));
+        seq_printf(seq, "nctx:          %d\n", cfs_atomic_read(&sec->ps_nctx));
         seq_printf(seq, "gc internal    %ld\n", sec->ps_gc_interval);
         seq_printf(seq, "gc next        %ld\n",
                    sec->ps_gc_interval ?
index 041217e..67dbcfb 100644 (file)
@@ -143,7 +143,7 @@ struct ptlrpc_cli_ctx *null_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
 {
-        atomic_inc(&null_cli_ctx.cc_refcount);
+        cfs_atomic_inc(&null_cli_ctx.cc_refcount);
         return &null_cli_ctx;
 }
 
@@ -274,7 +274,7 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
 }
 
 static struct ptlrpc_svc_ctx null_svc_ctx = {
-        .sc_refcount    = ATOMIC_INIT(1),
+        .sc_refcount    = CFS_ATOMIC_INIT(1),
         .sc_policy      = &null_policy,
 };
 
@@ -295,7 +295,7 @@ int null_accept(struct ptlrpc_request *req)
         req->rq_reqlen = req->rq_reqdata_len;
 
         req->rq_svc_ctx = &null_svc_ctx;
-        atomic_inc(&req->rq_svc_ctx->sc_refcount);
+        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
 
         return SECSVC_OK;
 }
@@ -322,7 +322,7 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
         }
 
         rs->rs_svc_ctx = req->rq_svc_ctx;
-        atomic_inc(&req->rq_svc_ctx->sc_refcount);
+        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
 
         rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
         rs->rs_repbuf_len = rs_size - sizeof(*rs);
@@ -335,8 +335,8 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
 static
 void null_free_rs(struct ptlrpc_reply_state *rs)
 {
-        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
-        atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+        LASSERT(cfs_atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
+        cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);
 
         if (!rs->rs_prealloc)
                 OBD_FREE(rs, rs->rs_size);
@@ -401,31 +401,31 @@ static struct ptlrpc_sec_policy null_policy = {
 
 static void null_init_internal(void)
 {
-        static HLIST_HEAD(__list);
+        static CFS_HLIST_HEAD(__list);
 
         null_sec.ps_policy = &null_policy;
-        atomic_set(&null_sec.ps_refcount, 1);     /* always busy */
+        cfs_atomic_set(&null_sec.ps_refcount, 1);     /* always busy */
         null_sec.ps_id = -1;
         null_sec.ps_import = NULL;
         null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
         null_sec.ps_flvr.sf_flags = 0;
         null_sec.ps_part = LUSTRE_SP_ANY;
         null_sec.ps_dying = 0;
-        spin_lock_init(&null_sec.ps_lock);
-        atomic_set(&null_sec.ps_nctx, 1);         /* for "null_cli_ctx" */
+        cfs_spin_lock_init(&null_sec.ps_lock);
+        cfs_atomic_set(&null_sec.ps_nctx, 1);         /* for "null_cli_ctx" */
         CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
         null_sec.ps_gc_interval = 0;
         null_sec.ps_gc_next = 0;
 
-        hlist_add_head(&null_cli_ctx.cc_cache, &__list);
-        atomic_set(&null_cli_ctx.cc_refcount, 1);    /* for hash */
+        cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
+        cfs_atomic_set(&null_cli_ctx.cc_refcount, 1);    /* for hash */
         null_cli_ctx.cc_sec = &null_sec;
         null_cli_ctx.cc_ops = &null_ctx_ops;
         null_cli_ctx.cc_expire = 0;
         null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
                                 PTLRPC_CTX_UPTODATE;
         null_cli_ctx.cc_vcred.vc_uid = 0;
-        spin_lock_init(&null_cli_ctx.cc_lock);
+        cfs_spin_lock_init(&null_cli_ctx.cc_lock);
         CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
         CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
 }
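
Two initialization styles coexist in this file for the same invariant: CFS_ATOMIC_INIT(1) for objects built at compile time (null_svc_ctx) and cfs_atomic_set(..., 1) for those wired up at runtime (null_init_internal above). Either way the count starts at one and is never dropped, so the generic put paths can run but never free these singletons. The compile-time form, mirroring null_svc_ctx:

    static struct ptlrpc_svc_ctx my_svc_ctx = {
            .sc_refcount = CFS_ATOMIC_INIT(1),   /* pinned forever */
            .sc_policy   = &null_policy,
    };
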
index 2b34adc..6bfa013 100644 (file)
@@ -55,7 +55,7 @@
 
 struct plain_sec {
         struct ptlrpc_sec       pls_base;
-        rwlock_t                pls_lock;
+        cfs_rwlock_t            pls_lock;
         struct ptlrpc_cli_ctx  *pls_ctx;
 };
 
@@ -403,35 +403,35 @@ struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
 
         OBD_ALLOC_PTR(ctx_new);
 
-        write_lock(&plsec->pls_lock);
+        cfs_write_lock(&plsec->pls_lock);
 
         ctx = plsec->pls_ctx;
         if (ctx) {
-                atomic_inc(&ctx->cc_refcount);
+                cfs_atomic_inc(&ctx->cc_refcount);
 
                 if (ctx_new)
                         OBD_FREE_PTR(ctx_new);
         } else if (ctx_new) {
                 ctx = ctx_new;
 
-                atomic_set(&ctx->cc_refcount, 1); /* for cache */
+                cfs_atomic_set(&ctx->cc_refcount, 1); /* for cache */
                 ctx->cc_sec = &plsec->pls_base;
                 ctx->cc_ops = &plain_ctx_ops;
                 ctx->cc_expire = 0;
                 ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                 ctx->cc_vcred.vc_uid = 0;
-                spin_lock_init(&ctx->cc_lock);
+                cfs_spin_lock_init(&ctx->cc_lock);
                 CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
                 CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
 
                 plsec->pls_ctx = ctx;
-                atomic_inc(&plsec->pls_base.ps_nctx);
-                atomic_inc(&plsec->pls_base.ps_refcount);
+                cfs_atomic_inc(&plsec->pls_base.ps_nctx);
+                cfs_atomic_inc(&plsec->pls_base.ps_refcount);
 
-                atomic_inc(&ctx->cc_refcount); /* for caller */
+                cfs_atomic_inc(&ctx->cc_refcount); /* for caller */
         }
 
-        write_unlock(&plsec->pls_lock);
+        cfs_write_unlock(&plsec->pls_lock);
 
         return ctx;
 }
@@ -444,8 +444,8 @@ void plain_destroy_sec(struct ptlrpc_sec *sec)
 
         LASSERT(sec->ps_policy == &plain_policy);
         LASSERT(sec->ps_import);
-        LASSERT(atomic_read(&sec->ps_refcount) == 0);
-        LASSERT(atomic_read(&sec->ps_nctx) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
         LASSERT(plsec->pls_ctx == NULL);
 
         class_import_put(sec->ps_import);
@@ -479,17 +479,17 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
         /*
          * initialize plain_sec
          */
-        rwlock_init(&plsec->pls_lock);
+        cfs_rwlock_init(&plsec->pls_lock);
         plsec->pls_ctx = NULL;
 
         sec = &plsec->pls_base;
         sec->ps_policy = &plain_policy;
-        atomic_set(&sec->ps_refcount, 0);
-        atomic_set(&sec->ps_nctx, 0);
+        cfs_atomic_set(&sec->ps_refcount, 0);
+        cfs_atomic_set(&sec->ps_nctx, 0);
         sec->ps_id = sptlrpc_get_next_secid();
         sec->ps_import = class_import_get(imp);
         sec->ps_flvr = *sf;
-        spin_lock_init(&sec->ps_lock);
+        cfs_spin_lock_init(&sec->ps_lock);
         CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
         sec->ps_gc_interval = 0;
         sec->ps_gc_next = 0;
@@ -516,11 +516,11 @@ struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
         struct ptlrpc_cli_ctx  *ctx;
         ENTRY;
 
-        read_lock(&plsec->pls_lock);
+        cfs_read_lock(&plsec->pls_lock);
         ctx = plsec->pls_ctx;
         if (ctx)
-                atomic_inc(&ctx->cc_refcount);
-        read_unlock(&plsec->pls_lock);
+                cfs_atomic_inc(&ctx->cc_refcount);
+        cfs_read_unlock(&plsec->pls_lock);
 
         if (unlikely(ctx == NULL))
                 ctx = plain_sec_install_ctx(plsec);
@@ -532,14 +532,14 @@ static
 void plain_release_ctx(struct ptlrpc_sec *sec,
                        struct ptlrpc_cli_ctx *ctx, int sync)
 {
-        LASSERT(atomic_read(&sec->ps_refcount) > 0);
-        LASSERT(atomic_read(&sec->ps_nctx) > 0);
-        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
         LASSERT(ctx->cc_sec == sec);
 
         OBD_FREE_PTR(ctx);
 
-        atomic_dec(&sec->ps_nctx);
+        cfs_atomic_dec(&sec->ps_nctx);
         sptlrpc_sec_put(sec);
 }
 
@@ -555,10 +555,10 @@ int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
         if (uid != -1)
                 RETURN(0);
 
-        write_lock(&plsec->pls_lock);
+        cfs_write_lock(&plsec->pls_lock);
         ctx = plsec->pls_ctx;
         plsec->pls_ctx = NULL;
-        write_unlock(&plsec->pls_lock);
+        cfs_write_unlock(&plsec->pls_lock);
 
         if (ctx)
                 sptlrpc_cli_ctx_put(ctx, 1);
@@ -732,7 +732,7 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
  ****************************************/
 
 static struct ptlrpc_svc_ctx plain_svc_ctx = {
-        .sc_refcount    = ATOMIC_INIT(1),
+        .sc_refcount    = CFS_ATOMIC_INIT(1),
         .sc_policy      = &plain_policy,
 };
 
@@ -803,7 +803,7 @@ int plain_accept(struct ptlrpc_request *req)
         req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
 
         req->rq_svc_ctx = &plain_svc_ctx;
-        atomic_inc(&req->rq_svc_ctx->sc_refcount);
+        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
 
         RETURN(SECSVC_OK);
 }
@@ -840,7 +840,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
         }
 
         rs->rs_svc_ctx = req->rq_svc_ctx;
-        atomic_inc(&req->rq_svc_ctx->sc_refcount);
+        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
         rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
         rs->rs_repbuf_len = rs_size - sizeof(*rs);
 
@@ -856,8 +856,8 @@ void plain_free_rs(struct ptlrpc_reply_state *rs)
 {
         ENTRY;
 
-        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
-        atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+        LASSERT(cfs_atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
+        cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);
 
         if (!rs->rs_prealloc)
                 OBD_FREE(rs, rs->rs_size);
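
The plain policy conversion also retypes the lock guarding pls_ctx from rwlock_t to cfs_rwlock_t. A minimal sketch of the assumed Linux mapping (illustrative; the actual wrappers presumably live in linux-lock.h and may differ in detail):

/* Hypothetical one-to-one Linux mapping for the cfs_rwlock_t API
 * used by plain_sec; not verbatim from the patch. */
#include <linux/spinlock.h>

typedef rwlock_t cfs_rwlock_t;

#define cfs_rwlock_init(l)     rwlock_init(l)
#define cfs_read_lock(l)       read_lock(l)
#define cfs_read_unlock(l)     read_unlock(l)
#define cfs_write_lock(l)      write_lock(l)
#define cfs_write_unlock(l)    write_unlock(l)

The locking discipline itself is unchanged: plain_lookup_ctx() takes the lock shared to bump the context refcount, while plain_sec_install_ctx() and plain_flush_ctx_cache() take it exclusive to swap pls_ctx.
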
index 4818e7a..8f31ef1 100644
--- a/lustre/ptlrpc/service.c
+++ b/lustre/ptlrpc/service.c
@@ -66,7 +66,7 @@ CFS_MODULE_PARM(at_extra, "i", int, 0644,
 static int ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc);
 
 static CFS_LIST_HEAD(ptlrpc_all_services);
-spinlock_t ptlrpc_all_services_lock;
+cfs_spinlock_t ptlrpc_all_services_lock;
 
 static char *
 ptlrpc_alloc_request_buffer (int size)
@@ -111,10 +111,10 @@ ptlrpc_alloc_rqbd (struct ptlrpc_service *svc)
                 return (NULL);
         }
 
-        spin_lock(&svc->srv_lock);
-        list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
+        cfs_spin_lock(&svc->srv_lock);
+        cfs_list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
         svc->srv_nbufs++;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         return (rqbd);
 }
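
The service code applies the same pattern to its spinlocks. A sketch of the assumed Linux definitions behind cfs_spinlock_t (illustrative only):

/* Hypothetical Linux mapping for the cfs_spin_lock API used
 * throughout this file; real definitions may differ. */
#include <linux/spinlock.h>

typedef spinlock_t cfs_spinlock_t;

#define cfs_spin_lock_init(l)   spin_lock_init(l)
#define cfs_spin_lock(l)        spin_lock(l)
#define cfs_spin_unlock(l)      spin_unlock(l)

Because the wrappers compile away on Linux, critical sections such as the srv_lock region in ptlrpc_alloc_rqbd() above keep exactly their previous semantics.
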
@@ -125,12 +125,12 @@ ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
         struct ptlrpc_service *svc = rqbd->rqbd_service;
 
         LASSERT (rqbd->rqbd_refcount == 0);
-        LASSERT (list_empty(&rqbd->rqbd_reqs));
+        LASSERT (cfs_list_empty(&rqbd->rqbd_reqs));
 
-        spin_lock(&svc->srv_lock);
-        list_del(&rqbd->rqbd_list);
+        cfs_spin_lock(&svc->srv_lock);
+        cfs_list_del(&rqbd->rqbd_list);
         svc->srv_nbufs--;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size);
         OBD_FREE_PTR(rqbd);
@@ -188,11 +188,11 @@ ptlrpc_save_lock(struct ptlrpc_request *req,
 #define HRT_STOPPING 1
 
 struct ptlrpc_hr_thread {
-        spinlock_t        hrt_lock;
-        unsigned long     hrt_flags;
-        cfs_waitq_t       hrt_wait;
-        struct list_head  hrt_queue;
-        struct completion hrt_completion;
+        cfs_spinlock_t        hrt_lock;
+        unsigned long         hrt_flags;
+        cfs_waitq_t           hrt_wait;
+        cfs_list_t            hrt_queue;
+        cfs_completion_t      hrt_completion;
 };
 
 struct ptlrpc_hr_service {
@@ -203,7 +203,7 @@ struct ptlrpc_hr_service {
 };
 
 struct rs_batch {
-        struct list_head        rsb_replies;
+        cfs_list_t              rsb_replies;
         struct ptlrpc_service  *rsb_svc;
         unsigned int            rsb_n_replies;
 };
@@ -245,10 +245,10 @@ static void rs_batch_dispatch(struct rs_batch *b)
                 if (hr->hr_index >= hr->hr_n_threads)
                         hr->hr_index = 0;
 
-                spin_lock(&hr->hr_threads[idx].hrt_lock);
-                list_splice_init(&b->rsb_replies,
-                                 &hr->hr_threads[idx].hrt_queue);
-                spin_unlock(&hr->hr_threads[idx].hrt_lock);
+                cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
+                cfs_list_splice_init(&b->rsb_replies,
+                                     &hr->hr_threads[idx].hrt_queue);
+                cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
                 cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
                 b->rsb_n_replies = 0;
         }
@@ -268,20 +268,20 @@ static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
         if (svc != b->rsb_svc || b->rsb_n_replies >= MAX_SCHEDULED) {
                 if (b->rsb_svc != NULL) {
                         rs_batch_dispatch(b);
-                        spin_unlock(&b->rsb_svc->srv_lock);
+                        cfs_spin_unlock(&b->rsb_svc->srv_lock);
                 }
-                spin_lock(&svc->srv_lock);
+                cfs_spin_lock(&svc->srv_lock);
                 b->rsb_svc = svc;
         }
-        spin_lock(&rs->rs_lock);
+        cfs_spin_lock(&rs->rs_lock);
         rs->rs_scheduled_ever = 1;
         if (rs->rs_scheduled == 0) {
-                list_move(&rs->rs_list, &b->rsb_replies);
+                cfs_list_move(&rs->rs_list, &b->rsb_replies);
                 rs->rs_scheduled = 1;
                 b->rsb_n_replies++;
         }
         rs->rs_committed = 1;
-        spin_unlock(&rs->rs_lock);
+        cfs_spin_unlock(&rs->rs_lock);
 }
 
 /**
@@ -295,7 +295,7 @@ static void rs_batch_fini(struct rs_batch *b)
 {
         if (b->rsb_svc != 0) {
                 rs_batch_dispatch(b);
-                spin_unlock(&b->rsb_svc->srv_lock);
+                cfs_spin_unlock(&b->rsb_svc->srv_lock);
         }
 }
 
@@ -317,18 +317,18 @@ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
         int idx;
         ENTRY;
 
-        LASSERT(list_empty(&rs->rs_list));
+        LASSERT(cfs_list_empty(&rs->rs_list));
 
         idx = hr->hr_index++;
         if (hr->hr_index >= hr->hr_n_threads)
                 hr->hr_index = 0;
-        spin_lock(&hr->hr_threads[idx].hrt_lock);
-        list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue);
-        spin_unlock(&hr->hr_threads[idx].hrt_lock);
+        cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
+        cfs_list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue);
+        cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
         cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
         EXIT;
 #else
-        list_add_tail(&rs->rs_list, &rs->rs_service->srv_reply_queue);
+        cfs_list_add_tail(&rs->rs_list, &rs->rs_service->srv_reply_queue);
 #endif
 }
 
@@ -348,7 +348,7 @@ ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs)
         }
 
         rs->rs_scheduled = 1;
-        list_del_init(&rs->rs_list);
+        cfs_list_del_init(&rs->rs_list);
         ptlrpc_dispatch_difficult_reply(rs);
         EXIT;
 }
@@ -364,18 +364,18 @@ void ptlrpc_commit_replies(struct obd_export *exp)
          * to attend to complete them. */
 
         /* CAVEAT EMPTOR: spinlock ordering!!! */
-        spin_lock(&exp->exp_uncommitted_replies_lock);
-        list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
-                                 rs_obd_list) {
+        cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
+        cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
+                                     rs_obd_list) {
                 LASSERT (rs->rs_difficult);
                 /* VBR: per-export last_committed */
                 LASSERT(rs->rs_export);
                 if (rs->rs_transno <= exp->exp_last_committed) {
-                        list_del_init(&rs->rs_obd_list);
+                        cfs_list_del_init(&rs->rs_obd_list);
                         rs_batch_add(&batch, rs);
                 }
         }
-        spin_unlock(&exp->exp_uncommitted_replies_lock);
+        cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
         rs_batch_fini(&batch);
         EXIT;
 }
@@ -388,23 +388,23 @@ ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
         int                                posted = 0;
 
         for (;;) {
-                spin_lock(&svc->srv_lock);
+                cfs_spin_lock(&svc->srv_lock);
 
-                if (list_empty (&svc->srv_idle_rqbds)) {
-                        spin_unlock(&svc->srv_lock);
+                if (cfs_list_empty (&svc->srv_idle_rqbds)) {
+                        cfs_spin_unlock(&svc->srv_lock);
                         return (posted);
                 }
 
-                rqbd = list_entry(svc->srv_idle_rqbds.next,
-                                  struct ptlrpc_request_buffer_desc,
-                                  rqbd_list);
-                list_del (&rqbd->rqbd_list);
+                rqbd = cfs_list_entry(svc->srv_idle_rqbds.next,
+                                      struct ptlrpc_request_buffer_desc,
+                                      rqbd_list);
+                cfs_list_del (&rqbd->rqbd_list);
 
                 /* assume we will post successfully */
                 svc->srv_nrqbd_receiving++;
-                list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds);
+                cfs_list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds);
 
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
 
                 rc = ptlrpc_register_rqbd(rqbd);
                 if (rc != 0)
@@ -413,16 +413,16 @@ ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
                 posted = 1;
         }
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
 
         svc->srv_nrqbd_receiving--;
-        list_del(&rqbd->rqbd_list);
-        list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
+        cfs_list_del(&rqbd->rqbd_list);
+        cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
 
         /* Don't complain if no request buffers are posted right now; LNET
          * won't drop requests because we set the portal lazy! */
 
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         return (-1);
 }
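
Most of the churn in this file is the struct list_head to cfs_list_t rename. A sketch of the assumed Linux side (the list wrappers are presumably thin aliases; the definitions below are illustrative):

/* Hypothetical Linux aliases for the cfs_list_* API used in the
 * hunks above and below; the real libcfs list header may differ. */
#include <linux/list.h>

typedef struct list_head cfs_list_t;

#define CFS_LIST_HEAD(name)                  LIST_HEAD(name)
#define CFS_INIT_LIST_HEAD(head)             INIT_LIST_HEAD(head)
#define cfs_list_add(item, head)             list_add((item), (head))
#define cfs_list_add_tail(item, head)        list_add_tail((item), (head))
#define cfs_list_del(item)                   list_del(item)
#define cfs_list_del_init(item)              list_del_init(item)
#define cfs_list_empty(head)                 list_empty(head)
#define cfs_list_move(item, head)            list_move((item), (head))
#define cfs_list_move_tail(item, head)       list_move_tail((item), (head))
#define cfs_list_splice_init(list, head)     list_splice_init((list), (head))
#define cfs_list_entry(ptr, type, member)    list_entry(ptr, type, member)
#define cfs_list_for_each(pos, head)         list_for_each(pos, head)
#define cfs_list_for_each_safe(pos, n, head) list_for_each_safe(pos, n, head)

On Linux these compile away; the point of the indirection is that non-Linux ports can supply their own cfs_list_t definition without colliding with kernel names.
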
@@ -479,7 +479,7 @@ ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size,
         /* First initialise enough for early teardown */
 
         service->srv_name = name;
-        spin_lock_init(&service->srv_lock);
+        cfs_spin_lock_init(&service->srv_lock);
         CFS_INIT_LIST_HEAD(&service->srv_threads);
         cfs_waitq_init(&service->srv_waitq);
 
@@ -517,9 +517,9 @@ ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size,
 #endif
         CFS_INIT_LIST_HEAD(&service->srv_free_rs_list);
         cfs_waitq_init(&service->srv_free_rs_waitq);
-        atomic_set(&service->srv_n_difficult_replies, 0);
+        cfs_atomic_set(&service->srv_n_difficult_replies, 0);
 
-        spin_lock_init(&service->srv_at_lock);
+        cfs_spin_lock_init(&service->srv_at_lock);
         CFS_INIT_LIST_HEAD(&service->srv_req_in_queue);
 
         array = &service->srv_at_array;
@@ -529,7 +529,7 @@ ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size,
         array->paa_deadline = -1;
 
         /* allocate memory for srv_at_array (ptlrpc_at_array) */
-        OBD_ALLOC(array->paa_reqs_array, sizeof(struct list_head) * size);
+        OBD_ALLOC(array->paa_reqs_array, sizeof(cfs_list_t) * size);
         if (array->paa_reqs_array == NULL)
                 GOTO(failed, NULL);
 
@@ -545,9 +545,9 @@ ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size,
            timeout is less than this, we'll be sending an early reply. */
         at_init(&service->srv_at_estimate, 10, 0);
 
-        spin_lock (&ptlrpc_all_services_lock);
-        list_add (&service->srv_list, &ptlrpc_all_services);
-        spin_unlock (&ptlrpc_all_services_lock);
+        cfs_spin_lock (&ptlrpc_all_services_lock);
+        cfs_list_add (&service->srv_list, &ptlrpc_all_services);
+        cfs_spin_unlock (&ptlrpc_all_services_lock);
 
         /* Now allocate the request buffers */
         rc = ptlrpc_grow_req_bufs(service);
@@ -581,8 +581,8 @@ failed:
  */
 static void ptlrpc_server_free_request(struct ptlrpc_request *req)
 {
-        LASSERT(atomic_read(&req->rq_refcount) == 0);
-        LASSERT(list_empty(&req->rq_timed_list));
+        LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
+        LASSERT(cfs_list_empty(&req->rq_timed_list));
 
          /* DEBUG_REQ() assumes the reply state of a request with a valid
           * ref will not be destroyed until that reference is dropped. */
@@ -607,25 +607,25 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
         struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
         struct ptlrpc_service             *svc = rqbd->rqbd_service;
         int                                refcount;
-        struct list_head                  *tmp;
-        struct list_head                  *nxt;
+        cfs_list_t                        *tmp;
+        cfs_list_t                        *nxt;
 
-        if (!atomic_dec_and_test(&req->rq_refcount))
+        if (!cfs_atomic_dec_and_test(&req->rq_refcount))
                 return;
 
-        spin_lock(&svc->srv_at_lock);
+        cfs_spin_lock(&svc->srv_at_lock);
         if (req->rq_at_linked) {
                 struct ptlrpc_at_array *array = &svc->srv_at_array;
                 __u32 index = req->rq_at_index;
 
-                LASSERT(!list_empty(&req->rq_timed_list));
-                list_del_init(&req->rq_timed_list);
+                LASSERT(!cfs_list_empty(&req->rq_timed_list));
+                cfs_list_del_init(&req->rq_timed_list);
                 req->rq_at_linked = 0;
                 array->paa_reqs_count[index]--;
                 array->paa_count--;
         } else
-                LASSERT(list_empty(&req->rq_timed_list));
-        spin_unlock(&svc->srv_at_lock);
+                LASSERT(cfs_list_empty(&req->rq_timed_list));
+        cfs_spin_unlock(&svc->srv_at_lock);
 
         /* finalize request */
         if (req->rq_export) {
@@ -633,70 +633,72 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
                 req->rq_export = NULL;
         }
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
 
         svc->srv_n_active_reqs--;
-        list_add(&req->rq_list, &rqbd->rqbd_reqs);
+        cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
 
         refcount = --(rqbd->rqbd_refcount);
         if (refcount == 0) {
                 /* request buffer is now idle: add to history */
-                list_del(&rqbd->rqbd_list);
-                list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds);
+                cfs_list_del(&rqbd->rqbd_list);
+                cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds);
                 svc->srv_n_history_rqbds++;
 
                 /* cull some history?
                  * I expect only about 1 or 2 rqbds need to be recycled here */
                 while (svc->srv_n_history_rqbds > svc->srv_max_history_rqbds) {
-                        rqbd = list_entry(svc->srv_history_rqbds.next,
-                                          struct ptlrpc_request_buffer_desc,
-                                          rqbd_list);
+                        rqbd = cfs_list_entry(svc->srv_history_rqbds.next,
+                                              struct ptlrpc_request_buffer_desc,
+                                              rqbd_list);
 
-                        list_del(&rqbd->rqbd_list);
+                        cfs_list_del(&rqbd->rqbd_list);
                         svc->srv_n_history_rqbds--;
 
                         /* remove rqbd's reqs from svc's req history while
                          * I've got the service lock */
-                        list_for_each(tmp, &rqbd->rqbd_reqs) {
-                                req = list_entry(tmp, struct ptlrpc_request,
-                                                 rq_list);
+                        cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
+                                req = cfs_list_entry(tmp, struct ptlrpc_request,
+                                                     rq_list);
                                 /* Track the highest culled req seq */
                                 if (req->rq_history_seq >
                                     svc->srv_request_max_cull_seq)
                                         svc->srv_request_max_cull_seq =
                                                 req->rq_history_seq;
-                                list_del(&req->rq_history_list);
+                                cfs_list_del(&req->rq_history_list);
                         }
 
-                        spin_unlock(&svc->srv_lock);
+                        cfs_spin_unlock(&svc->srv_lock);
 
-                        list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
-                                req = list_entry(rqbd->rqbd_reqs.next,
-                                                 struct ptlrpc_request,
-                                                 rq_list);
-                                list_del(&req->rq_list);
+                        cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
+                                req = cfs_list_entry(rqbd->rqbd_reqs.next,
+                                                     struct ptlrpc_request,
+                                                     rq_list);
+                                cfs_list_del(&req->rq_list);
                                 ptlrpc_server_free_request(req);
                         }
 
-                        spin_lock(&svc->srv_lock);
+                        cfs_spin_lock(&svc->srv_lock);
                         /*
                          * now all reqs including the embedded req has been
                          * disposed, schedule request buffer for re-use.
                          */
-                        LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
-                        list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
+                        LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
+                                0);
+                        cfs_list_add_tail(&rqbd->rqbd_list,
+                                          &svc->srv_idle_rqbds);
                 }
 
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
         } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
                 /* If we are low on memory, we are not interested in history */
-                list_del(&req->rq_list);
-                list_del_init(&req->rq_history_list);
-                spin_unlock(&svc->srv_lock);
+                cfs_list_del(&req->rq_list);
+                cfs_list_del_init(&req->rq_history_list);
+                cfs_spin_unlock(&svc->srv_lock);
 
                 ptlrpc_server_free_request(req);
         } else {
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
         }
 }
 
@@ -740,21 +742,21 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
         /* exports may get disconnected from the chain even though the
            export has references, so we must keep the spin lock while
            manipulating the lists */
-        spin_lock(&exp->exp_obd->obd_dev_lock);
+        cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
 
-        if (list_empty(&exp->exp_obd_chain_timed)) {
+        if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
                 /* this one is not timed */
-                spin_unlock(&exp->exp_obd->obd_dev_lock);
+                cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
                 RETURN_EXIT;
         }
 
-        list_move_tail(&exp->exp_obd_chain_timed,
-                       &exp->exp_obd->obd_exports_timed);
+        cfs_list_move_tail(&exp->exp_obd_chain_timed,
+                           &exp->exp_obd->obd_exports_timed);
 
-        oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next,
-                                struct obd_export, exp_obd_chain_timed);
+        oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
+                                    struct obd_export, exp_obd_chain_timed);
         oldest_time = oldest_exp->exp_last_request_time;
-        spin_unlock(&exp->exp_obd->obd_dev_lock);
+        cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
 
         if (exp->exp_obd->obd_recovering) {
                 /* be nice to everyone during recovery */
@@ -820,10 +822,10 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service *svc)
         struct ptlrpc_at_array *array = &svc->srv_at_array;
         __s32 next;
 
-        spin_lock(&svc->srv_at_lock);
+        cfs_spin_lock(&svc->srv_at_lock);
         if (array->paa_count == 0) {
                 cfs_timer_disarm(&svc->srv_at_timer);
-                spin_unlock(&svc->srv_at_lock);
+                cfs_spin_unlock(&svc->srv_at_lock);
                 return;
         }
 
@@ -834,7 +836,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service *svc)
                 ptlrpc_at_timer((unsigned long)svc);
         else
                 cfs_timer_arm(&svc->srv_at_timer, cfs_time_shift(next));
-        spin_unlock(&svc->srv_at_lock);
+        cfs_spin_unlock(&svc->srv_at_lock);
         CDEBUG(D_INFO, "armed %s at %+ds\n", svc->srv_name, next);
 }
 
@@ -856,26 +858,28 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
                 return(-ENOSYS);
 
-        spin_lock(&svc->srv_at_lock);
-        LASSERT(list_empty(&req->rq_timed_list));
+        cfs_spin_lock(&svc->srv_at_lock);
+        LASSERT(cfs_list_empty(&req->rq_timed_list));
 
         index = (unsigned long)req->rq_deadline % array->paa_size;
         if (array->paa_reqs_count[index] > 0) {
                 /* latest rpcs will have the latest deadlines in the list,
                  * so search backward. */
-                list_for_each_entry_reverse(rq, &array->paa_reqs_array[index],
-                                            rq_timed_list) {
+                cfs_list_for_each_entry_reverse(rq,
+                                                &array->paa_reqs_array[index],
+                                                rq_timed_list) {
                         if (req->rq_deadline >= rq->rq_deadline) {
-                                list_add(&req->rq_timed_list,
-                                         &rq->rq_timed_list);
+                                cfs_list_add(&req->rq_timed_list,
+                                             &rq->rq_timed_list);
                                 break;
                         }
                 }
         }
 
         /* Add the request at the head of the list */
-        if (list_empty(&req->rq_timed_list))
-                list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);
+        if (cfs_list_empty(&req->rq_timed_list))
+                cfs_list_add(&req->rq_timed_list,
+                             &array->paa_reqs_array[index]);
 
         req->rq_at_linked = 1;
         req->rq_at_index = index;
@@ -885,7 +889,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
                 array->paa_deadline = req->rq_deadline;
                 found = 1;
         }
-        spin_unlock(&svc->srv_at_lock);
+        cfs_spin_unlock(&svc->srv_at_lock);
 
         if (found)
                 ptlrpc_at_set_timer(svc);
@@ -976,9 +980,9 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
         reqcopy->rq_reqmsg = reqmsg;
         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
 
-        LASSERT(atomic_read(&req->rq_refcount));
+        LASSERT(cfs_atomic_read(&req->rq_refcount));
         /** if it is last refcount then early reply isn't needed */
-        if (atomic_read(&req->rq_refcount) == 1) {
+        if (cfs_atomic_read(&req->rq_refcount) == 1) {
                 DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
                           "abort sending early reply\n");
                 GOTO(out, rc = -EINVAL);
@@ -1029,7 +1033,7 @@ out:
 static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
 {
         struct ptlrpc_request *rq, *n;
-        struct list_head work_list;
+        cfs_list_t work_list;
         struct ptlrpc_at_array *array = &svc->srv_at_array;
         __u32  index, count;
         time_t deadline;
@@ -1038,16 +1042,16 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
         int first, counter = 0;
         ENTRY;
 
-        spin_lock(&svc->srv_at_lock);
+        cfs_spin_lock(&svc->srv_at_lock);
         if (svc->srv_at_check == 0) {
-                spin_unlock(&svc->srv_at_lock);
+                cfs_spin_unlock(&svc->srv_at_lock);
                 RETURN(0);
         }
         delay = cfs_time_sub(cfs_time_current(), svc->srv_at_checktime);
         svc->srv_at_check = 0;
 
         if (array->paa_count == 0) {
-                spin_unlock(&svc->srv_at_lock);
+                cfs_spin_unlock(&svc->srv_at_lock);
                 RETURN(0);
         }
 
@@ -1055,7 +1059,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
         first = array->paa_deadline - now;
         if (first > at_early_margin) {
                 /* We've still got plenty of time.  Reset the timer. */
-                spin_unlock(&svc->srv_at_lock);
+                cfs_spin_unlock(&svc->srv_at_lock);
                 ptlrpc_at_set_timer(svc);
                 RETURN(0);
         }
@@ -1068,17 +1072,18 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
         count = array->paa_count;
         while (count > 0) {
                 count -= array->paa_reqs_count[index];
-                list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
-                                         rq_timed_list) {
+                cfs_list_for_each_entry_safe(rq, n,
+                                             &array->paa_reqs_array[index],
+                                             rq_timed_list) {
                         if (rq->rq_deadline <= now + at_early_margin) {
-                                list_del_init(&rq->rq_timed_list);
+                                cfs_list_del_init(&rq->rq_timed_list);
                                 /**
                                  * ptlrpc_server_drop_request() may drop
                                  * refcount to 0 already. Let's check this and
                                  * don't add entry to work_list
                                  */
-                                if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
-                                        list_add(&rq->rq_timed_list, &work_list);
+                                if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
+                                        cfs_list_add(&rq->rq_timed_list, &work_list);
                                 counter++;
                                 array->paa_reqs_count[index]--;
                                 array->paa_count--;
@@ -1097,7 +1102,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
                         index = 0;
         }
         array->paa_deadline = deadline;
-        spin_unlock(&svc->srv_at_lock);
+        cfs_spin_unlock(&svc->srv_at_lock);
 
         /* we have a new earliest deadline, restart the timer */
         ptlrpc_at_set_timer(svc);
@@ -1117,10 +1122,10 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
 
         /* we took additional refcount so entries can't be deleted from list, no
          * locking is needed */
-        while (!list_empty(&work_list)) {
-                rq = list_entry(work_list.next, struct ptlrpc_request,
-                                rq_timed_list);
-                list_del_init(&rq->rq_timed_list);
+        while (!cfs_list_empty(&work_list)) {
+                rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
+                                    rq_timed_list);
+                cfs_list_del_init(&rq->rq_timed_list);
 
                 if (ptlrpc_at_send_early_reply(rq) == 0)
                         ptlrpc_at_add_timed(rq);
@@ -1147,9 +1152,10 @@ static int ptlrpc_hpreq_init(struct ptlrpc_service *svc,
                         RETURN(rc);
         }
         if (req->rq_export && req->rq_ops) {
-                spin_lock(&req->rq_export->exp_lock);
-                list_add(&req->rq_exp_list, &req->rq_export->exp_queued_rpc);
-                spin_unlock(&req->rq_export->exp_lock);
+                cfs_spin_lock(&req->rq_export->exp_lock);
+                cfs_list_add(&req->rq_exp_list,
+                             &req->rq_export->exp_queued_rpc);
+                cfs_spin_unlock(&req->rq_export->exp_lock);
         }
 
         RETURN(0);
@@ -1160,9 +1166,9 @@ static void ptlrpc_hpreq_fini(struct ptlrpc_request *req)
 {
         ENTRY;
         if (req->rq_export && req->rq_ops) {
-                spin_lock(&req->rq_export->exp_lock);
-                list_del_init(&req->rq_exp_list);
-                spin_unlock(&req->rq_export->exp_lock);
+                cfs_spin_lock(&req->rq_export->exp_lock);
+                cfs_list_del_init(&req->rq_exp_list);
+                cfs_spin_unlock(&req->rq_export->exp_lock);
         }
         EXIT;
 }
@@ -1182,17 +1188,17 @@ static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service *svc,
 {
         ENTRY;
         LASSERT(svc != NULL);
-        spin_lock(&req->rq_lock);
+        cfs_spin_lock(&req->rq_lock);
         if (req->rq_hp == 0) {
                 int opc = lustre_msg_get_opc(req->rq_reqmsg);
 
                 /* Add to the high priority queue. */
-                list_move_tail(&req->rq_list, &svc->srv_request_hpq);
+                cfs_list_move_tail(&req->rq_list, &svc->srv_request_hpq);
                 req->rq_hp = 1;
                 if (opc != OBD_PING)
                         DEBUG_REQ(D_NET, req, "high priority req");
         }
-        spin_unlock(&req->rq_lock);
+        cfs_spin_unlock(&req->rq_lock);
         EXIT;
 }
 
@@ -1201,12 +1207,12 @@ void ptlrpc_hpreq_reorder(struct ptlrpc_request *req)
         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
         ENTRY;
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         /* It may happen that the request is already taken for the processing
          * but still in the export list, do not re-add it into the HP list. */
         if (req->rq_phase == RQ_PHASE_NEW)
                 ptlrpc_hpreq_reorder_nolock(svc, req);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
         EXIT;
 }
 
@@ -1238,17 +1244,18 @@ static int ptlrpc_server_request_add(struct ptlrpc_service *svc,
         if (rc < 0)
                 RETURN(rc);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         /* Before inserting the request into the queue, check if it is not
          * inserted yet, or even already handled -- it may happen due to
          * a racing ldlm_server_blocking_ast(). */
-        if (req->rq_phase == RQ_PHASE_NEW && list_empty(&req->rq_list)) {
+        if (req->rq_phase == RQ_PHASE_NEW && cfs_list_empty(&req->rq_list)) {
                 if (rc)
                         ptlrpc_hpreq_reorder_nolock(svc, req);
                 else
-                        list_add_tail(&req->rq_list, &svc->srv_request_queue);
+                        cfs_list_add_tail(&req->rq_list,
+                                          &svc->srv_request_queue);
         }
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         RETURN(0);
 }
@@ -1271,15 +1278,15 @@ ptlrpc_server_request_get(struct ptlrpc_service *svc, int force)
         ENTRY;
 
         if (ptlrpc_server_allow_normal(svc, force) &&
-            !list_empty(&svc->srv_request_queue) &&
-            (list_empty(&svc->srv_request_hpq) ||
+            !cfs_list_empty(&svc->srv_request_queue) &&
+            (cfs_list_empty(&svc->srv_request_hpq) ||
              svc->srv_hpreq_count >= svc->srv_hpreq_ratio)) {
-                req = list_entry(svc->srv_request_queue.next,
-                                 struct ptlrpc_request, rq_list);
+                req = cfs_list_entry(svc->srv_request_queue.next,
+                                     struct ptlrpc_request, rq_list);
                 svc->srv_hpreq_count = 0;
-        } else if (!list_empty(&svc->srv_request_hpq)) {
-                req = list_entry(svc->srv_request_hpq.next,
-                                 struct ptlrpc_request, rq_list);
+        } else if (!cfs_list_empty(&svc->srv_request_hpq)) {
+                req = cfs_list_entry(svc->srv_request_hpq.next,
+                                     struct ptlrpc_request, rq_list);
                 svc->srv_hpreq_count++;
         }
         RETURN(req);
@@ -1288,8 +1295,8 @@ ptlrpc_server_request_get(struct ptlrpc_service *svc, int force)
 static int ptlrpc_server_request_pending(struct ptlrpc_service *svc, int force)
 {
         return ((ptlrpc_server_allow_normal(svc, force) &&
-                 !list_empty(&svc->srv_request_queue)) ||
-                !list_empty(&svc->srv_request_hpq));
+                 !cfs_list_empty(&svc->srv_request_queue)) ||
+                !cfs_list_empty(&svc->srv_request_hpq));
 }
 
 /* Handle freshly incoming reqs, add to timed early reply list,
@@ -1304,18 +1311,18 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service *svc)
 
         LASSERT(svc);
 
-        spin_lock(&svc->srv_lock);
-        if (list_empty(&svc->srv_req_in_queue)) {
-                spin_unlock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
+        if (cfs_list_empty(&svc->srv_req_in_queue)) {
+                cfs_spin_unlock(&svc->srv_lock);
                 RETURN(0);
         }
 
-        req = list_entry(svc->srv_req_in_queue.next,
-                         struct ptlrpc_request, rq_list);
-        list_del_init (&req->rq_list);
+        req = cfs_list_entry(svc->srv_req_in_queue.next,
+                             struct ptlrpc_request, rq_list);
+        cfs_list_del_init (&req->rq_list);
         /* Consider this still a "queued" request as far as stats are
            concerned */
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         /* go through security check/transform */
         rc = sptlrpc_svc_unwrap_request(req);
@@ -1427,10 +1434,10 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service *svc)
         RETURN(1);
 
 err_req:
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         svc->srv_n_queued_reqs--;
         svc->srv_n_active_reqs++;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
         ptlrpc_server_finish_request(req);
 
         RETURN(1);
@@ -1451,25 +1458,25 @@ ptlrpc_server_handle_request(struct ptlrpc_service *svc,
 
         LASSERT(svc);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         if (unlikely(!ptlrpc_server_request_pending(svc, 0) ||
             (
 #ifndef __KERNEL__
              /* !@%$# liblustre only has 1 thread */
-             atomic_read(&svc->srv_n_difficult_replies) != 0 &&
+             cfs_atomic_read(&svc->srv_n_difficult_replies) != 0 &&
 #endif
              svc->srv_n_active_reqs >= (svc->srv_threads_running - 1)))) {
                 /* Don't handle regular requests in the last thread, in order
                   * to handle difficult replies (which might block other threads)
                   * as well as handle any incoming reqs, early replies, etc.
                   * That means we always need at least 2 service threads. */
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
                 RETURN(0);
              }
 
         request = ptlrpc_server_request_get(svc, 0);
         if  (request == NULL) {
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
                 RETURN(0);
         }
 
@@ -1481,19 +1488,19 @@ ptlrpc_server_handle_request(struct ptlrpc_service *svc,
 
         if (unlikely(fail_opc)) {
                 if (request->rq_export && request->rq_ops) {
-                        spin_unlock(&svc->srv_lock);
+                        cfs_spin_unlock(&svc->srv_lock);
                         OBD_FAIL_TIMEOUT(fail_opc, 4);
-                        spin_lock(&svc->srv_lock);
+                        cfs_spin_lock(&svc->srv_lock);
                         request = ptlrpc_server_request_get(svc, 0);
                         if  (request == NULL) {
-                                spin_unlock(&svc->srv_lock);
+                                cfs_spin_unlock(&svc->srv_lock);
                                 RETURN(0);
                         }
                         LASSERT(ptlrpc_server_request_pending(svc, 0));
                 }
         }
 
-        list_del_init(&request->rq_list);
+        cfs_list_del_init(&request->rq_list);
         svc->srv_n_queued_reqs--;
         svc->srv_n_active_reqs++;
         if (request->rq_hp)
@@ -1502,14 +1509,14 @@ ptlrpc_server_handle_request(struct ptlrpc_service *svc,
         /* The phase is changed under the lock here because we need to know
          * the request is under processing (see ptlrpc_hpreq_reorder()). */
         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         ptlrpc_hpreq_fini(request);
 
         if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
                 libcfs_debug_dumplog();
 
-        do_gettimeofday(&work_start);
+        cfs_gettimeofday(&work_start);
         timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
         if (likely(svc->srv_stats != NULL)) {
                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
@@ -1563,7 +1570,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service *svc,
                (request->rq_export ?
                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
                (request->rq_export ?
-                atomic_read(&request->rq_export->exp_refcount) : -99),
+                cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
                libcfs_id2str(request->rq_peer),
                lustre_msg_get_opc(request->rq_reqmsg));
@@ -1592,7 +1599,7 @@ put_conn:
                           request->rq_deadline));
         }
 
-        do_gettimeofday(&work_end);
+        cfs_gettimeofday(&work_end);
         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
         CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
                "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
@@ -1601,7 +1608,7 @@ put_conn:
                 (request->rq_export ?
                  (char *)request->rq_export->exp_client_uuid.uuid : "0"),
                 (request->rq_export ?
-                 atomic_read(&request->rq_export->exp_refcount) : -99),
+                 cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
                 lustre_msg_get_status(request->rq_reqmsg),
                 request->rq_xid,
                 libcfs_id2str(request->rq_peer),
@@ -1634,10 +1641,10 @@ put_conn:
         }
 
 out_req:
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         if (request->rq_hp)
                 svc->srv_n_hpreq--;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
         ptlrpc_server_finish_request(request);
 
         RETURN(1);
@@ -1661,12 +1668,12 @@ ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
 
         LASSERT (rs->rs_difficult);
         LASSERT (rs->rs_scheduled);
-        LASSERT (list_empty(&rs->rs_list));
+        LASSERT (cfs_list_empty(&rs->rs_list));
 
-        spin_lock (&exp->exp_lock);
+        cfs_spin_lock (&exp->exp_lock);
         /* Noop if removed already */
-        list_del_init (&rs->rs_exp_list);
-        spin_unlock (&exp->exp_lock);
+        cfs_list_del_init (&rs->rs_exp_list);
+        cfs_spin_unlock (&exp->exp_lock);
 
         /* The disk commit callback holds exp_uncommitted_replies_lock while it
          * iterates over newly committed replies, removing them from
@@ -1677,7 +1684,7 @@ ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
          * HRT threads and further commit callbacks by checking rs_committed
          * which is set in the commit callback while it holds both
          * rs_lock and exp_uncommitted_reples.
-         * 
+         *
          * If we see rs_committed clear, the commit callback _may_ not have
          * handled this reply yet and we race with it to grab
          * exp_uncommitted_replies_lock before removing the reply from
@@ -1691,12 +1698,12 @@ ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
          * rs_lock, which we do right next.
          */
         if (!rs->rs_committed) {
-                spin_lock(&exp->exp_uncommitted_replies_lock);
-                list_del_init(&rs->rs_obd_list);
-                spin_unlock(&exp->exp_uncommitted_replies_lock);
+                cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
+                cfs_list_del_init(&rs->rs_obd_list);
+                cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
         }
 
-        spin_lock(&rs->rs_lock);
+        cfs_spin_lock(&rs->rs_lock);
 
         been_handled = rs->rs_handled;
         rs->rs_handled = 1;
@@ -1715,7 +1722,7 @@ ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
         }
 
         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
-                spin_unlock(&rs->rs_lock);
+                cfs_spin_unlock(&rs->rs_lock);
 
                 if (!been_handled && rs->rs_on_net) {
                         LNetMDUnlink(rs->rs_md_h);
@@ -1727,27 +1734,27 @@ ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
                         ldlm_lock_decref(&rs->rs_locks[nlocks],
                                          rs->rs_modes[nlocks]);
 
-                spin_lock(&rs->rs_lock);
+                cfs_spin_lock(&rs->rs_lock);
         }
 
         rs->rs_scheduled = 0;
 
         if (!rs->rs_on_net) {
                 /* Off the net */
-                spin_unlock(&rs->rs_lock);
+                cfs_spin_unlock(&rs->rs_lock);
 
                 class_export_put (exp);
                 rs->rs_export = NULL;
                 ptlrpc_rs_decref (rs);
-                atomic_dec (&svc->srv_outstanding_replies);
-                if (atomic_dec_and_test(&svc->srv_n_difficult_replies) &&
+                cfs_atomic_dec (&svc->srv_outstanding_replies);
+                if (cfs_atomic_dec_and_test(&svc->srv_n_difficult_replies) &&
                     svc->srv_is_stopping)
                         cfs_waitq_broadcast(&svc->srv_waitq);
                 RETURN(1);
         }
 
         /* still on the net; callback will schedule */
-        spin_unlock(&rs->rs_lock);
+        cfs_spin_unlock(&rs->rs_lock);
         RETURN(1);
 }
 
@@ -1767,14 +1774,14 @@ ptlrpc_server_handle_reply(struct ptlrpc_service *svc)
         struct ptlrpc_reply_state *rs = NULL;
         ENTRY;
 
-        spin_lock(&svc->srv_lock);
-        if (!list_empty(&svc->srv_reply_queue)) {
-                rs = list_entry(svc->srv_reply_queue.prev,
-                                struct ptlrpc_reply_state,
-                                rs_list);
-                list_del_init(&rs->rs_list);
+        cfs_spin_lock(&svc->srv_lock);
+        if (!cfs_list_empty(&svc->srv_reply_queue)) {
+                rs = cfs_list_entry(svc->srv_reply_queue.prev,
+                                    struct ptlrpc_reply_state,
+                                    rs_list);
+                cfs_list_del_init(&rs->rs_list);
         }
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
         if (rs != NULL)
                 ptlrpc_handle_rs(rs);
         RETURN(rs != NULL);
@@ -1786,14 +1793,14 @@ liblustre_check_services (void *arg)
 {
         int  did_something = 0;
         int  rc;
-        struct list_head *tmp, *nxt;
+        cfs_list_t *tmp, *nxt;
         ENTRY;
 
         /* I'm relying on being single threaded, not to have to lock
          * ptlrpc_all_services etc */
-        list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
+        cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
                 struct ptlrpc_service *svc =
-                        list_entry (tmp, struct ptlrpc_service, srv_list);
+                        cfs_list_entry (tmp, struct ptlrpc_service, srv_list);
 
                 if (svc->srv_threads_running != 0)     /* I've recursed */
                         continue;
@@ -1862,7 +1869,7 @@ static int ptlrpc_main(void *arg)
         struct obd_device      *dev = data->dev;
         struct ptlrpc_reply_state *rs;
 #ifdef WITH_GROUP_INFO
-        struct group_info *ginfo = NULL;
+        cfs_group_info_t *ginfo = NULL;
 #endif
         struct lu_env env;
         int counter = 0, rc = 0;
@@ -1877,26 +1884,28 @@ static int ptlrpc_main(void *arg)
         if (svc->srv_cpu_affinity) {
                 int cpu, num_cpu;
 
-                for (cpu = 0, num_cpu = 0; cpu < num_possible_cpus(); cpu++) {
-                        if (!cpu_online(cpu))
+                for (cpu = 0, num_cpu = 0; cpu < cfs_num_possible_cpus();
+                     cpu++) {
+                        if (!cfs_cpu_online(cpu))
                                 continue;
-                        if (num_cpu == thread->t_id % num_online_cpus())
+                        if (num_cpu == thread->t_id % cfs_num_online_cpus())
                                 break;
                         num_cpu++;
                 }
-                set_cpus_allowed(cfs_current(), node_to_cpumask(cpu_to_node(cpu)));
+                cfs_set_cpus_allowed(cfs_current(),
+                                     node_to_cpumask(cpu_to_node(cpu)));
         }
 #endif
 
 #ifdef WITH_GROUP_INFO
-        ginfo = groups_alloc(0);
+        ginfo = cfs_groups_alloc(0);
         if (!ginfo) {
                 rc = -ENOMEM;
                 goto out;
         }
 
-        set_current_groups(ginfo);
-        put_group_info(ginfo);
+        cfs_set_current_groups(ginfo);
+        cfs_put_group_info(ginfo);
 #endif
 
         if (svc->srv_init != NULL) {
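
The CPU-affinity and group-info setup in ptlrpc_main() moves onto cfs_ wrappers as well. A hedged sketch of the assumed Linux equivalences (illustrative; set_cpus_allowed() and groups_alloc() are the era-appropriate kernel calls, and the real wrappers presumably sit in linux-prim.h):

/* Hypothetical Linux mapping for the CPU and credential wrappers
 * used above; details may differ from the actual headers. */
#include <linux/sched.h>
#include <linux/cpumask.h>

typedef struct group_info cfs_group_info_t;

#define cfs_num_possible_cpus()        num_possible_cpus()
#define cfs_num_online_cpus()          num_online_cpus()
#define cfs_cpu_online(cpu)            cpu_online(cpu)
#define cfs_set_cpus_allowed(t, mask)  set_cpus_allowed((t), (mask))
#define cfs_groups_alloc(gidsetsize)   groups_alloc(gidsetsize)
#define cfs_set_current_groups(ginfo)  set_current_groups(ginfo)
#define cfs_put_group_info(ginfo)      put_group_info(ginfo)

The surrounding CONFIG_SMP / HAVE_NODE_TO_CPUMASK / WITH_GROUP_INFO guards are untouched, so thread placement and group dropping behave as before.
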
@@ -1921,13 +1930,13 @@ static int ptlrpc_main(void *arg)
                 goto out_srv_fini;
         }
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         /* SVC_STOPPING may already be set here if someone else is trying
          * to stop the service while this new thread has been dynamically
          * forked. We still set SVC_RUNNING to let our creator know that
          * we are now running, however we will exit as soon as possible */
         thread->t_flags |= SVC_RUNNING;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         /*
          * wake up our creator. Note: @data is invalid after this point,
@@ -1935,12 +1944,12 @@ static int ptlrpc_main(void *arg)
          */
         cfs_waitq_signal(&thread->t_ctl_waitq);
 
-        thread->t_watchdog = lc_watchdog_add(GET_TIMEOUT(svc), NULL, NULL);
+        thread->t_watchdog = lc_watchdog_add(CFS_GET_TIMEOUT(svc), NULL, NULL);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         svc->srv_threads_running++;
-        list_add(&rs->rs_list, &svc->srv_free_rs_list);
-        spin_unlock(&svc->srv_lock);
+        cfs_list_add(&rs->rs_list, &svc->srv_free_rs_list);
+        cfs_spin_unlock(&svc->srv_lock);
         cfs_waitq_signal(&svc->srv_free_rs_waitq);
 
         CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
@@ -1955,14 +1964,14 @@ static int ptlrpc_main(void *arg)
 
                 lc_watchdog_disable(thread->t_watchdog);
 
-                cond_resched();
+                cfs_cond_resched();
 
                 l_wait_event_exclusive (svc->srv_waitq,
                               thread->t_flags & SVC_STOPPING ||
                               svc->srv_is_stopping ||
-                              (!list_empty(&svc->srv_idle_rqbds) &&
+                              (!cfs_list_empty(&svc->srv_idle_rqbds) &&
                                svc->srv_rqbd_timeout == 0) ||
-                              !list_empty(&svc->srv_req_in_queue) ||
+                              !cfs_list_empty(&svc->srv_req_in_queue) ||
                               (ptlrpc_server_request_pending(svc, 0) &&
                                (svc->srv_n_active_reqs <
                                 (svc->srv_threads_running - 1))) ||
@@ -1972,7 +1981,7 @@ static int ptlrpc_main(void *arg)
                 if (thread->t_flags & SVC_STOPPING || svc->srv_is_stopping)
                         break;
 
-                lc_watchdog_touch(thread->t_watchdog, GET_TIMEOUT(svc));
+                lc_watchdog_touch(thread->t_watchdog, CFS_GET_TIMEOUT(svc));
 
                 ptlrpc_check_rqbd_pool(svc);
 
@@ -1981,7 +1990,7 @@ static int ptlrpc_main(void *arg)
                         /* Ignore return code - we tried... */
                         ptlrpc_start_thread(dev, svc);
 
-                if (!list_empty(&svc->srv_req_in_queue)) {
+                if (!cfs_list_empty(&svc->srv_req_in_queue)) {
                         /* Process all incoming reqs before handling any */
                         ptlrpc_server_handle_req_in(svc);
                         /* but limit ourselves in case of flood */
@@ -2001,7 +2010,7 @@ static int ptlrpc_main(void *arg)
                         lu_context_exit(&env.le_ctx);
                 }
 
-                if (!list_empty(&svc->srv_idle_rqbds) &&
+                if (!cfs_list_empty(&svc->srv_idle_rqbds) &&
                     ptlrpc_server_post_idle_rqbds(svc) < 0) {
                         /* I just failed to repost request buffers.  Wait
                          * for a timeout (unless something else happens)
@@ -2027,13 +2036,13 @@ out:
         CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
                thread, thread->t_pid, thread->t_id, rc);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         svc->srv_threads_running--; /* must know immediately */
         thread->t_id = rc;
         thread->t_flags = SVC_STOPPED;
 
         cfs_waitq_signal(&thread->t_ctl_waitq);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         return rc;
 }
@@ -2045,15 +2054,15 @@ struct ptlrpc_hr_args {
 };
 
 static int hrt_dont_sleep(struct ptlrpc_hr_thread *t,
-                          struct list_head *replies)
+                          cfs_list_t *replies)
 {
         int result;
 
-        spin_lock(&t->hrt_lock);
-        list_splice_init(&t->hrt_queue, replies);
-        result = test_bit(HRT_STOPPING, &t->hrt_flags) ||
-                !list_empty(replies);
-        spin_unlock(&t->hrt_lock);
+        cfs_spin_lock(&t->hrt_lock);
+        cfs_list_splice_init(&t->hrt_queue, replies);
+        result = cfs_test_bit(HRT_STOPPING, &t->hrt_flags) ||
+                !cfs_list_empty(replies);
+        cfs_spin_unlock(&t->hrt_lock);
         return result;
 }
 
@@ -2070,28 +2079,28 @@ static int ptlrpc_hr_main(void *arg)
 
         cfs_daemonize_ctxt(threadname);
 #if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
-        set_cpus_allowed(cfs_current(),
-                         node_to_cpumask(cpu_to_node(hr_args->cpu_index)));
+        cfs_set_cpus_allowed(cfs_current(),
+                             node_to_cpumask(cpu_to_node(hr_args->cpu_index)));
 #endif
-        set_bit(HRT_RUNNING, &t->hrt_flags);
+        cfs_set_bit(HRT_RUNNING, &t->hrt_flags);
         cfs_waitq_signal(&t->hrt_wait);
 
-        while (!test_bit(HRT_STOPPING, &t->hrt_flags)) {
+        while (!cfs_test_bit(HRT_STOPPING, &t->hrt_flags)) {
 
-                cfs_wait_event(t->hrt_wait, hrt_dont_sleep(t, &replies));
-                while (!list_empty(&replies)) {
+                l_cfs_wait_event(t->hrt_wait, hrt_dont_sleep(t, &replies));
+                while (!cfs_list_empty(&replies)) {
                         struct ptlrpc_reply_state *rs;
 
-                        rs = list_entry(replies.prev,
-                                        struct ptlrpc_reply_state,
-                                        rs_list);
-                        list_del_init(&rs->rs_list);
+                        rs = cfs_list_entry(replies.prev,
+                                            struct ptlrpc_reply_state,
+                                            rs_list);
+                        cfs_list_del_init(&rs->rs_list);
                         ptlrpc_handle_rs(rs);
                 }
         }
 
-        clear_bit(HRT_RUNNING, &t->hrt_flags);
-        complete(&t->hrt_completion);
+        cfs_clear_bit(HRT_RUNNING, &t->hrt_flags);
+        cfs_complete(&t->hrt_completion);
 
         return 0;
 }
@@ -2110,10 +2119,10 @@ static int ptlrpc_start_hr_thread(struct ptlrpc_hr_service *hr, int n, int cpu)
         rc = cfs_kernel_thread(ptlrpc_hr_main, (void*)&args,
                                CLONE_VM|CLONE_FILES);
         if (rc < 0) {
-                complete(&t->hrt_completion);
+                cfs_complete(&t->hrt_completion);
                 GOTO(out, rc);
         }
-        cfs_wait_event(t->hrt_wait, test_bit(HRT_RUNNING, &t->hrt_flags));
+        l_cfs_wait_event(t->hrt_wait, cfs_test_bit(HRT_RUNNING, &t->hrt_flags));
         RETURN(0);
  out:
         return rc;
@@ -2123,9 +2132,9 @@ static void ptlrpc_stop_hr_thread(struct ptlrpc_hr_thread *t)
 {
         ENTRY;
 
-        set_bit(HRT_STOPPING, &t->hrt_flags);
+        cfs_set_bit(HRT_STOPPING, &t->hrt_flags);
         cfs_waitq_signal(&t->hrt_wait);
-        wait_for_completion(&t->hrt_completion);
+        cfs_wait_for_completion(&t->hrt_completion);
 
         EXIT;
 }
@@ -2152,9 +2161,9 @@ static int ptlrpc_start_hr_threads(struct ptlrpc_hr_service *hr)
 
         for (n = 0, cpu = 0; n < hr->hr_n_threads; n++) {
 #if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
-                while(!cpu_online(cpu)) {
+                while(!cfs_cpu_online(cpu)) {
                         cpu++;
-                        if (cpu >= num_possible_cpus())
+                        if (cpu >= cfs_num_possible_cpus())
                                 cpu = 0;
                 }
 #endif
@@ -2185,18 +2194,18 @@ static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
         CDEBUG(D_RPCTRACE, "Stopping thread [ %p : %u ]\n",
                thread, thread->t_pid);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         /* let the thread know that we would like it to stop asap */
         thread->t_flags |= SVC_STOPPING;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         cfs_waitq_broadcast(&svc->srv_waitq);
         l_wait_event(thread->t_ctl_waitq,
                      (thread->t_flags & SVC_STOPPED), &lwi);
 
-        spin_lock(&svc->srv_lock);
-        list_del(&thread->t_link);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
+        cfs_list_del(&thread->t_link);
+        cfs_spin_unlock(&svc->srv_lock);
 
         OBD_FREE_PTR(thread);
         EXIT;
@@ -2207,17 +2216,17 @@ void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
         struct ptlrpc_thread *thread;
         ENTRY;
 
-        spin_lock(&svc->srv_lock);
-        while (!list_empty(&svc->srv_threads)) {
-                thread = list_entry(svc->srv_threads.next,
-                                    struct ptlrpc_thread, t_link);
+        cfs_spin_lock(&svc->srv_lock);
+        while (!cfs_list_empty(&svc->srv_threads)) {
+                thread = cfs_list_entry(svc->srv_threads.next,
+                                        struct ptlrpc_thread, t_link);
 
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
                 ptlrpc_stop_thread(svc, thread);
-                spin_lock(&svc->srv_lock);
+                cfs_spin_lock(&svc->srv_lock);
         }
 
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
         EXIT;
 }
 
@@ -2270,15 +2279,15 @@ int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc)
                 RETURN(-ENOMEM);
         cfs_waitq_init(&thread->t_ctl_waitq);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         if (svc->srv_threads_started >= svc->srv_threads_max) {
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
                 OBD_FREE_PTR(thread);
                 RETURN(-EMFILE);
         }
-        list_add(&thread->t_link, &svc->srv_threads);
+        cfs_list_add(&thread->t_link, &svc->srv_threads);
         id = svc->srv_threads_started++;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         thread->t_svc = svc;
         thread->t_id = id;
@@ -2297,10 +2306,10 @@ int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc)
         if (rc < 0) {
                 CERROR("cannot start thread '%s': rc %d\n", name, rc);
 
-                spin_lock(&svc->srv_lock);
-                list_del(&thread->t_link);
+                cfs_spin_lock(&svc->srv_lock);
+                cfs_list_del(&thread->t_link);
                 --svc->srv_threads_started;
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
 
                 OBD_FREE(thread, sizeof(*thread));
                 RETURN(rc);
@@ -2316,7 +2325,7 @@ int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc)
 int ptlrpc_hr_init(void)
 {
         int i;
-        int n_cpus = num_online_cpus();
+        int n_cpus = cfs_num_online_cpus();
         struct ptlrpc_hr_service *hr;
         int size;
         int rc;
@@ -2331,10 +2340,10 @@ int ptlrpc_hr_init(void)
         for (i = 0; i < n_cpus; i++) {
                 struct ptlrpc_hr_thread *t = &hr->hr_threads[i];
 
-                spin_lock_init(&t->hrt_lock);
+                cfs_spin_lock_init(&t->hrt_lock);
                 cfs_waitq_init(&t->hrt_wait);
                 CFS_INIT_LIST_HEAD(&t->hrt_queue);
-                init_completion(&t->hrt_completion);
+                cfs_init_completion(&t->hrt_completion);
         }
         hr->hr_n_threads = n_cpus;
         hr->hr_size = size;
@@ -2368,8 +2377,8 @@ static void ptlrpc_wait_replies(struct ptlrpc_service *svc)
                 int rc;
                 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
                                                      NULL, NULL);
-                rc = l_wait_event(svc->srv_waitq,
-                                  atomic_read(&svc->srv_n_difficult_replies) == 0,
+                rc = l_wait_event(svc->srv_waitq, cfs_atomic_read(
+                                  &svc->srv_n_difficult_replies) == 0,
                                   &lwi);
                 if (rc == 0)
                         break;
@@ -2381,7 +2390,7 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
 {
         int                   rc;
         struct l_wait_info    lwi;
-        struct list_head     *tmp;
+        cfs_list_t           *tmp;
         struct ptlrpc_reply_state *rs, *t;
         struct ptlrpc_at_array *array = &service->srv_at_array;
         ENTRY;
@@ -2390,11 +2399,11 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
         cfs_timer_disarm(&service->srv_at_timer);
 
         ptlrpc_stop_all_threads(service);
-        LASSERT(list_empty(&service->srv_threads));
+        LASSERT(cfs_list_empty(&service->srv_threads));
 
-        spin_lock (&ptlrpc_all_services_lock);
-        list_del_init (&service->srv_list);
-        spin_unlock (&ptlrpc_all_services_lock);
+        cfs_spin_lock (&ptlrpc_all_services_lock);
+        cfs_list_del_init (&service->srv_list);
+        cfs_spin_unlock (&ptlrpc_all_services_lock);
 
         ptlrpc_lprocfs_unregister_service(service);
 
@@ -2409,10 +2418,10 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
 
         /* Unlink all the request buffers.  This forces a 'final' event with
          * its 'unlink' flag set for each posted rqbd */
-        list_for_each(tmp, &service->srv_active_rqbds) {
+        cfs_list_for_each(tmp, &service->srv_active_rqbds) {
                 struct ptlrpc_request_buffer_desc *rqbd =
-                        list_entry(tmp, struct ptlrpc_request_buffer_desc,
-                                   rqbd_list);
+                        cfs_list_entry(tmp, struct ptlrpc_request_buffer_desc,
+                                       rqbd_list);
 
                 rc = LNetMDUnlink(rqbd->rqbd_md_h);
                 LASSERT (rc == 0 || rc == -ENOENT);
@@ -2421,9 +2430,9 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
         /* Wait for the network to release any buffers it's currently
          * filling */
         for (;;) {
-                spin_lock(&service->srv_lock);
+                cfs_spin_lock(&service->srv_lock);
                 rc = service->srv_nrqbd_receiving;
-                spin_unlock(&service->srv_lock);
+                cfs_spin_unlock(&service->srv_lock);
 
                 if (rc == 0)
                         break;
@@ -2441,27 +2450,27 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
         }
 
         /* schedule all outstanding replies to terminate them */
-        spin_lock(&service->srv_lock);
-        while (!list_empty(&service->srv_active_replies)) {
+        cfs_spin_lock(&service->srv_lock);
+        while (!cfs_list_empty(&service->srv_active_replies)) {
                 struct ptlrpc_reply_state *rs =
-                        list_entry(service->srv_active_replies.next,
-                                   struct ptlrpc_reply_state, rs_list);
-                spin_lock(&rs->rs_lock);
+                        cfs_list_entry(service->srv_active_replies.next,
+                                       struct ptlrpc_reply_state, rs_list);
+                cfs_spin_lock(&rs->rs_lock);
                 ptlrpc_schedule_difficult_reply(rs);
-                spin_unlock(&rs->rs_lock);
+                cfs_spin_unlock(&rs->rs_lock);
         }
-        spin_unlock(&service->srv_lock);
+        cfs_spin_unlock(&service->srv_lock);
 
         /* purge the request queue.  NB No new replies (rqbds all unlinked)
          * and no service threads, so I'm the only thread noodling the
          * request queue now */
-        while (!list_empty(&service->srv_req_in_queue)) {
+        while (!cfs_list_empty(&service->srv_req_in_queue)) {
                 struct ptlrpc_request *req =
-                        list_entry(service->srv_req_in_queue.next,
-                                   struct ptlrpc_request,
-                                   rq_list);
+                        cfs_list_entry(service->srv_req_in_queue.next,
+                                       struct ptlrpc_request,
+                                       rq_list);
 
-                list_del(&req->rq_list);
+                cfs_list_del(&req->rq_list);
                 service->srv_n_queued_reqs--;
                 service->srv_n_active_reqs++;
                 ptlrpc_server_finish_request(req);
@@ -2470,7 +2479,7 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
                 struct ptlrpc_request *req;
 
                 req = ptlrpc_server_request_get(service, 1);
-                list_del(&req->rq_list);
+                cfs_list_del(&req->rq_list);
                 service->srv_n_queued_reqs--;
                 service->srv_n_active_reqs++;
                 ptlrpc_hpreq_fini(req);
@@ -2479,23 +2488,24 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
         LASSERT(service->srv_n_queued_reqs == 0);
         LASSERT(service->srv_n_active_reqs == 0);
         LASSERT(service->srv_n_history_rqbds == 0);
-        LASSERT(list_empty(&service->srv_active_rqbds));
+        LASSERT(cfs_list_empty(&service->srv_active_rqbds));
 
         /* Now free all the request buffers since nothing references them
          * any more... */
-        while (!list_empty(&service->srv_idle_rqbds)) {
+        while (!cfs_list_empty(&service->srv_idle_rqbds)) {
                 struct ptlrpc_request_buffer_desc *rqbd =
-                        list_entry(service->srv_idle_rqbds.next,
-                                   struct ptlrpc_request_buffer_desc,
-                                   rqbd_list);
+                        cfs_list_entry(service->srv_idle_rqbds.next,
+                                       struct ptlrpc_request_buffer_desc,
+                                       rqbd_list);
 
                 ptlrpc_free_rqbd(rqbd);
         }
 
         ptlrpc_wait_replies(service);
 
-        list_for_each_entry_safe(rs, t, &service->srv_free_rs_list, rs_list) {
-                list_del(&rs->rs_list);
+        cfs_list_for_each_entry_safe(rs, t, &service->srv_free_rs_list,
+                                     rs_list) {
+                cfs_list_del(&rs->rs_list);
                 OBD_FREE(rs, service->srv_max_reply_size);
         }
 
@@ -2504,7 +2514,7 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
 
         if (array->paa_reqs_array != NULL) {
                 OBD_FREE(array->paa_reqs_array,
-                         sizeof(struct list_head) * array->paa_size);
+                         sizeof(cfs_list_t) * array->paa_size);
                 array->paa_reqs_array = NULL;
         }
 
@@ -2532,23 +2542,23 @@ int ptlrpc_service_health_check(struct ptlrpc_service *svc)
         if (svc == NULL)
                 return 0;
 
-        do_gettimeofday(&right_now);
+        cfs_gettimeofday(&right_now);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         if (!ptlrpc_server_request_pending(svc, 1)) {
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
                 return 0;
         }
 
         /* How long has the next entry been waiting? */
-        if (list_empty(&svc->srv_request_queue))
-                request = list_entry(svc->srv_request_hpq.next,
-                                     struct ptlrpc_request, rq_list);
+        if (cfs_list_empty(&svc->srv_request_queue))
+                request = cfs_list_entry(svc->srv_request_hpq.next,
+                                         struct ptlrpc_request, rq_list);
         else
-                request = list_entry(svc->srv_request_queue.next,
-                                     struct ptlrpc_request, rq_list);
+                request = cfs_list_entry(svc->srv_request_queue.next,
+                                         struct ptlrpc_request, rq_list);
         timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         if ((timediff / ONE_MILLION) > (AT_OFF ? obd_timeout * 3/2 :
                                         at_max)) {
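
The hunks above are mechanical substitutions: each bare kernel primitive
(spin_lock, list_empty, test_bit, atomic_read, the completion calls) is
replaced by its cfs_-prefixed libcfs equivalent. On Linux these wrappers
are expected to forward straight to the native primitives so that
non-Linux builds can supply their own definitions behind the same names;
the sketch below shows that forwarding pattern under that assumption and
is not verified line-for-line against the libcfs headers this commit
touches.

    /* Illustrative sketch only: the real definitions live in the
     * libcfs/include/libcfs/linux/ headers changed by this commit. */
    typedef spinlock_t        cfs_spinlock_t;
    typedef struct completion cfs_completion_t;
    typedef struct list_head  cfs_list_t;

    #define CFS_SPIN_LOCK_UNLOCKED     SPIN_LOCK_UNLOCKED
    #define cfs_spin_lock_init(l)      spin_lock_init(l)
    #define cfs_spin_lock(l)           spin_lock(l)
    #define cfs_spin_unlock(l)         spin_unlock(l)
    #define cfs_list_empty(h)          list_empty(h)
    #define cfs_list_del_init(e)       list_del_init(e)
    #define cfs_test_bit(nr, a)        test_bit(nr, a)
    #define cfs_atomic_read(a)         atomic_read(a)
    #define cfs_init_completion(c)     init_completion(c)
    #define cfs_wait_for_completion(c) wait_for_completion(c)
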
index 0b6b2fe..4704c20 100644
@@ -109,32 +109,32 @@ static void obt_boot_epoch_update(struct obd_device *obd)
         __u32 start_epoch;
         struct obd_device_target *obt = &obd->u.obt;
         struct ptlrpc_request *req;
-        struct list_head client_list;
+        cfs_list_t client_list;
 
-        spin_lock(&obt->obt_translock);
+        cfs_spin_lock(&obt->obt_translock);
         start_epoch = lr_epoch(le64_to_cpu(obt->obt_last_transno)) + 1;
         obt->obt_last_transno = cpu_to_le64((__u64)start_epoch <<
                                             LR_EPOCH_BITS);
         obt->obt_lsd->lsd_start_epoch = cpu_to_le32(start_epoch);
-        spin_unlock(&obt->obt_translock);
+        cfs_spin_unlock(&obt->obt_translock);
 
         CFS_INIT_LIST_HEAD(&client_list);
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        list_splice_init(&obd->obd_final_req_queue, &client_list);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_list_splice_init(&obd->obd_final_req_queue, &client_list);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
 
         /**
          * go through list of exports participated in recovery and
          * set new epoch for them
          */
-        list_for_each_entry(req, &client_list, rq_list) {
+        cfs_list_for_each_entry(req, &client_list, rq_list) {
                 LASSERT(!req->rq_export->exp_delayed);
                 obt_client_epoch_update(req->rq_export);
         }
         /** return list back at once */
-        spin_lock_bh(&obd->obd_processing_task_lock);
-        list_splice_init(&client_list, &obd->obd_final_req_queue);
-        spin_unlock_bh(&obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+        cfs_list_splice_init(&client_list, &obd->obd_final_req_queue);
+        cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
         obt_server_data_update(obd, 1);
 }
 
@@ -209,9 +209,9 @@ static int lut_server_data_update(const struct lu_env *env,
                lut->lut_lsd.lsd_uuid, lut->lut_mount_count,
                lut->lut_last_transno);
 
-        spin_lock(&lut->lut_translock);
+        cfs_spin_lock(&lut->lut_translock);
         lut->lut_lsd.lsd_last_transno = lut->lut_last_transno;
-        spin_unlock(&lut->lut_translock);
+        cfs_spin_unlock(&lut->lut_translock);
 
         lsd_cpu_to_le(&lut->lut_lsd, &tmp_lsd);
         if (lut->lut_last_rcvd != NULL)
@@ -240,7 +240,7 @@ void lut_boot_epoch_update(struct lu_target *lut)
         struct lu_env env;
         struct ptlrpc_request *req;
         __u32 start_epoch;
-        struct list_head client_list;
+        cfs_list_t client_list;
         int rc;
 
         if (lut->lut_obd->obd_stopping)
@@ -255,34 +255,34 @@ void lut_boot_epoch_update(struct lu_target *lut)
                 return;
         }
 
-        spin_lock(&lut->lut_translock);
+        cfs_spin_lock(&lut->lut_translock);
         start_epoch = lr_epoch(lut->lut_last_transno) + 1;
         lut->lut_last_transno = (__u64)start_epoch << LR_EPOCH_BITS;
         lut->lut_lsd.lsd_start_epoch = start_epoch;
-        spin_unlock(&lut->lut_translock);
+        cfs_spin_unlock(&lut->lut_translock);
 
         CFS_INIT_LIST_HEAD(&client_list);
         /**
          * The recovery is not yet finished and final queue can still be updated
          * with resend requests. Move final list to separate one for processing
          */
-        spin_lock_bh(&lut->lut_obd->obd_processing_task_lock);
-        list_splice_init(&lut->lut_obd->obd_final_req_queue, &client_list);
-        spin_unlock_bh(&lut->lut_obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&lut->lut_obd->obd_processing_task_lock);
+        cfs_list_splice_init(&lut->lut_obd->obd_final_req_queue, &client_list);
+        cfs_spin_unlock_bh(&lut->lut_obd->obd_processing_task_lock);
 
         /**
          * go through list of exports participated in recovery and
          * set new epoch for them
          */
-        list_for_each_entry(req, &client_list, rq_list) {
+        cfs_list_for_each_entry(req, &client_list, rq_list) {
                 LASSERT(!req->rq_export->exp_delayed);
                 if (!req->rq_export->exp_vbr_failed)
                         lut_client_epoch_update(&env, lut, req->rq_export);
         }
         /** return list back at once */
-        spin_lock_bh(&lut->lut_obd->obd_processing_task_lock);
-        list_splice_init(&client_list, &lut->lut_obd->obd_final_req_queue);
-        spin_unlock_bh(&lut->lut_obd->obd_processing_task_lock);
+        cfs_spin_lock_bh(&lut->lut_obd->obd_processing_task_lock);
+        cfs_list_splice_init(&client_list, &lut->lut_obd->obd_final_req_queue);
+        cfs_spin_unlock_bh(&lut->lut_obd->obd_processing_task_lock);
         /** update server epoch */
         lut_server_data_update(&env, lut, 1);
         lu_env_fini(&env);
@@ -297,17 +297,17 @@ void lut_cb_last_committed(struct lu_target *lut, __u64 transno,
 {
         struct obd_export *exp = data;
         LASSERT(exp->exp_obd == lut->lut_obd);
-        spin_lock(&lut->lut_translock);
+        cfs_spin_lock(&lut->lut_translock);
         if (transno > lut->lut_obd->obd_last_committed)
                 lut->lut_obd->obd_last_committed = transno;
 
         LASSERT(exp);
         if (transno > exp->exp_last_committed) {
                 exp->exp_last_committed = transno;
-                spin_unlock(&lut->lut_translock);
+                cfs_spin_unlock(&lut->lut_translock);
                 ptlrpc_commit_replies(exp);
         } else {
-                spin_unlock(&lut->lut_translock);
+                cfs_spin_unlock(&lut->lut_translock);
         }
         class_export_cb_put(exp);
         if (transno)
@@ -336,9 +336,9 @@ int lut_init(const struct lu_env *env, struct lu_target *lut,
         lut->lut_bottom = dt;
         lut->lut_last_rcvd = NULL;
 
-        spin_lock_init(&lut->lut_translock);
-        spin_lock_init(&lut->lut_client_bitmap_lock);
-        spin_lock_init(&lut->lut_trans_table_lock);
+        cfs_spin_lock_init(&lut->lut_translock);
+        cfs_spin_lock_init(&lut->lut_client_bitmap_lock);
+        cfs_spin_lock_init(&lut->lut_trans_table_lock);
 
         /** obdfilter has no lu_device stack yet */
         if (dt == NULL)
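
Both epoch-update functions above use the same idiom: detach the shared
obd_final_req_queue onto a private list while holding
obd_processing_task_lock, walk the private list with the lock dropped,
then splice everything back. A minimal sketch of that idiom with the
renamed wrappers; the helper name is illustrative, not from the commit:

    static void epoch_update_all(cfs_spinlock_t *lock, cfs_list_t *queue)
    {
            struct ptlrpc_request *req;
            cfs_list_t work;

            CFS_INIT_LIST_HEAD(&work);

            /* atomically steal the whole queue; later arrivals land on
             * the (now empty) shared head */
            cfs_spin_lock_bh(lock);
            cfs_list_splice_init(queue, &work);
            cfs_spin_unlock_bh(lock);

            /* per-export work runs without the lock held */
            cfs_list_for_each_entry(req, &work, rq_list)
                    obt_client_epoch_update(req->rq_export);

            /* hand the requests back to the shared queue in one shot */
            cfs_spin_lock_bh(lock);
            cfs_list_splice_init(&work, queue);
            cfs_spin_unlock_bh(lock);
    }
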
index 17e0e67..6ac0bc2 100644
@@ -219,7 +219,7 @@ static int auto_quota_on(struct obd_device *obd, int type,
         if (!oqctl)
                 RETURN(-ENOMEM);
 
-        down(&obt->obt_quotachecking);
+        cfs_down(&obt->obt_quotachecking);
         id = UGQUOTA2LQC(type);
         /* quota already turned on */
         if ((obt->obt_qctxt.lqc_flags & id) == id)
@@ -239,7 +239,7 @@ static int auto_quota_on(struct obd_device *obd, int type,
 
         if (is_master) {
                 mds = &obd->u.mds;
-                down(&mds->mds_qonoff_sem);
+                cfs_down(&mds->mds_qonoff_sem);
                 /* turn on cluster wide quota */
                 rc1 = mds_admin_quota_on(obd, oqctl);
                 if (rc1 && rc1 != -EALREADY) {
@@ -279,11 +279,11 @@ static int auto_quota_on(struct obd_device *obd, int type,
 
 out_ctxt:
         if (mds != NULL)
-                up(&mds->mds_qonoff_sem);
+                cfs_up(&mds->mds_qonoff_sem);
         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
 
 out:
-        up(&obt->obt_quotachecking);
+        cfs_up(&obt->obt_quotachecking);
         OBD_FREE_PTR(oqctl);
         return rc;
 }
@@ -306,7 +306,7 @@ int lprocfs_quota_wr_type(struct file *file, const char *buffer,
         if (count > MAX_STYPE_SIZE)
                 return -EINVAL;
 
-        if (copy_from_user(stype, buffer, count))
+        if (cfs_copy_from_user(stype, buffer, count))
                 return -EFAULT;
 
         for (i = 0 ; i < count ; i++) {
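
lprocfs_quota_wr_type above follows the usual /proc write pattern: bound
the user-supplied length, pull the buffer into kernel space through the
renamed copy wrapper, then parse. A self-contained sketch of the pattern;
the handler name and the value of the size bound are illustrative:

    #define EX_STYPE_SIZE 16             /* illustrative bound */

    static int example_quota_wr_type(struct file *file, const char *buffer,
                                     unsigned long count, void *data)
    {
            char stype[EX_STYPE_SIZE + 1] = "";

            if (count > EX_STYPE_SIZE)
                    return -EINVAL;      /* refuse oversized writes */

            /* cfs_copy_from_user() keeps copy_from_user() semantics:
             * it returns the number of bytes it could NOT copy, so any
             * non-zero result means a faulted user page */
            if (cfs_copy_from_user(stype, buffer, count))
                    return -EFAULT;

            stype[count] = '\0';         /* terminate before parsing */
            /* ... parse stype and switch the quota type here ... */
            return count;                /* whole buffer consumed */
    }
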
index 9b1fcdb..6d0c262 100644
@@ -102,7 +102,7 @@ quota_create_lqs(unsigned long long lqs_key, struct lustre_quota_ctxt *qctxt)
 
         lqs->lqs_key = lqs_key;
 
-        spin_lock_init(&lqs->lqs_lock);
+        cfs_spin_lock_init(&lqs->lqs_lock);
         lqs->lqs_bwrite_pending = 0;
         lqs->lqs_iwrite_pending = 0;
         lqs->lqs_ino_rec = 0;
@@ -120,13 +120,13 @@ quota_create_lqs(unsigned long long lqs_key, struct lustre_quota_ctxt *qctxt)
         }
         lqs_initref(lqs);
 
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         if (!qctxt->lqc_valid)
                 rc = -EBUSY;
         else
                 rc = cfs_hash_add_unique(qctxt->lqc_lqs_hash,
                                          &lqs->lqs_key, &lqs->lqs_hash);
-        spin_unlock(&qctxt->lqc_lock);
+        cfs_spin_unlock(&qctxt->lqc_lock);
 
         if (!rc)
                 lqs_getref(lqs);
@@ -202,7 +202,7 @@ int quota_adjust_slave_lqs(struct quota_adjust_qunit *oqaq,
 
         CDEBUG(D_QUOTA, "before: bunit: %lu, iunit: %lu.\n",
                lqs->lqs_bunit_sz, lqs->lqs_iunit_sz);
-        spin_lock(&lqs->lqs_lock);
+        cfs_spin_lock(&lqs->lqs_lock);
         for (i = 0; i < 2; i++) {
                 if (i == 0 && !QAQ_IS_ADJBLK(oqaq))
                         continue;
@@ -248,7 +248,7 @@ int quota_adjust_slave_lqs(struct quota_adjust_qunit *oqaq,
                 if (tmp < 0)
                         rc |= i ? LQS_INO_INCREASE : LQS_BLK_INCREASE;
         }
-        spin_unlock(&lqs->lqs_lock);
+        cfs_spin_unlock(&lqs->lqs_lock);
         CDEBUG(D_QUOTA, "after: bunit: %lu, iunit: %lu.\n",
                lqs->lqs_bunit_sz, lqs->lqs_iunit_sz);
 
index 4d0e67a..f00c7ce 100644
@@ -115,7 +115,7 @@ static int target_quotacheck_thread(void *data)
 
         rc = target_quotacheck_callback(exp, oqctl);
         class_export_put(exp);
-        up(qta->qta_sem);
+        cfs_up(qta->qta_sem);
         OBD_FREE_PTR(qta);
         return rc;
 }
@@ -132,7 +132,7 @@ int target_quota_check(struct obd_device *obd, struct obd_export *exp,
         if (!qta)
                 RETURN(ENOMEM);
 
-        down(&obt->obt_quotachecking);
+        cfs_down(&obt->obt_quotachecking);
 
         qta->qta_exp = exp;
         qta->qta_obd = obd;
@@ -174,7 +174,8 @@ int target_quota_check(struct obd_device *obd, struct obd_export *exp,
         /* we get ref for exp because target_quotacheck_callback() will use this
          * export later b=18126 */
         class_export_get(exp);
-        rc = kernel_thread(target_quotacheck_thread, qta, CLONE_VM|CLONE_FILES);
+        rc = cfs_kernel_thread(target_quotacheck_thread, qta,
+                               CLONE_VM|CLONE_FILES);
         if (rc >= 0) {
                 CDEBUG(D_INFO, "%s: target_quotacheck_thread: %d\n",
                        obd->obd_name, rc);
@@ -187,7 +188,7 @@ int target_quota_check(struct obd_device *obd, struct obd_export *exp,
         }
 
 out:
-        up(&obt->obt_quotachecking);
+        cfs_up(&obt->obt_quotachecking);
         OBD_FREE_PTR(qta);
         return rc;
 }
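
Note the semaphore handoff in target_quota_check() above: the caller
takes obt_quotachecking with cfs_down(), and on the success path it is
the spawned thread that releases it via cfs_up(qta->qta_sem) once the
check completes, so a second quotacheck cannot start in the meantime;
only the error path releases locally. A reduced sketch of the handoff,
assuming the cfs_ wrappers keep the usual semaphore and kernel_thread
semantics; names other than the wrappers are illustrative:

    struct qchk_args {
            cfs_semaphore_t *qta_sem;
            /* the real struct also carries obd/exp/oqctl */
    };

    static int qchk_thread(void *data)
    {
            struct qchk_args *qta = data;

            /* ... run the actual quota check ... */

            cfs_up(qta->qta_sem);        /* child releases the semaphore */
            OBD_FREE_PTR(qta);
            return 0;
    }

    static int qchk_start(struct obd_device_target *obt)
    {
            struct qchk_args *qta;
            int rc;

            OBD_ALLOC_PTR(qta);
            if (qta == NULL)
                    return -ENOMEM;

            cfs_down(&obt->obt_quotachecking);  /* serialize checks */
            qta->qta_sem = &obt->obt_quotachecking;

            rc = cfs_kernel_thread(qchk_thread, qta,
                                   CLONE_VM | CLONE_FILES);
            if (rc >= 0)
                    return 0;   /* qta and the semaphore now belong to
                                 * the child */

            cfs_up(&obt->obt_quotachecking);    /* error: undo locally */
            OBD_FREE_PTR(qta);
            return rc;
    }
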
index 43e7c67..57ac31d 100644
@@ -70,8 +70,8 @@ unsigned long default_iunit_sz = 5120;              /* 5120 inodes */
 unsigned long default_itune_ratio = 50;             /* 50 percentage */
 
 cfs_mem_cache_t *qunit_cachep = NULL;
-struct list_head qunit_hash[NR_DQHASH];
-spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;
+cfs_list_t qunit_hash[NR_DQHASH];
+cfs_spinlock_t qunit_hash_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 /* please sync qunit_state with qunit_state_names */
 enum qunit_state {
@@ -103,13 +103,13 @@ static const char *qunit_state_names[] = {
 };
 
 struct lustre_qunit {
-        struct list_head lq_hash;          /** Hash list in memory */
-        atomic_t lq_refcnt;                /** Use count */
+        cfs_list_t lq_hash;      /** Hash list in memory */
+        cfs_atomic_t lq_refcnt;            /** Use count */
         struct lustre_quota_ctxt *lq_ctxt; /** Quota context this applies to */
         struct qunit_data lq_data;         /** See qunit_data */
         unsigned int lq_opc;               /** QUOTA_DQACQ, QUOTA_DQREL */
         cfs_waitq_t lq_waitq;              /** Threads waiting for this qunit */
-        spinlock_t lq_lock;                /** Protect the whole structure */
+        cfs_spinlock_t lq_lock;            /** Protect the whole structure */
         enum qunit_state lq_state;         /** Present the status of qunit */
         int lq_rc;                         /** The rc of lq_data */
         pid_t lq_owner;
@@ -117,19 +117,19 @@ struct lustre_qunit {
 
 #define QUNIT_SET_STATE(qunit, state)                                   \
 do {                                                                    \
-        spin_lock(&qunit->lq_lock);                                     \
+        cfs_spin_lock(&qunit->lq_lock);                                 \
         QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), "   \
                     "lq_rc(%d), lq_owner(%d)\n",                        \
                     qunit, qunit_state_names[qunit->lq_state],          \
                     qunit_state_names[state], qunit->lq_rc,             \
                     qunit->lq_owner);                                   \
         qunit->lq_state = state;                                        \
-        spin_unlock(&qunit->lq_lock);                                   \
+        cfs_spin_unlock(&qunit->lq_lock);                               \
 } while(0)
 
 #define QUNIT_SET_STATE_AND_RC(qunit, state, rc)                        \
 do {                                                                    \
-        spin_lock(&qunit->lq_lock);                                     \
+        cfs_spin_lock(&qunit->lq_lock);                                 \
         qunit->lq_rc = rc;                                              \
         QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), "   \
                     "lq_rc(%d), lq_owner(%d)\n",                        \
@@ -137,7 +137,7 @@ do {                                                                    \
                     qunit_state_names[state], qunit->lq_rc,             \
                     qunit->lq_owner);                                   \
         qunit->lq_state = state;                                        \
-        spin_unlock(&qunit->lq_lock);                                   \
+        cfs_spin_unlock(&qunit->lq_lock);                               \
 } while(0)
 
 int should_translate_quota (struct obd_import *imp)
@@ -156,10 +156,10 @@ void qunit_cache_cleanup(void)
         int i;
         ENTRY;
 
-        spin_lock(&qunit_hash_lock);
+        cfs_spin_lock(&qunit_hash_lock);
         for (i = 0; i < NR_DQHASH; i++)
-                LASSERT(list_empty(qunit_hash + i));
-        spin_unlock(&qunit_hash_lock);
+                LASSERT(cfs_list_empty(qunit_hash + i));
+        cfs_spin_unlock(&qunit_hash_lock);
 
         if (qunit_cachep) {
                 int rc;
@@ -182,10 +182,10 @@ int qunit_cache_init(void)
         if (!qunit_cachep)
                 RETURN(-ENOMEM);
 
-        spin_lock(&qunit_hash_lock);
+        cfs_spin_lock(&qunit_hash_lock);
         for (i = 0; i < NR_DQHASH; i++)
                 CFS_INIT_LIST_HEAD(qunit_hash + i);
-        spin_unlock(&qunit_hash_lock);
+        cfs_spin_unlock(&qunit_hash_lock);
         RETURN(0);
 }
 
@@ -213,7 +213,7 @@ static inline struct lustre_qunit *find_qunit(unsigned int hashent,
         struct qunit_data *tmp;
 
         LASSERT_SPIN_LOCKED(&qunit_hash_lock);
-        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
+        cfs_list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                 tmp = &qunit->lq_data;
                 if (qunit->lq_ctxt == qctxt &&
                     qdata->qd_id == tmp->qd_id &&
@@ -249,12 +249,12 @@ check_cur_qunit(struct obd_device *obd,
         if (!ll_sb_any_quota_active(sb))
                 RETURN(0);
 
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         if (!qctxt->lqc_valid){
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
                 RETURN(0);
         }
-        spin_unlock(&qctxt->lqc_lock);
+        cfs_spin_unlock(&qctxt->lqc_lock);
 
         OBD_ALLOC_PTR(qctl);
         if (qctl == NULL)
@@ -293,7 +293,7 @@ check_cur_qunit(struct obd_device *obd,
                        QDATA_IS_GRP(qdata) ? "g" : "u", qdata->qd_id);
                 GOTO (out, ret = 0);
         }
-        spin_lock(&lqs->lqs_lock);
+        cfs_spin_lock(&lqs->lqs_lock);
 
         if (QDATA_IS_BLK(qdata)) {
                 qunit_sz = lqs->lqs_bunit_sz;
@@ -355,7 +355,7 @@ check_cur_qunit(struct obd_device *obd,
                record, qunit_sz, tune_sz, ret);
         LASSERT(ret == 0 || qdata->qd_count);
 
-        spin_unlock(&lqs->lqs_lock);
+        cfs_spin_unlock(&lqs->lqs_lock);
         lqs_putref(lqs);
 
         EXIT;
@@ -431,12 +431,12 @@ static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                 RETURN(NULL);
 
         CFS_INIT_LIST_HEAD(&qunit->lq_hash);
-        init_waitqueue_head(&qunit->lq_waitq);
-        atomic_set(&qunit->lq_refcnt, 1);
+        cfs_waitq_init(&qunit->lq_waitq);
+        cfs_atomic_set(&qunit->lq_refcnt, 1);
         qunit->lq_ctxt = qctxt;
         memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
         qunit->lq_opc = opc;
-        qunit->lq_lock = SPIN_LOCK_UNLOCKED;
+        qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
         QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
         qunit->lq_owner = cfs_curproc_pid();
         RETURN(qunit);
@@ -449,13 +449,13 @@ static inline void free_qunit(struct lustre_qunit *qunit)
 
 static inline void qunit_get(struct lustre_qunit *qunit)
 {
-        atomic_inc(&qunit->lq_refcnt);
+        cfs_atomic_inc(&qunit->lq_refcnt);
 }
 
 static void qunit_put(struct lustre_qunit *qunit)
 {
-        LASSERT(atomic_read(&qunit->lq_refcnt));
-        if (atomic_dec_and_test(&qunit->lq_refcnt))
+        LASSERT(cfs_atomic_read(&qunit->lq_refcnt));
+        if (cfs_atomic_dec_and_test(&qunit->lq_refcnt))
                 free_qunit(qunit);
 }
 
@@ -477,12 +477,12 @@ static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
 static void
 insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
 {
-        struct list_head *head;
+        cfs_list_t *head;
 
-        LASSERT(list_empty(&qunit->lq_hash));
+        LASSERT(cfs_list_empty(&qunit->lq_hash));
         qunit_get(qunit);
         head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
-        list_add(&qunit->lq_hash, head);
+        cfs_list_add(&qunit->lq_hash, head);
         QUNIT_SET_STATE(qunit, QUNIT_IN_HASH);
 }
 
@@ -494,12 +494,12 @@ static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
                                        qunit->lq_data.qd_id),
                                qunit->lq_ctxt, 0);
         if (lqs && !IS_ERR(lqs)) {
-                spin_lock(&lqs->lqs_lock);
+                cfs_spin_lock(&lqs->lqs_lock);
                 if (qunit->lq_opc == QUOTA_DQACQ)
                         quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
                 if (qunit->lq_opc == QUOTA_DQREL)
                         quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
-                spin_unlock(&lqs->lqs_lock);
+                cfs_spin_unlock(&lqs->lqs_lock);
                 /* this is for quota_search_lqs */
                 lqs_putref(lqs);
                 /* this is for schedule_dqacq */
@@ -509,10 +509,10 @@ static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
 
 static void remove_qunit_nolock(struct lustre_qunit *qunit)
 {
-        LASSERT(!list_empty(&qunit->lq_hash));
+        LASSERT(!cfs_list_empty(&qunit->lq_hash));
         LASSERT_SPIN_LOCKED(&qunit_hash_lock);
 
-        list_del_init(&qunit->lq_hash);
+        cfs_list_del_init(&qunit->lq_hash);
         QUNIT_SET_STATE(qunit, QUNIT_RM_FROM_HASH);
         qunit_put(qunit);
 }
@@ -533,10 +533,10 @@ void* quota_barrier(struct lustre_quota_ctxt *qctxt,
                 return NULL;
         }
 
-        INIT_LIST_HEAD(&qunit->lq_hash);
-        qunit->lq_lock = SPIN_LOCK_UNLOCKED;
-        init_waitqueue_head(&qunit->lq_waitq);
-        atomic_set(&qunit->lq_refcnt, 1);
+        CFS_INIT_LIST_HEAD(&qunit->lq_hash);
+        qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
+        cfs_waitq_init(&qunit->lq_waitq);
+        cfs_atomic_set(&qunit->lq_refcnt, 1);
         qunit->lq_ctxt = qctxt;
         qunit->lq_data.qd_id = oqctl->qc_id;
         qunit->lq_data.qd_flags =  oqctl->qc_type;
@@ -547,10 +547,10 @@ void* quota_barrier(struct lustre_quota_ctxt *qctxt,
         qunit->lq_opc = QUOTA_LAST_OPC;
 
         while (1) {
-                spin_lock(&qunit_hash_lock);
+                cfs_spin_lock(&qunit_hash_lock);
                 find_qunit = dqacq_in_flight(qctxt, &qunit->lq_data);
                 if (find_qunit) {
-                        spin_unlock(&qunit_hash_lock);
+                        cfs_spin_unlock(&qunit_hash_lock);
                         qunit_put(find_qunit);
                         qctxt_wait_pending_dqacq(qctxt, oqctl->qc_id,
                                                  oqctl->qc_type, isblk);
@@ -560,7 +560,7 @@ void* quota_barrier(struct lustre_quota_ctxt *qctxt,
                 break;
         }
         insert_qunit_nolock(qctxt, qunit);
-        spin_unlock(&qunit_hash_lock);
+        cfs_spin_unlock(&qunit_hash_lock);
         return qunit;
 }
 
@@ -574,11 +574,11 @@ void quota_unbarrier(void *handle)
         }
 
         LASSERT(qunit->lq_opc == QUOTA_LAST_OPC);
-        spin_lock(&qunit_hash_lock);
+        cfs_spin_lock(&qunit_hash_lock);
         remove_qunit_nolock(qunit);
-        spin_unlock(&qunit_hash_lock);
+        cfs_spin_unlock(&qunit_hash_lock);
         QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, QUOTA_REQ_RETURNED);
-        wake_up(&qunit->lq_waitq);
+        cfs_waitq_signal(&qunit->lq_waitq);
         qunit_put(qunit);
 }
 
@@ -705,12 +705,12 @@ out_mem:
         }
 out:
         /* remove the qunit from hash */
-        spin_lock(&qunit_hash_lock);
+        cfs_spin_lock(&qunit_hash_lock);
 
         qunit = dqacq_in_flight(qctxt, qdata);
         /* this qunit has been removed by qctxt_cleanup() */
         if (!qunit) {
-                spin_unlock(&qunit_hash_lock);
+                cfs_spin_unlock(&qunit_hash_lock);
                 QDATA_DEBUG(qdata, "%s is discarded because qunit isn't found\n",
                             opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                 RETURN(err);
@@ -720,7 +720,7 @@ out:
         /* remove this qunit from lq_hash so that new processes cannot be added
          * to qunit->lq_waiters */
         remove_qunit_nolock(qunit);
-        spin_unlock(&qunit_hash_lock);
+        cfs_spin_unlock(&qunit_hash_lock);
 
         compute_lqs_after_removing_qunit(qunit);
 
@@ -728,7 +728,7 @@ out:
                 rc = QUOTA_REQ_RETURNED;
         QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, rc);
         /* wake up all waiters */
-        wake_up_all(&qunit->lq_waitq);
+        cfs_waitq_broadcast(&qunit->lq_waitq);
 
         /* this is for dqacq_in_flight() */
         qunit_put(qunit);
@@ -794,7 +794,7 @@ static int dqacq_interpret(const struct lu_env *env,
         LASSERT(req);
         LASSERT(req->rq_import);
 
-        down_read(&obt->obt_rwsem);
+        cfs_down_read(&obt->obt_rwsem);
         /* if a quota req timeouts or is dropped, we should update quota
          * statistics which will be handled in dqacq_completion. And in
          * this situation we should get qdata from request instead of
@@ -844,7 +844,7 @@ static int dqacq_interpret(const struct lu_env *env,
         rc = dqacq_completion(obd, qctxt, qdata, rc,
                               lustre_msg_get_opc(req->rq_reqmsg));
 
-        up_read(&obt->obt_rwsem);
+        cfs_up_read(&obt->obt_rwsem);
         RETURN(rc);
 }
 
@@ -856,10 +856,10 @@ int check_qm(struct lustre_quota_ctxt *qctxt)
         int rc;
         ENTRY;
 
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         /* quit waiting when mds is back or qctxt is cleaned up */
         rc = qctxt->lqc_import || !qctxt->lqc_valid;
-        spin_unlock(&qctxt->lqc_lock);
+        cfs_spin_unlock(&qctxt->lqc_lock);
 
         RETURN(rc);
 }
@@ -871,9 +871,10 @@ void dqacq_interrupt(struct lustre_quota_ctxt *qctxt)
         int i;
         ENTRY;
 
-        spin_lock(&qunit_hash_lock);
+        cfs_spin_lock(&qunit_hash_lock);
         for (i = 0; i < NR_DQHASH; i++) {
-                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
+                cfs_list_for_each_entry_safe(qunit, tmp, &qunit_hash[i],
+                                             lq_hash) {
                         if (qunit->lq_ctxt != qctxt)
                                 continue;
 
@@ -882,10 +883,10 @@ void dqacq_interrupt(struct lustre_quota_ctxt *qctxt)
                          * if no others change it, then the waiters will return
                          * -EAGAIN to caller who can perform related quota
                          * acq/rel if necessary. */
-                        wake_up_all(&qunit->lq_waitq);
+                        cfs_waitq_broadcast(&qunit->lq_waitq);
                 }
         }
-        spin_unlock(&qunit_hash_lock);
+        cfs_spin_unlock(&qunit_hash_lock);
         EXIT;
 }
 
@@ -895,7 +896,7 @@ static int got_qunit(struct lustre_qunit *qunit, int is_master)
         int rc = 0;
         ENTRY;
 
-        spin_lock(&qunit->lq_lock);
+        cfs_spin_lock(&qunit->lq_lock);
         switch (qunit->lq_state) {
         case QUNIT_IN_HASH:
         case QUNIT_RM_FROM_HASH:
@@ -906,14 +907,14 @@ static int got_qunit(struct lustre_qunit *qunit, int is_master)
         default:
                 CERROR("invalid qunit state %d\n", qunit->lq_state);
         }
-        spin_unlock(&qunit->lq_lock);
+        cfs_spin_unlock(&qunit->lq_lock);
 
         if (!rc) {
-                spin_lock(&qctxt->lqc_lock);
+                cfs_spin_lock(&qctxt->lqc_lock);
                 rc = !qctxt->lqc_valid;
                 if (!is_master)
                         rc |= !qctxt->lqc_import;
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
         }
 
         RETURN(rc);
@@ -924,9 +925,9 @@ revoke_lqs_rec(struct lustre_qunit_size *lqs, struct qunit_data *qdata, int opc)
 {
         /* revoke lqs_xxx_rec which is computed in check_cur_qunit
          * b=18630 */
-        spin_lock(&lqs->lqs_lock);
+        cfs_spin_lock(&lqs->lqs_lock);
         quota_compute_lqs(qdata, lqs, 0, (opc == QUOTA_DQACQ) ? 1 : 0);
-        spin_unlock(&lqs->lqs_lock);
+        cfs_spin_unlock(&lqs->lqs_lock);
 }
 
 static int
@@ -947,7 +948,7 @@ schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
         ENTRY;
 
         LASSERT(opc == QUOTA_DQACQ || opc == QUOTA_DQREL);
-        do_gettimeofday(&work_start);
+        cfs_gettimeofday(&work_start);
 
         lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(qdata), qdata->qd_id),
                                qctxt, 0);
@@ -965,10 +966,10 @@ schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
 
         OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_SD, 5);
 
-        spin_lock(&qunit_hash_lock);
+        cfs_spin_lock(&qunit_hash_lock);
         qunit = dqacq_in_flight(qctxt, qdata);
         if (qunit) {
-                spin_unlock(&qunit_hash_lock);
+                cfs_spin_unlock(&qunit_hash_lock);
                 qunit_put(empty);
 
                 revoke_lqs_rec(lqs, qdata, opc);
@@ -979,7 +980,7 @@ schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
         qunit = empty;
         qunit_get(qunit);
         insert_qunit_nolock(qctxt, qunit);
-        spin_unlock(&qunit_hash_lock);
+        cfs_spin_unlock(&qunit_hash_lock);
 
         /* From here, the quota request will be sent anyway.
          * When this qdata request returned or is cancelled,
@@ -1001,7 +1002,7 @@ schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                 /* this is for qunit_get() */
                 qunit_put(qunit);
 
-                do_gettimeofday(&work_end);
+                cfs_gettimeofday(&work_end);
                 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                 if (opc == QUOTA_DQACQ)
                         lprocfs_counter_add(qctxt->lqc_stats,
@@ -1014,27 +1015,27 @@ schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                 RETURN(rc ? rc : rc2);
         }
 
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         if (!qctxt->lqc_import) {
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
                 QDATA_DEBUG(qdata, "lqc_import is invalid.\n");
 
-                spin_lock(&qunit_hash_lock);
+                cfs_spin_lock(&qunit_hash_lock);
                 remove_qunit_nolock(qunit);
-                spin_unlock(&qunit_hash_lock);
+                cfs_spin_unlock(&qunit_hash_lock);
 
                 compute_lqs_after_removing_qunit(qunit);
 
                 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, -EAGAIN);
-                wake_up_all(&qunit->lq_waitq);
+                cfs_waitq_broadcast(&qunit->lq_waitq);
 
                 /* this is for qunit_get() */
                 qunit_put(qunit);
                 /* this for alloc_qunit() */
                 qunit_put(qunit);
-                spin_lock(&qctxt->lqc_lock);
+                cfs_spin_lock(&qctxt->lqc_lock);
                 if (wait && !qctxt->lqc_import) {
-                        spin_unlock(&qctxt->lqc_lock);
+                        cfs_spin_unlock(&qctxt->lqc_lock);
 
                         LASSERT(oti && oti->oti_thread &&
                                 oti->oti_thread->t_watchdog);
@@ -1045,15 +1046,15 @@ schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                                      check_qm(qctxt), &lwi);
                         CDEBUG(D_QUOTA, "wake up when quota master is back\n");
                         lc_watchdog_touch(oti->oti_thread->t_watchdog,
-                                 GET_TIMEOUT(oti->oti_thread->t_svc));
+                                 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
                 } else {
-                        spin_unlock(&qctxt->lqc_lock);
+                        cfs_spin_unlock(&qctxt->lqc_lock);
                 }
 
                 RETURN(-EAGAIN);
         }
         imp = class_import_get(qctxt->lqc_import);
-        spin_unlock(&qctxt->lqc_lock);
+        cfs_spin_unlock(&qctxt->lqc_lock);
 
         /* build dqacq/dqrel request */
         LASSERT(imp);
@@ -1104,16 +1105,16 @@ wait_completion:
                  * rc = -EBUSY, it means recovery is happening
                  * other rc < 0, it means real errors, functions who call
                  * schedule_dqacq should take care of this */
-                spin_lock(&qunit->lq_lock);
+                cfs_spin_lock(&qunit->lq_lock);
                 rc = qunit->lq_rc;
-                spin_unlock(&qunit->lq_lock);
+                cfs_spin_unlock(&qunit->lq_lock);
                 CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: id(%u) flag(%u) "
                        "rc(%d) owner(%d)\n", qunit, qunit->lq_data.qd_id,
                        qunit->lq_data.qd_flags, rc, qunit->lq_owner);
         }
 
         qunit_put(qunit);
-        do_gettimeofday(&work_end);
+        cfs_gettimeofday(&work_end);
         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
         if (opc == QUOTA_DQACQ)
                 lprocfs_counter_add(qctxt->lqc_stats,
@@ -1180,16 +1181,16 @@ qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
         int rc = 0;
         ENTRY;
 
-        do_gettimeofday(&work_start);
+        cfs_gettimeofday(&work_start);
         qdata.qd_id = id;
         qdata.qd_flags = type;
         if (isblk)
                 QDATA_SET_BLK(&qdata);
         qdata.qd_count = 0;
 
-        spin_lock(&qunit_hash_lock);
+        cfs_spin_lock(&qunit_hash_lock);
         qunit = dqacq_in_flight(qctxt, &qdata);
-        spin_unlock(&qunit_hash_lock);
+        cfs_spin_unlock(&qunit_hash_lock);
 
         if (qunit) {
                 struct qunit_data *p = &qunit->lq_data;
@@ -1200,19 +1201,19 @@ qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                 CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: rc(%d) "
                        "owner(%d)\n", qunit, qunit->lq_rc, qunit->lq_owner);
                 /* keep same as schedule_dqacq() b=17030 */
-                spin_lock(&qunit->lq_lock);
+                cfs_spin_lock(&qunit->lq_lock);
                 rc = qunit->lq_rc;
-                spin_unlock(&qunit->lq_lock);
+                cfs_spin_unlock(&qunit->lq_lock);
                 /* this is for dqacq_in_flight() */
                 qunit_put(qunit);
-                do_gettimeofday(&work_end);
+                cfs_gettimeofday(&work_end);
                 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                 lprocfs_counter_add(qctxt->lqc_stats,
                                     isblk ? LQUOTA_WAIT_PENDING_BLK_QUOTA :
                                             LQUOTA_WAIT_PENDING_INO_QUOTA,
                                     timediff);
         } else {
-                do_gettimeofday(&work_end);
+                cfs_gettimeofday(&work_end);
                 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                 lprocfs_counter_add(qctxt->lqc_stats,
                                     isblk ? LQUOTA_NOWAIT_PENDING_BLK_QUOTA :
@@ -1240,9 +1241,9 @@ qctxt_init(struct obd_device *obd, dqacq_handler_t handler)
 
         cfs_waitq_init(&qctxt->lqc_wait_for_qmaster);
         cfs_waitq_init(&qctxt->lqc_lqs_waitq);
-        atomic_set(&qctxt->lqc_lqs, 0);
-        spin_lock_init(&qctxt->lqc_lock);
-        spin_lock(&qctxt->lqc_lock);
+        cfs_atomic_set(&qctxt->lqc_lqs, 0);
+        cfs_spin_lock_init(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         qctxt->lqc_handler = handler;
         qctxt->lqc_sb = sb;
         qctxt->lqc_obt = obt;
@@ -1263,7 +1264,7 @@ qctxt_init(struct obd_device *obd, dqacq_handler_t handler)
         qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
                                           * after the last shrinking */
         qctxt->lqc_sync_blk = 0;
-        spin_unlock(&qctxt->lqc_lock);
+        cfs_spin_unlock(&qctxt->lqc_lock);
 
         qctxt->lqc_lqs_hash = cfs_hash_create("LQS_HASH",
                                               HASH_LQS_CUR_BITS,
@@ -1288,7 +1289,7 @@ static int check_lqs(struct lustre_quota_ctxt *qctxt)
         int rc;
         ENTRY;
 
-        rc = !atomic_read(&qctxt->lqc_lqs);
+        rc = !cfs_atomic_read(&qctxt->lqc_lqs);
 
         RETURN(rc);
 }
@@ -1302,7 +1303,7 @@ void hash_put_lqs(void *obj, void *data)
 void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
 {
         struct lustre_qunit *qunit, *tmp;
-        struct list_head tmp_list;
+        cfs_list_t tmp_list;
         struct l_wait_info lwi = { 0 };
         struct obd_device_target *obt = qctxt->lqc_obt;
         int i;
@@ -1310,28 +1311,29 @@ void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
 
         CFS_INIT_LIST_HEAD(&tmp_list);
 
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         qctxt->lqc_valid = 0;
-        spin_unlock(&qctxt->lqc_lock);
+        cfs_spin_unlock(&qctxt->lqc_lock);
 
-        spin_lock(&qunit_hash_lock);
+        cfs_spin_lock(&qunit_hash_lock);
         for (i = 0; i < NR_DQHASH; i++) {
-                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
+                cfs_list_for_each_entry_safe(qunit, tmp, &qunit_hash[i],
+                                             lq_hash) {
                         if (qunit->lq_ctxt != qctxt)
                                 continue;
                         remove_qunit_nolock(qunit);
-                        list_add(&qunit->lq_hash, &tmp_list);
+                        cfs_list_add(&qunit->lq_hash, &tmp_list);
                 }
         }
-        spin_unlock(&qunit_hash_lock);
+        cfs_spin_unlock(&qunit_hash_lock);
 
-        list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
-                list_del_init(&qunit->lq_hash);
+        cfs_list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
+                cfs_list_del_init(&qunit->lq_hash);
                 compute_lqs_after_removing_qunit(qunit);
 
                 /* wake up all waiters */
                 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, 0);
-                wake_up_all(&qunit->lq_waitq);
+                cfs_waitq_broadcast(&qunit->lq_waitq);
                 qunit_put(qunit);
         }
 
@@ -1339,16 +1341,16 @@ void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
          * unpredicted. So we must wait until lqc_wait_for_qmaster is empty */
         while (cfs_waitq_active(&qctxt->lqc_wait_for_qmaster)) {
                 cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
-                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
-                                     cfs_time_seconds(1));
+                cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+                                                   cfs_time_seconds(1));
         }
 
         cfs_hash_for_each_safe(qctxt->lqc_lqs_hash, hash_put_lqs, NULL);
         l_wait_event(qctxt->lqc_lqs_waitq, check_lqs(qctxt), &lwi);
-        down_write(&obt->obt_rwsem);
+        cfs_down_write(&obt->obt_rwsem);
         cfs_hash_destroy(qctxt->lqc_lqs_hash);
         qctxt->lqc_lqs_hash = NULL;
-        up_write(&obt->obt_rwsem);
+        cfs_up_write(&obt->obt_rwsem);
 
         ptlrpcd_decref();
 
@@ -1363,7 +1365,7 @@ void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
 struct qslave_recov_thread_data {
         struct obd_device *obd;
         struct lustre_quota_ctxt *qctxt;
-        struct completion comp;
+        cfs_completion_t comp;
 };
 
 /* FIXME only recovery block quota by now */
@@ -1381,22 +1383,22 @@ static int qslave_recovery_main(void *arg)
         /* for obdfilter */
         class_incref(obd, "qslave_recovd_filter", obd);
 
-        complete(&data->comp);
+        cfs_complete(&data->comp);
 
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         if (qctxt->lqc_recovery) {
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
                 class_decref(obd, "qslave_recovd_filter", obd);
                 RETURN(0);
         } else {
                 qctxt->lqc_recovery = 1;
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
         }
 
         for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                 struct qunit_data qdata;
                 struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
-                struct list_head id_list;
+                cfs_list_t id_list;
                 struct dquot_id *dqid, *tmp;
                 int ret;
 
@@ -1417,8 +1419,8 @@ static int qslave_recovery_main(void *arg)
                 if (rc)
                         CERROR("Get ids from quota file failed. (rc:%d)\n", rc);
 
-                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
-                        list_del_init(&dqid->di_link);
+                cfs_list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
+                        cfs_list_del_init(&dqid->di_link);
                         /* skip slave recovery on itself */
                         if (is_master(qctxt))
                                 goto free;
@@ -1450,9 +1452,9 @@ free:
                 }
         }
 
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         qctxt->lqc_recovery = 0;
-        spin_unlock(&qctxt->lqc_lock);
+        cfs_spin_unlock(&qctxt->lqc_lock);
         class_decref(obd, "qslave_recovd_filter", obd);
         RETURN(rc);
 }
@@ -1469,14 +1471,15 @@ qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
 
         data.obd = obd;
         data.qctxt = qctxt;
-        init_completion(&data.comp);
+        cfs_init_completion(&data.comp);
 
-        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
+        rc = cfs_kernel_thread(qslave_recovery_main, &data,
+                               CLONE_VM|CLONE_FILES);
         if (rc < 0) {
                 CERROR("Cannot start quota recovery thread: rc %d\n", rc);
                 goto exit;
         }
-        wait_for_completion(&data.comp);
+        cfs_wait_for_completion(&data.comp);
 exit:
         EXIT;
 }
@@ -1518,11 +1521,11 @@ void build_lqs(struct obd_device *obd)
 {
         struct obd_device_target *obt = &obd->u.obt;
         struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
-        struct list_head id_list;
+        cfs_list_t id_list;
         int i, rc;
 
         LASSERT_SEM_LOCKED(&obt->obt_quotachecking);
-        INIT_LIST_HEAD(&id_list);
+        CFS_INIT_LIST_HEAD(&id_list);
         for (i = 0; i < MAXQUOTAS; i++) {
                 struct dquot_id *dqid, *tmp;
 
@@ -1542,11 +1545,11 @@ void build_lqs(struct obd_device *obd)
                         continue;
                 }
 
-                list_for_each_entry_safe(dqid, tmp, &id_list,
-                                         di_link) {
+                cfs_list_for_each_entry_safe(dqid, tmp, &id_list,
+                                             di_link) {
                         struct lustre_qunit_size *lqs;
 
-                        list_del_init(&dqid->di_link);
+                        cfs_list_del_init(&dqid->di_link);
                         lqs = quota_search_lqs(LQS_KEY(i, dqid->di_id),
                                                qctxt, 1);
                         if (lqs && !IS_ERR(lqs)) {
@@ -1585,27 +1588,27 @@ lqs_hash(cfs_hash_t *hs, void *key, unsigned mask)
 }
 
 static int
-lqs_compare(void *key, struct hlist_node *hnode)
+lqs_compare(void *key, cfs_hlist_node_t *hnode)
 {
         struct lustre_qunit_size *q;
         int rc;
         ENTRY;
 
         LASSERT(key);
-        q = hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+        q = cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
 
-        spin_lock(&q->lqs_lock);
+        cfs_spin_lock(&q->lqs_lock);
         rc = (q->lqs_key == *((unsigned long long *)key));
-        spin_unlock(&q->lqs_lock);
+        cfs_spin_unlock(&q->lqs_lock);
 
         RETURN(rc);
 }
 
 static void *
-lqs_get(struct hlist_node *hnode)
+lqs_get(cfs_hlist_node_t *hnode)
 {
         struct lustre_qunit_size *q =
-                hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+                cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
         ENTRY;
 
         __lqs_getref(q);
@@ -1614,10 +1617,10 @@ lqs_get(struct hlist_node *hnode)
 }
 
 static void *
-lqs_put(struct hlist_node *hnode)
+lqs_put(cfs_hlist_node_t *hnode)
 {
         struct lustre_qunit_size *q =
-                hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+                cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
         ENTRY;
 
         __lqs_putref(q);
@@ -1626,10 +1629,10 @@ lqs_put(struct hlist_node *hnode)
 }
 
 static void
-lqs_exit(struct hlist_node *hnode)
+lqs_exit(cfs_hlist_node_t *hnode)
 {
         struct lustre_qunit_size *q =
-                hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+                cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
         ENTRY;
 
         /*
@@ -1637,9 +1640,9 @@ lqs_exit(struct hlist_node *hnode)
          * lqs also was deleted from table by this time
          * so we should have 0 refs.
          */
-        LASSERTF(atomic_read(&q->lqs_refcount) == 0,
+        LASSERTF(cfs_atomic_read(&q->lqs_refcount) == 0,
                  "Busy lqs %p with %d refs\n", q,
-                 atomic_read(&q->lqs_refcount));
+                 cfs_atomic_read(&q->lqs_refcount));
         OBD_FREE_PTR(q);
         EXIT;
 }
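
Note: the quota_context.c hunks above are purely mechanical: each Linux primitive (struct completion, spinlock_t, struct list_head, struct hlist_node, atomic_t, kernel_thread) becomes its cfs_-prefixed libcfs equivalent with unchanged semantics. Below is a minimal sketch of the recurring completion-synchronized thread startup used by qslave_start_recovery() above and by mds_quota_recovery() later in this patch; the demo_* names are hypothetical, while the cfs_* calls are the API this patch introduces.

#include <libcfs/libcfs.h>

struct demo_thread_data {
        void             *demo_payload;
        cfs_completion_t  demo_comp;      /* was: struct completion */
};

static int demo_thread_main(void *arg)
{
        struct demo_thread_data *data = arg;

        /* copy anything needed out of *data before signalling: once
         * cfs_complete() fires, the parent's stack frame may be gone */
        cfs_complete(&data->demo_comp);   /* was: complete() */
        return 0;
}

static void demo_start_thread(void *payload)
{
        struct demo_thread_data data;
        int rc;

        data.demo_payload = payload;
        cfs_init_completion(&data.demo_comp);

        rc = cfs_kernel_thread(demo_thread_main, &data,
                               CLONE_VM | CLONE_FILES);
        if (rc < 0)
                return;
        /* do not return while the child can still touch &data */
        cfs_wait_for_completion(&data.demo_comp);
}
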
index a622e47..3650a67 100644
@@ -76,7 +76,7 @@ int mds_quota_ctl(struct obd_device *obd, struct obd_export *unused,
         int rc = 0;
         ENTRY;
 
-        do_gettimeofday(&work_start);
+        cfs_gettimeofday(&work_start);
         switch (oqctl->qc_cmd) {
         case Q_QUOTAON:
                 oqctl->qc_id = obt->obt_qfmt; /* override qfmt version */
@@ -119,7 +119,7 @@ int mds_quota_ctl(struct obd_device *obd, struct obd_export *unused,
                 CDEBUG(D_INFO, "mds_quotactl admin quota command %d, id %u, "
                                "type %d, failed: rc = %d\n",
                        oqctl->qc_cmd, oqctl->qc_id, oqctl->qc_type, rc);
-        do_gettimeofday(&work_end);
+        cfs_gettimeofday(&work_end);
         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
         lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_QUOTA_CTL, timediff);
 
@@ -141,16 +141,16 @@ int filter_quota_ctl(struct obd_device *unused, struct obd_export *exp,
         int rc = 0;
         ENTRY;
 
-        do_gettimeofday(&work_start);
+        cfs_gettimeofday(&work_start);
         switch (oqctl->qc_cmd) {
         case Q_FINVALIDATE:
         case Q_QUOTAON:
         case Q_QUOTAOFF:
-                down(&obt->obt_quotachecking);
+                cfs_down(&obt->obt_quotachecking);
                 if (oqctl->qc_cmd == Q_FINVALIDATE &&
                     (obt->obt_qctxt.lqc_flags & UGQUOTA2LQC(oqctl->qc_type))) {
                         CWARN("quota[%u] is still on\n", oqctl->qc_type);
-                        up(&obt->obt_quotachecking);
+                        cfs_up(&obt->obt_quotachecking);
                         rc = -EBUSY;
                         break;
                 }
@@ -192,7 +192,7 @@ int filter_quota_ctl(struct obd_device *unused, struct obd_export *exp,
                                 else if (quota_is_off(qctxt, oqctl))
                                                 rc = -EALREADY;
                         }
-                        up(&obt->obt_quotachecking);
+                        cfs_up(&obt->obt_quotachecking);
                 }
 
                 break;
@@ -287,7 +287,7 @@ adjust:
                        obd->obd_name, oqctl->qc_cmd);
                 RETURN(-EFAULT);
         }
-        do_gettimeofday(&work_end);
+        cfs_gettimeofday(&work_end);
         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
         lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_QUOTA_CTL, timediff);
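
Note: quota_ctl.c follows suit; besides the semaphore calls, the timing calls change from do_gettimeofday() to cfs_gettimeofday(). A condensed sketch of the elapsed-time accounting that brackets both dispatchers above, assuming (as the surrounding code does) that cfs_timeval_sub() returns the difference in microseconds; the demo_ name is hypothetical.

static void demo_timed_dispatch(struct lustre_quota_ctxt *qctxt)
{
        struct timeval work_start, work_end;
        long timediff;

        cfs_gettimeofday(&work_start);    /* was: do_gettimeofday() */
        /* ... handle oqctl->qc_cmd ... */
        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_QUOTA_CTL, timediff);
}
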
 
index 9555820..168f6df 100644
@@ -68,7 +68,7 @@
 #ifdef HAVE_QUOTA_SUPPORT
 
 static cfs_time_t last_print = 0;
-static spinlock_t last_print_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t last_print_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 static int filter_quota_setup(struct obd_device *obd)
 {
@@ -76,9 +76,9 @@ static int filter_quota_setup(struct obd_device *obd)
         struct obd_device_target *obt = &obd->u.obt;
         ENTRY;
 
-        init_rwsem(&obt->obt_rwsem);
+        cfs_init_rwsem(&obt->obt_rwsem);
         obt->obt_qfmt = LUSTRE_QUOTA_V2;
-        sema_init(&obt->obt_quotachecking, 1);
+        cfs_sema_init(&obt->obt_quotachecking, 1);
         rc = qctxt_init(obd, NULL);
         if (rc)
                 CERROR("initialize quota context failed! (rc:%d)\n", rc);
@@ -103,9 +103,9 @@ static int filter_quota_setinfo(struct obd_device *obd, void *data)
         LASSERT(imp != NULL);
 
         /* setup the quota context import */
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         if (qctxt->lqc_import != NULL) {
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
                 if (qctxt->lqc_import == imp)
                         CDEBUG(D_WARNING, "%s: lqc_import(%p) of obd(%p) was "
                                "activated already.\n", obd->obd_name, imp, obd);
@@ -120,7 +120,7 @@ static int filter_quota_setinfo(struct obd_device *obd, void *data)
                 imp->imp_connect_data.ocd_connect_flags |=
                                 (exp->exp_connect_flags &
                                  (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
                 CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated "
                        "now.\n", obd->obd_name, imp, obd);
 
@@ -146,16 +146,16 @@ static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd
 
         /* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
          * should be invalid b=12374 */
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         if (qctxt->lqc_import == imp) {
                 qctxt->lqc_import = NULL;
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
                 CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is invalid now.\n",
                        obd->obd_name, imp, obd);
                 ptlrpc_cleanup_imp(imp);
                 dqacq_interrupt(qctxt);
         } else {
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
         }
         RETURN(0);
 }
@@ -212,11 +212,11 @@ static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
                                        rc);
                         break;
                 } else {
-                        spin_lock(&lqs->lqs_lock);
+                        cfs_spin_lock(&lqs->lqs_lock);
                         if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
                                 oa->o_flags |= (cnt == USRQUOTA) ?
                                         OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
-                                spin_unlock(&lqs->lqs_lock);
+                                cfs_spin_unlock(&lqs->lqs_lock);
                                 CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
                                        "sync_blk(%d)\n", lqs->lqs_bunit_sz,
                                        qctxt->lqc_sync_blk);
@@ -224,7 +224,7 @@ static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
                                 lqs_putref(lqs);
                                 continue;
                         }
-                        spin_unlock(&lqs->lqs_lock);
+                        cfs_spin_unlock(&lqs->lqs_lock);
                         /* this is for quota_search_lqs */
                         lqs_putref(lqs);
                 }
@@ -276,12 +276,12 @@ static int quota_check_common(struct obd_device *obd, const unsigned int id[],
         int rc = 0, rc2[2] = { 0, 0 };
         ENTRY;
 
-        spin_lock(&qctxt->lqc_lock);
+        cfs_spin_lock(&qctxt->lqc_lock);
         if (!qctxt->lqc_valid){
-                spin_unlock(&qctxt->lqc_lock);
+                cfs_spin_unlock(&qctxt->lqc_lock);
                 RETURN(rc);
         }
-        spin_unlock(&qctxt->lqc_lock);
+        cfs_spin_unlock(&qctxt->lqc_lock);
 
         for (i = 0; i < MAXQUOTAS; i++) {
                 struct lustre_qunit_size *lqs = NULL;
@@ -309,7 +309,7 @@ static int quota_check_common(struct obd_device *obd, const unsigned int id[],
                 }
 
                 rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
-                spin_lock(&lqs->lqs_lock);
+                cfs_spin_lock(&lqs->lqs_lock);
                 if (!cycle) {
                         if (isblk) {
                                 pending[i] = count * CFS_PAGE_SIZE;
@@ -319,7 +319,8 @@ static int quota_check_common(struct obd_device *obd, const unsigned int id[],
                                 if (inode) {
                                         mb = pending[i];
                                         rc = fsfilt_get_mblk(obd, qctxt->lqc_sb,
-                                                             &mb, inode,frags);
+                                                             &mb, inode,
+                                                             frags);
                                         if (rc)
                                                 CERROR("%s: can't get extra "
                                                        "meta blocks\n",
@@ -364,7 +365,7 @@ static int quota_check_common(struct obd_device *obd, const unsigned int id[],
                                 rc2[i] = QUOTA_RET_ACQUOTA;
                 }
 
-                spin_unlock(&lqs->lqs_lock);
+                cfs_spin_unlock(&lqs->lqs_lock);
 
                 if (lqs->lqs_blk_rec  < 0 &&
                     qdata[i].qd_count <
@@ -437,14 +438,14 @@ static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
          * pre-dqacq in time and quota hash on ost is used up, we
          * have to wait for the completion of in flight dqacq/dqrel,
          * in order to get enough quota for write b=12588 */
-        do_gettimeofday(&work_start);
+        cfs_gettimeofday(&work_start);
         while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
                                         inode, frags)) &
                QUOTA_RET_ACQUOTA) {
 
-                spin_lock(&qctxt->lqc_lock);
+                cfs_spin_lock(&qctxt->lqc_lock);
                 if (!qctxt->lqc_import && oti) {
-                        spin_unlock(&qctxt->lqc_lock);
+                        cfs_spin_unlock(&qctxt->lqc_lock);
 
                         LASSERT(oti && oti->oti_thread &&
                                 oti->oti_thread->t_watchdog);
@@ -455,9 +456,9 @@ static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
                                      &lwi);
                         CDEBUG(D_QUOTA, "wake up when quota master is back\n");
                         lc_watchdog_touch(oti->oti_thread->t_watchdog,
-                                 GET_TIMEOUT(oti->oti_thread->t_svc));
+                                 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
                 } else {
-                        spin_unlock(&qctxt->lqc_lock);
+                        cfs_spin_unlock(&qctxt->lqc_lock);
                 }
 
                 cycle++;
@@ -499,36 +500,36 @@ static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
 
                         if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
                                 lc_watchdog_touch(oti->oti_thread->t_watchdog,
-                                         GET_TIMEOUT(oti->oti_thread->t_svc));
+                                       CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
                         CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
                                count_err++);
 
-                        init_waitqueue_head(&waitq);
+                        cfs_waitq_init(&waitq);
                         lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
                                           NULL);
                         l_wait_event(waitq, 0, &lwi);
                 }
 
                 if (rc < 0 || cycle % 10 == 0) {
-                        spin_lock(&last_print_lock);
+                        cfs_spin_lock(&last_print_lock);
                         if (last_print == 0 ||
                             cfs_time_before((last_print + cfs_time_seconds(30)),
                                             cfs_time_current())) {
                                 last_print = cfs_time_current();
-                                spin_unlock(&last_print_lock);
+                                cfs_spin_unlock(&last_print_lock);
                                 CWARN("still haven't managed to acquire quota "
                                       "space from the quota master after %d "
                                       "retries (err=%d, rc=%d)\n",
                                       cycle, count_err - 1, rc);
                         } else {
-                                spin_unlock(&last_print_lock);
+                                cfs_spin_unlock(&last_print_lock);
                         }
                 }
 
                 CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
                        cycle);
         }
-        do_gettimeofday(&work_end);
+        cfs_gettimeofday(&work_end);
         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
         lprocfs_counter_add(qctxt->lqc_stats,
                             isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
@@ -560,7 +561,7 @@ static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
         if (!ll_sb_any_quota_active(qctxt->lqc_sb))
                 RETURN(0);
 
-        do_gettimeofday(&work_start);
+        cfs_gettimeofday(&work_start);
         for (i = 0; i < MAXQUOTAS; i++) {
                 struct lustre_qunit_size *lqs = NULL;
 
@@ -587,7 +588,7 @@ static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
                         continue;
                 }
 
-                spin_lock(&lqs->lqs_lock);
+                cfs_spin_lock(&lqs->lqs_lock);
                 if (isblk) {
                         LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
                                  "there are too many blocks! [id %u] [%c] "
@@ -609,14 +610,14 @@ static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
                        obd->obd_name,
                        isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
                        i, pending[i], isblk);
-                spin_unlock(&lqs->lqs_lock);
+                cfs_spin_unlock(&lqs->lqs_lock);
 
                 /* for quota_search_lqs in pending_commit */
                 lqs_putref(lqs);
                 /* for quota_search_lqs in quota_check */
                 lqs_putref(lqs);
         }
-        do_gettimeofday(&work_end);
+        cfs_gettimeofday(&work_end);
         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
         lprocfs_counter_add(qctxt->lqc_stats,
                             isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
@@ -649,12 +650,12 @@ static int mds_quota_setup(struct obd_device *obd)
                 RETURN(0);
         }
 
-        init_rwsem(&obt->obt_rwsem);
+        cfs_init_rwsem(&obt->obt_rwsem);
         obt->obt_qfmt = LUSTRE_QUOTA_V2;
         mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
-        sema_init(&obt->obt_quotachecking, 1);
+        cfs_sema_init(&obt->obt_quotachecking, 1);
         /* initialize quota master and quota context */
-        sema_init(&mds->mds_qonoff_sem, 1);
+        cfs_sema_init(&mds->mds_qonoff_sem, 1);
         rc = qctxt_init(obd, dqacq_handler);
         if (rc) {
                 CERROR("%s: initialize quota context failed! (rc:%d)\n",
@@ -703,9 +704,9 @@ static int mds_quota_fs_cleanup(struct obd_device *obd)
         memset(&oqctl, 0, sizeof(oqctl));
         oqctl.qc_type = UGQUOTA;
 
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
         mds_admin_quota_off(obd, &oqctl);
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         RETURN(0);
 }
 
@@ -724,15 +725,15 @@ static int quota_acquire_common(struct obd_device *obd, const unsigned int id[],
 #endif /* __KERNEL__ */
 
 struct osc_quota_info {
-        struct list_head        oqi_hash;       /* hash list */
+        cfs_list_t              oqi_hash;       /* hash list */
         struct client_obd      *oqi_cli;        /* osc obd */
         unsigned int            oqi_id;         /* uid/gid of a file */
         short                   oqi_type;       /* quota type */
 };
 
-spinlock_t qinfo_list_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t qinfo_list_lock = CFS_SPIN_LOCK_UNLOCKED;
 
-static struct list_head qinfo_hash[NR_DQHASH];
+static cfs_list_t qinfo_hash[NR_DQHASH];
 /* SLAB cache for client quota context */
 cfs_mem_cache_t *qinfo_cachep = NULL;
 
@@ -749,18 +750,18 @@ static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
 /* caller must hold qinfo_list_lock */
 static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
 {
-        struct list_head *head = qinfo_hash +
+        cfs_list_t *head = qinfo_hash +
                 hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
 
         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
-        list_add(&oqi->oqi_hash, head);
+        cfs_list_add(&oqi->oqi_hash, head);
 }
 
 /* caller must hold qinfo_list_lock */
 static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
 {
         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
-        list_del_init(&oqi->oqi_hash);
+        cfs_list_del_init(&oqi->oqi_hash);
 }
 
 /* caller must hold qinfo_list_lock */
@@ -772,7 +773,7 @@ static inline struct osc_quota_info *find_qinfo(struct client_obd *cli,
         ENTRY;
 
         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
-        list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
+        cfs_list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
                 if (oqi->oqi_cli == cli &&
                     oqi->oqi_id == id && oqi->oqi_type == type)
                         return oqi;
@@ -809,7 +810,7 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
         int cnt, rc = QUOTA_OK;
         ENTRY;
 
-        spin_lock(&qinfo_list_lock);
+        cfs_spin_lock(&qinfo_list_lock);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 struct osc_quota_info *oqi = NULL;
 
@@ -820,7 +821,7 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
                         break;
                 }
         }
-        spin_unlock(&qinfo_list_lock);
+        cfs_spin_unlock(&qinfo_list_lock);
 
         if (rc == NO_QUOTA)
                 CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
@@ -856,13 +857,13 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
                         break;
                 }
 
-                spin_lock(&qinfo_list_lock);
+                cfs_spin_lock(&qinfo_list_lock);
                 old = find_qinfo(cli, id, cnt);
                 if (old && !noquota)
                         remove_qinfo_hash(old);
                 else if (!old && noquota)
                         insert_qinfo_hash(oqi);
-                spin_unlock(&qinfo_list_lock);
+                cfs_spin_unlock(&qinfo_list_lock);
 
                 if (old || !noquota)
                         free_qinfo(oqi);
@@ -887,16 +888,16 @@ int osc_quota_cleanup(struct obd_device *obd)
         int i;
         ENTRY;
 
-        spin_lock(&qinfo_list_lock);
+        cfs_spin_lock(&qinfo_list_lock);
         for (i = 0; i < NR_DQHASH; i++) {
-                list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
+                cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
                         if (oqi->oqi_cli != cli)
                                 continue;
                         remove_qinfo_hash(oqi);
                         free_qinfo(oqi);
                 }
         }
-        spin_unlock(&qinfo_list_lock);
+        cfs_spin_unlock(&qinfo_list_lock);
 
         RETURN(0);
 }
@@ -925,14 +926,14 @@ int osc_quota_exit(void)
         int i, rc;
         ENTRY;
 
-        spin_lock(&qinfo_list_lock);
+        cfs_spin_lock(&qinfo_list_lock);
         for (i = 0; i < NR_DQHASH; i++) {
-                list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
+                cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
                         remove_qinfo_hash(oqi);
                         free_qinfo(oqi);
                 }
         }
-        spin_unlock(&qinfo_list_lock);
+        cfs_spin_unlock(&qinfo_list_lock);
 
         rc = cfs_mem_cache_destroy(qinfo_cachep);
         LASSERTF(rc == 0, "couldn't destroy qinfo_cachep slab\n");
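
Note: the osc_quota_* conversion above is the clearest instance of the list rename: a static array of cfs_list_t buckets guarded by a cfs_spinlock_t with the CFS_SPIN_LOCK_UNLOCKED static initializer, where any walk that may unlink entries uses the _safe iterator. A self-contained sketch follows; the demo_* names and the table size are hypothetical.

#include <libcfs/libcfs.h>

#define DEMO_NR_HASH 8

struct demo_item {
        cfs_list_t di_hash;               /* was: struct list_head */
        int        di_key;
};

static cfs_list_t     demo_hash[DEMO_NR_HASH];
static cfs_spinlock_t demo_lock = CFS_SPIN_LOCK_UNLOCKED;

static void demo_init(void)
{
        int i;

        for (i = 0; i < DEMO_NR_HASH; i++)
                CFS_INIT_LIST_HEAD(&demo_hash[i]);
}

static void demo_purge(int key)
{
        struct demo_item *it, *n;
        int i;

        cfs_spin_lock(&demo_lock);
        for (i = 0; i < DEMO_NR_HASH; i++) {
                /* _safe variant: "it" may be unlinked during the walk */
                cfs_list_for_each_entry_safe(it, n, &demo_hash[i], di_hash) {
                        if (it->di_key == key)
                                cfs_list_del_init(&it->di_hash);
                }
        }
        cfs_spin_unlock(&demo_lock);
}
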
index e3c88e6..4a046e5 100644
@@ -94,7 +94,7 @@
                lqs->lqs_bunit_sz, lqs->lqs_btune_sz, lqs->lqs_iunit_sz,       \
                lqs->lqs_itune_sz, lqs->lqs_bwrite_pending,                    \
                lqs->lqs_iwrite_pending, lqs->lqs_ino_rec,                     \
-               lqs->lqs_blk_rec, atomic_read(&lqs->lqs_refcount), ## arg);
+               lqs->lqs_blk_rec, cfs_atomic_read(&lqs->lqs_refcount), ## arg);
 
 
 /* quota_context.c */
@@ -219,8 +219,8 @@ static inline int client_quota_recoverable_error(int rc)
 
 static inline int client_quota_should_resend(int resend, struct client_obd *cli)
 {
-        return (atomic_read(&cli->cl_quota_resends) >= 0) ?
-                atomic_read(&cli->cl_quota_resends) > resend : 1;
+        return (cfs_atomic_read(&cli->cl_quota_resends) >= 0) ?
+                cfs_atomic_read(&cli->cl_quota_resends) > resend : 1;
 }
 
 #endif
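
Note: in the header only readers change. client_quota_should_resend() above now reads the resend budget through cfs_atomic_read(); the logic is unchanged: a negative cl_quota_resends means "resend without limit", otherwise the counter bounds the retry count, i.e. equivalent to this sketch (demo_ name hypothetical):

static int demo_should_resend(int resend, cfs_atomic_t *budget)
{
        return (cfs_atomic_read(budget) >= 0) ?
                cfs_atomic_read(budget) > resend : 1;
}
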
index f0ccb99..180e4cd 100644
@@ -65,8 +65,8 @@
 #ifdef HAVE_QUOTA_SUPPORT
 
 /* lock ordering: mds->mds_qonoff_sem > dquot->dq_sem */
-static struct list_head lustre_dquot_hash[NR_DQHASH];
-static spinlock_t dquot_hash_lock = SPIN_LOCK_UNLOCKED;
+static cfs_list_t lustre_dquot_hash[NR_DQHASH];
+static cfs_spinlock_t dquot_hash_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 cfs_mem_cache_t *lustre_dquot_cachep;
 
@@ -95,7 +95,7 @@ void lustre_dquot_exit(void)
         /* FIXME cleanup work ?? */
 
         for (i = 0; i < NR_DQHASH; i++) {
-                LASSERT(list_empty(lustre_dquot_hash + i));
+                LASSERT(cfs_list_empty(lustre_dquot_hash + i));
         }
         if (lustre_dquot_cachep) {
                 int rc;
@@ -127,7 +127,7 @@ static struct lustre_dquot *find_dquot(int hashent,
         ENTRY;
 
         LASSERT_SPIN_LOCKED(&dquot_hash_lock);
-        list_for_each_entry(dquot, &lustre_dquot_hash[hashent], dq_hash) {
+        cfs_list_for_each_entry(dquot, &lustre_dquot_hash[hashent], dq_hash) {
                 if (dquot->dq_info == lqi &&
                     dquot->dq_id == id && dquot->dq_type == type)
                         RETURN(dquot);
@@ -146,7 +146,7 @@ static struct lustre_dquot *alloc_dquot(struct lustre_quota_info *lqi,
                 RETURN(NULL);
 
         CFS_INIT_LIST_HEAD(&dquot->dq_hash);
-        init_mutex_locked(&dquot->dq_sem);
+        cfs_init_mutex_locked(&dquot->dq_sem);
         dquot->dq_refcnt = 1;
         dquot->dq_info = lqi;
         dquot->dq_id = id;
@@ -163,29 +163,29 @@ static void free_dquot(struct lustre_dquot *dquot)
 
 static void insert_dquot_nolock(struct lustre_dquot *dquot)
 {
-        struct list_head *head = lustre_dquot_hash +
+        cfs_list_t *head = lustre_dquot_hash +
             dquot_hashfn(dquot->dq_info, dquot->dq_id, dquot->dq_type);
-        LASSERT(list_empty(&dquot->dq_hash));
-        list_add(&dquot->dq_hash, head);
+        LASSERT(cfs_list_empty(&dquot->dq_hash));
+        cfs_list_add(&dquot->dq_hash, head);
 }
 
 static void remove_dquot_nolock(struct lustre_dquot *dquot)
 {
-        LASSERT(!list_empty(&dquot->dq_hash));
-        list_del_init(&dquot->dq_hash);
+        LASSERT(!cfs_list_empty(&dquot->dq_hash));
+        cfs_list_del_init(&dquot->dq_hash);
 }
 
 static void lustre_dqput(struct lustre_dquot *dquot)
 {
         ENTRY;
-        spin_lock(&dquot_hash_lock);
+        cfs_spin_lock(&dquot_hash_lock);
         LASSERT(dquot->dq_refcnt);
         dquot->dq_refcnt--;
         if (!dquot->dq_refcnt) {
                 remove_dquot_nolock(dquot);
                 free_dquot(dquot);
         }
-        spin_unlock(&dquot_hash_lock);
+        cfs_spin_unlock(&dquot_hash_lock);
         EXIT;
 }
 
@@ -200,20 +200,20 @@ static struct lustre_dquot *lustre_dqget(struct obd_device *obd,
         if ((empty = alloc_dquot(lqi, id, type)) == NULL)
                 RETURN(ERR_PTR(-ENOMEM));
 
-        spin_lock(&dquot_hash_lock);
+        cfs_spin_lock(&dquot_hash_lock);
         if ((dquot = find_dquot(hashent, lqi, id, type)) != NULL) {
                 dquot->dq_refcnt++;
-                spin_unlock(&dquot_hash_lock);
+                cfs_spin_unlock(&dquot_hash_lock);
                 free_dquot(empty);
         } else {
                 int rc;
 
                 dquot = empty;
                 insert_dquot_nolock(dquot);
-                spin_unlock(&dquot_hash_lock);
+                cfs_spin_unlock(&dquot_hash_lock);
 
                 rc = fsfilt_dquot(obd, dquot, QFILE_RD_DQUOT);
-                up(&dquot->dq_sem);
+                cfs_up(&dquot->dq_sem);
                 if (rc) {
                         CERROR("can't read dquot from admin quotafile! "
                                "(rc:%d)\n", rc);
@@ -237,11 +237,11 @@ static void init_oqaq(struct quota_adjust_qunit *oqaq,
         oqaq->qaq_flags = type;
         lqs = quota_search_lqs(LQS_KEY(type, id), qctxt, 0);
         if (lqs && !IS_ERR(lqs)) {
-                spin_lock(&lqs->lqs_lock);
+                cfs_spin_lock(&lqs->lqs_lock);
                 oqaq->qaq_bunit_sz = lqs->lqs_bunit_sz;
                 oqaq->qaq_iunit_sz = lqs->lqs_iunit_sz;
                 oqaq->qaq_flags    = lqs->lqs_flags;
-                spin_unlock(&lqs->lqs_lock);
+                cfs_spin_unlock(&lqs->lqs_lock);
                 lqs_putref(lqs);
         } else {
                 CDEBUG(D_QUOTA, "Can't find the lustre qunit size!\n");
@@ -273,7 +273,7 @@ int dqacq_adjust_qunit_sz(struct obd_device *obd, qid_t id, int type,
         if (!oqaq)
                 GOTO(out, rc = -ENOMEM);
 
-        down(&dquot->dq_sem);
+        cfs_down(&dquot->dq_sem);
         init_oqaq(oqaq, qctxt, id, type);
 
         rc = dquot_create_oqaq(qctxt, dquot, ost_num, mdt_num,
@@ -307,7 +307,7 @@ int dqacq_adjust_qunit_sz(struct obd_device *obd, qid_t id, int type,
         else
                 qid[USRQUOTA] = dquot->dq_id;
 
-        up(&dquot->dq_sem);
+        cfs_up(&dquot->dq_sem);
 
         rc = qctxt_adjust_qunit(obd, qctxt, qid, is_blk, 0, NULL);
         if (rc == -EDQUOT || rc == -EBUSY) {
@@ -331,7 +331,7 @@ out:
 
         RETURN(rc);
 out_sem:
-       up(&dquot->dq_sem);
+        cfs_up(&dquot->dq_sem);
        goto out;
 }
 
@@ -359,8 +359,8 @@ int dqacq_handler(struct obd_device *obd, struct qunit_data *qdata, int opc)
         DQUOT_DEBUG(dquot, "get dquot in dqacq_handler\n");
         QINFO_DEBUG(dquot->dq_info, "get dquot in dqadq_handler\n");
 
-        down(&mds->mds_qonoff_sem);
-        down(&dquot->dq_sem);
+        cfs_down(&mds->mds_qonoff_sem);
+        cfs_down(&dquot->dq_sem);
 
         if (dquot->dq_status & DQ_STATUS_RECOVERY) {
                 DQUOT_DEBUG(dquot, "this dquot is being recovered.\n");
@@ -432,8 +432,8 @@ int dqacq_handler(struct obd_device *obd, struct qunit_data *qdata, int opc)
         rc = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
         EXIT;
 out:
-        up(&dquot->dq_sem);
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&dquot->dq_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         lustre_dqput(dquot);
         if (rc != -EDQUOT)
                 dqacq_adjust_qunit_sz(obd, qdata->qd_id, QDATA_IS_GRP(qdata),
@@ -446,10 +446,10 @@ out:
                 qdata->qd_qunit  = QDATA_IS_BLK(qdata) ? qctxt->lqc_bunit_sz :
                                                          qctxt->lqc_iunit_sz;
         } else {
-                spin_lock(&lqs->lqs_lock);
+                cfs_spin_lock(&lqs->lqs_lock);
                 qdata->qd_qunit  = QDATA_IS_BLK(qdata) ? lqs->lqs_bunit_sz :
                                                          lqs->lqs_iunit_sz;
-                spin_unlock(&lqs->lqs_lock);
+                cfs_spin_unlock(&lqs->lqs_lock);
         }
 
         if (QDATA_IS_BLK(qdata))
@@ -595,9 +595,9 @@ int mds_quota_invalidate(struct obd_device *obd, struct obd_quotactl *oqctl)
             oqctl->qc_type != UGQUOTA)
                 RETURN(-EINVAL);
 
-        down(&obt->obt_quotachecking);
+        cfs_down(&obt->obt_quotachecking);
         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
 
         for (i = 0; i < MAXQUOTAS; i++) {
                 struct file *fp;
@@ -625,9 +625,9 @@ int mds_quota_invalidate(struct obd_device *obd, struct obd_quotactl *oqctl)
                         filp_close(fp, 0);
         }
 
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-        up(&obt->obt_quotachecking);
+        cfs_up(&obt->obt_quotachecking);
         RETURN(rc ? : rc1);
 }
 
@@ -644,18 +644,18 @@ int mds_quota_finvalidate(struct obd_device *obd, struct obd_quotactl *oqctl)
             oqctl->qc_type != UGQUOTA)
                 RETURN(-EINVAL);
 
-        down(&obt->obt_quotachecking);
+        cfs_down(&obt->obt_quotachecking);
         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
 
         oqctl->qc_cmd = Q_FINVALIDATE;
         rc = fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);
         if (!rc)
                 rc = obd_quotactl(mds->mds_osc_exp, oqctl);
 
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-        up(&obt->obt_quotachecking);
+        cfs_up(&obt->obt_quotachecking);
         RETURN(rc);
 }
 
@@ -673,7 +673,7 @@ int init_admin_quotafiles(struct obd_device *obd, struct obd_quotactl *oqctl)
 
         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
 
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
 
         for (i = 0; i < MAXQUOTAS && !rc; i++) {
                 struct file *fp;
@@ -742,7 +742,7 @@ int init_admin_quotafiles(struct obd_device *obd, struct obd_quotactl *oqctl)
                 filp_close(fp, 0);
                 qinfo->qi_files[i] = NULL;
         }
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
 
         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
         RETURN(rc);
@@ -851,16 +851,16 @@ int mds_quota_on(struct obd_device *obd, struct obd_quotactl *oqctl)
             oqctl->qc_type != UGQUOTA)
                 RETURN(-EINVAL);
 
-        down(&obt->obt_quotachecking);
+        cfs_down(&obt->obt_quotachecking);
         if (obt->obt_qctxt.lqc_immutable) {
                 LCONSOLE_ERROR("Failed to turn Quota on, immutable mode "
                                "(is SOM enabled?)\n");
-                up(&obt->obt_quotachecking);
+                cfs_up(&obt->obt_quotachecking);
                 RETURN(-ECANCELED);
         }
 
         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
         rc2 = mds_admin_quota_on(obd, oqctl);
         if (rc2 && rc2 != -EALREADY) {
                 CWARN("mds quota[%d] failed to turn on: rc = %d\n", oqctl->qc_type, rc2);
@@ -903,9 +903,9 @@ int mds_quota_on(struct obd_device *obd, struct obd_quotactl *oqctl)
         EXIT;
 
 out:
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-        up(&obt->obt_quotachecking);
+        cfs_up(&obt->obt_quotachecking);
         return rc ? : (rc1 ? : rc2);
 }
 
@@ -930,7 +930,7 @@ int do_mds_quota_off(struct obd_device *obd, struct obd_quotactl *oqctl)
                 RETURN(-EINVAL);
 
         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
         /* close admin quota files */
         rc2 = mds_admin_quota_off(obd, oqctl);
         if (rc2 && rc2 != -EALREADY) {
@@ -975,7 +975,7 @@ int do_mds_quota_off(struct obd_device *obd, struct obd_quotactl *oqctl)
         EXIT;
 
 out:
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
         return rc ? : (rc1 ? : rc2);
 }
@@ -986,9 +986,9 @@ int mds_quota_off(struct obd_device *obd, struct obd_quotactl *oqctl)
         int rc;
         ENTRY;
 
-        down(&obt->obt_quotachecking);
+        cfs_down(&obt->obt_quotachecking);
         rc = do_mds_quota_off(obd, oqctl);
-        up(&obt->obt_quotachecking);
+        cfs_up(&obt->obt_quotachecking);
         RETURN(rc);
 }
 
@@ -1004,7 +1004,7 @@ int mds_set_dqinfo(struct obd_device *obd, struct obd_quotactl *oqctl)
             oqctl->qc_type != GRPQUOTA)
                 RETURN(-EINVAL);
 
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
         if (qinfo->qi_files[oqctl->qc_type] == NULL) {
                 CWARN("quota[%u] is off\n", oqctl->qc_type);
                 GOTO(out, rc = -ESRCH);
@@ -1018,7 +1018,7 @@ int mds_set_dqinfo(struct obd_device *obd, struct obd_quotactl *oqctl)
         EXIT;
 
 out:
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         return rc;
 }
 
@@ -1034,7 +1034,7 @@ int mds_get_dqinfo(struct obd_device *obd, struct obd_quotactl *oqctl)
             oqctl->qc_type != GRPQUOTA)
                 RETURN(-EINVAL);
 
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
         if (qinfo->qi_files[oqctl->qc_type] == NULL) {
                 CWARN("quota[%u] is off\n", oqctl->qc_type);
                 GOTO(out, rc = -ESRCH);
@@ -1046,7 +1046,7 @@ int mds_get_dqinfo(struct obd_device *obd, struct obd_quotactl *oqctl)
         EXIT;
 
 out:
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         return rc;
 }
 
@@ -1350,7 +1350,7 @@ int mds_set_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
         OBD_ALLOC_PTR(oqaq);
         if (!oqaq)
                 RETURN(-ENOMEM);
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
         init_oqaq(oqaq, qctxt, oqctl->qc_id, oqctl->qc_type);
 
         if (qinfo->qi_files[oqctl->qc_type] == NULL) {
@@ -1364,10 +1364,10 @@ int mds_set_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
         DQUOT_DEBUG(dquot, "get dquot in mds_set_blk\n");
         QINFO_DEBUG(dquot->dq_info, "get dquot in mds_set_blk\n");
 
-        down(&dquot->dq_sem);
+        cfs_down(&dquot->dq_sem);
 
         if (dquot->dq_status) {
-                up(&dquot->dq_sem);
+                cfs_up(&dquot->dq_sem);
                 lustre_dqput(dquot);
                 GOTO(out_sem, rc = -EBUSY);
         }
@@ -1432,23 +1432,23 @@ int mds_set_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
 
         rc = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
 
-        up(&dquot->dq_sem);
+        cfs_up(&dquot->dq_sem);
 
         if (rc) {
                 CERROR("set limit failed! (rc:%d)\n", rc);
                 goto out;
         }
 
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
 
         adjust_lqs(obd, oqaq);
 
         orig_set = ihardlimit || isoftlimit;
         now_set  = dqblk->dqb_ihardlimit || dqblk->dqb_isoftlimit;
         if (dqblk->dqb_valid & QIF_ILIMITS && orig_set != now_set) {
-                down(&dquot->dq_sem);
+                cfs_down(&dquot->dq_sem);
                 dquot->dq_dqb.dqb_curinodes = 0;
-                up(&dquot->dq_sem);
+                cfs_up(&dquot->dq_sem);
                 rc = mds_init_slave_ilimits(obd, oqctl, orig_set);
                 if (rc) {
                         CERROR("init slave ilimits failed! (rc:%d)\n", rc);
@@ -1459,9 +1459,9 @@ int mds_set_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
         orig_set = bhardlimit || bsoftlimit;
         now_set  = dqblk->dqb_bhardlimit || dqblk->dqb_bsoftlimit;
         if (dqblk->dqb_valid & QIF_BLIMITS && orig_set != now_set) {
-                down(&dquot->dq_sem);
+                cfs_down(&dquot->dq_sem);
                 dquot->dq_dqb.dqb_curspace = 0;
-                up(&dquot->dq_sem);
+                cfs_up(&dquot->dq_sem);
                 rc = mds_init_slave_blimits(obd, oqctl, orig_set);
                 if (rc) {
                         CERROR("init slave blimits failed! (rc:%d)\n", rc);
@@ -1470,8 +1470,8 @@ int mds_set_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
         }
 
 revoke_out:
-        down(&mds->mds_qonoff_sem);
-        down(&dquot->dq_sem);
+        cfs_down(&mds->mds_qonoff_sem);
+        cfs_down(&dquot->dq_sem);
         if (rc) {
                 /* cancel previous setting */
                 dquot->dq_dqb.dqb_ihardlimit = ihardlimit;
@@ -1482,16 +1482,16 @@ revoke_out:
                 dquot->dq_dqb.dqb_itime = itime;
         }
         rc2 = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
-        up(&dquot->dq_sem);
+        cfs_up(&dquot->dq_sem);
 
 out:
-        down(&dquot->dq_sem);
+        cfs_down(&dquot->dq_sem);
         dquot->dq_status &= ~DQ_STATUS_SET;
-        up(&dquot->dq_sem);
+        cfs_up(&dquot->dq_sem);
         lustre_dqput(dquot);
         EXIT;
 out_sem:
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
 
         if (oqaq)
                 OBD_FREE_PTR(oqaq);
@@ -1552,7 +1552,7 @@ int mds_get_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
             oqctl->qc_type != GRPQUOTA)
                 RETURN(-EINVAL);
 
-        down(&mds->mds_qonoff_sem);
+        cfs_down(&mds->mds_qonoff_sem);
         dqblk->dqb_valid = 0;
         if (qinfo->qi_files[oqctl->qc_type] == NULL) {
                 CWARN("quota[%u] is off\n", oqctl->qc_type);
@@ -1563,7 +1563,7 @@ int mds_get_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
         if (IS_ERR(dquot))
                 GOTO(out, rc = PTR_ERR(dquot));
 
-        down(&dquot->dq_sem);
+        cfs_down(&dquot->dq_sem);
         dqblk->dqb_ihardlimit = dquot->dq_dqb.dqb_ihardlimit;
         dqblk->dqb_isoftlimit = dquot->dq_dqb.dqb_isoftlimit;
         dqblk->dqb_bhardlimit = dquot->dq_dqb.dqb_bhardlimit;
@@ -1571,10 +1571,10 @@ int mds_get_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
         dqblk->dqb_btime = dquot->dq_dqb.dqb_btime;
         dqblk->dqb_itime = dquot->dq_dqb.dqb_itime;
         dqblk->dqb_valid |= QIF_LIMITS | QIF_TIMES;
-        up(&dquot->dq_sem);
+        cfs_up(&dquot->dq_sem);
 
         lustre_dqput(dquot);
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
 
         /* the usage in the admin quota file is inaccurate */
         dqblk->dqb_curinodes = 0;
@@ -1583,7 +1583,7 @@ int mds_get_dqblk(struct obd_device *obd, struct obd_quotactl *oqctl)
         EXIT;
         return rc;
 out:
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         return rc;
 }
 
@@ -1625,7 +1625,7 @@ dquot_recovery(struct obd_device *obd, unsigned int id, unsigned short type)
                 RETURN(PTR_ERR(dquot));
         }
 
-        down(&dquot->dq_sem);
+        cfs_down(&dquot->dq_sem);
 
         /* don't recover a dquot that has no limits or is being set */
         if (!(dquot->dq_dqb.dqb_bhardlimit || dquot->dq_dqb.dqb_bsoftlimit) ||
@@ -1633,7 +1633,7 @@ dquot_recovery(struct obd_device *obd, unsigned int id, unsigned short type)
                 GOTO(skip, rc = 0);
         dquot->dq_status |= DQ_STATUS_RECOVERY;
 
-        up(&dquot->dq_sem);
+        cfs_up(&dquot->dq_sem);
 
         /* get real bhardlimit from all slaves. */
         qctl->qc_cmd = Q_GETOQUOTA;
@@ -1652,8 +1652,8 @@ dquot_recovery(struct obd_device *obd, unsigned int id, unsigned short type)
         total_limits += qctl->qc_dqblk.dqb_bhardlimit;
 
         /* amend the usage of the administrative quotafile */
-        down(&mds->mds_qonoff_sem);
-        down(&dquot->dq_sem);
+        cfs_down(&mds->mds_qonoff_sem);
+        cfs_down(&dquot->dq_sem);
 
         dquot->dq_dqb.dqb_curspace = total_limits << QUOTABLOCK_BITS;
 
@@ -1661,14 +1661,14 @@ dquot_recovery(struct obd_device *obd, unsigned int id, unsigned short type)
         if (rc)
                 CERROR("write dquot failed! (rc:%d)\n", rc);
 
-        up(&dquot->dq_sem);
-        up(&mds->mds_qonoff_sem);
+        cfs_up(&dquot->dq_sem);
+        cfs_up(&mds->mds_qonoff_sem);
         EXIT;
 out:
-        down(&dquot->dq_sem);
+        cfs_down(&dquot->dq_sem);
         dquot->dq_status &= ~DQ_STATUS_RECOVERY;
 skip:
-        up(&dquot->dq_sem);
+        cfs_up(&dquot->dq_sem);
 
         lustre_dqput(dquot);
         OBD_FREE_PTR(qctl);
@@ -1677,7 +1677,7 @@ skip:
 
 struct qmaster_recov_thread_data {
         struct obd_device *obd;
-        struct completion comp;
+        cfs_completion_t comp;
 };
 
 static int qmaster_recovery_main(void *arg)
@@ -1697,27 +1697,27 @@ static int qmaster_recovery_main(void *arg)
         /* for lov */
         class_incref(mds->mds_osc_obd, "qmaster_recovd_lov", mds->mds_osc_obd);
 
-        complete(&data->comp);
+        cfs_complete(&data->comp);
 
         for (type = USRQUOTA; type < MAXQUOTAS; type++) {
-                struct list_head id_list;
+                cfs_list_t id_list;
                 struct dquot_id *dqid, *tmp;
 
-                down(&mds->mds_qonoff_sem);
+                cfs_down(&mds->mds_qonoff_sem);
                 if (qinfo->qi_files[type] == NULL) {
-                        up(&mds->mds_qonoff_sem);
+                        cfs_up(&mds->mds_qonoff_sem);
                         continue;
                 }
                 CFS_INIT_LIST_HEAD(&id_list);
                 rc = fsfilt_qids(obd, qinfo->qi_files[type], NULL, type,
                                  &id_list);
-                up(&mds->mds_qonoff_sem);
+                cfs_up(&mds->mds_qonoff_sem);
 
                 if (rc)
                         CERROR("error getting ids from admin quotafile (rc:%d)\n", rc);
 
-                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
-                        list_del_init(&dqid->di_link);
+                cfs_list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
+                        cfs_list_del_init(&dqid->di_link);
                         if (rc)
                                 goto free;
 
@@ -1745,25 +1745,26 @@ int mds_quota_recovery(struct obd_device *obd)
         if (unlikely(!mds->mds_quota || obd->obd_stopping))
                 RETURN(rc);
 
-        mutex_down(&obd->obd_dev_sem);
+        cfs_mutex_down(&obd->obd_dev_sem);
         if (mds->mds_lov_desc.ld_active_tgt_count != mds->mds_lov_objid_count) {
                 CWARN("Only %u/%u OSTs are active, abort quota recovery\n",
                       mds->mds_lov_desc.ld_active_tgt_count,
                       mds->mds_lov_objid_count);
-                mutex_up(&obd->obd_dev_sem);
+                cfs_mutex_up(&obd->obd_dev_sem);
                 RETURN(rc);
         }
-        mutex_up(&obd->obd_dev_sem);
+        cfs_mutex_up(&obd->obd_dev_sem);
 
         data.obd = obd;
-        init_completion(&data.comp);
+        cfs_init_completion(&data.comp);
 
-        rc = kernel_thread(qmaster_recovery_main, &data, CLONE_VM|CLONE_FILES);
+        rc = cfs_kernel_thread(qmaster_recovery_main, &data,
+                               CLONE_VM|CLONE_FILES);
         if (rc < 0)
                 CERROR("%s: cannot start quota recovery thread: rc %d\n",
                        obd->obd_name, rc);
 
-        wait_for_completion(&data.comp);
+        cfs_wait_for_completion(&data.comp);
         RETURN(rc);
 }
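
Note: quota_master.c keeps the lock ordering documented near the top of the file (mds->mds_qonoff_sem > dquot->dq_sem) through the rename; dqacq_handler(), mds_set_dqblk() and dquot_recovery() above all take and release in that order. Spelled with the new wrappers, the invariant is simply:

        /* sketch of the mandated nesting; release in reverse order */
        cfs_down(&mds->mds_qonoff_sem);           /* outer */
        cfs_down(&dquot->dq_sem);                 /* inner */
        /* ... read or update dquot->dq_dqb, fsfilt_dquot(obd, dquot, ...) ... */
        cfs_up(&dquot->dq_sem);
        cfs_up(&mds->mds_qonoff_sem);
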
 
index 39ae318..457f726 100644
@@ -63,7 +63,7 @@
 
 static struct it_node {
         struct interval_node node;
-        struct list_head list;
+        cfs_list_t list;
         int hit, valid;
 } *it_array;
 static int it_count;
@@ -379,7 +379,7 @@ static int it_test_performance(struct interval_node *root, unsigned long len)
         /* list */
         contended_count = 0;
         gettimeofday(&start, NULL);
-        list_for_each_entry(n, &header, list) {
+        cfs_list_for_each_entry(n, &header, list) {
                 if (extent_overlapped(&ext, &n->node.in_extent)) {
                         count = LOOP_COUNT;
                         while (count--);
@@ -424,7 +424,7 @@ static struct interval_node *it_test_helper(struct interval_node *root)
                                 __F(&n->node.in_extent));
                         interval_erase(&n->node, &root);
                         n->valid = 0;
-                        list_del_init(&n->list);
+                        cfs_list_del_init(&n->list);
                 } else {
                         __u64 low, high;
                         low = (random() % max_count) & ALIGN_MASK;
@@ -437,7 +437,7 @@ static struct interval_node *it_test_helper(struct interval_node *root)
                         dprintf("Adding a node "__S"\n",
                                 __F(&n->node.in_extent));
                         n->valid = 1;
-                        list_add(&n->list, &header);
+                        cfs_list_add(&n->list, &header);
                 }
         }
 
@@ -474,9 +474,9 @@ static struct interval_node *it_test_init(int count)
                 n->hit = 0;
                 n->valid = 1;
                 if (i == 0)
-                        list_add_tail(&n->list, &header);
+                        cfs_list_add_tail(&n->list, &header);
                 else
-                        list_add_tail(&n->list, &it_array[rand()%i].list);
+                        cfs_list_add_tail(&n->list, &it_array[rand()%i].list);
         }
 
         return root;
index e76fe96..77e16a4 100644
@@ -909,8 +909,8 @@ int jt_get_version(int argc, char **argv)
 
         memset(buf, 0, sizeof(rawbuf));
         data->ioc_version = OBD_IOCTL_VERSION;
-        data->ioc_inllen1 = sizeof(rawbuf) - size_round(sizeof(*data));
-        data->ioc_inlbuf1 = buf + size_round(sizeof(*data));
+        data->ioc_inllen1 = sizeof(rawbuf) - cfs_size_round(sizeof(*data));
+        data->ioc_inlbuf1 = buf + cfs_size_round(sizeof(*data));
         data->ioc_len = obd_ioctl_packlen(data);
 
         rc = l2_ioctl(OBD_DEV_ID, OBD_GET_VERSION, buf);
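
Note: in the userspace lctl code the rename is size_round() becoming cfs_size_round(), which keeps each inline ioctl buffer on a rounded boundary inside the single raw buffer. Here is the packing pattern of jt_get_version() directly above, condensed; the rawbuf declaration via MAX_IOC_BUFLEN is an assumption taken from the usual lctl pattern and is not visible in this hunk.

        char rawbuf[MAX_IOC_BUFLEN], *buf = rawbuf;
        struct obd_ioctl_data *data = (struct obd_ioctl_data *)buf;

        memset(buf, 0, sizeof(rawbuf));
        data->ioc_version = OBD_IOCTL_VERSION;
        /* inlbuf1 starts right after the rounded-up header and may use
         * whatever space remains in the raw buffer */
        data->ioc_inllen1 = sizeof(rawbuf) - cfs_size_round(sizeof(*data));
        data->ioc_inlbuf1 = buf + cfs_size_round(sizeof(*data));
        data->ioc_len = obd_ioctl_packlen(data);
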
@@ -991,8 +991,9 @@ int jt_obd_list_ioctl(int argc, char **argv)
         for (index = 0;; index++) {
                 memset(buf, 0, sizeof(rawbuf));
                 data->ioc_version = OBD_IOCTL_VERSION;
-                data->ioc_inllen1 = sizeof(rawbuf) - size_round(sizeof(*data));
-                data->ioc_inlbuf1 = buf + size_round(sizeof(*data));
+                data->ioc_inllen1 =
+                        sizeof(rawbuf) - cfs_size_round(sizeof(*data));
+                data->ioc_inlbuf1 = buf + cfs_size_round(sizeof(*data));
                 data->ioc_len = obd_ioctl_packlen(data);
                 data->ioc_count = index;
 
@@ -1648,7 +1649,7 @@ int jt_obd_test_brw(int argc, char **argv)
                         l_cond_broadcast(&shared_data->cond);
                 else
                         l_cond_wait(&shared_data->cond,
-                                          &shared_data->mutex);
+                                    &shared_data->mutex);
 
                 shmem_unlock ();
         }
@@ -2075,7 +2076,7 @@ int jt_llog_catlist(int argc, char **argv)
 
         memset(&data, 0x00, sizeof(data));
         data.ioc_dev = cur_device;
-        data.ioc_inllen1 = sizeof(rawbuf) - size_round(sizeof(data));
+        data.ioc_inllen1 = sizeof(rawbuf) - cfs_size_round(sizeof(data));
         memset(buf, 0, sizeof(rawbuf));
         rc = obd_ioctl_pack(&data, &buf, sizeof(rawbuf));
         if (rc) {
@@ -2106,8 +2107,8 @@ int jt_llog_info(int argc, char **argv)
         data.ioc_dev = cur_device;
         data.ioc_inllen1 = strlen(argv[1]) + 1;
         data.ioc_inlbuf1 = argv[1];
-        data.ioc_inllen2 = sizeof(rawbuf) - size_round(sizeof(data)) -
-                size_round(data.ioc_inllen1);
+        data.ioc_inllen2 = sizeof(rawbuf) - cfs_size_round(sizeof(data)) -
+                cfs_size_round(data.ioc_inllen1);
         memset(buf, 0, sizeof(rawbuf));
         rc = obd_ioctl_pack(&data, &buf, sizeof(rawbuf));
         if (rc) {
@@ -2151,10 +2152,10 @@ int jt_llog_print(int argc, char **argv)
                 data.ioc_inllen3 = strlen(to) + 1;
                 data.ioc_inlbuf3 = to;
         }
-        data.ioc_inllen4 = sizeof(rawbuf) - size_round(sizeof(data)) -
-                size_round(data.ioc_inllen1) -
-                size_round(data.ioc_inllen2) -
-                size_round(data.ioc_inllen3);
+        data.ioc_inllen4 = sizeof(rawbuf) - cfs_size_round(sizeof(data)) -
+                cfs_size_round(data.ioc_inllen1) -
+                cfs_size_round(data.ioc_inllen2) -
+                cfs_size_round(data.ioc_inllen3);
         memset(buf, 0, sizeof(rawbuf));
         rc = obd_ioctl_pack(&data, &buf, sizeof(rawbuf));
         if (rc) {
@@ -2233,10 +2234,10 @@ int jt_llog_check(int argc, char **argv)
                 data.ioc_inllen3 = strlen(to) + 1;
                 data.ioc_inlbuf3 = to;
         }
-        data.ioc_inllen4 = sizeof(rawbuf) - size_round(sizeof(data)) -
-                size_round(data.ioc_inllen1) -
-                size_round(data.ioc_inllen2) -
-                size_round(data.ioc_inllen3);
+        data.ioc_inllen4 = sizeof(rawbuf) - cfs_size_round(sizeof(data)) -
+                cfs_size_round(data.ioc_inllen1) -
+                cfs_size_round(data.ioc_inllen2) -
+                cfs_size_round(data.ioc_inllen3);
         memset(buf, 0, sizeof(rawbuf));
         rc = obd_ioctl_pack(&data, &buf, sizeof(rawbuf));
         if (rc) {