--- /dev/null
+#!/usr/bin/python
+
+# This script is for checking that patches don't introduce non-portable symbols
+# into the Lustre/LNET/libcfs code.
+#
+# Input:
+# 1. (Required) Filename (including path) of the diff file to be checked
+# 2. (Optional) path to the nn-final-symbol-list.txt file (By default, this
+# script looks for nn-final-symbol-list.txt in the current working
+# directory.)
+#
+# Output:
+# The output of this script is either PASS or FAIL (with WARNINGS).
+# FAIL means that there may have been symbols found that are not supposed
+# to be used. This requires the person running the script to look into the
+# WARNINGS that are in the output to determine if there is a problem.
+
+# Author: lisa.week@sun.com
+
+import string
+import re
+import sys
+import optparse
+import os.path
+import fileinput
+
+# Setup command line options for nn-check.py
+from optparse import OptionParser
+usage = "%prog DIFF-FILE [options]"
+parser = OptionParser(usage)
+parser.add_option("-s", "--symb", action="store", dest="symb_pathname",
+ help="(Optional) PATH to nn-final-symbol-list.txt file",
+ metavar="PATH")
+
+(options, args) = parser.parse_args()
+
+# Check if we have the minimum number of arguments supplied.
+if len(args) < 1:
+ parser.error("Incorrect number of arguments, see nn-check -h for help.")
+
+# Check if we were passed a path to the nn-final-symbol-list.txt file
+if options.symb_pathname:
+ symb_file = os.path.join(options.symb_pathname,
+ 'nn-final-symbol-list.txt')
+else:
+ symb_file = 'nn-final-symbol-list.txt'
+
+# Global Variables
+bad_symbol_cnt = 0
+symbol_dict = dict()
+
+# Function Definitions
+def search_symbol(line, linenum):
+ global bad_symbol_cnt
+
+ for key, val in symbol_dict.items():
+ regex_match = val.search(line)
+
+ if regex_match:
+ print_symbol = regex_match.group(0)
+ print 'WARNING: Found %s at line %d:' \
+ % (print_symbol, linenum)
+ print '%s' % line.rstrip()
+ bad_symbol_cnt += 1
+
+# The main portion of the script
+print '================='
+print 'Starting nn-check'
+print '================='
+
+# Open the nn-final-symbol-list.txt file and pull in the symbols to check into
+# a dictionary object.
+try:
+ f = fileinput.input(symb_file)
+except IOError:
+ print 'nn-check.py: error: %s not found.' % symb_file
+ print 'Is nn-final-symbol-list.txt is in your current working directory'
+ print 'or have you have passed nn-check.py a valid path to the file?'
+ sys.exit(1)
+
+
+for line in f:
+ stripped_symbol = line.rstrip()
+ symbol_dict[stripped_symbol] = re.compile(stripped_symbol)
+
+# Close nn-final-symbol-list.txt
+f.close()
+
+# Open the diff file passed to the script and parse it for the symbols from
+# nn-final-symbol-list.txt
+try:
+ f = fileinput.input(sys.argv[1])
+except IOError:
+ print 'nn-check.py: error: %s not found.' % sys.argv[1]
+ print 'Check the path provided for the diff file.'
+ sys.exit(1)
+
+index = re.compile(r'^\+\+\+ b/(.*)')
+plus = re.compile(r'^\+')
+for line in f:
+ # Check for the "diff --cc " delimiter in order to grab the file name.
+ index_match = index.match(line)
+
+ if index_match:
+ # Store the file name
+ filename=index_match.group(1)
+ print '--> Checking File: %s' % filename
+ else:
+ # Check if the line starts with a "+" character.
+ plus_match = plus.match(line)
+ if plus_match:
+ # The line starts with a "+" character. Look for
+ # non-portable symbols
+ search_symbol(line, f.lineno())
+ else:
+ continue
+
+# Close the diff file
+f.close()
+
+# Finish up and print the results of the script (i.e. total number of
+# bad symbols found)
+if bad_symbol_cnt != 0:
+ print '=============================='
+ print 'Finished nn-check status: FAIL'
+ print '=============================='
+ print 'Found %d potential problem(s). See "WARNINGS" from script output and refer to https://wikis.lustre.org/intra/index.php/Lustre_Name_Normalization for the complete set of rules to make sure you have not used a non-portable symbol.' % bad_symbol_cnt
+else:
+ print '=============================='
+ print 'Finished nn-check status: PASS'
+ print '=============================='
--- /dev/null
+\blist_head\b
+\bspin_unlock\b
+\bspin_lock\b
+\blist_empty\b
+\batomic_read\b
+\blist_entry\b
+\blist_add_tail\b
+\blist_del_init\b
+\batomic_inc\b
+\blist_for_each\b
+\bspinlock_t\b
+\blist_add\b
+\bspin_unlock_irqrestore\b
+\batomic_t\b
+\bup\b
+\bspin_lock_irqsave\b
+\bspin_lock_init\b
+\blist_for_each_entry\b
+\bwrite_unlock_irqrestore\b
+\batomic_set\b
+\bdown\b
+\bhlist_node\b
+\bHZ\b
+\batomic_dec\b
+\bcompletion\b
+\blist_for_each_safe\b
+\bin_interrupt\b
+\blist_for_each_entry_safe\b
+\bmutex_up\b
+\bwrite_lock_irqsave\b
+\bcopy_from_user\b
+\bcopy_to_user\b
+\batomic_dec_and_test\b
+\bmutex_down\b
+\bspin_unlock_bh\b
+\bsemaphore\b
+\bread_unlock\b
+\btest_bit\b
+\bup_write\b
+\bsize_round\b
+\bread_lock\b
+\bread_unlock_irqrestore\b
+\bset_current_state\b
+\bhlist_head\b
+\bspin_lock_bh\b
+\bdo_gettimeofday\b
+\bgroup_info\b
+\bset_bit\b
+\bdown_write\b
+\bup_read\b
+\bread_lock_irqsave\b
+\bwrite_unlock\b
+\bwrite_lock\b
+\brwlock_t\b
+\bhlist_entry\b
+\bmutex_unlock\b
+\bdown_read\b
+\bnum_physpages\b
+\bmutex\b
+\bsema_init\b
+\bmutex_lock\b
+\bclear_bit\b
+\bmb\b
+\bATOMIC_INIT\b
+\btime_after_eq\b
+\blist_splice_init\b
+\bcomplete\b
+\bkstatfs\b
+\bwait_for_completion\b
+\bnum_online_cpus\b
+\bhlist_unhashed\b
+\bLIST_HEAD\b
+\blist_for_each_entry_reverse\b
+\bSPIN_LOCK_UNLOCKED\b
+\binit_completion\b
+\bmight_sleep\b
+\brwlock_init\b
+\bkernel_thread\b
+\bhlist_add_head\b
+\blist_move\b
+\bunlock_kernel\b
+\bschedule_timeout\b
+\brw_semaphore\b
+\bmodule\b
+\bhlist_del_init\b
+\batomic_inc_return\b
+\btime_after\b
+\bmodule_put\b
+\binit_mutex\b
+\bget_random_bytes\b
+\bin_group_p\b
+\btime_before\b
+\bumode_t\b
+\binit_rwsem\b
+\bhlist_for_each_entry_safe\b
+\bmutex_init\b
+\block_kernel\b
+\btry_module_get\b
+\bCURRENT_TIME\b
+\brequest_module\b
+\block_class_key\b
+\bhlist_empty\b
+\bhlist_for_each_entry\b
+\bnum_possible_cpus\b
+\blist_splice\b
+\bour_cond_resched\b
+\bshrinker\b
+\bspin_unlock_irq\b
+\btest_and_set_bit\b
+\bDECLARE_MUTEX\b
+\bINIT_HLIST_NODE\b
+\bdown_write_nested\b
+\bspin_lock_irq\b
+\bsize_round4\b
+\bwait_event\b
+\bINIT_HLIST_HEAD\b
+\bMAX_SCHEDULE_TIMEOUT\b
+\bSLAB_HWCACHE_ALIGN\b
+\bcdebug_show\b
+\bcycles_t\b
+\bgroups_free\b
+\bDEFAULT_SEEKS\b
+\bGET_TIMEOUT\b
+\bremove_shrinker\b
+\bset_shrinker\b
+\batomic_sub\b
+\bgroups_alloc\b
+\bhlist_for_each\b
+\bhlist_for_each_safe\b
+\bNR_IRQS\b
+\bhlist_del\b
+\batomic_add_return\b
+\binit_MUTEX_LOCKED\b
+\binit_mutex_locked\b
+\blist_for_each_prev\b
+\bcpu_online\b
+\binit_MUTEX\b
+\bFREE_BITMAP\b
+\bL1_CACHE_ALIGN\b
+\batomic_dec_and_lock\b
+\bfind_first_zero_bit\b
+\bmutex_trylock\b
+\bHLIST_HEAD\b
+\batomic_dec_return\b
+\bcond_resched\b
+\bhash_long\b
+\bmutex_is_locked\b
+\bdown_read_nested\b
+\bmutex_lock_nested\b
+\bwait_event_interruptible_exclusive\b
+\bwait_event_interruptible\b
+\batomic_add\b
+\bCHECK_STACK\b
+\bfor_each_possible_cpu\b
+\bALLOCATE_BITMAP\b
+\bDEFINE_MUTEX\b
+\blist_empty_careful\b
+\bwrite_lock_bh\b
+\bwrite_unlock_bh\b
+\bTHREAD_SIZE\b
+\blist_for_each_entry_safe_from\b
+\bshrinker_t\b
+\bwait_for_completion_interruptible\b
+\bmutex_destroy\b
+\bdown_read_trylock\b
+\bdown_write_trylock\b
+\bfind_next_zero_bit\b
+\bspin_lock_nested\b
+\bspin_trylock\b
+\bbitmap_t\b
+\bsmp_processor_id\b
+\btracefile\b
+\btracefile_sem\b
+\bKERN_ERR\b
+\bDECLARE_COMPLETION\b
+\bhlist_add_after\b
+\bhlist_add_before\b
+\bhlist_for_each_entry_continue\b
+\bhlist_for_each_entry_from\b
+\bINIT_COMPLETION\b
+\bround_strlen\b
+\bRW_LOCK_UNLOCKED\b
+\bsize_round0\b
+\bsize_round16\b
+\bsize_round32\b
+\blist_t\b
+\bmutex_t\b
+\bCOMPLETION_INITIALIZER\b
+\bHLIST_HEAD_INIT\b
+\btime_before_eq\b
+\bspin_is_locked\b
+\btrace_daemon_command_usrstr\b
+\btrace_debug_print\b
+\btrace_dump_debug_buffer_usrstr\b
+\btrace_refill_stock\b
+\btrace_set_debug_mb_usrstr\b
+\bfind_first_bit\b
+\b__list_splice\b
+\btrace_assertion_failed\b
+\btracefile_exit\b
+\btracefile_init\b
+\btrace_flush_pages\b
+\btrace_start_thread\b
+\b__list_add\b
+\btcd_owns_tage\b
+\bKERN_ALERT\b
+\bmutex_down_trylock\b
+\bspin_lock_bh_init\b
+\btrace_get_debug_mb\b
+\btage_allocated\b
+\btrace_daemon_command\b
+\btrace_set_debug_mb\b
+\bfind_next_bit\b
+\btrace_stop_thread\b
+\btracefile_init_arch\b
+\btrace_get_tcd\b
+\btrace_lock_tcd\b
+\btrace_unlock_tcd\b
+\btrace_copyout_string\b
+\btracefile_dump_all_pages\b
+\b__list_del\b
+\bKERN_EMERG\b
+\btracefile_fini_arch\b
+\btrace_get_console_buffer\b
+\btrace_max_debug_mb\b
+\btrace_put_console_buffer\b
+\btrace_copyin_string\b
+\btracefile_read_lock\b
+\btracefile_read_unlock\b
+\btrace_allocate_string_buffer\b
+\btrace_free_string_buffer\b
+\bdebug_file_path_arr\b
+\btrace_thread_sem\b
+\btracefile_write_lock\b
+\btracefile_write_unlock\b
+\btracefile_size\b
+\bprint_to_console\b
+\btrace_put_tcd\b
+\bdebug_file_path\b
+\bfini_rwsem\b
+\bKERN_WARNING\b
+\bcpumask_t\b
+\bcpus_empty\b
+\bfor_each_cpu_mask\b
+\bcpu_set\b
+\bcpus_weight\b
+\bset_cpus_allowed\b
+\bnodemask_t\b
+\blist_del\b
+\blist_move_tail\b
+\b__hlist_del\b
+\brwlock_fini\b
+\batomic_sub_return\b
+\batomic_inc_and_test\b
+\batomic_sub_and_test\b
+\bcall_wait_handler\b
+\binit_completion_module\b
+\bSLAB_DESTROY_BY_RCU\b
+\bSLAB_KERNEL\b
+\bSLAB_NOFS\b
+\bTASK_INTERRUPTIBLE\b
+\bTASK_RUNNING\b
+\bTRACEFILE_SIZE\b
+\btrace_cleanup\b
+\btrace_page\b
+\btrace_get_tage\b
+\btrace_cpu_data\b
+\btrace_get_tage_try\b
+\btrace_data\b
+\btage_from_list\b
+\btage_alloc\b
+\btage_free\b
+\btage_to_tail\b
+\btcd_shrink\b
+\btcd_for_each\b
+\btcd_for_each_type_lock\b
+\bschedule_timeout_interruptible\b
+\bINIT_LIST_HEAD\b
+\b__fls\b
+\bfls\b
+\b__flz\b
+\bflz\b
+\b__ffs\b
+\bffs\b
+\b__ffz\b
+\bffz\b
+\bDEBUG_FILE_PATH_DEFAULT\b
+\btrace_data_union\b
+\bKERN_CRIT\b
+\bKERN_NOTICE\b
+\bKERN_INFO\b
+\bKERN_DEBUG\b
+\bput_group_info\b
+\bget_group_info\b
+\bcleanup_group_info\b
+\bset_current_groups\b
+\btest_and_clear_bit\b
+\btrace_console_buffers\b
+\bset_ptldebug_header\b
+\btrace_buf_idx_get\b
+\btrace_buf_type_t\b
+\bTCD_TYPE_PROC\b
+\bTCD_TYPE_SOFTIRQ\b
+\bTCD_TYPE_IRQ\b
+\bTCD_TYPE_MAX\b
+\bTCD_TYPE_PASSIVE\b
+\bTCD_TYPE_DISPATCH\b
+\brcu_head\b
+\blockdep_on\b
+\blockdep_off\b
+\blockdep_set_class\b
+\b__module_get\b
+\bmodule_refcount\b
+\bNR_CPUS\b
+\bTRACE_CONSOLE_BUFFER_SIZE\b
+\bcomplete_and_wait\b
+\batomic_add_unless\b
+\batomic_inc_not_zero\b
+\bschedule\b
+\bcomplete_and_exit\b
+\binit_waitqueue_head\b
+\binit_waitqueue_entry\b
+\badd_wait_queue\b
+\badd_wait_queue_exclusive\b
+\bremove_wait_queue\b
+\bwaitqueue_active\b
+\bwake_up\b
+\bwake_up_nr\b
+\bwake_up_all\b
+\bwait_queue_head_t\b
+\bwait_queue_t\b
+\bDECLARE_RWSEM\b
+\bCFS_DECL_RWSEM\b
+\blist_for_each_entry_continue\b
typedef struct {
int size;
unsigned long data[0];
-} bitmap_t;
+} cfs_bitmap_t;
#define CFS_BITMAP_SIZE(nbits) \
- (((nbits/BITS_PER_LONG)+1)*sizeof(long)+sizeof(bitmap_t))
+ (((nbits/BITS_PER_LONG)+1)*sizeof(long)+sizeof(cfs_bitmap_t))
static inline
-bitmap_t *ALLOCATE_BITMAP(int size)
+cfs_bitmap_t *CFS_ALLOCATE_BITMAP(int size)
{
- bitmap_t *ptr;
+ cfs_bitmap_t *ptr;
OBD_ALLOC(ptr, CFS_BITMAP_SIZE(size));
if (ptr == NULL)
RETURN (ptr);
}
-#define FREE_BITMAP(ptr) OBD_FREE(ptr, CFS_BITMAP_SIZE(ptr->size))
+#define CFS_FREE_BITMAP(ptr) OBD_FREE(ptr, CFS_BITMAP_SIZE(ptr->size))
static inline
-void cfs_bitmap_set(bitmap_t *bitmap, int nbit)
+void cfs_bitmap_set(cfs_bitmap_t *bitmap, int nbit)
{
- set_bit(nbit, bitmap->data);
+ cfs_set_bit(nbit, bitmap->data);
}
static inline
-void cfs_bitmap_clear(bitmap_t *bitmap, int nbit)
+void cfs_bitmap_clear(cfs_bitmap_t *bitmap, int nbit)
{
- test_and_clear_bit(nbit, bitmap->data);
+ cfs_test_and_clear_bit(nbit, bitmap->data);
}
static inline
-int cfs_bitmap_check(bitmap_t *bitmap, int nbit)
+int cfs_bitmap_check(cfs_bitmap_t *bitmap, int nbit)
{
- return test_bit(nbit, bitmap->data);
+ return cfs_test_bit(nbit, bitmap->data);
}
static inline
-int cfs_bitmap_test_and_clear(bitmap_t *bitmap, int nbit)
+int cfs_bitmap_test_and_clear(cfs_bitmap_t *bitmap, int nbit)
{
- return test_and_clear_bit(nbit, bitmap->data);
+ return cfs_test_and_clear_bit(nbit, bitmap->data);
}
/* return 0 is bitmap has none set bits */
static inline
-int cfs_bitmap_check_empty(bitmap_t *bitmap)
+int cfs_bitmap_check_empty(cfs_bitmap_t *bitmap)
{
- return find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
+ return cfs_find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
}
#define cfs_foreach_bit(bitmap, pos) \
- for((pos)=find_first_bit((bitmap)->data, bitmap->size); \
+ for((pos)=cfs_find_first_bit((bitmap)->data, bitmap->size); \
(pos) < (bitmap)->size; \
- (pos) = find_next_bit((bitmap)->data, (bitmap)->size, (pos)))
+ (pos) = cfs_find_next_bit((bitmap)->data, (bitmap)->size, (pos)))
#endif
#include <libcfs/list.h>
-/* for_each_possible_cpu is defined newly, the former is
- * for_each_cpu(eg. sles9 and sles10) b=15878 */
-#ifndef for_each_possible_cpu
-# ifdef for_each_cpu
-# define for_each_possible_cpu(cpu) for_each_cpu(cpu)
-# else
-# error for_each_possible_cpu is not supported by kernel!
-# endif
+#ifndef cfs_for_each_possible_cpu
+# error cfs_for_each_possible_cpu is not supported by kernel!
#endif
/* libcfs tcpip */
/* Enables a watchdog and resets its timer. */
void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout);
-#define GET_TIMEOUT(svc) (max_t(int, obd_timeout, \
+#define CFS_GET_TIMEOUT(svc) (max_t(int, obd_timeout, \
AT_OFF ? 0 : at_get(&svc->srv_at_estimate)) * \
svc->srv_watchdog_factor)
extern cfs_duration_t libcfs_console_min_delay;
extern unsigned int libcfs_console_backoff;
extern unsigned int libcfs_debug_binary;
-extern char debug_file_path_arr[1024];
-#ifdef __KERNEL__
-extern char *debug_file_path;
-#endif
+extern char libcfs_debug_file_path_arr[1024];
int libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys);
int libcfs_debug_str2mask(int *mask, const char *str, int is_subsys);
/**
* Filters out logging messages based on mask and subsystem.
*/
-static inline int cdebug_show(unsigned int mask, unsigned int subsystem)
+static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
{
return mask & D_CANTMASK ||
((libcfs_debug & mask) && (libcfs_subsystem_debug & subsystem));
#define __CDEBUG(cdls, mask, format, ...) \
do { \
- CHECK_STACK(); \
+ CFS_CHECK_STACK(); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) \
libcfs_debug_msg(cdls, DEBUG_SUBSYSTEM, mask, \
__FILE__, __FUNCTION__, __LINE__, \
format, ## __VA_ARGS__); \
} while (0)
#else /* !CDEBUG_ENABLED */
-static inline int cdebug_show(unsigned int mask, unsigned int subsystem)
+static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
{
return 0;
}
libcfs_debug_vmsg2(cdls, subsys, mask, file, fn,line,NULL,NULL,format, ## __VA_ARGS__)
#define cdebug_va(cdls, mask, file, func, line, fmt, args) do { \
- CHECK_STACK(); \
+ CFS_CHECK_STACK(); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) \
libcfs_debug_vmsg(cdls, DEBUG_SUBSYSTEM, (mask), \
(file), (func), (line), fmt, args); \
} while(0)
#define cdebug(cdls, mask, file, func, line, fmt, ...) do { \
- CHECK_STACK(); \
+ CFS_CHECK_STACK(); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) \
libcfs_debug_msg(cdls, DEBUG_SUBSYSTEM, (mask), \
(file), (func), (line), fmt, ## __VA_ARGS__);\
} while(0)
const char *fn, const int line);
/* one more external symbol that tracefile provides: */
-extern int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
- const char *knl_buffer, char *append);
+extern int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+ const char *knl_buffer, char *append);
#if defined(HAVE_BGL_SUPPORT)
-#define DEBUG_FILE_PATH_DEFAULT "/bgl/ion/tmp/lustre-log"
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/bgl/ion/tmp/lustre-log"
#elif defined(__arch_um__)
-#define DEBUG_FILE_PATH_DEFAULT "/r/tmp/lustre-log"
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/r/tmp/lustre-log"
#elif defined(__WINNT__)
-#define DEBUG_FILE_PATH_DEFAULT "\\SystemRoot\\temp\\lustre-log"
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "\\SystemRoot\\temp\\lustre-log"
#else
-#define DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
#endif
#endif /* __LIBCFS_DEBUG_H__ */
#if (defined __linux__ && defined __KERNEL__)
#include <linux/hash.h>
+
+#define cfs_hash_long(val, bits) hash_long(val, bits)
#else
/* Fast hashing routine for a long.
(C) 2002 William Lee Irwin III, IBM */
#error Define CFS_GOLDEN_RATIO_PRIME for your wordsize.
#endif
-static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+static inline unsigned long cfs_hash_long(unsigned long val, unsigned int bits)
{
unsigned long hash = val;
#if 0
static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
{
- return hash_long((unsigned long)ptr, bits);
+ return cfs_hash_long((unsigned long)ptr, bits);
}
#endif
struct cfs_hash_ops;
typedef struct cfs_hash_bucket {
- struct hlist_head hsb_head; /* entries list */
- atomic_t hsb_count; /* current entries */
- rwlock_t hsb_rwlock; /* cfs_hash_bucket */
+ cfs_hlist_head_t hsb_head; /* entries list */
+ cfs_atomic_t hsb_count; /* current entries */
+ cfs_rwlock_t hsb_rwlock; /* cfs_hash_bucket */
} cfs_hash_bucket_t;
#define CFS_MAX_HASH_NAME 16
int hs_min_theta; /* resize min threshold */
int hs_max_theta; /* resize max threshold */
int hs_flags; /* hash flags */
- atomic_t hs_count; /* current entries */
- atomic_t hs_rehash_count;/* resize count */
+ cfs_atomic_t hs_count; /* current entries */
+ cfs_atomic_t hs_rehash_count;/* resize count */
struct cfs_hash_bucket **hs_buckets; /* hash buckets */
struct cfs_hash_ops *hs_ops; /* hash operations */
- rwlock_t hs_rwlock; /* cfs_hash */
+ cfs_rwlock_t hs_rwlock; /* cfs_hash */
char hs_name[CFS_MAX_HASH_NAME];
} cfs_hash_t;
typedef struct cfs_hash_ops {
unsigned (*hs_hash)(cfs_hash_t *hs, void *key, unsigned mask);
- void * (*hs_key)(struct hlist_node *hnode);
- int (*hs_compare)(void *key, struct hlist_node *hnode);
- void * (*hs_get)(struct hlist_node *hnode);
- void * (*hs_put)(struct hlist_node *hnode);
- void (*hs_exit)(struct hlist_node *hnode);
+ void * (*hs_key)(cfs_hlist_node_t *hnode);
+ int (*hs_compare)(void *key, cfs_hlist_node_t *hnode);
+ void * (*hs_get)(cfs_hlist_node_t *hnode);
+ void * (*hs_put)(cfs_hlist_node_t *hnode);
+ void (*hs_exit)(cfs_hlist_node_t *hnode);
} cfs_hash_ops_t;
#define CFS_HASH_DEBUG 0x0001 /* Enable expensive debug checks */
}
static inline void *
-cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_key(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
LASSERT(hs);
LASSERT(hnode);
* ends up not being the case this would be a nice feature.
*/
static inline int
-cfs_hash_compare(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+cfs_hash_compare(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
{
LASSERT(hs);
LASSERT(hnode);
}
static inline void *
-cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
LASSERT(hs);
LASSERT(hnode);
}
static inline void *
-cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
LASSERT(hs);
LASSERT(hnode);
}
static inline void
-cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
LASSERT(hs);
LASSERT(hnode);
/* Validate hnode references the correct key */
static inline void
__cfs_hash_key_validate(cfs_hash_t *hs, void *key,
- struct hlist_node *hnode)
+ cfs_hlist_node_t *hnode)
{
if (unlikely(hs->hs_flags & CFS_HASH_DEBUG))
LASSERT(cfs_hash_compare(hs, key, hnode) > 0);
/* Validate hnode is in the correct bucket */
static inline void
__cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bucket_t *hsb,
- struct hlist_node *hnode)
+ cfs_hlist_node_t *hnode)
{
unsigned i;
}
}
-static inline struct hlist_node *
+static inline cfs_hlist_node_t *
__cfs_hash_bucket_lookup(cfs_hash_t *hs,
cfs_hash_bucket_t *hsb, void *key)
{
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
- hlist_for_each(hnode, &hsb->hsb_head)
+ cfs_hlist_for_each(hnode, &hsb->hsb_head)
if (cfs_hash_compare(hs, key, hnode) > 0)
return hnode;
static inline void *
__cfs_hash_bucket_add(cfs_hash_t *hs,
cfs_hash_bucket_t *hsb,
- struct hlist_node *hnode)
+ cfs_hlist_node_t *hnode)
{
- hlist_add_head(hnode, &(hsb->hsb_head));
- atomic_inc(&hsb->hsb_count);
- atomic_inc(&hs->hs_count);
+ cfs_hlist_add_head(hnode, &(hsb->hsb_head));
+ cfs_atomic_inc(&hsb->hsb_count);
+ cfs_atomic_inc(&hs->hs_count);
return cfs_hash_get(hs, hnode);
}
static inline void *
__cfs_hash_bucket_del(cfs_hash_t *hs,
cfs_hash_bucket_t *hsb,
- struct hlist_node *hnode)
+ cfs_hlist_node_t *hnode)
{
- hlist_del_init(hnode);
- LASSERT(atomic_read(&hsb->hsb_count) > 0);
- atomic_dec(&hsb->hsb_count);
- LASSERT(atomic_read(&hs->hs_count) > 0);
- atomic_dec(&hs->hs_count);
+ cfs_hlist_del_init(hnode);
+ LASSERT(cfs_atomic_read(&hsb->hsb_count) > 0);
+ cfs_atomic_dec(&hsb->hsb_count);
+ LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
+ cfs_atomic_dec(&hs->hs_count);
return cfs_hash_put(hs, hnode);
}
/* Hash addition functions */
void cfs_hash_add(cfs_hash_t *hs, void *key,
- struct hlist_node *hnode);
+ cfs_hlist_node_t *hnode);
int cfs_hash_add_unique(cfs_hash_t *hs, void *key,
- struct hlist_node *hnode);
+ cfs_hlist_node_t *hnode);
void *cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
- struct hlist_node *hnode);
+ cfs_hlist_node_t *hnode);
/* Hash deletion functions */
-void *cfs_hash_del(cfs_hash_t *hs, void *key, struct hlist_node *hnode);
+void *cfs_hash_del(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode);
void *cfs_hash_del_key(cfs_hash_t *hs, void *key);
/* Hash lookup/for_each functions */
*/
int cfs_hash_rehash(cfs_hash_t *hs, int bits);
void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key,
- void *new_key, struct hlist_node *hnode);
+ void *new_key, cfs_hlist_node_t *hnode);
#define CFS_HASH_THETA_BITS 10
static inline int __cfs_hash_theta(cfs_hash_t *hs)
{
- return (atomic_read(&hs->hs_count) <<
+ return (cfs_atomic_read(&hs->hs_count) <<
CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}
#ifdef __KERNEL__
struct libcfs_ioctl_handler {
- struct list_head item;
+ cfs_list_t item;
int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
};
static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
{
int len = sizeof(*data);
- len += size_round(data->ioc_inllen1);
- len += size_round(data->ioc_inllen2);
+ len += cfs_size_round(data->ioc_inllen1);
+ len += cfs_size_round(data->ioc_inllen2);
return len;
}
return 1;
}
if (data->ioc_inllen2 &&
- data->ioc_bulk[size_round(data->ioc_inllen1) +
+ data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
data->ioc_inllen2 - 1] != '\0') {
CERROR ("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
return 1;
/*
* Schedule
*/
-void cfs_schedule_timeout(cfs_task_state_t state, int64_t timeout);
+void cfs_schedule_timeout_and_set_state(cfs_task_state_t state,
+ int64_t timeout);
+void cfs_schedule_timeout(int64_t timeout);
void cfs_schedule(void);
void cfs_pause(cfs_duration_t ticks);
int cfs_need_resched(void);
#define LBUG() lbug_with_loc(__FILE__, __FUNCTION__, __LINE__)
-extern atomic_t libcfs_kmemory;
+extern cfs_atomic_t libcfs_kmemory;
/*
* Memory
*/
# define libcfs_kmem_inc(ptr, size) \
do { \
- atomic_add(size, &libcfs_kmemory); \
+ cfs_atomic_add(size, &libcfs_kmemory); \
} while (0)
# define libcfs_kmem_dec(ptr, size) do { \
- atomic_sub(size, &libcfs_kmemory); \
+ cfs_atomic_sub(size, &libcfs_kmemory); \
} while (0)
#else
#define LIBCFS_ALLOC_GFP(ptr, size, mask) \
do { \
- LASSERT(!in_interrupt() || \
+ LASSERT(!cfs_in_interrupt() || \
(size <= LIBCFS_VMALLOC_SIZE && mask == CFS_ALLOC_ATOMIC));\
if (unlikely((size) > LIBCFS_VMALLOC_SIZE)) \
(ptr) = cfs_alloc_large(size); \
CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
#ptr "' = %d)\n", __FILE__, __LINE__, (int)(size));\
CERROR("LNET: %d total bytes allocated by lnet\n", \
- atomic_read(&libcfs_kmemory)); \
+ cfs_atomic_read(&libcfs_kmemory)); \
break; \
} \
libcfs_kmem_inc((ptr), (size)); \
if (!((mask) & CFS_ALLOC_ZERO)) \
memset((ptr), 0, (size)); \
CDEBUG(D_MALLOC, "kmalloced '" #ptr "': %d at %p (tot %d).\n", \
- (int)(size), (ptr), atomic_read (&libcfs_kmemory)); \
+ (int)(size), (ptr), cfs_atomic_read (&libcfs_kmemory)); \
} while (0)
#define LIBCFS_ALLOC(ptr, size) \
} \
libcfs_kmem_dec((ptr), s); \
CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
- s, (ptr), atomic_read(&libcfs_kmemory)); \
+ s, (ptr), cfs_atomic_read(&libcfs_kmemory)); \
if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
cfs_free_large(ptr); \
else \
int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
char *libcfs_id2str(lnet_process_id_t id);
int cfs_iswhite(char c);
-void cfs_free_nidlist(struct list_head *list);
-int cfs_parse_nidlist(char *str, int len, struct list_head *list);
-int cfs_match_nid(lnet_nid_t nid, struct list_head *list);
+void cfs_free_nidlist(cfs_list_t *list);
+int cfs_parse_nidlist(char *str, int len, cfs_list_t *list);
+int cfs_match_nid(lnet_nid_t nid, cfs_list_t *list);
/* how an LNET NID encodes net:address */
#define LNET_NIDADDR(nid) ((__u32)((nid) & 0xffffffff))
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))
-#ifndef CURRENT_TIME
-# define CURRENT_TIME time(0)
+#ifndef CFS_CURRENT_TIME
+# define CFS_CURRENT_TIME time(0)
#endif
/* --------------------------------------------------------------------
#define MKSTR(ptr) ((ptr))? (ptr) : ""
-static inline int size_round4 (int val)
+static inline int cfs_size_round4 (int val)
{
return (val + 3) & (~0x3);
}
-static inline int size_round (int val)
+static inline int cfs_size_round (int val)
{
return (val + 7) & (~0x7);
}
-static inline int size_round16(int val)
+static inline int cfs_size_round16(int val)
{
return (val + 0xf) & (~0xf);
}
-static inline int size_round32(int val)
+static inline int cfs_size_round32(int val)
{
return (val + 0x1f) & (~0x1f);
}
-static inline int size_round0(int val)
+static inline int cfs_size_round0(int val)
{
if (!val)
return 0;
return (val + 1 + 7) & (~0x7);
}
-static inline size_t round_strlen(char *fset)
+static inline size_t cfs_round_strlen(char *fset)
{
- return (size_t)size_round((int)strlen(fset) + 1);
+ return (size_t)cfs_size_round((int)strlen(fset) + 1);
}
#define LOGL(var,len,ptr) \
do { \
if (var) \
memcpy((char *)ptr, (const char *)var, len); \
- ptr += size_round(len); \
+ ptr += cfs_size_round(len); \
} while (0)
#define LOGU(var,len,ptr) \
do { \
if (var) \
memcpy((char *)var, (const char *)ptr, len); \
- ptr += size_round(len); \
+ ptr += cfs_size_round(len); \
} while (0)
#define LOGL0(var,len,ptr) \
break; \
memcpy((char *)ptr, (const char *)var, len); \
*((char *)(ptr) + len) = 0; \
- ptr += size_round(len + 1); \
+ ptr += cfs_size_round(len + 1); \
} while (0)
/**
return (cfs_time_t)(t1 - t2);
}
-static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
-{
- return time_before(t1, t2);
-}
-
-static inline int cfs_time_beforeq(cfs_time_t t1, cfs_time_t t2)
-{
- return time_before_eq(t1, t2);
-}
-
static inline int cfs_time_after(cfs_time_t t1, cfs_time_t t2)
{
return cfs_time_before(t2, t1);
EXTRA_DIST := kp30.h libcfs.h linux-fs.h linux-lock.h linux-mem.h \
linux-prim.h linux-time.h linux-tcpip.h lltrace.h \
- portals_compat25.h
\ No newline at end of file
+ portals_compat25.h linux-bitops.h linux-types.h
#define PageUptodate Page_Uptodate
#define our_recalc_sigpending(current) recalc_sigpending(current)
-#define num_online_cpus() smp_num_cpus
-static inline void our_cond_resched(void)
-{
- if (current->need_resched)
- schedule ();
-}
+#define cfs_num_online_cpus() smp_num_cpus
#define work_struct_t struct tq_struct
#define cfs_get_work_data(type,field,data) (data)
#else
#endif
+#define cfs_num_online_cpus() num_online_cpus()
#define wait_on_page wait_on_page_locked
#define our_recalc_sigpending(current) recalc_sigpending()
#define strtok(a,b) strpbrk(a, b)
-static inline void our_cond_resched(void)
-{
- cond_resched();
-}
#define work_struct_t struct work_struct
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) */
# define time(a) CURRENT_TIME
#ifndef num_possible_cpus
-#define num_possible_cpus() NR_CPUS
+#define cfs_num_possible_cpus() NR_CPUS
+#else
+#define cfs_num_possible_cpus() num_possible_cpus()
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
#define i_size_read(a) ((a)->i_size)
# if !KLWT_SUPPORT
typedef struct _lwt_page {
- struct list_head lwtp_list;
- struct page *lwtp_page;
- lwt_event_t *lwtp_events;
+ cfs_list_t lwtp_list;
+ struct page *lwtp_page;
+ lwt_event_t *lwtp_events;
} lwt_page_t;
typedef struct {
\
if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) { \
cpu->lwtc_current_page = \
- list_entry (p->lwtp_list.next, \
- lwt_page_t, lwtp_list); \
+ cfs_list_entry (p->lwtp_list.next, \
+ lwt_page_t, lwtp_list); \
cpu->lwtc_current_index = 0; \
} \
\
extern int lwt_lookup_string (int *size, char *knlptr,
char *usrptr, int usrsize);
extern int lwt_control (int enable, int clear);
-extern int lwt_snapshot (cycles_t *now, int *ncpu, int *total_size,
+extern int lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
void *user_ptr, int user_size);
# else /* __KERNEL__ */
# define LWT_EVENT(p1,p2,p3,p4) /* no userland implementation yet */
}
#endif
+#define cfs_smp_processor_id() smp_processor_id()
+
#ifndef get_cpu
# ifdef CONFIG_PREEMPT
# define cfs_get_cpu() ({ preempt_disable(); smp_processor_id(); })
#include <libcfs/linux/linux-lock.h>
#include <libcfs/linux/linux-fs.h>
#include <libcfs/linux/linux-tcpip.h>
+#include <libcfs/linux/linux-bitops.h>
+#include <libcfs/linux/linux-types.h>
#include <libcfs/linux/kp30.h>
#ifdef HAVE_ASM_TYPES_H
#include <asm/timex.h>
#include <linux/sched.h> /* THREAD_SIZE */
+#define CFS_THREAD_SIZE THREAD_SIZE
#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
#if !defined(__x86_64__)
#define __CHECK_STACK(file, func, line) \
do { \
- unsigned long _stack = CDEBUG_STACK(); \
+ unsigned long _stack = CDEBUG_STACK(); \
\
if (_stack > 3*THREAD_SIZE/4 && _stack > libcfs_stack) { \
libcfs_stack = _stack; \
/*panic("LBUG");*/ \
} \
} while (0)
-#define CHECK_STACK() __CHECK_STACK(__FILE__, __func__, __LINE__)
+#define CFS_CHECK_STACK() __CHECK_STACK(__FILE__, __func__, __LINE__)
#else /* __x86_64__ */
-#define CHECK_STACK() do { } while(0)
+#define CFS_CHECK_STACK() do { } while(0)
#define CDEBUG_STACK() (0L)
#endif /* __x86_64__ */
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-bitops.h
+ */
+#include <linux/bitops.h>
+
+#define cfs_test_bit(nr, addr) test_bit(nr, addr)
+#define cfs_set_bit(nr, addr) set_bit(nr, addr)
+#define cfs_clear_bit(nr, addr) clear_bit(nr, addr)
+#define cfs_test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
+#define cfs_test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
+#define cfs_find_first_bit(addr, size) find_first_bit(addr, size)
+#define cfs_find_first_zero_bit(addr, size) find_first_zero_bit(addr, size)
+#define cfs_find_next_bit(addr, size, off) find_next_bit(addr, size, off)
+#define cfs_find_next_zero_bit(addr, size, off) \
+ find_next_zero_bit(addr, size, off)
+
+#define cfs_ffz(x) ffz(x)
+#define cfs_ffs(x) ffs(x)
+#define cfs_fls(x) fls(x)
typedef struct file cfs_file_t;
typedef struct dentry cfs_dentry_t;
typedef struct dirent64 cfs_dirent_t;
+typedef struct kstatfs cfs_kstatfs_t;
#define cfs_filp_size(f) (i_size_read((f)->f_dentry->d_inode))
#define cfs_filp_poff(f) (&(f)->f_pos)
* declared by CFS_DECL_* should be initialized explicitly.
*/
-
/*
- * spin_lock (use Linux kernel's primitives)
+ * spin_lock "implementation" (use Linux kernel's primitives)
*
* - spin_lock_init(x)
* - spin_lock(x)
+ * - spin_lock_bh(x)
+ * - spin_lock_bh_init(x)
* - spin_unlock(x)
+ * - spin_unlock_bh(x)
* - spin_trylock(x)
+ * - spin_is_locked(x)
*
+ * - spin_lock_irq(x)
* - spin_lock_irqsave(x, f)
* - spin_unlock_irqrestore(x, f)
+ * - read_lock_irqsave(lock, f)
+ * - write_lock_irqsave(lock, f)
+ * - write_unlock_irqrestore(lock, f)
+ *
+ * - SPIN_LOCK_UNLOCKED
*/
/*
- * rw_semaphore (use Linux kernel's primitives)
+ * spinlock "implementation"
+ */
+
+typedef spinlock_t cfs_spinlock_t;
+
+#define cfs_spin_lock_init(lock) spin_lock_init(lock)
+#define cfs_spin_lock(lock) spin_lock(lock)
+#define cfs_spin_lock_bh(lock) spin_lock_bh(lock)
+#define cfs_spin_lock_bh_init(lock) spin_lock_bh_init(lock)
+#define cfs_spin_unlock(lock) spin_unlock(lock)
+#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
+#define cfs_spin_trylock(lock) spin_trylock(lock)
+#define cfs_spin_is_locked(lock) spin_is_locked(lock)
+
+#define cfs_spin_lock_irq(lock) spin_lock_irq(lock)
+#define cfs_spin_unlock_irq(lock) spin_unlock_irq(lock)
+#define cfs_read_lock_irqsave(lock, f) read_lock_irqsave(lock, f)
+#define cfs_write_lock_irqsave(lock, f) write_lock_irqsave(lock, f)
+#define cfs_write_unlock_irqrestore(lock, f) write_unlock_irqrestore(lock, f)
+#define cfs_spin_lock_irqsave(lock, f) spin_lock_irqsave(lock, f)
+#define cfs_spin_unlock_irqrestore(lock, f) spin_unlock_irqrestore(lock, f)
+
+#define CFS_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
+
+/*
+ * rw_semaphore "implementation" (use Linux kernel's primitives)
*
+ * - sema_init(x)
* - init_rwsem(x)
* - down_read(x)
* - up_read(x)
* - down_write(x)
* - up_write(x)
*/
-#define fini_rwsem(s) do {} while(0)
+typedef struct rw_semaphore cfs_rw_semaphore_t;
+
+#define cfs_sema_init(s, val) sema_init(s, val)
+#define cfs_init_rwsem(s) init_rwsem(s)
+#define cfs_down_read(s) down_read(s)
+#define cfs_down_read_trylock(s) down_read_trylock(s)
+#define cfs_up_read(s) up_read(s)
+#define cfs_down_write(s) down_write(s)
+#define cfs_down_write_trylock(s) down_write_trylock(s)
+#define cfs_up_write(s) up_write(s)
+
+#define cfs_fini_rwsem(s) do {} while(0)
+
+#define CFS_DECLARE_RWSEM(name) DECLARE_RWSEM(name)
/*
- * rwlock_t (use Linux kernel's primitives)
+ * semaphore "implementation" (use Linux kernel's primitives)
+ */
+typedef struct semaphore cfs_semaphore_t;
+
+/*
+ * rwlock_t "implementation" (use Linux kernel's primitives)
*
* - rwlock_init(x)
* - read_lock(x)
* - read_unlock(x)
* - write_lock(x)
* - write_unlock(x)
+ * - write_lock_bh(x)
+ * - write_unlock_bh(x)
+ *
+ * - RW_LOCK_UNLOCKED
*/
+typedef rwlock_t cfs_rwlock_t;
+
+#define cfs_rwlock_init(lock) rwlock_init(lock)
+#define cfs_read_lock(lock) read_lock(lock)
+#define cfs_read_unlock(lock) read_unlock(lock)
+#define cfs_read_unlock_irqrestore(lock,flags) \
+ read_unlock_irqrestore(lock, flags)
+#define cfs_write_lock(lock) write_lock(lock)
+#define cfs_write_unlock(lock) write_unlock(lock)
+#define cfs_write_lock_bh(lock) write_lock_bh(lock)
+#define cfs_write_unlock_bh(lock) write_unlock_bh(lock)
+
+#define CFS_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
/*
- * mutex:
+ * completion "implementation" (use Linux kernel's primitives)
*
- * - init_mutex(x)
- * - init_mutex_locked(x)
- * - mutex_up(x)
- * - mutex_down(x)
+ * - DECLARE_COMPLETION(work)
+ * - INIT_COMPLETION(c)
+ * - COMPLETION_INITIALIZER(work)
+ * - init_completion(c)
+ * - complete(c)
+ * - wait_for_completion(c)
+ * - wait_for_completion_interruptible(c)
+ * - fini_completion(c)
*/
-#define init_mutex(x) init_MUTEX(x)
-#define init_mutex_locked(x) init_MUTEX_LOCKED(x)
-#define mutex_up(x) up(x)
-#define mutex_down(x) down(x)
-#define mutex_down_trylock(x) down_trylock(x)
+typedef struct completion cfs_completion_t;
+
+#define CFS_DECLARE_COMPLETION(work) DECLARE_COMPLETION(work)
+#define CFS_INIT_COMPLETION(c) INIT_COMPLETION(c)
+#define CFS_COMPLETION_INITIALIZER(work) COMPLETION_INITIALIZER(work)
+#define cfs_init_completion(c) init_completion(c)
+#define cfs_complete(c) complete(c)
+#define cfs_wait_for_completion(c) wait_for_completion(c)
+#define cfs_wait_for_completion_interruptible(c) \
+ wait_for_completion_interruptible(c)
+#define cfs_complete_and_exit(c, code) complete_and_exit(c, code)
+#define cfs_fini_completion(c) do { } while (0)
/*
- * completion (use Linux kernel's primitives)
+ * mutex "implementation" (use Linux kernel's primitives)
*
- * - init_complition(c)
- * - complete(c)
- * - wait_for_completion(c)
+ * - DECLARE_MUTEX(name)
+ * - mutex_init(x)
+ * - init_mutex(x)
+ * - init_mutex_locked(x)
+ * - init_MUTEX_LOCKED(x)
+ * - mutex_up(x)
+ * - mutex_down(x)
+ * - up(x)
+ * - down(x)
+ * - mutex_down_trylock(x)
+ * - mutex_lock(x)
+ * - mutex_unlock(x)
*/
+typedef struct mutex cfs_mutex_t;
+
+#define CFS_DEFINE_MUTEX(name) DEFINE_MUTEX(name)
+#define CFS_DECLARE_MUTEX(name) DECLARE_MUTEX(name)
+
+#define cfs_mutex_init(x) mutex_init(x)
+#define cfs_init_mutex(x) init_MUTEX(x)
+#define cfs_init_mutex_locked(x) init_MUTEX_LOCKED(x)
+#define cfs_mutex_up(x) up(x)
+#define cfs_mutex_down(x) down(x)
+#define cfs_up(x) up(x)
+#define cfs_down(x) down(x)
+#define cfs_mutex_down_trylock(x) down_trylock(x)
+#define cfs_mutex_lock(x) mutex_lock(x)
+#define cfs_mutex_unlock(x) mutex_unlock(x)
+#define cfs_mutex_trylock(x) mutex_trylock(x)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
*
**************************************************************************/
-struct mutex;
-
-static inline void mutex_destroy(struct mutex *lock)
+static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
{
}
*
* \retval 0 mutex is not locked. This should never happen.
*/
-static inline int mutex_is_locked(struct mutex *lock)
+static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
{
return 1;
}
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) */
+#define cfs_mutex_destroy(x) mutex_destroy(x)
+#define cfs_mutex_is_locked(x) mutex_is_locked(x)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) */
+/*
+ * Kernel locking primitives
+ *
+ * - lock_kernel
+ * - unlock_kernel
+ */
+#define cfs_lock_kernel() lock_kernel()
+#define cfs_unlock_kernel() unlock_kernel()
+
#ifndef lockdep_set_class
/**************************************************************************
*
**************************************************************************/
-struct lock_class_key {
+typedef struct cfs_lock_class_key {
;
-};
+} cfs_lock_class_key_t;
-# define lockdep_set_class(lock, key) \
+#define cfs_lockdep_set_class(lock, key) \
do { (void)sizeof (lock);(void)sizeof (key); } while (0)
/* This has to be a macro, so that `subclass' can be undefined in kernels that
* do not support lockdep. */
-static inline void lockdep_off(void)
+static inline void cfs_lockdep_off(void)
{
}
-static inline void lockdep_on(void)
+static inline void cfs_lockdep_on(void)
{
}
+#else
+typedef struct lock_class_key cfs_lock_class_key_t;
+#define cfs_lockdep_set_class(lock, key) lockdep_set_class(lock, key)
+#define cfs_lockdep_off() lockdep_off()
+#define cfs_lockdep_on() lockdep_on()
#endif /* lockdep_set_class */
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#ifndef mutex_lock_nested
-# define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#define cfs_mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#else
+#define cfs_mutex_lock_nested(mutex, subclass) \
+ mutex_lock_nested(mutex, subclass)
#endif
#ifndef spin_lock_nested
-# define spin_lock_nested(lock, subclass) spin_lock(lock)
+#define cfs_spin_lock_nested(lock, subclass) spin_lock(lock)
+#else
+#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
#endif
#ifndef down_read_nested
-# define down_read_nested(lock, subclass) down_read(lock)
+#define cfs_down_read_nested(lock, subclass) down_read(lock)
+#else
+#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
#endif
#ifndef down_write_nested
-# define down_write_nested(lock, subclass) down_write(lock)
+#define cfs_down_write_nested(lock, subclass) down_write(lock)
+#else
+#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
#endif
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
-/*
- * spinlock "implementation"
- */
-
-typedef spinlock_t cfs_spinlock_t;
-
-#define cfs_spin_lock_init(lock) spin_lock_init(lock)
-#define cfs_spin_lock(lock) spin_lock(lock)
-#define cfs_spin_lock_bh(lock) spin_lock_bh(lock)
-#define cfs_spin_unlock(lock) spin_unlock(lock)
-#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
-
-/*
- * rwlock "implementation"
- */
-
-typedef rwlock_t cfs_rwlock_t;
-
-#define cfs_rwlock_init(lock) rwlock_init(lock)
-#define cfs_read_lock(lock) read_lock(lock)
-#define cfs_read_unlock(lock) read_unlock(lock)
-#define cfs_write_lock_bh(lock) write_lock_bh(lock)
-#define cfs_write_unlock_bh(lock) write_unlock_bh(lock)
-
#endif /* __LIBCFS_LINUX_CFS_LOCK_H__ */
#define CFS_PAGE_SHIFT PAGE_CACHE_SHIFT
#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
+#define cfs_num_physpages num_physpages
+
+#define cfs_copy_from_user(to, from, n) copy_from_user(to, from, n)
+#define cfs_copy_to_user(to, from, n) copy_to_user(to, from, n)
+
static inline void *cfs_page_address(cfs_page_t *page)
{
/*
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
-#define CFS_NUM_CACHEPAGES min(num_physpages, 1UL << (30-CFS_PAGE_SHIFT) *3/4)
+#define CFS_NUM_CACHEPAGES \
+ min(cfs_num_physpages, 1UL << (30 - CFS_PAGE_SHIFT) * 3 / 4)
#else
-#define CFS_NUM_CACHEPAGES num_physpages
+#define CFS_NUM_CACHEPAGES cfs_num_physpages
#endif
/*
extern void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
extern int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
-/*
- */
#define CFS_DECL_MMSPACE mm_segment_t __oldfs
-#define CFS_MMSPACE_OPEN do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
+#define CFS_MMSPACE_OPEN \
+ do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
#define CFS_MMSPACE_CLOSE set_fs(__oldfs)
+#define CFS_SLAB_HWCACHE_ALIGN SLAB_HWCACHE_ALIGN
+#define CFS_SLAB_KERNEL SLAB_KERNEL
+#define CFS_SLAB_NOFS SLAB_NOFS
+
+/*
+ * Shrinker
+ */
+
+#ifndef HAVE_REGISTER_SHRINKER
+/* Shrinker callback */
+typedef shrinker_t cfs_shrinker_t;
+#define cfs_set_shrinker(seeks, shrinker) set_shrinker(seeks, shrinker)
+#define cfs_remove_shrinker(shrinker) remove_shrinker(shrinker)
+#endif /* !HAVE_REGISTER_SHRINKER */
+
+/* struct shrinker */
+#define cfs_shrinker shrinker
+
+#define CFS_DEFAULT_SEEKS DEFAULT_SEEKS
#endif /* __LINUX_CFS_MEM_H__ */
#include <libcfs/linux/linux-time.h>
+#define CFS_KERN_EMERG KERN_EMERG
+#define CFS_KERN_ALERT KERN_ALERT
+#define CFS_KERN_CRIT KERN_CRIT
+#define CFS_KERN_ERR KERN_ERR
+#define CFS_KERN_WARNING KERN_WARNING
+#define CFS_KERN_NOTICE KERN_NOTICE
+#define CFS_KERN_INFO KERN_INFO
+#define CFS_KERN_DEBUG KERN_DEBUG
+
+/*
+ * CPU
+ */
+#ifdef for_each_possible_cpu
+#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#elif defined(for_each_cpu)
+#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
+#endif
+
+#ifdef NR_CPUS
+#define CFS_NR_CPUS NR_CPUS
+#else
+#define CFS_NR_CPUS 1
+#endif
+
+#define cfs_set_cpus_allowed(t, mask) set_cpus_allowed(t, mask)
+/*
+ * cache
+ */
+#define CFS_L1_CACHE_ALIGN(x) L1_CACHE_ALIGN(x)
+
+/*
+ * IRQs
+ */
+#define CFS_NR_IRQS NR_IRQS
+
#define CFS_EXPORT_SYMBOL(s) EXPORT_SYMBOL(s)
/*
#define cfs_symbol_get(s) inter_module_get(s)
#define cfs_symbol_put(s) inter_module_put(s)
#define cfs_module_get() MOD_INC_USE_COUNT
-#define cfs_module_put() MOD_DEC_USE_COUNT
#else
#define cfs_symbol_register(s, p) do {} while(0)
#define cfs_symbol_unregister(s) do {} while(0)
#define cfs_symbol_get(s) symbol_get(s)
#define cfs_symbol_put(s) symbol_put(s)
#define cfs_module_get() try_module_get(THIS_MODULE)
-#define cfs_module_put() module_put(THIS_MODULE)
+#define cfs_try_module_get(m) try_module_get(m)
+#define __cfs_module_get(m) __module_get(m)
+#define cfs_module_put(m) module_put(m)
+#define cfs_module_refcount(m) module_refcount(m)
#endif
+typedef struct module cfs_module_t;
+
/*
* Proc file system APIs
*/
#define CFS_TASK_UNINT TASK_UNINTERRUPTIBLE
#define CFS_TASK_RUNNING TASK_RUNNING
-#define cfs_set_current_state(state) set_current_state(state)
+#define cfs_set_current_state(state) set_current_state(state)
+#define cfs_wait_event(wq, cond) wait_event(wq, cond)
typedef wait_queue_t cfs_waitlink_t;
typedef wait_queue_head_t cfs_waitq_t;
/* Module interfaces */
#define cfs_module(name, version, init, fini) \
-module_init(init); \
-module_exit(fini)
+ module_init(init); \
+ module_exit(fini)
+#define cfs_request_module request_module
/*
* Signal
#endif
#ifndef wait_event_interruptible_timeout /* Only for RHEL3 2.4.21 kernel */
-#define __wait_event_interruptible_timeout(wq, condition, timeout, ret) \
+#define __wait_event_interruptible_timeout(wq, condition, timeout, ret) \
do { \
int __ret = 0; \
if (!(condition)) { \
ret = 0; \
if (!(condition)) \
__wait_event_interruptible_timeout(wq, condition, \
- timeout, ret); \
+ timeout, ret); \
} while (0)
#else
#define cfs_waitq_wait_event_interruptible_timeout(wq, c, timeout, ret) \
typedef atomic_t cfs_atomic_t;
-#define cfs_atomic_read(atom) atomic_read(atom)
-#define cfs_atomic_inc(atom) atomic_inc(atom)
-#define cfs_atomic_dec(atom) atomic_dec(atom)
-#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
-#define cfs_atomic_set(atom, value) atomic_set(atom, value)
-#define cfs_atomic_add(value, atom) atomic_add(value, atom)
-#define cfs_atomic_sub(value, atom) atomic_sub(value, atom)
+#define cfs_atomic_read(atom) atomic_read(atom)
+#define cfs_atomic_inc(atom) atomic_inc(atom)
+#define cfs_atomic_inc_and_test(atom) atomic_inc_and_test(atom)
+#define cfs_atomic_inc_return(atom) atomic_inc_return(atom)
+#define cfs_atomic_inc_not_zero(atom) atomic_inc_not_zero(atom)
+#define cfs_atomic_dec(atom) atomic_dec(atom)
+#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
+#define cfs_atomic_dec_and_lock(atom, lock) atomic_dec_and_lock(atom, lock)
+#define cfs_atomic_dec_return(atom) atomic_dec_return(atom)
+#define cfs_atomic_set(atom, value) atomic_set(atom, value)
+#define cfs_atomic_add(value, atom) atomic_add(value, atom)
+#define cfs_atomic_add_return(value, atom) atomic_add_return(value, atom)
+#define cfs_atomic_sub(value, atom) atomic_sub(value, atom)
+#define cfs_atomic_sub_and_test(value, atom) atomic_sub_and_test(value, atom)
+#define cfs_atomic_sub_return(value, atom) atomic_sub_return(value, atom)
+#define CFS_ATOMIC_INIT(i) ATOMIC_INIT(i)
/*
* membar
#define cfs_in_interrupt() in_interrupt()
+/*
+ * might_sleep
+ */
+#define cfs_might_sleep() might_sleep()
+
+/*
+ * group_info
+ */
+typedef struct group_info cfs_group_info_t;
+
+#define cfs_get_group_info(group_info) get_group_info(group_info)
+#define cfs_put_group_info(group_info) put_group_info(group_info)
+#define cfs_set_current_groups(group_info) set_current_groups(group_info)
+#define cfs_groups_free(group_info) groups_free(group_info)
+#define cfs_groups_alloc(gidsetsize) groups_alloc(gidsetsize)
+
+/*
+ * Random bytes
+ */
+#define cfs_get_random_bytes(buf, nbytes) get_random_bytes(buf, nbytes)
#endif
* cfs_time_t cfs_time_current(void);
* cfs_time_t cfs_time_add (cfs_time_t, cfs_duration_t);
* cfs_duration_t cfs_time_sub (cfs_time_t, cfs_time_t);
- * int cfs_time_before (cfs_time_t, cfs_time_t);
- * int cfs_time_beforeq(cfs_time_t, cfs_time_t);
+ * int cfs_impl_time_before (cfs_time_t, cfs_time_t);
+ * int cfs_impl_time_before_eq(cfs_time_t, cfs_time_t);
*
* cfs_duration_t cfs_duration_build(int64_t);
*
#define ONE_BILLION ((u_int64_t)1000000000)
#define ONE_MILLION 1000000
+#define CFS_HZ HZ
#ifndef __KERNEL__
#error This include is only for kernel use.
typedef unsigned long cfs_time_t; /* jiffies */
typedef long cfs_duration_t;
+typedef cycles_t cfs_cycles_t;
+static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
+{
+ return time_before(t1, t2);
+}
+
+static inline int cfs_time_beforeq(cfs_time_t t1, cfs_time_t t2)
+{
+ return time_before_eq(t1, t2);
+}
static inline cfs_time_t cfs_time_current(void)
{
s->tv_usec = t;
#else
s->tv_sec = d / HZ;
- s->tv_usec = ((d - (cfs_duration_t)s->tv_sec * HZ) * ONE_MILLION) / HZ;
+ s->tv_usec = ((d - (cfs_duration_t)s->tv_sec * HZ) * \
+ ONE_MILLION) / HZ;
#endif
}
#define CFS_TIME_T "%lu"
#define CFS_DURATION_T "%ld"
-#define cfs_do_gettimeofday(tv) do_gettimeofday(tv)
+#define cfs_gettimeofday(tv) do_gettimeofday(tv)
#endif /* __LIBCFS_LINUX_LINUX_TIME_H__ */
/*
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/user-bitops.h
+ */
+#include <linux/types.h>
+
+typedef umode_t cfs_umode_t;
#endif
#ifndef HAVE_CPU_ONLINE
-#define cpu_online(cpu) ((1<<cpu) & (cpu_online_map))
+#define cfs_cpu_online(cpu) ((1<<cpu) & (cpu_online_map))
+#else
+#define cfs_cpu_online(cpu) cpu_online(cpu)
#endif
#ifndef HAVE_CPUMASK_T
-typedef unsigned long cpumask_t;
-#define cpu_set(cpu, map) set_bit(cpu, &(map))
-#define cpus_clear(map) memset(&(map), 0, sizeof(cpumask_t))
+typedef unsigned long cfs_cpumask_t;
+#define cfs_cpu_set(cpu, map) set_bit(cpu, &(map))
+#define cpus_clear(map) memset(&(map), 0, sizeof(cfs_cpumask_t))
#endif
#ifndef __user
#endif
#ifndef __fls
-#define __fls fls
+#define __cfs_fls fls
+#else
+#define __cfs_fls __fls
#endif
#define ll_proc_dointvec(table, write, filp, buffer, lenp, ppos) \
#include <linux/list.h>
-#define CFS_LIST_HEAD_INIT(n) LIST_HEAD_INIT(n)
-#define CFS_LIST_HEAD(n) LIST_HEAD(n)
-#define CFS_INIT_LIST_HEAD(p) INIT_LIST_HEAD(p)
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define CFS_HLIST_HEAD_INIT HLIST_HEAD_INIT
-#define CFS_HLIST_HEAD(n) HLIST_HEAD(n)
-#define CFS_INIT_HLIST_HEAD(p) INIT_HLIST_HEAD(p)
-#define CFS_INIT_HLIST_NODE(p) INIT_HLIST_NODE(p)
-#endif
+typedef struct list_head cfs_list_t;
+
+#define __cfs_list_add(new, prev, next) __list_add(new, prev, next)
+#define cfs_list_add(new, head) list_add(new, head)
+
+#define cfs_list_add_tail(new, head) list_add_tail(new, head)
+
+#define __cfs_list_del(prev, next) __list_del(prev, next)
+#define cfs_list_del(entry) list_del(entry)
+#define cfs_list_del_init(entry) list_del_init(entry)
+
+#define cfs_list_move(list, head) list_move(list, head)
+#define cfs_list_move_tail(list, head) list_move_tail(list, head)
+
+#define cfs_list_empty(head) list_empty(head)
+#define cfs_list_empty_careful(head) list_empty_careful(head)
+
+#define __cfs_list_splice(list, head) __list_splice(list, head)
+#define cfs_list_splice(list, head) list_splice(list, head)
+
+#define cfs_list_splice_init(list, head) list_splice_init(list, head)
+
+#define cfs_list_entry(ptr, type, member) list_entry(ptr, type, member)
+#define cfs_list_for_each(pos, head) list_for_each(pos, head)
+#define cfs_list_for_each_safe(pos, n, head) list_for_each_safe(pos, n, head)
+#define cfs_list_for_each_prev(pos, head) list_for_each_prev(pos, head)
+#define cfs_list_for_each_entry(pos, head, member) \
+ list_for_each_entry(pos, head, member)
+#define cfs_list_for_each_entry_reverse(pos, head, member) \
+ list_for_each_entry_reverse(pos, head, member)
+#define cfs_list_for_each_entry_safe(pos, n, head, member) \
+ list_for_each_entry_safe(pos, n, head, member)
+#ifdef list_for_each_entry_safe_from
+#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \
+ list_for_each_entry_safe_from(pos, n, head, member)
+#endif /* list_for_each_entry_safe_from */
+#define cfs_list_for_each_entry_continue(pos, head, member) \
+ list_for_each_entry_continue(pos, head, member)
+
+#define CFS_LIST_HEAD_INIT(n) LIST_HEAD_INIT(n)
+#define CFS_LIST_HEAD(n) LIST_HEAD(n)
+#define CFS_INIT_LIST_HEAD(p) INIT_LIST_HEAD(p)
+
+typedef struct hlist_head cfs_hlist_head_t;
+typedef struct hlist_node cfs_hlist_node_t;
+
+#define cfs_hlist_unhashed(h) hlist_unhashed(h)
+
+#define cfs_hlist_empty(h) hlist_empty(h)
+
+#define __cfs_hlist_del(n) __hlist_del(n)
+#define cfs_hlist_del(n) hlist_del(n)
+#define cfs_hlist_del_init(n) hlist_del_init(n)
+
+#define cfs_hlist_add_head(n, next) hlist_add_head(n, next)
+#define cfs_hlist_add_before(n, next) hlist_add_before(n, next)
+#define cfs_hlist_add_after(n, next) hlist_add_after(n, next)
+
+#define cfs_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
+#define cfs_hlist_for_each(pos, head) hlist_for_each(pos, head)
+#define cfs_hlist_for_each_safe(pos, n, head) \
+ hlist_for_each_safe(pos, n, head)
+#define cfs_hlist_for_each_entry(tpos, pos, head, member) \
+ hlist_for_each_entry(tpos, pos, head, member)
+#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \
+ hlist_for_each_entry_continue(tpos, pos, member)
+#define cfs_hlist_for_each_entry_from(tpos, pos, member) \
+ hlist_for_each_entry_from(tpos, pos, member)
+#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ hlist_for_each_entry_safe(tpos, pos, n, head, member)
+
+#define CFS_HLIST_HEAD_INIT HLIST_HEAD_INIT
+#define CFS_HLIST_HEAD(n) HLIST_HEAD(n)
+#define CFS_INIT_HLIST_HEAD(p) INIT_HLIST_HEAD(p)
+#define CFS_INIT_HLIST_NODE(p) INIT_HLIST_NODE(p)
#else /* !defined (__linux__) || !defined(__KERNEL__) */
#define prefetch(a) ((void)a)
-struct list_head {
- struct list_head *next, *prev;
+struct cfs_list_head {
+ struct cfs_list_head *next, *prev;
};
-typedef struct list_head list_t;
+typedef struct cfs_list_head cfs_list_t;
#define CFS_LIST_HEAD_INIT(name) { &(name), &(name) }
#define CFS_LIST_HEAD(name) \
- struct list_head name = CFS_LIST_HEAD_INIT(name)
+ cfs_list_t name = CFS_LIST_HEAD_INIT(name)
#define CFS_INIT_LIST_HEAD(ptr) do { \
(ptr)->next = (ptr); (ptr)->prev = (ptr); \
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-static inline void __list_add(struct list_head * new,
- struct list_head * prev,
- struct list_head * next)
+static inline void __cfs_list_add(cfs_list_t * new,
+ cfs_list_t * prev,
+ cfs_list_t * next)
{
next->prev = new;
new->next = next;
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
-static inline void list_add(struct list_head *new, struct list_head *head)
+static inline void cfs_list_add(cfs_list_t *new,
+ cfs_list_t *head)
{
- __list_add(new, head, head->next);
+ __cfs_list_add(new, head, head->next);
}
/**
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
-static inline void list_add_tail(struct list_head *new, struct list_head *head)
+static inline void cfs_list_add_tail(cfs_list_t *new,
+ cfs_list_t *head)
{
- __list_add(new, head->prev, head);
+ __cfs_list_add(new, head->prev, head);
}
/*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-static inline void __list_del(struct list_head * prev, struct list_head * next)
+static inline void __cfs_list_del(cfs_list_t *prev,
+ cfs_list_t *next)
{
next->prev = prev;
prev->next = next;
/**
* Remove an entry from the list it is currently in.
* \param entry the entry to remove
- * Note: list_empty(entry) does not return true after this, the entry is in an undefined state.
+ * Note: list_empty(entry) does not return true after this, the entry is in an
+ * undefined state.
*/
-static inline void list_del(struct list_head *entry)
+static inline void cfs_list_del(cfs_list_t *entry)
{
- __list_del(entry->prev, entry->next);
+ __cfs_list_del(entry->prev, entry->next);
}
/**
* Remove an entry from the list it is currently in and reinitialize it.
* \param entry the entry to remove.
*/
-static inline void list_del_init(struct list_head *entry)
+static inline void cfs_list_del_init(cfs_list_t *entry)
{
- __list_del(entry->prev, entry->next);
+ __cfs_list_del(entry->prev, entry->next);
CFS_INIT_LIST_HEAD(entry);
}
/**
- * Remove an entry from the list it is currently in and insert it at the start of another list.
+ * Remove an entry from the list it is currently in and insert it at the start
+ * of another list.
* \param list the entry to move
* \param head the list to move it to
*/
-static inline void list_move(struct list_head *list, struct list_head *head)
+static inline void cfs_list_move(cfs_list_t *list,
+ cfs_list_t *head)
{
- __list_del(list->prev, list->next);
- list_add(list, head);
+ __cfs_list_del(list->prev, list->next);
+ cfs_list_add(list, head);
}
/**
- * Remove an entry from the list it is currently in and insert it at the end of another list.
+ * Remove an entry from the list it is currently in and insert it at the end of
+ * another list.
* \param list the entry to move
* \param head the list to move it to
*/
-static inline void list_move_tail(struct list_head *list,
- struct list_head *head)
+static inline void cfs_list_move_tail(cfs_list_t *list,
+ cfs_list_t *head)
{
- __list_del(list->prev, list->next);
- list_add_tail(list, head);
+ __cfs_list_del(list->prev, list->next);
+ cfs_list_add_tail(list, head);
}
/**
* Test whether a list is empty
* \param head the list to test.
*/
-static inline int list_empty(struct list_head *head)
+static inline int cfs_list_empty(cfs_list_t *head)
{
return head->next == head;
}
* Tests whether a list is empty _and_ checks that no other CPU might be
* in the process of modifying either member (next or prev)
*
- * NOTE: using list_empty_careful() without synchronization
+ * NOTE: using cfs_list_empty_careful() without synchronization
* can only be safe if the only activity that can happen
- * to the list entry is list_del_init(). Eg. it cannot be used
+ * to the list entry is cfs_list_del_init(). Eg. it cannot be used
* if another CPU could re-list_add() it.
*/
-static inline int list_empty_careful(const struct list_head *head)
+static inline int cfs_list_empty_careful(const cfs_list_t *head)
{
- struct list_head *next = head->next;
+ cfs_list_t *next = head->next;
return (next == head) && (next == head->prev);
}
-static inline void __list_splice(struct list_head *list,
- struct list_head *head)
+static inline void __cfs_list_splice(cfs_list_t *list,
+ cfs_list_t *head)
{
- struct list_head *first = list->next;
- struct list_head *last = list->prev;
- struct list_head *at = head->next;
+ cfs_list_t *first = list->next;
+ cfs_list_t *last = list->prev;
+ cfs_list_t *at = head->next;
first->prev = head;
head->next = first;
* The contents of \a list are added at the start of \a head. \a list is in an
* undefined state on return.
*/
-static inline void list_splice(struct list_head *list, struct list_head *head)
+static inline void cfs_list_splice(cfs_list_t *list,
+ cfs_list_t *head)
{
- if (!list_empty(list))
- __list_splice(list, head);
+ if (!cfs_list_empty(list))
+ __cfs_list_splice(list, head);
}
/**
* The contents of \a list are added at the start of \a head. \a list is empty
* on return.
*/
-static inline void list_splice_init(struct list_head *list,
- struct list_head *head)
+static inline void cfs_list_splice_init(cfs_list_t *list,
+ cfs_list_t *head)
{
- if (!list_empty(list)) {
- __list_splice(list, head);
+ if (!cfs_list_empty(list)) {
+ __cfs_list_splice(list, head);
CFS_INIT_LIST_HEAD(list);
}
}
/**
- * Get the container of a list
+ * Get the container of a list
* \param ptr the embedded list.
* \param type the type of the struct this is embedded in.
* \param member the member name of the list within the struct.
*/
-#define list_entry(ptr, type, member) \
+#define cfs_list_entry(ptr, type, member) \
((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
/**
* Iterate over a list
* \param pos the iterator
* \param head the list to iterate over
- *
+ *
* Behaviour is undefined if \a pos is removed from the list in the body of the
* loop.
*/
-#define list_for_each(pos, head) \
+#define cfs_list_for_each(pos, head) \
for (pos = (head)->next, prefetch(pos->next); pos != (head); \
pos = pos->next, prefetch(pos->next))
/**
- * iterate over a list safely
+ * Iterate over a list safely
* \param pos the iterator
* \param n temporary storage
* \param head the list to iterate over
* This is safe to use if \a pos could be removed from the list in the body of
* the loop.
*/
-#define list_for_each_safe(pos, n, head) \
+#define cfs_list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
/**
+ * Iterate over a list continuing after existing point
+ * \param pos the type * to use as a loop counter
+ * \param head the list head
+ * \param member the name of the list_struct within the struct
+ */
+#define cfs_list_for_each_entry_continue(pos, head, member) \
+ for (pos = cfs_list_entry(pos->member.next, typeof(*pos), member); \
+ prefetch(pos->member.next), &pos->member != (head); \
+ pos = cfs_list_entry(pos->member.next, typeof(*pos), member))
+
+/**
* \defgroup hlist Hash List
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is too
* @{
*/
-struct hlist_head {
- struct hlist_node *first;
-};
+typedef struct cfs_hlist_node {
+ struct cfs_hlist_node *next, **pprev;
+} cfs_hlist_node_t;
-struct hlist_node {
- struct hlist_node *next, **pprev;
-};
+typedef struct cfs_hlist_head {
+ cfs_hlist_node_t *first;
+} cfs_hlist_head_t;
/* @} */
*/
#define CFS_HLIST_HEAD_INIT { NULL_P }
-#define CFS_HLIST_HEAD(name) struct hlist_head name = { NULL_P }
+#define CFS_HLIST_HEAD(name) cfs_hlist_head_t name = { NULL_P }
#define CFS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL_P)
#define CFS_INIT_HLIST_NODE(ptr) ((ptr)->next = NULL_P, (ptr)->pprev = NULL_P)
-#define HLIST_HEAD_INIT CFS_HLIST_HEAD_INIT
-#define HLIST_HEAD(n) CFS_HLIST_HEAD(n)
-#define INIT_HLIST_HEAD(p) CFS_INIT_HLIST_HEAD(p)
-#define INIT_HLIST_NODE(p) CFS_INIT_HLIST_NODE(p)
-
-static inline int hlist_unhashed(const struct hlist_node *h)
+static inline int cfs_hlist_unhashed(const cfs_hlist_node_t *h)
{
return !h->pprev;
}
-static inline int hlist_empty(const struct hlist_head *h)
+static inline int cfs_hlist_empty(const cfs_hlist_head_t *h)
{
return !h->first;
}
-static inline void __hlist_del(struct hlist_node *n)
+static inline void __cfs_hlist_del(cfs_hlist_node_t *n)
{
- struct hlist_node *next = n->next;
- struct hlist_node **pprev = n->pprev;
+ cfs_hlist_node_t *next = n->next;
+ cfs_hlist_node_t **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}
-static inline void hlist_del(struct hlist_node *n)
+static inline void cfs_hlist_del(cfs_hlist_node_t *n)
{
- __hlist_del(n);
+ __cfs_hlist_del(n);
}
-static inline void hlist_del_init(struct hlist_node *n)
+static inline void cfs_hlist_del_init(cfs_hlist_node_t *n)
{
if (n->pprev) {
- __hlist_del(n);
- INIT_HLIST_NODE(n);
+ __cfs_hlist_del(n);
+ CFS_INIT_HLIST_NODE(n);
}
}
-static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+static inline void cfs_hlist_add_head(cfs_hlist_node_t *n,
+ cfs_hlist_head_t *h)
{
- struct hlist_node *first = h->first;
+ cfs_hlist_node_t *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
}
/* next must be != NULL */
-static inline void hlist_add_before(struct hlist_node *n,
- struct hlist_node *next)
+static inline void cfs_hlist_add_before(cfs_hlist_node_t *n,
+ cfs_hlist_node_t *next)
{
n->pprev = next->pprev;
n->next = next;
*(n->pprev) = n;
}
-static inline void hlist_add_after(struct hlist_node *n,
- struct hlist_node *next)
+static inline void cfs_hlist_add_after(cfs_hlist_node_t *n,
+ cfs_hlist_node_t *next)
{
next->next = n->next;
n->next = next;
next->next->pprev = &next->next;
}
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+#define cfs_hlist_entry(ptr, type, member) container_of(ptr,type,member)
-#define hlist_for_each(pos, head) \
+#define cfs_hlist_for_each(pos, head) \
for (pos = (head)->first; pos && (prefetch(pos->next), 1); \
pos = pos->next)
-#define hlist_for_each_safe(pos, n, head) \
+#define cfs_hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && (n = pos->next, 1); \
pos = n)
* \param head the head for your list.
* \param member the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+#define cfs_hlist_for_each_entry(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* \param pos the &struct hlist_node to use as a loop counter.
* \param member the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry_continue(tpos, pos, member) \
- for (pos = (pos)->next; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \
+ for (pos = (pos)->next; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* \param pos the &struct hlist_node to use as a loop counter.
* \param member the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry_from(tpos, pos, member) \
- for (; pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+#define cfs_hlist_for_each_entry_from(tpos, pos, member) \
+ for (; pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* \param head the head for your list.
* \param member the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->first; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ n = pos->next; 1; }) && \
+ ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
/* @} */
#endif /* __linux__ && __KERNEL__ */
-#ifndef list_for_each_prev
+#ifndef cfs_list_for_each_prev
/**
* Iterate over a list in reverse order
* \param pos the &struct list_head to use as a loop counter.
* \param head the head for your list.
*/
-#define list_for_each_prev(pos, head) \
+#define cfs_list_for_each_prev(pos, head) \
for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
pos = pos->prev, prefetch(pos->prev))
-#endif /* list_for_each_prev */
+#endif /* cfs_list_for_each_prev */
-#ifndef list_for_each_entry
+#ifndef cfs_list_for_each_entry
/**
* Iterate over a list of given type
* \param pos the type * to use as a loop counter.
* \param head the head for your list.
* \param member the name of the list_struct within the struct.
*/
-#define list_for_each_entry(pos, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member), \
- prefetch(pos->member.next); \
- &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member), \
+#define cfs_list_for_each_entry(pos, head, member) \
+ for (pos = cfs_list_entry((head)->next, typeof(*pos), member), \
+ prefetch(pos->member.next); \
+ &pos->member != (head); \
+ pos = cfs_list_entry(pos->member.next, typeof(*pos), member), \
prefetch(pos->member.next))
-#endif /* list_for_each_entry */
+#endif /* cfs_list_for_each_entry */
-#ifndef list_for_each_entry_rcu
-#define list_for_each_entry_rcu(pos, head, member) \
- list_for_each_entry(pos, head, member)
-#endif
-
-#ifndef list_for_each_entry_reverse
+#ifndef cfs_list_for_each_entry_reverse
/**
* Iterate backwards over a list of given type.
* \param pos the type * to use as a loop counter.
* \param head the head for your list.
* \param member the name of the list_struct within the struct.
*/
-#define list_for_each_entry_reverse(pos, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = list_entry(pos->member.prev, typeof(*pos), member))
-#endif /* list_for_each_entry_reverse */
+#define cfs_list_for_each_entry_reverse(pos, head, member) \
+ for (pos = cfs_list_entry((head)->prev, typeof(*pos), member); \
+ prefetch(pos->member.prev), &pos->member != (head); \
+ pos = cfs_list_entry(pos->member.prev, typeof(*pos), member))
+#endif /* cfs_list_for_each_entry_reverse */
-#ifndef list_for_each_entry_safe
+#ifndef cfs_list_for_each_entry_safe
/**
* Iterate over a list of given type safe against removal of list entry
* \param pos the type * to use as a loop counter.
* \param head the head for your list.
* \param member the name of the list_struct within the struct.
*/
-#define list_for_each_entry_safe(pos, n, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member), \
- n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
+#define cfs_list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = cfs_list_entry((head)->next, typeof(*pos), member), \
+ n = cfs_list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = cfs_list_entry(n->member.next, typeof(*n), member))
-#endif /* list_for_each_entry_safe */
+#endif /* cfs_list_for_each_entry_safe */
-#ifndef list_for_each_entry_safe_from
+#ifndef cfs_list_for_each_entry_safe_from
/**
* Iterate over a list continuing from an existing point
* \param pos the type * to use as a loop cursor.
* Iterate over list of given type from current point, safe against
* removal of list entry.
*/
-#define list_for_each_entry_safe_from(pos, n, head, member) \
- for (n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
-#endif /* list_for_each_entry_safe_from */
+#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \
+ for (n = cfs_list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = cfs_list_entry(n->member.next, typeof(*n), member))
+#endif /* cfs_list_for_each_entry_safe_from */
#define cfs_list_for_each_entry_typed(pos, head, type, member) \
- for (pos = list_entry((head)->next, type, member), \
- prefetch(pos->member.next); \
- &pos->member != (head); \
- pos = list_entry(pos->member.next, type, member), \
+ for (pos = cfs_list_entry((head)->next, type, member), \
+ prefetch(pos->member.next); \
+ &pos->member != (head); \
+ pos = cfs_list_entry(pos->member.next, type, member), \
prefetch(pos->member.next))
#define cfs_list_for_each_entry_reverse_typed(pos, head, type, member) \
- for (pos = list_entry((head)->prev, type, member); \
+ for (pos = cfs_list_entry((head)->prev, type, member); \
prefetch(pos->member.prev), &pos->member != (head); \
- pos = list_entry(pos->member.prev, type, member))
+ pos = cfs_list_entry(pos->member.prev, type, member))
#define cfs_list_for_each_entry_safe_typed(pos, n, head, type, member) \
- for (pos = list_entry((head)->next, type, member), \
- n = list_entry(pos->member.next, type, member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, type, member))
-
-#define cfs_list_for_each_entry_safe_from_typed(pos, n, head, type, member) \
- for (n = list_entry(pos->member.next, type, member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, type, member))
+ for (pos = cfs_list_entry((head)->next, type, member), \
+ n = cfs_list_entry(pos->member.next, type, member); \
+ &pos->member != (head); \
+ pos = n, n = cfs_list_entry(n->member.next, type, member))
+
+#define cfs_list_for_each_entry_safe_from_typed(pos, n, head, type, member) \
+ for (n = cfs_list_entry(pos->member.next, type, member); \
+ &pos->member != (head); \
+ pos = n, n = cfs_list_entry(n->member.next, type, member))
+
#define cfs_hlist_for_each_entry_typed(tpos, pos, head, type, member) \
for (pos = (head)->first; \
pos && (prefetch(pos->next), 1) && \
- (tpos = hlist_entry(pos, type, member), 1); \
+ (tpos = cfs_hlist_entry(pos, type, member), 1); \
pos = pos->next)
-#define cfs_hlist_for_each_entry_safe_typed(tpos, pos, n, head, type, member)\
- for (pos = (head)->first; \
- pos && (n = pos->next, 1) && \
- (tpos = hlist_entry(pos, type, member), 1); \
+#define cfs_hlist_for_each_entry_safe_typed(tpos, pos, n, head, type, member) \
+ for (pos = (head)->first; \
+ pos && (n = pos->next, 1) && \
+ (tpos = cfs_hlist_entry(pos, type, member), 1); \
pos = n)
#endif /* __LIBCFS_LUSTRE_LIST_H__ */
#include <libcfs/user-bitops.h>
#include <libcfs/posix/posix-kernelcomm.h>
-# define do_gettimeofday(tv) gettimeofday(tv, NULL);
-typedef unsigned long long cycles_t;
+# define cfs_gettimeofday(tv) gettimeofday(tv, NULL);
+typedef unsigned long long cfs_cycles_t;
#define IS_ERR(a) ((unsigned long)(a) > (unsigned long)-1000L)
#define PTR_ERR(a) ((long)(a))
# ifndef THREAD_SIZE /* x86_64 linux has THREAD_SIZE in userspace */
-# define THREAD_SIZE 8192
+# define CFS_THREAD_SIZE 8192
+# else
+# define CFS_THREAD_SIZE THREAD_SIZE
# endif
-#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
+#define LUSTRE_TRACE_SIZE (CFS_THREAD_SIZE >> 5)
-#define CHECK_STACK() do { } while(0)
+#define CFS_CHECK_STACK() do { } while(0)
#define CDEBUG_STACK() (0L)
/* initial pid */
/**
* Module support (probably shouldn't be used in generic code?)
*/
-struct module {
+typedef struct cfs_module {
int count;
char *name;
-};
+} cfs_module_t;
static inline void MODULE_AUTHOR(char *name)
{
#define __init
#define __exit
-static inline int request_module(const char *name, ...)
+static inline int cfs_request_module(const char *name, ...)
{
return (-EINVAL);
}
-static inline void __module_get(struct module *module)
+static inline void __cfs_module_get(cfs_module_t *module)
{
}
-static inline int try_module_get(struct module *module)
+static inline int cfs_try_module_get(cfs_module_t *module)
{
return 1;
}
-static inline void module_put(struct module *module)
+static inline void cfs_module_put(cfs_module_t *module)
{
}
-static inline int module_refcount(struct module *m)
+static inline int cfs_module_refcount(cfs_module_t *m)
{
return 1;
}
*
***************************************************************************/
-struct shrinker {
+struct cfs_shrinker {
;
};
-#define DEFAULT_SEEKS (0)
+#define CFS_DEFAULT_SEEKS (0)
-typedef int (*shrinker_t)(int, unsigned int);
+typedef int (*cfs_shrinker_t)(int, unsigned int);
-static inline struct shrinker *set_shrinker(int seeks, shrinker_t shrinkert)
+static inline
+struct cfs_shrinker *cfs_set_shrinker(int seeks, cfs_shrinker_t shrink)
{
- return (struct shrinker *)0xdeadbea1; // Cannot return NULL here
+ return (struct cfs_shrinker *)0xdeadbea1; // Cannot return NULL here
}
-static inline void remove_shrinker(struct shrinker *shrinker)
+static inline void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
{
}
***************************************************************************/
struct radix_tree_root {
- struct list_head list;
+ cfs_list_t list;
void *rnode;
};
struct radix_tree_node {
- struct list_head _node;
+ cfs_list_t _node;
unsigned long index;
void *item;
};
CFS_INIT_LIST_HEAD(&node->_node);
node->index = idx;
node->item = item;
- list_add_tail(&node->_node, &root->list);
+ cfs_list_add_tail(&node->_node, &root->list);
root->rnode = (void *)1001;
return 0;
}
{
struct radix_tree_node *node;
- if (list_empty(&root->list))
+ if (cfs_list_empty(&root->list))
return NULL;
cfs_list_for_each_entry_typed(node, &root->list,
if (p == NULL)
return NULL;
- list_del_init(&p->_node);
+ cfs_list_del_init(&p->_node);
item = p->item;
free(p);
- if (list_empty(&root->list))
+ if (cfs_list_empty(&root->list))
root->rnode = NULL;
return item;
#include <asm/types.h>
#ifndef HAVE_UMODE_T
-typedef unsigned short umode_t;
+typedef unsigned short cfs_umode_t;
+#else
+typedef umode_t cfs_umode_t;
#endif
/*
#define __LIBCFS_USER_BITOPS_H__
/* test if bit nr is set in bitmap addr; returns previous value of bit nr */
-static __inline__ int test_and_set_bit(int nr, unsigned long *addr)
+static __inline__ int cfs_test_and_set_bit(int nr, unsigned long *addr)
{
unsigned long mask;
return nr;
}
-#define set_bit(n, a) test_and_set_bit(n, a)
+#define cfs_set_bit(n, a) cfs_test_and_set_bit(n, a)
/* clear bit nr in bitmap addr; returns previous value of bit nr*/
-static __inline__ int test_and_clear_bit(int nr, unsigned long *addr)
+static __inline__ int cfs_test_and_clear_bit(int nr, unsigned long *addr)
{
unsigned long mask;
return nr;
}
-#define clear_bit(n, a) test_and_clear_bit(n, a)
+#define cfs_clear_bit(n, a) cfs_test_and_clear_bit(n, a)
-static __inline__ int test_bit(int nr, const unsigned long *addr)
+static __inline__ int cfs_test_bit(int nr, const unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG - 1))) &
((addr)[nr / BITS_PER_LONG])) != 0;
}
/* using binary seach */
-static __inline__ unsigned long __fls(long data)
+static __inline__ unsigned long __cfs_fls(long data)
{
int pos = 32;
return pos;
}
-static __inline__ unsigned long __ffs(long data)
+static __inline__ unsigned long __cfs_ffs(long data)
{
int pos = 0;
return pos;
}
-#define __ffz(x) __ffs(~(x))
-#define __flz(x) __fls(~(x))
+#define __cfs_ffz(x) __cfs_ffs(~(x))
+#define __cfs_flz(x) __cfs_fls(~(x))
-unsigned long find_next_bit(unsigned long *addr,
- unsigned long size, unsigned long offset);
+unsigned long cfs_find_next_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset);
-unsigned long find_next_zero_bit(unsigned long *addr,
- unsigned long size, unsigned long offset);
+unsigned long cfs_find_next_zero_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset);
-#define find_first_bit(addr,size) (find_next_bit((addr),(size),0))
-#define find_first_zero_bit(addr,size) (find_next_zero_bit((addr),(size),0))
+#define cfs_find_first_bit(addr,size) (cfs_find_next_bit((addr),(size),0))
+#define cfs_find_first_zero_bit(addr,size) \
+ (cfs_find_next_zero_bit((addr),(size),0))
#endif
*/
/*
- * spin_lock
+ * cfs_spin_lock
*
- * - spin_lock_init(x)
- * - spin_lock(x)
- * - spin_unlock(x)
- * - spin_trylock(x)
+ * - cfs_spin_lock_init(x)
+ * - cfs_spin_lock(x)
+ * - cfs_spin_unlock(x)
+ * - cfs_spin_trylock(x)
+ * - cfs_spin_lock_bh_init(x)
+ * - cfs_spin_lock_bh(x)
+ * - cfs_spin_unlock_bh(x)
*
- * - spin_lock_irqsave(x, f)
- * - spin_unlock_irqrestore(x, f)
+ * - cfs_spin_is_locked(x)
+ * - cfs_spin_lock_irqsave(x, f)
+ * - cfs_spin_unlock_irqrestore(x, f)
*
* No-op implementation.
*/
-struct spin_lock {int foo;};
+struct cfs_spin_lock {int foo;};
-typedef struct spin_lock spinlock_t;
+typedef struct cfs_spin_lock cfs_spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#define CFS_SPIN_LOCK_UNLOCKED (cfs_spinlock_t) { }
#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
-void spin_lock_init(spinlock_t *lock);
-void spin_lock(spinlock_t *lock);
-void spin_unlock(spinlock_t *lock);
-int spin_trylock(spinlock_t *lock);
-void spin_lock_bh_init(spinlock_t *lock);
-void spin_lock_bh(spinlock_t *lock);
-void spin_unlock_bh(spinlock_t *lock);
+void cfs_spin_lock_init(cfs_spinlock_t *lock);
+void cfs_spin_lock(cfs_spinlock_t *lock);
+void cfs_spin_unlock(cfs_spinlock_t *lock);
+int cfs_spin_trylock(cfs_spinlock_t *lock);
+void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
+void cfs_spin_lock_bh(cfs_spinlock_t *lock);
+void cfs_spin_unlock_bh(cfs_spinlock_t *lock);
-static inline int spin_is_locked(spinlock_t *l) {return 1;}
-static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f){}
-static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f){}
+static inline int cfs_spin_is_locked(cfs_spinlock_t *l) {return 1;}
+static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f){}
+static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
+ unsigned long f){}
/*
* Semaphore
*
- * - sema_init(x, v)
+ * - cfs_sema_init(x, v)
* - __down(x)
* - __up(x)
*/
-typedef struct semaphore {
+typedef struct cfs_semaphore {
int foo;
-} mutex_t;
+} cfs_semaphore_t;
-void sema_init(struct semaphore *s, int val);
-void __down(struct semaphore *s);
-void __up(struct semaphore *s);
-
-/*
- * Mutex:
- *
- * - init_mutex(x)
- * - init_mutex_locked(x)
- * - mutex_up(x)
- * - mutex_down(x)
- */
-#define DECLARE_MUTEX(name) \
- struct semaphore name = { 1 }
-
-#define mutex_up(s) __up(s)
-#define up(s) mutex_up(s)
-#define mutex_down(s) __down(s)
-#define down(s) mutex_down(s)
-
-#define init_MUTEX(x) sema_init(x, 1)
-#define init_MUTEX_LOCKED(x) sema_init(x, 0)
-#define init_mutex(s) init_MUTEX(s)
+void cfs_sema_init(cfs_semaphore_t *s, int val);
+void __down(cfs_semaphore_t *s);
+void __up(cfs_semaphore_t *s);
/*
* Completion:
*
- * - init_completion(c)
- * - complete(c)
- * - wait_for_completion(c)
+ * - cfs_init_completion_module(c)
+ * - cfs_call_wait_handler(t)
+ * - cfs_init_completion(c)
+ * - cfs_complete(c)
+ * - cfs_wait_for_completion(c)
+ * - cfs_wait_for_completion_interruptible(c)
*/
-struct completion {
+typedef struct {
unsigned int done;
cfs_waitq_t wait;
-};
+} cfs_completion_t;
+
typedef int (*cfs_wait_handler_t) (int timeout);
-void init_completion_module(cfs_wait_handler_t handler);
-int call_wait_handler(int timeout);
-void init_completion(struct completion *c);
-void complete(struct completion *c);
-void wait_for_completion(struct completion *c);
-int wait_for_completion_interruptible(struct completion *c);
-
-#define COMPLETION_INITIALIZER(work) \
+void cfs_init_completion_module(cfs_wait_handler_t handler);
+int cfs_call_wait_handler(int timeout);
+void cfs_init_completion(cfs_completion_t *c);
+void cfs_complete(cfs_completion_t *c);
+void cfs_wait_for_completion(cfs_completion_t *c);
+int cfs_wait_for_completion_interruptible(cfs_completion_t *c);
+
+#define CFS_COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#define DECLARE_COMPLETION(work) \
- struct completion work = COMPLETION_INITIALIZER(work)
+#define CFS_DECLARE_COMPLETION(work) \
+ cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)
-#define INIT_COMPLETION(x) ((x).done = 0)
+#define CFS_INIT_COMPLETION(x) ((x).done = 0)
/*
- * rw_semaphore:
+ * cfs_rw_semaphore:
*
- * - init_rwsem(x)
- * - down_read(x)
- * - up_read(x)
- * - down_write(x)
- * - up_write(x)
+ * - cfs_init_rwsem(x)
+ * - cfs_down_read(x)
+ * - cfs_down_read_trylock(x)
+ * - cfs_down_write(struct cfs_rw_semaphore *s);
+ * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
+ * - cfs_up_read(x)
+ * - cfs_up_write(x)
+ * - cfs_fini_rwsem(x)
*/
-struct rw_semaphore {
+typedef struct cfs_rw_semaphore {
int foo;
-};
+} cfs_rw_semaphore_t;
-void init_rwsem(struct rw_semaphore *s);
-void down_read(struct rw_semaphore *s);
-int down_read_trylock(struct rw_semaphore *s);
-void down_write(struct rw_semaphore *s);
-int down_write_trylock(struct rw_semaphore *s);
-void up_read(struct rw_semaphore *s);
-void up_write(struct rw_semaphore *s);
-void fini_rwsem(struct rw_semaphore *s);
+void cfs_init_rwsem(cfs_rw_semaphore_t *s);
+void cfs_down_read(cfs_rw_semaphore_t *s);
+int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
+void cfs_down_write(cfs_rw_semaphore_t *s);
+int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
+void cfs_up_read(cfs_rw_semaphore_t *s);
+void cfs_up_write(cfs_rw_semaphore_t *s);
+void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
/*
* read-write lock : Need to be investigated more!!
* XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
*
- * - DECLARE_RWLOCK(l)
- * - rwlock_init(x)
- * - read_lock(x)
- * - read_unlock(x)
- * - write_lock(x)
- * - write_unlock(x)
+ * - cfs_rwlock_init(x)
+ * - cfs_read_lock(x)
+ * - cfs_read_unlock(x)
+ * - cfs_write_lock(x)
+ * - cfs_write_unlock(x)
+ * - cfs_write_lock_irqsave(x)
+ * - cfs_write_unlock_irqrestore(x)
+ * - cfs_read_lock_irqsave(x)
+ * - cfs_read_unlock_irqrestore(x)
*/
-typedef struct rw_semaphore rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { }
+typedef cfs_rw_semaphore_t cfs_rwlock_t;
+#define CFS_RW_LOCK_UNLOCKED (cfs_rwlock_t) { }
-#define rwlock_init(pl) init_rwsem(pl)
+#define cfs_rwlock_init(pl) cfs_init_rwsem(pl)
-#define read_lock(l) down_read(l)
-#define read_unlock(l) up_read(l)
-#define write_lock(l) down_write(l)
-#define write_unlock(l) up_write(l)
+#define cfs_read_lock(l) cfs_down_read(l)
+#define cfs_read_unlock(l) cfs_up_read(l)
+#define cfs_write_lock(l) cfs_down_write(l)
+#define cfs_write_unlock(l) cfs_up_write(l)
static inline void
-write_lock_irqsave(rwlock_t *l, unsigned long f) { write_lock(l); }
+cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
static inline void
-write_unlock_irqrestore(rwlock_t *l, unsigned long f) { write_unlock(l); }
+cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }
static inline void
-read_lock_irqsave(rwlock_t *l, unsigned long f) { read_lock(l); }
+cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
static inline void
-read_unlock_irqrestore(rwlock_t *l, unsigned long f) { read_unlock(l); }
+cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }
/*
- * Atomic for user-space
- * Copied from liblustre
+ * Atomic for single-threaded user-space
*/
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(a) ((a)->counter)
-#define atomic_set(a,b) do {(a)->counter = b; } while (0)
-#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
-#define atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
-#define atomic_inc(a) (((a)->counter)++)
-#define atomic_dec(a) do { (a)->counter--; } while (0)
-#define atomic_add(b,a) do {(a)->counter += b;} while (0)
-#define atomic_add_return(n,a) ((a)->counter += n)
-#define atomic_inc_return(a) atomic_add_return(1,a)
-#define atomic_sub(b,a) do {(a)->counter -= b;} while (0)
-#define atomic_sub_return(n,a) ((a)->counter -= n)
-#define atomic_dec_return(a) atomic_sub_return(1,a)
-#define atomic_add_unless(v, a, u) ((v)->counter != u ? (v)->counter += a : 0)
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+typedef struct { volatile int counter; } cfs_atomic_t;
+#define CFS_ATOMIC_INIT(i) { (i) }
+
+#define cfs_atomic_read(a) ((a)->counter)
+#define cfs_atomic_set(a,b) do {(a)->counter = b; } while (0)
+#define cfs_atomic_dec_and_test(a) ((--((a)->counter)) == 0)
+#define cfs_atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
+#define cfs_atomic_inc(a) (((a)->counter)++)
+#define cfs_atomic_dec(a) do { (a)->counter--; } while (0)
+#define cfs_atomic_add(b,a) do {(a)->counter += b;} while (0)
+#define cfs_atomic_add_return(n,a) ((a)->counter += n)
+#define cfs_atomic_inc_return(a) cfs_atomic_add_return(1,a)
+#define cfs_atomic_sub(b,a) do {(a)->counter -= b;} while (0)
+#define cfs_atomic_sub_return(n,a) ((a)->counter -= n)
+#define cfs_atomic_dec_return(a) cfs_atomic_sub_return(1,a)
+#define cfs_atomic_add_unless(v, a, u) \
+ ((v)->counter != u ? (v)->counter += a : 0)
+#define cfs_atomic_inc_not_zero(v) cfs_atomic_add_unless((v), 1, 0)
#ifdef HAVE_LIBPTHREAD
#include <pthread.h>
/*
- * Completion
+ * Multi-threaded user space completion APIs
*/
-struct cfs_completion {
+typedef struct {
int c_done;
pthread_cond_t c_cond;
pthread_mutex_t c_mut;
-};
+} cfs_mt_completion_t;
-void cfs_init_completion(struct cfs_completion *c);
-void cfs_fini_completion(struct cfs_completion *c);
-void cfs_complete(struct cfs_completion *c);
-void cfs_wait_for_completion(struct cfs_completion *c);
+void cfs_mt_init_completion(cfs_mt_completion_t *c);
+void cfs_mt_fini_completion(cfs_mt_completion_t *c);
+void cfs_mt_complete(cfs_mt_completion_t *c);
+void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);
/*
- * atomic.h
+ * Multi-threaded user space atomic APIs
*/
-typedef struct { volatile int counter; } cfs_atomic_t;
+typedef struct { volatile int counter; } cfs_mt_atomic_t;
-int cfs_atomic_read(cfs_atomic_t *a);
-void cfs_atomic_set(cfs_atomic_t *a, int b);
-int cfs_atomic_dec_and_test(cfs_atomic_t *a);
-void cfs_atomic_inc(cfs_atomic_t *a);
-void cfs_atomic_dec(cfs_atomic_t *a);
-void cfs_atomic_add(int b, cfs_atomic_t *a);
-void cfs_atomic_sub(int b, cfs_atomic_t *a);
+int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
+void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
+int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
+void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
+void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
+void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
+void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
#endif /* HAVE_LIBPTHREAD */
* Mutex interface.
*
**************************************************************************/
+#define CFS_DECLARE_MUTEX(name) \
+ cfs_semaphore_t name = { 1 }
+
+#define cfs_mutex_up(s) __up(s)
+#define cfs_up(s) cfs_mutex_up(s)
+#define cfs_mutex_down(s) __down(s)
+#define cfs_down(s) cfs_mutex_down(s)
+
+#define cfs_init_mutex(x) cfs_sema_init(x, 1)
+#define cfs_init_mutex_locked(x) cfs_sema_init(x, 0)
-struct mutex {
- struct semaphore m_sem;
-};
+typedef struct cfs_mutex {
+ cfs_semaphore_t m_sem;
+} cfs_mutex_t;
-#define DEFINE_MUTEX(m) struct mutex m
+#define CFS_DEFINE_MUTEX(m) cfs_mutex_t m
-static inline void mutex_init(struct mutex *mutex)
+static inline void cfs_mutex_init(cfs_mutex_t *mutex)
{
- init_mutex(&mutex->m_sem);
+ cfs_init_mutex(&mutex->m_sem);
}
-static inline void mutex_lock(struct mutex *mutex)
+static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
{
- mutex_down(&mutex->m_sem);
+ cfs_mutex_down(&mutex->m_sem);
}
-static inline void mutex_unlock(struct mutex *mutex)
+static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
{
- mutex_up(&mutex->m_sem);
+ cfs_mutex_up(&mutex->m_sem);
}
/**
* \retval 0 try-lock succeeded (lock acquired).
* \retval errno indicates lock contention.
*/
-static inline int mutex_down_trylock(struct mutex *mutex)
+static inline int cfs_mutex_down_trylock(cfs_mutex_t *mutex)
{
return 0;
}
* \retval 1 try-lock succeeded (lock acquired).
* \retval 0 indicates lock contention.
*/
-static inline int mutex_trylock(struct mutex *mutex)
+static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
{
- return !mutex_down_trylock(mutex);
+ return !cfs_mutex_down_trylock(mutex);
}
-static inline void mutex_destroy(struct mutex *lock)
+static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
{
}
*
* \retval 0 mutex is not locked. This should never happen.
*/
-static inline int mutex_is_locked(struct mutex *lock)
+static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
{
return 1;
}
*
**************************************************************************/
-struct lock_class_key {
+typedef struct cfs_lock_class_key {
int foo;
-};
+} cfs_lock_class_key_t;
-static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
+static inline void cfs_lockdep_set_class(void *lock,
+ cfs_lock_class_key_t *key)
{
}
-static inline void lockdep_off(void)
+static inline void cfs_lockdep_off(void)
{
}
-static inline void lockdep_on(void)
+static inline void cfs_lockdep_on(void)
{
}
-/* This has to be a macro, so that can be undefined in kernels that do not
- * support lockdep. */
-#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
-#define spin_lock_nested(lock, subclass) spin_lock(lock)
-#define down_read_nested(lock, subclass) down_read(lock)
-#define down_write_nested(lock, subclass) down_write(lock)
+#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
+#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
+#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
+#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)
/* !__KERNEL__ */
typedef struct page {
void *addr;
unsigned long index;
- struct list_head list;
+ cfs_list_t list;
unsigned long private;
/* internally used by liblustre file i/o */
#ifdef LIBLUSTRE_HANDLE_UNALIGNED_PAGE
int _managed;
#endif
- struct list_head _node;
+ cfs_list_t _node;
} cfs_page_t;
int size;
} cfs_mem_cache_t;
-#define SLAB_HWCACHE_ALIGN 0
-#define SLAB_DESTROY_BY_RCU 0
-#define SLAB_KERNEL 0
-#define SLAB_NOFS 0
+#define CFS_SLAB_HWCACHE_ALIGN 0
+#define CFS_SLAB_DESTROY_BY_RCU 0
+#define CFS_SLAB_KERNEL 0
+#define CFS_SLAB_NOFS 0
cfs_mem_cache_t *
cfs_mem_cache_create(const char *, size_t, size_t, unsigned long);
/*
* Copy to/from user
*/
-static inline int copy_from_user(void *a,void *b, int c)
+static inline int cfs_copy_from_user(void *a,void *b, int c)
{
memcpy(a,b,c);
return 0;
}
-static inline int copy_to_user(void *a,void *b, int c)
+static inline int cfs_copy_to_user(void *a,void *b, int c)
{
memcpy(a,b,c);
return 0;
* Just present a single processor until will add thread support.
*/
#ifndef smp_processor_id
-#define smp_processor_id() 0
+#define cfs_smp_processor_id() 0
+#else
+#define cfs_smp_processor_id() smp_processor_id()
#endif
#ifndef num_online_cpus
-#define num_online_cpus() 1
+#define cfs_num_online_cpus() 1
+#else
+#define cfs_num_online_cpus() num_online_cpus()
#endif
#ifndef num_possible_cpus
-#define num_possible_cpus() 1
+#define cfs_num_possible_cpus() 1
+#else
+#define cfs_num_possible_cpus() num_possible_cpus()
#endif
/*
- * Wait Queue.
+ * Wait Queue.
*/
typedef struct cfs_waitlink {
- struct list_head sleeping;
+ cfs_list_t sleeping;
void *process;
} cfs_waitlink_t;
typedef struct cfs_waitq {
- struct list_head sleepers;
+ cfs_list_t sleepers;
} cfs_waitq_t;
-/* XXX: need to replace wake_up with cfs_waitq_signal() */
-#define wake_up(q) cfs_waitq_signal(q)
-
/*
* Task states
*/
*/
typedef struct {
- struct list_head tl_list;
+ cfs_list_t tl_list;
void (*function)(ulong_ptr_t unused);
ulong_ptr_t data;
long expires;
} cfs_timer_t;
-#define in_interrupt() (0)
+#define cfs_in_interrupt() (0)
typedef void cfs_psdev_t;
}
#define cfs_lock_kernel() do {} while (0)
-#define cfs_sigfillset(l) do {} while (0)
+#define cfs_sigfillset(l) do {} while (0)
#define cfs_recalc_sigpending(l) do {} while (0)
#define cfs_kernel_thread(l,m,n) LBUG()
#define cfs_kthread_run(fn,d,fmt,...) LBUG()
})
#endif
+/*
+ * Groups
+ */
+typedef struct cfs_group_info {
+
+} cfs_group_info_t;
+
#ifndef min
# define min(x,y) ((x)<(y) ? (x) : (y))
#endif
typedef time_t cfs_time_t;
typedef time_t cfs_duration_t;
-/* looks like linux */
-#define time_after(a, b) ((long)(b) - (long)(a) < 0)
-#define time_before(a, b) time_after(b,a)
-#define time_after_eq(a,b) ((long)(a) - (long)(b) >= 0)
-#define time_before_eq(a,b) time_after_eq(b,a)
+#define cfs_time_before(a, b) ((long)(a) - (long)(b) < 0)
+#define cfs_time_beforeq(a, b) ((long)(b) - (long)(a) >= 0)
static inline cfs_time_t cfs_time_current(void)
{
static inline void l_mutex_lock(l_mutex_t *mutex)
{
#if L_LOCK_DEBUG
- printf("lock mutex :%s\n", mutex->s_name);
+ printf("lock cfs_mutex :%s\n", mutex->s_name);
#endif
sem_wait(mutex->s_sem);
}
static inline void l_mutex_unlock(l_mutex_t *mutex)
{
#if L_LOCK_DEBUG
- printf("unlock mutex: %s\n", mutex->s_name);
+ printf("unlock cfs_mutex: %s\n", mutex->s_name);
#endif
sem_post(mutex->s_sem);
}
#define CFS_SYSFS_MODULE_PARM 0 /* no sysfs access to module parameters */
-#define cond_resched our_cond_resched
-void our_cond_resched();
-
#define LASSERT_SPIN_LOCKED(lock) do {} while(0)
#define LASSERT_SEM_LOCKED(sem) LASSERT(down_trylock(sem) != 0)
#define cfs_assert _ASSERT
#ifndef get_cpu
-#define cfs_get_cpu() smp_processor_id()
+#define cfs_get_cpu() cfs_smp_processor_id()
#define cfs_put_cpu() do { } while (0)
#else
#define cfs_get_cpu() get_cpu()
#endif /* __KERNEL__*/
-#ifndef THREAD_SIZE
-# define THREAD_SIZE query_stack_size()
+#ifndef CFS_THREAD_SIZE
+# define CFS_THREAD_SIZE query_stack_size()
#endif
-#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
+#define LUSTRE_TRACE_SIZE (CFS_THREAD_SIZE >> 5)
#ifdef __KERNEL__
-#define CDEBUG_STACK() (THREAD_SIZE - (__u32)IoGetRemainingStackSize())
-#define CHECK_STACK() do {} while(0)
+#define CDEBUG_STACK() (CFS_THREAD_SIZE - (__u32)IoGetRemainingStackSize())
+#define CFS_CHECK_STACK() do {} while(0)
#else /* !__KERNEL__ */
-#define CHECK_STACK() do { } while(0)
+#define CFS_CHECK_STACK() do { } while(0)
#define CDEBUG_STACK() (0L)
#endif /* __KERNEL__ */
unsigned long simple_strtoul(const char *cp,char **endp, unsigned int base);
-static inline int set_bit(int nr, void * addr)
+static inline int cfs_set_bit(int nr, void * addr)
{
(((volatile ULONG *) addr)[nr >> 5]) |= (1UL << (nr & 31));
return *((int *) addr);
}
-static inline int test_bit(int nr, void * addr)
+static inline int cfs_test_bit(int nr, void * addr)
{
return (int)(((1UL << (nr & 31)) & (((volatile ULONG *) addr)[nr >> 5])) != 0);
}
-static inline int clear_bit(int nr, void * addr)
+static inline int cfs_clear_bit(int nr, void * addr)
{
(((volatile ULONG *) addr)[nr >> 5]) &= (~(1UL << (nr & 31)));
return *((int *) addr);
}
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int cfs_test_and_set_bit(int nr, volatile void *addr)
{
int rc;
unsigned char mask;
return rc;
}
-#define ext2_set_bit(nr,addr) (set_bit(nr, addr), 0)
-#define ext2_clear_bit(nr,addr) (clear_bit(nr, addr), 0)
-#define ext2_test_bit(nr,addr) test_bit(nr, addr)
+#define ext2_set_bit(nr,addr) (cfs_set_bit(nr, addr), 0)
+#define ext2_clear_bit(nr,addr) (cfs_clear_bit(nr, addr), 0)
+#define ext2_test_bit(nr,addr) cfs_test_bit(nr, addr)
-static inline int ffs(int x)
+static inline int cfs_ffs(int x)
{
int r = 1;
return r;
}
-static inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __cfs_ffs(unsigned long word)
{
int num = 0;
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline
-int fls(int x)
+int cfs_fls(int x)
{
int r = 32;
return r;
}
-static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
+static inline unsigned cfs_find_first_bit(const unsigned long *addr,
+ unsigned size)
{
unsigned x = 0;
while (x < size) {
unsigned long val = *addr++;
if (val)
- return __ffs(val) + x;
+ return __cfs_ffs(val) + x;
x += (sizeof(*addr)<<3);
}
return x;
}
}
-#define get_random_bytes(buf, len) read_random(buf, len)
+#define cfs_get_random_bytes(buf, len) read_random(buf, len)
/* do NOT use function or expression as parameters ... */
((unsigned char *)&addr)[1], \
((unsigned char *)&addr)[0]
-static int copy_from_user(void *to, void *from, int c)
+static int cfs_copy_from_user(void *to, void *from, int c)
{
memcpy(to, from, c);
return 0;
}
-static int copy_to_user(void *to, const void *from, int c)
+static int cfs_copy_to_user(void *to, const void *from, int c)
{
memcpy(to, from, c);
return 0;
0 \
)
-#define num_physpages (64 * 1024)
-#define CFS_NUM_CACHEPAGES num_physpages
+#define cfs_num_physpages (64 * 1024)
+#define CFS_NUM_CACHEPAGES cfs_num_physpages
#else
struct file_operations
{
- struct module *owner;
+ cfs_module_t *owner;
loff_t (*llseek)(struct file * file, loff_t offset, int origin);
ssize_t (*read) (struct file * file, char * buf, size_t nbytes, loff_t *ppos);
ssize_t (*write)(struct file * file, const char * buffer,
int i_uid;
int i_gid;
__u32 i_flags;
- mutex_t i_sem;
+ cfs_mutex_t i_sem;
void * i_priv;
};
#define I_FREEING 0x0001
struct dentry {
- atomic_t d_count;
+ cfs_atomic_t d_count;
struct {
int len;
char * name;
inode->i_size = i_size;
}
-struct kstatfs {
+typedef struct cfs_kstatfs {
u64 f_type;
long f_bsize;
u64 f_blocks;
long f_namelen;
long f_frsize;
long f_spare[5];
-};
+} cfs_kstatfs_t;
struct super_block {
void * s_fs_info;
/* all radix tree routines should be protected by external locks */
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
- unsigned long first_index, unsigned int max_items);
+ unsigned long first_index, unsigned int max_items);
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index);
-int radix_tree_insert(struct radix_tree_root *root,unsigned long index, void *item);
+int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
+ void *item);
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index);
-struct rcu_head {
+typedef struct cfs_rcu_head {
int foo;
-};
+} cfs_rcu_head_t;
#else /* !__KERNEL__ */
* spinlock & event definitions
*/
-typedef struct spin_lock spinlock_t;
+typedef struct cfs_spin_lock cfs_spinlock_t;
/* atomic */
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile int counter; } cfs_atomic_t;
-#define ATOMIC_INIT(i) { i }
+#define CFS_ATOMIC_INIT(i) { i }
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
+#define cfs_atomic_read(v) ((v)->counter)
+#define cfs_atomic_set(v,i) (((v)->counter) = (i))
-void FASTCALL atomic_add(int i, atomic_t *v);
-void FASTCALL atomic_sub(int i, atomic_t *v);
+void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v);
+void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v);
-int FASTCALL atomic_sub_and_test(int i, atomic_t *v);
+int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v);
-void FASTCALL atomic_inc(atomic_t *v);
-void FASTCALL atomic_dec(atomic_t *v);
+void FASTCALL cfs_atomic_inc(cfs_atomic_t *v);
+void FASTCALL cfs_atomic_dec(cfs_atomic_t *v);
-int FASTCALL atomic_dec_and_test(atomic_t *v);
-int FASTCALL atomic_inc_and_test(atomic_t *v);
+int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v);
+int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v);
-int FASTCALL atomic_add_return(int i, atomic_t *v);
-int FASTCALL atomic_sub_return(int i, atomic_t *v);
+int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v);
+int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);
-#define atomic_inc_return(v) atomic_add_return(1, v)
-#define atomic_dec_return(v) atomic_sub_return(1, v)
+#define cfs_atomic_inc_return(v) cfs_atomic_add_return(1, v)
+#define cfs_atomic_dec_return(v) cfs_atomic_sub_return(1, v)
-int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock);
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock);
/* event */
* N/A
*/
static inline void
- cfs_init_event(event_t *event, int type, int status)
+cfs_init_event(event_t *event, int type, int status)
{
KeInitializeEvent(
event,
NTSTATUS Status;
LARGE_INTEGER TimeOut;
- TimeOut.QuadPart = -1 * (10000000/HZ) * timeout;
+ TimeOut.QuadPart = -1 * (10000000/CFS_HZ) * timeout;
Status = KeWaitForSingleObject(
event,
*
*/
-struct spin_lock {
+struct cfs_spin_lock {
KSPIN_LOCK lock;
KIRQL irql;
};
-#define CFS_DECL_SPIN(name) spinlock_t name;
-#define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name;
+#define CFS_DECL_SPIN(name) cfs_spinlock_t name;
+#define CFS_DECL_SPIN_EXTERN(name) extern cfs_spinlock_t name;
-#define SPIN_LOCK_UNLOCKED {0}
+#define CFS_SPIN_LOCK_UNLOCKED {0}
-static inline void spin_lock_init(spinlock_t *lock)
+static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
{
KeInitializeSpinLock(&(lock->lock));
}
-static inline void spin_lock(spinlock_t *lock)
+static inline void cfs_spin_lock(cfs_spinlock_t *lock)
{
KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}
-static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
+static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass)
{
KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}
-static inline void spin_unlock(spinlock_t *lock)
+static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
{
KIRQL irql = lock->irql;
KeReleaseSpinLock(&(lock->lock), irql);
}
-#define spin_lock_irqsave(lock, flags) do {(flags) = 0; spin_lock(lock);} while(0)
-#define spin_unlock_irqrestore(lock, flags) do {spin_unlock(lock);} while(0)
+#define cfs_spin_lock_irqsave(lock, flags) \
+do {(flags) = 0; cfs_spin_lock(lock);} while(0)
+
+#define cfs_spin_unlock_irqrestore(lock, flags) \
+do {cfs_spin_unlock(lock);} while(0)
/* There's no corresponding routine in windows kernel.
extern int libcfs_mp_system;
-static int spin_trylock(spinlock_t *lock)
+static int cfs_spin_trylock(cfs_spinlock_t *lock)
{
KIRQL Irql;
int rc = 0;
return rc;
}
-static int spin_is_locked(spinlock_t *lock)
+static int cfs_spin_is_locked(cfs_spinlock_t *lock)
{
#if _WIN32_WINNT >= 0x502
/* KeTestSpinLock only avalilable on 2k3 server or later */
/* synchronization between cpus: it will disable all DPCs
kernel task scheduler on the CPU */
-#define spin_lock_bh(x) spin_lock(x)
-#define spin_unlock_bh(x) spin_unlock(x)
-#define spin_lock_bh_init(x) spin_lock_init(x)
+#define cfs_spin_lock_bh(x) cfs_spin_lock(x)
+#define cfs_spin_unlock_bh(x) cfs_spin_unlock(x)
+#define cfs_spin_lock_bh_init(x) cfs_spin_lock_init(x)
/*
- * rw_semaphore (using ERESOURCE)
+ * cfs_rw_semaphore (using ERESOURCE)
*/
-typedef struct rw_semaphore {
+typedef struct cfs_rw_semaphore {
ERESOURCE rwsem;
-} rw_semaphore_t;
+} cfs_rw_semaphore_t;
-#define CFS_DECL_RWSEM(name) rw_semaphore_t name
-#define CFS_DECL_RWSEM_EXTERN(name) extern rw_semaphore_t name
-#define DECLARE_RWSEM CFS_DECL_RWSEM
+#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name
+#define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name
/*
- * init_rwsem
- * To initialize the the rw_semaphore_t structure
+ * cfs_init_rwsem
+ * To initialize the cfs_rw_semaphore_t structure
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
*
* Return Value:
* N/A
* N/A
*/
-static inline void init_rwsem(rw_semaphore_t *s)
+static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
{
ExInitializeResourceLite(&s->rwsem);
}
-#define rwsem_init init_rwsem
+#define rwsem_init cfs_init_rwsem
/*
- * fini_rwsem
- * To finilize/destroy the the rw_semaphore_t structure
+ * cfs_fini_rwsem
+ * To finalize/destroy the cfs_rw_semaphore_t structure
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
*
* Return Value:
* N/A
* Just define it NULL for other systems.
*/
-static inline void fini_rwsem(rw_semaphore_t *s)
+static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
{
ExDeleteResourceLite(&s->rwsem);
}
-#define rwsem_fini fini_rwsem
/*
- * down_read
- * To acquire read-lock of the rw_semahore
+ * cfs_down_read
+ * To acquire read-lock of the cfs_rw_semaphore
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
*
* Return Value:
* N/A
* N/A
*/
-static inline void down_read(struct rw_semaphore *s)
+static inline void cfs_down_read(cfs_rw_semaphore_t *s)
{
ExAcquireResourceSharedLite(&s->rwsem, TRUE);
}
-#define down_read_nested down_read
+#define cfs_down_read_nested cfs_down_read
/*
- * down_read_trylock
- * To acquire read-lock of the rw_semahore without blocking
+ * cfs_down_read_trylock
+ * To acquire read-lock of the cfs_rw_semaphore without blocking
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
*
* Return Value:
* Zero: failed to acquire the read lock
* This routine will return immediately without waiting.
*/
-static inline int down_read_trylock(struct rw_semaphore *s)
+static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
{
return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
}
/*
- * down_write
- * To acquire write-lock of the rw_semahore
+ * cfs_down_write
+ * To acquire write-lock of the cfs_rw_semaphore
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
*
* Return Value:
* N/A
* N/A
*/
-static inline void down_write(struct rw_semaphore *s)
+static inline void cfs_down_write(cfs_rw_semaphore_t *s)
{
ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
}
-#define down_write_nested down_write
+#define cfs_down_write_nested cfs_down_write
/*
* down_write_trylock
- * To acquire write-lock of the rw_semahore without blocking
+ * To acquire write-lock of the cfs_rw_semaphore without blocking
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
*
* Return Value:
* Zero: failed to acquire the write lock
* This routine will return immediately without waiting.
*/
-static inline int down_write_trylock(struct rw_semaphore *s)
+static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
{
return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
}
/*
- * up_read
- * To release read-lock of the rw_semahore
+ * cfs_up_read
+ * To release read-lock of the cfs_rw_semaphore
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
*
* Return Value:
* N/A
* N/A
*/
-static inline void up_read(struct rw_semaphore *s)
+static inline void cfs_up_read(cfs_rw_semaphore_t *s)
{
ExReleaseResourceForThreadLite(
&(s->rwsem),
/*
- * up_write
- * To release write-lock of the rw_semahore
+ * cfs_up_write
+ * To release write-lock of the cfs_rw_semaphore
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
*
* Return Value:
* N/A
* N/A
*/
-static inline void up_write(struct rw_semaphore *s)
+static inline void cfs_up_write(cfs_rw_semaphore_t *s)
{
ExReleaseResourceForThreadLite(
&(s->rwsem),
*/
typedef struct {
- spinlock_t guard;
- int count;
-} rwlock_t;
+ cfs_spinlock_t guard;
+ int count;
+} cfs_rwlock_t;
+
+void cfs_rwlock_init(cfs_rwlock_t * rwlock);
+void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
-void rwlock_init(rwlock_t * rwlock);
-void rwlock_fini(rwlock_t * rwlock);
+void cfs_read_lock(cfs_rwlock_t * rwlock);
+void cfs_read_unlock(cfs_rwlock_t * rwlock);
+void cfs_write_lock(cfs_rwlock_t * rwlock);
+void cfs_write_unlock(cfs_rwlock_t * rwlock);
-void read_lock(rwlock_t * rwlock);
-void read_unlock(rwlock_t * rwlock);
-void write_lock(rwlock_t * rwlock);
-void write_unlock(rwlock_t * rwlock);
+#define cfs_write_lock_irqsave(l, f) do {f = 0; cfs_write_lock(l);} while(0)
+#define cfs_write_unlock_irqrestore(l, f) do {cfs_write_unlock(l);} while(0)
+#define cfs_read_lock_irqsave(l, f) do {f=0; cfs_read_lock(l);} while(0)
+#define cfs_read_unlock_irqrestore(l, f) do {cfs_read_unlock(l);} while(0)
-#define write_lock_irqsave(l, f) do {f = 0; write_lock(l);} while(0)
-#define write_unlock_irqrestore(l, f) do {write_unlock(l);} while(0)
-#define read_lock_irqsave(l, f) do {f=0; read_lock(l);} while(0)
-#define read_unlock_irqrestore(l, f) do {read_unlock(l);} while(0)
+#define cfs_write_lock_bh cfs_write_lock
+#define cfs_write_unlock_bh cfs_write_unlock
-#define write_lock_bh write_lock
-#define write_unlock_bh write_unlock
+typedef struct cfs_lock_class_key {
+ int foo;
+} cfs_lock_class_key_t;
-struct lock_class_key {int foo;};
-#define lockdep_set_class(lock, class) do {} while(0)
+#define cfs_lockdep_set_class(lock, class) do {} while(0)
-static inline void lockdep_off(void)
+static inline void cfs_lockdep_off(void)
{
}
-static inline void lockdep_on(void)
+static inline void cfs_lockdep_on(void)
{
}
* - __up(x)
*/
-struct semaphore{
+typedef struct cfs_semaphore {
KSEMAPHORE sem;
-};
+} cfs_semaphore_t;
-static inline void sema_init(struct semaphore *s, int val)
+static inline void cfs_sema_init(cfs_semaphore_t *s, int val)
{
KeInitializeSemaphore(&s->sem, val, val);
}
-static inline void __down(struct semaphore *s)
+static inline void __down(cfs_semaphore_t *s)
{
KeWaitForSingleObject( &(s->sem), Executive,
KernelMode, FALSE, NULL );
}
-static inline void __up(struct semaphore *s)
+static inline void __up(cfs_semaphore_t *s)
{
KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
}
-static inline int down_trylock(struct semaphore * s)
+static inline int down_trylock(cfs_semaphore_t *s)
{
LARGE_INTEGER timeout = {0};
NTSTATUS status =
* - mutex_down(x)
*/
-#define mutex semaphore
-typedef struct semaphore mutex_t;
+typedef struct cfs_semaphore cfs_mutex_t;
-#define DECLARE_MUTEX(x) mutex_t x
+#define CFS_DECLARE_MUTEX(x) cfs_mutex_t x
/*
* init_mutex
* Notes:
* N/A
*/
-#define mutex_init init_mutex
-static inline void init_mutex(mutex_t *mutex)
+#define cfs_mutex_init cfs_init_mutex
+static inline void cfs_init_mutex(cfs_mutex_t *mutex)
{
- sema_init(mutex, 1);
+ cfs_sema_init(mutex, 1);
}
-#define init_MUTEX init_mutex
/*
* mutex_down
* To acquire the mutex lock
* N/A
*/
-static inline void mutex_down(mutex_t *mutex)
+static inline void cfs_mutex_down(cfs_mutex_t *mutex)
{
__down(mutex);
}
-#define mutex_lock(m) mutex_down(m)
-#define mutex_trylock(s) down_trylock(s)
-#define mutex_lock_nested(m) mutex_down(m)
-#define down(m) mutex_down(m)
+#define cfs_mutex_lock(m) cfs_mutex_down(m)
+#define cfs_mutex_trylock(s) down_trylock(s)
+#define cfs_mutex_lock_nested(m) cfs_mutex_down(m)
+#define cfs_down(m) cfs_mutex_down(m)
/*
* mutex_up
* N/A
*/
-static inline void mutex_up(mutex_t *mutex)
+static inline void cfs_mutex_up(cfs_mutex_t *mutex)
{
__up(mutex);
}
-#define mutex_unlock(m) mutex_up(m)
-#define up(m) mutex_up(m)
+#define cfs_mutex_unlock(m) cfs_mutex_up(m)
+#define cfs_up(m) cfs_mutex_up(m)
/*
* init_mutex_locked
* N/A
*/
-static inline void init_mutex_locked(mutex_t *mutex)
+static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex)
{
- init_mutex(mutex);
- mutex_down(mutex);
+ cfs_init_mutex(mutex);
+ cfs_mutex_down(mutex);
}
-#define init_MUTEX_LOCKED init_mutex_locked
-
-static inline void mutex_destroy(mutex_t *mutex)
+static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
{
}
* - wait_for_completion(c)
*/
-struct completion {
+typedef struct {
event_t event;
-};
+} cfs_completion_t;
/*
* N/A
*/
-static inline void init_completion(struct completion *c)
+static inline void cfs_init_completion(cfs_completion_t *c)
{
cfs_init_event(&(c->event), 1, FALSE);
}
* N/A
*/
-static inline void complete(struct completion *c)
+static inline void cfs_complete(cfs_completion_t *c)
{
cfs_wake_event(&(c->event));
}
* N/A
*/
-static inline void wait_for_completion(struct completion *c)
+static inline void cfs_wait_for_completion(cfs_completion_t *c)
{
cfs_wait_event_internal(&(c->event), 0);
}
-static inline int wait_for_completion_interruptible(struct completion *c)
+static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
{
cfs_wait_event_internal(&(c->event), 0);
return 0;
}
-/*
- * spinlock "implementation"
- */
-
-typedef spinlock_t cfs_spinlock_t;
-
-#define cfs_spin_lock_init(lock) spin_lock_init(lock)
-#define cfs_spin_lock(lock) spin_lock(lock)
-#define cfs_spin_lock_bh(lock) spin_lock_bh(lock)
-#define cfs_spin_unlock(lock) spin_unlock(lock)
-#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
-
-/*
- * rwlock "implementation"
- */
-
-typedef rwlock_t cfs_rwlock_t;
-
-#define cfs_rwlock_init(lock) rwlock_init(lock)
-#define cfs_read_lock(lock) read_lock(lock)
-#define cfs_read_unlock(lock) read_unlock(lock)
-#define cfs_write_lock_bh(lock) write_lock_bh(lock)
-#define cfs_write_unlock_bh(lock) write_unlock_bh(lock)
-
-/*
- * atomic
- */
-
-typedef atomic_t cfs_atomic_t;
-
-#define cfs_atomic_read(atom) atomic_read(atom)
-#define cfs_atomic_inc(atom) atomic_inc(atom)
-#define cfs_atomic_dec(atom) atomic_dec(atom)
-#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
-#define cfs_atomic_set(atom, value) atomic_set(atom, value)
-#define cfs_atomic_add(value, atom) atomic_add(value, atom)
-#define cfs_atomic_sub(value, atom) atomic_sub(value, atom)
-
#else /* !__KERNEL__ */
#endif /* !__KERNEL__ */
#endif
#define CFS_PAGE_MASK (~(PAGE_SIZE - 1))
typedef struct cfs_page {
- void * addr;
- atomic_t count;
- void * private;
- void * mapping;
- __u32 index;
- __u32 flags;
+ void * addr;
+ cfs_atomic_t count;
+ void * private;
+ void * mapping;
+ __u32 index;
+ __u32 flags;
} cfs_page_t;
#define page cfs_page
/* Make it prettier to test the above... */
#define UnlockPage(page) unlock_page(page)
-#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+#define Page_Uptodate(page) cfs_test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page) \
do { \
arch_set_page_uptodate(page); \
- set_bit(PG_uptodate, &(page)->flags); \
+ cfs_set_bit(PG_uptodate, &(page)->flags); \
} while (0)
-#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
-#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
-#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
-#define LockPage(page) set_bit(PG_locked, &(page)->flags)
-#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
-#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
-#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
-#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
-#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
-#define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags)
-
-#define PageError(page) test_bit(PG_error, &(page)->flags)
-#define SetPageError(page) set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
-#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
-
-#define PageActive(page) test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
-
-#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \
+#define ClearPageUptodate(page) cfs_clear_bit(PG_uptodate, &(page)->flags)
+#define PageDirty(page) cfs_test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page) cfs_set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page) cfs_clear_bit(PG_dirty, &(page)->flags)
+#define PageLocked(page) cfs_test_bit(PG_locked, &(page)->flags)
+#define LockPage(page) cfs_set_bit(PG_locked, &(page)->flags)
+#define TryLockPage(page) cfs_test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page) cfs_test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page) cfs_set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) cfs_clear_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page) cfs_test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page) cfs_set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page) cfs_clear_bit(PG_launder, &(page)->flags)
+#define ClearPageArch1(page) cfs_clear_bit(PG_arch_1, &(page)->flags)
+
+#define PageError(page) cfs_test_bit(PG_error, &(page)->flags)
+#define SetPageError(page) cfs_set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page) cfs_clear_bit(PG_error, &(page)->flags)
+#define PageReferenced(page) cfs_test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) cfs_set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) cfs_clear_bit(PG_referenced, &(page)->flags)
+
+#define PageActive(page) cfs_test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page) cfs_set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page) cfs_clear_bit(PG_active, &(page)->flags)
+
+#define PageWriteback(page) cfs_test_bit(PG_writeback, &(page)->flags)
+#define TestSetPageWriteback(page) cfs_test_and_set_bit(PG_writeback, \
&(page)->flags)
-#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \
+#define TestClearPageWriteback(page) cfs_test_and_clear_bit(PG_writeback, \
&(page)->flags)
#define __GFP_FS (1)
static inline void cfs_get_page(cfs_page_t *page)
{
- atomic_inc(&page->count);
+ cfs_atomic_inc(&page->count);
}
static inline void cfs_put_page(cfs_page_t *page)
{
- atomic_dec(&page->count);
+ cfs_atomic_dec(&page->count);
}
static inline int cfs_page_count(cfs_page_t *page)
{
- return atomic_read(&page->count);
+ return cfs_atomic_read(&page->count);
}
#define cfs_page_index(p) ((p)->index)
* SLAB allocator
*/
-#define SLAB_HWCACHE_ALIGN 0
+#define CFS_SLAB_HWCACHE_ALIGN 0
/* The cache name is limited to 20 chars */
};
-extern cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long);
-extern int cfs_mem_cache_destroy ( cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
+extern cfs_mem_cache_t *cfs_mem_cache_create (const char *, size_t, size_t,
+ unsigned long);
+extern int cfs_mem_cache_destroy (cfs_mem_cache_t * );
+extern void *cfs_mem_cache_alloc (cfs_mem_cache_t *, int);
+extern void cfs_mem_cache_free (cfs_mem_cache_t *, void *);
/*
* shrinker
*/
typedef int (*shrink_callback)(int nr_to_scan, gfp_t gfp_mask);
-struct shrinker {
- shrink_callback cb;
+struct cfs_shrinker {
+ shrink_callback cb;
int seeks; /* seeks to recreate an obj */
/* These are for internal use */
- struct list_head list;
+ cfs_list_t list;
long nr; /* objs pending delete */
};
-struct shrinker * set_shrinker(int seeks, shrink_callback cb);
-void remove_shrinker(struct shrinker *s);
+struct cfs_shrinker *cfs_set_shrinker(int seeks, shrink_callback cb);
+void cfs_remove_shrinker(struct cfs_shrinker *s);
int start_shrinker_timer();
void stop_shrinker_timer();
#define CFS_MMSPACE_CLOSE do {} while(0)
-#define mb() do {} while(0)
-#define rmb() mb()
-#define wmb() mb()
-#define cfs_mb() mb()
+#define cfs_mb() do {} while(0)
+#define rmb() cfs_mb()
+#define wmb() cfs_mb()
/*
* MM defintions from (linux/mm.h)
*/
-#define DEFAULT_SEEKS 2 /* shrink seek */
+#define CFS_DEFAULT_SEEKS 2 /* shrink seek */
#else /* !__KERNEL__ */
#define CFS_SYMBOL_LEN 64
struct cfs_symbol {
- char name[CFS_SYMBOL_LEN];
- void *value;
- int ref;
- struct list_head sym_list;
+ char name[CFS_SYMBOL_LEN];
+ void *value;
+ int ref;
+ cfs_list_t sym_list;
};
extern int cfs_symbol_register(const char *, const void *);
struct ctl_table_header
{
cfs_sysctl_table_t * ctl_table;
- struct list_head ctl_entry;
+ cfs_list_t ctl_entry;
};
/* proc root entries, support routines */
size_t count;
loff_t index;
u32 version;
- mutex_t lock;
+ cfs_mutex_t lock;
const struct seq_operations *op;
void *private;
};
* Helpers for iteration over list_head-s in seq_files
*/
-extern struct list_head *seq_list_start(struct list_head *head,
- loff_t pos);
-extern struct list_head *seq_list_start_head(struct list_head *head,
- loff_t pos);
-extern struct list_head *seq_list_next(void *v, struct list_head *head,
- loff_t *ppos);
+extern cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos);
+extern cfs_list_t *seq_list_start_head(cfs_list_t *head, loff_t pos);
+extern cfs_list_t *seq_list_next(void *v, cfs_list_t *head, loff_t *ppos);
/*
* declaration of proc kernel process routines
typedef int cfs_task_state_t;
-#define CFS_TASK_INTERRUPTIBLE 0x00000001
-#define CFS_TASK_UNINT 0x00000002
-#define CFS_TASK_RUNNING 0x00000003
+#define CFS_TASK_INTERRUPTIBLE 0x00000001
+#define CFS_TASK_UNINT 0x00000002
+#define CFS_TASK_RUNNING 0x00000003
#define CFS_TASK_UNINTERRUPTIBLE CFS_TASK_UNINT
#define CFS_WAITQ_MAGIC 'CWQM'
typedef struct cfs_waitq {
- unsigned int magic;
- unsigned int flags;
-
- spinlock_t guard;
- struct list_head waiters;
+ unsigned int magic;
+ unsigned int flags;
+
+ cfs_spinlock_t guard;
+ cfs_list_t waiters;
} cfs_waitq_t;
typedef struct cfs_waitlink_channel {
- struct list_head link;
+ cfs_list_t link;
cfs_waitq_t * waitq;
cfs_waitlink_t * waitl;
} cfs_waitlink_channel_t;
unsigned int magic;
int flags;
event_t * event;
- atomic_t * hits;
+ cfs_atomic_t * hits;
cfs_waitlink_channel_t waitq[CFS_WAITQ_CHANNELS];
};
} cfs_thread_context_t;
int cfs_kernel_thread(int (*func)(void *), void *arg, int flag);
-#define kernel_thread cfs_kernel_thread
/*
* thread creation flags from Linux, not used in winnt
*/
#define NGROUPS_SMALL 32
#define NGROUPS_PER_BLOCK ((int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
+typedef struct cfs_group_info {
int ngroups;
- atomic_t usage;
+ cfs_atomic_t usage;
gid_t small_block[NGROUPS_SMALL];
int nblocks;
gid_t *blocks[0];
-};
+} cfs_group_info_t;
-#define get_group_info(group_info) do { \
- atomic_inc(&(group_info)->usage); \
+#define cfs_get_group_info(group_info) do { \
+ cfs_atomic_inc(&(group_info)->usage); \
} while (0)
-#define put_group_info(group_info) do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
- groups_free(group_info); \
+#define cfs_put_group_info(group_info) do { \
+ if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
+ cfs_groups_free(group_info); \
} while (0)
-static __inline struct group_info *groups_alloc(int gidsetsize)
+static __inline cfs_group_info_t *cfs_groups_alloc(int gidsetsize)
{
- struct group_info * groupinfo;
+ cfs_group_info_t * groupinfo;
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
- groupinfo = (struct group_info *)cfs_alloc(sizeof(struct group_info), 0);
+ groupinfo =
+ (cfs_group_info_t *)cfs_alloc(sizeof(cfs_group_info_t), 0);
+
if (groupinfo) {
- memset(groupinfo, 0, sizeof(struct group_info));
+ memset(groupinfo, 0, sizeof(cfs_group_info_t));
}
return groupinfo;
}
-static __inline void groups_free(struct group_info *group_info)
+static __inline void cfs_groups_free(cfs_group_info_t *group_info)
{
- KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
+ KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+ __FUNCTION__));
cfs_free(group_info);
}
-static __inline int set_current_groups(struct group_info *group_info) {
- KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
+static __inline int
+cfs_set_current_groups(cfs_group_info_t *group_info)
+{
+ KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+ __FUNCTION__));
return 0;
}
-static __inline int groups_search(struct group_info *group_info, gid_t grp) {
- KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
+static __inline int groups_search(cfs_group_info_t *group_info,
+ gid_t grp) {
+ KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+ __FUNCTION__));
return 0;
}
* Task struct
*/
-#define CFS_MAX_SCHEDULE_TIMEOUT ((long_ptr_t)(~0UL>>12))
-#define schedule_timeout(t) cfs_schedule_timeout(0, t)
+#define CFS_MAX_SCHEDULE_TIMEOUT ((long_ptr_t)(~0UL>>12))
+#define cfs_schedule_timeout(t) cfs_schedule_timeout_and_set_state(0, t)
struct vfsmount;
#define NGROUPS 1
#define CFS_CURPROC_COMM_MAX (16)
typedef struct task_sruct{
- mode_t umask;
- sigset_t blocked;
-
- pid_t pid;
- pid_t pgrp;
-
- uid_t uid,euid,suid,fsuid;
- gid_t gid,egid,sgid,fsgid;
-
- int ngroups;
- int cgroups;
- gid_t groups[NGROUPS];
- struct group_info *group_info;
- cfs_kernel_cap_t cap_effective,
- cap_inheritable,
- cap_permitted;
-
- char comm[CFS_CURPROC_COMM_MAX];
- void *journal_info;
- struct vfsmount *fs;
+ mode_t umask;
+ sigset_t blocked;
+
+ pid_t pid;
+ pid_t pgrp;
+
+ uid_t uid,euid,suid,fsuid;
+ gid_t gid,egid,sgid,fsgid;
+
+ int ngroups;
+ int cgroups;
+ gid_t groups[NGROUPS];
+ cfs_group_info_t *group_info;
+ cfs_kernel_cap_t cap_effective,
+ cap_inheritable,
+ cap_permitted;
+
+ char comm[CFS_CURPROC_COMM_MAX];
+ void *journal_info;
+ struct vfsmount *fs;
} cfs_task_t;
static inline void task_lock(cfs_task_t *t)
typedef struct _TASK_MAN {
- ULONG Magic; /* Magic and Flags */
- ULONG Flags;
+ ULONG Magic; /* Magic and Flags */
+ ULONG Flags;
- spinlock_t Lock; /* Protection lock */
+ cfs_spinlock_t Lock; /* Protection lock */
- cfs_mem_cache_t * slab; /* Memory slab for task slot */
+ cfs_mem_cache_t *slab; /* Memory slab for task slot */
- ULONG NumOfTasks; /* Total tasks (threads) */
- LIST_ENTRY TaskList; /* List of task slots */
+ ULONG NumOfTasks; /* Total tasks (threads) */
+ LIST_ENTRY TaskList; /* List of task slots */
} TASK_MAN, *PTASK_MAN;
typedef struct _TASK_SLOT {
- ULONG Magic; /* Magic and Flags */
- ULONG Flags;
+ ULONG Magic; /* Magic and Flags */
+ ULONG Flags;
- LIST_ENTRY Link; /* To be linked to TaskMan */
+ LIST_ENTRY Link; /* To be linked to TaskMan */
- event_t Event; /* Schedule event */
+ event_t Event; /* Schedule event */
- HANDLE Pid; /* Process id */
- HANDLE Tid; /* Thread id */
- PETHREAD Tet; /* Pointer to ethread */
+ HANDLE Pid; /* Process id */
+ HANDLE Tid; /* Thread id */
+ PETHREAD Tet; /* Pointer to ethread */
- atomic_t count; /* refer count */
- atomic_t hits; /* times of waken event singaled */
+ cfs_atomic_t count; /* refer count */
+ cfs_atomic_t hits; /* times of waken event signaled */
- KIRQL irql; /* irql for rwlock ... */
+ KIRQL irql; /* irql for rwlock ... */
- cfs_task_t task; /* linux task part */
+ cfs_task_t task; /* linux task part */
} TASK_SLOT, *PTASK_SLOT;
#define current cfs_current()
-#define set_current_state(s) do {;} while (0)
-#define cfs_set_current_state(state) set_current_state(state)
+#define cfs_set_current_state(s) do {;} while (0)
+#define cfs_set_current_state(state) cfs_set_current_state(state)
-#define wait_event(wq, condition) \
+#define cfs_wait_event(wq, condition) \
do { \
cfs_waitlink_t __wait; \
\
cfs_task_t * cfs_current();
int wake_up_process(cfs_task_t * task);
void sleep_on(cfs_waitq_t *waitq);
-#define might_sleep() do {} while(0)
+#define cfs_might_sleep() do {} while(0)
#define CFS_DECL_JOURNAL_DATA
#define CFS_PUSH_JOURNAL do {;} while(0)
#define CFS_POP_JOURNAL do {;} while(0)
#define __init
#endif
-struct module {
+typedef struct cfs_module {
const char *name;
-};
+} cfs_module_t;
-extern struct module libcfs_global_module;
+extern cfs_module_t libcfs_global_module;
#define THIS_MODULE &libcfs_global_module
-#define request_module(x, y) (0)
+#define cfs_request_module(x, y) (0)
#define EXPORT_SYMBOL(s)
#define MODULE_AUTHOR(s)
#define MODULE_DESCRIPTION(s)
/* Module interfaces */
#define cfs_module(name, version, init, fini) \
-module_init(init); \
-module_exit(fini)
-#define module_refcount(x) (1)
+ module_init(init); \
+ module_exit(fini)
+#define cfs_module_refcount(x) (1)
/*
* typecheck
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
#if 0 /* defined in libcfs/libcfs_hash.h */
-static inline u32 hash_long(u32 val, unsigned int bits)
+static inline u32 cfs_hash_long(u32 val, unsigned int bits)
{
/* On some cpus multiply is faster, on others gcc will do shifts */
u32 hash = val * GOLDEN_RATIO_PRIME_32;
* cache alignment size
*/
-#define L1_CACHE_ALIGN(x) (x)
+#define CFS_L1_CACHE_ALIGN(x) (x)
#define __cacheline_aligned
#define SMP_CACHE_BYTES 128
-#define NR_CPUS (32)
+#define CFS_NR_CPUS (32)
#define smp_num_cpus ((CCHAR)KeNumberProcessors)
-#define num_possible_cpus() smp_num_cpus
-#define num_online_cpus() smp_num_cpus
-#define smp_processor_id() ((USHORT)KeGetCurrentProcessorNumber())
+#define cfs_num_possible_cpus() smp_num_cpus
+#define cfs_num_online_cpus() smp_num_cpus
+#define cfs_smp_processor_id() ((USHORT)KeGetCurrentProcessorNumber())
#define smp_call_function(f, a, n, w) do {} while(0)
#define smp_rmb() do {} while(0)
* Irp related
*/
-#define NR_IRQS 512
-#define in_interrupt() (0)
-#define cfs_in_interrupt() in_interrupt()
+#define CFS_NR_IRQS 512
+#define cfs_in_interrupt() (0)
/*
* printk flags
*/
-#define KERN_EMERG "<0>" /* system is unusable */
-#define KERN_ALERT "<1>" /* action must be taken immediately */
-#define KERN_CRIT "<2>" /* critical conditions */
-#define KERN_ERR "<3>" /* error conditions */
-#define KERN_WARNING "<4>" /* warning conditions */
-#define KERN_NOTICE "<5>" /* normal but significant condition */
-#define KERN_INFO "<6>" /* informational */
-#define KERN_DEBUG "<7>" /* debug-level messages */
+#define CFS_KERN_EMERG "<0>" /* system is unusable */
+#define CFS_KERN_ALERT "<1>" /* action must be taken immediately */
+#define CFS_KERN_CRIT "<2>" /* critical conditions */
+#define CFS_KERN_ERR "<3>" /* error conditions */
+#define CFS_KERN_WARNING "<4>" /* warning conditions */
+#define CFS_KERN_NOTICE "<5>" /* normal but significant condition */
+#define CFS_KERN_INFO "<6>" /* informational */
+#define CFS_KERN_DEBUG "<7>" /* debug-level messages */
/*
* Misc
#define unlikely(exp) (exp)
#endif
-#define lock_kernel() do {} while(0)
-#define unlock_kernel() do {} while(0)
+#define cfs_lock_kernel() do {} while(0)
+#define cfs_unlock_kernel() do {} while(0)
#define local_irq_save(x)
#define local_irq_restore(x)
* module routines
*/
-static inline void __module_get(struct module *module)
+static inline void __cfs_module_get(cfs_module_t *module)
{
}
-static inline int try_module_get(struct module *module)
+static inline int cfs_try_module_get(cfs_module_t *module)
{
return 1;
}
-static inline void module_put(struct module *module)
+static inline void cfs_module_put(cfs_module_t *module)
{
}
int setenv(const char *envname, const char *envval, int overwrite);
-struct utsname {
+typedef struct utsname {
char sysname[64];
char nodename[64];
char release[128];
typedef struct _KS_TSDU {
- ULONG Magic; /* magic */
- ULONG Flags; /* flags */
+ ULONG Magic; /* magic */
+ ULONG Flags; /* flags */
- struct list_head Link; /* link list */
+ cfs_list_t Link; /* link list */
- ULONG TotalLength; /* total size of KS_TSDU */
- ULONG StartOffset; /* offset of the first Tsdu unit */
- ULONG LastOffset; /* end offset of the last Tsdu unit */
+ ULONG TotalLength; /* total size of KS_TSDU */
+ ULONG StartOffset; /* offset of the first Tsdu unit */
+ ULONG LastOffset; /* end offset of the last Tsdu unit */
/*
union {
} KS_TSDU_MDL, *PKS_TSDU_MDL;
typedef struct ks_engine_mgr {
- spinlock_t lock;
+ cfs_spinlock_t lock;
int stop;
event_t exit;
event_t start;
- struct list_head list;
+ cfs_list_t list;
} ks_engine_mgr_t;
typedef struct ks_engine_slot {
ks_tconn_t * tconn;
void * tsdumgr;
- struct list_head link;
+ cfs_list_t link;
int queued;
ks_engine_mgr_t * emgr;
} ks_engine_slot_t;
typedef struct _KS_TSDUMGR {
- struct list_head TsduList;
+ cfs_list_t TsduList;
ULONG NumOfTsdu;
ULONG TotalBytes;
KEVENT Event;
- spinlock_t Lock;
+ cfs_spinlock_t Lock;
ks_engine_slot_t Slot;
ULONG Payload;
int Busy:1;
int OOB:1;
} KS_TSDUMGR, *PKS_TSDUMGR;
-#define ks_lock_tsdumgr(mgr) spin_lock(&((mgr)->Lock))
-#define ks_unlock_tsdumgr(mgr) spin_unlock(&((mgr)->Lock))
+#define ks_lock_tsdumgr(mgr) cfs_spin_lock(&((mgr)->Lock))
+#define ks_unlock_tsdumgr(mgr) cfs_spin_unlock(&((mgr)->Lock))
typedef struct _KS_CHAIN {
KS_TSDUMGR Normal; /* normal queue */
typedef struct ks_backlogs {
- struct list_head list; /* list to link the backlog connections */
- int num; /* number of backlogs in the list */
+ cfs_list_t list; /* list to link the backlog connections */
+ int num; /* number of backlogs in the list */
} ks_backlogs_t;
typedef struct ks_daemon {
- ks_tconn_t * tconn; /* the listener connection object */
- unsigned short nbacklogs; /* number of listening backlog conns */
- unsigned short port; /* listening port number */
- int shutdown; /* daemon threads is to exit */
- struct list_head list; /* to be attached into ks_nal_data_t */
+ ks_tconn_t * tconn; /* the listener connection object */
+ unsigned short nbacklogs; /* number of listening backlog conns */
+ unsigned short port; /* listening port number */
+ int shutdown; /* daemon threads is to exit */
+ cfs_list_t list; /* to be attached into ks_nal_data_t */
} ks_daemon_t;
ulong kstc_magic; /* Magic & Flags */
ulong kstc_flags;
- spinlock_t kstc_lock; /* serialise lock*/
+ cfs_spinlock_t kstc_lock; /* serialise lock*/
void * kstc_conn; /* ks_conn_t */
ks_tconn_type_t kstc_type; /* tdi connection Type */
ks_tdi_addr_t kstc_addr; /* local address handlers / Objects */
- atomic_t kstc_refcount; /* reference count of ks_tconn_t */
+ cfs_atomic_t kstc_refcount; /* reference count of ks_tconn_t */
- struct list_head kstc_list; /* linked to global ksocknal_data */
+ cfs_list_t kstc_list; /* linked to global ksocknal_data */
union {
} listener;
struct {
- ks_tconn_info_t kstc_info; /* Connection Info if Connected */
- ks_chain_t kstc_recv; /* tsdu engine for data receiving */
- ks_chain_t kstc_send; /* tsdu engine for data sending */
+ ks_tconn_info_t kstc_info; /* Connection Info if Connected */
+ ks_chain_t kstc_recv; /* tsdu engine for data receiving */
+ ks_chain_t kstc_send; /* tsdu engine for data sending */
- int kstc_queued; /* Attached to Parent->ChildList ... */
- int kstc_queueno; /* 0: Attached to Listening list
+ int kstc_queued; /* Attached to Parent->ChildList ... */
+ int kstc_queueno; /* 0: Attached to Listening list
1: Attached to Accepted list */
- int kstc_busy; /* referred by ConnectEventCallback ? */
- int kstc_accepted; /* the connection is built ready ? */
+ int kstc_busy; /* referred by ConnectEventCallback ? */
+ int kstc_accepted; /* the connection is built ready ? */
- struct list_head kstc_link; /* linked to parent tdi connection */
- ks_tconn_t * kstc_parent; /* pointers to it's listener parent */
+ cfs_list_t kstc_link; /* linked to parent tdi connection */
+ ks_tconn_t * kstc_parent; /* pointers to its listener parent */
} child;
struct {
* Tdi client information
*/
- UNICODE_STRING ksnd_client_name; /* tdi client module name */
- HANDLE ksnd_pnp_handle; /* the handle for pnp changes */
+ UNICODE_STRING ksnd_client_name; /* tdi client module name */
+ HANDLE ksnd_pnp_handle; /* the handle for pnp changes */
- spinlock_t ksnd_addrs_lock; /* serialize ip address list access */
- LIST_ENTRY ksnd_addrs_list; /* list of the ip addresses */
- int ksnd_naddrs; /* number of the ip addresses */
+ cfs_spinlock_t ksnd_addrs_lock; /* serialize ip address list access */
+ LIST_ENTRY ksnd_addrs_list; /* list of the ip addresses */
+ int ksnd_naddrs; /* number of the ip addresses */
/*
* Tdilnd internal defintions
*/
- int ksnd_init; /* initialisation state */
+ int ksnd_init; /* initialisation state */
- TDI_PROVIDER_INFO ksnd_provider; /* tdi tcp/ip provider's information */
+ TDI_PROVIDER_INFO ksnd_provider; /* tdi tcp/ip provider's information */
- spinlock_t ksnd_tconn_lock; /* tdi connections access serialise */
+ cfs_spinlock_t ksnd_tconn_lock; /* tdi connections access serialise */
- int ksnd_ntconns; /* number of tconns attached in list */
- struct list_head ksnd_tconns; /* tdi connections list */
- cfs_mem_cache_t * ksnd_tconn_slab; /* slabs for ks_tconn_t allocations */
- event_t ksnd_tconn_exit; /* exit event to be signaled by the last tconn */
+ int ksnd_ntconns; /* number of tconns attached in list */
+ cfs_list_t ksnd_tconns; /* tdi connections list */
+ cfs_mem_cache_t * ksnd_tconn_slab; /* slabs for ks_tconn_t allocations */
+ event_t ksnd_tconn_exit; /* exit event to be signaled by the last tconn */
- spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */
+ cfs_spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */
- int ksnd_ntsdus; /* number of tsdu buffers allocated */
- ulong ksnd_tsdu_size; /* the size of a signel tsdu buffer */
- cfs_mem_cache_t * ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
+ int ksnd_ntsdus; /* number of tsdu buffers allocated */
+ ulong ksnd_tsdu_size; /* the size of a single tsdu buffer */
+ cfs_mem_cache_t *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
- int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */
- struct list_head ksnd_freetsdus; /* List of the freed Tsdu buffer. */
+ int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */
+ cfs_list_t ksnd_freetsdus; /* List of the freed Tsdu buffer. */
- int ksnd_engine_nums; /* number of tcp sending engine threads */
- ks_engine_mgr_t * ksnd_engine_mgr; /* tcp sending engine structure */
+ int ksnd_engine_nums; /* number of tcp sending engine threads */
+ ks_engine_mgr_t *ksnd_engine_mgr; /* tcp sending engine structure */
} ks_tdi_data_t;
#include <libcfs/winnt/portals_compat25.h>
-#define HZ (100)
+#define CFS_HZ (100)
struct timespec {
__u32 tv_sec;
#define jiffies (ULONG_PTR)JIFFIES()
#define cfs_jiffies (ULONG_PTR)JIFFIES()
-static inline void do_gettimeofday(struct timeval *tv)
+static inline void cfs_gettimeofday(struct timeval *tv)
{
LARGE_INTEGER Time;
tv->tv_usec = (suseconds_t) (Time.QuadPart % 10000000) / 10;
}
-#define cfs_do_gettimeofday(tv) do_gettimeofday(tv)
-
static inline LONGLONG JIFFIES()
{
LARGE_INTEGER Tick;
KeQueryTickCount(&Tick);
Elapse.QuadPart = Tick.QuadPart * KeQueryTimeIncrement();
- Elapse.QuadPart /= (10000000 / HZ);
+ Elapse.QuadPart /= (10000000 / CFS_HZ);
return Elapse.QuadPart;
}
static inline time_t cfs_time_current_sec(void)
{
- return (time_t)(JIFFIES() / HZ);
+ return (time_t)(JIFFIES() / CFS_HZ);
}
-#define time_before(t1, t2) (((signed)(t1) - (signed)(t2)) < 0)
-#define time_before_eq(t1, t2) (((signed)(t1) - (signed)(t2)) <= 0)
+#define cfs_time_before(t1, t2) (((signed)(t1) - (signed)(t2)) < 0)
+#define cfs_time_beforeq(t1, t2) (((signed)(t1) - (signed)(t2)) <= 0)
static inline void cfs_fs_time_current(cfs_fs_time_t *t)
{
static inline cfs_duration_t cfs_time_seconds(cfs_duration_t seconds)
{
- return (cfs_duration_t)(seconds * HZ);
+ return (cfs_duration_t)(seconds * CFS_HZ);
}
static inline time_t cfs_duration_sec(cfs_duration_t d)
{
- return (time_t)(d / HZ);
+ return (time_t)(d / CFS_HZ);
}
static inline void cfs_duration_usec(cfs_duration_t d, struct timeval *s)
{
- s->tv_sec = (__u32)(d / HZ);
- s->tv_usec = (__u32)((d - (cfs_duration_t)s->tv_sec * HZ) *
- ONE_MILLION / HZ);
+ s->tv_sec = (__u32)(d / CFS_HZ);
+ s->tv_usec = (__u32)((d - (cfs_duration_t)s->tv_sec * CFS_HZ) *
+ ONE_MILLION / CFS_HZ);
}
static inline void cfs_duration_nsec(cfs_duration_t d, struct timespec *s)
{
- s->tv_sec = (__u32) (d / HZ);
- s->tv_nsec = (__u32)((d - (cfs_duration_t)s->tv_sec * HZ) *
- ONE_BILLION / HZ);
+ s->tv_sec = (__u32) (d / CFS_HZ);
+ s->tv_nsec = (__u32)((d - (cfs_duration_t)s->tv_sec * CFS_HZ) *
+ ONE_BILLION / CFS_HZ);
}
static inline void cfs_fs_time_usec(cfs_fs_time_t *t, struct timeval *v)
/* liblustre. time(2) based implementation. */
int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
void sleep(int time);
-void do_gettimeofday(struct timeval *tv);
+void cfs_gettimeofday(struct timeval *tv);
int gettimeofday(struct timeval *tv, void * tz);
#endif /* !__KERNEL__ */
typedef __u16 uid_t, gid_t;
typedef __u16 mode_t;
-typedef __u16 umode_t;
+typedef __u16 cfs_umode_t;
typedef __u32 sigset_t;
typedef int64_t loff_t;
typedef void * cfs_handle_t;
-typedef uint64_t cycles_t;
+typedef uint64_t cfs_cycles_t;
#ifndef INVALID_HANDLE_VALUE
#define INVALID_HANDLE_VALUE ((HANDLE)-1)
void
set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
- const int line, unsigned long stack)
+ const int line, unsigned long stack)
{
struct timeval tv;
"Lustre kernel panic on LBUG");
EXPORT_SYMBOL(libcfs_panic_on_lbug);
-atomic_t libcfs_kmemory = ATOMIC_INIT(0);
+cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
EXPORT_SYMBOL(libcfs_kmemory);
static cfs_waitq_t debug_ctlwq;
-char debug_file_path_arr[1024] = DEBUG_FILE_PATH_DEFAULT;
+char libcfs_debug_file_path_arr[1024] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
/* We need to pass a pointer here, but elsewhere this must be a const */
-char *debug_file_path = &debug_file_path_arr[0];
-CFS_MODULE_PARM(debug_file_path, "s", charp, 0644,
+char *libcfs_debug_file_path = &libcfs_debug_file_path_arr[0];
+CFS_MODULE_PARM(libcfs_debug_file_path, "s", charp, 0644,
"Path for dumping debug logs, "
"set 'NONE' to prevent log dumping");
CFS_PUSH_JOURNAL;
- if (strncmp(debug_file_path_arr, "NONE", 4) != 0) {
+ if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
snprintf(debug_file_name, sizeof(debug_file_name) - 1,
- "%s.%ld." LPLD, debug_file_path_arr,
+ "%s.%ld." LPLD, libcfs_debug_file_path_arr,
cfs_time_current_sec(), (long_ptr_t)arg);
- printk(KERN_ALERT "LustreError: dumping log to %s\n",
+ printk(CFS_KERN_ALERT "LustreError: dumping log to %s\n",
debug_file_name);
- tracefile_dump_all_pages(debug_file_name);
+ cfs_tracefile_dump_all_pages(debug_file_name);
libcfs_run_debug_log_upcall(debug_file_name);
}
CFS_POP_JOURNAL;
* able to set our state to running as it exits before we
* get to schedule() */
cfs_waitlink_init(&wait);
- set_current_state(TASK_INTERRUPTIBLE);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add(&debug_ctlwq, &wait);
dumper = cfs_kthread_run(libcfs_debug_dumplog_thread,
(void*)(long)cfs_curproc_pid(),
"libcfs_debug_dumper");
if (IS_ERR(dumper))
- printk(KERN_ERR "LustreError: cannot start log dump thread: "
- "%ld\n", PTR_ERR(dumper));
+ printk(CFS_KERN_ERR "LustreError: cannot start log dump thread:"
+ " %ld\n", PTR_ERR(dumper));
else
cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
- /* be sure to teardown if kernel_thread() failed */
+ /* be sure to teardown if cfs_kernel_thread() failed */
cfs_waitq_del(&debug_ctlwq, &wait);
- set_current_state(TASK_RUNNING);
+ cfs_set_current_state(CFS_TASK_RUNNING);
}
int libcfs_debug_init(unsigned long bufsize)
/* If libcfs_debug_mb is set to an invalid value or uninitialized
* then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
- if (max > trace_max_debug_mb() || max < num_possible_cpus()) {
+ if (max > cfs_trace_max_debug_mb() || max < cfs_num_possible_cpus()) {
max = TCD_MAX_PAGES;
} else {
- max = (max / num_possible_cpus());
+ max = (max / cfs_num_possible_cpus());
max = (max << (20 - CFS_PAGE_SHIFT));
}
- rc = tracefile_init(max);
+ rc = cfs_tracefile_init(max);
if (rc == 0)
libcfs_register_panic_notifier();
int libcfs_debug_cleanup(void)
{
libcfs_unregister_panic_notifier();
- tracefile_exit();
+ cfs_tracefile_exit();
return 0;
}
int libcfs_debug_clear_buffer(void)
{
- trace_flush_pages();
+ cfs_trace_flush_pages();
return 0;
}
void libcfs_debug_set_level(unsigned int debug_level)
{
- printk(KERN_WARNING "Lustre: Setting portals debug level to %08x\n",
+ printk(CFS_KERN_WARNING "Lustre: Setting portals debug level to %08x\n",
debug_level);
libcfs_debug = debug_level;
}
cfs_hash_rlock(cfs_hash_t *hs)
{
if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
- read_lock(&hs->hs_rwlock);
+ cfs_read_lock(&hs->hs_rwlock);
}
static void
cfs_hash_runlock(cfs_hash_t *hs)
{
if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
- read_unlock(&hs->hs_rwlock);
+ cfs_read_unlock(&hs->hs_rwlock);
}
static void
cfs_hash_wlock(cfs_hash_t *hs)
{
if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
- write_lock(&hs->hs_rwlock);
+ cfs_write_lock(&hs->hs_rwlock);
}
static void
cfs_hash_wunlock(cfs_hash_t *hs)
{
if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
- write_unlock(&hs->hs_rwlock);
+ cfs_write_unlock(&hs->hs_rwlock);
}
/**
strncpy(hs->hs_name, name, sizeof(hs->hs_name));
hs->hs_name[sizeof(hs->hs_name) - 1] = '\0';
- atomic_set(&hs->hs_rehash_count, 0);
- atomic_set(&hs->hs_count, 0);
- rwlock_init(&hs->hs_rwlock);
+ cfs_atomic_set(&hs->hs_rehash_count, 0);
+ cfs_atomic_set(&hs->hs_count, 0);
+ cfs_rwlock_init(&hs->hs_rwlock);
hs->hs_cur_bits = cur_bits;
hs->hs_cur_mask = (1 << cur_bits) - 1;
hs->hs_min_bits = cur_bits;
}
CFS_INIT_HLIST_HEAD(&hs->hs_buckets[i]->hsb_head);
- rwlock_init(&hs->hs_buckets[i]->hsb_rwlock);
- atomic_set(&hs->hs_buckets[i]->hsb_count, 0);
+ cfs_rwlock_init(&hs->hs_buckets[i]->hsb_rwlock);
+ cfs_atomic_set(&hs->hs_buckets[i]->hsb_count, 0);
}
return hs;
cfs_hash_destroy(cfs_hash_t *hs)
{
cfs_hash_bucket_t *hsb;
- struct hlist_node *hnode;
- struct hlist_node *pos;
+ cfs_hlist_node_t *hnode;
+ cfs_hlist_node_t *pos;
int i;
ENTRY;
if (hsb == NULL)
continue;
- write_lock(&hsb->hsb_rwlock);
- hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
+ cfs_write_lock(&hsb->hsb_rwlock);
+ cfs_hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
__cfs_hash_bucket_validate(hs, hsb, hnode);
__cfs_hash_bucket_del(hs, hsb, hnode);
cfs_hash_exit(hs, hnode);
}
- LASSERT(hlist_empty(&(hsb->hsb_head)));
- LASSERT(atomic_read(&hsb->hsb_count) == 0);
- write_unlock(&hsb->hsb_rwlock);
+ LASSERT(cfs_hlist_empty(&(hsb->hsb_head)));
+ LASSERT(cfs_atomic_read(&hsb->hsb_count) == 0);
+ cfs_write_unlock(&hsb->hsb_rwlock);
CFS_FREE_PTR(hsb);
}
- LASSERT(atomic_read(&hs->hs_count) == 0);
+ LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
cfs_hash_wunlock(hs);
LIBCFS_FREE(hs->hs_buckets,
* ops->hs_get function will be called when the item is added.
*/
void
-cfs_hash_add(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+cfs_hash_add(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
{
cfs_hash_bucket_t *hsb;
int bits;
i = cfs_hash_id(hs, key, hs->hs_cur_mask);
hsb = hs->hs_buckets[i];
LASSERT(i <= hs->hs_cur_mask);
- LASSERT(hlist_unhashed(hnode));
+ LASSERT(cfs_hlist_unhashed(hnode));
- write_lock(&hsb->hsb_rwlock);
+ cfs_write_lock(&hsb->hsb_rwlock);
__cfs_hash_bucket_add(hs, hsb, hnode);
- write_unlock(&hsb->hsb_rwlock);
+ cfs_write_unlock(&hsb->hsb_rwlock);
bits = cfs_hash_rehash_bits(hs);
cfs_hash_runlock(hs);
}
CFS_EXPORT_SYMBOL(cfs_hash_add);
-static struct hlist_node *
+static cfs_hlist_node_t *
cfs_hash_findadd_unique_hnode(cfs_hash_t *hs, void *key,
- struct hlist_node *hnode)
+ cfs_hlist_node_t *hnode)
{
int bits = 0;
- struct hlist_node *ehnode;
+ cfs_hlist_node_t *ehnode;
cfs_hash_bucket_t *hsb;
unsigned i;
ENTRY;
i = cfs_hash_id(hs, key, hs->hs_cur_mask);
hsb = hs->hs_buckets[i];
LASSERT(i <= hs->hs_cur_mask);
- LASSERT(hlist_unhashed(hnode));
+ LASSERT(cfs_hlist_unhashed(hnode));
- write_lock(&hsb->hsb_rwlock);
+ cfs_write_lock(&hsb->hsb_rwlock);
ehnode = __cfs_hash_bucket_lookup(hs, hsb, key);
if (ehnode) {
cfs_hash_get(hs, ehnode);
ehnode = hnode;
bits = cfs_hash_rehash_bits(hs);
}
- write_unlock(&hsb->hsb_rwlock);
+ cfs_write_unlock(&hsb->hsb_rwlock);
cfs_hash_runlock(hs);
if (bits)
cfs_hash_rehash(hs, bits);
* Returns 0 on success or -EALREADY on key collisions.
*/
int
-cfs_hash_add_unique(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+cfs_hash_add_unique(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
{
- struct hlist_node *ehnode;
+ cfs_hlist_node_t *ehnode;
ENTRY;
ehnode = cfs_hash_findadd_unique_hnode(hs, key, hnode);
*/
void *
cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
- struct hlist_node *hnode)
+ cfs_hlist_node_t *hnode)
{
- struct hlist_node *ehnode;
+ cfs_hlist_node_t *ehnode;
void *obj;
ENTRY;
* on the removed object.
*/
void *
-cfs_hash_del(cfs_hash_t *hs, void *key, struct hlist_node *hnode)
+cfs_hash_del(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
{
cfs_hash_bucket_t *hsb;
void *obj;
i = cfs_hash_id(hs, key, hs->hs_cur_mask);
hsb = hs->hs_buckets[i];
LASSERT(i <= hs->hs_cur_mask);
- LASSERT(!hlist_unhashed(hnode));
+ LASSERT(!cfs_hlist_unhashed(hnode));
- write_lock(&hsb->hsb_rwlock);
+ cfs_write_lock(&hsb->hsb_rwlock);
obj = __cfs_hash_bucket_del(hs, hsb, hnode);
- write_unlock(&hsb->hsb_rwlock);
+ cfs_write_unlock(&hsb->hsb_rwlock);
cfs_hash_runlock(hs);
RETURN(obj);
cfs_hash_del_key(cfs_hash_t *hs, void *key)
{
void *obj = NULL;
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
cfs_hash_bucket_t *hsb;
unsigned i;
ENTRY;
hsb = hs->hs_buckets[i];
LASSERT(i <= hs->hs_cur_mask);
- write_lock(&hsb->hsb_rwlock);
+ cfs_write_lock(&hsb->hsb_rwlock);
hnode = __cfs_hash_bucket_lookup(hs, hsb, key);
if (hnode)
obj = __cfs_hash_bucket_del(hs, hsb, hnode);
- write_unlock(&hsb->hsb_rwlock);
+ cfs_write_unlock(&hsb->hsb_rwlock);
cfs_hash_runlock(hs);
RETURN(obj);
cfs_hash_lookup(cfs_hash_t *hs, void *key)
{
void *obj = NULL;
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
cfs_hash_bucket_t *hsb;
unsigned i;
ENTRY;
hsb = hs->hs_buckets[i];
LASSERT(i <= hs->hs_cur_mask);
- read_lock(&hsb->hsb_rwlock);
+ cfs_read_lock(&hsb->hsb_rwlock);
hnode = __cfs_hash_bucket_lookup(hs, hsb, key);
if (hnode)
obj = cfs_hash_get(hs, hnode);
- read_unlock(&hsb->hsb_rwlock);
+ cfs_read_unlock(&hsb->hsb_rwlock);
cfs_hash_runlock(hs);
RETURN(obj);
cfs_hash_for_each(cfs_hash_t *hs,
cfs_hash_for_each_cb_t func, void *data)
{
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
cfs_hash_bucket_t *hsb;
void *obj;
int i;
cfs_hash_rlock(hs);
cfs_hash_for_each_bucket(hs, hsb, i) {
- read_lock(&hsb->hsb_rwlock);
- hlist_for_each(hnode, &(hsb->hsb_head)) {
+ cfs_read_lock(&hsb->hsb_rwlock);
+ cfs_hlist_for_each(hnode, &(hsb->hsb_head)) {
__cfs_hash_bucket_validate(hs, hsb, hnode);
obj = cfs_hash_get(hs, hnode);
func(obj, data);
(void)cfs_hash_put(hs, hnode);
}
- read_unlock(&hsb->hsb_rwlock);
+ cfs_read_unlock(&hsb->hsb_rwlock);
}
cfs_hash_runlock(hs);
cfs_hash_for_each_safe(cfs_hash_t *hs,
cfs_hash_for_each_cb_t func, void *data)
{
- struct hlist_node *hnode;
- struct hlist_node *pos;
+ cfs_hlist_node_t *hnode;
+ cfs_hlist_node_t *pos;
cfs_hash_bucket_t *hsb;
void *obj;
int i;
cfs_hash_rlock(hs);
cfs_hash_for_each_bucket(hs, hsb, i) {
- read_lock(&hsb->hsb_rwlock);
- hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
+ cfs_read_lock(&hsb->hsb_rwlock);
+ cfs_hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
__cfs_hash_bucket_validate(hs, hsb, hnode);
obj = cfs_hash_get(hs, hnode);
- read_unlock(&hsb->hsb_rwlock);
+ cfs_read_unlock(&hsb->hsb_rwlock);
func(obj, data);
- read_lock(&hsb->hsb_rwlock);
+ cfs_read_lock(&hsb->hsb_rwlock);
(void)cfs_hash_put(hs, hnode);
}
- read_unlock(&hsb->hsb_rwlock);
+ cfs_read_unlock(&hsb->hsb_rwlock);
}
cfs_hash_runlock(hs);
EXIT;
cfs_hash_for_each_empty(cfs_hash_t *hs,
cfs_hash_for_each_cb_t func, void *data)
{
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
cfs_hash_bucket_t *hsb;
void *obj;
int i;
restart:
cfs_hash_rlock(hs);
cfs_hash_for_each_bucket(hs, hsb, i) {
- write_lock(&hsb->hsb_rwlock);
- while (!hlist_empty(&hsb->hsb_head)) {
+ cfs_write_lock(&hsb->hsb_rwlock);
+ while (!cfs_hlist_empty(&hsb->hsb_head)) {
hnode = hsb->hsb_head.first;
__cfs_hash_bucket_validate(hs, hsb, hnode);
obj = cfs_hash_get(hs, hnode);
- write_unlock(&hsb->hsb_rwlock);
+ cfs_write_unlock(&hsb->hsb_rwlock);
cfs_hash_runlock(hs);
func(obj, data);
(void)cfs_hash_put(hs, hnode);
goto restart;
}
- write_unlock(&hsb->hsb_rwlock);
+ cfs_write_unlock(&hsb->hsb_rwlock);
}
cfs_hash_runlock(hs);
EXIT;
cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
cfs_hash_for_each_cb_t func, void *data)
{
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
cfs_hash_bucket_t *hsb;
unsigned i;
ENTRY;
hsb = hs->hs_buckets[i];
LASSERT(i <= hs->hs_cur_mask);
- read_lock(&hsb->hsb_rwlock);
- hlist_for_each(hnode, &(hsb->hsb_head)) {
+ cfs_read_lock(&hsb->hsb_rwlock);
+ cfs_hlist_for_each(hnode, &(hsb->hsb_head)) {
__cfs_hash_bucket_validate(hs, hsb, hnode);
if (!cfs_hash_compare(hs, key, hnode))
(void)cfs_hash_put(hs, hnode);
}
- read_unlock(&hsb->hsb_rwlock);
+ cfs_read_unlock(&hsb->hsb_rwlock);
cfs_hash_runlock(hs);
EXIT;
int
cfs_hash_rehash(cfs_hash_t *hs, int bits)
{
- struct hlist_node *hnode;
- struct hlist_node *pos;
+ cfs_hlist_node_t *hnode;
+ cfs_hlist_node_t *pos;
cfs_hash_bucket_t **old_buckets;
cfs_hash_bucket_t **rehash_buckets;
cfs_hash_bucket_t *hs_hsb;
void *key;
ENTRY;
- LASSERT(!in_interrupt());
+ LASSERT(!cfs_in_interrupt());
LASSERT(new_mask > 0);
LASSERT((hs->hs_flags & CFS_HASH_REHASH) != 0);
GOTO(free, rc = -ENOMEM);
CFS_INIT_HLIST_HEAD(&rehash_buckets[i]->hsb_head);
- rwlock_init(&rehash_buckets[i]->hsb_rwlock);
- atomic_set(&rehash_buckets[i]->hsb_count, 0);
+ cfs_rwlock_init(&rehash_buckets[i]->hsb_rwlock);
+ cfs_atomic_set(&rehash_buckets[i]->hsb_count, 0);
}
cfs_hash_wlock(hs);
hs->hs_cur_bits = bits;
hs->hs_cur_mask = (1 << bits) - 1;
hs->hs_buckets = rehash_buckets;
- atomic_inc(&hs->hs_rehash_count);
+ cfs_atomic_inc(&hs->hs_rehash_count);
for (i = 0; i <= old_mask; i++) {
hs_hsb = old_buckets[i];
- write_lock(&hs_hsb->hsb_rwlock);
- hlist_for_each_safe(hnode, pos, &(hs_hsb->hsb_head)) {
+ cfs_write_lock(&hs_hsb->hsb_rwlock);
+ cfs_hlist_for_each_safe(hnode, pos, &(hs_hsb->hsb_head)) {
key = cfs_hash_key(hs, hnode);
LASSERT(key);
/*
* Delete from old hash bucket.
*/
- hlist_del(hnode);
- LASSERT(atomic_read(&hs_hsb->hsb_count) > 0);
- atomic_dec(&hs_hsb->hsb_count);
+ cfs_hlist_del(hnode);
+ LASSERT(cfs_atomic_read(&hs_hsb->hsb_count) > 0);
+ cfs_atomic_dec(&hs_hsb->hsb_count);
/*
* Add to rehash bucket, ops->hs_key must be defined.
*/
rehash_hsb = rehash_buckets[cfs_hash_id(hs, key,
new_mask)];
- hlist_add_head(hnode, &(rehash_hsb->hsb_head));
- atomic_inc(&rehash_hsb->hsb_count);
+ cfs_hlist_add_head(hnode, &(rehash_hsb->hsb_head));
+ cfs_atomic_inc(&rehash_hsb->hsb_count);
}
- LASSERT(hlist_empty(&(hs_hsb->hsb_head)));
- LASSERT(atomic_read(&hs_hsb->hsb_count) == 0);
- write_unlock(&hs_hsb->hsb_rwlock);
+ LASSERT(cfs_hlist_empty(&(hs_hsb->hsb_head)));
+ LASSERT(cfs_atomic_read(&hs_hsb->hsb_count) == 0);
+ cfs_write_unlock(&hs_hsb->hsb_rwlock);
}
cfs_hash_wunlock(hs);
* not be called.
*/
void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key, void *new_key,
- struct hlist_node *hnode)
+ cfs_hlist_node_t *hnode)
{
cfs_hash_bucket_t *old_hsb;
cfs_hash_bucket_t *new_hsb;
ENTRY;
__cfs_hash_key_validate(hs, new_key, hnode);
- LASSERT(!hlist_unhashed(hnode));
+ LASSERT(!cfs_hlist_unhashed(hnode));
cfs_hash_rlock(hs);
LASSERT(j <= hs->hs_cur_mask);
if (i < j) { /* write_lock ordering */
- write_lock(&old_hsb->hsb_rwlock);
- write_lock(&new_hsb->hsb_rwlock);
+ cfs_write_lock(&old_hsb->hsb_rwlock);
+ cfs_write_lock(&new_hsb->hsb_rwlock);
} else if (i > j) {
- write_lock(&new_hsb->hsb_rwlock);
- write_lock(&old_hsb->hsb_rwlock);
+ cfs_write_lock(&new_hsb->hsb_rwlock);
+ cfs_write_lock(&old_hsb->hsb_rwlock);
} else { /* do nothing */
- read_unlock(&hs->hs_rwlock);
+ cfs_read_unlock(&hs->hs_rwlock);
EXIT;
return;
}
* Migrate item between hash buckets without calling
* the cfs_hash_get() and cfs_hash_put() callback functions.
*/
- hlist_del(hnode);
- LASSERT(atomic_read(&old_hsb->hsb_count) > 0);
- atomic_dec(&old_hsb->hsb_count);
- hlist_add_head(hnode, &(new_hsb->hsb_head));
- atomic_inc(&new_hsb->hsb_count);
-
- write_unlock(&new_hsb->hsb_rwlock);
- write_unlock(&old_hsb->hsb_rwlock);
+ cfs_hlist_del(hnode);
+ LASSERT(cfs_atomic_read(&old_hsb->hsb_count) > 0);
+ cfs_atomic_dec(&old_hsb->hsb_count);
+ cfs_hlist_add_head(hnode, &(new_hsb->hsb_head));
+ cfs_atomic_inc(&new_hsb->hsb_count);
+
+ cfs_write_unlock(&new_hsb->hsb_rwlock);
+ cfs_write_unlock(&old_hsb->hsb_rwlock);
cfs_hash_runlock(hs);
EXIT;
__cfs_hash_theta_frac(hs->hs_max_theta));
c += snprintf(str + c, size - c, " 0x%02x ", hs->hs_flags);
c += snprintf(str + c, size - c, "%6d ",
- atomic_read(&hs->hs_rehash_count));
+ cfs_atomic_read(&hs->hs_rehash_count));
c += snprintf(str + c, size - c, "%5d ",
- atomic_read(&hs->hs_count));
+ cfs_atomic_read(&hs->hs_count));
/*
* The distribution is a summary of the chained hash depth in
* Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
*/
cfs_hash_for_each_bucket(hs, hsb, i)
- dist[min(__fls(atomic_read(&hsb->hsb_count)/max(theta,1)),7)]++;
+ dist[min(__cfs_fls(cfs_atomic_read(&hsb->hsb_count)/max(theta,1)),7)]++;
for (i = 0; i < 8; i++)
c += snprintf(str + c, size - c, "%d%c", dist[i],
#include <linux/kallsyms.h>
#endif
+/* We need to pass a pointer here, but elsewhere this must be a const */
+static char *debug_file_path = &libcfs_debug_file_path_arr[0];
+CFS_MODULE_PARM(debug_file_path, "s", charp, 0644,
+ "Path for dumping debug logs (deprecated)");
+
char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
{
libcfs_catastrophe = 1;
libcfs_debug_msg(NULL, 0, D_EMERG, file, func, line,
- "LBUG - trying to dump log to %s\n", debug_file_path);
+ "LBUG - trying to dump log to %s\n",
+ libcfs_debug_file_path);
libcfs_debug_dumplog();
libcfs_run_lbug_upcall(file, func, line);
asm("int $3");
* console on the rare cases it is ever triggered. */
if (in_interrupt()) {
- trace_debug_print();
+ cfs_trace_debug_print();
} else {
while (current->lock_depth >= 0)
unlock_kernel();
int rc;
rc = PTR_ERR(filp);
- printk(KERN_ERR "LustreError: can't open %s file: err %d\n",
- name, rc);
+ printk(KERN_ERR "LustreError: can't open %s file: err %d\n",
+ name, rc);
if (err)
*err = rc;
filp = NULL;
if (data->ioc_inllen2)
data->ioc_inlbuf2 = &data->ioc_bulk[0] +
- size_round(data->ioc_inllen1);
+ cfs_size_round(data->ioc_inllen1);
RETURN(0);
}
void
cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
+ cfs_waitlink_t *link)
{
add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
}
EXPORT_SYMBOL(cfs_waitq_wait);
int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state, int64_t timeout)
+cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
+ int64_t timeout)
{
return schedule_timeout(timeout);
}
EXPORT_SYMBOL(cfs_waitq_timedwait);
void
-cfs_schedule_timeout(cfs_task_state_t state, int64_t timeout)
+cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
{
set_current_state(state);
schedule_timeout(timeout);
}
+EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
+
+void
+cfs_schedule_timeout(int64_t timeout)
+{
+ schedule_timeout(timeout);
+}
EXPORT_SYMBOL(cfs_schedule_timeout);
void
int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
int is_printk = (mask == &libcfs_printk) ? 1 : 0;
- rc = trace_allocate_string_buffer(&tmpstr, tmpstrlen);
+ rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
if (rc < 0)
return rc;
if (pos >= rc) {
rc = 0;
} else {
- rc = trace_copyout_string(buffer, nob,
- tmpstr + pos, "\n");
+ rc = cfs_trace_copyout_string(buffer, nob,
+ tmpstr + pos, "\n");
}
} else {
- rc = trace_copyin_string(tmpstr, tmpstrlen, buffer, nob);
+ rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob);
if (rc < 0)
return rc;
*mask |= D_EMERG;
}
- trace_free_string_buffer(tmpstr, tmpstrlen);
+ cfs_trace_free_string_buffer(tmpstr, tmpstrlen);
return rc;
}
if (!write)
return 0;
- return trace_dump_debug_buffer_usrstr(buffer, nob);
+ return cfs_trace_dump_debug_buffer_usrstr(buffer, nob);
}
DECLARE_PROC_HANDLER(proc_dump_kernel)
loff_t pos, void *buffer, int nob)
{
if (!write) {
- int len = strlen(tracefile);
+ int len = strlen(cfs_tracefile);
if (pos >= len)
return 0;
- return trace_copyout_string(buffer, nob,
- tracefile + pos, "\n");
+ return cfs_trace_copyout_string(buffer, nob,
+ cfs_tracefile + pos, "\n");
}
- return trace_daemon_command_usrstr(buffer, nob);
+ return cfs_trace_daemon_command_usrstr(buffer, nob);
}
DECLARE_PROC_HANDLER(proc_daemon_file)
if (!write) {
char tmpstr[32];
int len = snprintf(tmpstr, sizeof(tmpstr), "%d",
- trace_get_debug_mb());
+ cfs_trace_get_debug_mb());
if (pos >= len)
return 0;
- return trace_copyout_string(buffer, nob, tmpstr + pos, "\n");
+ return cfs_trace_copyout_string(buffer, nob, tmpstr + pos,
+ "\n");
}
- return trace_set_debug_mb_usrstr(buffer, nob);
+ return cfs_trace_set_debug_mb_usrstr(buffer, nob);
}
DECLARE_PROC_HANDLER(proc_debug_mb)
{
.ctl_name = PSDEV_DEBUG_PATH,
.procname = "debug_path",
- .data = debug_file_path_arr,
- .maxlen = sizeof(debug_file_path_arr),
+ .data = libcfs_debug_file_path_arr,
+ .maxlen = sizeof(libcfs_debug_file_path_arr),
.mode = 0644,
.proc_handler = &proc_dostring,
},
struct socket *sock;
sock = sock_alloc();
- if (sock == NULL)
+ if (sock == NULL)
return -ENOMEM;
sock->type = type;
#include "tracefile.h"
/* percents to share the total debug memory for each type */
-static unsigned int pages_factor[TCD_TYPE_MAX] = {
- 80, /* 80% pages for TCD_TYPE_PROC */
- 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
- 10 /* 10% pages for TCD_TYPE_IRQ */
+static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
+ 80, /* 80% pages for CFS_TCD_TYPE_PROC */
+ 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
+ 10 /* 10% pages for CFS_TCD_TYPE_IRQ */
};
-char *trace_console_buffers[NR_CPUS][TCD_TYPE_MAX];
+char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-struct rw_semaphore tracefile_sem;
+cfs_rw_semaphore_t cfs_tracefile_sem;
-int tracefile_init_arch()
+int cfs_tracefile_init_arch()
{
int i;
int j;
- struct trace_cpu_data *tcd;
+ struct cfs_trace_cpu_data *tcd;
- init_rwsem(&tracefile_sem);
+ cfs_init_rwsem(&cfs_tracefile_sem);
/* initialize trace_data */
- memset(trace_data, 0, sizeof(trace_data));
- for (i = 0; i < TCD_TYPE_MAX; i++) {
- trace_data[i]=kmalloc(sizeof(union trace_data_union)*NR_CPUS,
- GFP_KERNEL);
- if (trace_data[i] == NULL)
+ memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
+ for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
+ cfs_trace_data[i] =
+ kmalloc(sizeof(union cfs_trace_data_union) * NR_CPUS,
+ GFP_KERNEL);
+ if (cfs_trace_data[i] == NULL)
goto out;
}
/* arch related info initialized */
- tcd_for_each(tcd, i, j) {
- spin_lock_init(&tcd->tcd_lock);
+ cfs_tcd_for_each(tcd, i, j) {
+ cfs_spin_lock_init(&tcd->tcd_lock);
tcd->tcd_pages_factor = pages_factor[i];
tcd->tcd_type = i;
tcd->tcd_cpu = j;
for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++) {
- trace_console_buffers[i][j] =
- kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
- GFP_KERNEL);
+ cfs_trace_console_buffers[i][j] =
+ kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
+ GFP_KERNEL);
- if (trace_console_buffers[i][j] == NULL)
+ if (cfs_trace_console_buffers[i][j] == NULL)
goto out;
}
return 0;
out:
- tracefile_fini_arch();
- printk(KERN_ERR "lnet: No enough memory\n");
+ cfs_tracefile_fini_arch();
+ printk(KERN_ERR "lnet: Not enough memory\n");
return -ENOMEM;
}
-void tracefile_fini_arch()
+void cfs_tracefile_fini_arch()
{
int i;
int j;
for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++)
- if (trace_console_buffers[i][j] != NULL) {
- kfree(trace_console_buffers[i][j]);
- trace_console_buffers[i][j] = NULL;
+ if (cfs_trace_console_buffers[i][j] != NULL) {
+ kfree(cfs_trace_console_buffers[i][j]);
+ cfs_trace_console_buffers[i][j] = NULL;
}
- for (i = 0; trace_data[i] != NULL; i++) {
- kfree(trace_data[i]);
- trace_data[i] = NULL;
+ for (i = 0; cfs_trace_data[i] != NULL; i++) {
+ kfree(cfs_trace_data[i]);
+ cfs_trace_data[i] = NULL;
}
- fini_rwsem(&tracefile_sem);
+ cfs_fini_rwsem(&cfs_tracefile_sem);
}
-void tracefile_read_lock()
+void cfs_tracefile_read_lock()
{
- down_read(&tracefile_sem);
+ cfs_down_read(&cfs_tracefile_sem);
}
-void tracefile_read_unlock()
+void cfs_tracefile_read_unlock()
{
- up_read(&tracefile_sem);
+ cfs_up_read(&cfs_tracefile_sem);
}
-void tracefile_write_lock()
+void cfs_tracefile_write_lock()
{
- down_write(&tracefile_sem);
+ cfs_down_write(&cfs_tracefile_sem);
}
-void tracefile_write_unlock()
+void cfs_tracefile_write_unlock()
{
- up_write(&tracefile_sem);
+ cfs_up_write(&cfs_tracefile_sem);
}
-trace_buf_type_t
-trace_buf_idx_get()
+cfs_trace_buf_type_t cfs_trace_buf_idx_get()
{
if (in_irq())
- return TCD_TYPE_IRQ;
+ return CFS_TCD_TYPE_IRQ;
else if (in_softirq())
- return TCD_TYPE_SOFTIRQ;
+ return CFS_TCD_TYPE_SOFTIRQ;
else
- return TCD_TYPE_PROC;
+ return CFS_TCD_TYPE_PROC;
}
-int trace_lock_tcd(struct trace_cpu_data *tcd)
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd)
{
- __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
- if (tcd->tcd_type == TCD_TYPE_IRQ)
- spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
- spin_lock_bh(&tcd->tcd_lock);
+ __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ cfs_spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ cfs_spin_lock_bh(&tcd->tcd_lock);
else
- spin_lock(&tcd->tcd_lock);
+ cfs_spin_lock(&tcd->tcd_lock);
return 1;
}
-void trace_unlock_tcd(struct trace_cpu_data *tcd)
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd)
{
- __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
- if (tcd->tcd_type == TCD_TYPE_IRQ)
- spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
- spin_unlock_bh(&tcd->tcd_lock);
+ __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ cfs_spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ cfs_spin_unlock_bh(&tcd->tcd_lock);
else
- spin_unlock(&tcd->tcd_lock);
+ cfs_spin_unlock(&tcd->tcd_lock);
}
-int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
+int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
+ struct cfs_trace_page *tage)
{
/*
* XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
}
void
-set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
- const int line, unsigned long stack)
+cfs_set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
+ const int line, unsigned long stack)
{
struct timeval tv;
return;
}
-void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
- int len, const char *file, const char *fn)
+void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+ const char *buf, int len, const char *file,
+ const char *fn)
{
char *prefix = "Lustre", *ptype = NULL;
if ((mask & D_CONSOLE) != 0) {
printk("%s%s: %.*s", ptype, prefix, len, buf);
} else {
- printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, hdr->ph_pid,
- hdr->ph_extern_pid, file, hdr->ph_line_num, fn, len, buf);
+ printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
+ hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
+ fn, len, buf);
}
return;
}
-int trace_max_debug_mb(void)
+int cfs_trace_max_debug_mb(void)
{
- int total_mb = (num_physpages >> (20 - CFS_PAGE_SHIFT));
-
+ int total_mb = (cfs_num_physpages >> (20 - PAGE_SHIFT));
+
return MAX(512, (total_mb * 80)/100);
}
* three types of trace_data in linux
*/
typedef enum {
- TCD_TYPE_PROC = 0,
- TCD_TYPE_SOFTIRQ,
- TCD_TYPE_IRQ,
- TCD_TYPE_MAX
-} trace_buf_type_t;
+ CFS_TCD_TYPE_PROC = 0,
+ CFS_TCD_TYPE_SOFTIRQ,
+ CFS_TCD_TYPE_IRQ,
+ CFS_TCD_TYPE_MAX
+} cfs_trace_buf_type_t;
#endif
#if !KLWT_SUPPORT
int lwt_enabled;
-lwt_cpu_t lwt_cpus[NR_CPUS];
+lwt_cpu_t lwt_cpus[CFS_NR_CPUS];
#endif
int lwt_pages_per_cpu;
char *user_ptr, int user_size)
{
int maxsize = 128;
-
+
/* knl_ptr was retrieved from an LWT snapshot and the caller wants to
* turn it into a string. NB we can crash with an access violation
* trying to determine the string length, so we're trusting our
maxsize = user_size;
*size = strnlen (knl_ptr, maxsize - 1) + 1;
-
+
if (user_ptr != NULL) {
if (user_size < 4)
return (-EINVAL);
-
- if (copy_to_user (user_ptr, knl_ptr, *size))
+
+ if (cfs_copy_to_user (user_ptr, knl_ptr, *size))
return (-EFAULT);
/* Did I truncate the string? */
if (knl_ptr[*size - 1] != 0)
- copy_to_user (user_ptr + *size - 4, "...", 4);
+ cfs_copy_to_user (user_ptr + *size - 4, "...", 4);
}
return (0);
if (!enable) {
LWT_EVENT(0,0,0,0);
lwt_enabled = 0;
- mb();
+ cfs_mb();
/* give people some time to stop adding traces */
- schedule_timeout(10);
+ cfs_schedule_timeout(10);
}
- for (i = 0; i < num_online_cpus(); i++) {
+ for (i = 0; i < cfs_num_online_cpus(); i++) {
p = lwt_cpus[i].lwtc_current_page;
if (p == NULL)
for (j = 0; j < lwt_pages_per_cpu; j++) {
memset (p->lwtp_events, 0, CFS_PAGE_SIZE);
- p = list_entry (p->lwtp_list.next,
- lwt_page_t, lwtp_list);
+ p = cfs_list_entry (p->lwtp_list.next,
+ lwt_page_t, lwtp_list);
}
}
if (enable) {
lwt_enabled = 1;
- mb();
+ cfs_mb();
LWT_EVENT(0,0,0,0);
}
}
int
-lwt_snapshot (cycles_t *now, int *ncpu, int *total_size,
+lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
void *user_ptr, int user_size)
{
const int events_per_page = CFS_PAGE_SIZE / sizeof(lwt_event_t);
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
return (-EPERM);
- *ncpu = num_online_cpus();
- *total_size = num_online_cpus() * lwt_pages_per_cpu * bytes_per_page;
+ *ncpu = cfs_num_online_cpus();
+ *total_size = cfs_num_online_cpus() * lwt_pages_per_cpu *
+ bytes_per_page;
*now = get_cycles();
-
+
if (user_ptr == NULL)
return (0);
- for (i = 0; i < num_online_cpus(); i++) {
+ for (i = 0; i < cfs_num_online_cpus(); i++) {
p = lwt_cpus[i].lwtc_current_page;
if (p == NULL)
return (-ENODATA);
-
+
for (j = 0; j < lwt_pages_per_cpu; j++) {
- if (copy_to_user(user_ptr, p->lwtp_events,
- bytes_per_page))
+ if (cfs_copy_to_user(user_ptr, p->lwtp_events,
+ bytes_per_page))
return (-EFAULT);
user_ptr = ((char *)user_ptr) + bytes_per_page;
- p = list_entry(p->lwtp_list.next,
- lwt_page_t, lwtp_list);
-
+ p = cfs_list_entry(p->lwtp_list.next,
+ lwt_page_t, lwtp_list);
}
}
}
int
-lwt_init ()
+lwt_init ()
{
int i;
int j;
- for (i = 0; i < num_online_cpus(); i++)
+ for (i = 0; i < cfs_num_online_cpus(); i++)
if (lwt_cpus[i].lwtc_current_page != NULL)
return (-EALREADY);
-
+
LASSERT (!lwt_enabled);
/* NULL pointers, zero scalars */
memset (lwt_cpus, 0, sizeof (lwt_cpus));
- lwt_pages_per_cpu = LWT_MEMORY / (num_online_cpus() * CFS_PAGE_SIZE);
+ lwt_pages_per_cpu =
+ LWT_MEMORY / (cfs_num_online_cpus() * CFS_PAGE_SIZE);
- for (i = 0; i < num_online_cpus(); i++)
+ for (i = 0; i < cfs_num_online_cpus(); i++)
for (j = 0; j < lwt_pages_per_cpu; j++) {
struct page *page = alloc_page (GFP_KERNEL);
lwt_page_t *lwtp;
memset (lwtp->lwtp_events, 0, CFS_PAGE_SIZE);
if (j == 0) {
- INIT_LIST_HEAD (&lwtp->lwtp_list);
+ CFS_INIT_LIST_HEAD (&lwtp->lwtp_list);
lwt_cpus[i].lwtc_current_page = lwtp;
} else {
- list_add (&lwtp->lwtp_list,
+ cfs_list_add (&lwtp->lwtp_list,
&lwt_cpus[i].lwtc_current_page->lwtp_list);
}
}
lwt_enabled = 1;
- mb();
+ cfs_mb();
LWT_EVENT(0,0,0,0);
}
void
-lwt_fini ()
+lwt_fini ()
{
int i;
lwt_control(0, 0);
-
- for (i = 0; i < num_online_cpus(); i++)
+
+ for (i = 0; i < cfs_num_online_cpus(); i++)
while (lwt_cpus[i].lwtc_current_page != NULL) {
lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
-
- if (list_empty (&lwtp->lwtp_list)) {
+
+ if (cfs_list_empty (&lwtp->lwtp_list)) {
lwt_cpus[i].lwtc_current_page = NULL;
} else {
lwt_cpus[i].lwtc_current_page =
- list_entry (lwtp->lwtp_list.next,
- lwt_page_t, lwtp_list);
+ cfs_list_entry (lwtp->lwtp_list.next,
+ lwt_page_t, lwtp_list);
- list_del (&lwtp->lwtp_list);
+ cfs_list_del (&lwtp->lwtp_list);
}
__free_page (lwtp->lwtp_page);
RETURN(0);
}
-static struct rw_semaphore ioctl_list_sem;
-static struct list_head ioctl_list;
+static cfs_rw_semaphore_t ioctl_list_sem;
+static cfs_list_t ioctl_list;
int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
{
int rc = 0;
- down_write(&ioctl_list_sem);
- if (!list_empty(&hand->item))
+ cfs_down_write(&ioctl_list_sem);
+ if (!cfs_list_empty(&hand->item))
rc = -EBUSY;
else
- list_add_tail(&hand->item, &ioctl_list);
- up_write(&ioctl_list_sem);
+ cfs_list_add_tail(&hand->item, &ioctl_list);
+ cfs_up_write(&ioctl_list_sem);
return rc;
}
{
int rc = 0;
- down_write(&ioctl_list_sem);
- if (list_empty(&hand->item))
+ cfs_down_write(&ioctl_list_sem);
+ if (cfs_list_empty(&hand->item))
rc = -ENOENT;
else
- list_del_init(&hand->item);
- up_write(&ioctl_list_sem);
+ cfs_list_del_init(&hand->item);
+ cfs_up_write(&ioctl_list_sem);
return rc;
}
break;
case IOC_LIBCFS_LWT_SNAPSHOT: {
- cycles_t now;
- int ncpu;
- int total_size;
+ cfs_cycles_t now;
+ int ncpu;
+ int total_size;
err = lwt_snapshot (&now, &ncpu, &total_size,
data->ioc_pbuf1, data->ioc_plen1);
default: {
struct libcfs_ioctl_handler *hand;
err = -EINVAL;
- down_read(&ioctl_list_sem);
+ cfs_down_read(&ioctl_list_sem);
cfs_list_for_each_entry_typed(hand, &ioctl_list,
struct libcfs_ioctl_handler, item) {
err = hand->handle_ioctl(cmd, data);
break;
}
}
- up_read(&ioctl_list_sem);
+ cfs_up_read(&ioctl_list_sem);
break;
}
}
MODULE_LICENSE("GPL");
extern cfs_psdev_t libcfs_dev;
-extern struct rw_semaphore tracefile_sem;
-extern struct semaphore trace_thread_sem;
+extern cfs_rw_semaphore_t cfs_tracefile_sem;
+extern cfs_semaphore_t cfs_trace_thread_sem;
extern void libcfs_init_nidstrings(void);
extern int libcfs_arch_init(void);
libcfs_arch_init();
libcfs_init_nidstrings();
- init_rwsem(&tracefile_sem);
- init_mutex(&trace_thread_sem);
- init_rwsem(&ioctl_list_sem);
+ cfs_init_rwsem(&cfs_tracefile_sem);
+ cfs_init_mutex(&cfs_trace_thread_sem);
+ cfs_init_rwsem(&ioctl_list_sem);
CFS_INIT_LIST_HEAD(&ioctl_list);
rc = libcfs_debug_init(5 * 1024 * 1024);
if (rc < 0) {
- printk(KERN_ERR "LustreError: libcfs_debug_init: %d\n", rc);
+ printk(CFS_KERN_ERR "LustreError: libcfs_debug_init: %d\n", rc);
return (rc);
}
remove_proc();
CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
rc = cfs_psdev_deregister(&libcfs_dev);
if (rc)
lwt_fini();
#endif
- if (atomic_read(&libcfs_kmemory) != 0)
+ if (cfs_atomic_read(&libcfs_kmemory) != 0)
CERROR("Portals memory leaked: %d bytes\n",
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
rc = libcfs_debug_cleanup();
if (rc)
- printk(KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n", rc);
+ printk(CFS_KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n",
+ rc);
- fini_rwsem(&ioctl_list_sem);
- fini_rwsem(&tracefile_sem);
+ cfs_fini_rwsem(&ioctl_list_sem);
+ cfs_fini_rwsem(&cfs_tracefile_sem);
libcfs_arch_cleanup();
}
static int libcfs_nidstring_idx = 0;
#ifdef __KERNEL__
-static spinlock_t libcfs_nidstring_lock;
+static cfs_spinlock_t libcfs_nidstring_lock;
void libcfs_init_nidstrings (void)
{
- spin_lock_init(&libcfs_nidstring_lock);
+ cfs_spin_lock_init(&libcfs_nidstring_lock);
}
-# define NIDSTR_LOCK(f) spin_lock_irqsave(&libcfs_nidstring_lock, f)
-# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
+# define NIDSTR_LOCK(f) cfs_spin_lock_irqsave(&libcfs_nidstring_lock, f)
+# define NIDSTR_UNLOCK(f) cfs_spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
#else
# define NIDSTR_LOCK(f) (f=0) /* avoid unused var warnings */
# define NIDSTR_UNLOCK(f) (f=0)
static void libcfs_decnum_addr2str(__u32 addr, char *str);
static void libcfs_hexnum_addr2str(__u32 addr, char *str);
static int libcfs_num_str2addr(const char *str, int nob, __u32 *addr);
-static int libcfs_ip_parse(char *str, int len, struct list_head *list);
-static int libcfs_num_parse(char *str, int len, struct list_head *list);
-static int libcfs_ip_match(__u32 addr, struct list_head *list);
-static int libcfs_num_match(__u32 addr, struct list_head *list);
+static int libcfs_ip_parse(char *str, int len, cfs_list_t *list);
+static int libcfs_num_parse(char *str, int len, cfs_list_t *list);
+static int libcfs_ip_match(__u32 addr, cfs_list_t *list);
+static int libcfs_num_match(__u32 addr, cfs_list_t *list);
struct netstrfns {
int nf_type;
void (*nf_addr2str)(__u32 addr, char *str);
int (*nf_str2addr)(const char *str, int nob, __u32 *addr);
int (*nf_parse_addrlist)(char *str, int len,
- struct list_head *list);
- int (*nf_match_addr)(__u32 addr, struct list_head *list);
+ cfs_list_t *list);
+ int (*nf_match_addr)(__u32 addr, cfs_list_t *list);
};
static struct netstrfns libcfs_netstrfns[] = {
* Link to list of this structures which is built on nid range
* list parsing.
*/
- struct list_head nr_link;
+ cfs_list_t nr_link;
/**
* List head for addrrange::ar_link.
*/
- struct list_head nr_addrranges;
+ cfs_list_t nr_addrranges;
/**
* Flag indicating that *@<net> is found.
*/
/**
* Link to nidrange::nr_addrranges.
*/
- struct list_head ar_link;
+ cfs_list_t ar_link;
/**
* List head for numaddr_range::nar_link.
*/
- struct list_head ar_numaddr_ranges;
+ cfs_list_t ar_numaddr_ranges;
};
/**
/**
* Link to addrrange::ar_numaddr_ranges.
*/
- struct list_head nar_link;
+ cfs_list_t nar_link;
/**
* List head for range_expr::re_link.
*/
- struct list_head nar_range_exprs;
+ cfs_list_t nar_range_exprs;
};
/**
/**
* Link to numaddr_range::nar_range_exprs.
*/
- struct list_head re_link;
+ cfs_list_t re_link;
__u32 re_lo;
__u32 re_hi;
__u32 re_stride;
* \retval 0 otherwise
*/
static int
-parse_expr_list(struct lstr *str, struct list_head *list,
+parse_expr_list(struct lstr *str, cfs_list_t *list,
unsigned min, unsigned max)
{
struct lstr res;
range = parse_range_expr(&res, min, max);
if (range == NULL)
return 0;
- list_add_tail(&range->re_link, list);
+ cfs_list_add_tail(&range->re_link, list);
}
return 1;
}
*/
static int
num_parse(char *str, int len,
- struct list_head *list, unsigned min, unsigned max)
+ cfs_list_t *list, unsigned min, unsigned max)
{
__u32 num;
struct lstr src;
LIBCFS_ALLOC(numaddr, sizeof(struct numaddr_range));
if (numaddr == NULL)
return 0;
- list_add_tail(&numaddr->nar_link, list);
+ cfs_list_add_tail(&numaddr->nar_link, list);
CFS_INIT_LIST_HEAD(&numaddr->nar_range_exprs);
if (libcfs_str2num_check(src.ls_str, src.ls_len, &num, min, max)) {
expr->re_lo = expr->re_hi = num;
expr->re_stride = 1;
- list_add_tail(&expr->re_link, &numaddr->nar_range_exprs);
+ cfs_list_add_tail(&expr->re_link, &numaddr->nar_range_exprs);
return 1;
}
* \retval 0 otherwise
*/
static int
-libcfs_num_parse(char *str, int len, struct list_head *list)
+libcfs_num_parse(char *str, int len, cfs_list_t *list)
{
return num_parse(str, len, list, 0, MAX_NUMERIC_VALUE);
}
*/
static int
libcfs_ip_parse(char *str, int len,
- struct list_head *list)
+ cfs_list_t *list)
{
struct lstr src, res;
int i;
LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
if (addrrange == NULL)
return 0;
- list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
+ cfs_list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
CFS_INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str,
*/
static struct nidrange *
add_nidrange(const struct lstr *src,
- struct list_head *nidlist)
+ cfs_list_t *nidlist)
{
struct netstrfns *nf;
struct nidrange *nr;
return NULL;
}
- list_for_each_entry(nr, nidlist, nr_link) {
+ cfs_list_for_each_entry(nr, nidlist, nr_link) {
if (nr->nr_netstrfns != nf)
continue;
if (nr->nr_netnum != netnum)
LIBCFS_ALLOC(nr, sizeof(struct nidrange));
if (nr == NULL)
return NULL;
- list_add_tail(&nr->nr_link, nidlist);
+ cfs_list_add_tail(&nr->nr_link, nidlist);
CFS_INIT_LIST_HEAD(&nr->nr_addrranges);
nr->nr_netstrfns = nf;
nr->nr_all = 0;
* \retval 0 otherwise
*/
static int
-parse_nidrange(struct lstr *src, struct list_head *nidlist)
+parse_nidrange(struct lstr *src, cfs_list_t *nidlist)
{
struct lstr addrrange, net, tmp;
struct nidrange *nr;
* \retval none
*/
static void
-free_range_exprs(struct list_head *list)
+free_range_exprs(cfs_list_t *list)
{
- struct list_head *pos, *next;
+ cfs_list_t *pos, *next;
- list_for_each_safe(pos, next, list) {
- list_del(pos);
- LIBCFS_FREE(list_entry(pos, struct range_expr, re_link),
+ cfs_list_for_each_safe(pos, next, list) {
+ cfs_list_del(pos);
+ LIBCFS_FREE(cfs_list_entry(pos, struct range_expr, re_link),
sizeof(struct range_expr));
}
}
* \retval none
*/
static void
-free_numaddr_ranges(struct list_head *list)
+free_numaddr_ranges(cfs_list_t *list)
{
- struct list_head *pos, *next;
+ cfs_list_t *pos, *next;
struct numaddr_range *numaddr;
- list_for_each_safe(pos, next, list) {
- numaddr = list_entry(pos, struct numaddr_range, nar_link);
+ cfs_list_for_each_safe(pos, next, list) {
+ numaddr = cfs_list_entry(pos, struct numaddr_range, nar_link);
free_range_exprs(&numaddr->nar_range_exprs);
- list_del(pos);
+ cfs_list_del(pos);
LIBCFS_FREE(numaddr, sizeof(struct numaddr_range));
}
}
* \retval none
*/
static void
-free_addrranges(struct list_head *list)
+free_addrranges(cfs_list_t *list)
{
- struct list_head *pos, *next;
+ cfs_list_t *pos, *next;
struct addrrange *ar;
- list_for_each_safe(pos, next, list) {
- ar = list_entry(pos, struct addrrange, ar_link);
+ cfs_list_for_each_safe(pos, next, list) {
+ ar = cfs_list_entry(pos, struct addrrange, ar_link);
free_numaddr_ranges(&ar->ar_numaddr_ranges);
- list_del(pos);
+ cfs_list_del(pos);
LIBCFS_FREE(ar, sizeof(struct addrrange));
}
}
* \retval none
*/
void
-cfs_free_nidlist(struct list_head *list)
+cfs_free_nidlist(cfs_list_t *list)
{
- struct list_head *pos, *next;
+ cfs_list_t *pos, *next;
struct nidrange *nr;
- list_for_each_safe(pos, next, list) {
- nr = list_entry(pos, struct nidrange, nr_link);
+ cfs_list_for_each_safe(pos, next, list) {
+ nr = cfs_list_entry(pos, struct nidrange, nr_link);
free_addrranges(&nr->nr_addrranges);
- list_del(pos);
+ cfs_list_del(pos);
LIBCFS_FREE(nr, sizeof(struct nidrange));
}
}
* \retval 0 otherwise
*/
int
-cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
+cfs_parse_nidlist(char *str, int len, cfs_list_t *nidlist)
{
struct lstr src, res;
int rc;
* \retval 0 otherwise
*/
static int
-match_numaddr(__u32 addr, struct list_head *list, int shift, __u32 mask)
+match_numaddr(__u32 addr, cfs_list_t *list, int shift, __u32 mask)
{
struct numaddr_range *numaddr;
struct range_expr *expr;
int ip, ok;
ENTRY;
- list_for_each_entry(numaddr, list, nar_link) {
+ cfs_list_for_each_entry(numaddr, list, nar_link) {
ip = (addr >> shift) & mask;
shift -= 8;
ok = 0;
- list_for_each_entry(expr, &numaddr->nar_range_exprs, re_link) {
+ cfs_list_for_each_entry(expr, &numaddr->nar_range_exprs,
+ re_link) {
if (ip >= expr->re_lo &&
ip <= expr->re_hi &&
((ip - expr->re_lo) % expr->re_stride) == 0) {
* \retval 0 otherwise
*/
static int
-libcfs_num_match(__u32 addr, struct list_head *numaddr)
+libcfs_num_match(__u32 addr, cfs_list_t *numaddr)
{
return match_numaddr(addr, numaddr, 0, 0xffffffff);
}
* \retval 0 otherwise
*/
static int
-libcfs_ip_match(__u32 addr, struct list_head *numaddr)
+libcfs_ip_match(__u32 addr, cfs_list_t *numaddr)
{
return match_numaddr(addr, numaddr, 24, 0xff);
}
* \retval 1 on match
* \retval 0 otherwises
*/
-int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
+int cfs_match_nid(lnet_nid_t nid, cfs_list_t *nidlist)
{
struct nidrange *nr;
struct addrrange *ar;
ENTRY;
- list_for_each_entry(nr, nidlist, nr_link) {
+ cfs_list_for_each_entry(nr, nidlist, nr_link) {
if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
continue;
if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
continue;
if (nr->nr_all)
RETURN(1);
- list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
+ cfs_list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
&ar->ar_numaddr_ranges))
RETURN(1);
#endif /* HAVE_CATAMOUNT_DATA_H */
static int source_pid;
-int smp_processor_id = 1;
-char debug_file_path[1024];
+int cfs_smp_processor_id = 1;
+char libcfs_debug_file_path[1024];
FILE *debug_file_fd;
int portals_do_debug_dumplog(void *arg)
debug_filename = getenv("LIBLUSTRE_DEBUG_BASE");
if (debug_filename)
- strncpy(debug_file_path,debug_filename,sizeof(debug_file_path));
+ strncpy(libcfs_debug_file_path, debug_filename,
+ sizeof(libcfs_debug_file_path));
debug_filename = getenv("LIBLUSTRE_DEBUG_FILE");
if (debug_filename)
strncpy(debug_file_name,debug_filename,sizeof(debug_file_name));
- if (debug_file_name[0] == '\0' && debug_file_path[0] != '\0')
+ if (debug_file_name[0] == '\0' && libcfs_debug_file_path[0] != '\0')
snprintf(debug_file_name, sizeof(debug_file_name) - 1,
- "%s-%s-"CFS_TIME_T".log", debug_file_path, source_nid, time(0));
+ "%s-%s-"CFS_TIME_T".log", libcfs_debug_file_path,
+ source_nid, time(0));
if (strcmp(debug_file_name, "stdout") == 0 ||
strcmp(debug_file_name, "-") == 0) {
#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
-union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
+union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cacheline_aligned;
-char tracefile[TRACEFILE_NAME_SIZE];
-long long tracefile_size = TRACEFILE_SIZE;
+char cfs_tracefile[TRACEFILE_NAME_SIZE];
+long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
-struct semaphore trace_thread_sem;
+cfs_semaphore_t cfs_trace_thread_sem;
static int thread_running = 0;
-atomic_t tage_allocated = ATOMIC_INIT(0);
+cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct trace_cpu_data *tcd);
+ struct cfs_trace_cpu_data *tcd);
-static inline struct trace_page *tage_from_list(struct list_head *list)
+static inline struct cfs_trace_page *
+cfs_tage_from_list(cfs_list_t *list)
{
- return list_entry(list, struct trace_page, linkage);
+ return cfs_list_entry(list, struct cfs_trace_page, linkage);
}
-static struct trace_page *tage_alloc(int gfp)
+static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
- cfs_page_t *page;
- struct trace_page *tage;
+ cfs_page_t *page;
+ struct cfs_trace_page *tage;
/*
* Don't spam console with allocation failures: they will be reported
}
tage->page = page;
- atomic_inc(&tage_allocated);
+ cfs_atomic_inc(&cfs_tage_allocated);
return tage;
}
-static void tage_free(struct trace_page *tage)
+static void cfs_tage_free(struct cfs_trace_page *tage)
{
__LASSERT(tage != NULL);
__LASSERT(tage->page != NULL);
cfs_free_page(tage->page);
cfs_free(tage);
- atomic_dec(&tage_allocated);
+ cfs_atomic_dec(&cfs_tage_allocated);
}
-static void tage_to_tail(struct trace_page *tage, struct list_head *queue)
+static void cfs_tage_to_tail(struct cfs_trace_page *tage,
+ cfs_list_t *queue)
{
__LASSERT(tage != NULL);
__LASSERT(queue != NULL);
- list_move_tail(&tage->linkage, queue);
+ cfs_list_move_tail(&tage->linkage, queue);
}
-int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
- struct list_head *stock)
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+ cfs_list_t *stock)
{
int i;
*/
for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) {
- struct trace_page *tage;
+ struct cfs_trace_page *tage;
- tage = tage_alloc(gfp);
+ tage = cfs_tage_alloc(gfp);
if (tage == NULL)
break;
- list_add_tail(&tage->linkage, stock);
+ cfs_list_add_tail(&tage->linkage, stock);
}
return i;
}
/* return a page that has 'len' bytes left at the end */
-static struct trace_page *trace_get_tage_try(struct trace_cpu_data *tcd,
- unsigned long len)
+static struct cfs_trace_page *
+cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
- struct trace_page *tage;
+ struct cfs_trace_page *tage;
if (tcd->tcd_cur_pages > 0) {
- __LASSERT(!list_empty(&tcd->tcd_pages));
- tage = tage_from_list(tcd->tcd_pages.prev);
+ __LASSERT(!cfs_list_empty(&tcd->tcd_pages));
+ tage = cfs_tage_from_list(tcd->tcd_pages.prev);
if (tage->used + len <= CFS_PAGE_SIZE)
return tage;
}
if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
if (tcd->tcd_cur_stock_pages > 0) {
- tage = tage_from_list(tcd->tcd_stock_pages.prev);
+ tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
-- tcd->tcd_cur_stock_pages;
- list_del_init(&tage->linkage);
+ cfs_list_del_init(&tage->linkage);
} else {
- tage = tage_alloc(CFS_ALLOC_ATOMIC);
+ tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC);
if (tage == NULL) {
if (printk_ratelimit())
- printk(KERN_WARNING
+ printk(CFS_KERN_WARNING
"cannot allocate a tage (%ld)\n",
tcd->tcd_cur_pages);
return NULL;
}
tage->used = 0;
- tage->cpu = smp_processor_id();
+ tage->cpu = cfs_smp_processor_id();
tage->type = tcd->tcd_type;
- list_add_tail(&tage->linkage, &tcd->tcd_pages);
+ cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
tcd->tcd_cur_pages++;
if (tcd->tcd_cur_pages > 8 && thread_running) {
return NULL;
}
-static void tcd_shrink(struct trace_cpu_data *tcd)
+static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
int pgcount = tcd->tcd_cur_pages / 10;
struct page_collection pc;
- struct trace_page *tage;
- struct trace_page *tmp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
/*
* XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
*/
if (printk_ratelimit())
- printk(KERN_WARNING "debug daemon buffer overflowed; "
+ printk(CFS_KERN_WARNING "debug daemon buffer overflowed; "
"discarding 10%% of pages (%d of %ld)\n",
pgcount + 1, tcd->tcd_cur_pages);
CFS_INIT_LIST_HEAD(&pc.pc_pages);
- spin_lock_init(&pc.pc_lock);
+ cfs_spin_lock_init(&pc.pc_lock);
cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
- struct trace_page, linkage) {
+ struct cfs_trace_page, linkage) {
if (pgcount-- == 0)
break;
- list_move_tail(&tage->linkage, &pc.pc_pages);
+ cfs_list_move_tail(&tage->linkage, &pc.pc_pages);
tcd->tcd_cur_pages--;
}
put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
-static struct trace_page *trace_get_tage(struct trace_cpu_data *tcd,
- unsigned long len)
+static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
+ unsigned long len)
{
- struct trace_page *tage;
+ struct cfs_trace_page *tage;
/*
* XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
*/
if (len > CFS_PAGE_SIZE) {
- printk(KERN_ERR
+ printk(CFS_KERN_ERR
"cowardly refusing to write %lu bytes in a page\n", len);
return NULL;
}
- tage = trace_get_tage_try(tcd, len);
+ tage = cfs_trace_get_tage_try(tcd, len);
if (tage != NULL)
return tage;
if (thread_running)
- tcd_shrink(tcd);
+ cfs_tcd_shrink(tcd);
if (tcd->tcd_cur_pages > 0) {
- tage = tage_from_list(tcd->tcd_pages.next);
+ tage = cfs_tage_from_list(tcd->tcd_pages.next);
tage->used = 0;
- tage_to_tail(tage, &tcd->tcd_pages);
+ cfs_tage_to_tail(tage, &tcd->tcd_pages);
}
return tage;
}
const char *format1, va_list args,
const char *format2, ...)
{
- struct trace_cpu_data *tcd = NULL;
- struct ptldebug_header header;
- struct trace_page *tage;
+ struct cfs_trace_cpu_data *tcd = NULL;
+ struct ptldebug_header header;
+ struct cfs_trace_page *tage;
/* string_buf is used only if tcd != NULL, and is always set then */
- char *string_buf = NULL;
- char *debug_buf;
- int known_size;
- int needed = 85; /* average message length */
- int max_nob;
- va_list ap;
- int depth;
- int i;
- int remain;
+ char *string_buf = NULL;
+ char *debug_buf;
+ int known_size;
+ int needed = 85; /* average message length */
+ int max_nob;
+ va_list ap;
+ int depth;
+ int i;
+ int remain;
if (strchr(file, '/'))
file = strrchr(file, '/') + 1;
- set_ptldebug_header(&header, subsys, mask, line, CDEBUG_STACK());
+ cfs_set_ptldebug_header(&header, subsys, mask, line, CDEBUG_STACK());
- tcd = trace_get_tcd();
+ tcd = cfs_trace_get_tcd();
if (tcd == NULL) /* arch may not log in IRQ context */
goto console;
if (tcd->tcd_shutting_down) {
- trace_put_tcd(tcd);
+ cfs_trace_put_tcd(tcd);
tcd = NULL;
goto console;
}
* if needed is to small for this format.
*/
for (i = 0; i < 2; i++) {
- tage = trace_get_tage(tcd, needed + known_size + 1);
+ tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
if (tage == NULL) {
if (needed + known_size > CFS_PAGE_SIZE)
mask |= D_ERROR;
- trace_put_tcd(tcd);
+ cfs_trace_put_tcd(tcd);
tcd = NULL;
goto console;
}
max_nob = CFS_PAGE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
- printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
+ printk(CFS_KERN_EMERG "negative max_nob: %i\n",
+ max_nob);
mask |= D_ERROR;
- trace_put_tcd(tcd);
+ cfs_trace_put_tcd(tcd);
tcd = NULL;
goto console;
}
}
if (*(string_buf+needed-1) != '\n')
- printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
+ printk(CFS_KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
file, line, fn);
header.ph_len = known_size + needed;
if ((mask & libcfs_printk) == 0) {
/* no console output requested */
if (tcd != NULL)
- trace_put_tcd(tcd);
+ cfs_trace_put_tcd(tcd);
return 1;
}
/* skipping a console message */
cdls->cdls_count++;
if (tcd != NULL)
- trace_put_tcd(tcd);
+ cfs_trace_put_tcd(tcd);
return 1;
}
}
if (tcd != NULL) {
- print_to_console(&header, mask, string_buf, needed, file, fn);
- trace_put_tcd(tcd);
+ cfs_print_to_console(&header, mask, string_buf, needed, file,
+ fn);
+ cfs_trace_put_tcd(tcd);
} else {
- string_buf = trace_get_console_buffer();
+ string_buf = cfs_trace_get_console_buffer();
needed = 0;
if (format1 != NULL) {
va_copy(ap, args);
- needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap);
+ needed = vsnprintf(string_buf,
+ CFS_TRACE_CONSOLE_BUFFER_SIZE,
+ format1, ap);
va_end(ap);
}
if (format2 != NULL) {
- remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
+ remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
if (remain > 0) {
va_start(ap, format2);
needed += vsnprintf(string_buf+needed, remain, format2, ap);
va_end(ap);
}
}
- print_to_console(&header, mask,
- string_buf, needed, file, fn);
+ cfs_print_to_console(&header, mask,
+ string_buf, needed, file, fn);
- trace_put_console_buffer(string_buf);
+ cfs_trace_put_console_buffer(string_buf);
}
if (cdls != NULL && cdls->cdls_count != 0) {
- string_buf = trace_get_console_buffer();
+ string_buf = cfs_trace_get_console_buffer();
- needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
+ needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
"Skipped %d previous similar message%s\n",
cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");
- print_to_console(&header, mask,
+ cfs_print_to_console(&header, mask,
string_buf, needed, file, fn);
- trace_put_console_buffer(string_buf);
+ cfs_trace_put_console_buffer(string_buf);
cdls->cdls_count = 0;
}
EXPORT_SYMBOL(libcfs_assertion_failed);
void
-trace_assertion_failed(const char *str,
- const char *fn, const char *file, int line)
+cfs_trace_assertion_failed(const char *str,
+ const char *fn, const char *file, int line)
{
struct ptldebug_header hdr;
libcfs_panic_in_progress = 1;
libcfs_catastrophe = 1;
- mb();
+ cfs_mb();
- set_ptldebug_header(&hdr, DEBUG_SUBSYSTEM, D_EMERG, line,
- CDEBUG_STACK());
+ cfs_set_ptldebug_header(&hdr, DEBUG_SUBSYSTEM, D_EMERG, line,
+ CDEBUG_STACK());
- print_to_console(&hdr, D_EMERG, str, strlen(str), file, fn);
+ cfs_print_to_console(&hdr, D_EMERG, str, strlen(str), file, fn);
LIBCFS_PANIC("Lustre debug assertion failure\n");
/* Do the collect_pages job on a single CPU: assumes that all other
* CPUs have been stopped during a panic. If this isn't true for some
* arch, this will have to be implemented separately in each arch. */
- int i;
- int j;
- struct trace_cpu_data *tcd;
+ int i;
+ int j;
+ struct cfs_trace_cpu_data *tcd;
CFS_INIT_LIST_HEAD(&pc->pc_pages);
- tcd_for_each(tcd, i, j) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ cfs_tcd_for_each(tcd, i, j) {
+ cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
tcd->tcd_cur_pages = 0;
if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
+ cfs_list_splice_init(&tcd->tcd_daemon_pages,
+ &pc->pc_pages);
tcd->tcd_cur_daemon_pages = 0;
}
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
- struct trace_cpu_data *tcd;
+ struct cfs_trace_cpu_data *tcd;
int i, cpu;
- spin_lock(&pc->pc_lock);
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ cfs_spin_lock(&pc->pc_lock);
+ cfs_for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu) {
+ cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
tcd->tcd_cur_pages = 0;
if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages,
- &pc->pc_pages);
+ cfs_list_splice_init(&tcd->tcd_daemon_pages,
+ &pc->pc_pages);
tcd->tcd_cur_daemon_pages = 0;
}
}
}
- spin_unlock(&pc->pc_lock);
+ cfs_spin_unlock(&pc->pc_lock);
}
static void collect_pages(struct page_collection *pc)
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
- struct trace_cpu_data *tcd;
- struct list_head *cur_head;
- struct trace_page *tage;
- struct trace_page *tmp;
+ struct cfs_trace_cpu_data *tcd;
+ cfs_list_t *cur_head;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
int i, cpu;
- spin_lock(&pc->pc_lock);
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
+ cfs_spin_lock(&pc->pc_lock);
+ cfs_for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cur_head = tcd->tcd_pages.next;
cfs_list_for_each_entry_safe_typed(tage, tmp,
&pc->pc_pages,
- struct trace_page,
+ struct cfs_trace_page,
linkage) {
__LASSERT_TAGE_INVARIANT(tage);
if (tage->cpu != cpu || tage->type != i)
continue;
- tage_to_tail(tage, cur_head);
+ cfs_tage_to_tail(tage, cur_head);
tcd->tcd_cur_pages++;
}
}
}
- spin_unlock(&pc->pc_lock);
+ cfs_spin_unlock(&pc->pc_lock);
}
static void put_pages_back(struct page_collection *pc)
* if we have been steadily writing (and otherwise discarding) pages via the
* debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct trace_cpu_data *tcd)
+ struct cfs_trace_cpu_data *tcd)
{
- struct trace_page *tage;
- struct trace_page *tmp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- spin_lock(&pc->pc_lock);
+ cfs_spin_lock(&pc->pc_lock);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
- struct trace_page, linkage) {
+ struct cfs_trace_page, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
continue;
- tage_to_tail(tage, &tcd->tcd_daemon_pages);
+ cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
tcd->tcd_cur_daemon_pages++;
if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
- struct trace_page *victim;
+ struct cfs_trace_page *victim;
- __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
- victim = tage_from_list(tcd->tcd_daemon_pages.next);
+ __LASSERT(!cfs_list_empty(&tcd->tcd_daemon_pages));
+ victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
__LASSERT_TAGE_INVARIANT(victim);
- list_del(&victim->linkage);
- tage_free(victim);
+ cfs_list_del(&victim->linkage);
+ cfs_tage_free(victim);
tcd->tcd_cur_daemon_pages--;
}
}
- spin_unlock(&pc->pc_lock);
+ cfs_spin_unlock(&pc->pc_lock);
}
static void put_pages_on_daemon_list(struct page_collection *pc)
{
- struct trace_cpu_data *tcd;
+ struct cfs_trace_cpu_data *tcd;
int i, cpu;
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu)
+ cfs_for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu)
put_pages_on_tcd_daemon_list(pc, tcd);
}
}
-void trace_debug_print(void)
+void cfs_trace_debug_print(void)
{
struct page_collection pc;
- struct trace_page *tage;
- struct trace_page *tmp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- spin_lock_init(&pc.pc_lock);
+ cfs_spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
- struct trace_page, linkage) {
+ struct cfs_trace_page, linkage) {
char *p, *file, *fn;
cfs_page_t *page;
p += strlen(fn) + 1;
len = hdr->ph_len - (int)(p - (char *)hdr);
- print_to_console(hdr, D_EMERG, p, len, file, fn);
+ cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);
p += len;
}
- list_del(&tage->linkage);
- tage_free(tage);
+ cfs_list_del(&tage->linkage);
+ cfs_tage_free(tage);
}
}
-int tracefile_dump_all_pages(char *filename)
+int cfs_tracefile_dump_all_pages(char *filename)
{
struct page_collection pc;
cfs_file_t *filp;
- struct trace_page *tage;
- struct trace_page *tmp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
int rc;
CFS_DECL_MMSPACE;
- tracefile_write_lock();
+ cfs_tracefile_write_lock();
filp = cfs_filp_open(filename,
O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600, &rc);
if (!filp) {
if (rc != -EEXIST)
- printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
+ printk(CFS_KERN_ERR
+ "LustreError: can't open %s for dump: rc %d\n",
filename, rc);
goto out;
}
- spin_lock_init(&pc.pc_lock);
+ cfs_spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
- if (list_empty(&pc.pc_pages)) {
+ if (cfs_list_empty(&pc.pc_pages)) {
rc = 0;
goto close;
}
* iobufs with the pages and calling generic_direct_IO */
CFS_MMSPACE_OPEN;
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
- struct trace_page, linkage) {
+ struct cfs_trace_page, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
rc = cfs_filp_write(filp, cfs_page_address(tage->page),
tage->used, cfs_filp_poff(filp));
if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u but wrote "
+ printk(CFS_KERN_WARNING "wanted to write %u but wrote "
"%d\n", tage->used, rc);
put_pages_back(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
+ __LASSERT(cfs_list_empty(&pc.pc_pages));
break;
}
- list_del(&tage->linkage);
- tage_free(tage);
+ cfs_list_del(&tage->linkage);
+ cfs_tage_free(tage);
}
CFS_MMSPACE_CLOSE;
rc = cfs_filp_fsync(filp);
if (rc)
- printk(KERN_ERR "sync returns %d\n", rc);
+ printk(CFS_KERN_ERR "sync returns %d\n", rc);
close:
cfs_filp_close(filp);
out:
- tracefile_write_unlock();
+ cfs_tracefile_write_unlock();
return rc;
}
-void trace_flush_pages(void)
+void cfs_trace_flush_pages(void)
{
struct page_collection pc;
- struct trace_page *tage;
- struct trace_page *tmp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- spin_lock_init(&pc.pc_lock);
+ cfs_spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
- struct trace_page, linkage) {
+ struct cfs_trace_page, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
- list_del(&tage->linkage);
- tage_free(tage);
+ cfs_list_del(&tage->linkage);
+ cfs_tage_free(tage);
}
}
-int trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char *usr_buffer, int usr_buffer_nob)
+int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
+ const char *usr_buffer, int usr_buffer_nob)
{
int nob;
if (usr_buffer_nob > knl_buffer_nob)
return -EOVERFLOW;
- if (copy_from_user((void *)knl_buffer,
+ if (cfs_copy_from_user((void *)knl_buffer,
(void *)usr_buffer, usr_buffer_nob))
return -EFAULT;
return 0;
}
-int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
- const char *knl_buffer, char *append)
+int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+ const char *knl_buffer, char *append)
{
/* NB if 'append' != NULL, it's a single character to append to the
* copied out string - usually "\n", for /proc entries and "" (i.e. a
if (nob > usr_buffer_nob)
nob = usr_buffer_nob;
- if (copy_to_user(usr_buffer, knl_buffer, nob))
+ if (cfs_copy_to_user(usr_buffer, knl_buffer, nob))
return -EFAULT;
if (append != NULL && nob < usr_buffer_nob) {
- if (copy_to_user(usr_buffer + nob, append, 1))
+ if (cfs_copy_to_user(usr_buffer + nob, append, 1))
return -EFAULT;
nob++;
return nob;
}
-EXPORT_SYMBOL(trace_copyout_string);
+EXPORT_SYMBOL(cfs_trace_copyout_string);
-int trace_allocate_string_buffer(char **str, int nob)
+int cfs_trace_allocate_string_buffer(char **str, int nob)
{
if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */
return -EINVAL;
return 0;
}
-void trace_free_string_buffer(char *str, int nob)
+void cfs_trace_free_string_buffer(char *str, int nob)
{
cfs_free(str);
}
-int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
{
char *str;
int rc;
- rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
+ rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
if (rc != 0)
return rc;
- rc = trace_copyin_string(str, usr_str_nob + 1,
- usr_str, usr_str_nob);
+ rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
+ usr_str, usr_str_nob);
if (rc != 0)
goto out;
goto out;
}
#endif
- rc = tracefile_dump_all_pages(str);
+ rc = cfs_tracefile_dump_all_pages(str);
out:
- trace_free_string_buffer(str, usr_str_nob + 1);
+ cfs_trace_free_string_buffer(str, usr_str_nob + 1);
return rc;
}
-int trace_daemon_command(char *str)
+int cfs_trace_daemon_command(char *str)
{
int rc = 0;
- tracefile_write_lock();
+ cfs_tracefile_write_lock();
if (strcmp(str, "stop") == 0) {
- tracefile_write_unlock();
- trace_stop_thread();
- tracefile_write_lock();
- memset(tracefile, 0, sizeof(tracefile));
+ cfs_tracefile_write_unlock();
+ cfs_trace_stop_thread();
+ cfs_tracefile_write_lock();
+ memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
} else if (strncmp(str, "size=", 5) == 0) {
- tracefile_size = simple_strtoul(str + 5, NULL, 0);
- if (tracefile_size < 10 || tracefile_size > 20480)
- tracefile_size = TRACEFILE_SIZE;
+ cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
+ if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
+ cfs_tracefile_size = CFS_TRACEFILE_SIZE;
else
- tracefile_size <<= 20;
+ cfs_tracefile_size <<= 20;
- } else if (strlen(str) >= sizeof(tracefile)) {
+ } else if (strlen(str) >= sizeof(cfs_tracefile)) {
rc = -ENAMETOOLONG;
#ifndef __WINNT__
} else if (str[0] != '/') {
rc = -EINVAL;
#endif
} else {
- strcpy(tracefile, str);
+ strcpy(cfs_tracefile, str);
- printk(KERN_INFO "Lustre: debug daemon will attempt to start writing "
- "to %s (%lukB max)\n", tracefile,
- (long)(tracefile_size >> 10));
+ printk(CFS_KERN_INFO
+ "Lustre: debug daemon will attempt to start writing "
+ "to %s (%lukB max)\n", cfs_tracefile,
+ (long)(cfs_tracefile_size >> 10));
- trace_start_thread();
+ cfs_trace_start_thread();
}
- tracefile_write_unlock();
+ cfs_tracefile_write_unlock();
return rc;
}
-int trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
{
char *str;
int rc;
- rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
+ rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
if (rc != 0)
return rc;
- rc = trace_copyin_string(str, usr_str_nob + 1,
+ rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
usr_str, usr_str_nob);
if (rc == 0)
- rc = trace_daemon_command(str);
+ rc = cfs_trace_daemon_command(str);
- trace_free_string_buffer(str, usr_str_nob + 1);
+ cfs_trace_free_string_buffer(str, usr_str_nob + 1);
return rc;
}
-int trace_set_debug_mb(int mb)
+int cfs_trace_set_debug_mb(int mb)
{
int i;
int j;
int pages;
- int limit = trace_max_debug_mb();
- struct trace_cpu_data *tcd;
+ int limit = cfs_trace_max_debug_mb();
+ struct cfs_trace_cpu_data *tcd;
- if (mb < num_possible_cpus())
+ if (mb < cfs_num_possible_cpus())
return -EINVAL;
if (mb > limit) {
- printk(KERN_ERR "Lustre: Refusing to set debug buffer size to "
- "%dMB - limit is %d\n", mb, limit);
+ printk(CFS_KERN_ERR "Lustre: Refusing to set debug buffer size "
+ "to %dMB - limit is %d\n", mb, limit);
return -EINVAL;
}
- mb /= num_possible_cpus();
+ mb /= cfs_num_possible_cpus();
pages = mb << (20 - CFS_PAGE_SHIFT);
- tracefile_write_lock();
+ cfs_tracefile_write_lock();
- tcd_for_each(tcd, i, j)
+ cfs_tcd_for_each(tcd, i, j)
tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
- tracefile_write_unlock();
+ cfs_tracefile_write_unlock();
return 0;
}
-int trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
{
char str[32];
int rc;
- rc = trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
+ rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
if (rc < 0)
return rc;
- return trace_set_debug_mb(simple_strtoul(str, NULL, 0));
+ return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
}
-int trace_get_debug_mb(void)
+int cfs_trace_get_debug_mb(void)
{
int i;
int j;
- struct trace_cpu_data *tcd;
+ struct cfs_trace_cpu_data *tcd;
int total_pages = 0;
- tracefile_read_lock();
+ cfs_tracefile_read_lock();
- tcd_for_each(tcd, i, j)
+ cfs_tcd_for_each(tcd, i, j)
total_pages += tcd->tcd_max_pages;
- tracefile_read_unlock();
+ cfs_tracefile_read_unlock();
return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
}
{
struct page_collection pc;
struct tracefiled_ctl *tctl = arg;
- struct trace_page *tage;
- struct trace_page *tmp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
struct ptldebug_header *hdr;
cfs_file_t *filp;
int last_loop = 0;
/* this is so broken in uml? what on earth is going on? */
cfs_daemonize("ktracefiled");
- spin_lock_init(&pc.pc_lock);
- complete(&tctl->tctl_start);
+ cfs_spin_lock_init(&pc.pc_lock);
+ cfs_complete(&tctl->tctl_start);
while (1) {
cfs_waitlink_t __wait;
pc.pc_want_daemon_pages = 0;
collect_pages(&pc);
- if (list_empty(&pc.pc_pages))
+ if (cfs_list_empty(&pc.pc_pages))
goto end_loop;
filp = NULL;
- tracefile_read_lock();
- if (tracefile[0] != 0) {
- filp = cfs_filp_open(tracefile,
+ cfs_tracefile_read_lock();
+ if (cfs_tracefile[0] != 0) {
+ filp = cfs_filp_open(cfs_tracefile,
O_CREAT | O_RDWR | O_LARGEFILE,
0600, &rc);
if (!(filp))
- printk(KERN_WARNING "couldn't open %s: %d\n",
- tracefile, rc);
+ printk(CFS_KERN_WARNING "couldn't open %s: "
+ "%d\n", cfs_tracefile, rc);
}
- tracefile_read_unlock();
+ cfs_tracefile_read_unlock();
if (filp == NULL) {
put_pages_on_daemon_list(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
+ __LASSERT(cfs_list_empty(&pc.pc_pages));
goto end_loop;
}
CFS_MMSPACE_OPEN;
/* mark the first header, so we can sort in chunks */
- tage = tage_from_list(pc.pc_pages.next);
+ tage = cfs_tage_from_list(pc.pc_pages.next);
__LASSERT_TAGE_INVARIANT(tage);
hdr = cfs_page_address(tage->page);
hdr->ph_flags |= PH_FLAG_FIRST_RECORD;
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
- struct trace_page, linkage) {
+ struct cfs_trace_page,
+ linkage) {
static loff_t f_pos;
__LASSERT_TAGE_INVARIANT(tage);
- if (f_pos >= (off_t)tracefile_size)
+ if (f_pos >= (off_t)cfs_tracefile_size)
f_pos = 0;
else if (f_pos > (off_t)cfs_filp_size(filp))
f_pos = cfs_filp_size(filp);
rc = cfs_filp_write(filp, cfs_page_address(tage->page),
tage->used, &f_pos);
if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u but "
- "wrote %d\n", tage->used, rc);
+ printk(CFS_KERN_WARNING "wanted to write %u "
+ "but wrote %d\n", tage->used, rc);
put_pages_back(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
+ __LASSERT(cfs_list_empty(&pc.pc_pages));
}
}
CFS_MMSPACE_CLOSE;
cfs_filp_close(filp);
put_pages_on_daemon_list(&pc);
- if (!list_empty(&pc.pc_pages)) {
+ if (!cfs_list_empty(&pc.pc_pages)) {
int i;
- printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
- printk(KERN_ERR "total cpus(%d): ", num_possible_cpus());
- for (i = 0; i < num_possible_cpus(); i++)
- if (cpu_online(i))
- printk(KERN_ERR "%d(on) ", i);
+ printk(CFS_KERN_ALERT "Lustre: trace pages aren't "
+ " empty\n");
+ printk(CFS_KERN_ERR "total cpus(%d): ",
+ cfs_num_possible_cpus());
+ for (i = 0; i < cfs_num_possible_cpus(); i++)
+ if (cfs_cpu_online(i))
+ printk(CFS_KERN_ERR "%d(on) ", i);
else
- printk(KERN_ERR "%d(off) ", i);
- printk(KERN_ERR "\n");
+ printk(CFS_KERN_ERR "%d(off) ", i);
+ printk(CFS_KERN_ERR "\n");
i = 0;
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
- linkage)
- printk(KERN_ERR "page %d belongs to cpu %d\n",
- ++i, tage->cpu);
- printk(KERN_ERR "There are %d pages unwritten\n", i);
+ cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
+ linkage)
+ printk(CFS_KERN_ERR "page %d belongs to cpu "
+ "%d\n", ++i, tage->cpu);
+ printk(CFS_KERN_ERR "There are %d pages unwritten\n",
+ i);
}
- __LASSERT(list_empty(&pc.pc_pages));
+ __LASSERT(cfs_list_empty(&pc.pc_pages));
end_loop:
- if (atomic_read(&tctl->tctl_shutdown)) {
+ if (cfs_atomic_read(&tctl->tctl_shutdown)) {
if (last_loop == 0) {
last_loop = 1;
continue;
}
cfs_waitlink_init(&__wait);
cfs_waitq_add(&tctl->tctl_waitq, &__wait);
- set_current_state(TASK_INTERRUPTIBLE);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
cfs_time_seconds(1));
cfs_waitq_del(&tctl->tctl_waitq, &__wait);
}
- complete(&tctl->tctl_stop);
+ cfs_complete(&tctl->tctl_stop);
return 0;
}
-int trace_start_thread(void)
+int cfs_trace_start_thread(void)
{
struct tracefiled_ctl *tctl = &trace_tctl;
int rc = 0;
- mutex_down(&trace_thread_sem);
+ cfs_mutex_down(&cfs_trace_thread_sem);
if (thread_running)
goto out;
- init_completion(&tctl->tctl_start);
- init_completion(&tctl->tctl_stop);
+ cfs_init_completion(&tctl->tctl_start);
+ cfs_init_completion(&tctl->tctl_stop);
cfs_waitq_init(&tctl->tctl_waitq);
- atomic_set(&tctl->tctl_shutdown, 0);
+ cfs_atomic_set(&tctl->tctl_shutdown, 0);
if (cfs_kernel_thread(tracefiled, tctl, 0) < 0) {
rc = -ECHILD;
goto out;
}
- wait_for_completion(&tctl->tctl_start);
+ cfs_wait_for_completion(&tctl->tctl_start);
thread_running = 1;
out:
- mutex_up(&trace_thread_sem);
+ cfs_mutex_up(&cfs_trace_thread_sem);
return rc;
}
-void trace_stop_thread(void)
+void cfs_trace_stop_thread(void)
{
struct tracefiled_ctl *tctl = &trace_tctl;
- mutex_down(&trace_thread_sem);
+ cfs_mutex_down(&cfs_trace_thread_sem);
if (thread_running) {
- printk(KERN_INFO "Lustre: shutting down debug daemon thread...\n");
- atomic_set(&tctl->tctl_shutdown, 1);
- wait_for_completion(&tctl->tctl_stop);
+ printk(CFS_KERN_INFO
+ "Lustre: shutting down debug daemon thread...\n");
+ cfs_atomic_set(&tctl->tctl_shutdown, 1);
+ cfs_wait_for_completion(&tctl->tctl_stop);
thread_running = 0;
}
- mutex_up(&trace_thread_sem);
+ cfs_mutex_up(&cfs_trace_thread_sem);
}
-int tracefile_init(int max_pages)
+int cfs_tracefile_init(int max_pages)
{
- struct trace_cpu_data *tcd;
+ struct cfs_trace_cpu_data *tcd;
int i;
int j;
int rc;
int factor;
- rc = tracefile_init_arch();
+ rc = cfs_tracefile_init_arch();
if (rc != 0)
return rc;
- tcd_for_each(tcd, i, j) {
+ cfs_tcd_for_each(tcd, i, j) {
/* tcd_pages_factor is initialized int tracefile_init_arch. */
factor = tcd->tcd_pages_factor;
CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
static void trace_cleanup_on_all_cpus(void)
{
- struct trace_cpu_data *tcd;
- struct trace_page *tage;
- struct trace_page *tmp;
+ struct cfs_trace_cpu_data *tcd;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
int i, cpu;
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
+ cfs_for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu) {
tcd->tcd_shutting_down = 1;
cfs_list_for_each_entry_safe_typed(tage, tmp,
&tcd->tcd_pages,
- struct trace_page,
+ struct cfs_trace_page,
linkage) {
__LASSERT_TAGE_INVARIANT(tage);
- list_del(&tage->linkage);
- tage_free(tage);
+ cfs_list_del(&tage->linkage);
+ cfs_tage_free(tage);
}
tcd->tcd_cur_pages = 0;
}
}
-static void trace_cleanup(void)
+static void cfs_trace_cleanup(void)
{
struct page_collection pc;
CFS_INIT_LIST_HEAD(&pc.pc_pages);
- spin_lock_init(&pc.pc_lock);
+ cfs_spin_lock_init(&pc.pc_lock);
trace_cleanup_on_all_cpus();
- tracefile_fini_arch();
+ cfs_tracefile_fini_arch();
}
-void tracefile_exit(void)
+void cfs_tracefile_exit(void)
{
- trace_stop_thread();
- trace_cleanup();
+ cfs_trace_stop_thread();
+ cfs_trace_cleanup();
}
/* trace file lock routines */
#define TRACEFILE_NAME_SIZE 1024
-extern char tracefile[TRACEFILE_NAME_SIZE];
-extern long long tracefile_size;
+extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
+extern long long cfs_tracefile_size;
extern void libcfs_run_debug_log_upcall(char *file);
-int tracefile_init_arch(void);
-void tracefile_fini_arch(void);
-
-void tracefile_read_lock(void);
-void tracefile_read_unlock(void);
-void tracefile_write_lock(void);
-void tracefile_write_unlock(void);
-
-int tracefile_dump_all_pages(char *filename);
-void trace_debug_print(void);
-void trace_flush_pages(void);
-int trace_start_thread(void);
-void trace_stop_thread(void);
-int tracefile_init(int max_pages);
-void tracefile_exit(void);
-
-
-
-int trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char *usr_buffer, int usr_buffer_nob);
-int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
- const char *knl_str, char *append);
-int trace_allocate_string_buffer(char **str, int nob);
-void trace_free_string_buffer(char *str, int nob);
-int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob);
-int trace_daemon_command(char *str);
-int trace_daemon_command_usrstr(void *usr_str, int usr_str_nob);
-int trace_set_debug_mb(int mb);
-int trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob);
-int trace_get_debug_mb(void);
+int cfs_tracefile_init_arch(void);
+void cfs_tracefile_fini_arch(void);
+
+void cfs_tracefile_read_lock(void);
+void cfs_tracefile_read_unlock(void);
+void cfs_tracefile_write_lock(void);
+void cfs_tracefile_write_unlock(void);
+
+int cfs_tracefile_dump_all_pages(char *filename);
+void cfs_trace_debug_print(void);
+void cfs_trace_flush_pages(void);
+int cfs_trace_start_thread(void);
+void cfs_trace_stop_thread(void);
+int cfs_tracefile_init(int max_pages);
+void cfs_tracefile_exit(void);
+
+
+
+int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
+ const char *usr_buffer, int usr_buffer_nob);
+int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+ const char *knl_str, char *append);
+int cfs_trace_allocate_string_buffer(char **str, int nob);
+void cfs_trace_free_string_buffer(char *str, int nob);
+int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_daemon_command(char *str);
+int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_set_debug_mb(int mb);
+int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_get_debug_mb(void);
extern void libcfs_debug_dumplog_internal(void *arg);
extern void libcfs_register_panic_notifier(void);
extern void libcfs_unregister_panic_notifier(void);
extern int libcfs_panic_in_progress;
-extern int trace_max_debug_mb(void);
+extern int cfs_trace_max_debug_mb(void);
#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
-#define TRACEFILE_SIZE (500 << 20)
+#define CFS_TRACEFILE_SIZE (500 << 20)
#ifdef LUSTRE_TRACEFILE_PRIVATE
#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
-#define TRACEFILE_SIZE (500 << 20)
+#define CFS_TRACEFILE_SIZE (500 << 20)
/* Size of a buffer for sprinting console messages if we can't get a page
* from system */
-#define TRACE_CONSOLE_BUFFER_SIZE 1024
+#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024
-union trace_data_union {
- struct trace_cpu_data {
+union cfs_trace_data_union {
+ struct cfs_trace_cpu_data {
/*
* Even though this structure is meant to be per-CPU, locking
* is needed because in some places the data may be accessed
* and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
* tcd_for_each_type_lock
*/
- spinlock_t tcd_lock;
+ cfs_spinlock_t tcd_lock;
unsigned long tcd_lock_flags;
/*
* pages with trace records not yet processed by tracefiled.
*/
- struct list_head tcd_pages;
+ cfs_list_t tcd_pages;
/* number of pages on ->tcd_pages */
unsigned long tcd_cur_pages;
* (put_pages_on_daemon_list()). LRU pages from this list are
* discarded when list grows too large.
*/
- struct list_head tcd_daemon_pages;
+ cfs_list_t tcd_daemon_pages;
/* number of pages on ->tcd_daemon_pages */
unsigned long tcd_cur_daemon_pages;
* TCD_STOCK_PAGES pagesful are consumed by trace records all
* emitted in non-blocking contexts. Which is quite unlikely.
*/
- struct list_head tcd_stock_pages;
+ cfs_list_t tcd_stock_pages;
/* number of pages on ->tcd_stock_pages */
unsigned long tcd_cur_stock_pages;
/* The factors to share debug memory. */
unsigned short tcd_pages_factor;
} tcd;
- char __pad[L1_CACHE_ALIGN(sizeof(struct trace_cpu_data))];
+ char __pad[CFS_L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
};
#define TCD_MAX_TYPES 8
-extern union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS];
+extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
-#define tcd_for_each(tcd, i, j) \
- for (i = 0; trace_data[i] != NULL; i++) \
- for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd); \
- j < num_possible_cpus(); j++, (tcd) = &(*trace_data[i])[j].tcd)
+#define cfs_tcd_for_each(tcd, i, j) \
+ for (i = 0; cfs_trace_data[i] != NULL; i++) \
+ for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
+ j < cfs_num_possible_cpus(); \
+ j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
-#define tcd_for_each_type_lock(tcd, i, cpu) \
- for (i = 0; trace_data[i] && \
- (tcd = &(*trace_data[i])[cpu].tcd) && \
- trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)
+#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
+ for (i = 0; cfs_trace_data[i] && \
+ (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
+ cfs_trace_lock_tcd(tcd); cfs_trace_unlock_tcd(tcd), i++)
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
struct page_collection {
- struct list_head pc_pages;
+ cfs_list_t pc_pages;
/*
* spin-lock protecting ->pc_pages. It is taken by smp_call_function()
* call-back functions. XXX nikita: Which is horrible: all processors
* lock. Probably ->pc_pages should be replaced with an array of
* NR_CPUS elements accessed locklessly.
*/
- spinlock_t pc_lock;
+ cfs_spinlock_t pc_lock;
/*
* if this flag is set, collect_pages() will spill both
* ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
struct tracefiled_ctl {
- struct completion tctl_start;
- struct completion tctl_stop;
- cfs_waitq_t tctl_waitq;
- pid_t tctl_pid;
- atomic_t tctl_shutdown;
+ cfs_completion_t tctl_start;
+ cfs_completion_t tctl_stop;
+ cfs_waitq_t tctl_waitq;
+ pid_t tctl_pid;
+ cfs_atomic_t tctl_shutdown;
};
/*
*/
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
-struct trace_page {
+struct cfs_trace_page {
/*
* page itself
*/
- cfs_page_t *page;
+ cfs_page_t *page;
/*
* linkage into one of the lists in trace_data_union or
* page_collection
*/
- struct list_head linkage;
+ cfs_list_t linkage;
/*
* number of bytes used within this page
*/
- unsigned int used;
+ unsigned int used;
/*
* cpu that owns this page
*/
- unsigned short cpu;
+ unsigned short cpu;
/*
* type(context) of this page
*/
- unsigned short type;
+ unsigned short type;
};
-extern void set_ptldebug_header(struct ptldebug_header *header,
- int subsys, int mask, const int line,
- unsigned long stack);
-extern void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
- int len, const char *file, const char *fn);
+extern void cfs_set_ptldebug_header(struct ptldebug_header *header,
+ int subsys, int mask, const int line,
+ unsigned long stack);
+extern void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+ const char *buf, int len, const char *file,
+ const char *fn);
-extern int trace_lock_tcd(struct trace_cpu_data *tcd);
-extern void trace_unlock_tcd(struct trace_cpu_data *tcd);
+extern int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd);
+extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd);
/**
* trace_buf_type_t, trace_buf_idx_get() and trace_console_buffers[][]
* (see, for example, linux-tracefile.h).
*/
-extern char *trace_console_buffers[NR_CPUS][TCD_TYPE_MAX];
-extern trace_buf_type_t trace_buf_idx_get(void);
+extern char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
+extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
static inline char *
-trace_get_console_buffer(void)
+cfs_trace_get_console_buffer(void)
{
- return trace_console_buffers[cfs_get_cpu()][trace_buf_idx_get()];
+ unsigned int i = cfs_get_cpu();
+ unsigned int j = cfs_trace_buf_idx_get();
+
+ return cfs_trace_console_buffers[i][j];
}
static inline void
-trace_put_console_buffer(char *buffer)
+cfs_trace_put_console_buffer(char *buffer)
{
cfs_put_cpu();
}
-extern union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS];
-
-static inline struct trace_cpu_data *
-trace_get_tcd(void)
+static inline struct cfs_trace_cpu_data *
+cfs_trace_get_tcd(void)
{
- struct trace_cpu_data *tcd =
- &(*trace_data[trace_buf_idx_get()])[cfs_get_cpu()].tcd;
+ struct cfs_trace_cpu_data *tcd =
+ &(*cfs_trace_data[cfs_trace_buf_idx_get()])[cfs_get_cpu()].tcd;
- trace_lock_tcd(tcd);
+ cfs_trace_lock_tcd(tcd);
return tcd;
}
static inline void
-trace_put_tcd (struct trace_cpu_data *tcd)
+cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
{
- trace_unlock_tcd(tcd);
+ cfs_trace_unlock_tcd(tcd);
cfs_put_cpu();
}
-int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
- struct list_head *stock);
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+ cfs_list_t *stock);
-int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage);
+int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
+ struct cfs_trace_page *tage);
-extern void trace_assertion_failed(const char *str, const char *fn,
- const char *file, int line);
+extern void cfs_trace_assertion_failed(const char *str, const char *fn,
+ const char *file, int line);
/* ASSERTION that is safe to use within the debug system */
#define __LASSERT(cond) \
do { \
if (unlikely(!(cond))) { \
- trace_assertion_failed("ASSERTION("#cond") failed", \
+ cfs_trace_assertion_failed("ASSERTION("#cond") failed", \
__FUNCTION__, __FILE__, __LINE__); \
} \
} while (0)
#define OFF_BY_START(start) ((start)/BITS_PER_LONG)
-unsigned long find_next_bit(unsigned long *addr,
- unsigned long size, unsigned long offset)
+unsigned long cfs_find_next_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
unsigned long *word, *last;
unsigned long first_bit, bit, base;
return size;
if (first_bit != 0) {
int tmp = (*word++) & (~0UL << first_bit);
- bit = __ffs(tmp);
+ bit = __cfs_ffs(tmp);
if (bit < BITS_PER_LONG)
goto found;
word++;
}
while (word <= last) {
if (*word != 0UL) {
- bit = __ffs(*word);
+ bit = __cfs_ffs(*word);
goto found;
}
word++;
return base + bit;
}
-unsigned long find_next_zero_bit(unsigned long *addr,
- unsigned long size, unsigned long offset)
+unsigned long cfs_find_next_zero_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
unsigned long *word, *last;
unsigned long first_bit, bit, base;
return size;
if (first_bit != 0) {
int tmp = (*word++) & (~0UL << first_bit);
- bit = __ffz(tmp);
+ bit = __cfs_ffz(tmp);
if (bit < BITS_PER_LONG)
goto found;
word++;
}
while (word <= last) {
if (*word != ~0UL) {
- bit = __ffz(*word);
+ bit = __cfs_ffz(*word);
goto found;
}
word++;
* No-op implementation.
*/
-void spin_lock_init(spinlock_t *lock)
+void cfs_spin_lock_init(cfs_spinlock_t *lock)
{
LASSERT(lock != NULL);
(void)lock;
}
-void spin_lock(spinlock_t *lock)
+void cfs_spin_lock(cfs_spinlock_t *lock)
{
(void)lock;
}
-void spin_unlock(spinlock_t *lock)
+void cfs_spin_unlock(cfs_spinlock_t *lock)
{
(void)lock;
}
-int spin_trylock(spinlock_t *lock)
+int cfs_spin_trylock(cfs_spinlock_t *lock)
{
(void)lock;
return 1;
}
-void spin_lock_bh_init(spinlock_t *lock)
+void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
{
LASSERT(lock != NULL);
(void)lock;
}
-void spin_lock_bh(spinlock_t *lock)
+void cfs_spin_lock_bh(cfs_spinlock_t *lock)
{
LASSERT(lock != NULL);
(void)lock;
}
-void spin_unlock_bh(spinlock_t *lock)
+void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
{
LASSERT(lock != NULL);
(void)lock;
* - __up(x)
*/
-void sema_init(struct semaphore *s, int val)
+void cfs_sema_init(cfs_semaphore_t *s, int val)
{
LASSERT(s != NULL);
(void)s;
(void)val;
}
-void __down(struct semaphore *s)
+void __down(cfs_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
}
-void __up(struct semaphore *s)
+void __up(cfs_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
static cfs_wait_handler_t wait_handler;
-void init_completion_module(cfs_wait_handler_t handler)
+void cfs_init_completion_module(cfs_wait_handler_t handler)
{
wait_handler = handler;
}
-int call_wait_handler(int timeout)
+int cfs_call_wait_handler(int timeout)
{
if (!wait_handler)
return -ENOSYS;
return wait_handler(timeout);
}
-void init_completion(struct completion *c)
+void cfs_init_completion(cfs_completion_t *c)
{
LASSERT(c != NULL);
c->done = 0;
cfs_waitq_init(&c->wait);
}
-void complete(struct completion *c)
+void cfs_complete(cfs_completion_t *c)
{
LASSERT(c != NULL);
c->done = 1;
cfs_waitq_signal(&c->wait);
}
-void wait_for_completion(struct completion *c)
+void cfs_wait_for_completion(cfs_completion_t *c)
{
LASSERT(c != NULL);
do {
- if (call_wait_handler(1000) < 0)
+ if (cfs_call_wait_handler(1000) < 0)
break;
} while (c->done == 0);
}
-int wait_for_completion_interruptible(struct completion *c)
+int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
{
LASSERT(c != NULL);
do {
- if (call_wait_handler(1000) < 0)
+ if (cfs_call_wait_handler(1000) < 0)
break;
} while (c->done == 0);
return 0;
* - up_write(x)
*/
-void init_rwsem(struct rw_semaphore *s)
+void cfs_init_rwsem(cfs_rw_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
}
-void down_read(struct rw_semaphore *s)
+void cfs_down_read(cfs_rw_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
}
-int down_read_trylock(struct rw_semaphore *s)
+int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
return 1;
}
-void down_write(struct rw_semaphore *s)
+void cfs_down_write(cfs_rw_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
}
-int down_write_trylock(struct rw_semaphore *s)
+int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
return 1;
}
-void up_read(struct rw_semaphore *s)
+void cfs_up_read(cfs_rw_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
}
-void up_write(struct rw_semaphore *s)
+void cfs_up_write(cfs_rw_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
}
-void fini_rwsem(struct rw_semaphore *s)
+void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
{
LASSERT(s != NULL);
(void)s;
#ifdef HAVE_LIBPTHREAD
/*
- * Completion
+ * Multi-threaded user space completion
*/
-void cfs_init_completion(struct cfs_completion *c)
+void cfs_mt_init_completion(cfs_mt_completion_t *c)
{
LASSERT(c != NULL);
c->c_done = 0;
pthread_cond_init(&c->c_cond, NULL);
}
-void cfs_fini_completion(struct cfs_completion *c)
+void cfs_mt_fini_completion(cfs_mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_destroy(&c->c_mut);
pthread_cond_destroy(&c->c_cond);
}
-void cfs_complete(struct cfs_completion *c)
+void cfs_mt_complete(cfs_mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_lock(&c->c_mut);
pthread_mutex_unlock(&c->c_mut);
}
-void cfs_wait_for_completion(struct cfs_completion *c)
+void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_lock(&c->c_mut);
}
/*
- * atomic primitives
+ * Multi-threaded user space atomic primitives
*/
static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
-int cfs_atomic_read(cfs_atomic_t *a)
+int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
{
int r;
return r;
}
-void cfs_atomic_set(cfs_atomic_t *a, int b)
+void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
{
pthread_mutex_lock(&atomic_guard_lock);
a->counter = b;
pthread_mutex_unlock(&atomic_guard_lock);
}
-int cfs_atomic_dec_and_test(cfs_atomic_t *a)
+int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
{
int r;
return (r == 0);
}
-void cfs_atomic_inc(cfs_atomic_t *a)
+void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
++a->counter;
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_atomic_dec(cfs_atomic_t *a)
+void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
--a->counter;
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_atomic_add(int b, cfs_atomic_t *a)
+void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_atomic_sub(int b, cfs_atomic_t *a)
+void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
a->counter -= b;
(void)link;
/* well, wait for something to happen */
- call_wait_handler(0);
+ cfs_call_wait_handler(0);
}
int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
{
LASSERT(link != NULL);
(void)link;
- call_wait_handler(timeout);
+ cfs_call_wait_handler(timeout);
return 0;
}
-void cfs_schedule_timeout(cfs_task_state_t state, int64_t timeout)
+void cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
{
cfs_waitlink_t l;
/* sleep(timeout) here instead? */
#include "tracefile.h"
struct lc_watchdog {
- cfs_timer_t lcw_timer; /* kernel timer */
- struct list_head lcw_list;
- cfs_time_t lcw_last_touched;
- cfs_task_t *lcw_task;
+ cfs_timer_t lcw_timer; /* kernel timer */
+ cfs_list_t lcw_list;
+ cfs_time_t lcw_last_touched;
+ cfs_task_t *lcw_task;
- void (*lcw_callback)(pid_t, void *);
- void *lcw_data;
+ void (*lcw_callback)(pid_t, void *);
+ void *lcw_data;
- pid_t lcw_pid;
+ pid_t lcw_pid;
enum {
LC_WATCHDOG_DISABLED,
* and lcw_stop_completion when it exits.
* Wake lcw_event_waitq to signal timer callback dispatches.
*/
-static struct completion lcw_start_completion;
-static struct completion lcw_stop_completion;
+static cfs_completion_t lcw_start_completion;
+static cfs_completion_t lcw_stop_completion;
static cfs_waitq_t lcw_event_waitq;
/*
* When it hits 0, we stop the distpatcher.
*/
static __u32 lcw_refcount = 0;
-static DECLARE_MUTEX(lcw_refcount_sem);
+static CFS_DECLARE_MUTEX(lcw_refcount_sem);
/*
* List of timers that have fired that need their callbacks run by the
* dispatcher.
*/
-static spinlock_t lcw_pending_timers_lock = SPIN_LOCK_UNLOCKED; /* BH lock! */
-static struct list_head lcw_pending_timers = \
+/* BH lock! */
+static cfs_spinlock_t lcw_pending_timers_lock = CFS_SPIN_LOCK_UNLOCKED;
+static cfs_list_t lcw_pending_timers = \
CFS_LIST_HEAD_INIT(lcw_pending_timers);
/* Last time a watchdog expired */
static cfs_time_t lcw_last_watchdog_time;
static int lcw_recent_watchdog_count;
-static spinlock_t lcw_last_watchdog_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t lcw_last_watchdog_lock = CFS_SPIN_LOCK_UNLOCKED;
static void
lcw_dump(struct lc_watchdog *lcw)
{
ENTRY;
#if defined(HAVE_TASKLIST_LOCK)
- read_lock(&tasklist_lock);
+ cfs_read_lock(&tasklist_lock);
#elif defined(HAVE_TASK_RCU)
rcu_read_lock();
#else
}
#if defined(HAVE_TASKLIST_LOCK)
- read_unlock(&tasklist_lock);
+ cfs_read_unlock(&tasklist_lock);
#elif defined(HAVE_TASK_RCU)
rcu_read_unlock();
#endif
* Normally we would not hold the spin lock over the CWARN but in
* this case we hold it to ensure non ratelimited lcw_dumps are not
* interleaved on the console making them hard to read. */
- spin_lock_bh(&lcw_last_watchdog_lock);
+ cfs_spin_lock_bh(&lcw_last_watchdog_lock);
delta_time = cfs_duration_sec(cfs_time_sub(current_time,
lcw_last_watchdog_time));
lcw_dump(lcw);
}
- spin_unlock_bh(&lcw_last_watchdog_lock);
- spin_lock_bh(&lcw_pending_timers_lock);
+ cfs_spin_unlock_bh(&lcw_last_watchdog_lock);
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
- if (list_empty(&lcw->lcw_list)) {
- list_add(&lcw->lcw_list, &lcw_pending_timers);
+ if (cfs_list_empty(&lcw->lcw_list)) {
+ cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
cfs_waitq_signal(&lcw_event_waitq);
}
- spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
EXIT;
}
{
int rc;
- if (test_bit(LCW_FLAG_STOP, &lcw_flags))
+ if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags))
return 1;
- spin_lock_bh(&lcw_pending_timers_lock);
- rc = !list_empty(&lcw_pending_timers);
- spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ rc = !cfs_list_empty(&lcw_pending_timers);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
return rc;
}
RECALC_SIGPENDING;
SIGNAL_MASK_UNLOCK(current, flags);
- complete(&lcw_start_completion);
+ cfs_complete(&lcw_start_completion);
while (1) {
- cfs_wait_event_interruptible(lcw_event_waitq, is_watchdog_fired(), rc);
+ cfs_wait_event_interruptible(lcw_event_waitq,
+ is_watchdog_fired(), rc);
CDEBUG(D_INFO, "Watchdog got woken up...\n");
- if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
+ if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) {
CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n");
- spin_lock_bh(&lcw_pending_timers_lock);
- rc = !list_empty(&lcw_pending_timers);
- spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ rc = !cfs_list_empty(&lcw_pending_timers);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
if (rc) {
CERROR("pending timers list was not empty at "
"time of watchdog dispatch shutdown\n");
break;
}
- spin_lock_bh(&lcw_pending_timers_lock);
- while (!list_empty(&lcw_pending_timers)) {
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ while (!cfs_list_empty(&lcw_pending_timers)) {
- lcw = list_entry(lcw_pending_timers.next,
+ lcw = cfs_list_entry(lcw_pending_timers.next,
struct lc_watchdog,
lcw_list);
- list_del_init(&lcw->lcw_list);
- spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_list_del_init(&lcw->lcw_list);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
CDEBUG(D_INFO, "found lcw for pid " LPPID "\n", lcw->lcw_pid);
if (lcw->lcw_state != LC_WATCHDOG_DISABLED)
lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
- spin_lock_bh(&lcw_pending_timers_lock);
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
}
- spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
}
- complete(&lcw_stop_completion);
+ cfs_complete(&lcw_stop_completion);
RETURN(rc);
}
ENTRY;
LASSERT(lcw_refcount == 1);
- init_completion(&lcw_stop_completion);
- init_completion(&lcw_start_completion);
+ cfs_init_completion(&lcw_stop_completion);
+ cfs_init_completion(&lcw_start_completion);
cfs_waitq_init(&lcw_event_waitq);
CDEBUG(D_INFO, "starting dispatch thread\n");
- rc = kernel_thread(lcw_dispatch_main, NULL, 0);
+ rc = cfs_kernel_thread(lcw_dispatch_main, NULL, 0);
if (rc < 0) {
CERROR("error spawning watchdog dispatch thread: %d\n", rc);
EXIT;
return;
}
- wait_for_completion(&lcw_start_completion);
+ cfs_wait_for_completion(&lcw_start_completion);
CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
EXIT;
CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
- set_bit(LCW_FLAG_STOP, &lcw_flags);
+ cfs_set_bit(LCW_FLAG_STOP, &lcw_flags);
cfs_waitq_signal(&lcw_event_waitq);
- wait_for_completion(&lcw_stop_completion);
+ cfs_wait_for_completion(&lcw_stop_completion);
CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
CFS_INIT_LIST_HEAD(&lcw->lcw_list);
cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
- down(&lcw_refcount_sem);
+ cfs_down(&lcw_refcount_sem);
if (++lcw_refcount == 1)
lcw_dispatch_start();
- up(&lcw_refcount_sem);
+ cfs_up(&lcw_refcount_sem);
/* Keep this working in case we enable them by default */
if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
ENTRY;
LASSERT(lcw != NULL);
- spin_lock_bh(&lcw_pending_timers_lock);
- list_del_init(&lcw->lcw_list);
- spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ cfs_list_del_init(&lcw->lcw_list);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
lcw_update_time(lcw, "resumed");
lcw->lcw_state = LC_WATCHDOG_ENABLED;
ENTRY;
LASSERT(lcw != NULL);
- spin_lock_bh(&lcw_pending_timers_lock);
- if (!list_empty(&lcw->lcw_list))
- list_del_init(&lcw->lcw_list);
- spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ if (!cfs_list_empty(&lcw->lcw_list))
+ cfs_list_del_init(&lcw->lcw_list);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
lcw_update_time(lcw, "completed");
lcw->lcw_state = LC_WATCHDOG_DISABLED;
lcw_update_time(lcw, "stopped");
- spin_lock_bh(&lcw_pending_timers_lock);
- if (!list_empty(&lcw->lcw_list))
- list_del_init(&lcw->lcw_list);
- spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ if (!cfs_list_empty(&lcw->lcw_list))
+ cfs_list_del_init(&lcw->lcw_list);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- down(&lcw_refcount_sem);
+ cfs_down(&lcw_refcount_sem);
if (--lcw_refcount == 0)
lcw_dispatch_stop();
- up(&lcw_refcount_sem);
+ cfs_up(&lcw_refcount_sem);
LIBCFS_FREE(lcw, sizeof(*lcw));
PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
- spin_lock(&(cfs_win_task_manger.Lock));
+ cfs_spin_lock(&(cfs_win_task_manger.Lock));
ListEntry = cfs_win_task_manger.TaskList.Flink;
while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
ListEntry = ListEntry->Flink;
}
- spin_unlock(&(cfs_win_task_manger.Lock));
+ cfs_spin_unlock(&(cfs_win_task_manger.Lock));
}
int
cfs_win_task_manger.Magic = TASKMAN_MAGIC;
/* initialize the spinlock protection */
- spin_lock_init(&cfs_win_task_manger.Lock);
+ cfs_spin_lock_init(&cfs_win_task_manger.Lock);
/* create slab memory cache */
cfs_win_task_manger.slab = cfs_mem_cache_create(
}
/* cleanup all the taskslots attached to the list */
- spin_lock(&(cfs_win_task_manger.Lock));
+ cfs_spin_lock(&(cfs_win_task_manger.Lock));
while (!IsListEmpty(&(cfs_win_task_manger.TaskList))) {
cleanup_task_slot(TaskSlot);
}
- spin_unlock(&cfs_win_task_manger.Lock);
+ cfs_spin_unlock(&cfs_win_task_manger.Lock);
/* destroy the taskslot cache slab */
cfs_mem_cache_destroy(cfs_win_task_manger.slab);
PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
- spin_lock(&(cfs_win_task_manger.Lock));
+ cfs_spin_lock(&(cfs_win_task_manger.Lock));
ListEntry = cfs_win_task_manger.TaskList.Flink;
while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
errorout:
- spin_unlock(&(cfs_win_task_manger.Lock));
+ cfs_spin_unlock(&(cfs_win_task_manger.Lock));
if (!TaskSlot) {
cfs_enter_debugger();
void
cfs_pause(cfs_duration_t ticks)
{
- cfs_schedule_timeout(CFS_TASK_UNINTERRUPTIBLE, ticks);
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
}
void
-our_cond_resched()
-{
- cfs_schedule_timeout(CFS_TASK_UNINTERRUPTIBLE, 1i64);
-}
-
-void
-cfs_schedule_timeout(cfs_task_state_t state, int64_t time)
+cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
void
cfs_schedule()
{
- cfs_schedule_timeout(CFS_TASK_UNINTERRUPTIBLE, 0);
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
}
int
struct dentry *dget(struct dentry *de)
{
if (de) {
- atomic_inc(&de->d_count);
+ cfs_atomic_inc(&de->d_count);
}
return de;
}
void dput(struct dentry *de)
{
- if (!de || atomic_read(&de->d_count) == 0) {
+ if (!de || cfs_atomic_read(&de->d_count) == 0) {
return;
}
- if (atomic_dec_and_test(&de->d_count)) {
+ if (cfs_atomic_dec_and_test(&de->d_count)) {
cfs_free(de);
}
}
#if defined(_X86_)
void __declspec (naked) FASTCALL
-atomic_add(
+cfs_atomic_add(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
// ECX = i
}
void __declspec (naked) FASTCALL
-atomic_sub(
+cfs_atomic_sub(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
// ECX = i
}
void __declspec (naked) FASTCALL
-atomic_inc(
- atomic_t *v
+cfs_atomic_inc(
+ cfs_atomic_t *v
)
{
//InterlockedIncrement((PULONG)(&((v)->counter)));
}
void __declspec (naked) FASTCALL
-atomic_dec(
- atomic_t *v
+cfs_atomic_dec(
+ cfs_atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
}
int __declspec (naked) FASTCALL
-atomic_sub_and_test(
+cfs_atomic_sub_and_test(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
}
int __declspec (naked) FASTCALL
-atomic_inc_and_test(
- atomic_t *v
+cfs_atomic_inc_and_test(
+ cfs_atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
}
int __declspec (naked) FASTCALL
-atomic_dec_and_test(
- atomic_t *v
+cfs_atomic_dec_and_test(
+ cfs_atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
#elif defined(_AMD64_)
void FASTCALL
-atomic_add(
+cfs_atomic_add(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (i));
}
void FASTCALL
-atomic_sub(
+cfs_atomic_sub(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (-1*i));
}
void FASTCALL
-atomic_inc(
- atomic_t *v
+cfs_atomic_inc(
+ cfs_atomic_t *v
)
{
InterlockedIncrement((PULONG)(&((v)->counter)));
}
void FASTCALL
-atomic_dec(
- atomic_t *v
+cfs_atomic_dec(
+ cfs_atomic_t *v
)
{
InterlockedDecrement((PULONG)(&((v)->counter)));
}
int FASTCALL
-atomic_sub_and_test(
+cfs_atomic_sub_and_test(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
int counter, result;
}
int FASTCALL
-atomic_inc_and_test(
- atomic_t *v
+cfs_atomic_inc_and_test(
+ cfs_atomic_t *v
)
{
int counter, result;
}
int FASTCALL
-atomic_dec_and_test(
- atomic_t *v
+cfs_atomic_dec_and_test(
+ cfs_atomic_t *v
)
{
int counter, result;
*
* Atomically adds \a i to \a v and returns \a i + \a v
*/
-int FASTCALL atomic_add_return(int i, atomic_t *v)
+int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v)
{
int counter, result;
*
* Atomically subtracts \a i from \a v and returns \a v - \a i
*/
-int FASTCALL atomic_sub_return(int i, atomic_t *v)
+int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v)
{
- return atomic_add_return(-i, v);
+ return cfs_atomic_add_return(-i, v);
}
-int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock)
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock)
{
- if (atomic_read(v) != 1) {
+ if (cfs_atomic_read(v) != 1) {
return 0;
- }
+ }
- spin_lock(lock);
- if (atomic_dec_and_test(v))
+ cfs_spin_lock(lock);
+ if (cfs_atomic_dec_and_test(v))
return 1;
- spin_unlock(lock);
+ cfs_spin_unlock(lock);
return 0;
}
void
-rwlock_init(rwlock_t * rwlock)
+cfs_rwlock_init(cfs_rwlock_t * rwlock)
{
- spin_lock_init(&rwlock->guard);
+ cfs_spin_lock_init(&rwlock->guard);
rwlock->count = 0;
}
void
-rwlock_fini(rwlock_t * rwlock)
+cfs_rwlock_fini(cfs_rwlock_t * rwlock)
{
}
void
-read_lock(rwlock_t * rwlock)
+cfs_read_lock(cfs_rwlock_t * rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot->irql = KeRaiseIrqlToDpcLevel();
while (TRUE) {
- spin_lock(&rwlock->guard);
+ cfs_spin_lock(&rwlock->guard);
if (rwlock->count >= 0)
break;
- spin_unlock(&rwlock->guard);
+ cfs_spin_unlock(&rwlock->guard);
}
rwlock->count++;
- spin_unlock(&rwlock->guard);
+ cfs_spin_unlock(&rwlock->guard);
}
void
-read_unlock(rwlock_t * rwlock)
+cfs_read_unlock(cfs_rwlock_t * rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
ASSERT(slot->Magic == TASKSLT_MAGIC);
- spin_lock(&rwlock->guard);
+ cfs_spin_lock(&rwlock->guard);
ASSERT(rwlock->count > 0);
rwlock->count--;
if (rwlock < 0) {
cfs_enter_debugger();
}
- spin_unlock(&rwlock->guard);
+ cfs_spin_unlock(&rwlock->guard);
KeLowerIrql(slot->irql);
}
void
-write_lock(rwlock_t * rwlock)
+cfs_write_lock(cfs_rwlock_t * rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot->irql = KeRaiseIrqlToDpcLevel();
while (TRUE) {
- spin_lock(&rwlock->guard);
+ cfs_spin_lock(&rwlock->guard);
if (rwlock->count == 0)
break;
- spin_unlock(&rwlock->guard);
+ cfs_spin_unlock(&rwlock->guard);
}
rwlock->count = -1;
- spin_unlock(&rwlock->guard);
+ cfs_spin_unlock(&rwlock->guard);
}
void
-write_unlock(rwlock_t * rwlock)
+cfs_write_unlock(cfs_rwlock_t * rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
ASSERT(slot->Magic == TASKSLT_MAGIC);
- spin_lock(&rwlock->guard);
+ cfs_spin_lock(&rwlock->guard);
ASSERT(rwlock->count == -1);
rwlock->count = 0;
- spin_unlock(&rwlock->guard);
+ cfs_spin_unlock(&rwlock->guard);
KeLowerIrql(slot->irql);
}
memset(pg, 0, sizeof(cfs_page_t));
pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
pg->mapping = addr;
- atomic_set(&pg->count, 1);
- set_bit(PG_virt, &(pg->flags));
+ cfs_atomic_set(&pg->count, 1);
+ cfs_set_bit(PG_virt, &(pg->flags));
cfs_enter_debugger();
return pg;
}
* N/A
*/
-atomic_t libcfs_total_pages;
+cfs_atomic_t libcfs_total_pages;
cfs_page_t * cfs_alloc_page(int flags)
{
memset(pg, 0, sizeof(cfs_page_t));
pg->addr = cfs_mem_cache_alloc(cfs_page_p_slab, 0);
- atomic_set(&pg->count, 1);
+ cfs_atomic_set(&pg->count, 1);
if (pg->addr) {
if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
memset(pg->addr, 0, CFS_PAGE_SIZE);
}
- atomic_inc(&libcfs_total_pages);
+ cfs_atomic_inc(&libcfs_total_pages);
} else {
cfs_enter_debugger();
cfs_mem_cache_free(cfs_page_t_slab, pg);
{
ASSERT(pg != NULL);
ASSERT(pg->addr != NULL);
- ASSERT(atomic_read(&pg->count) <= 1);
+ ASSERT(cfs_atomic_read(&pg->count) <= 1);
- if (!test_bit(PG_virt, &pg->flags)) {
+ if (!cfs_test_bit(PG_virt, &pg->flags)) {
cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
- atomic_dec(&libcfs_total_pages);
+ cfs_atomic_dec(&libcfs_total_pages);
} else {
cfs_enter_debugger();
}
memset(pg, 0, sizeof(cfs_page_t));
pg->addr = cfs_alloc((CFS_PAGE_SIZE << order),0);
- atomic_set(&pg->count, 1);
+ cfs_atomic_set(&pg->count, 1);
if (pg->addr) {
if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
memset(pg->addr, 0, CFS_PAGE_SIZE << order);
}
- atomic_add(1 << order, &libcfs_total_pages);
+ cfs_atomic_add(1 << order, &libcfs_total_pages);
} else {
cfs_enter_debugger();
cfs_mem_cache_free(cfs_page_t_slab, pg);
{
ASSERT(pg != NULL);
ASSERT(pg->addr != NULL);
- ASSERT(atomic_read(&pg->count) <= 1);
+ ASSERT(cfs_atomic_read(&pg->count) <= 1);
- atomic_sub(1 << order, &libcfs_total_pages);
+ cfs_atomic_sub(1 << order, &libcfs_total_pages);
cfs_free(pg->addr);
cfs_mem_cache_free(cfs_page_t_slab, pg);
}
ExFreeToNPagedLookasideList(&(kmc->npll), buf);
}
-spinlock_t shrinker_guard = {0};
+cfs_spinlock_t shrinker_guard = {0};
CFS_LIST_HEAD(shrinker_hdr);
cfs_timer_t shrinker_timer = {0};
-struct shrinker * set_shrinker(int seeks, shrink_callback cb)
+struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
{
- struct shrinker * s = (struct shrinker *)
- cfs_alloc(sizeof(struct shrinker), CFS_ALLOC_ZERO);
+ struct cfs_shrinker * s = (struct cfs_shrinker *)
+ cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
if (s) {
s->cb = cb;
s->seeks = seeks;
s->nr = 2;
- spin_lock(&shrinker_guard);
- list_add(&s->list, &shrinker_hdr);
- spin_unlock(&shrinker_guard);
+ cfs_spin_lock(&shrinker_guard);
+ cfs_list_add(&s->list, &shrinker_hdr);
+ cfs_spin_unlock(&shrinker_guard);
}
return s;
}
-void remove_shrinker(struct shrinker *s)
+void cfs_remove_shrinker(struct cfs_shrinker *s)
{
- struct shrinker *tmp;
- spin_lock(&shrinker_guard);
+ struct cfs_shrinker *tmp;
+ cfs_spin_lock(&shrinker_guard);
#if TRUE
cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
- struct shrinker, list) {
+ struct cfs_shrinker, list) {
if (tmp == s) {
- list_del(&tmp->list);
+ cfs_list_del(&tmp->list);
break;
}
}
#else
- list_del(&s->list);
+ cfs_list_del(&s->list);
#endif
- spin_unlock(&shrinker_guard);
+ cfs_spin_unlock(&shrinker_guard);
cfs_free(s);
}
/* time ut test proc */
void shrinker_timer_proc(ulong_ptr_t arg)
{
- struct shrinker *s;
- spin_lock(&shrinker_guard);
+ struct cfs_shrinker *s;
+ cfs_spin_lock(&shrinker_guard);
cfs_list_for_each_entry_typed(s, &shrinker_hdr,
- struct shrinker, list) {
- s->cb(s->nr, __GFP_FS);
+ struct cfs_shrinker, list) {
+ s->cb(s->nr, __GFP_FS);
}
- spin_unlock(&shrinker_guard);
+ cfs_spin_unlock(&shrinker_guard);
cfs_timer_arm(&shrinker_timer, 300);
}
hdr = (struct libcfs_ioctl_hdr *)buf;
data = (struct libcfs_ioctl_data *)buf;
- err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
+ err = cfs_copy_from_user(buf, (void *)arg, sizeof(*hdr));
if (err)
RETURN(err);
RETURN(-EINVAL);
}
- err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
+ err = cfs_copy_from_user(buf, (void *)arg, hdr->ioc_len);
if (err)
RETURN(err);
if (data->ioc_inllen2)
data->ioc_inlbuf2 = &data->ioc_bulk[0] +
- size_round(data->ioc_inllen1);
+ cfs_size_round(data->ioc_inllen1);
RETURN(0);
}
int libcfs_ioctl_popdata(void *arg, void *data, int size)
{
- if (copy_to_user((char *)arg, data, size))
+ if (cfs_copy_to_user((char *)arg, data, size))
return -EFAULT;
return 0;
}
}
-void do_gettimeofday(struct timeval *tv)
+void cfs_gettimeofday(struct timeval *tv)
{
LARGE_INTEGER Time;
int gettimeofday(struct timeval *tv, void * tz)
{
- do_gettimeofday(tv);
+ cfs_gettimeofday(tv);
return 0;
}
length = portal->ioc_len;
} else if (_IOC_TYPE(cmd) == 'f') {
length = obd->ioc_len;
- extra = size_round(obd->ioc_plen1) + size_round(obd->ioc_plen2);
+ extra = cfs_size_round(obd->ioc_plen1) + cfs_size_round(obd->ioc_plen2);
} else if(_IOC_TYPE(cmd) == 'u') {
length = 4;
extra = 0;
if (obd->ioc_pbuf1 && data->ioc_plen1) {
data->ioc_pbuf1 = &procdat[length];
memcpy(data->ioc_pbuf1, obd->ioc_pbuf1, obd->ioc_plen1);
- length += size_round(obd->ioc_plen1);
+ length += cfs_size_round(obd->ioc_plen1);
} else {
data->ioc_plen1 = 0;
data->ioc_pbuf1 = NULL;
if (obd->ioc_pbuf2 && obd->ioc_plen2) {
data->ioc_pbuf2 = &procdat[length];
memcpy(data->ioc_pbuf2, obd->ioc_pbuf2, obd->ioc_plen2);
- length += size_round(obd->ioc_plen2);
+ length += cfs_size_round(obd->ioc_plen2);
} else {
data->ioc_plen2 = 0;
data->ioc_pbuf2 = NULL;
ASSERT(obd->ioc_plen1 == data->ioc_plen1);
data->ioc_pbuf1 = &procdat[length];
memcpy(obd->ioc_pbuf1, data->ioc_pbuf1, obd->ioc_plen1);
- length += size_round(obd->ioc_plen1);
+ length += cfs_size_round(obd->ioc_plen1);
}
if (obd->ioc_pbuf2) {
ASSERT(obd->ioc_plen2 == data->ioc_plen2);
data->ioc_pbuf2 = &procdat[length];
memcpy(obd->ioc_pbuf2, data->ioc_pbuf2, obd->ioc_plen2);
- length += size_round(obd->ioc_plen2);
+ length += cfs_size_round(obd->ioc_plen2);
}
}
data->ioc_inlbuf1 = obd->ioc_inlbuf1;
*/
-static CFS_DECL_RWSEM(cfs_symbol_lock);
+static CFS_DECLARE_RWSEM(cfs_symbol_lock);
CFS_LIST_HEAD(cfs_symbol_list);
int libcfs_is_mp_system = FALSE;
void *
cfs_symbol_get(const char *name)
{
- struct list_head *walker;
- struct cfs_symbol *sym = NULL;
+ cfs_list_t *walker;
+ struct cfs_symbol *sym = NULL;
- down_read(&cfs_symbol_lock);
- list_for_each(walker, &cfs_symbol_list) {
- sym = list_entry (walker, struct cfs_symbol, sym_list);
+ cfs_down_read(&cfs_symbol_lock);
+ cfs_list_for_each(walker, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
sym->ref ++;
break;
}
}
- up_read(&cfs_symbol_lock);
+ cfs_up_read(&cfs_symbol_lock);
if (sym != NULL)
return sym->value;
void
cfs_symbol_put(const char *name)
{
- struct list_head *walker;
- struct cfs_symbol *sym = NULL;
+ cfs_list_t *walker;
+ struct cfs_symbol *sym = NULL;
- down_read(&cfs_symbol_lock);
- list_for_each(walker, &cfs_symbol_list) {
- sym = list_entry (walker, struct cfs_symbol, sym_list);
+ cfs_down_read(&cfs_symbol_lock);
+ cfs_list_for_each(walker, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
LASSERT(sym->ref > 0);
sym->ref--;
break;
}
}
- up_read(&cfs_symbol_lock);
+ cfs_up_read(&cfs_symbol_lock);
LASSERT(sym != NULL);
}
int
cfs_symbol_register(const char *name, const void *value)
{
- struct list_head *walker;
- struct cfs_symbol *sym = NULL;
- struct cfs_symbol *new = NULL;
+ cfs_list_t *walker;
+ struct cfs_symbol *sym = NULL;
+ struct cfs_symbol *new = NULL;
new = cfs_alloc(sizeof(struct cfs_symbol), CFS_ALLOC_ZERO);
if (!new) {
new->ref = 0;
CFS_INIT_LIST_HEAD(&new->sym_list);
- down_write(&cfs_symbol_lock);
- list_for_each(walker, &cfs_symbol_list) {
- sym = list_entry (walker, struct cfs_symbol, sym_list);
+ cfs_down_write(&cfs_symbol_lock);
+ cfs_list_for_each(walker, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
- up_write(&cfs_symbol_lock);
+ cfs_up_write(&cfs_symbol_lock);
cfs_free(new);
return 0; // alreay registerred
}
}
- list_add_tail(&new->sym_list, &cfs_symbol_list);
- up_write(&cfs_symbol_lock);
+ cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
+ cfs_up_write(&cfs_symbol_lock);
return 0;
}
void
cfs_symbol_unregister(const char *name)
{
- struct list_head *walker;
- struct list_head *nxt;
- struct cfs_symbol *sym = NULL;
+ cfs_list_t *walker;
+ cfs_list_t *nxt;
+ struct cfs_symbol *sym = NULL;
- down_write(&cfs_symbol_lock);
- list_for_each_safe(walker, nxt, &cfs_symbol_list) {
- sym = list_entry (walker, struct cfs_symbol, sym_list);
+ cfs_down_write(&cfs_symbol_lock);
+ cfs_list_for_each_safe(walker, nxt, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
LASSERT(sym->ref == 0);
- list_del (&sym->sym_list);
+ cfs_list_del (&sym->sym_list);
cfs_free(sym);
break;
}
}
- up_write(&cfs_symbol_lock);
+ cfs_up_write(&cfs_symbol_lock);
}
/*
void
cfs_symbol_clean()
{
- struct list_head *walker;
+ cfs_list_t *walker;
struct cfs_symbol *sym = NULL;
- down_write(&cfs_symbol_lock);
- list_for_each(walker, &cfs_symbol_list) {
- sym = list_entry (walker, struct cfs_symbol, sym_list);
+ cfs_down_write(&cfs_symbol_lock);
+ cfs_list_for_each(walker, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
LASSERT(sym->ref == 0);
- list_del (&sym->sym_list);
+ cfs_list_del (&sym->sym_list);
cfs_free(sym);
}
- up_write(&cfs_symbol_lock);
+ cfs_up_write(&cfs_symbol_lock);
return;
}
KeAcquireSpinLock(&(timer->Lock), &Irql);
if (!cfs_is_flag_set(timer->Flags, CFS_TIMER_FLAG_TIMERED)){
- timeout.QuadPart = (LONGLONG)-1*1000*1000*10/HZ*deadline;
+ timeout.QuadPart = (LONGLONG)-1*1000*1000*10/CFS_HZ*deadline;
if (KeSetTimer(&timer->Timer, timeout, &timer->Dpc)) {
cfs_set_flag(timer->Flags, CFS_TIMER_FLAG_TIMERED);
{
int rc;
- spinlock_t lock;
+ cfs_spinlock_t lock;
/* Workground to check the system is MP build or UP build */
- spin_lock_init(&lock);
- spin_lock(&lock);
+ cfs_spin_lock_init(&lock);
+ cfs_spin_lock(&lock);
libcfs_is_mp_system = (int)lock.lock;
/* MP build system: it's a real spin, for UP build system, it
only raises the IRQL to DISPATCH_LEVEL */
- spin_unlock(&lock);
+ cfs_spin_unlock(&lock);
/* initialize libc routines (confliction between libcnptr.lib
and kernel ntoskrnl.lib) */
/* The global lock to protect all the access */
#if LIBCFS_PROCFS_SPINLOCK
-spinlock_t proc_fs_lock;
+cfs_spinlock_t proc_fs_lock;
-#define INIT_PROCFS_LOCK() spin_lock_init(&proc_fs_lock)
-#define LOCK_PROCFS() spin_lock(&proc_fs_lock)
-#define UNLOCK_PROCFS() spin_unlock(&proc_fs_lock)
+#define INIT_PROCFS_LOCK() cfs_spin_lock_init(&proc_fs_lock)
+#define LOCK_PROCFS() cfs_spin_lock(&proc_fs_lock)
+#define UNLOCK_PROCFS() cfs_spin_unlock(&proc_fs_lock)
#else
-mutex_t proc_fs_lock;
+cfs_mutex_t proc_fs_lock;
-#define INIT_PROCFS_LOCK() init_mutex(&proc_fs_lock)
-#define LOCK_PROCFS() mutex_down(&proc_fs_lock)
-#define UNLOCK_PROCFS() mutex_up(&proc_fs_lock)
+#define INIT_PROCFS_LOCK() cfs_init_mutex(&proc_fs_lock)
+#define LOCK_PROCFS() cfs_mutex_down(&proc_fs_lock)
+#define UNLOCK_PROCFS() cfs_mutex_up(&proc_fs_lock)
#endif
break;
}
- n -= copy_to_user((void *)buf, start, n);
+ n -= cfs_copy_to_user((void *)buf, start, n);
if (n == 0) {
if (retval == 0)
retval = -EFAULT;
continue;
/* Maybe we can't do anything with it... */
if (!table->proc_handler && !table->child) {
- printk(KERN_WARNING "SYSCTL: Can't register %s\n",
+ printk(CFS_KERN_WARNING "SYSCTL: Can't register %s\n",
table->procname);
continue;
}
continue;
if (de->mode & S_IFDIR) {
if (!table->child) {
- printk (KERN_ALERT "Help - malformed sysctl tree on free\n");
+ printk (CFS_KERN_ALERT "Help- malformed sysctl tree on free\n");
continue;
}
unregister_proc_table(table->child, de);
if (len > l) len = l;
if (len >= table->maxlen)
len = table->maxlen;
- if(copy_to_user(oldval, table->data, len))
+ if(cfs_copy_to_user(oldval, table->data, len))
return -EFAULT;
if(put_user(0, ((char *) oldval) + len))
return -EFAULT;
len = newlen;
if (len > table->maxlen)
len = table->maxlen;
- if(copy_from_user(table->data, newval, len))
+ if(cfs_copy_from_user(table->data, newval, len))
return -EFAULT;
if (len == table->maxlen)
len--;
len = left;
if (len > TMPBUFLEN-1)
len = TMPBUFLEN-1;
- if(copy_from_user(buf, buffer, len))
+ if(cfs_copy_from_user(buf, buffer, len))
return -EFAULT;
buf[len] = 0;
p = buf;
len = strlen(buf);
if (len > left)
len = left;
- if(copy_to_user(buffer, buf, len))
+ if(cfs_copy_to_user(buffer, buf, len))
return -EFAULT;
left -= len;
(char *)buffer += len;
}
if (len >= (size_t)table->maxlen)
len = (size_t)table->maxlen-1;
- if(copy_from_user(table->data, buffer, len))
+ if(cfs_copy_from_user(table->data, buffer, len))
return -EFAULT;
((char *) table->data)[len] = 0;
filp->f_pos += *lenp;
if (len > *lenp)
len = *lenp;
if (len)
- if(copy_to_user(buffer, table->data, len))
+ if(cfs_copy_to_user(buffer, table->data, len))
return -EFAULT;
if (len < *lenp) {
if(put_user('\n', ((char *) buffer) + len))
if (len) {
if (len > (size_t)table->maxlen)
len = (size_t)table->maxlen;
- if(copy_to_user(oldval, table->data, len))
+ if(cfs_copy_to_user(oldval, table->data, len))
return -EFAULT;
if(put_user(len, oldlenp))
return -EFAULT;
len = newlen;
if (len > (size_t)table->maxlen)
len = (size_t)table->maxlen;
- if(copy_from_user(table->data, newval, len))
+ if(cfs_copy_from_user(table->data, newval, len))
return -EFAULT;
}
}
int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp,
void *newval, size_t newlen)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
if (nlen <= 0 || nlen >= CTL_MAXNAME)
return -ENOTDIR;
tmp = &root_table_header.ctl_entry;
do {
struct ctl_table_header *head =
- list_entry(tmp, struct ctl_table_header, ctl_entry);
+ cfs_list_entry(tmp, struct ctl_table_header, ctl_entry);
void *context = NULL;
int error = parse_table(name, nlen, oldval, oldlenp,
newval, newlen, head->ctl_table,
CFS_INIT_LIST_HEAD(&tmp->ctl_entry);
if (insert_at_head)
- list_add(&tmp->ctl_entry, &root_table_header.ctl_entry);
+ cfs_list_add(&tmp->ctl_entry, &root_table_header.ctl_entry);
else
- list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
+ cfs_list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
#ifdef CONFIG_PROC_FS
register_proc_table(table, cfs_proc_sys);
#endif
*/
void unregister_sysctl_table(struct ctl_table_header * header)
{
- list_del(&header->ctl_entry);
+ cfs_list_del(&header->ctl_entry);
#ifdef CONFIG_PROC_FS
unregister_proc_table(header->ctl_table, cfs_proc_sys);
#endif
int trace_write_dump_kernel(struct file *file, const char *buffer,
unsigned long count, void *data)
{
- int rc = trace_dump_debug_buffer_usrstr((void *)buffer, count);
+ int rc = cfs_trace_dump_debug_buffer_usrstr((void *)buffer, count);
return (rc < 0) ? rc : count;
}
int trace_write_daemon_file(struct file *file, const char *buffer,
unsigned long count, void *data)
{
- int rc = trace_daemon_command_usrstr((void *)buffer, count);
+ int rc = cfs_trace_daemon_command_usrstr((void *)buffer, count);
return (rc < 0) ? rc : count;
}
int *eof, void *data)
{
int rc;
- tracefile_read_lock();
- rc = trace_copyout_string(page, count, tracefile, "\n");
- tracefile_read_unlock();
+ cfs_tracefile_read_lock();
+ rc = cfs_trace_copyout_string(page, count, cfs_tracefile, "\n");
+ cfs_tracefile_read_unlock();
return rc;
}
{
char str[32];
- snprintf(str, sizeof(str), "%d\n", trace_get_debug_mb());
+ snprintf(str, sizeof(str), "%d\n", cfs_trace_get_debug_mb());
- return trace_copyout_string(page, count, str, NULL);
+ return cfs_trace_copyout_string(page, count, str, NULL);
}
int insert_proc(void)
if (obd->ioc_plen1) {
obd->ioc_pbuf1 = (char *)(data + off);
- off += size_round(obd->ioc_plen1);
+ off += cfs_size_round(obd->ioc_plen1);
} else {
obd->ioc_pbuf1 = NULL;
}
if (obd->ioc_plen2) {
obd->ioc_pbuf2 = (char *)(data + off);
- off += size_round(obd->ioc_plen2);
+ off += cfs_size_round(obd->ioc_plen2);
} else {
obd->ioc_pbuf2 = NULL;
}
file->private_data = p;
}
memset(p, 0, sizeof(*p));
- mutex_init(&p->lock);
+ cfs_mutex_init(&p->lock);
p->op = op;
/*
void *p;
int err = 0;
- mutex_lock(&m->lock);
+ cfs_mutex_lock(&m->lock);
/*
* seq_file->op->..m_start/m_stop/m_next may do special actions
* or optimisations based on the file->f_version, so we want to
/* if not empty - flush it first */
if (m->count) {
n = min(m->count, size);
- err = copy_to_user(buf, m->buf + m->from, n);
+ err = cfs_copy_to_user(buf, m->buf + m->from, n);
if (err)
goto Efault;
m->count -= n;
}
m->op->stop(m, p);
n = min(m->count, size);
- err = copy_to_user(buf, m->buf, n);
+ err = cfs_copy_to_user(buf, m->buf, n);
if (err)
goto Efault;
copied += n;
else
*ppos += copied;
file->f_version = m->version;
- mutex_unlock(&m->lock);
+ cfs_mutex_unlock(&m->lock);
return copied;
Enomem:
err = -ENOMEM;
struct seq_file *m = (struct seq_file *)file->private_data;
long long retval = -EINVAL;
- mutex_lock(&m->lock);
+ cfs_mutex_lock(&m->lock);
m->version = file->f_version;
switch (origin) {
case 1:
}
}
file->f_version = m->version;
- mutex_unlock(&m->lock);
+ cfs_mutex_unlock(&m->lock);
return retval;
}
EXPORT_SYMBOL(seq_lseek);
}
EXPORT_SYMBOL(seq_puts);
-struct list_head *seq_list_start(struct list_head *head, loff_t pos)
+cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos)
{
- struct list_head *lh;
+ cfs_list_t *lh;
- list_for_each(lh, head)
+ cfs_list_for_each(lh, head)
if (pos-- == 0)
return lh;
EXPORT_SYMBOL(seq_list_start);
-struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
+cfs_list_t *seq_list_start_head(cfs_list_t *head,
+ loff_t pos)
{
if (!pos)
return head;
EXPORT_SYMBOL(seq_list_start_head);
-struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
+cfs_list_t *seq_list_next(void *v, cfs_list_t *head,
+ loff_t *ppos)
{
- struct list_head *lh;
+ cfs_list_t *lh;
- lh = ((struct list_head *)v)->next;
+ lh = ((cfs_list_t *)v)->next;
++*ppos;
return lh == head ? NULL : lh;
}
waitq->magic = CFS_WAITQ_MAGIC;
waitq->flags = 0;
CFS_INIT_LIST_HEAD(&(waitq->waiters));
- spin_lock_init(&(waitq->guard));
+ cfs_spin_lock_init(&(waitq->guard));
}
/*
link->event = &(slot->Event);
link->hits = &(slot->hits);
- atomic_inc(&slot->count);
+ cfs_atomic_inc(&slot->count);
CFS_INIT_LIST_HEAD(&(link->waitq[0].link));
CFS_INIT_LIST_HEAD(&(link->waitq[1].link));
cfs_assert(link->waitq[0].waitq == NULL);
cfs_assert(link->waitq[1].waitq == NULL);
- atomic_dec(&slot->count);
+ cfs_atomic_dec(&slot->count);
}
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
LASSERT(waitqid < CFS_WAITQ_CHANNELS);
- spin_lock(&(waitq->guard));
+ cfs_spin_lock(&(waitq->guard));
LASSERT(link->waitq[waitqid].waitq == NULL);
link->waitq[waitqid].waitq = waitq;
if (link->flags & CFS_WAITQ_EXCLUSIVE) {
- list_add_tail(&link->waitq[waitqid].link, &waitq->waiters);
+ cfs_list_add_tail(&link->waitq[waitqid].link, &waitq->waiters);
} else {
- list_add(&link->waitq[waitqid].link, &waitq->waiters);
+ cfs_list_add(&link->waitq[waitqid].link, &waitq->waiters);
}
- spin_unlock(&(waitq->guard));
+ cfs_spin_unlock(&(waitq->guard));
}
/*
* cfs_waitq_add
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
- spin_lock(&(waitq->guard));
+ cfs_spin_lock(&(waitq->guard));
for (i=0; i < CFS_WAITQ_CHANNELS; i++) {
if (link->waitq[i].waitq == waitq)
if (i < CFS_WAITQ_CHANNELS) {
link->waitq[i].waitq = NULL;
- list_del_init(&link->waitq[i].link);
+ cfs_list_del_init(&link->waitq[i].link);
} else {
cfs_enter_debugger();
}
- spin_unlock(&(waitq->guard));
+ cfs_spin_unlock(&(waitq->guard));
}
/*
LASSERT(waitq != NULL);
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
- spin_lock(&waitq->guard);
+ cfs_spin_lock(&waitq->guard);
cfs_list_for_each_entry_typed(scan, &waitq->waiters,
cfs_waitlink_channel_t,
link) {
LASSERT( result == FALSE || result == TRUE );
if (result) {
- atomic_inc(waitl->hits);
+ cfs_atomic_inc(waitl->hits);
}
if ((waitl->flags & CFS_WAITQ_EXCLUSIVE) && --nr == 0)
break;
}
- spin_unlock(&waitq->guard);
+ cfs_spin_unlock(&waitq->guard);
return;
}
LASSERT(link != NULL);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
- if (atomic_read(link->hits) > 0) {
- atomic_dec(link->hits);
- LASSERT((__u32)atomic_read(link->hits) < (__u32)0xFFFFFF00);
+ if (cfs_atomic_read(link->hits) > 0) {
+ cfs_atomic_dec(link->hits);
+ LASSERT((__u32)cfs_atomic_read(link->hits) < (__u32)0xFFFFFF00);
} else {
cfs_wait_event_internal(link->event, 0);
}
int64_t timeout)
{
- if (atomic_read(link->hits) > 0) {
- atomic_dec(link->hits);
- LASSERT((__u32)atomic_read(link->hits) < (__u32)0xFFFFFF00);
+ if (cfs_atomic_read(link->hits) > 0) {
+ cfs_atomic_dec(link->hits);
+ LASSERT((__u32)cfs_atomic_read(link->hits) < (__u32)0xFFFFFF00);
return (int64_t)TRUE;
}
{
ULONG i;
for (i=0; i < length; i++) {
- if (((i+1) % 31) == 0)
+ if (((i+1) % 31) == 0)
printk("\n");
printk("%2.2x ", (UCHAR)buffer[i]);
}
{
PKS_TSDU KsTsdu = NULL;
- spin_lock(&(ks_data.ksnd_tsdu_lock));
+ cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
- if (!list_empty (&(ks_data.ksnd_freetsdus))) {
+ if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
LASSERT(ks_data.ksnd_nfreetsdus > 0);
- KsTsdu = list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
- list_del(&(KsTsdu->Link));
+ KsTsdu = cfs_list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
+ cfs_list_del(&(KsTsdu->Link));
ks_data.ksnd_nfreetsdus--;
} else {
ks_data.ksnd_tsdu_slab, 0);
}
- spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
if (NULL != KsTsdu) {
RtlZeroMemory(KsTsdu, ks_data.ksnd_tsdu_size);
PKS_TSDU KsTsdu
)
{
- spin_lock(&(ks_data.ksnd_tsdu_lock));
+ cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
if (ks_data.ksnd_nfreetsdus > 128) {
KsFreeKsTsdu(KsTsdu);
} else {
- list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+ cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
ks_data.ksnd_nfreetsdus++;
}
- spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
}
/* with tconn lock acquired */
LASSERT(TsduMgr->TotalBytes >= length);
- while (!list_empty(&TsduMgr->TsduList)) {
+ while (!cfs_list_empty(&TsduMgr->TsduList)) {
ULONG start = 0;
- KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
start = KsTsdu->StartOffset;
if (KsTsdu->StartOffset >= KsTsdu->LastOffset) {
/* remove KsTsdu from list */
- list_del(&KsTsdu->Link);
+ cfs_list_del(&KsTsdu->Link);
TsduMgr->NumOfTsdu--;
KsPutKsTsdu(KsTsdu);
}
/* retrieve the latest Tsdu buffer form TsduMgr
list if the list is not empty. */
- if (list_empty(&(TsduMgr->TsduList))) {
+ if (cfs_list_empty(&(TsduMgr->TsduList))) {
LASSERT(TsduMgr->NumOfTsdu == 0);
KsTsdu = NULL;
} else {
LASSERT(TsduMgr->NumOfTsdu > 0);
- KsTsdu = list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
+ KsTsdu = cfs_list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
/* if this Tsdu does not contain enough space, we need
allocate a new Tsdu queue. */
if (NULL == KsTsdu) {
KsTsdu = KsAllocateKsTsdu();
if (NULL != KsTsdu) {
- list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
+ cfs_list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
TsduMgr->NumOfTsdu++;
}
}
} else {
- KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
/* remove the KsTsdu from TsduMgr list to release the lock */
- list_del(&(KsTsdu->Link));
+ cfs_list_del(&(KsTsdu->Link));
TsduMgr->NumOfTsdu--;
while (length > BytesRecved) {
KsTsdu = NULL;
} else {
TsduMgr->NumOfTsdu++;
- list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
+ cfs_list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
}
}
TsduMgr->NumOfTsdu = 0;
TsduMgr->TotalBytes = 0;
- spin_lock_init(&TsduMgr->Lock);
+ cfs_spin_lock_init(&TsduMgr->Lock);
}
KsRemoveTdiEngine(TsduMgr);
KeSetEvent(&(TsduMgr->Event), 0, FALSE);
- while (!list_empty(&TsduMgr->TsduList)) {
+ while (!cfs_list_empty(&TsduMgr->TsduList)) {
- KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
if (KsTsdu->StartOffset == KsTsdu->LastOffset) {
// KsTsdu is empty now, we need free it ...
//
- list_del(&(KsTsdu->Link));
+ cfs_list_del(&(KsTsdu->Link));
TsduMgr->NumOfTsdu--;
KsFreeKsTsdu(KsTsdu);
LASSERT(child->kstc_type == kstt_child);
- spin_lock(&(child->kstc_lock));
+ cfs_spin_lock(&(child->kstc_lock));
LASSERT(parent->kstc_state == ksts_listening);
LASSERT(child->kstc_state == ksts_connecting);
FALSE
);
- spin_unlock(&(child->kstc_lock));
+ cfs_spin_unlock(&(child->kstc_lock));
KsPrint((2, "KsAcceptCompletionRoutine: singal parent: %p (child: %p)\n",
parent, child));
child->child.kstc_busy = FALSE;
child->kstc_state = ksts_associated;
- spin_unlock(&(child->kstc_lock));
+ cfs_spin_unlock(&(child->kstc_lock));
}
/* now free the Irp */
ks_addr_slot_t * slot = NULL;
PLIST_ENTRY list = NULL;
- spin_lock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_lock(&ks_data.ksnd_addrs_lock);
list = ks_data.ksnd_addrs_list.Flink;
while (list != &ks_data.ksnd_addrs_list) {
slot = NULL;
}
- spin_unlock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
return slot;
}
void
KsCleanupIpAddresses()
{
- spin_lock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_lock(&ks_data.ksnd_addrs_lock);
while (!IsListEmpty(&ks_data.ksnd_addrs_list)) {
}
cfs_assert(ks_data.ksnd_naddrs == 0);
- spin_unlock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
}
VOID
slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
if (slot != NULL) {
- spin_lock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_lock(&ks_data.ksnd_addrs_lock);
InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
sprintf(slot->iface, "eth%d", ks_data.ksnd_naddrs++);
slot->ip_addr = ntohl(IpAddress->in_addr);
slot->devname.Length = DeviceName->Length;
slot->devname.MaximumLength = DeviceName->Length + sizeof(WCHAR);
slot->devname.Buffer = slot->buffer;
- spin_unlock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
KsPrint((0, "KsAddAddressHandle: %s added: ip=%xh(%d.%d.%d.%d)\n",
slot->iface, IpAddress->in_addr,
/* initialize the global ks_data members */
RtlInitUnicodeString(&ks_data.ksnd_client_name, TDILND_MODULE_NAME);
- spin_lock_init(&ks_data.ksnd_addrs_lock);
+ cfs_spin_lock_init(&ks_data.ksnd_addrs_lock);
InitializeListHead(&ks_data.ksnd_addrs_list);
/* register the pnp handlers */
LASSERT(parent->kstc_type == kstt_listener);
LASSERT(parent->kstc_state == ksts_listening);
- if (list_empty(&(parent->listener.kstc_listening.list))) {
+ if (cfs_list_empty(&(parent->listener.kstc_listening.list))) {
child = NULL;
} else {
- struct list_head * tmp;
+ cfs_list_t * tmp;
/* check the listening queue and try to get a free connecton */
- list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
- child = list_entry (tmp, ks_tconn_t, child.kstc_link);
- spin_lock(&(child->kstc_lock));
+ cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+ child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
+ cfs_spin_lock(&(child->kstc_lock));
if (!child->child.kstc_busy) {
LASSERT(child->kstc_state == ksts_associated);
child->child.kstc_busy = TRUE;
- spin_unlock(&(child->kstc_lock));
+ cfs_spin_unlock(&(child->kstc_lock));
break;
} else {
- spin_unlock(&(child->kstc_lock));
+ cfs_spin_unlock(&(child->kstc_lock));
child = NULL;
}
}
LASSERT(parent->kstc_type == kstt_listener);
- spin_lock(&(parent->kstc_lock));
+ cfs_spin_lock(&(parent->kstc_lock));
if (parent->kstc_state == ksts_listening) {
if (child) {
- spin_lock(&(child->kstc_lock));
+ cfs_spin_lock(&(child->kstc_lock));
child->child.kstc_info.ConnectionInfo = ConnectionInfo;
child->child.kstc_info.Remote = ConnectionInfo->RemoteAddress;
child->kstc_state = ksts_connecting;
- spin_unlock(&(child->kstc_lock));
+ cfs_spin_unlock(&(child->kstc_lock));
} else {
goto errorout;
}
- spin_unlock(&(parent->kstc_lock));
+ cfs_spin_unlock(&(parent->kstc_lock));
return Status;
errorout:
- spin_unlock(&(parent->kstc_lock));
+ cfs_spin_unlock(&(parent->kstc_lock));
*AcceptIrp = NULL;
*ConnectionContext = NULL;
KeSetEvent(&(WorkItem->Event), 0, FALSE);
- spin_lock(&(tconn->kstc_lock));
+ cfs_spin_lock(&(tconn->kstc_lock));
cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
ks_put_tconn(tconn);
}
tconn, DisconnectFlags));
ks_get_tconn(tconn);
- spin_lock(&(tconn->kstc_lock));
+ cfs_spin_lock(&(tconn->kstc_lock));
WorkItem = &(tconn->kstc_disconnect);
}
}
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
ks_put_tconn(tconn);
return (Status);
tconn
);
- spin_lock_init(&(tconn->kstc_lock));
+ cfs_spin_lock_init(&(tconn->kstc_lock));
ks_get_tconn(tconn);
- spin_lock(&(ks_data.ksnd_tconn_lock));
+ cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
/* attach it into global list in ks_data */
- list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
+ cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
ks_data.ksnd_ntconns++;
- spin_unlock(&(ks_data.ksnd_tconn_lock));
+ cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
tconn->kstc_rcv_wnd = tconn->kstc_snd_wnd = 0x10000;
}
void
ks_free_tconn(ks_tconn_t * tconn)
{
- LASSERT(atomic_read(&(tconn->kstc_refcount)) == 0);
+ LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
- spin_lock(&(ks_data.ksnd_tconn_lock));
+ cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
/* remove it from the global list */
- list_del(&tconn->kstc_list);
+ cfs_list_del(&tconn->kstc_list);
ks_data.ksnd_ntconns--;
/* if this is the last tconn, it would be safe for
if (ks_data.ksnd_ntconns == 0) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
- spin_unlock(&(ks_data.ksnd_tconn_lock));
+ cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
/* free the structure memory */
cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
ks_tconn_t * tconn
)
{
- atomic_inc(&(tconn->kstc_refcount));
+ cfs_atomic_inc(&(tconn->kstc_refcount));
}
/*
ks_tconn_t *tconn
)
{
- if (atomic_dec_and_test(&(tconn->kstc_refcount))) {
+ if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
- spin_lock(&(tconn->kstc_lock));
+ cfs_spin_lock(&(tconn->kstc_lock));
if ( ( tconn->kstc_type == kstt_child ||
tconn->kstc_type == kstt_sender ) &&
( tconn->kstc_state == ksts_connected ) ) {
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
ks_abort_tconn(tconn);
cfs_set_flag(tconn->kstc_flags, KS_TCONN_DESTROY_BUSY);
}
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
}
}
}
tconn->kstc_addr.FileObject
);
- spin_lock(&tconn->child.kstc_parent->kstc_lock);
- spin_lock(&tconn->kstc_lock);
+ cfs_spin_lock(&tconn->child.kstc_parent->kstc_lock);
+ cfs_spin_lock(&tconn->kstc_lock);
tconn->kstc_state = ksts_inited;
if (tconn->child.kstc_queued) {
- list_del(&(tconn->child.kstc_link));
+ cfs_list_del(&(tconn->child.kstc_link));
if (tconn->child.kstc_queueno) {
tconn->child.kstc_queued = FALSE;
}
- spin_unlock(&tconn->kstc_lock);
- spin_unlock(&tconn->child.kstc_parent->kstc_lock);
+ cfs_spin_unlock(&tconn->kstc_lock);
+ cfs_spin_unlock(&tconn->child.kstc_parent->kstc_lock);
/* drop the reference of the parent tconn */
ks_put_tconn(tconn->child.kstc_parent);
NULL
);
- spin_lock(&(tconn->kstc_lock));
+ cfs_spin_lock(&(tconn->kstc_lock));
if (NT_SUCCESS(status)) {
tconn->sender.kstc_info.ConnectionInfo = ConnectionInfo;
tconn->sender.kstc_info.Remote = ConnectionInfo->RemoteAddress;
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
} else {
rc = cfs_error_code(status);
tconn->kstc_state = ksts_associated;
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
/* disassocidate the connection and the address object,
after cleanup, it's safe to set the state to abort ... */
cfs_enter_debugger();
}
- spin_lock(&(tconn->kstc_lock));
+ cfs_spin_lock(&(tconn->kstc_lock));
/* cleanup the tsdumgr Lists */
KsCleanupTsdu (tconn);
info->ConnectionInfo = NULL;
info->Remote = NULL;
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
}
status = STATUS_SUCCESS;
WorkItem = &(tconn->kstc_disconnect);
ks_get_tconn(tconn);
- spin_lock(&(tconn->kstc_lock));
+ cfs_spin_lock(&(tconn->kstc_lock));
if (tconn->kstc_state != ksts_connected) {
ks_put_tconn(tconn);
}
}
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
}
engs = &TsduMgr->Slot;
if (!engs->queued) {
- spin_lock(&engm->lock);
+ cfs_spin_lock(&engm->lock);
if (!engs->queued) {
- list_add_tail(&engs->link, &engm->list);
+ cfs_list_add_tail(&engs->link, &engm->list);
engs->queued = TRUE;
engs->tconn = tconn;
engs->emgr = engm;
engs->tsdumgr = TsduMgr;
KeSetEvent(&(engm->start),0, FALSE);
}
- spin_unlock(&engm->lock);
+ cfs_spin_unlock(&engm->lock);
KsPrint((4, "KsQueueTdiEngine: TsduMgr=%p is queued to engine %p\n",
TsduMgr, engm));
}
if (engs->queued) {
engm = engs->emgr;
LASSERT(engm != NULL);
- spin_lock(&engm->lock);
+ cfs_spin_lock(&engm->lock);
if (engs->queued) {
- list_del(&engs->link);
+ cfs_list_del(&engs->link);
engs->queued = FALSE;
engs->tconn = NULL;
engs->emgr = NULL;
engs->tsdumgr = NULL;
}
- spin_unlock(&engm->lock);
+ cfs_spin_unlock(&engm->lock);
KsPrint((4, "KsQueueTdiEngine: TsduMgr %p is removed from engine %p\n",
TsduMgr, engm));
}
tflags = TDI_SEND_NON_BLOCKING;
}
- if (list_empty(&TsduMgr->TsduList)) {
+ if (cfs_list_empty(&TsduMgr->TsduList)) {
LASSERT(TsduMgr->TotalBytes == 0);
ks_unlock_tsdumgr(TsduMgr);
goto errorout;
{
ks_engine_mgr_t * engm = context;
ks_engine_slot_t * engs;
- struct list_head * list;
+ cfs_list_t * list;
ks_tconn_t * tconn;
cfs_set_thread_priority(31);
cfs_wait_event_internal(&engm->start, 0);
- spin_lock(&engm->lock);
- if (list_empty(&engm->list)) {
- spin_unlock(&engm->lock);
+ cfs_spin_lock(&engm->lock);
+ if (cfs_list_empty(&engm->list)) {
+ cfs_spin_unlock(&engm->lock);
continue;
}
list = engm->list.next;
- list_del(list);
- engs = list_entry(list, ks_engine_slot_t, link);
+ cfs_list_del(list);
+ engs = cfs_list_entry(list, ks_engine_slot_t, link);
LASSERT(engs->emgr == engm);
LASSERT(engs->queued);
engs->emgr = NULL;
engs->queued = FALSE;
- spin_unlock(&engm->lock);
+ cfs_spin_unlock(&engm->lock);
tconn = engs->tconn;
LASSERT(tconn->kstc_magic == KS_TCONN_MAGIC);
/* initialize tconn related globals */
RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
- spin_lock_init(&ks_data.ksnd_tconn_lock);
+ cfs_spin_lock_init(&ks_data.ksnd_tconn_lock);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
}
/* initialize tsdu related globals */
- spin_lock_init(&ks_data.ksnd_tsdu_lock);
+ cfs_spin_lock_init(&ks_data.ksnd_tsdu_lock);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
}
/* initialize engine threads list */
- ks_data.ksnd_engine_nums = num_online_cpus();
+ ks_data.ksnd_engine_nums = cfs_num_online_cpus();
if (ks_data.ksnd_engine_nums < 4) {
ks_data.ksnd_engine_nums = 4;
}
goto errorout;
}
for (i = 0; i < ks_data.ksnd_engine_nums; i++) {
- spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
+ cfs_spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
ks_fini_tdi_data()
{
PKS_TSDU KsTsdu = NULL;
- struct list_head * list = NULL;
+ cfs_list_t * list = NULL;
int i;
/* clean up the pnp handler and address slots */
}
/* we need wait until all the tconn are freed */
- spin_lock(&(ks_data.ksnd_tconn_lock));
+ cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
- if (list_empty(&(ks_data.ksnd_tconns))) {
+ if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
- spin_unlock(&(ks_data.ksnd_tconn_lock));
+ cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
/* now wait on the tconn exit event */
cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
ks_data.ksnd_tconn_slab = NULL;
/* clean up all the tsud buffers in the free list */
- spin_lock(&(ks_data.ksnd_tsdu_lock));
- list_for_each (list, &ks_data.ksnd_freetsdus) {
- KsTsdu = list_entry (list, KS_TSDU, Link);
+ cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+ cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
+ KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
cfs_mem_cache_free(
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
- spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
/* it's safe to delete the tsdu slab ... */
cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
/* create the backlog child tconn */
backlog = ks_create_child_tconn(parent);
- spin_lock(&(parent->kstc_lock));
+ cfs_spin_lock(&(parent->kstc_lock));
if (backlog) {
- spin_lock(&backlog->kstc_lock);
+ cfs_spin_lock(&backlog->kstc_lock);
/* attch it into the listing list of daemon */
- list_add( &backlog->child.kstc_link,
+ cfs_list_add( &backlog->child.kstc_link,
&parent->listener.kstc_listening.list );
parent->listener.kstc_listening.num++;
backlog->child.kstc_queued = TRUE;
- spin_unlock(&backlog->kstc_lock);
+ cfs_spin_unlock(&backlog->kstc_lock);
} else {
cfs_enter_debugger();
}
- spin_unlock(&(parent->kstc_lock));
+ cfs_spin_unlock(&(parent->kstc_lock));
}
}
return rc;
}
- spin_lock(&(tconn->kstc_lock));
+ cfs_spin_lock(&(tconn->kstc_lock));
tconn->listener.nbacklog = nbacklog;
tconn->kstc_state = ksts_listening;
cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
- spin_unlock(&(tconn->kstc_lock));
+ cfs_spin_unlock(&(tconn->kstc_lock));
return rc;
}
void
ks_stop_listen(ks_tconn_t *tconn)
{
- struct list_head * list;
+ cfs_list_t * list;
ks_tconn_t * backlog;
/* reset all tdi event callbacks to NULL */
KsResetHandlers (tconn);
- spin_lock(&tconn->kstc_lock);
+ cfs_spin_lock(&tconn->kstc_lock);
cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
/* cleanup all the listening backlog child connections */
- list_for_each (list, &(tconn->listener.kstc_listening.list)) {
- backlog = list_entry(list, ks_tconn_t, child.kstc_link);
+ cfs_list_for_each (list, &(tconn->listener.kstc_listening.list)) {
+ backlog = cfs_list_entry(list, ks_tconn_t, child.kstc_link);
/* destory and free it */
ks_put_tconn(backlog);
}
- spin_unlock(&tconn->kstc_lock);
+ cfs_spin_unlock(&tconn->kstc_lock);
/* wake up it from the waiting on new incoming connections */
KeSetEvent(&tconn->listener.kstc_accept_event, 0, FALSE);
ks_tconn_t ** child
)
{
- struct list_head * tmp;
+ cfs_list_t * tmp;
ks_tconn_t * backlog = NULL;
ks_replenish_backlogs(parent, parent->listener.nbacklog);
- spin_lock(&(parent->kstc_lock));
+ cfs_spin_lock(&(parent->kstc_lock));
if (parent->listener.kstc_listening.num <= 0) {
- spin_unlock(&(parent->kstc_lock));
+ cfs_spin_unlock(&(parent->kstc_lock));
return -1;
}
/* check the listening queue and try to search the accepted connecton */
- list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
- backlog = list_entry (tmp, ks_tconn_t, child.kstc_link);
+ cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+ backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
- spin_lock(&(backlog->kstc_lock));
+ cfs_spin_lock(&(backlog->kstc_lock));
if (backlog->child.kstc_accepted) {
LASSERT(backlog->kstc_state == ksts_connected);
LASSERT(backlog->child.kstc_busy);
- list_del(&(backlog->child.kstc_link));
- list_add(&(backlog->child.kstc_link),
- &(parent->listener.kstc_accepted.list));
+ cfs_list_del(&(backlog->child.kstc_link));
+ cfs_list_add(&(backlog->child.kstc_link),
+ &(parent->listener.kstc_accepted.list));
parent->listener.kstc_accepted.num++;
parent->listener.kstc_listening.num--;
backlog->child.kstc_queueno = 1;
- spin_unlock(&(backlog->kstc_lock));
+ cfs_spin_unlock(&(backlog->kstc_lock));
break;
} else {
- spin_unlock(&(backlog->kstc_lock));
+ cfs_spin_unlock(&(backlog->kstc_lock));
backlog = NULL;
}
}
- spin_unlock(&(parent->kstc_lock));
+ cfs_spin_unlock(&(parent->kstc_lock));
/* we need wait until new incoming connections are requested
or the case of shuting down the listenig daemon thread */
NULL
);
- spin_lock(&(parent->kstc_lock));
+ cfs_spin_lock(&(parent->kstc_lock));
/* check whether it's exptected to exit ? */
if (!cfs_is_flag_set(parent->kstc_flags, KS_TCONN_DAEMON_STARTED)) {
- spin_unlock(&(parent->kstc_lock));
+ cfs_spin_unlock(&(parent->kstc_lock));
} else {
goto again;
}
ks_addr_slot_t * slot = NULL;
PLIST_ENTRY list = NULL;
- spin_lock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_lock(&ks_data.ksnd_addrs_lock);
list = ks_data.ksnd_addrs_list.Flink;
while (list != &ks_data.ksnd_addrs_list) {
slot = NULL;
}
- spin_unlock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
return (int)(slot == NULL);
}
PLIST_ENTRY list = NULL;
int nips = 0;
- spin_lock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_lock(&ks_data.ksnd_addrs_lock);
*names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
if (*names == NULL) {
errorout:
- spin_unlock(&ks_data.ksnd_addrs_lock);
+ cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
return nips;
}
{
LASSERT(sock->kstc_type == kstt_listener);
- spin_lock(&(sock->kstc_lock));
+ cfs_spin_lock(&(sock->kstc_lock));
/* clear the daemon flag */
cfs_clear_flag(sock->kstc_flags, KS_TCONN_DAEMON_STARTED);
/* wake up it from the waiting on new incoming connections */
KeSetEvent(&sock->listener.kstc_accept_event, 0, FALSE);
- spin_unlock(&(sock->kstc_lock));
+ cfs_spin_unlock(&(sock->kstc_lock));
}
/*
{
PTRANSPORT_ADDRESS taddr = NULL;
- spin_lock(&socket->kstc_lock);
+ cfs_spin_lock(&socket->kstc_lock);
if (remote) {
if (socket->kstc_type == kstt_sender) {
taddr = socket->sender.kstc_info.Remote;
if (port != NULL)
*port = ntohs (addr->sin_port);
} else {
- spin_unlock(&socket->kstc_lock);
+ cfs_spin_unlock(&socket->kstc_lock);
return -ENOTCONN;
}
- spin_unlock(&socket->kstc_lock);
+ cfs_spin_unlock(&socket->kstc_lock);
return 0;
}
#include "tracefile.h"
/* percents to share the total debug memory for each type */
-static unsigned int pages_factor[TCD_TYPE_MAX] = {
- 90, /* 90% pages for TCD_TYPE_PASSIVE */
- 10 /* 10% pages for TCD_TYPE_DISPATCH */
+static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
+ 90, /* 90% pages for CFS_TCD_TYPE_PASSIVE */
+ 10 /* 10% pages for CFS_TCD_TYPE_DISPATCH */
};
-char *trace_console_buffers[NR_CPUS][TCD_TYPE_MAX];
+char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
-struct rw_semaphore tracefile_sem;
+cfs_rw_semaphore_t cfs_tracefile_sem;
-int tracefile_init_arch()
+int cfs_tracefile_init_arch()
{
int i;
int j;
- struct trace_cpu_data *tcd;
+ struct cfs_trace_cpu_data *tcd;
- init_rwsem(&tracefile_sem);
+ cfs_init_rwsem(&cfs_tracefile_sem);
/* initialize trace_data */
- memset(trace_data, 0, sizeof(trace_data));
- for (i = 0; i < TCD_TYPE_MAX; i++) {
- trace_data[i]=cfs_alloc(sizeof(union trace_data_union)*NR_CPUS,
- GFP_KERNEL);
- if (trace_data[i] == NULL)
+ memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
+ for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
+ cfs_trace_data[i] =
+ cfs_alloc(sizeof(union cfs_trace_data_union) * \
+ CFS_NR_CPUS, GFP_KERNEL);
+ if (cfs_trace_data[i] == NULL)
goto out;
}
/* arch related info initialized */
- tcd_for_each(tcd, i, j) {
+ cfs_tcd_for_each(tcd, i, j) {
tcd->tcd_pages_factor = (USHORT) pages_factor[i];
tcd->tcd_type = (USHORT) i;
tcd->tcd_cpu = (USHORT)j;
}
- for (i = 0; i < num_possible_cpus(); i++)
- for (j = 0; j < TCD_TYPE_MAX; j++) {
- trace_console_buffers[i][j] =
- cfs_alloc(TRACE_CONSOLE_BUFFER_SIZE,
+ for (i = 0; i < cfs_num_possible_cpus(); i++)
+ for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
+ cfs_trace_console_buffers[i][j] =
+ cfs_alloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
GFP_KERNEL);
- if (trace_console_buffers[i][j] == NULL)
+ if (cfs_trace_console_buffers[i][j] == NULL)
goto out;
}
return 0;
out:
- tracefile_fini_arch();
- printk(KERN_ERR "lnet: No enough memory\n");
+ cfs_tracefile_fini_arch();
+ printk(CFS_KERN_ERR "lnet: Not enough memory\n");
return -ENOMEM;
}
-void tracefile_fini_arch()
+void cfs_tracefile_fini_arch()
{
int i;
int j;
- for (i = 0; i < num_possible_cpus(); i++) {
- for (j = 0; j < TCD_TYPE_MAX; j++) {
- if (trace_console_buffers[i][j] != NULL) {
- cfs_free(trace_console_buffers[i][j]);
- trace_console_buffers[i][j] = NULL;
+ for (i = 0; i < cfs_num_possible_cpus(); i++) {
+ for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
+ if (cfs_trace_console_buffers[i][j] != NULL) {
+ cfs_free(cfs_trace_console_buffers[i][j]);
+ cfs_trace_console_buffers[i][j] = NULL;
}
}
}
- for (i = 0; trace_data[i] != NULL; i++) {
- cfs_free(trace_data[i]);
- trace_data[i] = NULL;
+ for (i = 0; cfs_trace_data[i] != NULL; i++) {
+ cfs_free(cfs_trace_data[i]);
+ cfs_trace_data[i] = NULL;
}
- fini_rwsem(&tracefile_sem);
+ cfs_fini_rwsem(&cfs_tracefile_sem);
}
-void tracefile_read_lock()
+void cfs_tracefile_read_lock()
{
- down_read(&tracefile_sem);
+ cfs_down_read(&cfs_tracefile_sem);
}
-void tracefile_read_unlock()
+void cfs_tracefile_read_unlock()
{
- up_read(&tracefile_sem);
+ cfs_up_read(&cfs_tracefile_sem);
}
-void tracefile_write_lock()
+void cfs_tracefile_write_lock()
{
- down_write(&tracefile_sem);
+ cfs_down_write(&cfs_tracefile_sem);
}
-void tracefile_write_unlock()
+void cfs_tracefile_write_unlock()
{
- up_write(&tracefile_sem);
+ cfs_up_write(&cfs_tracefile_sem);
}
-trace_buf_type_t
-trace_buf_idx_get()
+cfs_trace_buf_type_t cfs_trace_buf_idx_get()
{
if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
- return TCD_TYPE_DISPATCH;
+ return CFS_TCD_TYPE_DISPATCH;
else
- return TCD_TYPE_PASSIVE;
+ return CFS_TCD_TYPE_PASSIVE;
}
-int trace_lock_tcd(struct trace_cpu_data *tcd)
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd)
{
- __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
+ __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
return 1;
}
-void trace_unlock_tcd(struct trace_cpu_data *tcd)
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd)
{
- __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
+ __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
}
-int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
+int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
+ struct cfs_trace_page *tage)
{
/*
* XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
}
void
-set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
- const int line, unsigned long stack)
+cfs_set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
+ const int line, unsigned long stack)
{
struct timeval tv;
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
header->ph_subsys = subsys;
header->ph_mask = mask;
- header->ph_cpu_id = smp_processor_id();
+ header->ph_cpu_id = cfs_smp_processor_id();
header->ph_sec = (__u32)tv.tv_sec;
header->ph_usec = tv.tv_usec;
header->ph_stack = stack;
return;
}
-void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
- int len, const char *file, const char *fn)
+void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+ const char *buf, int len, const char *file,
+ const char *fn)
{
char *prefix = "Lustre", *ptype = NULL;
if ((mask & D_EMERG) != 0) {
prefix = "LustreError";
- ptype = KERN_EMERG;
+ ptype = CFS_KERN_EMERG;
} else if ((mask & D_ERROR) != 0) {
prefix = "LustreError";
- ptype = KERN_ERR;
+ ptype = CFS_KERN_ERR;
} else if ((mask & D_WARNING) != 0) {
prefix = "Lustre";
- ptype = KERN_WARNING;
+ ptype = CFS_KERN_WARNING;
} else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
prefix = "Lustre";
- ptype = KERN_INFO;
+ ptype = CFS_KERN_INFO;
}
if ((mask & D_CONSOLE) != 0) {
return;
}
-int trace_max_debug_mb(void)
+int cfs_trace_max_debug_mb(void)
{
- int total_mb = (num_physpages >> (20 - CFS_PAGE_SHIFT));
+ int total_mb = (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}
* only define one trace_data type for windows
*/
typedef enum {
- TCD_TYPE_PASSIVE = 0,
- TCD_TYPE_DISPATCH,
- TCD_TYPE_MAX
-} trace_buf_type_t;
+ CFS_TCD_TYPE_PASSIVE = 0,
+ CFS_TCD_TYPE_DISPATCH,
+ CFS_TCD_TYPE_MAX
+} cfs_trace_buf_type_t;
#endif
}
#ifdef __KERNEL__
-#define LNET_LOCK() spin_lock(&the_lnet.ln_lock)
-#define LNET_UNLOCK() spin_unlock(&the_lnet.ln_lock)
-#define LNET_MUTEX_DOWN(m) mutex_down(m)
-#define LNET_MUTEX_UP(m) mutex_up(m)
+#define LNET_LOCK() cfs_spin_lock(&the_lnet.ln_lock)
+#define LNET_UNLOCK() cfs_spin_unlock(&the_lnet.ln_lock)
+#define LNET_MUTEX_DOWN(m) cfs_mutex_down(m)
+#define LNET_MUTEX_UP(m) cfs_mutex_up(m)
#else
# ifndef HAVE_LIBPTHREAD
#define LNET_SINGLE_THREADED_LOCK(l) \
/* ALWAYS called with liblock held */
lnet_freeobj_t *o;
- if (list_empty (&fl->fl_list))
+ if (cfs_list_empty (&fl->fl_list))
return (NULL);
- o = list_entry (fl->fl_list.next, lnet_freeobj_t, fo_list);
- list_del (&o->fo_list);
+ o = cfs_list_entry (fl->fl_list.next, lnet_freeobj_t, fo_list);
+ cfs_list_del (&o->fo_list);
return ((void *)&o->fo_contents);
}
lnet_freelist_free (lnet_freelist_t *fl, void *obj)
{
/* ALWAYS called with liblock held */
- lnet_freeobj_t *o = list_entry (obj, lnet_freeobj_t, fo_contents);
+ lnet_freeobj_t *o = cfs_list_entry (obj, lnet_freeobj_t, fo_contents);
- list_add (&o->fo_list, &fl->fl_list);
+ cfs_list_add (&o->fo_list, &fl->fl_list);
}
LASSERT (ni->ni_refcount > 0);
ni->ni_refcount--;
if (ni->ni_refcount == 0)
- list_add_tail(&ni->ni_list, &the_lnet.ln_zombie_nis);
+ cfs_list_add_tail(&ni->ni_list, &the_lnet.ln_zombie_nis);
}
static inline void
LNET_UNLOCK();
}
-static inline struct list_head *
+static inline cfs_list_t *
lnet_nid2peerhash (lnet_nid_t nid)
{
unsigned int idx = LNET_NIDADDR(nid) % LNET_PEER_HASHSIZE;
int lnet_parse_ip2nets (char **networksp, char *ip2nets);
int lnet_parse_routes (char *route_str, int *im_a_router);
-int lnet_parse_networks (struct list_head *nilist, char *networks);
+int lnet_parse_networks (cfs_list_t *nilist, char *networks);
int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid);
lnet_peer_t *lnet_find_peer_locked (lnet_nid_t nid);
struct lnet_libmd;
typedef struct lnet_msg {
- struct list_head msg_activelist;
- struct list_head msg_list; /* Q for credits/MD */
-
- lnet_process_id_t msg_target;
- __u32 msg_type;
-
- unsigned int msg_target_is_router:1; /* sending to a router */
- unsigned int msg_routing:1; /* being forwarded */
- unsigned int msg_ack:1; /* ack on finalize (PUT) */
- unsigned int msg_sending:1; /* outgoing message */
- unsigned int msg_receiving:1; /* being received */
- unsigned int msg_delayed:1; /* had to Q for buffer or tx credit */
- unsigned int msg_txcredit:1; /* taken an NI send credit */
- unsigned int msg_peertxcredit:1; /* taken a peer send credit */
- unsigned int msg_rtrcredit:1; /* taken a globel router credit */
- unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */
- unsigned int msg_onactivelist:1; /* on the activelist */
-
- struct lnet_peer *msg_txpeer; /* peer I'm sending to */
- struct lnet_peer *msg_rxpeer; /* peer I received from */
-
- void *msg_private;
- struct lnet_libmd *msg_md;
-
- unsigned int msg_len;
- unsigned int msg_wanted;
- unsigned int msg_offset;
- unsigned int msg_niov;
- struct iovec *msg_iov;
- lnet_kiov_t *msg_kiov;
-
- lnet_event_t msg_ev;
- lnet_hdr_t msg_hdr;
+ cfs_list_t msg_activelist;
+ cfs_list_t msg_list; /* Q for credits/MD */
+
+ lnet_process_id_t msg_target;
+ __u32 msg_type;
+
+ unsigned int msg_target_is_router:1; /* sending to a router */
+ unsigned int msg_routing:1; /* being forwarded */
+ unsigned int msg_ack:1; /* ack on finalize (PUT) */
+ unsigned int msg_sending:1; /* outgoing message */
+ unsigned int msg_receiving:1; /* being received */
+ unsigned int msg_delayed:1; /* had to Q for buffer or tx credit */
+ unsigned int msg_txcredit:1; /* taken an NI send credit */
+ unsigned int msg_peertxcredit:1; /* taken a peer send credit */
+ unsigned int msg_rtrcredit:1; /* taken a global router credit */
+ unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */
+ unsigned int msg_onactivelist:1; /* on the activelist */
+
+ struct lnet_peer *msg_txpeer; /* peer I'm sending to */
+ struct lnet_peer *msg_rxpeer; /* peer I received from */
+
+ void *msg_private;
+ struct lnet_libmd *msg_md;
+
+ unsigned int msg_len;
+ unsigned int msg_wanted;
+ unsigned int msg_offset;
+ unsigned int msg_niov;
+ struct iovec *msg_iov;
+ lnet_kiov_t *msg_kiov;
+
+ lnet_event_t msg_ev;
+ lnet_hdr_t msg_hdr;
} lnet_msg_t;
typedef struct lnet_libhandle {
- struct list_head lh_hash_chain;
- __u64 lh_cookie;
+ cfs_list_t lh_hash_chain;
+ __u64 lh_cookie;
} lnet_libhandle_t;
#define lh_entry(ptr, type, member) \
((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
typedef struct lnet_eq {
- struct list_head eq_list;
- lnet_libhandle_t eq_lh;
- lnet_seq_t eq_enq_seq;
- lnet_seq_t eq_deq_seq;
- unsigned int eq_size;
- lnet_event_t *eq_events;
- int eq_refcount;
- lnet_eq_handler_t eq_callback;
+ cfs_list_t eq_list;
+ lnet_libhandle_t eq_lh;
+ lnet_seq_t eq_enq_seq;
+ lnet_seq_t eq_deq_seq;
+ unsigned int eq_size;
+ lnet_event_t *eq_events;
+ int eq_refcount;
+ lnet_eq_handler_t eq_callback;
} lnet_eq_t;
typedef struct lnet_me {
- struct list_head me_list;
- lnet_libhandle_t me_lh;
- lnet_process_id_t me_match_id;
- unsigned int me_portal;
- __u64 me_match_bits;
- __u64 me_ignore_bits;
- lnet_unlink_t me_unlink;
- struct lnet_libmd *me_md;
+ cfs_list_t me_list;
+ lnet_libhandle_t me_lh;
+ lnet_process_id_t me_match_id;
+ unsigned int me_portal;
+ __u64 me_match_bits;
+ __u64 me_ignore_bits;
+ lnet_unlink_t me_unlink;
+ struct lnet_libmd *me_md;
} lnet_me_t;
typedef struct lnet_libmd {
- struct list_head md_list;
- lnet_libhandle_t md_lh;
- lnet_me_t *md_me;
- char *md_start;
- unsigned int md_offset;
- unsigned int md_length;
- unsigned int md_max_size;
- int md_threshold;
- int md_refcount;
- unsigned int md_options;
- unsigned int md_flags;
- void *md_user_ptr;
- lnet_eq_t *md_eq;
- unsigned int md_niov; /* # frags */
+ cfs_list_t md_list;
+ lnet_libhandle_t md_lh;
+ lnet_me_t *md_me;
+ char *md_start;
+ unsigned int md_offset;
+ unsigned int md_length;
+ unsigned int md_max_size;
+ int md_threshold;
+ int md_refcount;
+ unsigned int md_options;
+ unsigned int md_flags;
+ void *md_user_ptr;
+ lnet_eq_t *md_eq;
+ unsigned int md_niov; /* # frags */
union {
struct iovec iov[LNET_MAX_IOV];
lnet_kiov_t kiov[LNET_MAX_IOV];
#ifdef LNET_USE_LIB_FREELIST
typedef struct
{
- void *fl_objs; /* single contiguous array of objects */
- int fl_nobjs; /* the number of them */
- int fl_objsize; /* the size (including overhead) of each of them */
- struct list_head fl_list; /* where they are enqueued */
+ void *fl_objs; /* single contiguous array of objects */
+ int fl_nobjs; /* the number of them */
+ int fl_objsize; /* the size (including overhead) of each of them */
+ cfs_list_t fl_list; /* where they are enqueued */
} lnet_freelist_t;
typedef struct
{
- struct list_head fo_list; /* enqueue on fl_list */
- void *fo_contents; /* aligned contents */
+ cfs_list_t fo_list; /* enqueue on fl_list */
+ void *fo_contents; /* aligned contents */
} lnet_freeobj_t;
#endif
typedef struct {
/* info about peers we are trying to fail */
- struct list_head tp_list; /* ln_test_peers */
- lnet_nid_t tp_nid; /* matching nid */
- unsigned int tp_threshold; /* # failures to simulate */
+ cfs_list_t tp_list; /* ln_test_peers */
+ lnet_nid_t tp_nid; /* matching nid */
+ unsigned int tp_threshold; /* # failures to simulate */
} lnet_test_peer_t;
#define LNET_COOKIE_TYPE_MD 1
typedef struct lnet_lnd
{
/* fields managed by portals */
- struct list_head lnd_list; /* stash in the LND table */
- int lnd_refcount; /* # active instances */
+ cfs_list_t lnd_list; /* stash in the LND table */
+ int lnd_refcount; /* # active instances */
/* fields initialised by the LND */
- unsigned int lnd_type;
+ unsigned int lnd_type;
int (*lnd_startup) (struct lnet_ni *ni);
void (*lnd_shutdown) (struct lnet_ni *ni);
#define LNET_MAX_INTERFACES 16
typedef struct lnet_ni {
- struct list_head ni_list; /* chain on ln_nis */
- struct list_head ni_txq; /* messages waiting for tx credits */
+ cfs_list_t ni_list; /* chain on ln_nis */
+ cfs_list_t ni_txq; /* messages waiting for tx credits */
int ni_maxtxcredits; /* # tx credits */
int ni_txcredits; /* # tx credits free */
int ni_mintxcredits; /* lowest it's been */
#define LNET_MAX_RTR_NIS 16
#define LNET_PINGINFO_SIZE offsetof(lnet_ping_info_t, pi_ni[LNET_MAX_RTR_NIS])
typedef struct {
- struct list_head rcd_list; /* chain on the_lnet.ln_zombie_rcd */
+ cfs_list_t rcd_list; /* chain on the_lnet.ln_zombie_rcd */
lnet_handle_md_t rcd_mdh; /* ping buffer MD */
lnet_ping_info_t *rcd_pinginfo; /* ping buffer */
} lnet_rc_data_t;
typedef struct lnet_peer {
- struct list_head lp_hashlist; /* chain on peer hash */
- struct list_head lp_txq; /* messages blocking for tx credits */
- struct list_head lp_rtrq; /* messages blocking for router credits */
- struct list_head lp_rtr_list; /* chain on router list */
+ cfs_list_t lp_hashlist; /* chain on peer hash */
+ cfs_list_t lp_txq; /* messages blocking for tx credits */
+ cfs_list_t lp_rtrq; /* messages blocking for router credits */
+ cfs_list_t lp_rtr_list; /* chain on router list */
int lp_txcredits; /* # tx credits available */
int lp_mintxcredits; /* low water mark */
int lp_rtrcredits; /* # router credits */
#define lnet_peer_aliveness_enabled(lp) ((lp)->lp_ni->ni_peertimeout > 0)
typedef struct {
- struct list_head lr_list; /* chain on net */
+ cfs_list_t lr_list; /* chain on net */
lnet_peer_t *lr_gateway; /* router node */
unsigned int lr_hops; /* how far I am */
} lnet_route_t;
typedef struct {
- struct list_head lrn_list; /* chain on ln_remote_nets */
- struct list_head lrn_routes; /* routes to me */
+ cfs_list_t lrn_list; /* chain on ln_remote_nets */
+ cfs_list_t lrn_routes; /* routes to me */
__u32 lrn_net; /* my net number */
} lnet_remotenet_t;
typedef struct {
- struct list_head rbp_bufs; /* my free buffer pool */
- struct list_head rbp_msgs; /* messages blocking for a buffer */
- int rbp_npages; /* # pages in each buffer */
- int rbp_nbuffers; /* # buffers */
- int rbp_credits; /* # free buffers / blocked messages */
- int rbp_mincredits; /* low water mark */
+ cfs_list_t rbp_bufs; /* my free buffer pool */
+ cfs_list_t rbp_msgs; /* messages blocking for a buffer */
+ int rbp_npages; /* # pages in each buffer */
+ int rbp_nbuffers; /* # buffers */
+ int rbp_credits; /* # free buffers / blocked messages */
+ int rbp_mincredits; /* low water mark */
} lnet_rtrbufpool_t;
typedef struct {
- struct list_head rb_list; /* chain on rbp_bufs */
- lnet_rtrbufpool_t *rb_pool; /* owning pool */
- lnet_kiov_t rb_kiov[0]; /* the buffer space */
+ cfs_list_t rb_list; /* chain on rbp_bufs */
+ lnet_rtrbufpool_t *rb_pool; /* owning pool */
+ lnet_kiov_t rb_kiov[0]; /* the buffer space */
} lnet_rtrbuf_t;
#include <libcfs/libcfs_pack.h>
/* Options for lnet_portal_t::ptl_options */
#define LNET_PTL_LAZY (1 << 0)
typedef struct {
- struct list_head ptl_ml; /* match list */
- struct list_head ptl_msgq; /* messages blocking for MD */
- __u64 ptl_ml_version; /* validity stamp, only changed for new attached MD */
- __u64 ptl_msgq_version; /* validity stamp */
- unsigned int ptl_options;
+ cfs_list_t ptl_ml; /* match list */
+ cfs_list_t ptl_msgq; /* messages blocking for MD */
+ __u64 ptl_ml_version; /* validity stamp, only changed for new attached MD */
+ __u64 ptl_msgq_version; /* validity stamp */
+ unsigned int ptl_options;
} lnet_portal_t;
/* Router Checker states */
typedef struct
{
/* Stuff initialised at LNetInit() */
- int ln_init; /* LNetInit() called? */
- int ln_refcount; /* LNetNIInit/LNetNIFini counter */
- int ln_niinit_self; /* Have I called LNetNIInit myself? */
+ int ln_init; /* LNetInit() called? */
+ int ln_refcount; /* LNetNIInit/LNetNIFini counter */
+ int ln_niinit_self; /* Have I called LNetNIInit myself? */
- struct list_head ln_lnds; /* registered LNDs */
+ cfs_list_t ln_lnds; /* registered LNDs */
#ifdef __KERNEL__
- spinlock_t ln_lock;
- cfs_waitq_t ln_waitq;
- struct semaphore ln_api_mutex;
- struct semaphore ln_lnd_mutex;
+ cfs_spinlock_t ln_lock;
+ cfs_waitq_t ln_waitq;
+ cfs_semaphore_t ln_api_mutex;
+ cfs_semaphore_t ln_lnd_mutex;
#else
# ifndef HAVE_LIBPTHREAD
- int ln_lock;
- int ln_api_mutex;
- int ln_lnd_mutex;
+ int ln_lock;
+ int ln_api_mutex;
+ int ln_lnd_mutex;
# else
- pthread_cond_t ln_cond;
- pthread_mutex_t ln_lock;
- pthread_mutex_t ln_api_mutex;
- pthread_mutex_t ln_lnd_mutex;
+ pthread_cond_t ln_cond;
+ pthread_mutex_t ln_lock;
+ pthread_mutex_t ln_api_mutex;
+ pthread_mutex_t ln_lnd_mutex;
# endif
#endif
/* Stuff initialised at LNetNIInit() */
- int ln_shutdown; /* shutdown in progress */
- int ln_nportals; /* # portals */
- lnet_portal_t *ln_portals; /* the vector of portals */
+ int ln_shutdown; /* shutdown in progress */
+ int ln_nportals; /* # portals */
+ lnet_portal_t *ln_portals; /* the vector of portals */
- lnet_pid_t ln_pid; /* requested pid */
+ lnet_pid_t ln_pid; /* requested pid */
- struct list_head ln_nis; /* LND instances */
- lnet_ni_t *ln_loni; /* the loopback NI */
- lnet_ni_t *ln_eqwaitni; /* NI to wait for events in */
- struct list_head ln_zombie_nis; /* dying LND instances */
- int ln_nzombie_nis; /* # of NIs to wait for */
+ cfs_list_t ln_nis; /* LND instances */
+ lnet_ni_t *ln_loni; /* the loopback NI */
+ lnet_ni_t *ln_eqwaitni; /* NI to wait for events in */
+ cfs_list_t ln_zombie_nis; /* dying LND instances */
+ int ln_nzombie_nis; /* # of NIs to wait for */
- struct list_head ln_remote_nets; /* remote networks with routes to them */
- __u64 ln_remote_nets_version; /* validity stamp */
+ cfs_list_t ln_remote_nets; /* remote networks with routes to them */
+ __u64 ln_remote_nets_version; /* validity stamp */
- struct list_head ln_routers; /* list of all known routers */
- __u64 ln_routers_version; /* validity stamp */
+ cfs_list_t ln_routers; /* list of all known routers */
+ __u64 ln_routers_version; /* validity stamp */
- struct list_head *ln_peer_hash; /* NID->peer hash */
- int ln_npeers; /* # peers extant */
- int ln_peertable_version; /* /proc validity stamp */
+ cfs_list_t *ln_peer_hash; /* NID->peer hash */
+ int ln_npeers; /* # peers extant */
+ int ln_peertable_version; /* /proc validity stamp */
- int ln_routing; /* am I a router? */
- lnet_rtrbufpool_t ln_rtrpools[LNET_NRBPOOLS]; /* router buffer pools */
+ int ln_routing; /* am I a router? */
+ lnet_rtrbufpool_t ln_rtrpools[LNET_NRBPOOLS]; /* router buffer pools */
- int ln_lh_hash_size; /* size of lib handle hash table */
- struct list_head *ln_lh_hash_table; /* all extant lib handles, this interface */
- __u64 ln_next_object_cookie; /* cookie generator */
- __u64 ln_interface_cookie; /* uniquely identifies this ni in this epoch */
+ int ln_lh_hash_size; /* size of lib handle hash table */
+ cfs_list_t *ln_lh_hash_table; /* all extant lib handles, this interface */
+ __u64 ln_next_object_cookie; /* cookie generator */
+ __u64 ln_interface_cookie; /* uniquely identifies this ni in this epoch */
- char *ln_network_tokens; /* space for network names */
- int ln_network_tokens_nob;
+ char *ln_network_tokens; /* space for network names */
+ int ln_network_tokens_nob;
- int ln_testprotocompat; /* test protocol compatibility flags */
+ int ln_testprotocompat; /* test protocol compatibility flags */
- struct list_head ln_finalizeq; /* msgs waiting to complete finalizing */
+ cfs_list_t ln_finalizeq; /* msgs waiting to complete finalizing */
#ifdef __KERNEL__
- void **ln_finalizers; /* threads doing finalization */
- int ln_nfinalizers; /* max # threads finalizing */
+ void **ln_finalizers; /* threads doing finalization */
+ int ln_nfinalizers; /* max # threads finalizing */
#else
- int ln_finalizing;
+ int ln_finalizing;
#endif
- struct list_head ln_test_peers; /* failure simulation */
+ cfs_list_t ln_test_peers; /* failure simulation */
- lnet_handle_md_t ln_ping_target_md;
- lnet_handle_eq_t ln_ping_target_eq;
- lnet_ping_info_t *ln_ping_info;
+ lnet_handle_md_t ln_ping_target_md;
+ lnet_handle_eq_t ln_ping_target_eq;
+ lnet_ping_info_t *ln_ping_info;
#ifdef __KERNEL__
- struct semaphore ln_rc_signal; /* serialise startup/shutdown */
+ cfs_semaphore_t ln_rc_signal; /* serialise startup/shutdown */
#endif
int ln_rc_state; /* router checker startup/shutdown state */
lnet_handle_eq_t ln_rc_eqh; /* router checker's event queue */
lnet_handle_md_t ln_rc_mdh;
- struct list_head ln_zombie_rcd;
+ cfs_list_t ln_zombie_rcd;
#ifdef LNET_USE_LIB_FREELIST
- lnet_freelist_t ln_free_mes;
- lnet_freelist_t ln_free_msgs;
- lnet_freelist_t ln_free_mds;
- lnet_freelist_t ln_free_eqs;
+ lnet_freelist_t ln_free_mes;
+ lnet_freelist_t ln_free_msgs;
+ lnet_freelist_t ln_free_mds;
+ lnet_freelist_t ln_free_eqs;
#endif
- struct list_head ln_active_msgs;
- struct list_head ln_active_mds;
- struct list_head ln_active_eqs;
+ cfs_list_t ln_active_msgs;
+ cfs_list_t ln_active_mds;
+ cfs_list_t ln_active_eqs;
- lnet_counters_t ln_counters;
+ lnet_counters_t ln_counters;
#ifndef __KERNEL__
/* Temporary workaround to allow uOSS and test programs force
* lnet_prepare(). The only way to turn this flag on is to
* call lnet_server_mode() */
- int ln_server_mode_flag;
+ int ln_server_mode_flag;
#endif
} lnet_t;
*** for list_batch command */
typedef struct {
- struct list_head rpe_link; /* link chain */
+ cfs_list_t rpe_link; /* link chain */
lnet_process_id_t rpe_peer; /* peer's id */
struct timeval rpe_stamp; /* time stamp of RPC */
int rpe_state; /* peer's state */
char *lstio_dbg_namep; /* IN: name of group|batch */
int lstio_dbg_count; /* IN: # of test nodes to debug */
lnet_process_id_t *lstio_dbg_idsp; /* IN: id of test nodes */
- struct list_head *lstio_dbg_resultp; /* OUT: list head of result buffer */
+ cfs_list_t *lstio_dbg_resultp; /* OUT: list head of result buffer */
} lstio_debug_args_t;
typedef struct {
char *lstio_grp_namep; /* IN: group name */
int lstio_grp_count; /* IN: # of nodes id */
lnet_process_id_t *lstio_grp_idsp; /* IN: array of nodes */
- struct list_head *lstio_grp_resultp; /* OUT: list head of result buffer */
+ cfs_list_t *lstio_grp_resultp; /* OUT: list head of result buffer */
} lstio_group_update_args_t;
typedef struct {
char *lstio_grp_namep; /* IN: group name */
int lstio_grp_count; /* IN: # of nodes */
lnet_process_id_t *lstio_grp_idsp; /* IN: nodes */
- struct list_head *lstio_grp_resultp; /* OUT: list head of result buffer */
+ cfs_list_t *lstio_grp_resultp; /* OUT: list head of result buffer */
} lstio_group_nodes_args_t;
typedef struct {
int lstio_bat_timeout; /* IN: timeout for the batch */
int lstio_bat_nmlen; /* IN: name length */
char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of result buffer */
+ cfs_list_t *lstio_bat_resultp; /* OUT: list head of result buffer */
} lstio_batch_run_args_t;
typedef struct {
int lstio_bat_force; /* IN: abort unfinished test RPC */
int lstio_bat_nmlen; /* IN: name length */
char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of result buffer */
+ cfs_list_t *lstio_bat_resultp; /* OUT: list head of result buffer */
} lstio_batch_stop_args_t;
typedef struct {
int lstio_bat_timeout; /* IN: timeout for waiting */
int lstio_bat_nmlen; /* IN: name length */
char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of result buffer */
+ cfs_list_t *lstio_bat_resultp; /* OUT: list head of result buffer */
} lstio_batch_query_args_t;
typedef struct {
char *lstio_sta_namep; /* IN: group name */
int lstio_sta_count; /* IN: # of pid */
lnet_process_id_t *lstio_sta_idsp; /* IN: pid */
- struct list_head *lstio_sta_resultp; /* OUT: list head of result buffer */
+ cfs_list_t *lstio_sta_resultp; /* OUT: list head of result buffer */
} lstio_stat_args_t;
typedef enum {
lstio_ping_param_t,
... more */
int *lstio_tes_retp; /* OUT: private returned value */
- struct list_head *lstio_tes_resultp; /* OUT: list head of result buffer */
+ cfs_list_t *lstio_tes_resultp; /* OUT: list head of result buffer */
} lstio_test_args_t;
typedef enum {
#define GMNAL_NETBUF_LOCAL_NETADDR(nb) ((void *)((unsigned long)(nb)->nb_netaddr))
typedef struct gmnal_txbuf {
- struct list_head txb_list; /* queue on gmni_idle_ltxbs */
+ cfs_list_t txb_list; /* queue on gmni_idle_ltxbs */
struct gmnal_txbuf *txb_next; /* stash on gmni_ltxs */
gmnal_netbuf_t txb_buf; /* space */
} gmnal_txbuf_t;
typedef struct gmnal_tx {
- struct list_head tx_list; /* queue */
+ cfs_list_t tx_list; /* queue */
int tx_credit:1; /* consumed a credit? */
int tx_large_iskiov:1; /* large is in kiovs? */
struct gmnal_ni *tx_gmni; /* owning NI */
} gmnal_tx_t;
typedef struct gmnal_rx {
- struct list_head rx_list; /* enqueue on gmni_rxq for handling */
+ cfs_list_t rx_list; /* enqueue on gmni_rxq for handling */
int rx_islarge:1; /* large receive buffer? */
unsigned int rx_recv_nob; /* bytes received */
__u16 rx_recv_gmid; /* sender */
} gmnal_rx_t;
typedef struct gmnal_ni {
- lnet_ni_t *gmni_ni; /* generic NI */
- struct gm_port *gmni_port; /* GM port */
- spinlock_t gmni_gm_lock; /* serialise GM calls */
- int gmni_large_pages; /* # pages in a large message buffer */
- int gmni_large_msgsize; /* nob in large message buffers */
- int gmni_large_gmsize; /* large message GM bucket */
- int gmni_small_msgsize; /* nob in small message buffers */
- int gmni_small_gmsize; /* small message GM bucket */
- __u64 gmni_netaddr_base; /* base of mapped network VM */
- int gmni_netaddr_size; /* # bytes of mapped network VM */
-
- gmnal_tx_t *gmni_txs; /* all txs */
- gmnal_rx_t *gmni_rxs; /* all rx descs */
- gmnal_txbuf_t *gmni_ltxbs; /* all large tx bufs */
-
- atomic_t gmni_nthreads; /* total # threads */
- gm_alarm_t gmni_alarm; /* alarm to wake caretaker */
- int gmni_shutdown; /* tell all threads to exit */
-
- struct list_head gmni_idle_txs; /* idle tx's */
- int gmni_tx_credits; /* # transmits still possible */
- struct list_head gmni_idle_ltxbs; /* idle large tx buffers */
- struct list_head gmni_buf_txq; /* tx's waiting for buffers */
- struct list_head gmni_cred_txq; /* tx's waiting for credits */
- spinlock_t gmni_tx_lock; /* serialise */
-
- struct gm_hash *gmni_rx_hash; /* buffer->rx lookup */
- struct semaphore gmni_rx_mutex; /* serialise blocking on GM */
+ lnet_ni_t *gmni_ni; /* generic NI */
+ struct gm_port *gmni_port; /* GM port */
+ cfs_spinlock_t gmni_gm_lock; /* serialise GM calls */
+ int gmni_large_pages; /* # pages in a large message buffer */
+ int gmni_large_msgsize;/* nob in large message buffers */
+ int gmni_large_gmsize; /* large message GM bucket */
+ int gmni_small_msgsize;/* nob in small message buffers */
+ int gmni_small_gmsize; /* small message GM bucket */
+ __u64 gmni_netaddr_base; /* base of mapped network VM */
+ int gmni_netaddr_size; /* # bytes of mapped network VM */
+
+ gmnal_tx_t *gmni_txs; /* all txs */
+ gmnal_rx_t *gmni_rxs; /* all rx descs */
+ gmnal_txbuf_t *gmni_ltxbs; /* all large tx bufs */
+
+ cfs_atomic_t gmni_nthreads; /* total # threads */
+ gm_alarm_t gmni_alarm; /* alarm to wake caretaker */
+ int gmni_shutdown; /* tell all threads to exit */
+
+ cfs_list_t gmni_idle_txs; /* idle tx's */
+ int gmni_tx_credits; /* # transmits still possible */
+ cfs_list_t gmni_idle_ltxbs; /* idle large tx buffers */
+ cfs_list_t gmni_buf_txq; /* tx's waiting for buffers */
+ cfs_list_t gmni_cred_txq; /* tx's waiting for credits */
+ cfs_spinlock_t gmni_tx_lock; /* serialise */
+
+ struct gm_hash *gmni_rx_hash; /* buffer->rx lookup */
+ cfs_semaphore_t gmni_rx_mutex; /* serialise blocking on GM */
} gmnal_ni_t;
typedef struct {
memset(gmni, 0, sizeof(*gmni));
gmni->gmni_ni = ni;
- spin_lock_init(&gmni->gmni_tx_lock);
- spin_lock_init(&gmni->gmni_gm_lock);
- INIT_LIST_HEAD(&gmni->gmni_idle_txs);
- INIT_LIST_HEAD(&gmni->gmni_idle_ltxbs);
- INIT_LIST_HEAD(&gmni->gmni_buf_txq);
- INIT_LIST_HEAD(&gmni->gmni_cred_txq);
- sema_init(&gmni->gmni_rx_mutex, 1);
+ cfs_spin_lock_init(&gmni->gmni_tx_lock);
+ cfs_spin_lock_init(&gmni->gmni_gm_lock);
+ CFS_INIT_LIST_HEAD(&gmni->gmni_idle_txs);
+ CFS_INIT_LIST_HEAD(&gmni->gmni_idle_ltxbs);
+ CFS_INIT_LIST_HEAD(&gmni->gmni_buf_txq);
+ CFS_INIT_LIST_HEAD(&gmni->gmni_cred_txq);
+ cfs_sema_init(&gmni->gmni_rx_mutex, 1);
PORTAL_MODULE_USE;
/*
LASSERT(tx->tx_lntmsg == NULL);
tx->tx_lntmsg = lntmsg;
- spin_lock(&gmni->gmni_tx_lock);
+ cfs_spin_lock(&gmni->gmni_tx_lock);
- list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
+ cfs_list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
gmnal_check_txqueues_locked(gmni);
- spin_unlock(&gmni->gmni_tx_lock);
+ cfs_spin_unlock(&gmni->gmni_tx_lock);
return 0;
}
{
gmnal_tx_t *tx = NULL;
- spin_lock(&gmni->gmni_tx_lock);
+ cfs_spin_lock(&gmni->gmni_tx_lock);
if (gmni->gmni_shutdown ||
- list_empty(&gmni->gmni_idle_txs)) {
- spin_unlock(&gmni->gmni_tx_lock);
+ cfs_list_empty(&gmni->gmni_idle_txs)) {
+ cfs_spin_unlock(&gmni->gmni_tx_lock);
return NULL;
}
- tx = list_entry(gmni->gmni_idle_txs.next, gmnal_tx_t, tx_list);
- list_del(&tx->tx_list);
+ tx = cfs_list_entry(gmni->gmni_idle_txs.next, gmnal_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
- spin_unlock(&gmni->gmni_tx_lock);
+ cfs_spin_unlock(&gmni->gmni_tx_lock);
LASSERT (tx->tx_lntmsg == NULL);
LASSERT (tx->tx_ltxb == NULL);
tx->tx_lntmsg = NULL;
- spin_lock(&gmni->gmni_tx_lock);
+ cfs_spin_lock(&gmni->gmni_tx_lock);
if (tx->tx_ltxb != NULL) {
wake_sched = 1;
- list_add_tail(&tx->tx_ltxb->txb_list, &gmni->gmni_idle_ltxbs);
+ cfs_list_add_tail(&tx->tx_ltxb->txb_list,
+ &gmni->gmni_idle_ltxbs);
tx->tx_ltxb = NULL;
}
tx->tx_credit = 0;
}
- list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
+ cfs_list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
if (wake_sched)
gmnal_check_txqueues_locked(gmni);
- spin_unlock(&gmni->gmni_tx_lock);
+ cfs_spin_unlock(&gmni->gmni_tx_lock);
/* Delay finalize until tx is free */
if (lnetmsg != NULL)
{
gmnal_tx_t *tx = (gmnal_tx_t*)context;
- LASSERT(!in_interrupt());
+ LASSERT(!cfs_in_interrupt());
CDEBUG(D_NET, "status for tx [%p] is [%d][%s], nid %s\n",
tx, status, gmnal_gmstatus2str(status),
gmnal_tx_t *tx = (gmnal_tx_t*)context;
gmnal_ni_t *gmni = tx->tx_gmni;
- LASSERT(!in_interrupt());
+ LASSERT(!cfs_in_interrupt());
switch(status) {
case GM_SUCCESS:
gmnal_notify_peer_down(tx);
- spin_lock(&gmni->gmni_gm_lock);
+ cfs_spin_lock(&gmni->gmni_gm_lock);
gm_drop_sends(gmni->gmni_port,
tx->tx_ltxb != NULL ?
GMNAL_LARGE_PRIORITY : GMNAL_SMALL_PRIORITY,
tx->tx_gmlid, *gmnal_tunables.gm_port,
gmnal_drop_sends_callback, tx);
- spin_unlock(&gmni->gmni_gm_lock);
+ cfs_spin_unlock(&gmni->gmni_gm_lock);
return;
}
int pri;
void *netaddr;
- tx = list_empty(&gmni->gmni_buf_txq) ? NULL :
- list_entry(gmni->gmni_buf_txq.next, gmnal_tx_t, tx_list);
+ tx = cfs_list_empty(&gmni->gmni_buf_txq) ? NULL :
+ cfs_list_entry(gmni->gmni_buf_txq.next, gmnal_tx_t, tx_list);
if (tx != NULL &&
(tx->tx_large_nob == 0 ||
- !list_empty(&gmni->gmni_idle_ltxbs))) {
+ !cfs_list_empty(&gmni->gmni_idle_ltxbs))) {
/* consume tx */
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
LASSERT (tx->tx_ltxb == NULL);
if (tx->tx_large_nob != 0) {
- ltxb = list_entry(gmni->gmni_idle_ltxbs.next,
- gmnal_txbuf_t, txb_list);
+ ltxb = cfs_list_entry(gmni->gmni_idle_ltxbs.next,
+ gmnal_txbuf_t, txb_list);
/* consume large buffer */
- list_del(&ltxb->txb_list);
+ cfs_list_del(&ltxb->txb_list);
- spin_unlock(&gmni->gmni_tx_lock);
+ cfs_spin_unlock(&gmni->gmni_tx_lock);
/* Unlocking here allows sends to get re-ordered,
* but we want to allow other CPUs to progress... */
tx->tx_msgnob += tx->tx_large_nob;
- spin_lock(&gmni->gmni_tx_lock);
+ cfs_spin_lock(&gmni->gmni_tx_lock);
}
- list_add_tail(&tx->tx_list, &gmni->gmni_cred_txq);
+ cfs_list_add_tail(&tx->tx_list, &gmni->gmni_cred_txq);
}
- if (!list_empty(&gmni->gmni_cred_txq) &&
+ if (!cfs_list_empty(&gmni->gmni_cred_txq) &&
gmni->gmni_tx_credits != 0) {
- tx = list_entry(gmni->gmni_cred_txq.next, gmnal_tx_t, tx_list);
+ tx = cfs_list_entry(gmni->gmni_cred_txq.next, gmnal_tx_t,
+ tx_list);
/* consume tx and 1 credit */
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
gmni->gmni_tx_credits--;
- spin_unlock(&gmni->gmni_tx_lock);
+ cfs_spin_unlock(&gmni->gmni_tx_lock);
/* Unlocking here allows sends to get re-ordered, but we want
* to allow other CPUs to progress... */
pri = GMNAL_LARGE_PRIORITY;
}
- spin_lock(&gmni->gmni_gm_lock);
+ cfs_spin_lock(&gmni->gmni_gm_lock);
gm_send_to_peer_with_callback(gmni->gmni_port,
netaddr, gmsize,
gmnal_tx_callback,
(void*)tx);
- spin_unlock(&gmni->gmni_gm_lock);
- spin_lock(&gmni->gmni_tx_lock);
+ cfs_spin_unlock(&gmni->gmni_gm_lock);
+ cfs_spin_lock(&gmni->gmni_tx_lock);
}
}
CDEBUG(D_NET, "posting rx %p buf %p\n", rx, buffer);
- spin_lock(&gmni->gmni_gm_lock);
+ cfs_spin_lock(&gmni->gmni_gm_lock);
gm_provide_receive_buffer_with_tag(gmni->gmni_port,
buffer, gmsize, pri, 0);
- spin_unlock(&gmni->gmni_gm_lock);
+ cfs_spin_unlock(&gmni->gmni_gm_lock);
}
void
tx->tx_msgnob = offsetof(gmnal_msg_t, gmm_type);
tx->tx_large_nob = 0;
- spin_lock(&gmni->gmni_tx_lock);
+ cfs_spin_lock(&gmni->gmni_tx_lock);
- list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
+ cfs_list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
gmnal_check_txqueues_locked(gmni);
- spin_unlock(&gmni->gmni_tx_lock);
+ cfs_spin_unlock(&gmni->gmni_tx_lock);
}
int
if (rc != 0)
continue;
- spin_lock(&gmni->gmni_gm_lock);
+ cfs_spin_lock(&gmni->gmni_gm_lock);
rxevent = gm_blocking_receive_no_spin(gmni->gmni_port);
- spin_unlock(&gmni->gmni_gm_lock);
+ cfs_spin_unlock(&gmni->gmni_gm_lock);
switch (GM_RECV_EVENT_TYPE(rxevent)) {
default:
gm_unknown(gmni->gmni_port, rxevent);
- up(&gmni->gmni_rx_mutex);
+ cfs_up(&gmni->gmni_rx_mutex);
continue;
case GM_FAST_RECV_EVENT:
break;
}
- up(&gmni->gmni_rx_mutex);
+ cfs_up(&gmni->gmni_rx_mutex);
CDEBUG (D_NET, "rx %p: buf %p(%p) nob %d\n", rx,
GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf),
}
CDEBUG(D_NET, "exiting\n");
- atomic_dec(&gmni->gmni_nthreads);
+ cfs_atomic_dec(&gmni->gmni_nthreads);
return 0;
}
int count = 2;
gmni->gmni_shutdown = 1;
- mb();
+ cfs_mb();
/* wake rxthread owning gmni_rx_mutex with an alarm. */
- spin_lock(&gmni->gmni_gm_lock);
+ cfs_spin_lock(&gmni->gmni_gm_lock);
gm_set_alarm(gmni->gmni_port, &gmni->gmni_alarm, 0, NULL, NULL);
- spin_unlock(&gmni->gmni_gm_lock);
+ cfs_spin_unlock(&gmni->gmni_gm_lock);
- while (atomic_read(&gmni->gmni_nthreads) != 0) {
+ while (cfs_atomic_read(&gmni->gmni_nthreads) != 0) {
count++;
if ((count & (count - 1)) == 0)
CWARN("Waiting for %d threads to stop\n",
- atomic_read(&gmni->gmni_nthreads));
+ cfs_atomic_read(&gmni->gmni_nthreads));
gmnal_yield(1);
}
}
int pid;
LASSERT (!gmni->gmni_shutdown);
- LASSERT (atomic_read(&gmni->gmni_nthreads) == 0);
+ LASSERT (cfs_atomic_read(&gmni->gmni_nthreads) == 0);
gm_initialize_alarm(&gmni->gmni_alarm);
- for (i = 0; i < num_online_cpus(); i++) {
+ for (i = 0; i < cfs_num_online_cpus(); i++) {
- pid = kernel_thread(gmnal_rx_thread, (void*)gmni, 0);
+ pid = cfs_kernel_thread(gmnal_rx_thread, (void*)gmni, 0);
if (pid < 0) {
CERROR("rx thread failed to start: %d\n", pid);
gmnal_stop_threads(gmni);
return pid;
}
- atomic_inc(&gmni->gmni_nthreads);
+ cfs_atomic_inc(&gmni->gmni_nthreads);
}
return 0;
return rc;
}
- list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);
+ cfs_list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);
txb->txb_next = gmni->gmni_ltxbs;
gmni->gmni_ltxbs = txb;
tx->tx_gmni = gmni;
- list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
+ cfs_list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
tx->tx_next = gmni->gmni_txs;
gmni->gmni_txs = tx;
void
gmnal_yield(int delay)
{
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(delay);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_schedule_timeout(delay);
}
for (i = 0; i < npages; i++) {
if (p->mxg_pages[i] != NULL) {
__free_page(p->mxg_pages[i]);
- spin_lock(&kmxlnd_data.kmx_mem_lock);
+ cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
- spin_unlock(&kmxlnd_data.kmx_mem_lock);
+ cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
}
}
mxlnd_free_pages(p);
return -ENOMEM;
}
- spin_lock(&kmxlnd_data.kmx_mem_lock);
+ cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
kmxlnd_data.kmx_mem_used += PAGE_SIZE;
- spin_unlock(&kmxlnd_data.kmx_mem_lock);
+ cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
}
*pp = p;
ctx->mxc_incarnation = 0;
ctx->mxc_deadline = 0;
ctx->mxc_state = MXLND_CTX_IDLE;
- if (!list_empty(&ctx->mxc_list))
- list_del_init(&ctx->mxc_list);
+ if (!cfs_list_empty(&ctx->mxc_list))
+ cfs_list_del_init(&ctx->mxc_list);
/* ignore mxc_rx_list */
if (ctx->mxc_type == MXLND_REQ_TX) {
ctx->mxc_nid = 0;
tx = &kmxlnd_data.kmx_txs[i];
tx->mxc_type = MXLND_REQ_TX;
- INIT_LIST_HEAD(&tx->mxc_list);
+ CFS_INIT_LIST_HEAD(&tx->mxc_list);
/* map mxc_msg to page */
page = pages->mxg_pages[ipage];
}
/* in startup(), no locks required */
- list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
+ cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
}
return 0;
kmx_peer_t *next = NULL;
for (i = 0; i < MXLND_HASH_SIZE; i++) {
- list_for_each_entry_safe(peer, next, &kmxlnd_data.kmx_peers[i], mxp_list) {
- list_del_init(&peer->mxp_list);
+ cfs_list_for_each_entry_safe(peer, next,
+ &kmxlnd_data.kmx_peers[i],
+ mxp_list) {
+ cfs_list_del_init(&peer->mxp_list);
if (peer->mxp_conn) mxlnd_conn_decref(peer->mxp_conn);
mxlnd_peer_decref(peer);
count++;
mx_get_endpoint_addr(kmxlnd_data.kmx_endpt, &kmxlnd_data.kmx_epa);
mx_decompose_endpoint_addr(kmxlnd_data.kmx_epa, &nic_id, &ep_id);
- mxret = mx_connect(kmxlnd_data.kmx_endpt, nic_id, ep_id, MXLND_MSG_MAGIC,
- MXLND_CONNECT_TIMEOUT/HZ*1000, &kmxlnd_data.kmx_epa);
+ mxret = mx_connect(kmxlnd_data.kmx_endpt, nic_id, ep_id,
+ MXLND_MSG_MAGIC, MXLND_CONNECT_TIMEOUT/CFS_HZ*1000,
+ &kmxlnd_data.kmx_epa);
if (mxret != MX_SUCCESS) {
CDEBUG(D_NETERROR, "unable to connect to myself (%s)\n", mx_strerror(mxret));
goto failed_with_endpoint;
mx_strerror(mxret));
goto failed_with_endpoint;
}
- mxret = mx_set_request_timeout(kmxlnd_data.kmx_endpt, NULL, MXLND_COMM_TIMEOUT/HZ*1000);
+ mxret = mx_set_request_timeout(kmxlnd_data.kmx_endpt, NULL,
+ MXLND_COMM_TIMEOUT/CFS_HZ*1000);
if (mxret != MX_SUCCESS) {
CERROR("mx_set_request_timeout() failed with %s\n",
mx_strerror(mxret));
int pid = 0;
int i = (int) ((long) arg);
- atomic_inc(&kmxlnd_data.kmx_nthreads);
- init_completion(&kmxlnd_data.kmx_completions[i]);
+ cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
+ cfs_init_completion(&kmxlnd_data.kmx_completions[i]);
- pid = kernel_thread (fn, arg, 0);
+ pid = cfs_kernel_thread (fn, arg, 0);
if (pid < 0) {
- CERROR("kernel_thread() failed with %d\n", pid);
- atomic_dec(&kmxlnd_data.kmx_nthreads);
+ CERROR("cfs_kernel_thread() failed with %d\n", pid);
+ cfs_atomic_dec(&kmxlnd_data.kmx_nthreads);
}
return pid;
}
mxlnd_thread_stop(long id)
{
int i = (int) id;
- atomic_dec (&kmxlnd_data.kmx_nthreads);
- complete(&kmxlnd_data.kmx_completions[i]);
+ cfs_atomic_dec (&kmxlnd_data.kmx_nthreads);
+ cfs_complete(&kmxlnd_data.kmx_completions[i]);
}
/**
CDEBUG(D_NET, "in shutdown()\n");
CDEBUG(D_MALLOC, "before MXLND cleanup: libcfs_kmemory %d "
- "kmx_mem_used %ld\n", atomic_read (&libcfs_kmemory),
+ "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
kmxlnd_data.kmx_mem_used);
CDEBUG(D_NET, "setting shutdown = 1\n");
- atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
switch (kmxlnd_data.kmx_init) {
/* wakeup request_waitds */
mx_wakeup(kmxlnd_data.kmx_endpt);
- up(&kmxlnd_data.kmx_tx_queue_sem);
- up(&kmxlnd_data.kmx_conn_sem);
- mxlnd_sleep(2 * HZ);
+ cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
+ cfs_up(&kmxlnd_data.kmx_conn_sem);
+ mxlnd_sleep(2 * CFS_HZ);
/* fall through */
CDEBUG(D_NET, "waiting on threads\n");
/* wait for threads to complete */
for (i = 0; i < nthreads; i++) {
- wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
- LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
CDEBUG(D_NET, "freeing completions\n");
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(struct completion));
+ nthreads * sizeof(cfs_completion_t));
/* fall through */
CDEBUG(D_NET, "shutdown complete\n");
CDEBUG(D_MALLOC, "after MXLND cleanup: libcfs_kmemory %d "
- "kmx_mem_used %ld\n", atomic_read (&libcfs_kmemory),
+ "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
kmxlnd_data.kmx_mem_used);
kmxlnd_data.kmx_init = MXLND_INIT_NOTHING;
return -EPERM;
}
CDEBUG(D_MALLOC, "before MXLND startup: libcfs_kmemory %d "
- "kmx_mem_used %ld\n", atomic_read (&libcfs_kmemory),
+ "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
kmxlnd_data.kmx_mem_used);
ni->ni_maxtxcredits = MXLND_TX_MSGS();
kmxlnd_data.kmx_ni = ni;
ni->ni_data = &kmxlnd_data;
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
kmxlnd_data.kmx_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
CDEBUG(D_NET, "my incarnation is %llu\n", kmxlnd_data.kmx_incarnation);
- rwlock_init (&kmxlnd_data.kmx_global_lock);
- spin_lock_init (&kmxlnd_data.kmx_mem_lock);
+ cfs_rwlock_init (&kmxlnd_data.kmx_global_lock);
+ cfs_spin_lock_init (&kmxlnd_data.kmx_mem_lock);
- INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_reqs);
- INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_zombies);
- INIT_LIST_HEAD (&kmxlnd_data.kmx_orphan_msgs);
- spin_lock_init (&kmxlnd_data.kmx_conn_lock);
- sema_init(&kmxlnd_data.kmx_conn_sem, 0);
+ CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_reqs);
+ CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_zombies);
+ CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_orphan_msgs);
+ cfs_spin_lock_init (&kmxlnd_data.kmx_conn_lock);
+ cfs_sema_init(&kmxlnd_data.kmx_conn_sem, 0);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
- INIT_LIST_HEAD (&kmxlnd_data.kmx_peers[i]);
+ CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_peers[i]);
}
- INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_idle);
- spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
+ CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_idle);
+ cfs_spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
kmxlnd_data.kmx_tx_next_cookie = 1;
- INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
- spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
- sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);
+ CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
+ cfs_spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
+ cfs_sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);
kmxlnd_data.kmx_init = MXLND_INIT_DATA;
/*****************************************************/
/* start threads */
MXLND_ALLOC(kmxlnd_data.kmx_completions,
- nthreads * sizeof(struct completion));
+ nthreads * sizeof(cfs_completion_t));
if (kmxlnd_data.kmx_completions == NULL) {
CERROR("failed to alloc kmxlnd_data.kmx_completions\n");
goto failed;
}
memset(kmxlnd_data.kmx_completions, 0,
- nthreads * sizeof(struct completion));
+ nthreads * sizeof(cfs_completion_t));
CDEBUG(D_NET, "using %d %s in mx_wait_any()\n",
*kmxlnd_tunables.kmx_n_waitd,
ret = mxlnd_thread_start(mxlnd_request_waitd, (void*)((long)i));
if (ret < 0) {
CERROR("Starting mxlnd_request_waitd[%d] failed with %d\n", i, ret);
- atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
for (--i; i >= 0; i--) {
- wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
- LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(struct completion));
+ nthreads * sizeof(cfs_completion_t));
goto failed;
}
ret = mxlnd_thread_start(mxlnd_tx_queued, (void*)((long)i++));
if (ret < 0) {
CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
- atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
for (--i; i >= 0; i--) {
- wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
- LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(struct completion));
+ nthreads * sizeof(cfs_completion_t));
goto failed;
}
ret = mxlnd_thread_start(mxlnd_timeoutd, (void*)((long)i++));
if (ret < 0) {
CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
- atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
- up(&kmxlnd_data.kmx_tx_queue_sem);
+ cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
for (--i; i >= 0; i--) {
- wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
- LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(struct completion));
+ nthreads * sizeof(cfs_completion_t));
goto failed;
}
ret = mxlnd_thread_start(mxlnd_connd, (void*)((long)i++));
if (ret < 0) {
CERROR("Starting mxlnd_connd failed with %d\n", ret);
- atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
- up(&kmxlnd_data.kmx_tx_queue_sem);
+ cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
for (--i; i >= 0; i--) {
- wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
- LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(struct completion));
+ nthreads * sizeof(cfs_completion_t));
goto failed;
}
#define MXLND_NDAEMONS 3 /* connd, timeoutd, tx_queued */
#define MXLND_MX_BOARD 0 /* Use the first MX NIC if more than 1 avail */
#define MXLND_MX_EP_ID 0 /* MX endpoint ID */
-#define MXLND_COMM_TIMEOUT (20 * HZ) /* timeout for send/recv (jiffies) */
-#define MXLND_WAIT_TIMEOUT HZ /* timeout for wait (jiffies) */
-#define MXLND_CONNECT_TIMEOUT (5 * HZ) /* timeout for connections (jiffies) */
+#define MXLND_COMM_TIMEOUT (20 * CFS_HZ) /* timeout for send/recv (jiffies) */
+#define MXLND_WAIT_TIMEOUT CFS_HZ /* timeout for wait (jiffies) */
+#define MXLND_CONNECT_TIMEOUT (5 * CFS_HZ) /* timeout for connections (jiffies) */
#define MXLND_POLLING 1000 /* poll iterations before blocking */
#define MXLND_LOOKUP_COUNT 5 /* how many times to try to resolve MAC */
#define MXLND_MAX_PEERS 1024 /* number of nodes talking to me */
#define MXLND_ALLOC(x, size) \
do { \
- spin_lock(&kmxlnd_data.kmx_mem_lock); \
+ cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
kmxlnd_data.kmx_mem_used += size; \
- spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+ cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
LIBCFS_ALLOC(x, size); \
if (unlikely(x == NULL)) { \
- spin_lock(&kmxlnd_data.kmx_mem_lock); \
+ cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
kmxlnd_data.kmx_mem_used -= size; \
- spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+ cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
} \
} while (0)
#define MXLND_FREE(x, size) \
do { \
- spin_lock(&kmxlnd_data.kmx_mem_lock); \
+ cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
kmxlnd_data.kmx_mem_used -= size; \
- spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+ cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
LIBCFS_FREE(x, size); \
} while (0)
typedef struct kmx_data
{
int kmx_init; /* initialization state */
- atomic_t kmx_shutdown; /* shutting down? */
- atomic_t kmx_nthreads; /* number of threads */
- struct completion *kmx_completions; /* array of completion structs */
+ cfs_atomic_t kmx_shutdown; /* shutting down? */
+ cfs_atomic_t kmx_nthreads; /* number of threads */
+ cfs_completion_t *kmx_completions; /* array of completion structs */
lnet_ni_t *kmx_ni; /* the LND instance */
u64 kmx_incarnation; /* my incarnation value */
long kmx_mem_used; /* memory used */
mx_endpoint_t kmx_endpt; /* the MX endpoint */
mx_endpoint_addr_t kmx_epa; /* the MX endpoint address */
- rwlock_t kmx_global_lock; /* global lock */
- spinlock_t kmx_mem_lock; /* memory accounting lock */
+ cfs_rwlock_t kmx_global_lock; /* global lock */
+ cfs_spinlock_t kmx_mem_lock; /* memory accounting lock */
- struct list_head kmx_conn_reqs; /* list of connection requests */
- spinlock_t kmx_conn_lock; /* connection list lock */
- struct semaphore kmx_conn_sem; /* semaphore for connection request list */
- struct list_head kmx_conn_zombies; /* list of zombie connections */
- struct list_head kmx_orphan_msgs; /* list of txs to cancel */
+ cfs_list_t kmx_conn_reqs; /* list of connection requests */
+ cfs_spinlock_t kmx_conn_lock; /* connection list lock */
+ cfs_semaphore_t kmx_conn_sem; /* semaphore for connection request list */
+ cfs_list_t kmx_conn_zombies; /* list of zombie connections */
+ cfs_list_t kmx_orphan_msgs; /* list of txs to cancel */
/* list of all known peers */
- struct list_head kmx_peers[MXLND_HASH_SIZE];
- atomic_t kmx_npeers; /* number of peers */
+ cfs_list_t kmx_peers[MXLND_HASH_SIZE];
+ cfs_atomic_t kmx_npeers; /* number of peers */
kmx_pages_t *kmx_tx_pages; /* tx msg pages */
struct kmx_ctx *kmx_txs; /* all tx descriptors */
- struct list_head kmx_tx_idle; /* list of idle tx */
- spinlock_t kmx_tx_idle_lock; /* lock for idle tx list */
+ cfs_list_t kmx_tx_idle; /* list of idle tx */
+ cfs_spinlock_t kmx_tx_idle_lock; /* lock for idle tx list */
s32 kmx_tx_used; /* txs in use */
u64 kmx_tx_next_cookie; /* unique id for tx */
- struct list_head kmx_tx_queue; /* generic send queue */
- spinlock_t kmx_tx_queue_lock; /* lock for generic sends */
- struct semaphore kmx_tx_queue_sem; /* semaphore for tx queue */
+ cfs_list_t kmx_tx_queue; /* generic send queue */
+ cfs_spinlock_t kmx_tx_queue_lock; /* lock for generic sends */
+ cfs_semaphore_t kmx_tx_queue_sem; /* semaphore for tx queue */
} kmx_data_t;
#define MXLND_INIT_NOTHING 0 /* in the beginning, there was nothing... */
control credits after completion */
unsigned long mxc_deadline; /* request time out in absolute jiffies */
enum kmx_req_state mxc_state; /* what is the state of the request? */
- struct list_head mxc_list; /* place on rx/tx idle list, tx q, peer tx */
- struct list_head mxc_rx_list; /* place on mxp_rx_posted list */
+ cfs_list_t mxc_list; /* place on rx/tx idle list, tx q, peer tx */
+ cfs_list_t mxc_rx_list; /* place on mxp_rx_posted list */
lnet_nid_t mxc_nid; /* dst's NID if peer is not known */
struct kmx_peer *mxc_peer; /* owning peer */
/* store all data from an unexpected CONN_[REQ|ACK] receive */
typedef struct kmx_connparams
{
- struct list_head mxr_list; /* list to hang on kmx_conn_reqs */
+ cfs_list_t mxr_list; /* list to hang on kmx_conn_reqs */
void *mxr_context; /* context - unused - will hold net */
mx_endpoint_addr_t mxr_epa; /* the peer's epa */
u64 mxr_match; /* the CONN_REQ's match bits */
typedef struct kmx_conn
{
struct kmx_peer *mxk_peer; /* owning peer */
- struct list_head mxk_list; /* for placing on mxp_conns */
- struct list_head mxk_zombie; /* for placing on zombies list */
+ cfs_list_t mxk_list; /* for placing on mxp_conns */
+ cfs_list_t mxk_zombie; /* for placing on zombies list */
u64 mxk_incarnation; /* connections's incarnation value */
u32 mxk_sid; /* peer's MX session id */
- atomic_t mxk_refcount; /* reference counting */
+ cfs_atomic_t mxk_refcount; /* reference counting */
int mxk_status; /* can we send messages? MXLND_CONN_* */
mx_endpoint_addr_t mxk_epa; /* peer's endpoint address */
- spinlock_t mxk_lock; /* lock */
+ cfs_spinlock_t mxk_lock; /* lock */
unsigned long mxk_timeout; /* expiration of oldest pending tx/rx */
unsigned long mxk_last_tx; /* when last tx completed with success */
unsigned long mxk_last_rx; /* when last rx completed */
kmx_pages_t *mxk_rx_pages; /* rx msg pages */
kmx_ctx_t *mxk_rxs; /* the rx descriptors */
- struct list_head mxk_rx_idle; /* list of idle rx */
+ cfs_list_t mxk_rx_idle; /* list of idle rx */
int mxk_credits; /* # of my credits for sending to peer */
int mxk_outstanding; /* # of credits to return */
- struct list_head mxk_tx_credit_queue; /* send queue for peer */
- struct list_head mxk_tx_free_queue; /* send queue for peer */
+ cfs_list_t mxk_tx_credit_queue; /* send queue for peer */
+ cfs_list_t mxk_tx_free_queue; /* send queue for peer */
int mxk_ntx_msgs; /* # of msgs on tx queues */
int mxk_ntx_data ; /* # of DATA on tx queues */
int mxk_ntx_posted; /* # of tx msgs in flight */
int mxk_data_posted; /* # of tx data payloads in flight */
- struct list_head mxk_pending; /* in flight rxs and txs */
+ cfs_list_t mxk_pending; /* in flight rxs and txs */
} kmx_conn_t;
/* peer state */
typedef struct kmx_peer
{
- struct list_head mxp_list; /* for placing on kmx_peers */
+ cfs_list_t mxp_list; /* for placing on kmx_peers */
lnet_nid_t mxp_nid; /* peer's LNET NID */
lnet_ni_t *mxp_ni; /* LNET interface */
- atomic_t mxp_refcount; /* reference counts */
+ cfs_atomic_t mxp_refcount; /* reference counts */
- struct list_head mxp_conns; /* list of connections */
+ cfs_list_t mxp_conns; /* list of connections */
kmx_conn_t *mxp_conn; /* current connection */
- struct list_head mxp_tx_queue; /* msgs waiting for a conn */
+ cfs_list_t mxp_tx_queue; /* msgs waiting for a conn */
u32 mxp_board; /* peer's board rank */
u32 mxp_ep_id; /* peer's MX endpoint ID */
#define mxlnd_peer_addref(peer) \
do { \
LASSERT(peer != NULL); \
- LASSERT(atomic_read(&(peer)->mxp_refcount) > 0); \
- atomic_inc(&(peer)->mxp_refcount); \
+ LASSERT(cfs_atomic_read(&(peer)->mxp_refcount) > 0); \
+ cfs_atomic_inc(&(peer)->mxp_refcount); \
} while (0)
#define mxlnd_peer_decref(peer) \
do { \
- LASSERT(atomic_read(&(peer)->mxp_refcount) > 0); \
- if (atomic_dec_and_test(&(peer)->mxp_refcount)) \
+ LASSERT(cfs_atomic_read(&(peer)->mxp_refcount) > 0); \
+ if (cfs_atomic_dec_and_test(&(peer)->mxp_refcount)) \
mxlnd_peer_free(peer); \
} while (0)
#define mxlnd_conn_addref(conn) \
do { \
LASSERT(conn != NULL); \
- LASSERT(atomic_read(&(conn)->mxk_refcount) > 0); \
- atomic_inc(&(conn)->mxk_refcount); \
+ LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0); \
+ cfs_atomic_inc(&(conn)->mxk_refcount); \
} while (0)
-#define mxlnd_conn_decref(conn) \
-do { \
- LASSERT(conn != NULL); \
- LASSERT(atomic_read(&(conn)->mxk_refcount) > 0); \
- if (atomic_dec_and_test(&(conn)->mxk_refcount)) { \
- spin_lock(&kmxlnd_data.kmx_conn_lock); \
+#define mxlnd_conn_decref(conn) \
+do { \
+ LASSERT(conn != NULL); \
+ LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0); \
+ if (cfs_atomic_dec_and_test(&(conn)->mxk_refcount)) { \
+ cfs_spin_lock(&kmxlnd_data.kmx_conn_lock); \
LASSERT((conn)->mxk_status == MXLND_CONN_DISCONNECT); \
CDEBUG(D_NET, "adding conn %p to zombies\n", (conn)); \
- list_add_tail(&(conn)->mxk_zombie, \
- &kmxlnd_data.kmx_conn_zombies); \
- spin_unlock(&kmxlnd_data.kmx_conn_lock); \
- up(&kmxlnd_data.kmx_conn_sem); \
- } \
+ cfs_list_add_tail(&(conn)->mxk_zombie, \
+ &kmxlnd_data.kmx_conn_zombies); \
+ cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock); \
+ cfs_up(&kmxlnd_data.kmx_conn_sem); \
+ } \
} while (0)
#define mxlnd_valid_msg_type(type) \
kmx_ctx_t *
mxlnd_get_idle_rx(kmx_conn_t *conn)
{
- struct list_head *rxs = NULL;
+ cfs_list_t *rxs = NULL;
kmx_ctx_t *rx = NULL;
LASSERT(conn != NULL);
rxs = &conn->mxk_rx_idle;
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
- if (list_empty (rxs)) {
- spin_unlock(&conn->mxk_lock);
+ if (cfs_list_empty (rxs)) {
+ cfs_spin_unlock(&conn->mxk_lock);
return NULL;
}
- rx = list_entry (rxs->next, kmx_ctx_t, mxc_list);
- list_del_init(&rx->mxc_list);
- spin_unlock(&conn->mxk_lock);
+ rx = cfs_list_entry (rxs->next, kmx_ctx_t, mxc_list);
+ cfs_list_del_init(&rx->mxc_list);
+ cfs_spin_unlock(&conn->mxk_lock);
#if MXLND_DEBUG
if (rx->mxc_get != rx->mxc_put) {
CDEBUG(D_NETERROR, "*** incarnation= %lld ***\n", rx->mxc_incarnation);
CDEBUG(D_NETERROR, "*** deadline= %ld ***\n", rx->mxc_deadline);
CDEBUG(D_NETERROR, "*** state= %s ***\n", mxlnd_ctxstate_to_str(rx->mxc_state));
- CDEBUG(D_NETERROR, "*** listed?= %d ***\n", !list_empty(&rx->mxc_list));
+ CDEBUG(D_NETERROR, "*** listed?= %d ***\n", !cfs_list_empty(&rx->mxc_list));
CDEBUG(D_NETERROR, "*** nid= 0x%llx ***\n", rx->mxc_nid);
CDEBUG(D_NETERROR, "*** peer= 0x%p ***\n", rx->mxc_peer);
CDEBUG(D_NETERROR, "*** msg_type= %s ***\n", mxlnd_msgtype_to_str(rx->mxc_msg_type));
mxlnd_put_idle_rx(kmx_ctx_t *rx)
{
kmx_conn_t *conn = rx->mxc_conn;
- struct list_head *rxs = &conn->mxk_rx_idle;
+ cfs_list_t *rxs = &conn->mxk_rx_idle;
LASSERT(rx->mxc_type == MXLND_REQ_RX);
rx->mxc_put++;
LASSERT(rx->mxc_get == rx->mxc_put);
- spin_lock(&conn->mxk_lock);
- list_add(&rx->mxc_list, rxs);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
+ cfs_list_add(&rx->mxc_list, rxs);
+ cfs_spin_unlock(&conn->mxk_lock);
return 0;
}
kmx_ctx_t *
mxlnd_get_idle_tx(void)
{
- struct list_head *tmp = &kmxlnd_data.kmx_tx_idle;
+ cfs_list_t *tmp = &kmxlnd_data.kmx_tx_idle;
kmx_ctx_t *tx = NULL;
- spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
+ cfs_spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
- if (list_empty (&kmxlnd_data.kmx_tx_idle)) {
+ if (cfs_list_empty (&kmxlnd_data.kmx_tx_idle)) {
CDEBUG(D_NETERROR, "%d txs in use\n", kmxlnd_data.kmx_tx_used);
- spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+ cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
return NULL;
}
tmp = &kmxlnd_data.kmx_tx_idle;
- tx = list_entry (tmp->next, kmx_ctx_t, mxc_list);
- list_del_init(&tx->mxc_list);
+ tx = cfs_list_entry (tmp->next, kmx_ctx_t, mxc_list);
+ cfs_list_del_init(&tx->mxc_list);
/* Allocate a new completion cookie. It might not be needed,
* but we've got a lock right now and we're unlikely to
kmxlnd_data.kmx_tx_next_cookie = 1;
}
kmxlnd_data.kmx_tx_used++;
- spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+ cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
LASSERT (tx->mxc_get == tx->mxc_put);
tx->mxc_put++;
LASSERT(tx->mxc_get == tx->mxc_put);
- spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
- list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
+ cfs_spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
+ cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
kmxlnd_data.kmx_tx_used--;
- spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+ cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
if (lntmsg[0] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[0], result);
if (lntmsg[1] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[1], result);
void
mxlnd_connparams_free(kmx_connparams_t *cp)
{
- LASSERT(list_empty(&cp->mxr_list));
+ LASSERT(cfs_list_empty(&cp->mxr_list));
MXLND_FREE(cp, sizeof(*cp));
return;
}
MXLND_ALLOC(c, sizeof(*c));
if (!c) return -ENOMEM;
- INIT_LIST_HEAD(&c->mxr_list);
+ CFS_INIT_LIST_HEAD(&c->mxr_list);
c->mxr_context = context;
c->mxr_epa = epa;
c->mxr_match = match;
mxlnd_set_conn_status(kmx_conn_t *conn, int status)
{
conn->mxk_status = status;
- mb();
+ cfs_mb();
}
/**
kmx_peer_t *peer = conn->mxk_peer;
CDEBUG(D_NET, "freeing conn 0x%p *****\n", conn);
- LASSERT (list_empty (&conn->mxk_tx_credit_queue) &&
- list_empty (&conn->mxk_tx_free_queue) &&
- list_empty (&conn->mxk_pending));
- if (!list_empty(&conn->mxk_list)) {
- list_del_init(&conn->mxk_list);
+ LASSERT (cfs_list_empty (&conn->mxk_tx_credit_queue) &&
+ cfs_list_empty (&conn->mxk_tx_free_queue) &&
+ cfs_list_empty (&conn->mxk_pending));
+ if (!cfs_list_empty(&conn->mxk_list)) {
+ cfs_list_del_init(&conn->mxk_list);
if (peer->mxp_conn == conn) {
peer->mxp_conn = NULL;
if (valid) {
}
}
/* unlink from global list and drop its ref */
- list_del_init(&peer->mxp_list);
+ cfs_list_del_init(&peer->mxp_list);
mxlnd_peer_decref(peer);
}
}
do {
found = 0;
- spin_lock(&conn->mxk_lock);
- list_for_each_entry_safe(ctx, next, &conn->mxk_pending, mxc_list) {
- list_del_init(&ctx->mxc_list);
+ cfs_spin_lock(&conn->mxk_lock);
+ cfs_list_for_each_entry_safe(ctx, next, &conn->mxk_pending,
+ mxc_list) {
+ cfs_list_del_init(&ctx->mxc_list);
if (ctx->mxc_type == MXLND_REQ_RX) {
found = 1;
mxret = mx_cancel(kmxlnd_data.kmx_endpt,
if (result == 1) {
ctx->mxc_errno = -ECONNABORTED;
ctx->mxc_state = MXLND_CTX_CANCELED;
- spin_unlock(&conn->mxk_lock);
- spin_lock(&kmxlnd_data.kmx_conn_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
+ cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
/* we may be holding the global lock,
* move to orphan list so that it can free it */
- list_add_tail(&ctx->mxc_list,
- &kmxlnd_data.kmx_orphan_msgs);
+ cfs_list_add_tail(&ctx->mxc_list,
+ &kmxlnd_data.kmx_orphan_msgs);
count++;
- spin_unlock(&kmxlnd_data.kmx_conn_lock);
- spin_lock(&conn->mxk_lock);
+ cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
+ cfs_spin_lock(&conn->mxk_lock);
}
break;
}
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
while (found);
mxlnd_cancel_queued_txs(kmx_conn_t *conn)
{
int count = 0;
- struct list_head *tmp = NULL;
+ cfs_list_t *tmp = NULL;
- spin_lock(&conn->mxk_lock);
- while (!list_empty(&conn->mxk_tx_free_queue) ||
- !list_empty(&conn->mxk_tx_credit_queue)) {
+ cfs_spin_lock(&conn->mxk_lock);
+ while (!cfs_list_empty(&conn->mxk_tx_free_queue) ||
+ !cfs_list_empty(&conn->mxk_tx_credit_queue)) {
kmx_ctx_t *tx = NULL;
- if (!list_empty(&conn->mxk_tx_free_queue)) {
+ if (!cfs_list_empty(&conn->mxk_tx_free_queue)) {
tmp = &conn->mxk_tx_free_queue;
} else {
tmp = &conn->mxk_tx_credit_queue;
}
- tx = list_entry(tmp->next, kmx_ctx_t, mxc_list);
- list_del_init(&tx->mxc_list);
- spin_unlock(&conn->mxk_lock);
+ tx = cfs_list_entry(tmp->next, kmx_ctx_t, mxc_list);
+ cfs_list_del_init(&tx->mxc_list);
+ cfs_spin_unlock(&conn->mxk_lock);
tx->mxc_errno = -ECONNABORTED;
tx->mxc_state = MXLND_CTX_CANCELED;
/* move to orphan list and then abort */
- spin_lock(&kmxlnd_data.kmx_conn_lock);
- list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs);
- spin_unlock(&kmxlnd_data.kmx_conn_lock);
+ cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
+ cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs);
+ cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
count++;
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
return count;
}
int valid = !mxlnd_endpoint_addr_null(epa);
int count = 0;
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
return;
}
mxlnd_set_conn_status(conn, MXLND_CONN_DISCONNECT);
conn->mxk_timeout = 0;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
count = mxlnd_cancel_queued_txs(conn);
count += mxlnd_conn_cancel_pending_rxs(conn);
if (count)
- up(&kmxlnd_data.kmx_conn_sem); /* let connd call kmxlnd_abort_msgs() */
+ cfs_up(&kmxlnd_data.kmx_conn_sem); /* let connd call kmxlnd_abort_msgs() */
if (send_bye && valid &&
conn->mxk_peer->mxp_nid != kmxlnd_data.kmx_ni->ni_nid) {
mxlnd_sleep(msecs_to_jiffies(20));
}
- if (atomic_read(&kmxlnd_data.kmx_shutdown) != 1) {
+ if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown) != 1) {
unsigned long last_msg = 0;
/* notify LNET that we are giving up on this peer */
- if (time_after(conn->mxk_last_rx, conn->mxk_last_tx))
+ if (cfs_time_after(conn->mxk_last_rx, conn->mxk_last_tx))
last_msg = conn->mxk_last_rx;
else
last_msg = conn->mxk_last_tx;
memset(conn->mxk_rxs, 0, MXLND_RX_MSGS() * sizeof(kmx_ctx_t));
conn->mxk_peer = peer;
- INIT_LIST_HEAD(&conn->mxk_list);
- INIT_LIST_HEAD(&conn->mxk_zombie);
- atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer
+ CFS_INIT_LIST_HEAD(&conn->mxk_list);
+ CFS_INIT_LIST_HEAD(&conn->mxk_zombie);
+ cfs_atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer
and one for the caller */
if (peer->mxp_nid == kmxlnd_data.kmx_ni->ni_nid) {
u64 nic_id = 0ULL;
mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
/* mxk_epa - to be set after mx_iconnect() */
}
- spin_lock_init(&conn->mxk_lock);
+ cfs_spin_lock_init(&conn->mxk_lock);
/* conn->mxk_timeout = 0 */
/* conn->mxk_last_tx = 0 */
/* conn->mxk_last_rx = 0 */
- INIT_LIST_HEAD(&conn->mxk_rx_idle);
+ CFS_INIT_LIST_HEAD(&conn->mxk_rx_idle);
conn->mxk_credits = *kmxlnd_tunables.kmx_peercredits;
/* mxk_outstanding = 0 */
- INIT_LIST_HEAD(&conn->mxk_tx_credit_queue);
- INIT_LIST_HEAD(&conn->mxk_tx_free_queue);
+ CFS_INIT_LIST_HEAD(&conn->mxk_tx_credit_queue);
+ CFS_INIT_LIST_HEAD(&conn->mxk_tx_free_queue);
/* conn->mxk_ntx_msgs = 0 */
/* conn->mxk_ntx_data = 0 */
/* conn->mxk_ntx_posted = 0 */
/* conn->mxk_data_posted = 0 */
- INIT_LIST_HEAD(&conn->mxk_pending);
+ CFS_INIT_LIST_HEAD(&conn->mxk_pending);
for (i = 0; i < MXLND_RX_MSGS(); i++) {
rx = &conn->mxk_rxs[i];
rx->mxc_type = MXLND_REQ_RX;
- INIT_LIST_HEAD(&rx->mxc_list);
+ CFS_INIT_LIST_HEAD(&rx->mxc_list);
/* map mxc_msg to page */
page = pages->mxg_pages[ipage];
LASSERT (ipage <= MXLND_TX_MSG_PAGES());
}
- list_add_tail(&rx->mxc_list, &conn->mxk_rx_idle);
+ cfs_list_add_tail(&rx->mxc_list, &conn->mxk_rx_idle);
}
*connp = conn;
mxlnd_peer_addref(peer); /* add a ref for this conn */
/* add to front of peer's conns list */
- list_add(&conn->mxk_list, &peer->mxp_conns);
+ cfs_list_add(&conn->mxk_list, &peer->mxp_conns);
peer->mxp_conn = conn;
return 0;
}
mxlnd_conn_alloc(kmx_conn_t **connp, kmx_peer_t *peer)
{
int ret = 0;
- rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
ret = mxlnd_conn_alloc_locked(connp, peer);
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
return ret;
}
ctx->mxc_state = MXLND_CTX_PENDING;
if (conn != NULL) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
if (conn->mxk_status >= MXLND_CONN_INIT) {
- list_add_tail(&ctx->mxc_list, &conn->mxk_pending);
+ cfs_list_add_tail(&ctx->mxc_list, &conn->mxk_pending);
if (conn->mxk_timeout == 0 || ctx->mxc_deadline < conn->mxk_timeout) {
conn->mxk_timeout = ctx->mxc_deadline;
}
ctx->mxc_state = MXLND_CTX_COMPLETED;
ret = -1;
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
return ret;
}
mxlnd_ctxstate_to_str(ctx->mxc_state));
}
ctx->mxc_state = MXLND_CTX_COMPLETED;
- if (!list_empty(&ctx->mxc_list)) {
+ if (!cfs_list_empty(&ctx->mxc_list)) {
kmx_conn_t *conn = ctx->mxc_conn;
kmx_ctx_t *next = NULL;
LASSERT(conn != NULL);
- spin_lock(&conn->mxk_lock);
- list_del_init(&ctx->mxc_list);
+ cfs_spin_lock(&conn->mxk_lock);
+ cfs_list_del_init(&ctx->mxc_list);
conn->mxk_timeout = 0;
- if (!list_empty(&conn->mxk_pending)) {
- next = list_entry(conn->mxk_pending.next, kmx_ctx_t, mxc_list);
+ if (!cfs_list_empty(&conn->mxk_pending)) {
+ next = cfs_list_entry(conn->mxk_pending.next,
+ kmx_ctx_t, mxc_list);
conn->mxk_timeout = next->mxc_deadline;
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
return 0;
}
{
CDEBUG(D_NET, "freeing peer 0x%p %s\n", peer, libcfs_nid2str(peer->mxp_nid));
- LASSERT (atomic_read(&peer->mxp_refcount) == 0);
+ LASSERT (cfs_atomic_read(&peer->mxp_refcount) == 0);
- if (!list_empty(&peer->mxp_list)) {
+ if (!cfs_list_empty(&peer->mxp_list)) {
/* assume we are locked */
- list_del_init(&peer->mxp_list);
+ cfs_list_del_init(&peer->mxp_list);
}
MXLND_FREE(peer, sizeof (*peer));
- atomic_dec(&kmxlnd_data.kmx_npeers);
+ cfs_atomic_dec(&kmxlnd_data.kmx_npeers);
return;
}
break;
} else if (ret == -EHOSTUNREACH && try < tries) {
/* add a little backoff */
- CDEBUG(D_NET, "sleeping for %d jiffies\n", HZ/4);
- mxlnd_sleep(HZ/4);
+ CDEBUG(D_NET, "sleeping for %d jiffies\n",
+ CFS_HZ/4);
+ mxlnd_sleep(CFS_HZ/4);
}
}
} while (try++ < tries);
MXLND_ALLOC(peer, sizeof (*peer));
if (peer == NULL) {
- CDEBUG(D_NETERROR, "Cannot allocate peer for NID 0x%llx\n", nid);
+ CDEBUG(D_NETERROR, "Cannot allocate peer for NID 0x%llx\n",
+ nid);
return -ENOMEM;
}
CDEBUG(D_NET, "allocated peer 0x%p for NID 0x%llx\n", peer, nid);
memset(peer, 0, sizeof(*peer));
- INIT_LIST_HEAD(&peer->mxp_list);
+ CFS_INIT_LIST_HEAD(&peer->mxp_list);
peer->mxp_nid = nid;
/* peer->mxp_ni unused - may be used for multi-rail */
- atomic_set(&peer->mxp_refcount, 1); /* ref for kmx_peers list */
+ cfs_atomic_set(&peer->mxp_refcount, 1); /* ref for kmx_peers list */
peer->mxp_board = board;
peer->mxp_ep_id = ep_id;
peer->mxp_nic_id = nic_id;
- INIT_LIST_HEAD(&peer->mxp_conns);
+ CFS_INIT_LIST_HEAD(&peer->mxp_conns);
ret = mxlnd_conn_alloc(&peer->mxp_conn, peer); /* adds 2nd conn ref here... */
if (ret != 0) {
mxlnd_peer_decref(peer);
return ret;
}
- INIT_LIST_HEAD(&peer->mxp_tx_queue);
+ CFS_INIT_LIST_HEAD(&peer->mxp_tx_queue);
if (peer->mxp_nic_id != 0ULL)
nic_id = peer->mxp_nic_id;
hash = mxlnd_nid_to_hash(nid);
- list_for_each_entry(peer, &kmxlnd_data.kmx_peers[hash], mxp_list) {
+ cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[hash], mxp_list) {
if (peer->mxp_nid == nid) {
found = 1;
mxlnd_peer_addref(peer);
int hash = 0;
kmx_peer_t *peer = NULL;
kmx_peer_t *old = NULL;
- rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- read_lock(g_lock);
+ cfs_read_lock(g_lock);
peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */
if ((peer && peer->mxp_conn) || /* found peer with conn or */
(!peer && !create)) { /* did not find peer and do not create one */
- read_unlock(g_lock);
+ cfs_read_unlock(g_lock);
return peer;
}
- read_unlock(g_lock);
+ cfs_read_unlock(g_lock);
/* if peer but _not_ conn */
if (peer && !peer->mxp_conn) {
if (create) {
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
if (!peer->mxp_conn) { /* check again */
/* create the conn */
ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer);
mxlnd_conn_decref(peer->mxp_conn);
}
}
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
}
return peer;
}
if (ret != 0) /* no memory, peer is NULL */
return NULL;
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
/* look again */
old = mxlnd_find_peer_by_nid_locked(nid);
peer = old;
} else {
/* no other peer, use this one */
- list_add_tail(&peer->mxp_list, &kmxlnd_data.kmx_peers[hash]);
- atomic_inc(&kmxlnd_data.kmx_npeers);
+ cfs_list_add_tail(&peer->mxp_list,
+ &kmxlnd_data.kmx_peers[hash]);
+ cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
mxlnd_peer_addref(peer);
mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */
}
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
return peer;
}
mx_decompose_endpoint_addr2(source, &nic_id, &ep_id, &sid);
mxlnd_parse_match(match_value, &msg_type, &error, &cookie);
- read_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_read_lock(&kmxlnd_data.kmx_global_lock);
mx_get_endpoint_addr_context(source, (void **) &conn);
if (conn) {
mxlnd_conn_addref(conn); /* add ref for this function */
peer = conn->mxk_peer;
}
- read_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
if (msg_type == MXLND_MSG_BYE) {
if (conn) {
mxlnd_send_message(source, MXLND_MSG_CONN_ACK, ENOMEM, 0);
return MX_RECV_FINISHED;
}
- spin_lock(&kmxlnd_data.kmx_conn_lock);
- list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
- spin_unlock(&kmxlnd_data.kmx_conn_lock);
- up(&kmxlnd_data.kmx_conn_sem);
+ cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
+ cfs_list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
+ cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
+ cfs_up(&kmxlnd_data.kmx_conn_sem);
return MX_RECV_FINISHED;
}
if (msg_type == MXLND_MSG_CONN_ACK) {
" from %llx:%d\n", nic_id, ep_id);
mxlnd_conn_disconnect(conn, 1, 1);
} else {
- spin_lock(&kmxlnd_data.kmx_conn_lock);
- list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
- spin_unlock(&kmxlnd_data.kmx_conn_lock);
- up(&kmxlnd_data.kmx_conn_sem);
+ cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
+ cfs_list_add_tail(&cp->mxr_list,
+ &kmxlnd_data.kmx_conn_reqs);
+ cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
+ cfs_up(&kmxlnd_data.kmx_conn_sem);
}
}
mxlnd_conn_decref(conn); /* drop ref taken above */
int ret = -ENOENT;
kmx_peer_t *peer = NULL;
- read_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_read_lock(&kmxlnd_data.kmx_global_lock);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
- list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) {
+ cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
+ mxp_list) {
if (index-- == 0) {
*nidp = peer->mxp_nid;
- *count = atomic_read(&peer->mxp_refcount);
+ *count = cfs_atomic_read(&peer->mxp_refcount);
ret = 0;
break;
}
}
}
- read_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
return ret;
}
if (peer->mxp_conn) {
mxlnd_conn_disconnect(peer->mxp_conn, 1, 1);
} else {
- list_del_init(&peer->mxp_list); /* remove from the global list */
+ cfs_list_del_init(&peer->mxp_list); /* remove from the global list */
mxlnd_peer_decref(peer); /* drop global list ref */
}
return;
if (nid != LNET_NID_ANY) {
peer = mxlnd_find_peer_by_nid(nid, 0); /* adds peer ref */
}
- write_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_lock(&kmxlnd_data.kmx_global_lock);
if (nid != LNET_NID_ANY) {
if (peer == NULL) {
ret = -ENOENT;
}
} else { /* LNET_NID_ANY */
for (i = 0; i < MXLND_HASH_SIZE; i++) {
- list_for_each_entry_safe(peer, next,
- &kmxlnd_data.kmx_peers[i], mxp_list) {
+ cfs_list_for_each_entry_safe(peer, next,
+ &kmxlnd_data.kmx_peers[i],
+ mxp_list) {
mxlnd_del_peer_locked(peer);
}
}
}
- write_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
return ret;
}
kmx_peer_t *peer = NULL;
kmx_conn_t *conn = NULL;
- read_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_read_lock(&kmxlnd_data.kmx_global_lock);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
- list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) {
- list_for_each_entry(conn, &peer->mxp_conns, mxk_list) {
+ cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
+ mxp_list) {
+ cfs_list_for_each_entry(conn, &peer->mxp_conns,
+ mxk_list) {
if (index-- > 0) {
continue;
}
mxlnd_conn_addref(conn); /* add ref here, dec in ctl() */
- read_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
return conn;
}
}
}
- read_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
return NULL;
}
kmx_conn_t *conn = NULL;
kmx_conn_t *next = NULL;
- list_for_each_entry_safe(conn, next, &peer->mxp_conns, mxk_list)
+ cfs_list_for_each_entry_safe(conn, next, &peer->mxp_conns, mxk_list)
mxlnd_conn_disconnect(conn, 0, 1);
return;
int ret = 0;
kmx_peer_t *peer = NULL;
- write_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_lock(&kmxlnd_data.kmx_global_lock);
if (nid != LNET_NID_ANY) {
peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */
if (peer == NULL) {
}
} else { /* LNET_NID_ANY */
for (i = 0; i < MXLND_HASH_SIZE; i++) {
- list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list)
+ cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list)
mxlnd_close_matching_conns_locked(peer);
}
}
- write_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
return ret;
}
msg_type != MXLND_MSG_GET_DATA) {
/* msg style tx */
if (mxlnd_tx_requires_credit(tx)) {
- list_add_tail(&tx->mxc_list, &conn->mxk_tx_credit_queue);
+ cfs_list_add_tail(&tx->mxc_list,
+ &conn->mxk_tx_credit_queue);
conn->mxk_ntx_msgs++;
} else if (msg_type == MXLND_MSG_CONN_REQ ||
msg_type == MXLND_MSG_CONN_ACK) {
/* put conn msgs at the front of the queue */
- list_add(&tx->mxc_list, &conn->mxk_tx_free_queue);
+ cfs_list_add(&tx->mxc_list, &conn->mxk_tx_free_queue);
} else {
/* PUT_ACK, PUT_NAK */
- list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue);
+ cfs_list_add_tail(&tx->mxc_list,
+ &conn->mxk_tx_free_queue);
conn->mxk_ntx_msgs++;
}
} else {
/* data style tx */
- list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue);
+ cfs_list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue);
conn->mxk_ntx_data++;
}
{
LASSERT(tx->mxc_peer != NULL);
LASSERT(tx->mxc_conn != NULL);
- spin_lock(&tx->mxc_conn->mxk_lock);
+ cfs_spin_lock(&tx->mxc_conn->mxk_lock);
mxlnd_peer_queue_tx_locked(tx);
- spin_unlock(&tx->mxc_conn->mxk_lock);
+ cfs_spin_unlock(&tx->mxc_conn->mxk_lock);
return;
}
mxlnd_peer_queue_tx(tx);
mxlnd_check_sends(peer);
} else {
- spin_lock(&kmxlnd_data.kmx_tx_queue_lock);
- list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue);
- spin_unlock(&kmxlnd_data.kmx_tx_queue_lock);
- up(&kmxlnd_data.kmx_tx_queue_sem);
+ cfs_spin_lock(&kmxlnd_data.kmx_tx_queue_lock);
+ cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue);
+ cfs_spin_unlock(&kmxlnd_data.kmx_tx_queue_lock);
+ cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
}
done:
return;
int nob = 0;
uint32_t length = 0;
kmx_peer_t *peer = NULL;
- rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
if (unlikely(peer->mxp_incompatible)) {
mxlnd_peer_decref(peer); /* drop ref taken above */
} else {
- read_lock(g_lock);
+ cfs_read_lock(g_lock);
conn = peer->mxp_conn;
if (conn && conn->mxk_status != MXLND_CONN_DISCONNECT) {
mxlnd_conn_addref(conn);
} else {
conn = NULL;
}
- read_unlock(g_lock);
+ cfs_read_unlock(g_lock);
mxlnd_peer_decref(peer); /* drop peer ref taken above */
if (!conn)
return -ENOTCONN;
if (repost) {
/* we received a message, increment peer's outstanding credits */
if (credit == 1) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_outstanding++;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
/* we are done with the rx */
mxlnd_put_idle_rx(rx);
void
mxlnd_sleep(unsigned long timeout)
{
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(timeout);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_schedule_timeout(timeout);
return;
}
int found = 0;
kmx_ctx_t *tx = NULL;
kmx_peer_t *peer = NULL;
- struct list_head *queue = &kmxlnd_data.kmx_tx_queue;
- spinlock_t *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
- rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ cfs_list_t *queue = &kmxlnd_data.kmx_tx_queue;
+ cfs_spinlock_t *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
+ cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
cfs_daemonize("mxlnd_tx_queued");
- while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
- if (atomic_read(&kmxlnd_data.kmx_shutdown))
+ if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
break;
if (ret != 0) // Should we check for -EINTR?
continue;
- spin_lock(tx_q_lock);
- if (list_empty (&kmxlnd_data.kmx_tx_queue)) {
- spin_unlock(tx_q_lock);
+ cfs_spin_lock(tx_q_lock);
+ if (cfs_list_empty (&kmxlnd_data.kmx_tx_queue)) {
+ cfs_spin_unlock(tx_q_lock);
continue;
}
- tx = list_entry (queue->next, kmx_ctx_t, mxc_list);
- list_del_init(&tx->mxc_list);
- spin_unlock(tx_q_lock);
+ tx = cfs_list_entry (queue->next, kmx_ctx_t, mxc_list);
+ cfs_list_del_init(&tx->mxc_list);
+ cfs_spin_unlock(tx_q_lock);
found = 0;
peer = mxlnd_find_peer_by_nid(tx->mxc_nid, 0); /* adds peer ref */
if (peer != NULL) {
tx->mxc_peer = peer;
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
if (peer->mxp_conn == NULL) {
ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer);
if (ret != 0) {
/* out of memory, give up and fail tx */
tx->mxc_errno = -ENOMEM;
mxlnd_peer_decref(peer);
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
mxlnd_put_idle_tx(tx);
continue;
}
tx->mxc_conn = peer->mxp_conn;
mxlnd_conn_addref(tx->mxc_conn); /* for this tx */
mxlnd_peer_decref(peer); /* drop peer ref taken above */
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
mxlnd_queue_tx(tx);
found = 1;
}
/* add peer to global peer list, but look to see
* if someone already created it after we released
* the read lock */
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
old = mxlnd_find_peer_by_nid_locked(peer->mxp_nid);
if (old) {
/* we have a peer ref on old */
}
if (found == 0) {
- list_add_tail(&peer->mxp_list, &kmxlnd_data.kmx_peers[hash]);
- atomic_inc(&kmxlnd_data.kmx_npeers);
+ cfs_list_add_tail(&peer->mxp_list,
+ &kmxlnd_data.kmx_peers[hash]);
+ cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
} else {
tx->mxc_peer = old;
tx->mxc_conn = old->mxp_conn;
mxlnd_conn_decref(peer->mxp_conn); /* drop peer's ref */
mxlnd_peer_decref(peer);
}
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
mxlnd_queue_tx(tx);
}
}
if (peer->mxp_nic_id == 0ULL && conn->mxk_status == MXLND_CONN_WAIT) {
/* not mapped yet, return */
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
}
- if (time_after(jiffies, peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT) &&
+ if (cfs_time_after(jiffies,
+ peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT) &&
conn->mxk_status != MXLND_CONN_DISCONNECT) {
/* give up and notify LNET */
CDEBUG(D_NET, "timeout trying to connect to %s\n",
peer->mxp_ep_id, MXLND_MSG_MAGIC, match,
(void *) peer, &request);
if (unlikely(mxret != MX_SUCCESS)) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
CDEBUG(D_NETERROR, "mx_iconnect() failed with %s (%d) to %s\n",
mx_strerror(mxret), mxret, libcfs_nid2str(peer->mxp_nid));
mxlnd_conn_decref(conn);
}
- mx_set_request_timeout(kmxlnd_data.kmx_endpt, request, MXLND_CONNECT_TIMEOUT/HZ*1000);
+ mx_set_request_timeout(kmxlnd_data.kmx_endpt, request,
+ MXLND_CONNECT_TIMEOUT/CFS_HZ*1000);
return;
}
LASSERT(peer != NULL);
return -1;
}
- write_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_lock(&kmxlnd_data.kmx_global_lock);
conn = peer->mxp_conn;
/* NOTE take a ref for the duration of this function since it is called
* when there might not be any queued txs for this peer */
if (conn) {
if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
- write_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
return -1;
}
mxlnd_conn_addref(conn); /* for duration of this function */
}
- write_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
/* do not add another ref for this tx */
}
#if MXLND_STATS
- if (time_after(jiffies, last)) {
- last = jiffies + HZ;
+ if (cfs_time_after(jiffies, last)) {
+ last = jiffies + CFS_HZ;
CDEBUG(D_NET, "status= %s credits= %d outstanding= %d ntx_msgs= %d "
"ntx_posted= %d ntx_data= %d data_posted= %d\n",
mxlnd_connstatus_to_str(conn->mxk_status), conn->mxk_credits,
}
#endif
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
ntx_posted = conn->mxk_ntx_posted;
credits = conn->mxk_credits;
/* check number of queued msgs, ignore data */
if (conn->mxk_outstanding >= MXLND_CREDIT_HIGHWATER()) {
/* check if any txs queued that could return credits... */
- if (list_empty(&conn->mxk_tx_credit_queue) || conn->mxk_ntx_msgs == 0) {
+ if (cfs_list_empty(&conn->mxk_tx_credit_queue) ||
+ conn->mxk_ntx_msgs == 0) {
/* if not, send a NOOP */
tx = mxlnd_get_idle_tx();
if (likely(tx != NULL)) {
conn->mxk_status == MXLND_CONN_FAIL)) {
CDEBUG(D_NET, "status=%s\n", mxlnd_connstatus_to_str(conn->mxk_status));
mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_REQ);
goto done;
}
- while (!list_empty(&conn->mxk_tx_free_queue) ||
- !list_empty(&conn->mxk_tx_credit_queue)) {
+ while (!cfs_list_empty(&conn->mxk_tx_free_queue) ||
+ !cfs_list_empty(&conn->mxk_tx_credit_queue)) {
/* We have something to send. If we have a queued tx that does not
* require a credit (free), choose it since its completion will
* return a credit (here or at the peer), complete a DATA or
* CONN_REQ or CONN_ACK. */
- struct list_head *tmp_tx = NULL;
- if (!list_empty(&conn->mxk_tx_free_queue)) {
+ cfs_list_t *tmp_tx = NULL;
+ if (!cfs_list_empty(&conn->mxk_tx_free_queue)) {
tmp_tx = &conn->mxk_tx_free_queue;
} else {
tmp_tx = &conn->mxk_tx_credit_queue;
}
- tx = list_entry(tmp_tx->next, kmx_ctx_t, mxc_list);
+ tx = cfs_list_entry(tmp_tx->next, kmx_ctx_t, mxc_list);
msg_type = tx->mxc_msg_type;
tx->mxc_cookie,
mxlnd_msgtype_to_str(tx->mxc_msg_type));
if (conn->mxk_status == MXLND_CONN_DISCONNECT ||
- time_after_eq(jiffies, tx->mxc_deadline)) {
- list_del_init(&tx->mxc_list);
+ cfs_time_aftereq(jiffies, tx->mxc_deadline)) {
+ cfs_list_del_init(&tx->mxc_list);
tx->mxc_errno = -ECONNABORTED;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
mxlnd_put_idle_tx(tx);
mxlnd_conn_decref(conn);
goto done;
}
}
- list_del_init(&tx->mxc_list);
+ cfs_list_del_init(&tx->mxc_list);
/* handle credits, etc now while we have the lock to avoid races */
if (credit) {
(conn->mxk_ntx_msgs >= 1)) {
conn->mxk_credits++;
conn->mxk_ntx_posted--;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
/* redundant NOOP */
mxlnd_put_idle_tx(tx);
mxlnd_conn_decref(conn);
mxret = MX_SUCCESS;
status = conn->mxk_status;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
if (likely((status == MXLND_CONN_READY) ||
(msg_type == MXLND_MSG_CONN_REQ) ||
&tx->mxc_mxreq);
} else {
/* send a DATA tx */
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_ntx_data--;
conn->mxk_data_posted++;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
CDEBUG(D_NET, "sending %s 0x%llx\n",
mxlnd_msgtype_to_str(msg_type),
tx->mxc_cookie);
tx->mxc_errno = -ECONNABORTED;
}
if (credit) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_ntx_posted--;
conn->mxk_credits++;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
} else if (msg_type == MXLND_MSG_PUT_DATA ||
msg_type == MXLND_MSG_GET_DATA) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_data_posted--;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
if (msg_type != MXLND_MSG_PUT_DATA &&
msg_type != MXLND_MSG_GET_DATA &&
msg_type != MXLND_MSG_CONN_REQ &&
msg_type != MXLND_MSG_CONN_ACK) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_outstanding += tx->mxc_msg->mxm_credits;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
if (msg_type != MXLND_MSG_CONN_REQ &&
msg_type != MXLND_MSG_CONN_ACK) {
mxlnd_conn_decref(conn);
}
}
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
}
done_locked:
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
done:
mxlnd_conn_decref(conn); /* drop ref taken at start of function */
return found;
if (failed) {
if (tx->mxc_errno == 0) tx->mxc_errno = -EIO;
} else {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_last_tx = cfs_time_current(); /* jiffies */
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
switch (type) {
case MXLND_MSG_GET_DATA:
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
if (conn->mxk_incarnation == tx->mxc_incarnation) {
conn->mxk_outstanding++;
conn->mxk_data_posted--;
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
break;
case MXLND_MSG_PUT_DATA:
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
if (conn->mxk_incarnation == tx->mxc_incarnation) {
conn->mxk_data_posted--;
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
break;
case MXLND_MSG_NOOP:
mx_strstatus(code), code, tx->mxc_errno,
libcfs_nid2str(tx->mxc_nid));
if (!peer->mxp_incompatible) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
if (code == MX_STATUS_BAD_SESSION)
mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
else
mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
}
break;
}
if (credit) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
if (conn->mxk_incarnation == tx->mxc_incarnation) {
conn->mxk_ntx_posted--;
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
mxlnd_put_idle_tx(tx);
} /* else peer and conn == NULL */
if (conn == NULL && peer != NULL) {
- write_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_lock(&kmxlnd_data.kmx_global_lock);
conn = peer->mxp_conn;
if (conn) {
mxlnd_conn_addref(conn); /* conn takes ref... */
conn_ref = 1;
peer_ref = 0;
}
- write_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
rx->mxc_conn = conn;
}
LASSERT(peer != NULL && conn != NULL);
if (msg->mxm_credits != 0) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
if (msg->mxm_srcstamp == conn->mxk_incarnation) {
if ((conn->mxk_credits + msg->mxm_credits) >
*kmxlnd_tunables.kmx_peercredits) {
LASSERT(conn->mxk_credits >= 0);
LASSERT(conn->mxk_credits <= *kmxlnd_tunables.kmx_peercredits);
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
CDEBUG(D_NET, "switch %s for rx (0x%llx)\n", mxlnd_msgtype_to_str(type), seq);
if (ret < 0) {
CDEBUG(D_NET, "setting PEER_CONN_FAILED\n");
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
cleanup:
if (conn != NULL) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_last_rx = cfs_time_current(); /* jiffies */
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
if (repost) {
type == MXLND_MSG_EAGER ||
type == MXLND_MSG_PUT_REQ ||
type == MXLND_MSG_NOOP) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_outstanding++;
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
}
if (conn_ref) mxlnd_conn_decref(conn);
peer->mxp_nid,
peer->mxp_nic_id,
peer->mxp_ep_id);
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
- if (time_after(jiffies, peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT)) {
+ if (cfs_time_after(jiffies, peer->mxp_reconnect_time +
+ MXLND_CONNECT_TIMEOUT)) {
CDEBUG(D_NETERROR, "timeout, calling conn_disconnect()\n");
mxlnd_conn_disconnect(conn, 0, send_bye);
}
return;
}
mx_decompose_endpoint_addr2(status.source, &nic_id, &ep_id, &sid);
- write_lock(&kmxlnd_data.kmx_global_lock);
- spin_lock(&conn->mxk_lock);
+ cfs_write_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_epa = status.source;
mx_set_endpoint_addr_context(conn->mxk_epa, (void *) conn);
if (msg_type == MXLND_MSG_ICON_ACK && likely(!peer->mxp_incompatible)) {
mxlnd_set_conn_status(conn, MXLND_CONN_READY);
}
- spin_unlock(&conn->mxk_lock);
- write_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
+ cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
/* mx_iconnect() succeeded, reset delay to 0 */
- write_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_lock(&kmxlnd_data.kmx_global_lock);
peer->mxp_reconnect_time = 0;
peer->mxp_conn->mxk_sid = sid;
- write_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
/* marshal CONN_REQ or CONN_ACK msg */
/* we are still using the conn ref from iconnect() - do not take another */
CDEBUG(D_NETERROR, "Can't obtain %s tx for %s\n",
mxlnd_msgtype_to_str(type),
libcfs_nid2str(peer->mxp_nid));
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
mxlnd_conn_decref(conn);
return;
}
CDEBUG(D_NET, "%s starting\n", name);
- while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
u8 msg_type = 0;
mxret = MX_SUCCESS;
mxret = mx_wait_any(kmxlnd_data.kmx_endpt, MXLND_WAIT_TIMEOUT,
0ULL, 0ULL, &status, &result);
#endif
- if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown)))
+ if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown)))
break;
if (result != 1) {
unsigned long next = 0; /* jiffies */
kmx_peer_t *peer = NULL;
kmx_conn_t *conn = NULL;
- rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- read_lock(g_lock);
+ cfs_read_lock(g_lock);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
- list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) {
+ cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
+ mxp_list) {
- if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown))) {
- read_unlock(g_lock);
+ if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ cfs_read_unlock(g_lock);
return next;
}
continue;
}
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
/* if nothing pending (timeout == 0) or
* if conn is already disconnected,
* skip this conn */
if (conn->mxk_timeout == 0 ||
conn->mxk_status == MXLND_CONN_DISCONNECT) {
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
mxlnd_conn_decref(conn);
continue;
}
* if it is in the future, we will sleep until then.
* if it is in the past, then we will sleep one
* second and repeat the process. */
- if ((next == 0) || (time_before(conn->mxk_timeout, next))) {
+ if ((next == 0) ||
+ (cfs_time_before(conn->mxk_timeout, next))) {
next = conn->mxk_timeout;
}
disconnect = 0;
- if (time_after_eq(now, conn->mxk_timeout)) {
+ if (cfs_time_aftereq(now, conn->mxk_timeout)) {
disconnect = 1;
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
if (disconnect) {
mxlnd_conn_disconnect(conn, 1, 1);
mxlnd_conn_decref(conn);
}
}
- read_unlock(g_lock);
+ cfs_read_unlock(g_lock);
if (next == 0) next = now + MXLND_COMM_TIMEOUT;
return next;
kmx_msg_t *msg = &cp->mxr_msg;
kmx_peer_t *peer = cp->mxr_peer;
kmx_conn_t *conn = NULL;
- rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
mx_decompose_endpoint_addr2(cp->mxr_epa, &nic_id, &ep_id, &sid);
}
peer->mxp_conn->mxk_sid = sid;
LASSERT(peer->mxp_ep_id == ep_id);
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
existing_peer = mxlnd_find_peer_by_nid_locked(msg->mxm_srcnid);
if (existing_peer) {
mxlnd_conn_decref(peer->mxp_conn);
mxlnd_conn_addref(peer->mxp_conn);
conn = peer->mxp_conn;
} else {
- list_add_tail(&peer->mxp_list,
- &kmxlnd_data.kmx_peers[hash]);
- atomic_inc(&kmxlnd_data.kmx_npeers);
+ cfs_list_add_tail(&peer->mxp_list,
+ &kmxlnd_data.kmx_peers[hash]);
+ cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
}
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
} else {
ret = mxlnd_conn_alloc(&conn, peer); /* adds 2nd ref */
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
mxlnd_peer_decref(peer); /* drop ref taken above */
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
if (ret != 0) {
CDEBUG(D_NETERROR, "Cannot allocate mxp_conn\n");
goto cleanup;
conn = peer->mxp_conn;
}
}
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
peer->mxp_incompatible = incompatible;
- write_unlock(g_lock);
- spin_lock(&conn->mxk_lock);
+ cfs_write_unlock(g_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_incarnation = msg->mxm_srcstamp;
mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
/* handle_conn_ack() will create the CONN_ACK msg */
mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_ACK);
ret = -1;
goto failed;
}
- write_lock(&kmxlnd_data.kmx_global_lock);
+ cfs_write_lock(&kmxlnd_data.kmx_global_lock);
peer->mxp_incompatible = incompatible;
- write_unlock(&kmxlnd_data.kmx_global_lock);
- spin_lock(&conn->mxk_lock);
+ cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+ cfs_spin_lock(&conn->mxk_lock);
conn->mxk_credits = *kmxlnd_tunables.kmx_peercredits;
conn->mxk_outstanding = 0;
conn->mxk_incarnation = msg->mxm_srcstamp;
libcfs_nid2str(msg->mxm_srcnid));
mxlnd_set_conn_status(conn, MXLND_CONN_READY);
}
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
if (!incompatible)
mxlnd_check_sends(peer);
failed:
if (ret < 0) {
- spin_lock(&conn->mxk_lock);
+ cfs_spin_lock(&conn->mxk_lock);
mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- spin_unlock(&conn->mxk_lock);
+ cfs_spin_unlock(&conn->mxk_lock);
}
if (incompatible) mxlnd_conn_disconnect(conn, 0, 0);
mxlnd_abort_msgs(void)
{
int count = 0;
- struct list_head *orphans = &kmxlnd_data.kmx_orphan_msgs;
- spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
+ cfs_list_t *orphans = &kmxlnd_data.kmx_orphan_msgs;
+ cfs_spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
/* abort orphans */
- spin_lock(g_conn_lock);
- while (!list_empty(orphans)) {
+ cfs_spin_lock(g_conn_lock);
+ while (!cfs_list_empty(orphans)) {
kmx_ctx_t *ctx = NULL;
kmx_conn_t *conn = NULL;
- ctx = list_entry(orphans->next, kmx_ctx_t, mxc_list);
- list_del_init(&ctx->mxc_list);
- spin_unlock(g_conn_lock);
+ ctx = cfs_list_entry(orphans->next, kmx_ctx_t, mxc_list);
+ cfs_list_del_init(&ctx->mxc_list);
+ cfs_spin_unlock(g_conn_lock);
ctx->mxc_errno = -ECONNABORTED;
conn = ctx->mxc_conn;
}
count++;
- spin_lock(g_conn_lock);
+ cfs_spin_lock(g_conn_lock);
}
- spin_unlock(g_conn_lock);
+ cfs_spin_unlock(g_conn_lock);
return count;
}
mxlnd_free_conn_zombies(void)
{
int count = 0;
- struct list_head *zombies = &kmxlnd_data.kmx_conn_zombies;
- spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
- rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ cfs_list_t *zombies = &kmxlnd_data.kmx_conn_zombies;
+ cfs_spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
+ cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
/* cleanup any zombies */
- spin_lock(g_conn_lock);
- while (!list_empty(zombies)) {
+ cfs_spin_lock(g_conn_lock);
+ while (!cfs_list_empty(zombies)) {
kmx_conn_t *conn = NULL;
- conn = list_entry(zombies->next, kmx_conn_t, mxk_zombie);
- list_del_init(&conn->mxk_zombie);
- spin_unlock(g_conn_lock);
+ conn = cfs_list_entry(zombies->next, kmx_conn_t, mxk_zombie);
+ cfs_list_del_init(&conn->mxk_zombie);
+ cfs_spin_unlock(g_conn_lock);
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
mxlnd_conn_free_locked(conn);
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
count++;
- spin_lock(g_conn_lock);
+ cfs_spin_lock(g_conn_lock);
}
- spin_unlock(g_conn_lock);
+ cfs_spin_unlock(g_conn_lock);
CDEBUG(D_NET, "%s: freed %d zombies\n", __func__, count);
return count;
}
CDEBUG(D_NET, "connd starting\n");
- while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
- int ret = 0;
- kmx_connparams_t *cp = NULL;
- spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
- struct list_head *conn_reqs = &kmxlnd_data.kmx_conn_reqs;
+ while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ int ret = 0;
+ kmx_connparams_t *cp = NULL;
+ cfs_spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
+ cfs_list_t *conn_reqs = &kmxlnd_data.kmx_conn_reqs;
ret = down_interruptible(&kmxlnd_data.kmx_conn_sem);
- if (atomic_read(&kmxlnd_data.kmx_shutdown))
+ if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
break;
if (ret != 0)
ret = mxlnd_abort_msgs();
ret += mxlnd_free_conn_zombies();
- spin_lock(g_conn_lock);
- if (list_empty(conn_reqs)) {
+ cfs_spin_lock(g_conn_lock);
+ if (cfs_list_empty(conn_reqs)) {
if (ret == 0)
CDEBUG(D_NETERROR, "connd woke up but did not "
"find a kmx_connparams_t or zombie conn\n");
- spin_unlock(g_conn_lock);
+ cfs_spin_unlock(g_conn_lock);
continue;
}
- cp = list_entry(conn_reqs->next, kmx_connparams_t, mxr_list);
- list_del_init(&cp->mxr_list);
- spin_unlock(g_conn_lock);
+ cp = cfs_list_entry(conn_reqs->next, kmx_connparams_t,
+ mxr_list);
+ cfs_list_del_init(&cp->mxr_list);
+ cfs_spin_unlock(g_conn_lock);
switch (MXLND_MSG_TYPE(cp->mxr_match)) {
case MXLND_MSG_CONN_REQ:
long id = (long) arg;
unsigned long now = 0;
unsigned long next = 0;
- unsigned long delay = HZ;
+ unsigned long delay = CFS_HZ;
kmx_peer_t *peer = NULL;
kmx_peer_t *temp = NULL;
kmx_conn_t *conn = NULL;
- rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
cfs_daemonize("mxlnd_timeoutd");
CDEBUG(D_NET, "timeoutd starting\n");
- while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
now = jiffies;
/* if the next timeout has not arrived, go back to sleep */
- if (time_after(now, next)) {
+ if (cfs_time_after(now, next)) {
next = mxlnd_check_timeouts(now);
}
/* try to progress peers' txs */
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
- struct list_head *peers = &kmxlnd_data.kmx_peers[i];
+ cfs_list_t *peers = &kmxlnd_data.kmx_peers[i];
/* NOTE we are safe against the removal of peer, but
* not against the removal of temp */
- list_for_each_entry_safe(peer, temp, peers, mxp_list) {
- if (atomic_read(&kmxlnd_data.kmx_shutdown))
+ cfs_list_for_each_entry_safe(peer, temp, peers,
+ mxp_list) {
+ if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
break;
mxlnd_peer_addref(peer); /* add ref... */
conn = peer->mxp_conn;
if ((conn->mxk_status == MXLND_CONN_READY ||
conn->mxk_status == MXLND_CONN_FAIL) &&
- time_after(now, conn->mxk_last_tx + HZ)) {
- write_unlock(g_lock);
+ cfs_time_after(now,
+ conn->mxk_last_tx +
+ CFS_HZ)) {
+ cfs_write_unlock(g_lock);
mxlnd_check_sends(peer);
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
}
mxlnd_conn_decref(conn); /* until here */
mxlnd_peer_decref(peer); /* ...to here */
}
}
- write_unlock(g_lock);
+ cfs_write_unlock(g_lock);
mxlnd_sleep(delay);
}
peer->ibp_nid = nid;
peer->ibp_error = 0;
peer->ibp_last_alive = 0;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+ cfs_atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
- INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- INIT_LIST_HEAD(&peer->ibp_conns);
- INIT_LIST_HEAD(&peer->ibp_tx_queue);
+ CFS_INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
+ CFS_INIT_LIST_HEAD(&peer->ibp_conns);
+ CFS_INIT_LIST_HEAD(&peer->ibp_tx_queue);
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
/* always called with a ref on ni, which prevents ni being shutdown */
LASSERT (net->ibn_shutdown == 0);
/* npeers only grows with the global lock held */
- atomic_inc(&net->ibn_npeers);
+ cfs_atomic_inc(&net->ibn_npeers);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
*peerp = peer;
return 0;
kib_net_t *net = peer->ibp_ni->ni_data;
LASSERT (net != NULL);
- LASSERT (atomic_read(&peer->ibp_refcount) == 0);
+ LASSERT (cfs_atomic_read(&peer->ibp_refcount) == 0);
LASSERT (!kiblnd_peer_active(peer));
LASSERT (peer->ibp_connecting == 0);
LASSERT (peer->ibp_accepting == 0);
- LASSERT (list_empty(&peer->ibp_conns));
- LASSERT (list_empty(&peer->ibp_tx_queue));
+ LASSERT (cfs_list_empty(&peer->ibp_conns));
+ LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
LIBCFS_FREE(peer, sizeof(*peer));
* they are destroyed, so we can be assured that _all_ state to do
* with this peer has been cleaned up when its refcount drops to
* zero. */
- atomic_dec(&net->ibn_npeers);
+ cfs_atomic_dec(&net->ibn_npeers);
}
kib_peer_t *
{
/* the caller is responsible for accounting the additional reference
* that this creates */
- struct list_head *peer_list = kiblnd_nid2peerlist(nid);
- struct list_head *tmp;
+ cfs_list_t *peer_list = kiblnd_nid2peerlist(nid);
+ cfs_list_t *tmp;
kib_peer_t *peer;
- list_for_each (tmp, peer_list) {
+ cfs_list_for_each (tmp, peer_list) {
- peer = list_entry(tmp, kib_peer_t, ibp_list);
+ peer = cfs_list_entry(tmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_connecting > 0 || /* creating conns */
peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns)); /* active conn */
+ !cfs_list_empty(&peer->ibp_conns)); /* active conn */
if (peer->ibp_nid != nid)
continue;
CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
peer, libcfs_nid2str(nid),
- atomic_read(&peer->ibp_refcount),
+ cfs_atomic_read(&peer->ibp_refcount),
peer->ibp_version);
return peer;
}
void
kiblnd_unlink_peer_locked (kib_peer_t *peer)
{
- LASSERT (list_empty(&peer->ibp_conns));
+ LASSERT (cfs_list_empty(&peer->ibp_conns));
LASSERT (kiblnd_peer_active(peer));
- list_del_init(&peer->ibp_list);
+ cfs_list_del_init(&peer->ibp_list);
/* lose peerlist's ref */
kiblnd_peer_decref(peer);
}
kiblnd_get_peer_info (lnet_ni_t *ni, int index,
lnet_nid_t *nidp, int *count)
{
- kib_peer_t *peer;
- struct list_head *ptmp;
- int i;
- unsigned long flags;
+ kib_peer_t *peer;
+ cfs_list_t *ptmp;
+ int i;
+ unsigned long flags;
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+ cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_connecting > 0 ||
peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ !cfs_list_empty(&peer->ibp_conns));
if (peer->ibp_ni != ni)
continue;
continue;
*nidp = peer->ibp_nid;
- *count = atomic_read(&peer->ibp_refcount);
+ *count = cfs_atomic_read(&peer->ibp_refcount);
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
return 0;
}
}
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
return -ENOENT;
}
void
kiblnd_del_peer_locked (kib_peer_t *peer)
{
- struct list_head *ctmp;
- struct list_head *cnxt;
- kib_conn_t *conn;
+ cfs_list_t *ctmp;
+ cfs_list_t *cnxt;
+ kib_conn_t *conn;
- if (list_empty(&peer->ibp_conns)) {
+ if (cfs_list_empty(&peer->ibp_conns)) {
kiblnd_unlink_peer_locked(peer);
} else {
- list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
kiblnd_close_conn_locked(conn, 0);
}
int
kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
{
- CFS_LIST_HEAD (zombies);
- struct list_head *ptmp;
- struct list_head *pnxt;
- kib_peer_t *peer;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int rc = -ENOENT;
+ CFS_LIST_HEAD (zombies);
+ cfs_list_t *ptmp;
+ cfs_list_t *pnxt;
+ kib_peer_t *peer;
+ int lo;
+ int hi;
+ int i;
+ unsigned long flags;
+ int rc = -ENOENT;
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (nid != LNET_NID_ANY) {
lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ cfs_list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_connecting > 0 ||
peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ !cfs_list_empty(&peer->ibp_conns));
if (peer->ibp_ni != ni)
continue;
if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
continue;
- if (!list_empty(&peer->ibp_tx_queue)) {
- LASSERT (list_empty(&peer->ibp_conns));
+ if (!cfs_list_empty(&peer->ibp_tx_queue)) {
+ LASSERT (cfs_list_empty(&peer->ibp_conns));
- list_splice_init(&peer->ibp_tx_queue, &zombies);
+ cfs_list_splice_init(&peer->ibp_tx_queue,
+ &zombies);
}
kiblnd_del_peer_locked(peer);
}
}
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
kiblnd_txlist_done(ni, &zombies, -EIO);
kib_conn_t *
kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
{
- kib_peer_t *peer;
- struct list_head *ptmp;
- kib_conn_t *conn;
- struct list_head *ctmp;
- int i;
- unsigned long flags;
+ kib_peer_t *peer;
+ cfs_list_t *ptmp;
+ kib_conn_t *conn;
+ cfs_list_t *ctmp;
+ int i;
+ unsigned long flags;
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+ cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_connecting > 0 ||
peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ !cfs_list_empty(&peer->ibp_conns));
if (peer->ibp_ni != ni)
continue;
- list_for_each (ctmp, &peer->ibp_conns) {
+ cfs_list_for_each (ctmp, &peer->ibp_conns) {
if (index-- > 0)
continue;
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = cfs_list_entry(ctmp, kib_conn_t,
+ ibc_list);
kiblnd_conn_addref(conn);
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
return conn;
}
}
}
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
return NULL;
}
void
kiblnd_debug_conn (kib_conn_t *conn)
{
- struct list_head *tmp;
- int i;
+ cfs_list_t *tmp;
+ int i;
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
- atomic_read(&conn->ibc_refcount), conn,
+ cfs_atomic_read(&conn->ibc_refcount), conn,
conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
conn->ibc_state, conn->ibc_noops_posted,
CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
CDEBUG(D_CONSOLE, " early_rxs:\n");
- list_for_each(tmp, &conn->ibc_early_rxs)
- kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
+ cfs_list_for_each(tmp, &conn->ibc_early_rxs)
+ kiblnd_debug_rx(cfs_list_entry(tmp, kib_rx_t, rx_list));
CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
- list_for_each(tmp, &conn->ibc_tx_queue_nocred)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ cfs_list_for_each(tmp, &conn->ibc_tx_queue_nocred)
+ kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
- list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ cfs_list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
+ kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
CDEBUG(D_CONSOLE, " tx_queue:\n");
- list_for_each(tmp, &conn->ibc_tx_queue)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ cfs_list_for_each(tmp, &conn->ibc_tx_queue)
+ kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
CDEBUG(D_CONSOLE, " active_txs:\n");
- list_for_each(tmp, &conn->ibc_active_txs)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ cfs_list_for_each(tmp, &conn->ibc_active_txs)
+ kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
CDEBUG(D_CONSOLE, " rxs:\n");
for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
kiblnd_debug_rx(&conn->ibc_rxs[i]);
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
}
int
if (cmid->route.path_rec == NULL)
return;
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
LASSERT (mtu >= 0);
if (mtu != 0)
cmid->route.path_rec->mtu = mtu;
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
kib_conn_t *
int i;
LASSERT (net != NULL);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LIBCFS_ALLOC(init_qp_attr, sizeof(*init_qp_attr));
if (init_qp_attr == NULL) {
cmid->context = conn; /* for future CM callbacks */
conn->ibc_cmid = cmid;
- INIT_LIST_HEAD(&conn->ibc_early_rxs);
- INIT_LIST_HEAD(&conn->ibc_tx_queue);
- INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
- INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
- INIT_LIST_HEAD(&conn->ibc_active_txs);
- spin_lock_init(&conn->ibc_lock);
+ CFS_INIT_LIST_HEAD(&conn->ibc_early_rxs);
+ CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue);
+ CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
+ CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
+ CFS_INIT_LIST_HEAD(&conn->ibc_active_txs);
+ cfs_spin_lock_init(&conn->ibc_lock);
LIBCFS_ALLOC(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
if (conn->ibc_connvars == NULL) {
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
/* 1 ref for caller and each rxmsg */
- atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
+ cfs_atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
conn->ibc_nrx = IBLND_RX_MSGS(version);
/* post receives */
/* Make posted receives complete */
kiblnd_abort_receives(conn);
- /* correct # of posted buffers
+ /* correct # of posted buffers
* NB locking needed now I'm racing with completion */
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
+ flags);
conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
- flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
+ flags);
/* cmid will be destroyed by CM(ofed) after cm_callback
* returned, so we can't refer it anymore
conn->ibc_state = state;
/* 1 more conn */
- atomic_inc(&net->ibn_nconns);
+ cfs_atomic_inc(&net->ibn_nconns);
return conn;
failed_2:
kib_peer_t *peer = conn->ibc_peer;
int rc;
- LASSERT (!in_interrupt());
- LASSERT (atomic_read(&conn->ibc_refcount) == 0);
- LASSERT (list_empty(&conn->ibc_early_rxs));
- LASSERT (list_empty(&conn->ibc_tx_queue));
- LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd));
- LASSERT (list_empty(&conn->ibc_tx_queue_nocred));
- LASSERT (list_empty(&conn->ibc_active_txs));
+ LASSERT (!cfs_in_interrupt());
+ LASSERT (cfs_atomic_read(&conn->ibc_refcount) == 0);
+ LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
+ LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
+ LASSERT (cfs_list_empty(&conn->ibc_tx_queue_rsrvd));
+ LASSERT (cfs_list_empty(&conn->ibc_tx_queue_nocred));
+ LASSERT (cfs_list_empty(&conn->ibc_active_txs));
LASSERT (conn->ibc_noops_posted == 0);
LASSERT (conn->ibc_nsends_posted == 0);
kiblnd_peer_decref(peer);
rdma_destroy_id(cmid);
- atomic_dec(&net->ibn_nconns);
+ cfs_atomic_dec(&net->ibn_nconns);
}
LIBCFS_FREE(conn, sizeof(*conn));
int
kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
{
- kib_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
+ kib_conn_t *conn;
+ cfs_list_t *ctmp;
+ cfs_list_t *cnxt;
+ int count = 0;
- list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
CDEBUG(D_NET, "Closing conn -> %s, "
"version: %x, reason: %d\n",
kiblnd_close_stale_conns_locked (kib_peer_t *peer,
int version, __u64 incarnation)
{
- kib_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
+ kib_conn_t *conn;
+ cfs_list_t *ctmp;
+ cfs_list_t *cnxt;
+ int count = 0;
- list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
if (conn->ibc_version == version &&
conn->ibc_incarnation == incarnation)
int
kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
{
- kib_peer_t *peer;
- struct list_head *ptmp;
- struct list_head *pnxt;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int count = 0;
+ kib_peer_t *peer;
+ cfs_list_t *ptmp;
+ cfs_list_t *pnxt;
+ int lo;
+ int hi;
+ int i;
+ unsigned long flags;
+ int count = 0;
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (nid != LNET_NID_ANY)
lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ cfs_list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_connecting > 0 ||
peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ !cfs_list_empty(&peer->ibp_conns));
if (peer->ibp_ni != ni)
continue;
}
}
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* wildcards always succeed */
if (nid == LNET_NID_ANY)
void
kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
{
- cfs_time_t last_alive = 0;
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer;
- unsigned long flags;
+ cfs_time_t last_alive = 0;
+ cfs_rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_peer_t *peer;
+ unsigned long flags;
- read_lock_irqsave(glock, flags);
+ cfs_read_lock_irqsave(glock, flags);
peer = kiblnd_find_peer_locked(nid);
if (peer != NULL) {
LASSERT (peer->ibp_connecting > 0 || /* creating conns */
peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns)); /* active conn */
+ !cfs_list_empty(&peer->ibp_conns)); /* active conn */
last_alive = peer->ibp_last_alive;
}
- read_unlock_irqrestore(glock, flags);
+ cfs_read_unlock_irqrestore(glock, flags);
if (last_alive != 0)
*when = last_alive;
tx->tx_msgaddr));
KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
- list_add(&tx->tx_list, &pool->po_free_list);
+ cfs_list_add(&tx->tx_list, &pool->po_free_list);
page_offset += IBLND_MSG_SIZE;
LASSERT (page_offset <= PAGE_SIZE);
}
void
-kiblnd_destroy_fmr_pool_list(struct list_head *head)
+kiblnd_destroy_fmr_pool_list(cfs_list_t *head)
{
kib_fmr_pool_t *pool;
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
- list_del(&pool->fpo_list);
+ while (!cfs_list_empty(head)) {
+ pool = cfs_list_entry(head->next, kib_fmr_pool_t, fpo_list);
+ cfs_list_del(&pool->fpo_list);
kiblnd_destroy_fmr_pool(pool);
}
}
memset(fps, 0, sizeof(kib_fmr_poolset_t));
fps->fps_net = net;
- spin_lock_init(&fps->fps_lock);
+ cfs_spin_lock_init(&fps->fps_lock);
CFS_INIT_LIST_HEAD(&fps->fps_pool_list);
rc = kiblnd_create_fmr_pool(fps, &fpo);
if (rc == 0)
- list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+ cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
return rc;
}
fmr->fmr_pool = NULL;
fmr->fmr_pfmr = NULL;
- spin_lock(&fps->fps_lock);
+ cfs_spin_lock(&fps->fps_lock);
fpo->fpo_map_count --; /* decref the pool */
- list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
+ cfs_list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
/* the first pool is persistent */
if (fps->fps_pool_list.next == &fpo->fpo_list)
continue;
if (fpo->fpo_map_count == 0 && /* no more reference */
cfs_time_aftereq(cfs_time_current(), fpo->fpo_deadline)) {
- list_move(&fpo->fpo_list, &zombies);
+ cfs_list_move(&fpo->fpo_list, &zombies);
fps->fps_version ++;
}
}
- spin_unlock(&fps->fps_lock);
+ cfs_spin_unlock(&fps->fps_lock);
- if (!list_empty(&zombies))
+ if (!cfs_list_empty(&zombies))
kiblnd_destroy_fmr_pool_list(&zombies);
}
LASSERT (fps->fps_net->ibn_with_fmr);
again:
- spin_lock(&fps->fps_lock);
+ cfs_spin_lock(&fps->fps_lock);
version = fps->fps_version;
- list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
+ cfs_list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_map_count ++;
- spin_unlock(&fps->fps_lock);
+ cfs_spin_unlock(&fps->fps_lock);
pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
pages, npages, iov);
return 0;
}
- spin_lock(&fps->fps_lock);
+ cfs_spin_lock(&fps->fps_lock);
fpo->fpo_map_count --;
if (PTR_ERR(pfmr) != -EAGAIN) {
- spin_unlock(&fps->fps_lock);
+ cfs_spin_unlock(&fps->fps_lock);
return PTR_ERR(pfmr);
}
/* EAGAIN and ... */
if (version != fps->fps_version) {
- spin_unlock(&fps->fps_lock);
+ cfs_spin_unlock(&fps->fps_lock);
goto again;
}
}
if (fps->fps_increasing) {
- spin_unlock(&fps->fps_lock);
+ cfs_spin_unlock(&fps->fps_lock);
CDEBUG(D_NET, "Another thread is allocating new "
"FMR pool, waiting for her to complete\n");
- schedule();
+ cfs_schedule();
goto again;
}
if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
/* someone failed recently */
- spin_unlock(&fps->fps_lock);
+ cfs_spin_unlock(&fps->fps_lock);
return -EAGAIN;
}
fps->fps_increasing = 1;
- spin_unlock(&fps->fps_lock);
+ cfs_spin_unlock(&fps->fps_lock);
CDEBUG(D_NET, "Allocate new FMR pool\n");
rc = kiblnd_create_fmr_pool(fps, &fpo);
- spin_lock(&fps->fps_lock);
+ cfs_spin_lock(&fps->fps_lock);
fps->fps_increasing = 0;
if (rc == 0) {
fps->fps_version ++;
- list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+ cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
fps->fps_next_retry = cfs_time_shift(10);
}
- spin_unlock(&fps->fps_lock);
+ cfs_spin_unlock(&fps->fps_lock);
goto again;
}
static void
kiblnd_fini_pool(kib_pool_t *pool)
{
- LASSERT (list_empty(&pool->po_free_list));
+ LASSERT (cfs_list_empty(&pool->po_free_list));
LASSERT (pool->po_allocated == 0);
CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}
void
-kiblnd_destroy_pool_list(kib_poolset_t *ps, struct list_head *head)
+kiblnd_destroy_pool_list(kib_poolset_t *ps, cfs_list_t *head)
{
kib_pool_t *pool;
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_pool_t, po_list);
- list_del(&pool->po_list);
+ while (!cfs_list_empty(head)) {
+ pool = cfs_list_entry(head->next, kib_pool_t, po_list);
+ cfs_list_del(&pool->po_list);
ps->ps_pool_destroy(pool);
}
}
ps->ps_node_fini = nd_fini;
ps->ps_pool_size = size;
strncpy(ps->ps_name, name, IBLND_POOL_NAME_LEN);
- spin_lock_init(&ps->ps_lock);
+ cfs_spin_lock_init(&ps->ps_lock);
CFS_INIT_LIST_HEAD(&ps->ps_pool_list);
rc = ps->ps_pool_create(ps, size, &pool);
if (rc == 0)
- list_add(&pool->po_list, &ps->ps_pool_list);
+ cfs_list_add(&pool->po_list, &ps->ps_pool_list);
else
CERROR("Failed to create the first pool for %s\n", ps->ps_name);
}
void
-kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
+kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node)
{
CFS_LIST_HEAD (zombies);
kib_poolset_t *ps = pool->po_owner;
kib_pool_t *tmp;
cfs_time_t now = cfs_time_current();
- spin_lock(&ps->ps_lock);
+ cfs_spin_lock(&ps->ps_lock);
if (ps->ps_node_fini != NULL)
ps->ps_node_fini(pool, node);
LASSERT (pool->po_allocated > 0);
- list_add(node, &pool->po_free_list);
+ cfs_list_add(node, &pool->po_free_list);
pool->po_allocated --;
- list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
+ cfs_list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
/* the first pool is persistent */
if (ps->ps_pool_list.next == &pool->po_list)
continue;
if (pool->po_allocated == 0 &&
cfs_time_aftereq(now, pool->po_deadline))
- list_move(&pool->po_list, &zombies);
+ cfs_list_move(&pool->po_list, &zombies);
}
- spin_unlock(&ps->ps_lock);
+ cfs_spin_unlock(&ps->ps_lock);
- if (!list_empty(&zombies))
+ if (!cfs_list_empty(&zombies))
kiblnd_destroy_pool_list(ps, &zombies);
}
-struct list_head *
+cfs_list_t *
kiblnd_pool_alloc_node(kib_poolset_t *ps)
{
- struct list_head *node;
- kib_pool_t *pool;
- int rc;
+ cfs_list_t *node;
+ kib_pool_t *pool;
+ int rc;
again:
- spin_lock(&ps->ps_lock);
- list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
- if (list_empty(&pool->po_free_list))
+ cfs_spin_lock(&ps->ps_lock);
+ cfs_list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
+ if (cfs_list_empty(&pool->po_free_list))
continue;
pool->po_allocated ++;
pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
node = pool->po_free_list.next;
- list_del(node);
+ cfs_list_del(node);
if (ps->ps_node_init != NULL) {
/* still hold the lock */
ps->ps_node_init(pool, node);
}
- spin_unlock(&ps->ps_lock);
+ cfs_spin_unlock(&ps->ps_lock);
return node;
}
/* no available tx pool and ... */
if (ps->ps_increasing) {
/* another thread is allocating a new pool */
- spin_unlock(&ps->ps_lock);
+ cfs_spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "Another thread is allocating new "
"%s pool, waiting for her to complete\n",
ps->ps_name);
- schedule();
+ cfs_schedule();
goto again;
}
if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
/* someone failed recently */
- spin_unlock(&ps->ps_lock);
+ cfs_spin_unlock(&ps->ps_lock);
return NULL;
}
ps->ps_increasing = 1;
- spin_unlock(&ps->ps_lock);
+ cfs_spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
- spin_lock(&ps->ps_lock);
+ cfs_spin_lock(&ps->ps_lock);
ps->ps_increasing = 0;
if (rc == 0) {
- list_add_tail(&pool->po_list, &ps->ps_pool_list);
+ cfs_list_add_tail(&pool->po_list, &ps->ps_pool_list);
} else {
/* retry 10 seconds later */
ps->ps_next_retry = cfs_time_shift(10);
CERROR("Can't allocate new %s pool because out of memory\n",
ps->ps_name);
}
- spin_unlock(&ps->ps_lock);
+ cfs_spin_unlock(&ps->ps_lock);
goto again;
}
kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_rdma_desc_t *rd,
__u64 *iova, kib_phys_mr_t **pp_pmr)
{
- kib_phys_mr_t *pmr;
- struct list_head *node;
- int rc;
- int i;
+ kib_phys_mr_t *pmr;
+ cfs_list_t *node;
+ int rc;
+ int i;
node = kiblnd_pool_alloc_node(&pps->pps_poolset);
if (node == NULL) {
LASSERT (pool->po_allocated == 0);
- while (!list_empty(&pool->po_free_list)) {
- pmr = list_entry(pool->po_free_list.next,
- kib_phys_mr_t, pmr_list);
+ while (!cfs_list_empty(&pool->po_free_list)) {
+ pmr = cfs_list_entry(pool->po_free_list.next,
+ kib_phys_mr_t, pmr_list);
LASSERT (pmr->pmr_mr == NULL);
- list_del(&pmr->pmr_list);
+ cfs_list_del(&pmr->pmr_list);
if (pmr->pmr_ipb != NULL) {
LIBCFS_FREE(pmr->pmr_ipb,
if (pmr->pmr_ipb == NULL)
break;
- list_add(&pmr->pmr_list, &pool->po_free_list);
+ cfs_list_add(&pmr->pmr_list, &pool->po_free_list);
}
if (i < size) {
for (i = 0; i < pool->po_size; i++) {
kib_tx_t *tx = &tpo->tpo_tx_descs[i];
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
if (tx->tx_pages != NULL)
LIBCFS_FREE(tx->tx_pages,
LNET_MAX_IOV *
}
static void
-kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
+kiblnd_tx_init(kib_pool_t *pool, cfs_list_t *node)
{
- kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, tps_poolset);
- kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
+ kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
+ tps_poolset);
+ kib_tx_t *tx = cfs_list_entry(node, kib_tx_t, tx_list);
tx->tx_cookie = tps->tps_next_tx_cookie ++;
}
LPX64", array size: %d\n",
ibdev->ibd_mr_size, ibdev->ibd_nmrs);
- list_add_tail(&ibdev->ibd_list,
+ cfs_list_add_tail(&ibdev->ibd_list,
&kiblnd_data.kib_devs);
return 0;
}
{
LASSERT (dev->ibd_nnets == 0);
- if (!list_empty(&dev->ibd_list)) /* on kib_devs? */
- list_del_init(&dev->ibd_list);
+ if (!cfs_list_empty(&dev->ibd_list)) /* on kib_devs? */
+ cfs_list_del_init(&dev->ibd_list);
kiblnd_dev_cleanup(dev);
{
int i;
- LASSERT (list_empty(&kiblnd_data.kib_devs));
+ LASSERT (cfs_list_empty(&kiblnd_data.kib_devs));
CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
switch (kiblnd_data.kib_init) {
default:
case IBLND_INIT_DATA:
LASSERT (kiblnd_data.kib_peers != NULL);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- LASSERT (list_empty(&kiblnd_data.kib_peers[i]));
+ LASSERT (cfs_list_empty(&kiblnd_data.kib_peers[i]));
}
- LASSERT (list_empty(&kiblnd_data.kib_connd_zombies));
- LASSERT (list_empty(&kiblnd_data.kib_connd_conns));
+ LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
+ LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
/* flag threads to terminate; wake and wait for them to die */
kiblnd_data.kib_shutdown = 1;
- wake_up_all(&kiblnd_data.kib_sched_waitq);
- wake_up_all(&kiblnd_data.kib_connd_waitq);
+ cfs_waitq_broadcast(&kiblnd_data.kib_sched_waitq);
+ cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
i = 2;
- while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+ while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
- atomic_read(&kiblnd_data.kib_nthreads));
+ cfs_atomic_read(&kiblnd_data.kib_nthreads));
cfs_pause(cfs_time_seconds(1));
}
if (kiblnd_data.kib_peers != NULL)
LIBCFS_FREE(kiblnd_data.kib_peers,
- sizeof(struct list_head) *
+ sizeof(cfs_list_t) *
kiblnd_data.kib_peer_hash_size);
CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
kiblnd_data.kib_init = IBLND_INIT_NOTHING;
PORTAL_MODULE_UNUSE;
kiblnd_shutdown (lnet_ni_t *ni)
{
kib_net_t *net = ni->ni_data;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
int i;
unsigned long flags;
goto out;
CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
net->ibn_shutdown = 1;
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
switch (net->ibn_init) {
default:
/* Wait for all peer state to clean up */
i = 2;
- while (atomic_read(&net->ibn_npeers) != 0) {
+ while (cfs_atomic_read(&net->ibn_npeers) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
"%s: waiting for %d peers to disconnect\n",
libcfs_nid2str(ni->ni_nid),
- atomic_read(&net->ibn_npeers));
+ cfs_atomic_read(&net->ibn_npeers));
cfs_pause(cfs_time_seconds(1));
}
/* fall through */
case IBLND_INIT_NOTHING:
- LASSERT (atomic_read(&net->ibn_nconns) == 0);
+ LASSERT (cfs_atomic_read(&net->ibn_nconns) == 0);
if (net->ibn_dev != NULL &&
net->ibn_dev->ibd_nnets == 0)
}
CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
net->ibn_init = IBLND_INIT_NOTHING;
ni->ni_data = NULL;
LIBCFS_FREE(net, sizeof(*net));
out:
- if (list_empty(&kiblnd_data.kib_devs))
+ if (cfs_list_empty(&kiblnd_data.kib_devs))
kiblnd_base_shutdown();
return;
}
PORTAL_MODULE_USE;
memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
- rwlock_init(&kiblnd_data.kib_global_lock);
+ cfs_rwlock_init(&kiblnd_data.kib_global_lock);
- INIT_LIST_HEAD(&kiblnd_data.kib_devs);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_devs);
kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
LIBCFS_ALLOC(kiblnd_data.kib_peers,
- sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
+ sizeof(cfs_list_t) *
+ kiblnd_data.kib_peer_hash_size);
if (kiblnd_data.kib_peers == NULL) {
goto failed;
}
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
- spin_lock_init(&kiblnd_data.kib_connd_lock);
- INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
- INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
- init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+ cfs_spin_lock_init(&kiblnd_data.kib_connd_lock);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
- spin_lock_init(&kiblnd_data.kib_sched_lock);
- INIT_LIST_HEAD(&kiblnd_data.kib_sched_conns);
- init_waitqueue_head(&kiblnd_data.kib_sched_waitq);
+ cfs_spin_lock_init(&kiblnd_data.kib_sched_lock);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_sched_conns);
+ cfs_waitq_init(&kiblnd_data.kib_sched_waitq);
kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
char *ifname;
kib_dev_t *ibdev = NULL;
kib_net_t *net;
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct timeval tv;
int rc;
memset(net, 0, sizeof(*net));
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
goto failed;
}
- list_for_each (tmp, &kiblnd_data.kib_devs) {
- ibdev = list_entry(tmp, kib_dev_t, ibd_list);
+ cfs_list_for_each (tmp, &kiblnd_data.kib_devs) {
+ ibdev = cfs_list_entry(tmp, kib_dev_t, ibd_list);
if (!strcmp(&ibdev->ibd_ifname[0], ifname))
break;
/* tunables fixed at compile time */
#ifdef CONFIG_SMP
-# define IBLND_N_SCHED num_online_cpus() /* # schedulers */
+# define IBLND_N_SCHED cfs_num_online_cpus() /* # schedulers */
#else
# define IBLND_N_SCHED 1 /* # schedulers */
#endif
typedef struct
{
- struct list_head ibd_list; /* chain on kib_devs */
+ cfs_list_t ibd_list; /* chain on kib_devs */
__u32 ibd_ifip; /* IPoIB interface IP */
char ibd_ifname[32]; /* IPoIB interface name */
int ibd_nnets; /* # nets extant */
struct kib_pmr_pool;
typedef struct {
- struct list_head pmr_list; /* chain node */
+ cfs_list_t pmr_list; /* chain node */
struct ib_phys_buf *pmr_ipb; /* physical buffer */
struct ib_mr *pmr_mr; /* IB MR */
struct kib_pmr_pool *pmr_pool; /* owner of this MR */
typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps, int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
-typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
-typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
+typedef void (*kib_ps_node_init_t)(struct kib_pool *po,
+ cfs_list_t *node);
+typedef void (*kib_ps_node_fini_t)(struct kib_pool *po,
+ cfs_list_t *node);
struct kib_net;
typedef struct kib_poolset
{
- spinlock_t ps_lock; /* serialize */
+ cfs_spinlock_t ps_lock; /* serialize */
struct kib_net *ps_net; /* network it belongs to */
char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
- struct list_head ps_pool_list; /* list of pools */
+ cfs_list_t ps_pool_list; /* list of pools */
cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
int ps_increasing; /* is allocating new pool */
int ps_pool_size; /* new pool size */
typedef struct kib_pool
{
- struct list_head po_list; /* chain on pool list */
- struct list_head po_free_list; /* pre-allocated node */
+ cfs_list_t po_list; /* chain on pool list */
+ cfs_list_t po_free_list; /* pre-allocated node */
kib_poolset_t *po_owner; /* pool_set of this pool */
cfs_time_t po_deadline; /* deadline of this pool */
int po_allocated; /* # of elements in use */
typedef struct
{
- spinlock_t fps_lock; /* serialize */
+ cfs_spinlock_t fps_lock; /* serialize */
struct kib_net *fps_net; /* IB network */
- struct list_head fps_pool_list; /* FMR pool list */
+ cfs_list_t fps_pool_list; /* FMR pool list */
__u64 fps_version; /* validity stamp */
int fps_increasing; /* is allocating new pool */
cfs_time_t fps_next_retry; /* time stamp for retry if failed to allocate */
typedef struct
{
- struct list_head fpo_list; /* chain on pool list */
+ cfs_list_t fpo_list; /* chain on pool list */
kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
cfs_time_t fpo_deadline; /* deadline of this pool */
unsigned int ibn_with_fmr:1; /* FMR? */
unsigned int ibn_with_pmr:1; /* PMR? */
- atomic_t ibn_npeers; /* # peers extant */
- atomic_t ibn_nconns; /* # connections extant */
+ cfs_atomic_t ibn_npeers; /* # peers extant */
+ cfs_atomic_t ibn_nconns; /* # connections extant */
kib_tx_poolset_t ibn_tx_ps; /* tx pool-set */
kib_fmr_poolset_t ibn_fmr_ps; /* fmr pool-set */
typedef struct
{
- int kib_init; /* initialisation state */
- int kib_shutdown; /* shut down? */
- struct list_head kib_devs; /* IB devices extant */
- atomic_t kib_nthreads; /* # live threads */
- rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
+ int kib_init; /* initialisation state */
+ int kib_shutdown; /* shut down? */
+ cfs_list_t kib_devs; /* IB devices extant */
+ cfs_atomic_t kib_nthreads; /* # live threads */
+ cfs_rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
- struct list_head *kib_peers; /* hash table of all my known peers */
- int kib_peer_hash_size; /* size of kib_peers */
+ cfs_list_t *kib_peers; /* hash table of all my known peers */
+ int kib_peer_hash_size;/* size of kib_peers */
- void *kib_connd; /* the connd task (serialisation assertions) */
- struct list_head kib_connd_conns; /* connections to setup/teardown */
- struct list_head kib_connd_zombies; /* connections with zero refcount */
- wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */
- spinlock_t kib_connd_lock; /* serialise */
+ void *kib_connd; /* the connd task (serialisation assertions) */
+ cfs_list_t kib_connd_conns; /* connections to setup/teardown */
+ cfs_list_t kib_connd_zombies;/* connections with zero refcount */
+ cfs_waitq_t kib_connd_waitq; /* connection daemon sleeps here */
+ cfs_spinlock_t kib_connd_lock; /* serialise */
- wait_queue_head_t kib_sched_waitq; /* schedulers sleep here */
- struct list_head kib_sched_conns; /* conns to check for rx completions */
- spinlock_t kib_sched_lock; /* serialise */
+ cfs_waitq_t kib_sched_waitq; /* schedulers sleep here */
+ cfs_list_t kib_sched_conns; /* conns to check for rx completions */
+ cfs_spinlock_t kib_sched_lock; /* serialise */
- struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
+ struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
} kib_data_t;
#define IBLND_INIT_NOTHING 0
typedef struct kib_rx /* receive message */
{
- struct list_head rx_list; /* queue for attention */
+ cfs_list_t rx_list; /* queue for attention */
struct kib_conn *rx_conn; /* owning conn */
int rx_nob; /* # bytes received (-1 while posted) */
enum ib_wc_status rx_status; /* completion status */
typedef struct kib_tx /* transmit message */
{
- struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
+ cfs_list_t tx_list; /* queue on idle_txs ibc_tx_queue etc. */
kib_tx_pool_t *tx_pool; /* pool I'm from */
struct kib_conn *tx_conn; /* owning conn */
short tx_sending; /* # tx callbacks outstanding */
typedef struct kib_conn
{
- struct kib_peer *ibc_peer; /* owning peer */
- struct list_head ibc_list; /* stash on peer's conn list */
- struct list_head ibc_sched_list; /* schedule for attention */
- __u16 ibc_version; /* version of connection */
- __u64 ibc_incarnation; /* which instance of the peer */
- atomic_t ibc_refcount; /* # users */
- int ibc_state; /* what's happening */
- int ibc_nsends_posted; /* # uncompleted sends */
- int ibc_noops_posted; /* # uncompleted NOOPs */
- int ibc_credits; /* # credits I have */
- int ibc_outstanding_credits; /* # credits to return */
- int ibc_reserved_credits;/* # ACK/DONE msg credits */
- int ibc_comms_error; /* set on comms error */
- int ibc_nrx:16; /* receive buffers owned */
- int ibc_scheduled:1; /* scheduled for attention */
- int ibc_ready:1; /* CQ callback fired */
- unsigned long ibc_last_send; /* time of last send */
- struct list_head ibc_early_rxs; /* rxs completed before ESTABLISHED */
- struct list_head ibc_tx_queue; /* sends that need a credit */
- struct list_head ibc_tx_queue_nocred;/* sends that don't need a credit */
- struct list_head ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
- struct list_head ibc_active_txs; /* active tx awaiting completion */
- spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
-
- struct rdma_cm_id *ibc_cmid; /* CM id */
- struct ib_cq *ibc_cq; /* completion queue */
-
- kib_connvars_t *ibc_connvars; /* in-progress connection state */
+ struct kib_peer *ibc_peer; /* owning peer */
+ cfs_list_t ibc_list; /* stash on peer's conn list */
+ cfs_list_t ibc_sched_list; /* schedule for attention */
+ __u16 ibc_version; /* version of connection */
+ __u64 ibc_incarnation; /* which instance of the peer */
+ cfs_atomic_t ibc_refcount; /* # users */
+ int ibc_state; /* what's happening */
+ int ibc_nsends_posted; /* # uncompleted sends */
+ int ibc_noops_posted; /* # uncompleted NOOPs */
+ int ibc_credits; /* # credits I have */
+ int ibc_outstanding_credits; /* # credits to return */
+ int ibc_reserved_credits;/* # ACK/DONE msg credits */
+ int ibc_comms_error; /* set on comms error */
+ int ibc_nrx:16; /* receive buffers owned */
+ int ibc_scheduled:1; /* scheduled for attention */
+ int ibc_ready:1; /* CQ callback fired */
+ unsigned long ibc_last_send; /* time of last send */
+ cfs_list_t ibc_early_rxs; /* rxs completed before ESTABLISHED */
+ cfs_list_t ibc_tx_queue; /* sends that need a credit */
+ cfs_list_t ibc_tx_queue_nocred;/* sends that don't need a credit */
+ cfs_list_t ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
+ cfs_list_t ibc_active_txs; /* active tx awaiting completion */
+ cfs_spinlock_t ibc_lock; /* serialise */
+ kib_rx_t *ibc_rxs; /* the rx descs */
+ kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
+
+ struct rdma_cm_id *ibc_cmid; /* CM id */
+ struct ib_cq *ibc_cq; /* completion queue */
+
+ kib_connvars_t *ibc_connvars; /* in-progress connection state */
} kib_conn_t;
#define IBLND_CONN_INIT 0 /* being intialised */
typedef struct kib_peer
{
- struct list_head ibp_list; /* stash on global peer list */
- lnet_nid_t ibp_nid; /* who's on the other end(s) */
- lnet_ni_t *ibp_ni; /* LNet interface */
- atomic_t ibp_refcount; /* # users */
- struct list_head ibp_conns; /* all active connections */
- struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- __u16 ibp_version; /* version of peer */
- __u64 ibp_incarnation; /* incarnation of peer */
- int ibp_connecting; /* current active connection attempts */
- int ibp_accepting; /* current passive connection attempts */
- int ibp_error; /* errno on closing this peer */
- cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
+ cfs_list_t ibp_list; /* stash on global peer list */
+ lnet_nid_t ibp_nid; /* who's on the other end(s) */
+ lnet_ni_t *ibp_ni; /* LNet interface */
+ cfs_atomic_t ibp_refcount; /* # users */
+ cfs_list_t ibp_conns; /* all active connections */
+ cfs_list_t ibp_tx_queue; /* msgs waiting for a conn */
+ __u16 ibp_version; /* version of peer */
+ __u64 ibp_incarnation; /* incarnation of peer */
+ int ibp_connecting; /* current active connection attempts */
+ int ibp_accepting; /* current passive connection attempts */
+ int ibp_error; /* errno on closing this peer */
+ cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
} kib_peer_t;
extern kib_data_t kiblnd_data;
#define kiblnd_conn_addref(conn) \
do { \
CDEBUG(D_NET, "conn[%p] (%d)++\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- LASSERT(atomic_read(&(conn)->ibc_refcount) > 0); \
- atomic_inc(&(conn)->ibc_refcount); \
+ (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+ LASSERT(cfs_atomic_read(&(conn)->ibc_refcount) > 0); \
+ cfs_atomic_inc(&(conn)->ibc_refcount); \
} while (0)
-#define kiblnd_conn_decref(conn) \
-do { \
- unsigned long flags; \
- \
- CDEBUG(D_NET, "conn[%p] (%d)--\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- LASSERT(atomic_read(&(conn)->ibc_refcount) > 0); \
- if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
- list_add_tail(&(conn)->ibc_list, \
- &kiblnd_data.kib_connd_zombies); \
- wake_up(&kiblnd_data.kib_connd_waitq); \
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); \
- } \
+#define kiblnd_conn_decref(conn) \
+do { \
+ unsigned long flags; \
+ \
+ CDEBUG(D_NET, "conn[%p] (%d)--\n", \
+ (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+ LASSERT(cfs_atomic_read(&(conn)->ibc_refcount) > 0); \
+ if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) { \
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
+ cfs_list_add_tail(&(conn)->ibc_list, \
+ &kiblnd_data.kib_connd_zombies); \
+ cfs_waitq_signal(&kiblnd_data.kib_connd_waitq); \
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
+ } \
} while (0)
#define kiblnd_peer_addref(peer) \
do { \
CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
(peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read (&(peer)->ibp_refcount)); \
- LASSERT(atomic_read(&(peer)->ibp_refcount) > 0); \
- atomic_inc(&(peer)->ibp_refcount); \
+ cfs_atomic_read (&(peer)->ibp_refcount)); \
+ LASSERT(cfs_atomic_read(&(peer)->ibp_refcount) > 0); \
+ cfs_atomic_inc(&(peer)->ibp_refcount); \
} while (0)
#define kiblnd_peer_decref(peer) \
do { \
CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
(peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read (&(peer)->ibp_refcount)); \
- LASSERT(atomic_read(&(peer)->ibp_refcount) > 0); \
- if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
+ cfs_atomic_read (&(peer)->ibp_refcount)); \
+ LASSERT(cfs_atomic_read(&(peer)->ibp_refcount) > 0); \
+ if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount)) \
kiblnd_destroy_peer(peer); \
} while (0)
-static inline struct list_head *
+static inline cfs_list_t *
kiblnd_nid2peerlist (lnet_nid_t nid)
{
- unsigned int hash = ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
+ unsigned int hash =
+ ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
return (&kiblnd_data.kib_peers [hash]);
}
kiblnd_peer_active (kib_peer_t *peer)
{
/* Am I in the peer hash table? */
- return (!list_empty(&peer->ibp_list));
+ return (!cfs_list_empty(&peer->ibp_list));
}
static inline kib_conn_t *
kiblnd_get_conn_locked (kib_peer_t *peer)
{
- LASSERT (!list_empty(&peer->ibp_conns));
+ LASSERT (!cfs_list_empty(&peer->ibp_conns));
/* just return the first connection */
- return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+ return cfs_list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}
static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
- time_after(jiffies, conn->ibc_last_send +
- *kiblnd_tunables.kib_keepalive*HZ);
+ cfs_time_after(jiffies, conn->ibc_last_send +
+ *kiblnd_tunables.kib_keepalive*CFS_HZ);
}
static inline int
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
- if (!list_empty(&conn->ibc_tx_queue_nocred))
+ if (!cfs_list_empty(&conn->ibc_tx_queue_nocred))
return 0; /* NOOP can be piggybacked */
if (!IBLND_OOB_CAPABLE(conn->ibc_version))
- return list_empty(&conn->ibc_tx_queue); /* can't piggyback? */
+ /* can't piggyback? */
+ return cfs_list_empty(&conn->ibc_tx_queue);
/* No tx to piggyback NOOP onto or no credit to send a tx */
- return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0);
+ return (cfs_list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0);
}
static inline void
}
static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str (kib_conn_t *conn, cfs_list_t *q)
{
if (q == &conn->ibc_tx_queue)
return "tx_queue";
kiblnd_set_conn_state (kib_conn_t *conn, int state)
{
conn->ibc_state = state;
- mb();
+ cfs_mb();
}
static inline void
int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
kib_rdma_desc_t *rd, int nfrags);
void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+void kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node);
+cfs_list_t *kiblnd_pool_alloc_node(kib_poolset_t *ps);
int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status);
+void kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
+ int status);
void kiblnd_check_sends (kib_conn_t *conn);
void kiblnd_qp_event(struct ib_event *event, void *arg);
int i;
LASSERT (net != NULL);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */
}
void
-kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
+kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int status)
{
kib_tx_t *tx;
- while (!list_empty (txlist)) {
- tx = list_entry (txlist->next, kib_tx_t, tx_list);
+ while (!cfs_list_empty (txlist)) {
+ tx = cfs_list_entry (txlist->next, kib_tx_t, tx_list);
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
/* complete now */
tx->tx_waiting = 0;
tx->tx_status = status;
kib_tx_t *
kiblnd_get_idle_tx (lnet_ni_t *ni)
{
- kib_net_t *net = (kib_net_t *)ni->ni_data;
- struct list_head *node;
- kib_tx_t *tx;
+ kib_net_t *net = (kib_net_t *)ni->ni_data;
+ cfs_list_t *node;
+ kib_tx_t *tx;
node = kiblnd_pool_alloc_node(&net->ibn_tx_ps.tps_poolset);
if (node == NULL)
kib_conn_t *conn = rx->rx_conn;
unsigned long flags;
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
LASSERT (conn->ibc_nrx > 0);
conn->ibc_nrx--;
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
kiblnd_conn_decref(conn);
}
int rc;
LASSERT (net != NULL);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
if (credit == IBLND_POSTRX_NO_CREDIT)
return 0;
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
if (credit == IBLND_POSTRX_PEER_CREDIT)
conn->ibc_outstanding_credits++;
else
conn->ibc_reserved_credits++;
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
kiblnd_check_sends(conn);
return 0;
kib_tx_t *
kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
- list_for_each(tmp, &conn->ibc_active_txs) {
- kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+ cfs_list_for_each(tmp, &conn->ibc_active_txs) {
+ kib_tx_t *tx = cfs_list_entry(tmp, kib_tx_t, tx_list);
LASSERT (!tx->tx_queued);
LASSERT (tx->tx_sending != 0 || tx->tx_waiting);
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
int idle;
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
if (tx == NULL) {
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
idle = !tx->tx_queued && (tx->tx_sending == 0);
if (idle)
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
if (idle)
kiblnd_tx_done(ni, tx);
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
CDEBUG (D_NET, "Received %x[%d] from %s\n",
- msg->ibm_type, credits, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ msg->ibm_type, credits,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
if (credits != 0) {
/* Have I received credits that will let me send? */
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
if (conn->ibc_credits + credits >
IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
rc2 = conn->ibc_credits;
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
CERROR("Bad credits from %s: %d + %d > %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc2, credits, IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+ rc2, credits,
+ IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
kiblnd_close_conn(conn, -EPROTO);
kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
conn->ibc_credits += credits;
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
kiblnd_check_sends(conn);
}
break;
case IBLND_MSG_PUT_NAK:
- CWARN ("PUT_NACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CWARN ("PUT_NACK from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
post_credit = IBLND_POSTRX_RSRVD_CREDIT;
kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
msg->ibm_u.completion.ibcm_status,
case IBLND_MSG_PUT_ACK:
post_credit = IBLND_POSTRX_RSRVD_CREDIT;
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
msg->ibm_u.putack.ibpam_src_cookie);
if (tx != NULL)
- list_del(&tx->tx_list);
- spin_unlock(&conn->ibc_lock);
+ cfs_list_del(&tx->tx_list);
+ cfs_spin_unlock(&conn->ibc_lock);
if (tx == NULL) {
CERROR("Unmatched PUT_ACK from %s\n",
CERROR("Can't setup rdma for PUT to %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
tx->tx_waiting = 0; /* clear waiting and queue atomically */
kiblnd_queue_tx_locked(tx, conn);
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
break;
case IBLND_MSG_PUT_DONE:
/* racing with connection establishment/teardown! */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
/* must check holding global lock to eliminate race */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+ cfs_write_unlock_irqrestore(g_lock, flags);
return;
}
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
}
kiblnd_handle_rx(rx);
return;
}
/* NB don't drop ibc_lock before bumping tx_sending */
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
tx->tx_queued = 0;
if (msg->ibm_type == IBLND_MSG_NOOP &&
/* OK to drop when posted enough NOOPs, since
* kiblnd_check_sends will queue NOOP again when
* posted NOOPs complete */
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
kiblnd_tx_done(peer->ibp_ni, tx);
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
libcfs_nid2str(peer->ibp_nid),
conn->ibc_noops_posted);
* tx_sending is non-zero if we've not done the tx_complete()
* from the first send; hence the ++ rather than = below. */
tx->tx_sending++;
- list_add(&tx->tx_list, &conn->ibc_active_txs);
+ cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
/* I'm still holding ibc_lock! */
if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
done = (tx->tx_sending == 0);
if (done)
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
CERROR("Error %d posting transmit to %s\n",
return;
}
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
LASSERT (!IBLND_OOB_CAPABLE(ver) ||
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+ !cfs_list_empty(&conn->ibc_tx_queue_rsrvd)) {
+ tx = cfs_list_entry(conn->ibc_tx_queue_rsrvd.next,
+ kib_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
+ cfs_list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
if (kiblnd_send_noop(conn)) {
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
tx = kiblnd_get_idle_tx(ni);
if (tx != NULL)
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
if (tx != NULL)
kiblnd_queue_tx_locked(tx, conn);
}
for (;;) {
int credit;
- if (!list_empty(&conn->ibc_tx_queue_nocred)) {
+ if (!cfs_list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
- } else if (!list_empty(&conn->ibc_tx_queue)) {
+ tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next,
+ kib_tx_t, tx_list);
+ } else if (!cfs_list_empty(&conn->ibc_tx_queue)) {
credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
- kib_tx_t, tx_list);
+ tx = cfs_list_entry(conn->ibc_tx_queue.next,
+ kib_tx_t, tx_list);
} else
break;
break;
}
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
}
void
kiblnd_peer_alive(conn->ibc_peer);
}
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
/* I could be racing with rdma completion. Whoever makes 'tx' idle
* gets to free it, which also drops its ref on 'conn'. */
!tx->tx_waiting && /* Not waiting for peer */
!tx->tx_queued; /* Not re-queued (PUT_DONE) */
if (idle)
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
kiblnd_conn_addref(conn); /* 1 ref for me.... */
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
if (idle)
kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
int dstidx;
int wrknob;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (tx->tx_nwrq == 0);
LASSERT (type == IBLND_MSG_GET_DONE ||
type == IBLND_MSG_PUT_DONE);
void
kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
{
- struct list_head *q;
+ cfs_list_t *q;
LASSERT (tx->tx_nwrq > 0); /* work items set up */
LASSERT (!tx->tx_queued); /* not queued for sending already */
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
tx->tx_queued = 1;
- tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
+ tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * CFS_HZ);
if (tx->tx_conn == NULL) {
kiblnd_conn_addref(conn);
break;
}
- list_add_tail(&tx->tx_list, q);
+ cfs_list_add_tail(&tx->tx_list, q);
}
void
kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
{
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
kiblnd_queue_tx_locked(tx, conn);
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
kiblnd_check_sends(conn);
}
kib_peer_t *peer;
kib_peer_t *peer2;
kib_conn_t *conn;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
int rc;
/* First time, just use a read lock since I expect to find my peer
* connected */
- read_lock_irqsave(g_lock, flags);
+ cfs_read_lock_irqsave(g_lock, flags);
peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL && !list_empty(&peer->ibp_conns)) {
+ if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) {
/* Found a peer with an established connection */
conn = kiblnd_get_conn_locked(peer);
kiblnd_conn_addref(conn); /* 1 ref for me... */
- read_unlock_irqrestore(g_lock, flags);
+ cfs_read_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
return;
}
- read_unlock(g_lock);
+ cfs_read_unlock(g_lock);
/* Re-try with a write lock */
- write_lock(g_lock);
+ cfs_write_lock(g_lock);
peer = kiblnd_find_peer_locked(nid);
if (peer != NULL) {
- if (list_empty(&peer->ibp_conns)) {
+ if (cfs_list_empty(&peer->ibp_conns)) {
/* found a peer, but it's still connecting... */
LASSERT (peer->ibp_connecting != 0 ||
peer->ibp_accepting != 0);
if (tx != NULL)
- list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_list_add_tail(&tx->tx_list,
+ &peer->ibp_tx_queue);
+ cfs_write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer);
kiblnd_conn_addref(conn); /* 1 ref for me... */
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
return;
}
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
/* Allocate a peer ready to add to the peer table and retry */
rc = kiblnd_create_peer(ni, &peer, nid);
return;
}
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid);
if (peer2 != NULL) {
- if (list_empty(&peer2->ibp_conns)) {
+ if (cfs_list_empty(&peer2->ibp_conns)) {
/* found a peer, but it's still connecting... */
LASSERT (peer2->ibp_connecting != 0 ||
peer2->ibp_accepting != 0);
if (tx != NULL)
- list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_list_add_tail(&tx->tx_list,
+ &peer2->ibp_tx_queue);
+ cfs_write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer2);
kiblnd_conn_addref(conn); /* 1 ref for me... */
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
if (tx != NULL)
- list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
+ cfs_list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
kiblnd_peer_addref(peer);
- list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+ cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
kiblnd_connect_peer(peer);
kiblnd_peer_decref(peer);
LASSERT (payload_niov <= LNET_MAX_IOV);
/* Thread context */
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
/* payload is either all vaddrs or all pages */
LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
int rc = 0;
LASSERT (mlen <= rlen);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
/* Either all pages or all vaddrs */
LASSERT (!(kiov != NULL && iov != NULL));
int
kiblnd_thread_start (int (*fn)(void *arg), void *arg)
{
- long pid = kernel_thread (fn, arg, 0);
+ long pid = cfs_kernel_thread (fn, arg, 0);
if (pid < 0)
return ((int)pid);
- atomic_inc (&kiblnd_data.kib_nthreads);
+ cfs_atomic_inc (&kiblnd_data.kib_nthreads);
return (0);
}
void
kiblnd_thread_fini (void)
{
- atomic_dec (&kiblnd_data.kib_nthreads);
+ cfs_atomic_dec (&kiblnd_data.kib_nthreads);
}
void
{
/* This is racy, but everyone's only writing cfs_time_current() */
peer->ibp_last_alive = cfs_time_current();
- mb();
+ cfs_mb();
}
void
cfs_time_t last_alive = 0;
unsigned long flags;
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (list_empty(&peer->ibp_conns) &&
+ if (cfs_list_empty(&peer->ibp_conns) &&
peer->ibp_accepting == 0 &&
peer->ibp_connecting == 0 &&
peer->ibp_error != 0) {
last_alive = peer->ibp_last_alive;
}
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (error != 0)
lnet_notify(peer->ibp_ni,
return; /* already being handled */
if (error == 0 &&
- list_empty(&conn->ibc_tx_queue) &&
- list_empty(&conn->ibc_tx_queue_rsrvd) &&
- list_empty(&conn->ibc_tx_queue_nocred) &&
- list_empty(&conn->ibc_active_txs)) {
+ cfs_list_empty(&conn->ibc_tx_queue) &&
+ cfs_list_empty(&conn->ibc_tx_queue_rsrvd) &&
+ cfs_list_empty(&conn->ibc_tx_queue_nocred) &&
+ cfs_list_empty(&conn->ibc_active_txs)) {
CDEBUG(D_NET, "closing conn to %s\n",
libcfs_nid2str(peer->ibp_nid));
} else {
CDEBUG(D_NETERROR, "Closing conn to %s: error %d%s%s%s%s\n",
libcfs_nid2str(peer->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+ cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+ cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+ cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
- list_del(&conn->ibc_list);
+ cfs_list_del(&conn->ibc_list);
/* connd (see below) takes over ibc_list's ref */
- if (list_empty (&peer->ibp_conns) && /* no more conns */
+ if (cfs_list_empty (&peer->ibp_conns) && /* no more conns */
kiblnd_peer_active(peer)) { /* still in peer table */
kiblnd_unlink_peer_locked(peer);
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
- wake_up (&kiblnd_data.kib_connd_waitq);
+ cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+ cfs_waitq_signal (&kiblnd_data.kib_connd_waitq);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}
void
{
unsigned long flags;
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
kiblnd_close_conn_locked(conn, error);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
void
unsigned long flags;
kib_rx_t *rx;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!list_empty(&conn->ibc_early_rxs)) {
- rx = list_entry(conn->ibc_early_rxs.next,
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ while (!cfs_list_empty(&conn->ibc_early_rxs)) {
+ rx = cfs_list_entry(conn->ibc_early_rxs.next,
kib_rx_t, rx_list);
- list_del(&rx->rx_list);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_list_del(&rx->rx_list);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
kiblnd_handle_rx(rx);
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
void
-kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
+kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs)
{
- LIST_HEAD (zombies);
- struct list_head *tmp;
- struct list_head *nxt;
+ CFS_LIST_HEAD (zombies);
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
kib_tx_t *tx;
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
- list_for_each_safe (tmp, nxt, txs) {
- tx = list_entry (tmp, kib_tx_t, tx_list);
+ cfs_list_for_each_safe (tmp, nxt, txs) {
+ tx = cfs_list_entry (tmp, kib_tx_t, tx_list);
if (txs == &conn->ibc_active_txs) {
LASSERT (!tx->tx_queued);
if (tx->tx_sending == 0) {
tx->tx_queued = 0;
- list_del (&tx->tx_list);
- list_add (&tx->tx_list, &zombies);
+ cfs_list_del (&tx->tx_list);
+ cfs_list_add (&tx->tx_list, &zombies);
}
}
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
&zombies, -ECONNABORTED);
void
kiblnd_finalise_conn (kib_conn_t *conn)
{
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (conn->ibc_state > IBLND_CONN_INIT);
kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
void
kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
{
- LIST_HEAD (zombies);
+ CFS_LIST_HEAD (zombies);
unsigned long flags;
LASSERT (error != 0);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (active) {
LASSERT (peer->ibp_connecting > 0);
if (peer->ibp_connecting != 0 ||
peer->ibp_accepting != 0) {
/* another connection attempt under way... */
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
return;
}
- if (list_empty(&peer->ibp_conns)) {
+ if (cfs_list_empty(&peer->ibp_conns)) {
/* Take peer's blocked transmits to complete with error */
- list_add(&zombies, &peer->ibp_tx_queue);
- list_del_init(&peer->ibp_tx_queue);
+ cfs_list_add(&zombies, &peer->ibp_tx_queue);
+ cfs_list_del_init(&peer->ibp_tx_queue);
if (kiblnd_peer_active(peer))
kiblnd_unlink_peer_locked(peer);
peer->ibp_error = error;
} else {
/* Can't have blocked transmits if there are connections */
- LASSERT (list_empty(&peer->ibp_tx_queue));
+ LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
}
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
kiblnd_peer_notify(peer);
- if (list_empty (&zombies))
+ if (cfs_list_empty (&zombies))
return;
CDEBUG (D_NETERROR, "Deleting messages for %s: connection failed\n",
{
kib_peer_t *peer = conn->ibc_peer;
kib_tx_t *tx;
- struct list_head txs;
+ cfs_list_t txs;
unsigned long flags;
int active;
libcfs_nid2str(peer->ibp_nid), active,
conn->ibc_version, status);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
peer->ibp_connecting > 0) ||
(conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
}
/* connection established */
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
conn->ibc_last_send = jiffies;
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
/* Add conn to peer's list and nuke any dangling conns from a different
* peer instance... */
kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
- list_add(&conn->ibc_list, &peer->ibp_conns);
+ cfs_list_add(&conn->ibc_list, &peer->ibp_conns);
if (active)
peer->ibp_connecting--;
else
}
/* grab pending txs while I have the lock */
- list_add(&txs, &peer->ibp_tx_queue);
- list_del_init(&peer->ibp_tx_queue);
+ cfs_list_add(&txs, &peer->ibp_tx_queue);
+ cfs_list_del_init(&peer->ibp_tx_queue);
if (!kiblnd_peer_active(peer) || /* peer has been deleted */
conn->ibc_comms_error != 0) { /* error has happened already */
/* start to shut down connection */
kiblnd_close_conn_locked(conn, -ECONNABORTED);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
return;
}
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* Schedule blocked txs */
- spin_lock (&conn->ibc_lock);
- while (!list_empty (&txs)) {
- tx = list_entry (txs.next, kib_tx_t, tx_list);
- list_del(&tx->tx_list);
+ cfs_spin_lock (&conn->ibc_lock);
+ while (!cfs_list_empty (&txs)) {
+ tx = cfs_list_entry (txs.next, kib_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
kiblnd_queue_tx_locked(tx, conn);
}
- spin_unlock (&conn->ibc_lock);
+ cfs_spin_unlock (&conn->ibc_lock);
kiblnd_check_sends(conn);
int
kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
kib_msg_t *reqmsg = priv;
kib_msg_t *ackmsg;
kib_dev_t *ibdev;
unsigned long flags;
int rc;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
/* cmid inherits 'context' from the corresponding listener id */
ibdev = (kib_dev_t *)cmid->context;
goto failed;
}
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid);
if (peer2 != NULL) {
if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
peer2->ibp_version != version) {
kiblnd_close_peer_conns_locked(peer2, -ESTALE);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
libcfs_nid2str(nid), peer2->ibp_version, version);
/* tie-break connection race in favour of the higher NID */
if (peer2->ibp_connecting != 0 &&
nid < ni->ni_nid) {
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
peer2->ibp_accepting++;
kiblnd_peer_addref(peer2);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
kiblnd_peer_decref(peer);
peer = peer2;
} else {
LASSERT (net->ibn_shutdown == 0);
kiblnd_peer_addref(peer);
- list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+ cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
}
conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
LASSERT (peer->ibp_connecting > 0); /* 'conn' at least */
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
/* retry connection if it's still needed and no other connection
* attempts (active or passive) are in progress */
- if (!list_empty(&peer->ibp_tx_queue) &&
+ if (!cfs_list_empty(&peer->ibp_tx_queue) &&
peer->ibp_connecting == 1 &&
peer->ibp_accepting == 0) {
retry = 1;
peer->ibp_incarnation = incarnation;
}
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (!retry)
return;
{
kib_peer_t *peer = conn->ibc_peer;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
switch (reason) {
goto failed;
}
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (msg->ibm_dstnid == ni->ni_nid &&
msg->ibm_dststamp == net->ibn_incarnation)
rc = 0;
else
rc = -ESTALE;
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (rc != 0) {
CERROR("Bad connection reply from %s, rc = %d, "
unsigned long flags;
int rc;
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
incarnation = peer->ibp_incarnation;
version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
if (conn == NULL) {
}
int
-kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
+kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs)
{
kib_tx_t *tx;
- struct list_head *ttmp;
+ cfs_list_t *ttmp;
int timed_out = 0;
- spin_lock(&conn->ibc_lock);
+ cfs_spin_lock(&conn->ibc_lock);
- list_for_each (ttmp, txs) {
- tx = list_entry (ttmp, kib_tx_t, tx_list);
+ cfs_list_for_each (ttmp, txs) {
+ tx = cfs_list_entry (ttmp, kib_tx_t, tx_list);
if (txs != &conn->ibc_active_txs) {
LASSERT (tx->tx_queued);
LASSERT (tx->tx_waiting || tx->tx_sending != 0);
}
- if (time_after_eq (jiffies, tx->tx_deadline)) {
+ if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
timed_out = 1;
CERROR("Timed out tx: %s, %lu seconds\n",
kiblnd_queue2str(conn, txs),
}
}
- spin_unlock(&conn->ibc_lock);
+ cfs_spin_unlock(&conn->ibc_lock);
return timed_out;
}
void
kiblnd_check_conns (int idx)
{
- struct list_head *peers = &kiblnd_data.kib_peers[idx];
- struct list_head *ptmp;
+ cfs_list_t *peers = &kiblnd_data.kib_peers[idx];
+ cfs_list_t *ptmp;
kib_peer_t *peer;
kib_conn_t *conn;
- struct list_head *ctmp;
+ cfs_list_t *ctmp;
unsigned long flags;
again:
/* NB. We expect to have a look at all the peers and not find any
* rdmas to time out, so we just use a shared lock while we
* take a look... */
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- list_for_each (ptmp, peers) {
- peer = list_entry (ptmp, kib_peer_t, ibp_list);
+ cfs_list_for_each (ptmp, peers) {
+ peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
- list_for_each (ctmp, &peer->ibp_conns) {
- conn = list_entry (ctmp, kib_conn_t, ibc_list);
+ cfs_list_for_each (ctmp, &peer->ibp_conns) {
+ conn = cfs_list_entry (ctmp, kib_conn_t, ibc_list);
LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
kiblnd_conn_addref(conn); /* 1 ref for me... */
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
CERROR("Timed out RDMA with %s (%lu)\n",
libcfs_nid2str(peer->ibp_nid),
}
}
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
void
kiblnd_disconnect_conn (kib_conn_t *conn)
{
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (current == kiblnd_data.kib_connd);
LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
int
kiblnd_connd (void *arg)
{
- wait_queue_t wait;
+ cfs_waitlink_t wait;
unsigned long flags;
kib_conn_t *conn;
int timeout;
cfs_daemonize ("kiblnd_connd");
cfs_block_allsigs ();
- init_waitqueue_entry (&wait, current);
+ cfs_waitlink_init (&wait);
kiblnd_data.kib_connd = current;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
while (!kiblnd_data.kib_shutdown) {
dropped_lock = 0;
- if (!list_empty (&kiblnd_data.kib_connd_zombies)) {
- conn = list_entry (kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
- list_del(&conn->ibc_list);
+ if (!cfs_list_empty (&kiblnd_data.kib_connd_zombies)) {
+ conn = cfs_list_entry(kiblnd_data. \
+ kib_connd_zombies.next,
+ kib_conn_t, ibc_list);
+ cfs_list_del(&conn->ibc_list);
- spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
+ flags);
dropped_lock = 1;
kiblnd_destroy_conn(conn);
- spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
+ flags);
}
- if (!list_empty (&kiblnd_data.kib_connd_conns)) {
- conn = list_entry (kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
- list_del(&conn->ibc_list);
+ if (!cfs_list_empty (&kiblnd_data.kib_connd_conns)) {
+ conn = cfs_list_entry (kiblnd_data.kib_connd_conns.next,
+ kib_conn_t, ibc_list);
+ cfs_list_del(&conn->ibc_list);
- spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
+ flags);
dropped_lock = 1;
kiblnd_disconnect_conn(conn);
kiblnd_conn_decref(conn);
- spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
+ flags);
}
/* careful with the jiffy wrap... */
const int p = 1;
int chunk = kiblnd_data.kib_peer_hash_size;
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
dropped_lock = 1;
/* Time to check for RDMA timeouts on a few more
kiblnd_data.kib_peer_hash_size;
}
- deadline += p * HZ;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ deadline += p * CFS_HZ;
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock,
+ flags);
}
if (dropped_lock)
continue;
/* Nothing to do for 'timeout' */
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
- spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+ cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add (&kiblnd_data.kib_connd_waitq, &wait);
+ cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
- schedule_timeout (timeout);
+ cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
- spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
+ cfs_set_current_state (CFS_TASK_RUNNING);
+ cfs_waitq_del (&kiblnd_data.kib_connd_waitq, &wait);
+ cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
}
- spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+ cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
kiblnd_thread_fini();
return (0);
* and this CQ is about to be destroyed so I NOOP. */
kib_conn_t *conn = (kib_conn_t *)arg;
unsigned long flags;
-
+
LASSERT (cq == conn->ibc_cq);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
conn->ibc_ready = 1;
conn->ibc_nsends_posted > 0)) {
kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
conn->ibc_scheduled = 1;
- list_add_tail(&conn->ibc_sched_list,
- &kiblnd_data.kib_sched_conns);
- wake_up(&kiblnd_data.kib_sched_waitq);
+ cfs_list_add_tail(&conn->ibc_sched_list,
+ &kiblnd_data.kib_sched_conns);
+ cfs_waitq_signal(&kiblnd_data.kib_sched_waitq);
}
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
}
void
kiblnd_scheduler(void *arg)
{
long id = (long)arg;
- wait_queue_t wait;
+ cfs_waitlink_t wait;
char name[16];
unsigned long flags;
kib_conn_t *conn;
cfs_daemonize(name);
cfs_block_allsigs();
- init_waitqueue_entry(&wait, current);
+ cfs_waitlink_init(&wait);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
while (!kiblnd_data.kib_shutdown) {
if (busy_loops++ >= IBLND_RESCHED) {
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
- flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
+ flags);
- our_cond_resched();
+ cfs_cond_resched();
busy_loops = 0;
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
+ flags);
}
did_something = 0;
- if (!list_empty(&kiblnd_data.kib_sched_conns)) {
- conn = list_entry(kiblnd_data.kib_sched_conns.next,
- kib_conn_t, ibc_sched_list);
+ if (!cfs_list_empty(&kiblnd_data.kib_sched_conns)) {
+ conn = cfs_list_entry(kiblnd_data.kib_sched_conns.next,
+ kib_conn_t, ibc_sched_list);
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
- list_del(&conn->ibc_sched_list);
+ cfs_list_del(&conn->ibc_sched_list);
conn->ibc_ready = 0;
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
- flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
+ flags);
rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
if (rc == 0) {
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data. \
+ kib_sched_lock,
+ flags);
continue;
}
if (rc < 0) {
CWARN("%s: ib_poll_cq failed: %d, "
"closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ rc);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kiblnd_data. \
+ kib_sched_lock, flags);
continue;
}
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
- flags);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
+ flags);
if (rc != 0 || conn->ibc_ready) {
/* There may be another completion waiting; get
* another scheduler to check while I handle
* this one... */
kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
- list_add_tail(&conn->ibc_sched_list,
- &kiblnd_data.kib_sched_conns);
- wake_up(&kiblnd_data.kib_sched_waitq);
+ cfs_list_add_tail(&conn->ibc_sched_list,
+ &kiblnd_data.kib_sched_conns);
+ cfs_waitq_signal(&kiblnd_data.kib_sched_waitq);
} else {
conn->ibc_scheduled = 0;
}
-
+
if (rc != 0) {
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
- flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data. \
+ kib_sched_lock,
+ flags);
kiblnd_complete(&wc);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
- flags);
+ cfs_spin_lock_irqsave(&kiblnd_data. \
+ kib_sched_lock,
+ flags);
}
kiblnd_conn_decref(conn); /* ...drop my ref from above */
if (did_something)
continue;
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
- schedule();
+ cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
busy_loops = 0;
- remove_wait_queue(&kiblnd_data.kib_sched_waitq, &wait);
- set_current_state(TASK_RUNNING);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ cfs_waitq_del(&kiblnd_data.kib_sched_waitq, &wait);
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
}
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
kiblnd_thread_fini();
return (0);
unsigned long flags;
char *str;
- spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
+ cfs_spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
str = strs[idx++];
if (idx >= sizeof(strs)/sizeof(strs[0]))
idx = 0;
- spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
+ cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
return str;
if (kptllnd_find_target(net, id, &peer) != 0)
return;
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
if (peer->peer_last_alive != 0)
*when = peer->peer_last_alive;
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
kptllnd_peer_decref(peer);
return;
}
unsigned long flags;
lnet_process_id_t process_id;
- read_lock(&kptllnd_data.kptl_net_rw_lock);
- LASSERT (list_empty(&kptllnd_data.kptl_nets));
- read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ LASSERT (cfs_list_empty(&kptllnd_data.kptl_nets));
+ cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
switch (kptllnd_data.kptl_init) {
default:
case PTLLND_INIT_DATA:
/* stop receiving */
kptllnd_rx_buffer_pool_fini(&kptllnd_data.kptl_rx_buffer_pool);
- LASSERT (list_empty(&kptllnd_data.kptl_sched_rxq));
- LASSERT (list_empty(&kptllnd_data.kptl_sched_rxbq));
+ LASSERT (cfs_list_empty(&kptllnd_data.kptl_sched_rxq));
+ LASSERT (cfs_list_empty(&kptllnd_data.kptl_sched_rxbq));
/* lock to interleave cleanly with peer birth/death */
- write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
LASSERT (kptllnd_data.kptl_shutdown == 0);
kptllnd_data.kptl_shutdown = 1; /* phase 1 == destroy peers */
/* no new peers possible now */
- write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
/* nuke all existing peers */
process_id.nid = LNET_NID_ANY;
process_id.pid = LNET_PID_ANY;
kptllnd_peer_del(process_id);
- read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
LASSERT (kptllnd_data.kptl_n_active_peers == 0);
"Waiting for %d peers to terminate\n",
kptllnd_data.kptl_npeers);
- read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
- flags);
+ cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
cfs_pause(cfs_time_seconds(1));
- read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
- flags);
+ cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
}
- LASSERT (list_empty(&kptllnd_data.kptl_closing_peers));
- LASSERT (list_empty(&kptllnd_data.kptl_zombie_peers));
+ LASSERT (cfs_list_empty(&kptllnd_data.kptl_closing_peers));
+ LASSERT (cfs_list_empty(&kptllnd_data.kptl_zombie_peers));
LASSERT (kptllnd_data.kptl_peers != NULL);
for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
- LASSERT (list_empty (&kptllnd_data.kptl_peers[i]));
+ LASSERT (cfs_list_empty (&kptllnd_data.kptl_peers[i]));
- read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
CDEBUG(D_NET, "All peers deleted\n");
/* Shutdown phase 2: kill the daemons... */
kptllnd_data.kptl_shutdown = 2;
- mb();
+ cfs_mb();
i = 2;
- while (atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
+ while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
/* Wake up all threads*/
- wake_up_all(&kptllnd_data.kptl_sched_waitq);
- wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
+ cfs_wake_up_all(&kptllnd_data.kptl_sched_waitq);
+ cfs_wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
- atomic_read(&kptllnd_data.kptl_nthreads));
+ cfs_atomic_read(&kptllnd_data.kptl_nthreads));
cfs_pause(cfs_time_seconds(1));
}
CDEBUG(D_NET, "All Threads stopped\n");
- LASSERT(list_empty(&kptllnd_data.kptl_sched_txq));
+ LASSERT(cfs_list_empty(&kptllnd_data.kptl_sched_txq));
kptllnd_cleanup_tx_descs();
kptllnd_errtype2str(prc), prc);
}
- LASSERT (atomic_read(&kptllnd_data.kptl_ntx) == 0);
- LASSERT (list_empty(&kptllnd_data.kptl_idle_txs));
+ LASSERT (cfs_atomic_read(&kptllnd_data.kptl_ntx) == 0);
+ LASSERT (cfs_list_empty(&kptllnd_data.kptl_idle_txs));
if (kptllnd_data.kptl_rx_cache != NULL)
cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache);
if (kptllnd_data.kptl_peers != NULL)
LIBCFS_FREE(kptllnd_data.kptl_peers,
- sizeof (struct list_head) *
+ sizeof (cfs_list_t) *
kptllnd_data.kptl_peer_hash_size);
if (kptllnd_data.kptl_nak_msg != NULL)
kptllnd_data.kptl_eqh = PTL_INVALID_HANDLE;
kptllnd_data.kptl_nih = PTL_INVALID_HANDLE;
- rwlock_init(&kptllnd_data.kptl_net_rw_lock);
- INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
+ cfs_rwlock_init(&kptllnd_data.kptl_net_rw_lock);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
/* Setup the sched locks/lists/waitq */
- spin_lock_init(&kptllnd_data.kptl_sched_lock);
- init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
- INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
- INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
- INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
+ cfs_spin_lock_init(&kptllnd_data.kptl_sched_lock);
+ cfs_init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
/* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
- spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
+ cfs_spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
/* Setup the tx locks/lists */
- spin_lock_init(&kptllnd_data.kptl_tx_lock);
- INIT_LIST_HEAD(&kptllnd_data.kptl_idle_txs);
- atomic_set(&kptllnd_data.kptl_ntx, 0);
+ cfs_spin_lock_init(&kptllnd_data.kptl_tx_lock);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_idle_txs);
+ cfs_atomic_set(&kptllnd_data.kptl_ntx, 0);
/* Uptick the module reference count */
PORTAL_MODULE_USE;
kptllnd_data.kptl_expected_peers =
*kptllnd_tunables.kptl_max_nodes *
*kptllnd_tunables.kptl_max_procs_per_node;
-
+
/*
* Initialize the Network interface instance
* We use the default because we don't have any
/* Initialized the incarnation - it must be for-all-time unique, even
* accounting for the fact that we increment it when we disconnect a
* peer that's using it */
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
kptllnd_data.kptl_incarnation = (((__u64)tv.tv_sec) * 1000000) +
tv.tv_usec;
CDEBUG(D_NET, "Incarnation="LPX64"\n", kptllnd_data.kptl_incarnation);
kptllnd_data.kptl_nak_msg->ptlm_srcpid = the_lnet.ln_pid;
kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
- rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
- init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
- atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
- INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
- INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
+ cfs_rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
+ cfs_init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
+ cfs_atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
/* Allocate and setup the peer hash table */
kptllnd_data.kptl_peer_hash_size =
*kptllnd_tunables.kptl_peer_hash_table_size;
LIBCFS_ALLOC(kptllnd_data.kptl_peers,
- sizeof(struct list_head) *
+ sizeof(cfs_list_t) *
kptllnd_data.kptl_peer_hash_size);
if (kptllnd_data.kptl_peers == NULL) {
CERROR("Failed to allocate space for peer hash table size=%d\n",
goto failed;
}
for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
- INIT_LIST_HEAD(&kptllnd_data.kptl_peers[i]);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_peers[i]);
kptllnd_rx_buffer_pool_init(&kptllnd_data.kptl_rx_buffer_pool);
- kptllnd_data.kptl_rx_cache =
+ kptllnd_data.kptl_rx_cache =
cfs_mem_cache_create("ptllnd_rx",
sizeof(kptl_rx_t) +
*kptllnd_tunables.kptl_max_msg_size,
if (*kptllnd_tunables.kptl_checksum)
CWARN("Checksumming enabled\n");
-
+
CDEBUG(D_NET, "<<< kptllnd_base_startup SUCCESS\n");
return 0;
* multiple NIs */
kptllnd_data.kptl_nak_msg->ptlm_srcnid = ni->ni_nid;
- atomic_set(&net->net_refcount, 1);
- write_lock(&kptllnd_data.kptl_net_rw_lock);
- list_add_tail(&net->net_list, &kptllnd_data.kptl_nets);
- write_unlock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_atomic_set(&net->net_refcount, 1);
+ cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_list_add_tail(&net->net_list, &kptllnd_data.kptl_nets);
+ cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
return 0;
failed:
LASSERT (kptllnd_data.kptl_init == PTLLND_INIT_ALL);
CDEBUG(D_MALLOC, "before LND cleanup: kmem %d\n",
- atomic_read (&libcfs_kmemory));
+ cfs_atomic_read (&libcfs_kmemory));
if (net == NULL)
goto out;
LASSERT (ni == net->net_ni);
LASSERT (!net->net_shutdown);
- LASSERT (!list_empty(&net->net_list));
- LASSERT (atomic_read(&net->net_refcount) != 0);
+ LASSERT (!cfs_list_empty(&net->net_list));
+ LASSERT (cfs_atomic_read(&net->net_refcount) != 0);
ni->ni_data = NULL;
net->net_ni = NULL;
- write_lock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
kptllnd_net_decref(net);
- list_del_init(&net->net_list);
- write_unlock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_list_del_init(&net->net_list);
+ cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
/* Can't nuke peers here - they are shared among all NIs */
- write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
net->net_shutdown = 1; /* Order with peer creation */
- write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
- i = 2;
- while (atomic_read(&net->net_refcount) != 0) {
+ i = 2;
+ while (cfs_atomic_read(&net->net_refcount) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for %d references to drop\n",
- atomic_read(&net->net_refcount));
+ cfs_atomic_read(&net->net_refcount));
- cfs_pause(cfs_time_seconds(1));
+ cfs_pause(cfs_time_seconds(1));
}
LIBCFS_FREE(net, sizeof(*net));
out:
/* NB no locking since I don't race with writers */
- if (list_empty(&kptllnd_data.kptl_nets))
+ if (cfs_list_empty(&kptllnd_data.kptl_nets))
kptllnd_base_shutdown();
CDEBUG(D_MALLOC, "after LND cleanup: kmem %d\n",
- atomic_read (&libcfs_kmemory));
+ cfs_atomic_read (&libcfs_kmemory));
return;
}
//#define PJK_DEBUGGING
#ifdef CONFIG_SMP
-# define PTLLND_N_SCHED num_online_cpus() /* # schedulers */
+# define PTLLND_N_SCHED cfs_num_online_cpus() /* # schedulers */
#else
# define PTLLND_N_SCHED 1 /* # schedulers */
#endif
typedef struct kptl_rx /* receive message */
{
- struct list_head rx_list; /* queue for attention */
+ cfs_list_t rx_list; /* queue for attention */
kptl_rx_buffer_t *rx_rxb; /* the rx buffer pointer */
kptl_msg_t *rx_msg; /* received message */
int rx_nob; /* received message size */
typedef struct kptl_rx_buffer_pool
{
- spinlock_t rxbp_lock;
- struct list_head rxbp_list; /* all allocated buffers */
+ cfs_spinlock_t rxbp_lock;
+ cfs_list_t rxbp_list; /* all allocated buffers */
int rxbp_count; /* # allocated buffers */
int rxbp_reserved; /* # requests to buffer */
int rxbp_shutdown; /* shutdown flag */
struct kptl_rx_buffer
{
- kptl_rx_buffer_pool_t *rxb_pool;
- struct list_head rxb_list; /* for the rxb_pool list */
- struct list_head rxb_repost_list;/* for the kptl_sched_rxbq list */
- int rxb_posted:1; /* on the net */
- int rxb_idle:1; /* all done */
- kptl_eventarg_t rxb_eventarg; /* event->md.user_ptr */
- int rxb_refcount; /* reference count */
- ptl_handle_md_t rxb_mdh; /* the portals memory descriptor (MD) handle */
- char *rxb_buffer; /* the buffer */
+ kptl_rx_buffer_pool_t *rxb_pool;
+ cfs_list_t rxb_list; /* for the rxb_pool list */
+ cfs_list_t rxb_repost_list;/* for the kptl_sched_rxbq list */
+ int rxb_posted:1; /* on the net */
+ int rxb_idle:1; /* all done */
+ kptl_eventarg_t rxb_eventarg; /* event->md.user_ptr */
+ int rxb_refcount; /* reference count */
+ ptl_handle_md_t rxb_mdh; /* the portals memory descriptor (MD) handle */
+ char *rxb_buffer; /* the buffer */
};
typedef struct kptl_tx /* transmit message */
{
- struct list_head tx_list; /* queue on idle_txs etc */
- atomic_t tx_refcount; /* reference count*/
+ cfs_list_t tx_list; /* queue on idle_txs etc */
+ cfs_atomic_t tx_refcount; /* reference count*/
enum kptl_tx_type tx_type; /* small msg/{put,get}{req,resp} */
int tx_active:1; /* queued on the peer */
int tx_idle:1; /* on the free list */
struct kptl_peer
{
- struct list_head peer_list;
- atomic_t peer_refcount; /* The current references */
+ cfs_list_t peer_list;
+ cfs_atomic_t peer_refcount; /* The current references */
enum kptllnd_peer_state peer_state;
- spinlock_t peer_lock; /* serialize */
- struct list_head peer_noops; /* PTLLND_MSG_TYPE_NOOP txs */
- struct list_head peer_sendq; /* txs waiting for mh handles */
- struct list_head peer_activeq; /* txs awaiting completion */
+ cfs_spinlock_t peer_lock; /* serialize */
+ cfs_list_t peer_noops; /* PTLLND_MSG_TYPE_NOOP txs */
+ cfs_list_t peer_sendq; /* txs waiting for mh handles */
+ cfs_list_t peer_activeq; /* txs awaiting completion */
lnet_process_id_t peer_id; /* Peer's LNET id */
ptl_process_id_t peer_ptlid; /* Peer's portals id */
__u64 peer_incarnation; /* peer's incarnation */
{
int kptl_init; /* initialisation state */
volatile int kptl_shutdown; /* shut down? */
- atomic_t kptl_nthreads; /* # live threads */
+ cfs_atomic_t kptl_nthreads; /* # live threads */
ptl_handle_ni_t kptl_nih; /* network inteface handle */
ptl_process_id_t kptl_portals_id; /* Portals ID of interface */
__u64 kptl_incarnation; /* which one am I */
ptl_handle_eq_t kptl_eqh; /* Event Queue (EQ) */
- rwlock_t kptl_net_rw_lock; /* serialise... */
- struct list_head kptl_nets; /* kptl_net instances */
+ cfs_rwlock_t kptl_net_rw_lock; /* serialise... */
+ cfs_list_t kptl_nets; /* kptl_net instances */
- spinlock_t kptl_sched_lock; /* serialise... */
- wait_queue_head_t kptl_sched_waitq; /* schedulers sleep here */
- struct list_head kptl_sched_txq; /* tx requiring attention */
- struct list_head kptl_sched_rxq; /* rx requiring attention */
- struct list_head kptl_sched_rxbq; /* rxb requiring reposting */
+ cfs_spinlock_t kptl_sched_lock; /* serialise... */
+ cfs_waitq_t kptl_sched_waitq; /* schedulers sleep here */
+ cfs_list_t kptl_sched_txq; /* tx requiring attention */
+ cfs_list_t kptl_sched_rxq; /* rx requiring attention */
+ cfs_list_t kptl_sched_rxbq; /* rxb requiring reposting */
- wait_queue_head_t kptl_watchdog_waitq; /* watchdog sleeps here */
- atomic_t kptl_needs_ptltrace; /* watchdog thread to dump ptltrace */
+ cfs_waitq_t kptl_watchdog_waitq; /* watchdog sleeps here */
+ cfs_atomic_t kptl_needs_ptltrace; /* watchdog thread to dump ptltrace */
kptl_rx_buffer_pool_t kptl_rx_buffer_pool; /* rx buffer pool */
cfs_mem_cache_t* kptl_rx_cache; /* rx descripter cache */
- atomic_t kptl_ntx; /* # tx descs allocated */
- spinlock_t kptl_tx_lock; /* serialise idle tx list*/
- struct list_head kptl_idle_txs; /* idle tx descriptors */
+ cfs_atomic_t kptl_ntx; /* # tx descs allocated */
+ cfs_spinlock_t kptl_tx_lock; /* serialise idle tx list*/
+ cfs_list_t kptl_idle_txs; /* idle tx descriptors */
- rwlock_t kptl_peer_rw_lock; /* lock for peer table */
- struct list_head *kptl_peers; /* hash table of all my known peers */
- struct list_head kptl_closing_peers; /* peers being closed */
- struct list_head kptl_zombie_peers; /* peers waiting for refs to drain */
+ cfs_rwlock_t kptl_peer_rw_lock; /* lock for peer table */
+ cfs_list_t *kptl_peers; /* hash table of all my known peers */
+ cfs_list_t kptl_closing_peers; /* peers being closed */
+ cfs_list_t kptl_zombie_peers; /* peers waiting for refs to drain */
int kptl_peer_hash_size; /* size of kptl_peers */
int kptl_npeers; /* # peers extant */
int kptl_n_active_peers; /* # active peers */
int kptl_expected_peers; /* # peers I can buffer HELLOs from */
kptl_msg_t *kptl_nak_msg; /* common NAK message */
- spinlock_t kptl_ptlid2str_lock; /* serialise str ops */
+ cfs_spinlock_t kptl_ptlid2str_lock; /* serialise str ops */
};
struct kptl_net
{
- struct list_head net_list; /* chain on kptl_data:: kptl_nets */
+ cfs_list_t net_list; /* chain on kptl_data:: kptl_nets */
lnet_ni_t *net_ni;
- atomic_t net_refcount; /* # current references */
+ cfs_atomic_t net_refcount; /* # current references */
int net_shutdown; /* lnd_shutdown called */
};
{
#ifdef CRAY_XT3
if (*kptllnd_tunables.kptl_ptltrace_on_fail) {
- atomic_inc(&kptllnd_data.kptl_needs_ptltrace);
- wake_up(&kptllnd_data.kptl_watchdog_waitq);
+ cfs_atomic_inc(&kptllnd_data.kptl_needs_ptltrace);
+ cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);
}
#endif
}
default:
LBUG();
case PTLLND_EVENTARG_TYPE_BUF:
- return list_entry(eva, kptl_rx_buffer_t, rxb_eventarg);
+ return cfs_list_entry(eva, kptl_rx_buffer_t, rxb_eventarg);
case PTLLND_EVENTARG_TYPE_RDMA:
- return list_entry(eva, kptl_tx_t, tx_rdma_eventarg);
+ return cfs_list_entry(eva, kptl_tx_t, tx_rdma_eventarg);
case PTLLND_EVENTARG_TYPE_MSG:
- return list_entry(eva, kptl_tx_t, tx_msg_eventarg);
+ return cfs_list_entry(eva, kptl_tx_t, tx_msg_eventarg);
}
}
kptllnd_rx_buffer_addref(kptl_rx_buffer_t *rxb)
{
unsigned long flags;
-
- spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
+
+ cfs_spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
rxb->rxb_refcount++;
- spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
}
static inline void
kptllnd_rx_buffer_decref_locked(kptl_rx_buffer_t *rxb)
{
if (--(rxb->rxb_refcount) == 0) {
- spin_lock(&kptllnd_data.kptl_sched_lock);
-
- list_add_tail(&rxb->rxb_repost_list,
- &kptllnd_data.kptl_sched_rxbq);
- wake_up(&kptllnd_data.kptl_sched_waitq);
+ cfs_spin_lock(&kptllnd_data.kptl_sched_lock);
+
+ cfs_list_add_tail(&rxb->rxb_repost_list,
+ &kptllnd_data.kptl_sched_rxbq);
+ cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
- spin_unlock(&kptllnd_data.kptl_sched_lock);
+ cfs_spin_unlock(&kptllnd_data.kptl_sched_lock);
}
}
{
unsigned long flags;
int count;
-
- spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
+
+ cfs_spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
count = --(rxb->rxb_refcount);
- spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
if (count == 0)
kptllnd_rx_buffer_post(rxb);
static inline void
kptllnd_peer_addref (kptl_peer_t *peer)
{
- atomic_inc(&peer->peer_refcount);
+ cfs_atomic_inc(&peer->peer_refcount);
}
static inline void
kptllnd_peer_decref (kptl_peer_t *peer)
{
- if (atomic_dec_and_test(&peer->peer_refcount))
+ if (cfs_atomic_dec_and_test(&peer->peer_refcount))
kptllnd_peer_destroy(peer);
}
static inline void
kptllnd_net_addref (kptl_net_t *net)
{
- LASSERT (atomic_read(&net->net_refcount) > 0);
- atomic_inc(&net->net_refcount);
+ LASSERT (cfs_atomic_read(&net->net_refcount) > 0);
+ cfs_atomic_inc(&net->net_refcount);
}
static inline void
kptllnd_net_decref (kptl_net_t *net)
{
- LASSERT (atomic_read(&net->net_refcount) > 0);
- atomic_dec(&net->net_refcount);
+ LASSERT (cfs_atomic_read(&net->net_refcount) > 0);
+ cfs_atomic_dec(&net->net_refcount);
}
static inline void
-kptllnd_set_tx_peer(kptl_tx_t *tx, kptl_peer_t *peer)
+kptllnd_set_tx_peer(kptl_tx_t *tx, kptl_peer_t *peer)
{
LASSERT (tx->tx_peer == NULL);
-
+
kptllnd_peer_addref(peer);
tx->tx_peer = peer;
}
-static inline struct list_head *
+static inline cfs_list_t *
kptllnd_nid2peerlist(lnet_nid_t nid)
{
/* Only one copy of peer state for all logical peers, so the net part
kptl_peer_t *peer;
unsigned long flags;
- read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
peer = kptllnd_id2peer_locked(id);
- read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
return peer;
}
int kptllnd_setup_tx_descs(void);
void kptllnd_cleanup_tx_descs(void);
void kptllnd_tx_fini(kptl_tx_t *tx);
-void kptllnd_cancel_txlist(struct list_head *peerq, struct list_head *txs);
-void kptllnd_restart_txs(kptl_net_t *net, lnet_process_id_t id, struct list_head *restarts);
+void kptllnd_cancel_txlist(cfs_list_t *peerq, cfs_list_t *txs);
+void kptllnd_restart_txs(kptl_net_t *net, lnet_process_id_t id,
+ cfs_list_t *restarts);
kptl_tx_t *kptllnd_get_idle_tx(enum kptl_tx_type purpose);
void kptllnd_tx_callback(ptl_event_t *ev);
const char *kptllnd_tx_typestr(int type);
static inline void
kptllnd_tx_addref(kptl_tx_t *tx)
{
- atomic_inc(&tx->tx_refcount);
+ cfs_atomic_inc(&tx->tx_refcount);
}
-static inline void
+static inline void
kptllnd_tx_decref(kptl_tx_t *tx)
{
- LASSERT (!in_interrupt()); /* Thread context only */
+ LASSERT (!cfs_in_interrupt()); /* Thread context only */
- if (atomic_dec_and_test(&tx->tx_refcount))
+ if (cfs_atomic_dec_and_test(&tx->tx_refcount))
kptllnd_tx_fini(tx);
}
return -EIO;
}
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
tx->tx_lnet_msg = lntmsg;
/* lnet_finalize() will be called when tx is torn down, so I must
* return success from here on... */
- tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * HZ);
+ tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * CFS_HZ);
tx->tx_rdma_mdh = mdh;
tx->tx_active = 1;
- list_add_tail(&tx->tx_list, &peer->peer_activeq);
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
/* peer has now got my ref on 'tx' */
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
tx->tx_tposted = jiffies;
LASSERT (payload_niov <= LNET_MAX_IOV);
LASSERT (payload_niov <= PTL_MD_MAX_IOV); /* !!! */
LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
rc = kptllnd_find_target(net, target, &peer);
if (rc != 0)
LASSERT (mlen <= rlen);
LASSERT (mlen >= 0);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (!(kiov != NULL && iov != NULL)); /* never both */
LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
switch (eva->eva_type) {
default:
LBUG();
-
+
case PTLLND_EVENTARG_TYPE_MSG:
case PTLLND_EVENTARG_TYPE_RDMA:
kptllnd_tx_callback(ev);
break;
-
+
case PTLLND_EVENTARG_TYPE_BUF:
kptllnd_rx_buffer_callback(ev);
break;
void
kptllnd_thread_fini (void)
{
- atomic_dec(&kptllnd_data.kptl_nthreads);
+ cfs_atomic_dec(&kptllnd_data.kptl_nthreads);
}
int
{
long pid;
- atomic_inc(&kptllnd_data.kptl_nthreads);
+ cfs_atomic_inc(&kptllnd_data.kptl_nthreads);
- pid = kernel_thread (fn, arg, 0);
+ pid = cfs_kernel_thread (fn, arg, 0);
if (pid >= 0)
return 0;
-
- CERROR("Failed to start kernel_thread: error %d\n", (int)pid);
+
+ CERROR("Failed to start cfs_kernel_thread: error %d\n", (int)pid);
kptllnd_thread_fini();
return (int)pid;
}
{
int id = (long)arg;
char name[16];
- wait_queue_t waitlink;
+ cfs_waitlink_t waitlink;
int stamp = 0;
int peer_index = 0;
unsigned long deadline = jiffies;
cfs_daemonize(name);
cfs_block_allsigs();
- init_waitqueue_entry(&waitlink, current);
+ cfs_waitlink_init(&waitlink);
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
/* add a check for needs ptltrace
* yes, this is blatant hijacking of this thread
- * we can't dump directly from tx or rx _callbacks as it deadlocks portals
- * and takes out the node
+ * we can't dump directly from tx or rx _callbacks as it
+ * deadlocks portals and takes out the node
*/
- if (atomic_read(&kptllnd_data.kptl_needs_ptltrace)) {
+ if (cfs_atomic_read(&kptllnd_data.kptl_needs_ptltrace)) {
#ifdef CRAY_XT3
kptllnd_dump_ptltrace();
/* we only dump once, no matter how many pending */
- atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
+ cfs_atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
#else
LBUG();
#endif
kptllnd_data.kptl_peer_hash_size;
}
- deadline += p * HZ;
+ deadline += p * CFS_HZ;
stamp++;
continue;
}
kptllnd_handle_closing_peers();
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&kptllnd_data.kptl_watchdog_waitq,
- &waitlink);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add_exclusive(&kptllnd_data.kptl_watchdog_waitq,
+ &waitlink);
- schedule_timeout(timeout);
-
- set_current_state (TASK_RUNNING);
- remove_wait_queue(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
+ cfs_waitq_timedwait(&waitlink, CFS_TASK_INTERRUPTIBLE, timeout);
+
+ cfs_set_current_state (CFS_TASK_RUNNING);
+ cfs_waitq_del(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
}
kptllnd_thread_fini();
{
int id = (long)arg;
char name[16];
- wait_queue_t waitlink;
+ cfs_waitlink_t waitlink;
unsigned long flags;
int did_something;
int counter = 0;
cfs_daemonize(name);
cfs_block_allsigs();
- init_waitqueue_entry(&waitlink, current);
+ cfs_waitlink_init(&waitlink);
- spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
did_something = 0;
- if (!list_empty(&kptllnd_data.kptl_sched_rxq)) {
- rx = list_entry (kptllnd_data.kptl_sched_rxq.next,
- kptl_rx_t, rx_list);
- list_del(&rx->rx_list);
-
- spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
- flags);
+ if (!cfs_list_empty(&kptllnd_data.kptl_sched_rxq)) {
+ rx = cfs_list_entry (kptllnd_data.kptl_sched_rxq.next,
+ kptl_rx_t, rx_list);
+ cfs_list_del(&rx->rx_list);
+
+ cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ kptl_sched_lock,
+ flags);
kptllnd_rx_parse(rx);
did_something = 1;
- spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ flags);
}
- if (!list_empty(&kptllnd_data.kptl_sched_rxbq)) {
- rxb = list_entry (kptllnd_data.kptl_sched_rxbq.next,
- kptl_rx_buffer_t, rxb_repost_list);
- list_del(&rxb->rxb_repost_list);
+ if (!cfs_list_empty(&kptllnd_data.kptl_sched_rxbq)) {
+ rxb = cfs_list_entry (kptllnd_data.kptl_sched_rxbq.next,
+ kptl_rx_buffer_t,
+ rxb_repost_list);
+ cfs_list_del(&rxb->rxb_repost_list);
- spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
- flags);
+ cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ kptl_sched_lock,
+ flags);
kptllnd_rx_buffer_post(rxb);
did_something = 1;
- spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ flags);
}
- if (!list_empty(&kptllnd_data.kptl_sched_txq)) {
- tx = list_entry (kptllnd_data.kptl_sched_txq.next,
- kptl_tx_t, tx_list);
- list_del_init(&tx->tx_list);
+ if (!cfs_list_empty(&kptllnd_data.kptl_sched_txq)) {
+ tx = cfs_list_entry (kptllnd_data.kptl_sched_txq.next,
+ kptl_tx_t, tx_list);
+ cfs_list_del_init(&tx->tx_list);
- spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ kptl_sched_lock, flags);
kptllnd_tx_fini(tx);
did_something = 1;
- spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ flags);
}
if (did_something) {
continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&kptllnd_data.kptl_sched_waitq,
- &waitlink);
- spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
+ &waitlink);
+ cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+ flags);
if (!did_something)
- schedule();
+ cfs_waitq_wait(&waitlink, CFS_TASK_INTERRUPTIBLE);
else
- cond_resched();
+ cfs_cond_resched();
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&kptllnd_data.kptl_sched_waitq, &waitlink);
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
- spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
counter = 0;
}
- spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
kptllnd_thread_fini();
return 0;
#include <libcfs/list.h>
static int
-kptllnd_count_queue(struct list_head *q)
+kptllnd_count_queue(cfs_list_t *q)
{
- struct list_head *e;
- int n = 0;
-
- list_for_each(e, q) {
+ cfs_list_t *e;
+ int n = 0;
+
+ cfs_list_for_each(e, q) {
n++;
}
}
int
-kptllnd_get_peer_info(int index,
+kptllnd_get_peer_info(int index,
lnet_process_id_t *id,
int *state, int *sent_hello,
int *refcount, __u64 *incarnation,
__u64 *next_matchbits, __u64 *last_matchbits_seen,
int *nsendq, int *nactiveq,
- int *credits, int *outstanding_credits)
+ int *credits, int *outstanding_credits)
{
- rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+ cfs_rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
unsigned long flags;
- struct list_head *ptmp;
+ cfs_list_t *ptmp;
kptl_peer_t *peer;
int i;
int rc = -ENOENT;
- read_lock_irqsave(g_lock, flags);
+ cfs_read_lock_irqsave(g_lock, flags);
for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++) {
- list_for_each (ptmp, &kptllnd_data.kptl_peers[i]) {
- peer = list_entry(ptmp, kptl_peer_t, peer_list);
+ cfs_list_for_each (ptmp, &kptllnd_data.kptl_peers[i]) {
+ peer = cfs_list_entry(ptmp, kptl_peer_t, peer_list);
if (index-- > 0)
continue;
-
+
*id = peer->peer_id;
*state = peer->peer_state;
*sent_hello = peer->peer_sent_hello;
- *refcount = atomic_read(&peer->peer_refcount);
+ *refcount = cfs_atomic_read(&peer->peer_refcount);
*incarnation = peer->peer_incarnation;
- spin_lock(&peer->peer_lock);
+ cfs_spin_lock(&peer->peer_lock);
*next_matchbits = peer->peer_next_matchbits;
*last_matchbits_seen = peer->peer_last_matchbits_seen;
*nsendq = kptllnd_count_queue(&peer->peer_sendq);
*nactiveq = kptllnd_count_queue(&peer->peer_activeq);
- spin_unlock(&peer->peer_lock);
+ cfs_spin_unlock(&peer->peer_lock);
rc = 0;
goto out;
}
}
-
+
out:
- read_unlock_irqrestore(g_lock, flags);
+ cfs_read_unlock_irqrestore(g_lock, flags);
return rc;
}
LASSERT (peer->peer_state == PEER_STATE_WAITING_HELLO ||
peer->peer_state == PEER_STATE_ACTIVE);
-
+
kptllnd_data.kptl_n_active_peers++;
- atomic_inc(&peer->peer_refcount); /* +1 ref for the list */
+ cfs_atomic_inc(&peer->peer_refcount); /* +1 ref for the list */
/* NB add to HEAD of peer list for MRU order!
* (see kptllnd_cull_peertable) */
- list_add(&peer->peer_list, kptllnd_nid2peerlist(peer->peer_id.nid));
+ cfs_list_add(&peer->peer_list, kptllnd_nid2peerlist(peer->peer_id.nid));
}
void
/* I'm about to add a new peer with this portals ID to the peer table,
* so (a) this peer should not exist already and (b) I want to leave at
* most (max_procs_per_nid - 1) peers with this NID in the table. */
- struct list_head *peers = kptllnd_nid2peerlist(pid.nid);
- int cull_count = *kptllnd_tunables.kptl_max_procs_per_node;
- int count;
- struct list_head *tmp;
- struct list_head *nxt;
- kptl_peer_t *peer;
-
+ cfs_list_t *peers = kptllnd_nid2peerlist(pid.nid);
+ int cull_count = *kptllnd_tunables.kptl_max_procs_per_node;
+ int count;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
+ kptl_peer_t *peer;
+
count = 0;
- list_for_each_safe (tmp, nxt, peers) {
+ cfs_list_for_each_safe (tmp, nxt, peers) {
/* NB I rely on kptllnd_peer_add_peertable_locked to add peers
* in MRU order */
- peer = list_entry(tmp, kptl_peer_t, peer_list);
+ peer = cfs_list_entry(tmp, kptl_peer_t, peer_list);
if (LNET_NIDADDR(peer->peer_id.nid) != LNET_NIDADDR(pid.nid))
continue;
memset(peer, 0, sizeof(*peer)); /* zero flags etc */
- INIT_LIST_HEAD (&peer->peer_noops);
- INIT_LIST_HEAD (&peer->peer_sendq);
- INIT_LIST_HEAD (&peer->peer_activeq);
- spin_lock_init (&peer->peer_lock);
+ CFS_INIT_LIST_HEAD (&peer->peer_noops);
+ CFS_INIT_LIST_HEAD (&peer->peer_sendq);
+ CFS_INIT_LIST_HEAD (&peer->peer_activeq);
+ cfs_spin_lock_init (&peer->peer_lock);
peer->peer_state = PEER_STATE_ALLOCATED;
peer->peer_error = 0;
peer->peer_sent_credits = 1; /* HELLO credit is implicit */
peer->peer_max_msg_size = PTLLND_MIN_BUFFER_SIZE; /* until we know better */
- atomic_set(&peer->peer_refcount, 1); /* 1 ref for caller */
+ cfs_atomic_set(&peer->peer_refcount, 1); /* 1 ref for caller */
- write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
peer->peer_myincarnation = kptllnd_data.kptl_incarnation;
/* Only increase # peers under lock, to guarantee we dont grow it
* during shutdown */
if (net->net_shutdown) {
- write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
LIBCFS_FREE(peer, sizeof(*peer));
return NULL;
}
kptllnd_data.kptl_npeers++;
- write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
return peer;
}
kptllnd_peer_destroy (kptl_peer_t *peer)
{
unsigned long flags;
-
+
CDEBUG(D_NET, "Peer=%p\n", peer);
- LASSERT (!in_interrupt());
- LASSERT (atomic_read(&peer->peer_refcount) == 0);
+ LASSERT (!cfs_in_interrupt());
+ LASSERT (cfs_atomic_read(&peer->peer_refcount) == 0);
LASSERT (peer->peer_state == PEER_STATE_ALLOCATED ||
peer->peer_state == PEER_STATE_ZOMBIE);
- LASSERT (list_empty(&peer->peer_noops));
- LASSERT (list_empty(&peer->peer_sendq));
- LASSERT (list_empty(&peer->peer_activeq));
+ LASSERT (cfs_list_empty(&peer->peer_noops));
+ LASSERT (cfs_list_empty(&peer->peer_sendq));
+ LASSERT (cfs_list_empty(&peer->peer_activeq));
- write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
if (peer->peer_state == PEER_STATE_ZOMBIE)
- list_del(&peer->peer_list);
+ cfs_list_del(&peer->peer_list);
kptllnd_data.kptl_npeers--;
- write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
LIBCFS_FREE (peer, sizeof (*peer));
}
void
-kptllnd_cancel_txlist (struct list_head *peerq, struct list_head *txs)
+kptllnd_cancel_txlist (cfs_list_t *peerq, cfs_list_t *txs)
{
- struct list_head *tmp;
- struct list_head *nxt;
- kptl_tx_t *tx;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
+ kptl_tx_t *tx;
- list_for_each_safe (tmp, nxt, peerq) {
- tx = list_entry(tmp, kptl_tx_t, tx_list);
+ cfs_list_for_each_safe (tmp, nxt, peerq) {
+ tx = cfs_list_entry(tmp, kptl_tx_t, tx_list);
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, txs);
+ cfs_list_del(&tx->tx_list);
+ cfs_list_add_tail(&tx->tx_list, txs);
tx->tx_status = -EIO;
tx->tx_active = 0;
}
void
-kptllnd_peer_cancel_txs(kptl_peer_t *peer, struct list_head *txs)
+kptllnd_peer_cancel_txs(kptl_peer_t *peer, cfs_list_t *txs)
{
unsigned long flags;
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
kptllnd_cancel_txlist(&peer->peer_noops, txs);
kptllnd_cancel_txlist(&peer->peer_sendq, txs);
kptllnd_cancel_txlist(&peer->peer_activeq, txs);
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
}
void
{
/* This is racy, but everyone's only writing cfs_time_current() */
peer->peer_last_alive = cfs_time_current();
- mb();
+ cfs_mb();
}
void
int nnets = 0;
int error = 0;
cfs_time_t last_alive = 0;
-
- spin_lock_irqsave(&peer->peer_lock, flags);
+
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
if (peer->peer_error != 0) {
error = peer->peer_error;
peer->peer_error = 0;
last_alive = peer->peer_last_alive;
}
-
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
if (error == 0)
return;
- read_lock(&kptllnd_data.kptl_net_rw_lock);
- list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list)
+ cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list)
nnets++;
- read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
if (nnets == 0) /* shutdown in progress */
return;
}
memset(nets, 0, nnets * sizeof(*nets));
- read_lock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
i = 0;
- list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
+ cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
LASSERT (i < nnets);
nets[i] = net;
kptllnd_net_addref(net);
i++;
}
- read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
for (i = 0; i < nnets; i++) {
lnet_nid_t peer_nid;
kptllnd_handle_closing_peers ()
{
unsigned long flags;
- struct list_head txs;
+ cfs_list_t txs;
kptl_peer_t *peer;
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
kptl_tx_t *tx;
int idle;
/* Check with a read lock first to avoid blocking anyone */
- read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
- idle = list_empty(&kptllnd_data.kptl_closing_peers) &&
- list_empty(&kptllnd_data.kptl_zombie_peers);
- read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ idle = cfs_list_empty(&kptllnd_data.kptl_closing_peers) &&
+ cfs_list_empty(&kptllnd_data.kptl_zombie_peers);
+ cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
if (idle)
return;
- INIT_LIST_HEAD(&txs);
+ CFS_INIT_LIST_HEAD(&txs);
- write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
/* Cancel txs on all zombie peers. NB anyone dropping the last peer
* ref removes it from this list, so I musn't drop the lock while
* scanning it. */
- list_for_each (tmp, &kptllnd_data.kptl_zombie_peers) {
- peer = list_entry (tmp, kptl_peer_t, peer_list);
+ cfs_list_for_each (tmp, &kptllnd_data.kptl_zombie_peers) {
+ peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
LASSERT (peer->peer_state == PEER_STATE_ZOMBIE);
* I'm the only one removing from this list, but peers can be added on
* the end any time I drop the lock. */
- list_for_each_safe (tmp, nxt, &kptllnd_data.kptl_closing_peers) {
- peer = list_entry (tmp, kptl_peer_t, peer_list);
+ cfs_list_for_each_safe (tmp, nxt, &kptllnd_data.kptl_closing_peers) {
+ peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
LASSERT (peer->peer_state == PEER_STATE_CLOSING);
- list_del(&peer->peer_list);
- list_add_tail(&peer->peer_list,
- &kptllnd_data.kptl_zombie_peers);
+ cfs_list_del(&peer->peer_list);
+ cfs_list_add_tail(&peer->peer_list,
+ &kptllnd_data.kptl_zombie_peers);
peer->peer_state = PEER_STATE_ZOMBIE;
- write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
kptllnd_peer_notify(peer);
kptllnd_peer_cancel_txs(peer, &txs);
kptllnd_peer_decref(peer);
- write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
}
- write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
/* Drop peer's ref on all cancelled txs. This will get
* kptllnd_tx_fini() to abort outstanding comms if necessary. */
- list_for_each_safe (tmp, nxt, &txs) {
- tx = list_entry(tmp, kptl_tx_t, tx_list);
- list_del(&tx->tx_list);
+ cfs_list_for_each_safe (tmp, nxt, &txs) {
+ tx = cfs_list_entry(tmp, kptl_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
kptllnd_tx_decref(tx);
}
}
kptllnd_data.kptl_n_active_peers--;
LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
- list_del(&peer->peer_list);
+ cfs_list_del(&peer->peer_list);
kptllnd_peer_unreserve_buffers();
peer->peer_error = why; /* stash 'why' only on first close */
peer->peer_state = PEER_STATE_CLOSING;
/* Schedule for immediate attention, taking peer table's ref */
- list_add_tail(&peer->peer_list,
- &kptllnd_data.kptl_closing_peers);
- wake_up(&kptllnd_data.kptl_watchdog_waitq);
+ cfs_list_add_tail(&peer->peer_list,
+ &kptllnd_data.kptl_closing_peers);
+ cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);
break;
case PEER_STATE_ZOMBIE:
{
unsigned long flags;
- write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
kptllnd_peer_close_locked(peer, why);
- write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
}
int
kptllnd_peer_del(lnet_process_id_t id)
{
- struct list_head *ptmp;
- struct list_head *pnxt;
+ cfs_list_t *ptmp;
+ cfs_list_t *pnxt;
kptl_peer_t *peer;
int lo;
int hi;
* wildcard (LNET_NID_ANY) then look at all of the buckets
*/
if (id.nid != LNET_NID_ANY) {
- struct list_head *l = kptllnd_nid2peerlist(id.nid);
-
+ cfs_list_t *l = kptllnd_nid2peerlist(id.nid);
+
lo = hi = l - kptllnd_data.kptl_peers;
} else {
if (id.pid != LNET_PID_ANY)
return -EINVAL;
-
+
lo = 0;
hi = kptllnd_data.kptl_peer_hash_size - 1;
}
again:
- read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &kptllnd_data.kptl_peers[i]) {
- peer = list_entry (ptmp, kptl_peer_t, peer_list);
+ cfs_list_for_each_safe (ptmp, pnxt,
+ &kptllnd_data.kptl_peers[i]) {
+ peer = cfs_list_entry (ptmp, kptl_peer_t, peer_list);
if (!(id.nid == LNET_NID_ANY ||
(LNET_NIDADDR(peer->peer_id.nid) == LNET_NIDADDR(id.nid) &&
kptllnd_peer_addref(peer); /* 1 ref for me... */
- read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
- flags);
+ cfs_read_unlock_irqrestore(&kptllnd_data. \
+ kptl_peer_rw_lock,
+ flags);
kptllnd_peer_close(peer, 0);
kptllnd_peer_decref(peer); /* ...until here */
}
}
- read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
return (rc);
}
/* CAVEAT EMPTOR: I take over caller's ref on 'tx' */
unsigned long flags;
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
/* Ensure HELLO is sent first */
if (tx->tx_msg->ptlm_type == PTLLND_MSG_TYPE_NOOP)
- list_add(&tx->tx_list, &peer->peer_noops);
+ cfs_list_add(&tx->tx_list, &peer->peer_noops);
else if (tx->tx_msg->ptlm_type == PTLLND_MSG_TYPE_HELLO)
- list_add(&tx->tx_list, &peer->peer_sendq);
+ cfs_list_add(&tx->tx_list, &peer->peer_sendq);
else
- list_add_tail(&tx->tx_list, &peer->peer_sendq);
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_sendq);
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
}
}
- tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * HZ);
+ tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * CFS_HZ);
tx->tx_active = 1;
tx->tx_msg_mdh = msg_mdh;
kptllnd_queue_tx(peer, tx);
/* NB "restarts" comes from peer_sendq of a single peer */
void
-kptllnd_restart_txs (kptl_net_t *net, lnet_process_id_t target, struct list_head *restarts)
+kptllnd_restart_txs (kptl_net_t *net, lnet_process_id_t target,
+ cfs_list_t *restarts)
{
kptl_tx_t *tx;
kptl_tx_t *tmp;
kptl_peer_t *peer;
- LASSERT (!list_empty(restarts));
+ LASSERT (!cfs_list_empty(restarts));
if (kptllnd_find_target(net, target, &peer) != 0)
peer = NULL;
- list_for_each_entry_safe (tx, tmp, restarts, tx_list) {
+ cfs_list_for_each_entry_safe (tx, tmp, restarts, tx_list) {
LASSERT (tx->tx_peer != NULL);
LASSERT (tx->tx_type == TX_TYPE_GET_REQUEST ||
tx->tx_type == TX_TYPE_PUT_REQUEST ||
tx->tx_type == TX_TYPE_SMALL_MESSAGE);
- list_del_init(&tx->tx_list);
+ cfs_list_del_init(&tx->tx_list);
if (peer == NULL ||
tx->tx_msg->ptlm_type == PTLLND_MSG_TYPE_HELLO) {
{
if (!peer->peer_sent_hello ||
peer->peer_credits == 0 ||
- !list_empty(&peer->peer_noops) ||
+ !cfs_list_empty(&peer->peer_noops) ||
peer->peer_outstanding_credits < PTLLND_CREDIT_HIGHWATER)
return 0;
/* No tx to piggyback NOOP onto or no credit to send a tx */
- return (list_empty(&peer->peer_sendq) || peer->peer_credits == 1);
+ return (cfs_list_empty(&peer->peer_sendq) || peer->peer_credits == 1);
}
void
int msg_type;
unsigned long flags;
- LASSERT(!in_interrupt());
+ LASSERT(!cfs_in_interrupt());
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
peer->peer_retry_noop = 0;
if (kptllnd_peer_send_noop(peer)) {
/* post a NOOP to return credits */
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
tx = kptllnd_get_idle_tx(TX_TYPE_SMALL_MESSAGE);
if (tx == NULL) {
kptllnd_post_tx(peer, tx, 0);
}
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
peer->peer_retry_noop = (tx == NULL);
}
for (;;) {
- if (!list_empty(&peer->peer_noops)) {
+ if (!cfs_list_empty(&peer->peer_noops)) {
LASSERT (peer->peer_sent_hello);
- tx = list_entry(peer->peer_noops.next,
- kptl_tx_t, tx_list);
- } else if (!list_empty(&peer->peer_sendq)) {
- tx = list_entry(peer->peer_sendq.next,
- kptl_tx_t, tx_list);
+ tx = cfs_list_entry(peer->peer_noops.next,
+ kptl_tx_t, tx_list);
+ } else if (!cfs_list_empty(&peer->peer_sendq)) {
+ tx = cfs_list_entry(peer->peer_sendq.next,
+ kptl_tx_t, tx_list);
} else {
/* nothing to send right now */
break;
/* Ensure HELLO is sent first */
if (!peer->peer_sent_hello) {
- LASSERT (list_empty(&peer->peer_noops));
+ LASSERT (cfs_list_empty(&peer->peer_noops));
if (msg_type != PTLLND_MSG_TYPE_HELLO)
break;
peer->peer_sent_hello = 1;
break;
}
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
/* Discard any NOOP I queued if I'm not at the high-water mark
* any more or more messages have been queued */
!kptllnd_peer_send_noop(peer)) {
tx->tx_active = 0;
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
CDEBUG(D_NET, "%s: redundant noop\n",
libcfs_id2str(peer->peer_id));
kptllnd_tx_decref(tx);
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
continue;
}
kptllnd_msgtype2str(msg_type), tx, tx->tx_msg->ptlm_nob,
tx->tx_msg->ptlm_credits);
- list_add_tail(&tx->tx_list, &peer->peer_activeq);
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
kptllnd_tx_addref(tx); /* 1 ref for me... */
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
if (tx->tx_type == TX_TYPE_PUT_REQUEST ||
tx->tx_type == TX_TYPE_GET_REQUEST) {
kptllnd_tx_decref(tx); /* drop my ref */
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
}
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
return;
failed:
kptllnd_find_timed_out_tx(kptl_peer_t *peer)
{
kptl_tx_t *tx;
- struct list_head *ele;
+ cfs_list_t *ele;
- list_for_each(ele, &peer->peer_sendq) {
- tx = list_entry(ele, kptl_tx_t, tx_list);
+ cfs_list_for_each(ele, &peer->peer_sendq) {
+ tx = cfs_list_entry(ele, kptl_tx_t, tx_list);
- if (time_after_eq(jiffies, tx->tx_deadline)) {
+ if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
kptllnd_tx_addref(tx);
return tx;
}
}
- list_for_each(ele, &peer->peer_activeq) {
- tx = list_entry(ele, kptl_tx_t, tx_list);
+ cfs_list_for_each(ele, &peer->peer_activeq) {
+ tx = cfs_list_entry(ele, kptl_tx_t, tx_list);
- if (time_after_eq(jiffies, tx->tx_deadline)) {
+ if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
kptllnd_tx_addref(tx);
return tx;
}
void
kptllnd_peer_check_bucket (int idx, int stamp)
{
- struct list_head *peers = &kptllnd_data.kptl_peers[idx];
+ cfs_list_t *peers = &kptllnd_data.kptl_peers[idx];
kptl_peer_t *peer;
unsigned long flags;
again:
/* NB. Shared lock while I just look */
- read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
- list_for_each_entry (peer, peers, peer_list) {
+ cfs_list_for_each_entry (peer, peers, peer_list) {
kptl_tx_t *tx;
int check_sends;
int c = -1, oc = -1, sc = -1;
libcfs_id2str(peer->peer_id), peer->peer_credits,
peer->peer_outstanding_credits, peer->peer_sent_credits);
- spin_lock(&peer->peer_lock);
+ cfs_spin_lock(&peer->peer_lock);
if (peer->peer_check_stamp == stamp) {
/* checked already this pass */
- spin_unlock(&peer->peer_lock);
+ cfs_spin_unlock(&peer->peer_lock);
continue;
}
peer->peer_check_stamp = stamp;
tx = kptllnd_find_timed_out_tx(peer);
check_sends = peer->peer_retry_noop;
-
+
if (tx != NULL) {
c = peer->peer_credits;
sc = peer->peer_sent_credits;
nactive = kptllnd_count_queue(&peer->peer_activeq);
}
- spin_unlock(&peer->peer_lock);
-
+ cfs_spin_unlock(&peer->peer_lock);
+
if (tx == NULL && !check_sends)
continue;
kptllnd_peer_addref(peer); /* 1 ref for me... */
- read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
if (tx == NULL) { /* nothing timed out */
kptllnd_peer_check_sends(peer);
LCONSOLE_ERROR_MSG(0x126, "Timing out %s: %s\n",
libcfs_id2str(peer->peer_id),
- (tx->tx_tposted == 0) ?
- "no free peer buffers" :
+ (tx->tx_tposted == 0) ?
+ "no free peer buffers" :
"please check Portals");
if (tx->tx_tposted) {
goto again;
}
- read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
}
kptl_peer_t *
kptllnd_id2peer_locked (lnet_process_id_t id)
{
- struct list_head *peers = kptllnd_nid2peerlist(id.nid);
- struct list_head *tmp;
+ cfs_list_t *peers = kptllnd_nid2peerlist(id.nid);
+ cfs_list_t *tmp;
kptl_peer_t *peer;
- list_for_each (tmp, peers) {
- peer = list_entry (tmp, kptl_peer_t, peer_list);
+ cfs_list_for_each (tmp, peers) {
+ peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
LASSERT(peer->peer_state == PEER_STATE_WAITING_HELLO ||
peer->peer_state == PEER_STATE_ACTIVE);
CDEBUG(D_NET, "%s -> %s (%d)\n",
libcfs_id2str(id),
kptllnd_ptlid2str(peer->peer_ptlid),
- atomic_read (&peer->peer_refcount));
+ cfs_atomic_read (&peer->peer_refcount));
return peer;
}
__u64
kptllnd_get_last_seen_matchbits_locked(lnet_process_id_t lpid)
{
- kptl_peer_t *peer;
- struct list_head *tmp;
+ kptl_peer_t *peer;
+ cfs_list_t *tmp;
/* Find the last matchbits I saw this new peer using. Note..
A. This peer cannot be in the peer table - she's new!
/* peer's last matchbits can't change after it comes out of the peer
* table, so first match is fine */
- list_for_each (tmp, &kptllnd_data.kptl_closing_peers) {
- peer = list_entry (tmp, kptl_peer_t, peer_list);
+ cfs_list_for_each (tmp, &kptllnd_data.kptl_closing_peers) {
+ peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
if (LNET_NIDADDR(peer->peer_id.nid) == LNET_NIDADDR(lpid.nid) &&
peer->peer_id.pid == lpid.pid)
return peer->peer_last_matchbits_seen;
}
- list_for_each (tmp, &kptllnd_data.kptl_zombie_peers) {
- peer = list_entry (tmp, kptl_peer_t, peer_list);
+ cfs_list_for_each (tmp, &kptllnd_data.kptl_zombie_peers) {
+ peer = cfs_list_entry (tmp, kptl_peer_t, peer_list);
if (LNET_NIDADDR(peer->peer_id.nid) == LNET_NIDADDR(lpid.nid) &&
peer->peer_id.pid == lpid.pid)
kptllnd_peer_handle_hello (kptl_net_t *net,
ptl_process_id_t initiator, kptl_msg_t *msg)
{
- rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+ cfs_rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
kptl_peer_t *peer;
kptl_peer_t *new_peer;
lnet_process_id_t lpid;
return NULL;
}
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
peer = kptllnd_id2peer_locked(lpid);
if (peer != NULL) {
if (msg->ptlm_dststamp != 0 &&
msg->ptlm_dststamp != peer->peer_myincarnation) {
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
CERROR("Ignoring HELLO from %s: unexpected "
"dststamp "LPX64" ("LPX64" wanted)\n",
peer->peer_max_msg_size =
msg->ptlm_u.hello.kptlhm_max_msg_size;
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
return peer;
}
if (msg->ptlm_dststamp != 0 &&
msg->ptlm_dststamp <= peer->peer_myincarnation) {
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
CERROR("Ignoring stale HELLO from %s: "
"dststamp "LPX64" (current "LPX64")\n",
kptllnd_cull_peertable_locked(lpid);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
if (peer != NULL) {
CDEBUG(D_NET, "Peer %s (%s) reconnecting:"
return NULL;
}
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
again:
if (net->net_shutdown) {
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
CERROR ("Shutdown started, refusing connection from %s\n",
libcfs_id2str(lpid));
peer->peer_max_msg_size =
msg->ptlm_u.hello.kptlhm_max_msg_size;
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
CWARN("Outgoing instantiated peer %s\n",
libcfs_id2str(lpid));
} else {
LASSERT (peer->peer_state == PEER_STATE_ACTIVE);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
/* WOW! Somehow this peer completed the HELLO
* handshake while I slept. I guess I could have slept
if (kptllnd_data.kptl_n_active_peers ==
kptllnd_data.kptl_expected_peers) {
/* peer table full */
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
kptllnd_peertable_overflow_msg("Connection from ", lpid);
return NULL;
}
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
kptllnd_data.kptl_expected_peers++;
goto again;
}
LASSERT (!net->net_shutdown);
kptllnd_peer_add_peertable_locked(new_peer);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
/* NB someone else could get in now and post a message before I post
* the HELLO, but post_tx/check_sends take care of that! */
kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
kptl_peer_t **peerp)
{
- rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+ cfs_rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
ptl_process_id_t ptl_id;
kptl_peer_t *new_peer;
kptl_tx_t *hello_tx;
__u64 last_matchbits_seen;
/* I expect to find the peer, so I only take a read lock... */
- read_lock_irqsave(g_lock, flags);
+ cfs_read_lock_irqsave(g_lock, flags);
*peerp = kptllnd_id2peer_locked(target);
- read_unlock_irqrestore(g_lock, flags);
+ cfs_read_unlock_irqrestore(g_lock, flags);
if (*peerp != NULL)
return 0;
-
+
if ((target.pid & LNET_PID_USERFLAG) != 0) {
CWARN("Refusing to create a new connection to %s "
"(non-kernel peer)\n", libcfs_id2str(target));
if (rc != 0)
goto unwind_1;
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
again:
/* Called only in lnd_send which can't happen after lnd_shutdown */
LASSERT (!net->net_shutdown);
*peerp = kptllnd_id2peer_locked(target);
if (*peerp != NULL) {
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
goto unwind_2;
}
if (kptllnd_data.kptl_n_active_peers ==
kptllnd_data.kptl_expected_peers) {
/* peer table full */
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
kptllnd_peertable_overflow_msg("Connection to ", target);
rc = -ENOMEM;
goto unwind_2;
}
- write_lock_irqsave(g_lock, flags);
+ cfs_write_lock_irqsave(g_lock, flags);
kptllnd_data.kptl_expected_peers++;
goto again;
}
kptllnd_peer_add_peertable_locked(new_peer);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
/* NB someone else could get in now and post a message before I post
* the HELLO, but post_tx/check_sends take care of that! */
#include "ptllnd.h"
#ifdef CRAY_XT3
-static struct semaphore ptltrace_mutex;
+static cfs_semaphore_t ptltrace_mutex;
static cfs_waitq_t ptltrace_debug_ctlwq;
void
libcfs_daemonize("kpt_ptltrace_dump");
/* serialise with other instances of me */
- mutex_down(&ptltrace_mutex);
+ cfs_mutex_down(&ptltrace_mutex);
snprintf(fname, sizeof(fname), "%s.%ld.%ld",
*kptllnd_tunables.kptl_ptltrace_basename,
kptllnd_ptltrace_to_file(fname);
- mutex_up(&ptltrace_mutex);
+ cfs_mutex_up(&ptltrace_mutex);
/* unblock my creator */
cfs_waitq_signal(&ptltrace_debug_ctlwq);
void
kptllnd_dump_ptltrace(void)
{
- int rc;
+ int rc;
cfs_waitlink_t wait;
ENTRY;
/* taken from libcfs_debug_dumplog */
cfs_waitlink_init(&wait);
- set_current_state(TASK_INTERRUPTIBLE);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add(&ptltrace_debug_ctlwq, &wait);
rc = cfs_kernel_thread(kptllnd_dump_ptltrace_thread,
cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
}
- /* teardown if kernel_thread() failed */
+ /* teardown if cfs_kernel_thread() failed */
cfs_waitq_del(&ptltrace_debug_ctlwq, &wait);
- set_current_state(TASK_RUNNING);
+ cfs_set_current_state(CFS_TASK_RUNNING);
EXIT;
}
kptllnd_init_ptltrace(void)
{
cfs_waitq_init(&ptltrace_debug_ctlwq);
- init_mutex(&ptltrace_mutex);
+ cfs_init_mutex(&ptltrace_mutex);
}
#endif
kptllnd_rx_buffer_pool_init(kptl_rx_buffer_pool_t *rxbp)
{
memset(rxbp, 0, sizeof(*rxbp));
- spin_lock_init(&rxbp->rxbp_lock);
- INIT_LIST_HEAD(&rxbp->rxbp_list);
+ cfs_spin_lock_init(&rxbp->rxbp_lock);
+ CFS_INIT_LIST_HEAD(&rxbp->rxbp_list);
}
void
LASSERT(!rxb->rxb_posted);
LASSERT(rxb->rxb_idle);
- list_del(&rxb->rxb_list);
+ cfs_list_del(&rxb->rxb_list);
rxbp->rxbp_count--;
LIBCFS_FREE(rxb->rxb_buffer, kptllnd_rx_buffer_size());
CDEBUG(D_NET, "kptllnd_rx_buffer_pool_reserve(%d)\n", count);
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
for (;;) {
if (rxbp->rxbp_shutdown) {
break;
}
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
LIBCFS_ALLOC(rxb, sizeof(*rxb));
LIBCFS_ALLOC(buffer, bufsize);
if (buffer != NULL)
LIBCFS_FREE(buffer, bufsize);
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rc = -ENOMEM;
break;
}
rxb->rxb_buffer = buffer;
rxb->rxb_mdh = PTL_INVALID_HANDLE;
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxbp->rxbp_shutdown) {
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
LIBCFS_FREE(rxb, sizeof(*rxb));
LIBCFS_FREE(buffer, bufsize);
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rc = -ESHUTDOWN;
break;
}
- list_add_tail(&rxb->rxb_list, &rxbp->rxbp_list);
+ cfs_list_add_tail(&rxb->rxb_list, &rxbp->rxbp_list);
rxbp->rxbp_count++;
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
kptllnd_rx_buffer_post(rxb);
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
}
if (rc == 0)
rxbp->rxbp_reserved += count;
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return rc;
}
{
unsigned long flags;
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
CDEBUG(D_NET, "kptllnd_rx_buffer_pool_unreserve(%d)\n", count);
rxbp->rxbp_reserved -= count;
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
void
int rc;
int i;
unsigned long flags;
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
ptl_handle_md_t mdh;
- /* CAVEAT EMPTOR: I'm racing with everything here!!!
+ /* CAVEAT EMPTOR: I'm racing with everything here!!!
*
* Buffers can still be posted after I set rxbp_shutdown because I
* can't hold rxbp_lock while I'm posting them.
* different MD) from when the MD is actually unlinked, to when the
* event callback tells me it has been unlinked. */
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxbp->rxbp_shutdown = 1;
for (i = 9;; i++) {
- list_for_each_safe(tmp, nxt, &rxbp->rxbp_list) {
- rxb = list_entry (tmp, kptl_rx_buffer_t, rxb_list);
-
+ cfs_list_for_each_safe(tmp, nxt, &rxbp->rxbp_list) {
+ rxb = cfs_list_entry (tmp, kptl_rx_buffer_t, rxb_list);
+
if (rxb->rxb_idle) {
- spin_unlock_irqrestore(&rxbp->rxbp_lock,
- flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock,
+ flags);
kptllnd_rx_buffer_destroy(rxb);
- spin_lock_irqsave(&rxbp->rxbp_lock,
- flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock,
+ flags);
continue;
}
if (PtlHandleIsEqual(mdh, PTL_INVALID_HANDLE))
continue;
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
rc = PtlMDUnlink(mdh);
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
#ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
/* callback clears rxb_mdh and drops net's ref
#endif
}
- if (list_empty(&rxbp->rxbp_list))
+ if (cfs_list_empty(&rxbp->rxbp_list))
break;
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
/* Wait a bit for references to be dropped */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
cfs_pause(cfs_time_seconds(1));
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
}
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
void
kptl_rx_buffer_pool_t *rxbp = rxb->rxb_pool;
unsigned long flags;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (rxb->rxb_refcount == 0);
LASSERT (!rxb->rxb_idle);
LASSERT (!rxb->rxb_posted);
any.nid = PTL_NID_ANY;
any.pid = PTL_PID_ANY;
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxbp->rxbp_shutdown) {
rxb->rxb_idle = 1;
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return;
}
rxb->rxb_refcount = 1; /* net's ref */
rxb->rxb_posted = 1; /* I'm posting */
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
rc = PtlMEAttach(kptllnd_data.kptl_nih,
*kptllnd_tunables.kptl_portal,
rc = PtlMDAttach(meh, md, PTL_UNLINK, &mdh);
if (rc == PTL_OK) {
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxb->rxb_posted) /* Not auto-unlinked yet!!! */
rxb->rxb_mdh = mdh;
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return;
}
LASSERT(rc == PTL_OK);
failed:
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxb->rxb_posted = 0;
/* XXX this will just try again immediately */
kptllnd_rx_buffer_decref_locked(rxb);
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
kptl_rx_t *
if (peer != NULL) {
/* Update credits (after I've decref-ed the buffer) */
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
if (post_credit == PTLLND_POSTRX_PEER_CREDIT)
peer->peer_outstanding_credits++;
peer->peer_outstanding_credits, peer->peer_sent_credits,
rx);
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
/* I might have to send back credits */
kptllnd_peer_check_sends(peer);
#endif
CDEBUG(D_NET, "%s: %s(%d) rxb=%p fail=%s(%d) unlink=%d\n",
- kptllnd_ptlid2str(ev->initiator),
- kptllnd_evtype2str(ev->type), ev->type, rxb,
+ kptllnd_ptlid2str(ev->initiator),
+ kptllnd_evtype2str(ev->type), ev->type, rxb,
kptllnd_errtype2str(ev->ni_fail_type), ev->ni_fail_type,
unlinked);
LASSERT (!rxb->rxb_idle);
LASSERT (ev->md.start == rxb->rxb_buffer);
- LASSERT (ev->offset + ev->mlength <=
+ LASSERT (ev->offset + ev->mlength <=
PAGE_SIZE * *kptllnd_tunables.kptl_rxb_npages);
- LASSERT (ev->type == PTL_EVENT_PUT_END ||
+ LASSERT (ev->type == PTL_EVENT_PUT_END ||
ev->type == PTL_EVENT_UNLINK);
LASSERT (ev->type == PTL_EVENT_UNLINK ||
ev->match_bits == LNET_MSG_MATCHBITS);
/* Portals can't force alignment - copy into
* rx_space (avoiding overflow) to fix */
int maxlen = *kptllnd_tunables.kptl_max_msg_size;
-
+
rx->rx_rxb = NULL;
rx->rx_nob = MIN(maxlen, ev->mlength);
rx->rx_msg = (kptl_msg_t *)rx->rx_space;
rx->rx_uid = ev->uid;
#endif
/* Queue for attention */
- spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
- flags);
+ cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ flags);
- list_add_tail(&rx->rx_list,
- &kptllnd_data.kptl_sched_rxq);
- wake_up(&kptllnd_data.kptl_sched_waitq);
+ cfs_list_add_tail(&rx->rx_list,
+ &kptllnd_data.kptl_sched_rxq);
+ cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
- spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
- flags);
+ cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ kptl_sched_lock, flags);
}
}
if (unlinked) {
- spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxb->rxb_posted = 0;
rxb->rxb_mdh = PTL_INVALID_HANDLE;
kptllnd_rx_buffer_decref_locked(rxb);
- spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
}
{
kptl_net_t *net;
- read_lock(&kptllnd_data.kptl_net_rw_lock);
- list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
+ cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
LASSERT (!net->net_shutdown);
if (net->net_ni->ni_nid == nid) {
kptllnd_net_addref(net);
- read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
return net;
}
}
- read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
return NULL;
}
int post_credit = PTLLND_POSTRX_PEER_CREDIT;
kptl_net_t *net = NULL;
kptl_peer_t *peer;
- struct list_head txs;
+ cfs_list_t txs;
unsigned long flags;
lnet_process_id_t srcid;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (rx->rx_peer == NULL);
- INIT_LIST_HEAD(&txs);
+ CFS_INIT_LIST_HEAD(&txs);
if ((rx->rx_nob >= 4 &&
(msg->ptlm_magic == LNET_PROTO_MAGIC ||
if (peer->peer_state == PEER_STATE_WAITING_HELLO) {
/* recoverable error - restart txs */
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
kptllnd_cancel_txlist(&peer->peer_sendq, &txs);
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
CWARN("NAK %s: Unexpected %s message\n",
libcfs_id2str(srcid),
LASSERTF (msg->ptlm_srcpid == peer->peer_id.pid, "m %u p %u\n",
msg->ptlm_srcpid, peer->peer_id.pid);
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
/* Check peer only sends when I've sent her credits */
if (peer->peer_sent_credits == 0) {
int oc = peer->peer_outstanding_credits;
int sc = peer->peer_sent_credits;
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
CERROR("%s: buffer overrun [%d/%d+%d]\n",
libcfs_id2str(peer->peer_id), c, sc, oc);
post_credit = PTLLND_POSTRX_NO_CREDIT;
}
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
/* See if something can go out now that credits have come in */
if (msg->ptlm_credits != 0)
PTL_RESERVED_MATCHBITS);
/* Update last match bits seen */
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
if (msg->ptlm_u.rdma.kptlrm_matchbits >
rx->rx_peer->peer_last_matchbits_seen)
rx->rx_peer->peer_last_matchbits_seen =
msg->ptlm_u.rdma.kptlrm_matchbits;
- spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
rc = lnet_parse(net->net_ni,
&msg->ptlm_u.rdma.kptlrm_hdr,
kptllnd_peer_close(peer, rc);
if (rx->rx_peer == NULL) /* drop ref on peer */
kptllnd_peer_decref(peer); /* unless rx_done will */
- if (!list_empty(&txs)) {
+ if (!cfs_list_empty(&txs)) {
LASSERT (net != NULL);
kptllnd_restart_txs(net, srcid, &txs);
}
LIBCFS_FREE(tx, sizeof(*tx));
- atomic_dec(&kptllnd_data.kptl_ntx);
+ cfs_atomic_dec(&kptllnd_data.kptl_ntx);
/* Keep the tunable in step for visibility */
- *kptllnd_tunables.kptl_ntx = atomic_read(&kptllnd_data.kptl_ntx);
+ *kptllnd_tunables.kptl_ntx = cfs_atomic_read(&kptllnd_data.kptl_ntx);
}
kptl_tx_t *
return NULL;
}
- atomic_inc(&kptllnd_data.kptl_ntx);
+ cfs_atomic_inc(&kptllnd_data.kptl_ntx);
/* Keep the tunable in step for visibility */
- *kptllnd_tunables.kptl_ntx = atomic_read(&kptllnd_data.kptl_ntx);
+ *kptllnd_tunables.kptl_ntx = cfs_atomic_read(&kptllnd_data.kptl_ntx);
tx->tx_idle = 1;
tx->tx_rdma_mdh = PTL_INVALID_HANDLE;
{
int n = *kptllnd_tunables.kptl_ntx;
int i;
-
+
for (i = 0; i < n; i++) {
kptl_tx_t *tx = kptllnd_alloc_tx();
if (tx == NULL)
return -ENOMEM;
-
- spin_lock(&kptllnd_data.kptl_tx_lock);
- list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
- spin_unlock(&kptllnd_data.kptl_tx_lock);
+
+ cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+ cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
+ cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
}
-
+
return 0;
}
/* No locking; single threaded now */
LASSERT (kptllnd_data.kptl_shutdown == 2);
- while (!list_empty(&kptllnd_data.kptl_idle_txs)) {
- tx = list_entry(kptllnd_data.kptl_idle_txs.next,
- kptl_tx_t, tx_list);
-
- list_del(&tx->tx_list);
+ while (!cfs_list_empty(&kptllnd_data.kptl_idle_txs)) {
+ tx = cfs_list_entry(kptllnd_data.kptl_idle_txs.next,
+ kptl_tx_t, tx_list);
+
+ cfs_list_del(&tx->tx_list);
kptllnd_free_tx(tx);
}
- LASSERT (atomic_read(&kptllnd_data.kptl_ntx) == 0);
+ LASSERT (cfs_atomic_read(&kptllnd_data.kptl_ntx) == 0);
}
kptl_tx_t *
{
kptl_tx_t *tx = NULL;
- if (IS_SIMULATION_ENABLED(FAIL_TX_PUT_ALLOC) &&
+ if (IS_SIMULATION_ENABLED(FAIL_TX_PUT_ALLOC) &&
type == TX_TYPE_PUT_REQUEST) {
CERROR("FAIL_TX_PUT_ALLOC SIMULATION triggered\n");
return NULL;
}
- if (IS_SIMULATION_ENABLED(FAIL_TX_GET_ALLOC) &&
+ if (IS_SIMULATION_ENABLED(FAIL_TX_GET_ALLOC) &&
type == TX_TYPE_GET_REQUEST) {
CERROR ("FAIL_TX_GET_ALLOC SIMULATION triggered\n");
return NULL;
return NULL;
}
- spin_lock(&kptllnd_data.kptl_tx_lock);
+ cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
- if (list_empty (&kptllnd_data.kptl_idle_txs)) {
- spin_unlock(&kptllnd_data.kptl_tx_lock);
+ if (cfs_list_empty (&kptllnd_data.kptl_idle_txs)) {
+ cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
tx = kptllnd_alloc_tx();
if (tx == NULL)
return NULL;
} else {
- tx = list_entry(kptllnd_data.kptl_idle_txs.next,
- kptl_tx_t, tx_list);
- list_del(&tx->tx_list);
+ tx = cfs_list_entry(kptllnd_data.kptl_idle_txs.next,
+ kptl_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
- spin_unlock(&kptllnd_data.kptl_tx_lock);
+ cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
}
- LASSERT (atomic_read(&tx->tx_refcount)== 0);
+ LASSERT (cfs_atomic_read(&tx->tx_refcount)== 0);
LASSERT (tx->tx_idle);
LASSERT (!tx->tx_active);
LASSERT (tx->tx_lnet_msg == NULL);
LASSERT (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE));
tx->tx_type = type;
- atomic_set(&tx->tx_refcount, 1);
+ cfs_atomic_set(&tx->tx_refcount, 1);
tx->tx_status = 0;
tx->tx_idle = 0;
tx->tx_tposted = 0;
ptl_handle_md_t rdma_mdh;
unsigned long flags;
- LASSERT (atomic_read(&tx->tx_refcount) == 0);
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (!tx->tx_active);
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
msg_mdh = tx->tx_msg_mdh;
rdma_mdh = tx->tx_rdma_mdh;
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
tx->tx_lnet_replymsg == NULL));
/* stash the tx on its peer until it completes */
- atomic_set(&tx->tx_refcount, 1);
+ cfs_atomic_set(&tx->tx_refcount, 1);
tx->tx_active = 1;
- list_add_tail(&tx->tx_list, &peer->peer_activeq);
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
/* These unlinks will ensure completion events (normal or unlink) will
* happen ASAP */
unsigned long flags;
ptl_err_t prc;
- LASSERT (atomic_read(&tx->tx_refcount) == 0);
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (!tx->tx_active);
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
msg_mdh = tx->tx_msg_mdh;
rdma_mdh = tx->tx_rdma_mdh;
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
(tx->tx_lnet_msg == NULL &&
tx->tx_replymsg == NULL));
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
if (!PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE)) {
prc = PtlMDUnlink(msg_mdh);
rdma_mdh = PTL_INVALID_HANDLE;
}
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
/* update tx_???_mdh if callback hasn't fired */
if (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE))
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
/* stash the tx on its peer until it completes */
- atomic_set(&tx->tx_refcount, 1);
+ cfs_atomic_set(&tx->tx_refcount, 1);
tx->tx_active = 1;
- list_add_tail(&tx->tx_list, &peer->peer_activeq);
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
kptllnd_peer_addref(peer); /* extra ref for me... */
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
/* This will get the watchdog thread to try aborting all the peer's
* comms again. NB, this deems it fair that 1 failing tx which can't
int status = tx->tx_status;
int rc;
- LASSERT (!in_interrupt());
- LASSERT (atomic_read(&tx->tx_refcount) == 0);
+ LASSERT (!cfs_in_interrupt());
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (!tx->tx_idle);
LASSERT (!tx->tx_active);
tx->tx_peer = NULL;
tx->tx_idle = 1;
- spin_lock(&kptllnd_data.kptl_tx_lock);
- list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
- spin_unlock(&kptllnd_data.kptl_tx_lock);
+ cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+ cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
+ cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
/* Must finalize AFTER freeing 'tx' */
if (msg != NULL)
if (!unlinked)
return;
- spin_lock_irqsave(&peer->peer_lock, flags);
+ cfs_spin_lock_irqsave(&peer->peer_lock, flags);
if (ismsg)
tx->tx_msg_mdh = PTL_INVALID_HANDLE;
if (!PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE) ||
!PtlHandleIsEqual(tx->tx_rdma_mdh, PTL_INVALID_HANDLE) ||
!tx->tx_active) {
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
return;
}
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
tx->tx_active = 0;
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
/* drop peer's ref, but if it was the last one... */
- if (atomic_dec_and_test(&tx->tx_refcount)) {
+ if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
/* ...finalize it in thread context! */
- spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
- list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
- wake_up(&kptllnd_data.kptl_sched_waitq);
+ cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
+ cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
- spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+ flags);
}
}
* assertions generated here (but fails-safe if it ever does) */
typedef struct {
int counter;
-} atomic_t;
+} cfs_atomic_t;
#include <lnet/lib-types.h>
#include <lnet/ptllnd_wire.h>
kqswnal_get_tx_desc (struct libcfs_ioctl_data *data)
{
unsigned long flags;
- struct list_head *tmp;
+ cfs_list_t *tmp;
kqswnal_tx_t *ktx;
lnet_hdr_t *hdr;
int index = data->ioc_count;
int rc = -ENOENT;
- spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
- list_for_each (tmp, &kqswnal_data.kqn_activetxds) {
+ cfs_list_for_each (tmp, &kqswnal_data.kqn_activetxds) {
if (index-- != 0)
continue;
- ktx = list_entry (tmp, kqswnal_tx_t, ktx_list);
+ ktx = cfs_list_entry (tmp, kqswnal_tx_t, ktx_list);
hdr = (lnet_hdr_t *)ktx->ktx_buffer;
data->ioc_count = le32_to_cpu(hdr->payload_length);
data->ioc_u64[0] = ktx->ktx_nid;
data->ioc_u32[0] = le32_to_cpu(hdr->type);
data->ioc_u32[1] = ktx->ktx_launcher;
- data->ioc_flags = (list_empty (&ktx->ktx_schedlist) ? 0 : 1) |
- (ktx->ktx_state << 2);
+ data->ioc_flags =
+ (cfs_list_empty (&ktx->ktx_schedlist) ? 0 : 1) |
+ (ktx->ktx_state << 2);
rc = 0;
break;
}
-
- spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+
+ cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
return (rc);
}
case IOC_LIBCFS_REGISTER_MYNID:
if (data->ioc_nid == ni->ni_nid)
return 0;
-
+
LASSERT (LNET_NIDNET(data->ioc_nid) == LNET_NIDNET(ni->ni_nid));
CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID for %s(%s)\n",
libcfs_nid2str(data->ioc_nid),
libcfs_nid2str(ni->ni_nid));
return 0;
-
+
default:
return (-EINVAL);
}
/**********************************************************************/
/* Signal the start of shutdown... */
- spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
+ cfs_spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
kqswnal_data.kqn_shuttingdown = 1;
- spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
+ cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
/**********************************************************************/
/* wait for sends that have allocated a tx desc to launch or give up */
- while (atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
+ while (cfs_atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
CDEBUG(D_NET, "waiting for %d pending sends\n",
- atomic_read (&kqswnal_data.kqn_pending_txs));
+ cfs_atomic_read (&kqswnal_data.kqn_pending_txs));
cfs_pause(cfs_time_seconds(1));
}
/* NB ep_free_xmtr() returns only after all outstanding transmits
* have called their callback... */
- LASSERT(list_empty(&kqswnal_data.kqn_activetxds));
+ LASSERT(cfs_list_empty(&kqswnal_data.kqn_activetxds));
/**********************************************************************/
/* flag threads to terminate, wake them and wait for them to die */
kqswnal_data.kqn_shuttingdown = 2;
- wake_up_all (&kqswnal_data.kqn_sched_waitq);
+ cfs_waitq_broadcast (&kqswnal_data.kqn_sched_waitq);
- while (atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
+ while (cfs_atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
CDEBUG(D_NET, "waiting for %d threads to terminate\n",
- atomic_read (&kqswnal_data.kqn_nthreads));
+ cfs_atomic_read (&kqswnal_data.kqn_nthreads));
cfs_pause(cfs_time_seconds(1));
}
* I control the horizontals and the verticals...
*/
- LASSERT (list_empty (&kqswnal_data.kqn_readyrxds));
- LASSERT (list_empty (&kqswnal_data.kqn_donetxds));
- LASSERT (list_empty (&kqswnal_data.kqn_delayedtxds));
+ LASSERT (cfs_list_empty (&kqswnal_data.kqn_readyrxds));
+ LASSERT (cfs_list_empty (&kqswnal_data.kqn_donetxds));
+ LASSERT (cfs_list_empty (&kqswnal_data.kqn_delayedtxds));
/**********************************************************************/
/* Unmap message buffers and free all descriptors and buffers
/* resets flags, pointers to NULL etc */
memset(&kqswnal_data, 0, sizeof (kqswnal_data));
- CDEBUG (D_MALLOC, "done kmem %d\n", atomic_read(&libcfs_kmemory));
+ CDEBUG (D_MALLOC, "done kmem %d\n", cfs_atomic_read(&libcfs_kmemory));
PORTAL_MODULE_UNUSE;
}
*kqswnal_tunables.kqn_credits);
}
- CDEBUG (D_MALLOC, "start kmem %d\n", atomic_read(&libcfs_kmemory));
+ CDEBUG (D_MALLOC, "start kmem %d\n", cfs_atomic_read(&libcfs_kmemory));
/* ensure all pointers NULL etc */
memset (&kqswnal_data, 0, sizeof (kqswnal_data));
ni->ni_peertxcredits = *kqswnal_tunables.kqn_peercredits;
ni->ni_maxtxcredits = *kqswnal_tunables.kqn_credits;
- INIT_LIST_HEAD (&kqswnal_data.kqn_idletxds);
- INIT_LIST_HEAD (&kqswnal_data.kqn_activetxds);
- spin_lock_init (&kqswnal_data.kqn_idletxd_lock);
+ CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_idletxds);
+ CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_activetxds);
+ cfs_spin_lock_init (&kqswnal_data.kqn_idletxd_lock);
- INIT_LIST_HEAD (&kqswnal_data.kqn_delayedtxds);
- INIT_LIST_HEAD (&kqswnal_data.kqn_donetxds);
- INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
+ CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_delayedtxds);
+ CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_donetxds);
+ CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
- spin_lock_init (&kqswnal_data.kqn_sched_lock);
- init_waitqueue_head (&kqswnal_data.kqn_sched_waitq);
+ cfs_spin_lock_init (&kqswnal_data.kqn_sched_lock);
+ cfs_waitq_init (&kqswnal_data.kqn_sched_waitq);
/* pointers/lists/locks initialised */
kqswnal_data.kqn_init = KQN_INIT_DATA;
ktx->ktx_basepage = basepage + premapped_pages; /* message mapping starts here */
ktx->ktx_npages = KQSW_NTXMSGPAGES - premapped_pages; /* for this many pages */
- INIT_LIST_HEAD (&ktx->ktx_schedlist);
+ CFS_INIT_LIST_HEAD (&ktx->ktx_schedlist);
ktx->ktx_state = KTX_IDLE;
ktx->ktx_rail = -1; /* unset rail */
- list_add_tail (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
+ cfs_list_add_tail (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
}
/**********************************************************************/
/**********************************************************************/
/* Spawn scheduling threads */
- for (i = 0; i < num_online_cpus(); i++) {
+ for (i = 0; i < cfs_num_online_cpus(); i++) {
rc = kqswnal_thread_start (kqswnal_scheduler, NULL);
if (rc != 0)
{
typedef struct kqswnal_rx
{
- struct list_head krx_list; /* enqueue -> thread */
- struct kqswnal_rx *krx_alloclist; /* stack in kqn_rxds */
- EP_RCVR *krx_eprx; /* port to post receives to */
- EP_RXD *krx_rxd; /* receive descriptor (for repost) */
- EP_NMD krx_elanbuffer; /* contiguous Elan buffer */
- int krx_npages; /* # pages in receive buffer */
- int krx_nob; /* Number Of Bytes received into buffer */
- int krx_rpc_reply_needed:1; /* peer waiting for EKC RPC reply */
- int krx_state; /* what this RX is doing */
- atomic_t krx_refcount; /* how to tell when rpc is done */
+ cfs_list_t krx_list; /* enqueue -> thread */
+ struct kqswnal_rx *krx_alloclist;/* stack in kqn_rxds */
+ EP_RCVR *krx_eprx; /* port to post receives to */
+ EP_RXD *krx_rxd; /* receive descriptor (for repost) */
+ EP_NMD krx_elanbuffer;/* contiguous Elan buffer */
+ int krx_npages; /* # pages in receive buffer */
+ int krx_nob; /* Number Of Bytes received into buffer */
+ int krx_rpc_reply_needed:1; /* peer waiting for EKC RPC reply */
+ int krx_state; /* what this RX is doing */
+ cfs_atomic_t krx_refcount; /* how to tell when rpc is done */
#if KQSW_CKSUM
- __u32 krx_cksum; /* checksum */
+ __u32 krx_cksum; /* checksum */
#endif
- kqswnal_rpc_reply_t krx_rpc_reply; /* rpc reply status block */
- lnet_kiov_t krx_kiov[KQSW_NRXMSGPAGES_LARGE]; /* buffer frags */
+ kqswnal_rpc_reply_t krx_rpc_reply; /* rpc reply status block */
+ lnet_kiov_t krx_kiov[KQSW_NRXMSGPAGES_LARGE];/* buffer frags */
} kqswnal_rx_t;
#define KRX_POSTED 1 /* receiving */
typedef struct kqswnal_tx
{
- struct list_head ktx_list; /* enqueue idle/active */
- struct list_head ktx_schedlist; /* enqueue on scheduler */
- struct kqswnal_tx *ktx_alloclist; /* stack in kqn_txds */
- unsigned int ktx_state:7; /* What I'm doing */
- unsigned int ktx_firsttmpfrag:1; /* ktx_frags[0] is in my ebuffer ? 0 : 1 */
- __u32 ktx_basepage; /* page offset in reserved elan tx vaddrs for mapping pages */
- int ktx_npages; /* pages reserved for mapping messages */
- int ktx_nmappedpages; /* # pages mapped for current message */
- int ktx_port; /* destination ep port */
- lnet_nid_t ktx_nid; /* destination node */
- void *ktx_args[3]; /* completion passthru */
- char *ktx_buffer; /* pre-allocated contiguous buffer for hdr + small payloads */
- cfs_time_t ktx_launchtime; /* when (in jiffies) the transmit
- * was launched */
- int ktx_status; /* completion status */
+ cfs_list_t ktx_list; /* enqueue idle/active */
+ cfs_list_t ktx_schedlist; /* enqueue on scheduler */
+ struct kqswnal_tx *ktx_alloclist; /* stack in kqn_txds */
+ unsigned int ktx_state:7; /* What I'm doing */
+ unsigned int ktx_firsttmpfrag:1; /* ktx_frags[0] is in my ebuffer ? 0 : 1 */
+ __u32 ktx_basepage; /* page offset in reserved elan tx vaddrs for mapping pages */
+ int ktx_npages; /* pages reserved for mapping messages */
+ int ktx_nmappedpages; /* # pages mapped for current message */
+ int ktx_port; /* destination ep port */
+ lnet_nid_t ktx_nid; /* destination node */
+ void *ktx_args[3]; /* completion passthru */
+ char *ktx_buffer; /* pre-allocated contiguous buffer for hdr + small payloads */
+ cfs_time_t ktx_launchtime; /* when (in jiffies) the
+ * transmit was launched */
+ int ktx_status; /* completion status */
#if KQSW_CKSUM
- __u32 ktx_cksum; /* optimized GET payload checksum */
+ __u32 ktx_cksum; /* optimized GET payload checksum */
#endif
/* debug/info fields */
- pid_t ktx_launcher; /* pid of launching process */
+ pid_t ktx_launcher; /* pid of launching process */
- int ktx_nfrag; /* # message frags */
- int ktx_rail; /* preferred rail */
- EP_NMD ktx_ebuffer; /* elan mapping of ktx_buffer */
- EP_NMD ktx_frags[EP_MAXFRAG];/* elan mapping of msg frags */
+ int ktx_nfrag; /* # message frags */
+ int ktx_rail; /* preferred rail */
+ EP_NMD ktx_ebuffer; /* elan mapping of ktx_buffer */
+ EP_NMD ktx_frags[EP_MAXFRAG];/* elan mapping of msg frags */
} kqswnal_tx_t;
#define KTX_IDLE 0 /* on kqn_idletxds */
typedef struct
{
- char kqn_init; /* what's been initialised */
- char kqn_shuttingdown; /* I'm trying to shut down */
- atomic_t kqn_nthreads; /* # threads running */
- lnet_ni_t *kqn_ni; /* _the_ instance of me */
-
- kqswnal_rx_t *kqn_rxds; /* stack of all the receive descriptors */
- kqswnal_tx_t *kqn_txds; /* stack of all the transmit descriptors */
-
- struct list_head kqn_idletxds; /* transmit descriptors free to use */
- struct list_head kqn_activetxds; /* transmit descriptors being used */
- spinlock_t kqn_idletxd_lock; /* serialise idle txd access */
- atomic_t kqn_pending_txs; /* # transmits being prepped */
-
- spinlock_t kqn_sched_lock; /* serialise packet schedulers */
- wait_queue_head_t kqn_sched_waitq; /* scheduler blocks here */
-
- struct list_head kqn_readyrxds; /* rxds full of data */
- struct list_head kqn_donetxds; /* completed transmits */
- struct list_head kqn_delayedtxds; /* delayed transmits */
-
- EP_SYS *kqn_ep; /* elan system */
- EP_NMH *kqn_ep_tx_nmh; /* elan reserved tx vaddrs */
- EP_NMH *kqn_ep_rx_nmh; /* elan reserved rx vaddrs */
- EP_XMTR *kqn_eptx; /* elan transmitter */
- EP_RCVR *kqn_eprx_small; /* elan receiver (small messages) */
- EP_RCVR *kqn_eprx_large; /* elan receiver (large messages) */
-
- int kqn_nnodes; /* this cluster's size */
- int kqn_elanid; /* this nodes's elan ID */
-
- EP_STATUSBLK kqn_rpc_success; /* preset RPC reply status blocks */
- EP_STATUSBLK kqn_rpc_failed;
- EP_STATUSBLK kqn_rpc_version; /* reply to future version query */
- EP_STATUSBLK kqn_rpc_magic; /* reply to future version query */
+ char kqn_init; /* what's been initialised */
+ char kqn_shuttingdown;/* I'm trying to shut down */
+ cfs_atomic_t kqn_nthreads; /* # threads running */
+ lnet_ni_t *kqn_ni; /* _the_ instance of me */
+
+ kqswnal_rx_t *kqn_rxds; /* stack of all the receive descriptors */
+ kqswnal_tx_t *kqn_txds; /* stack of all the transmit descriptors */
+
+ cfs_list_t kqn_idletxds; /* transmit descriptors free to use */
+ cfs_list_t kqn_activetxds; /* transmit descriptors being used */
+ cfs_spinlock_t kqn_idletxd_lock; /* serialise idle txd access */
+ cfs_atomic_t kqn_pending_txs;/* # transmits being prepped */
+
+ cfs_spinlock_t kqn_sched_lock; /* serialise packet schedulers */
+ cfs_waitq_t kqn_sched_waitq;/* scheduler blocks here */
+
+ cfs_list_t kqn_readyrxds; /* rxds full of data */
+ cfs_list_t kqn_donetxds; /* completed transmits */
+ cfs_list_t kqn_delayedtxds;/* delayed transmits */
+
+ EP_SYS *kqn_ep; /* elan system */
+ EP_NMH *kqn_ep_tx_nmh; /* elan reserved tx vaddrs */
+ EP_NMH *kqn_ep_rx_nmh; /* elan reserved rx vaddrs */
+ EP_XMTR *kqn_eptx; /* elan transmitter */
+ EP_RCVR *kqn_eprx_small; /* elan receiver (small messages) */
+ EP_RCVR *kqn_eprx_large; /* elan receiver (large messages) */
+
+ int kqn_nnodes; /* this cluster's size */
+ int kqn_elanid; /* this nodes's elan ID */
+
+ EP_STATUSBLK kqn_rpc_success;/* preset RPC reply status blocks */
+ EP_STATUSBLK kqn_rpc_failed;
+ EP_STATUSBLK kqn_rpc_version;/* reply to future version query */
+ EP_STATUSBLK kqn_rpc_magic; /* reply to future version query */
} kqswnal_data_t;
/* kqn_init state */
static inline void kqswnal_rx_decref (kqswnal_rx_t *krx)
{
- LASSERT (atomic_read (&krx->krx_refcount) > 0);
- if (atomic_dec_and_test (&krx->krx_refcount))
+ LASSERT (cfs_atomic_read (&krx->krx_refcount) > 0);
+ if (cfs_atomic_dec_and_test (&krx->krx_refcount))
kqswnal_rx_done(krx);
}
kqswnal_unmap_tx (ktx); /* release temporary mappings */
ktx->ktx_state = KTX_IDLE;
- spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
- list_del (&ktx->ktx_list); /* take off active list */
- list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
+ cfs_list_del (&ktx->ktx_list); /* take off active list */
+ cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
- spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+ cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
}
kqswnal_tx_t *
unsigned long flags;
kqswnal_tx_t *ktx;
- spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
if (kqswnal_data.kqn_shuttingdown ||
- list_empty (&kqswnal_data.kqn_idletxds)) {
- spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+ cfs_list_empty (&kqswnal_data.kqn_idletxds)) {
+ cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock,
+ flags);
return NULL;
}
- ktx = list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t, ktx_list);
- list_del (&ktx->ktx_list);
+ ktx = cfs_list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t,
+ ktx_list);
+ cfs_list_del (&ktx->ktx_list);
- list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
+ cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
ktx->ktx_launcher = current->pid;
- atomic_inc(&kqswnal_data.kqn_pending_txs);
+ cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
- spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+ cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
/* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
LASSERT (ktx->ktx_nmappedpages == 0);
int status0 = 0;
int status1 = 0;
kqswnal_rx_t *krx;
-
- LASSERT (!in_interrupt());
-
+
+ LASSERT (!cfs_in_interrupt());
+
if (ktx->ktx_status == -EHOSTDOWN)
kqswnal_notify_peer_down(ktx);
ktx->ktx_status = status;
- if (!in_interrupt()) {
+ if (!cfs_in_interrupt()) {
kqswnal_tx_done_in_thread_context(ktx);
return;
}
/* Complete the send in thread context */
- spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
-
- list_add_tail(&ktx->ktx_schedlist,
- &kqswnal_data.kqn_donetxds);
- wake_up(&kqswnal_data.kqn_sched_waitq);
-
- spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
+
+ cfs_list_add_tail(&ktx->ktx_schedlist,
+ &kqswnal_data.kqn_donetxds);
+ cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+
+ cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
static void
kqswnal_launch (kqswnal_tx_t *ktx)
{
/* Don't block for transmit descriptor if we're in interrupt context */
- int attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
+ int attr = cfs_in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
int dest = kqswnal_nid2elanid (ktx->ktx_nid);
unsigned long flags;
int rc;
kqswnal_txhandler, ktx,
NULL, ktx->ktx_frags, ktx->ktx_nfrag);
break;
-
+
default:
LBUG();
rc = -EINVAL; /* no compiler warning please */
return (0);
case EP_ENOMEM: /* can't allocate ep txd => queue for later */
- spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
- list_add_tail (&ktx->ktx_schedlist, &kqswnal_data.kqn_delayedtxds);
- wake_up (&kqswnal_data.kqn_sched_waitq);
+ cfs_list_add_tail (&ktx->ktx_schedlist,
+ &kqswnal_data.kqn_delayedtxds);
+ cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
- spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock,
+ flags);
return (0);
default: /* fatal error */
- CDEBUG (D_NETERROR, "Tx to %s failed: %d\n", libcfs_nid2str(ktx->ktx_nid), rc);
+ CDEBUG (D_NETERROR, "Tx to %s failed: %d\n",
+ libcfs_nid2str(ktx->ktx_nid), rc);
kqswnal_notify_peer_down(ktx);
return (-EHOSTUNREACH);
}
ktx->ktx_args[0] = krx;
ktx->ktx_args[1] = lntmsg;
- LASSERT (atomic_read(&krx->krx_refcount) > 0);
+ LASSERT (cfs_atomic_read(&krx->krx_refcount) > 0);
/* Take an extra ref for the completion callback */
- atomic_inc(&krx->krx_refcount);
+ cfs_atomic_inc(&krx->krx_refcount);
/* Map on the rail the RPC prefers */
ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx,
kqswnal_put_idle_tx (ktx);
}
- atomic_dec(&kqswnal_data.kqn_pending_txs);
+ cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
return (rc);
}
LASSERT (payload_niov <= LNET_MAX_IOV);
/* It must be OK to kmap() if required */
- LASSERT (payload_kiov == NULL || !in_interrupt ());
+ LASSERT (payload_kiov == NULL || !cfs_in_interrupt ());
/* payload is either all vaddrs or all pages */
LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
}
- atomic_dec(&kqswnal_data.kqn_pending_txs);
+ cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
return (rc == 0 ? 0 : -EIO);
}
void
kqswnal_requeue_rx (kqswnal_rx_t *krx)
{
- LASSERT (atomic_read(&krx->krx_refcount) == 0);
+ LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
LASSERT (!krx->krx_rpc_reply_needed);
krx->krx_state = KRX_POSTED;
{
int rc;
- LASSERT (atomic_read(&krx->krx_refcount) == 0);
+ LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
if (krx->krx_rpc_reply_needed) {
/* We've not completed the peer's RPC yet... */
krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
rc = ep_complete_rpc(krx->krx_rxd,
kqswnal_rpc_complete, krx,
int nob;
int rc;
- LASSERT (atomic_read(&krx->krx_refcount) == 1);
+ LASSERT (cfs_atomic_read(&krx->krx_refcount) == 1);
if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) {
CERROR("Short message %d received from %s\n",
/* Default to failure if an RPC reply is requested but not handled */
krx->krx_rpc_reply.msg.status = -EPROTO;
- atomic_set (&krx->krx_refcount, 1);
+ cfs_atomic_set (&krx->krx_refcount, 1);
if (status != EP_SUCCESS) {
/* receives complete with failure when receiver is removed */
return;
}
- if (!in_interrupt()) {
+ if (!cfs_in_interrupt()) {
kqswnal_parse(krx);
return;
}
- spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
- list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
- wake_up (&kqswnal_data.kqn_sched_waitq);
+ cfs_list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
+ cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
- spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
}
int
int msg_offset;
int rc;
- LASSERT (!in_interrupt ()); /* OK to map */
+ LASSERT (!cfs_in_interrupt ()); /* OK to map */
/* Either all pages or all vaddrs */
LASSERT (!(kiov != NULL && iov != NULL));
int
kqswnal_thread_start (int (*fn)(void *arg), void *arg)
{
- long pid = kernel_thread (fn, arg, 0);
+ long pid = cfs_kernel_thread (fn, arg, 0);
if (pid < 0)
return ((int)pid);
- atomic_inc (&kqswnal_data.kqn_nthreads);
+ cfs_atomic_inc (&kqswnal_data.kqn_nthreads);
return (0);
}
void
kqswnal_thread_fini (void)
{
- atomic_dec (&kqswnal_data.kqn_nthreads);
+ cfs_atomic_dec (&kqswnal_data.kqn_nthreads);
}
int
cfs_daemonize ("kqswnal_sched");
cfs_block_allsigs ();
-
- spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
for (;;)
{
did_something = 0;
- if (!list_empty (&kqswnal_data.kqn_readyrxds))
+ if (!cfs_list_empty (&kqswnal_data.kqn_readyrxds))
{
- krx = list_entry(kqswnal_data.kqn_readyrxds.next,
- kqswnal_rx_t, krx_list);
- list_del (&krx->krx_list);
- spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
- flags);
+ krx = cfs_list_entry(kqswnal_data.kqn_readyrxds.next,
+ kqswnal_rx_t, krx_list);
+ cfs_list_del (&krx->krx_list);
+ cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ flags);
LASSERT (krx->krx_state == KRX_PARSE);
kqswnal_parse (krx);
did_something = 1;
- spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+ flags);
}
- if (!list_empty (&kqswnal_data.kqn_donetxds))
+ if (!cfs_list_empty (&kqswnal_data.kqn_donetxds))
{
- ktx = list_entry(kqswnal_data.kqn_donetxds.next,
- kqswnal_tx_t, ktx_schedlist);
- list_del_init (&ktx->ktx_schedlist);
- spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
- flags);
+ ktx = cfs_list_entry(kqswnal_data.kqn_donetxds.next,
+ kqswnal_tx_t, ktx_schedlist);
+ cfs_list_del_init (&ktx->ktx_schedlist);
+ cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ flags);
kqswnal_tx_done_in_thread_context(ktx);
did_something = 1;
- spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+ flags);
}
- if (!list_empty (&kqswnal_data.kqn_delayedtxds))
+ if (!cfs_list_empty (&kqswnal_data.kqn_delayedtxds))
{
- ktx = list_entry(kqswnal_data.kqn_delayedtxds.next,
- kqswnal_tx_t, ktx_schedlist);
- list_del_init (&ktx->ktx_schedlist);
- spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
- flags);
+ ktx = cfs_list_entry(kqswnal_data.kqn_delayedtxds.next,
+ kqswnal_tx_t, ktx_schedlist);
+ cfs_list_del_init (&ktx->ktx_schedlist);
+ cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ flags);
rc = kqswnal_launch (ktx);
if (rc != 0) {
libcfs_nid2str(ktx->ktx_nid), rc);
kqswnal_tx_done (ktx, rc);
}
- atomic_dec (&kqswnal_data.kqn_pending_txs);
+ cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
did_something = 1;
- spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+ flags);
}
/* nothing to do or hogging CPU */
if (!did_something || counter++ == KQSW_RESCHED) {
- spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
- flags);
+ cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ flags);
counter = 0;
if (!did_something) {
if (kqswnal_data.kqn_shuttingdown == 2) {
- /* We only exit in stage 2 of shutdown when
- * there's nothing left to do */
+ /* We only exit in stage 2 of shutdown
+ * when there's nothing left to do */
break;
}
- rc = wait_event_interruptible_exclusive (
+ cfs_wait_event_interruptible_exclusive (
kqswnal_data.kqn_sched_waitq,
kqswnal_data.kqn_shuttingdown == 2 ||
- !list_empty(&kqswnal_data.kqn_readyrxds) ||
- !list_empty(&kqswnal_data.kqn_donetxds) ||
- !list_empty(&kqswnal_data.kqn_delayedtxds));
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_readyrxds) ||
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_donetxds) ||
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_delayedtxds, rc));
LASSERT (rc == 0);
} else if (need_resched())
- schedule ();
+ cfs_schedule ();
- spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+ flags);
}
}
kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
{
kra_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
+ cfs_list_t *ctmp;
+ cfs_list_t *cnxt;
int loopback;
int count = 0;
loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
- list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
- conn = list_entry(ctmp, kra_conn_t, rac_list);
+ cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
+ conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
if (conn == newconn)
continue;
kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
{
kra_conn_t *conn;
- struct list_head *tmp;
+ cfs_list_t *tmp;
int loopback;
loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
- list_for_each(tmp, &peer->rap_conns) {
- conn = list_entry(tmp, kra_conn_t, rac_list);
+ cfs_list_for_each(tmp, &peer->rap_conns) {
+ conn = cfs_list_entry(tmp, kra_conn_t, rac_list);
/* 'newconn' is from an earlier version of 'peer'!!! */
if (newconn->rac_peerstamp < conn->rac_peerstamp)
{
unsigned long flags;
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
conn->rac_my_connstamp = kranal_data.kra_connstamp++;
conn->rac_cqid = kranal_data.kra_next_cqid++;
} while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
int
kra_conn_t *conn;
RAP_RETURN rrc;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LIBCFS_ALLOC(conn, sizeof(*conn));
if (conn == NULL)
return -ENOMEM;
memset(conn, 0, sizeof(*conn));
- atomic_set(&conn->rac_refcount, 1);
- INIT_LIST_HEAD(&conn->rac_list);
- INIT_LIST_HEAD(&conn->rac_hashlist);
- INIT_LIST_HEAD(&conn->rac_schedlist);
- INIT_LIST_HEAD(&conn->rac_fmaq);
- INIT_LIST_HEAD(&conn->rac_rdmaq);
- INIT_LIST_HEAD(&conn->rac_replyq);
- spin_lock_init(&conn->rac_lock);
+ cfs_atomic_set(&conn->rac_refcount, 1);
+ CFS_INIT_LIST_HEAD(&conn->rac_list);
+ CFS_INIT_LIST_HEAD(&conn->rac_hashlist);
+ CFS_INIT_LIST_HEAD(&conn->rac_schedlist);
+ CFS_INIT_LIST_HEAD(&conn->rac_fmaq);
+ CFS_INIT_LIST_HEAD(&conn->rac_rdmaq);
+ CFS_INIT_LIST_HEAD(&conn->rac_replyq);
+ cfs_spin_lock_init(&conn->rac_lock);
kranal_set_conn_uniqueness(conn);
return -ENETDOWN;
}
- atomic_inc(&kranal_data.kra_nconns);
+ cfs_atomic_inc(&kranal_data.kra_nconns);
*connp = conn;
return 0;
}
{
RAP_RETURN rrc;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (!conn->rac_scheduled);
- LASSERT (list_empty(&conn->rac_list));
- LASSERT (list_empty(&conn->rac_hashlist));
- LASSERT (list_empty(&conn->rac_schedlist));
- LASSERT (atomic_read(&conn->rac_refcount) == 0);
- LASSERT (list_empty(&conn->rac_fmaq));
- LASSERT (list_empty(&conn->rac_rdmaq));
- LASSERT (list_empty(&conn->rac_replyq));
+ LASSERT (cfs_list_empty(&conn->rac_list));
+ LASSERT (cfs_list_empty(&conn->rac_hashlist));
+ LASSERT (cfs_list_empty(&conn->rac_schedlist));
+ LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
+ LASSERT (cfs_list_empty(&conn->rac_fmaq));
+ LASSERT (cfs_list_empty(&conn->rac_rdmaq));
+ LASSERT (cfs_list_empty(&conn->rac_replyq));
rrc = RapkDestroyRi(conn->rac_device->rad_handle,
conn->rac_rihandle);
kranal_peer_decref(conn->rac_peer);
LIBCFS_FREE(conn, sizeof(*conn));
- atomic_dec(&kranal_data.kra_nconns);
+ cfs_atomic_dec(&kranal_data.kra_nconns);
}
void
kranal_terminate_conn_locked (kra_conn_t *conn)
{
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
- LASSERT (!list_empty(&conn->rac_hashlist));
- LASSERT (list_empty(&conn->rac_list));
+ LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+ LASSERT (cfs_list_empty(&conn->rac_list));
/* Remove from conn hash table: no new callbacks */
- list_del_init(&conn->rac_hashlist);
+ cfs_list_del_init(&conn->rac_hashlist);
kranal_conn_decref(conn);
conn->rac_state = RANAL_CONN_CLOSED;
"closing conn to %s: error %d\n",
libcfs_nid2str(peer->rap_nid), error);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
- LASSERT (!list_empty(&conn->rac_hashlist));
- LASSERT (!list_empty(&conn->rac_list));
+ LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+ LASSERT (!cfs_list_empty(&conn->rac_list));
- list_del_init(&conn->rac_list);
+ cfs_list_del_init(&conn->rac_list);
- if (list_empty(&peer->rap_conns) &&
+ if (cfs_list_empty(&peer->rap_conns) &&
peer->rap_persistence == 0) {
/* Non-persistent peer with no more conns... */
kranal_unlink_peer_locked(peer);
* RDMA. Otherwise if we wait for the full timeout we can also be sure
* all RDMA has stopped. */
conn->rac_last_rx = jiffies;
- mb();
+ cfs_mb();
conn->rac_state = RANAL_CONN_CLOSING;
kranal_schedule_conn(conn); /* schedule sending CLOSE */
unsigned long flags;
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (conn->rac_state == RANAL_CONN_ESTABLISHED)
kranal_close_conn_locked(conn, error);
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
int
/* Schedule conn on rad_new_conns */
kranal_conn_addref(conn);
- spin_lock_irqsave(&dev->rad_lock, flags);
- list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
- wake_up(&dev->rad_waitq);
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
+ cfs_waitq_signal(&dev->rad_waitq);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
rrc = RapkWaitToConnect(conn->rac_rihandle);
if (rrc != RAP_SUCCESS) {
{
kra_peer_t *peer2;
kra_tx_t *tx;
- lnet_nid_t peer_nid;
- lnet_nid_t dst_nid;
+ lnet_nid_t peer_nid;
+ lnet_nid_t dst_nid;
unsigned long flags;
kra_conn_t *conn;
int rc;
if (rc != 0)
return rc;
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (!kranal_peer_active(peer)) {
/* raced with peer getting unlinked */
- write_unlock_irqrestore(&kranal_data.kra_global_lock,
- flags);
+ cfs_write_unlock_irqrestore(&kranal_data. \
+ kra_global_lock,
+ flags);
kranal_conn_decref(conn);
return -ESTALE;
}
return -ENOMEM;
}
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
peer2 = kranal_find_peer_locked(peer_nid);
if (peer2 == NULL) {
* this while holding the global lock, to synch with connection
* destruction on NID change. */
if (kranal_data.kra_ni->ni_nid != dst_nid) {
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
libcfs_nid2str(peer_nid), libcfs_nid2str(dst_nid),
* _don't_ have any blocked txs to complete with failure. */
rc = kranal_conn_isdup_locked(peer, conn);
if (rc != 0) {
- LASSERT (!list_empty(&peer->rap_conns));
- LASSERT (list_empty(&peer->rap_tx_queue));
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ LASSERT (!cfs_list_empty(&peer->rap_conns));
+ LASSERT (cfs_list_empty(&peer->rap_tx_queue));
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
CWARN("Not creating duplicate connection to %s: %d\n",
libcfs_nid2str(peer_nid), rc);
rc = 0;
if (new_peer) {
/* peer table takes my ref on the new peer */
- list_add_tail(&peer->rap_list,
- kranal_nid2peerlist(peer_nid));
+ cfs_list_add_tail(&peer->rap_list,
+ kranal_nid2peerlist(peer_nid));
}
/* initialise timestamps before reaper looks at them */
kranal_peer_addref(peer); /* +1 ref for conn */
conn->rac_peer = peer;
- list_add_tail(&conn->rac_list, &peer->rap_conns);
+ cfs_list_add_tail(&conn->rac_list, &peer->rap_conns);
kranal_conn_addref(conn); /* +1 ref for conn table */
- list_add_tail(&conn->rac_hashlist,
- kranal_cqid2connlist(conn->rac_cqid));
+ cfs_list_add_tail(&conn->rac_hashlist,
+ kranal_cqid2connlist(conn->rac_cqid));
/* Schedule all packets blocking for a connection */
- while (!list_empty(&peer->rap_tx_queue)) {
- tx = list_entry(peer->rap_tx_queue.next,
- kra_tx_t, tx_list);
+ while (!cfs_list_empty(&peer->rap_tx_queue)) {
+ tx = cfs_list_entry(peer->rap_tx_queue.next,
+ kra_tx_t, tx_list);
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
kranal_post_fma(conn, tx);
}
nstale = kranal_close_stale_conns_locked(peer, conn);
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* CAVEAT EMPTOR: passive peer can disappear NOW */
{
kra_tx_t *tx;
unsigned long flags;
- struct list_head zombies;
+ cfs_list_t zombies;
int rc;
LASSERT (peer->rap_connecting);
CDEBUG(D_NET, "Done handshake %s:%d \n",
libcfs_nid2str(peer->rap_nid), rc);
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
LASSERT (peer->rap_connecting);
peer->rap_connecting = 0;
if (rc == 0) {
/* kranal_conn_handshake() queues blocked txs immediately on
* success to avoid messages jumping the queue */
- LASSERT (list_empty(&peer->rap_tx_queue));
+ LASSERT (cfs_list_empty(&peer->rap_tx_queue));
peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
return;
}
MIN(peer->rap_reconnect_interval,
*kranal_tunables.kra_max_reconnect_interval);
- peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ;
+ peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval *
+ CFS_HZ;
/* Grab all blocked packets while we have the global lock */
- list_add(&zombies, &peer->rap_tx_queue);
- list_del_init(&peer->rap_tx_queue);
+ cfs_list_add(&zombies, &peer->rap_tx_queue);
+ cfs_list_del_init(&peer->rap_tx_queue);
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
- if (list_empty(&zombies))
+ if (cfs_list_empty(&zombies))
return;
CDEBUG(D_NETERROR, "Dropping packets for %s: connection failed\n",
libcfs_nid2str(peer->rap_nid));
do {
- tx = list_entry(zombies.next, kra_tx_t, tx_list);
+ tx = cfs_list_entry(zombies.next, kra_tx_t, tx_list);
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
kranal_tx_done(tx, -EHOSTUNREACH);
- } while (!list_empty(&zombies));
+ } while (!cfs_list_empty(&zombies));
}
void
ras->ras_sock = sock;
- spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
- wake_up(&kranal_data.kra_connd_waitq);
+ cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
+ cfs_waitq_signal(&kranal_data.kra_connd_waitq);
- spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
return 0;
}
memset(peer, 0, sizeof(*peer)); /* zero flags etc */
peer->rap_nid = nid;
- atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
+ cfs_atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
- INIT_LIST_HEAD(&peer->rap_list);
- INIT_LIST_HEAD(&peer->rap_connd_list);
- INIT_LIST_HEAD(&peer->rap_conns);
- INIT_LIST_HEAD(&peer->rap_tx_queue);
+ CFS_INIT_LIST_HEAD(&peer->rap_list);
+ CFS_INIT_LIST_HEAD(&peer->rap_connd_list);
+ CFS_INIT_LIST_HEAD(&peer->rap_conns);
+ CFS_INIT_LIST_HEAD(&peer->rap_tx_queue);
peer->rap_reconnect_interval = 0; /* OK to connect at any time */
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (kranal_data.kra_nonewpeers) {
/* shutdown has started already */
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
-
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
+
LIBCFS_FREE(peer, sizeof(*peer));
CERROR("Can't create peer: network shutdown\n");
return -ESHUTDOWN;
}
- atomic_inc(&kranal_data.kra_npeers);
+ cfs_atomic_inc(&kranal_data.kra_npeers);
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
*peerp = peer;
return 0;
CDEBUG(D_NET, "peer %s %p deleted\n",
libcfs_nid2str(peer->rap_nid), peer);
- LASSERT (atomic_read(&peer->rap_refcount) == 0);
+ LASSERT (cfs_atomic_read(&peer->rap_refcount) == 0);
LASSERT (peer->rap_persistence == 0);
LASSERT (!kranal_peer_active(peer));
LASSERT (!peer->rap_connecting);
- LASSERT (list_empty(&peer->rap_conns));
- LASSERT (list_empty(&peer->rap_tx_queue));
- LASSERT (list_empty(&peer->rap_connd_list));
+ LASSERT (cfs_list_empty(&peer->rap_conns));
+ LASSERT (cfs_list_empty(&peer->rap_tx_queue));
+ LASSERT (cfs_list_empty(&peer->rap_connd_list));
LIBCFS_FREE(peer, sizeof(*peer));
* they are destroyed, so we can be assured that _all_ state to do
* with this peer has been cleaned up when its refcount drops to
* zero. */
- atomic_dec(&kranal_data.kra_npeers);
+ cfs_atomic_dec(&kranal_data.kra_npeers);
}
kra_peer_t *
kranal_find_peer_locked (lnet_nid_t nid)
{
- struct list_head *peer_list = kranal_nid2peerlist(nid);
- struct list_head *tmp;
+ cfs_list_t *peer_list = kranal_nid2peerlist(nid);
+ cfs_list_t *tmp;
kra_peer_t *peer;
- list_for_each (tmp, peer_list) {
+ cfs_list_for_each (tmp, peer_list) {
- peer = list_entry(tmp, kra_peer_t, rap_list);
+ peer = cfs_list_entry(tmp, kra_peer_t, rap_list);
LASSERT (peer->rap_persistence > 0 || /* persistent peer */
- !list_empty(&peer->rap_conns)); /* active conn */
+ !cfs_list_empty(&peer->rap_conns)); /* active conn */
if (peer->rap_nid != nid)
continue;
CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
peer, libcfs_nid2str(nid),
- atomic_read(&peer->rap_refcount));
+ cfs_atomic_read(&peer->rap_refcount));
return peer;
}
return NULL;
{
kra_peer_t *peer;
- read_lock(&kranal_data.kra_global_lock);
+ cfs_read_lock(&kranal_data.kra_global_lock);
peer = kranal_find_peer_locked(nid);
if (peer != NULL) /* +1 ref for caller? */
kranal_peer_addref(peer);
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
return peer;
}
kranal_unlink_peer_locked (kra_peer_t *peer)
{
LASSERT (peer->rap_persistence == 0);
- LASSERT (list_empty(&peer->rap_conns));
+ LASSERT (cfs_list_empty(&peer->rap_conns));
LASSERT (kranal_peer_active(peer));
- list_del_init(&peer->rap_list);
+ cfs_list_del_init(&peer->rap_list);
/* lose peerlist's ref */
kranal_peer_decref(peer);
int *persistencep)
{
kra_peer_t *peer;
- struct list_head *ptmp;
+ cfs_list_t *ptmp;
int i;
- read_lock(&kranal_data.kra_global_lock);
+ cfs_read_lock(&kranal_data.kra_global_lock);
for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
- list_for_each(ptmp, &kranal_data.kra_peers[i]) {
+ cfs_list_for_each(ptmp, &kranal_data.kra_peers[i]) {
- peer = list_entry(ptmp, kra_peer_t, rap_list);
+ peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
LASSERT (peer->rap_persistence > 0 ||
- !list_empty(&peer->rap_conns));
+ !cfs_list_empty(&peer->rap_conns));
if (index-- > 0)
continue;
*portp = peer->rap_port;
*persistencep = peer->rap_persistence;
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
return 0;
}
}
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
return -ENOENT;
}
if (rc != 0)
return rc;
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
peer2 = kranal_find_peer_locked(nid);
if (peer2 != NULL) {
peer = peer2;
} else {
/* peer table takes existing ref on peer */
- list_add_tail(&peer->rap_list,
+ cfs_list_add_tail(&peer->rap_list,
kranal_nid2peerlist(nid));
}
peer->rap_port = port;
peer->rap_persistence++;
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
return 0;
}
void
kranal_del_peer_locked (kra_peer_t *peer)
{
- struct list_head *ctmp;
- struct list_head *cnxt;
+ cfs_list_t *ctmp;
+ cfs_list_t *cnxt;
kra_conn_t *conn;
peer->rap_persistence = 0;
- if (list_empty(&peer->rap_conns)) {
+ if (cfs_list_empty(&peer->rap_conns)) {
kranal_unlink_peer_locked(peer);
} else {
- list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
- conn = list_entry(ctmp, kra_conn_t, rac_list);
+ cfs_list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
+ conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
kranal_close_conn_locked(conn, 0);
}
kranal_del_peer (lnet_nid_t nid)
{
unsigned long flags;
- struct list_head *ptmp;
- struct list_head *pnxt;
+ cfs_list_t *ptmp;
+ cfs_list_t *pnxt;
kra_peer_t *peer;
int lo;
int hi;
int i;
int rc = -ENOENT;
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (nid != LNET_NID_ANY)
lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
- peer = list_entry(ptmp, kra_peer_t, rap_list);
+ cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
+ peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
LASSERT (peer->rap_persistence > 0 ||
- !list_empty(&peer->rap_conns));
+ !cfs_list_empty(&peer->rap_conns));
if (!(nid == LNET_NID_ANY || peer->rap_nid == nid))
continue;
}
}
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
return rc;
}
kranal_get_conn_by_idx (int index)
{
kra_peer_t *peer;
- struct list_head *ptmp;
+ cfs_list_t *ptmp;
kra_conn_t *conn;
- struct list_head *ctmp;
+ cfs_list_t *ctmp;
int i;
- read_lock (&kranal_data.kra_global_lock);
+ cfs_read_lock (&kranal_data.kra_global_lock);
for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
- list_for_each (ptmp, &kranal_data.kra_peers[i]) {
+ cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) {
- peer = list_entry(ptmp, kra_peer_t, rap_list);
+ peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
LASSERT (peer->rap_persistence > 0 ||
- !list_empty(&peer->rap_conns));
+ !cfs_list_empty(&peer->rap_conns));
- list_for_each (ctmp, &peer->rap_conns) {
+ cfs_list_for_each (ctmp, &peer->rap_conns) {
if (index-- > 0)
continue;
- conn = list_entry(ctmp, kra_conn_t, rac_list);
- CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
+ conn = cfs_list_entry(ctmp, kra_conn_t,
+ rac_list);
+ CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
libcfs_nid2str(conn->rac_peer->rap_nid),
- atomic_read(&conn->rac_refcount));
- atomic_inc(&conn->rac_refcount);
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_atomic_read(&conn->rac_refcount));
+ cfs_atomic_inc(&conn->rac_refcount);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
return conn;
}
}
}
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
return NULL;
}
kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
{
kra_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
+ cfs_list_t *ctmp;
+ cfs_list_t *cnxt;
int count = 0;
- list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
- conn = list_entry(ctmp, kra_conn_t, rac_list);
+ cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
+ conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
count++;
kranal_close_conn_locked(conn, why);
{
unsigned long flags;
kra_peer_t *peer;
- struct list_head *ptmp;
- struct list_head *pnxt;
+ cfs_list_t *ptmp;
+ cfs_list_t *pnxt;
int lo;
int hi;
int i;
int count = 0;
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (nid != LNET_NID_ANY)
lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
+ cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
- peer = list_entry(ptmp, kra_peer_t, rap_list);
+ peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
LASSERT (peer->rap_persistence > 0 ||
- !list_empty(&peer->rap_conns));
+ !cfs_list_empty(&peer->rap_conns));
if (!(nid == LNET_NID_ANY || nid == peer->rap_nid))
continue;
}
}
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* wildcards always succeed */
if (nid == LNET_NID_ANY)
}
void
-kranal_free_txdescs(struct list_head *freelist)
+kranal_free_txdescs(cfs_list_t *freelist)
{
kra_tx_t *tx;
- while (!list_empty(freelist)) {
- tx = list_entry(freelist->next, kra_tx_t, tx_list);
+ while (!cfs_list_empty(freelist)) {
+ tx = cfs_list_entry(freelist->next, kra_tx_t, tx_list);
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
LIBCFS_FREE(tx, sizeof(*tx));
}
}
int
-kranal_alloc_txdescs(struct list_head *freelist, int n)
+kranal_alloc_txdescs(cfs_list_t *freelist, int n)
{
int i;
kra_tx_t *tx;
LASSERT (freelist == &kranal_data.kra_idle_txs);
- LASSERT (list_empty(freelist));
+ LASSERT (cfs_list_empty(freelist));
for (i = 0; i < n; i++) {
tx->tx_buftype = RANAL_BUF_NONE;
tx->tx_msg.ram_type = RANAL_MSG_NONE;
- list_add(&tx->tx_list, freelist);
+ cfs_list_add(&tx->tx_list, freelist);
}
return 0;
void
kranal_device_fini(kra_device_t *dev)
{
- LASSERT (list_empty(&dev->rad_ready_conns));
- LASSERT (list_empty(&dev->rad_new_conns));
+ LASSERT (cfs_list_empty(&dev->rad_ready_conns));
+ LASSERT (cfs_list_empty(&dev->rad_new_conns));
LASSERT (dev->rad_nphysmap == 0);
LASSERT (dev->rad_nppphysmap == 0);
LASSERT (dev->rad_nvirtmap == 0);
LASSERT (dev->rad_nobvirtmap == 0);
-
+
LASSERT(dev->rad_scheduler == NULL);
RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
unsigned long flags;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
LASSERT (ni == kranal_data.kra_ni);
LASSERT (ni->ni_data == &kranal_data);
case RANAL_INIT_ALL:
/* Prevent new peers from being created */
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
kranal_data.kra_nonewpeers = 1;
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
-
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
+
/* Remove all existing peers from the peer table */
kranal_del_peer(LNET_NID_ANY);
/* Wait for pending conn reqs to be handled */
i = 2;
- spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- while (!list_empty(&kranal_data.kra_connd_acceptq)) {
- spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
- flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+ flags);
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
"waiting for conn reqs to clean up\n");
cfs_pause(cfs_time_seconds(1));
- spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+ flags);
}
- spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Wait for all peers to be freed */
i = 2;
- while (atomic_read(&kranal_data.kra_npeers) != 0) {
+ while (cfs_atomic_read(&kranal_data.kra_npeers) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
"waiting for %d peers to close down\n",
- atomic_read(&kranal_data.kra_npeers));
+ cfs_atomic_read(&kranal_data.kra_npeers));
cfs_pause(cfs_time_seconds(1));
}
/* fall through */
* while there are still active connds, but these will be temporary
* since peer creation always fails after the listener has started to
* shut down. */
- LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
+ LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
/* Flag threads to terminate */
kranal_data.kra_shutdown = 1;
for (i = 0; i < kranal_data.kra_ndevs; i++) {
kra_device_t *dev = &kranal_data.kra_devices[i];
- spin_lock_irqsave(&dev->rad_lock, flags);
- wake_up(&dev->rad_waitq);
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_waitq_signal(&dev->rad_waitq);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
}
- spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
- wake_up_all(&kranal_data.kra_reaper_waitq);
- spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
- LASSERT (list_empty(&kranal_data.kra_connd_peers));
- spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- wake_up_all(&kranal_data.kra_connd_waitq);
- spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
+ cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Wait for threads to exit */
i = 2;
- while (atomic_read(&kranal_data.kra_nthreads) != 0) {
+ while (cfs_atomic_read(&kranal_data.kra_nthreads) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
- atomic_read(&kranal_data.kra_nthreads));
+ cfs_atomic_read(&kranal_data.kra_nthreads));
cfs_pause(cfs_time_seconds(1));
}
- LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
+ LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
if (kranal_data.kra_peers != NULL) {
for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
- LASSERT (list_empty(&kranal_data.kra_peers[i]));
+ LASSERT (cfs_list_empty(&kranal_data.kra_peers[i]));
LIBCFS_FREE(kranal_data.kra_peers,
- sizeof (struct list_head) *
+ sizeof (cfs_list_t) *
kranal_data.kra_peer_hash_size);
}
- LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
+ LASSERT (cfs_atomic_read(&kranal_data.kra_nconns) == 0);
if (kranal_data.kra_conns != NULL) {
for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
- LASSERT (list_empty(&kranal_data.kra_conns[i]));
+ LASSERT (cfs_list_empty(&kranal_data.kra_conns[i]));
LIBCFS_FREE(kranal_data.kra_conns,
- sizeof (struct list_head) *
+ sizeof (cfs_list_t) *
kranal_data.kra_conn_hash_size);
}
kranal_free_txdescs(&kranal_data.kra_idle_txs);
CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
kranal_data.kra_init = RANAL_INIT_NOTHING;
PORTAL_MODULE_UNUSE;
kranal_startup (lnet_ni_t *ni)
{
struct timeval tv;
- int pkmem = atomic_read(&libcfs_kmemory);
+ int pkmem = cfs_atomic_read(&libcfs_kmemory);
int rc;
int i;
kra_device_t *dev;
* initialised with seconds + microseconds at startup time. So we
* rely on NOT creating connections more frequently on average than
* 1MHz to ensure we don't use old connstamps when we reboot. */
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
kranal_data.kra_connstamp =
kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
- rwlock_init(&kranal_data.kra_global_lock);
+ cfs_rwlock_init(&kranal_data.kra_global_lock);
for (i = 0; i < RANAL_MAXDEVS; i++ ) {
kra_device_t *dev = &kranal_data.kra_devices[i];
dev->rad_idx = i;
- INIT_LIST_HEAD(&dev->rad_ready_conns);
- INIT_LIST_HEAD(&dev->rad_new_conns);
- init_waitqueue_head(&dev->rad_waitq);
- spin_lock_init(&dev->rad_lock);
+ CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
+ CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
+ cfs_waitq_init(&dev->rad_waitq);
+ cfs_spin_lock_init(&dev->rad_lock);
}
- kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
- init_waitqueue_head(&kranal_data.kra_reaper_waitq);
- spin_lock_init(&kranal_data.kra_reaper_lock);
+ kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
+ cfs_waitq_init(&kranal_data.kra_reaper_waitq);
+ cfs_spin_lock_init(&kranal_data.kra_reaper_lock);
- INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
- INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
- init_waitqueue_head(&kranal_data.kra_connd_waitq);
- spin_lock_init(&kranal_data.kra_connd_lock);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
+ cfs_waitq_init(&kranal_data.kra_connd_waitq);
+ cfs_spin_lock_init(&kranal_data.kra_connd_lock);
- INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
- spin_lock_init(&kranal_data.kra_tx_lock);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
+ cfs_spin_lock_init(&kranal_data.kra_tx_lock);
/* OK to call kranal_api_shutdown() to cleanup now */
kranal_data.kra_init = RANAL_INIT_DATA;
kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
LIBCFS_ALLOC(kranal_data.kra_peers,
- sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
+ sizeof(cfs_list_t) *
+ kranal_data.kra_peer_hash_size);
if (kranal_data.kra_peers == NULL)
goto failed;
for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
- INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
LIBCFS_ALLOC(kranal_data.kra_conns,
- sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
+ sizeof(cfs_list_t) *
+ kranal_data.kra_conn_hash_size);
if (kranal_data.kra_conns == NULL)
goto failed;
for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
- INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs,
*kranal_tunables.kra_ntx);
typedef struct
{
- RAP_PVOID rad_handle; /* device handle */
- RAP_PVOID rad_fma_cqh; /* FMA completion queue handle */
- RAP_PVOID rad_rdma_cqh; /* rdma completion queue handle */
- int rad_id; /* device id */
- int rad_idx; /* index in kra_devices */
- int rad_ready; /* set by device callback */
- struct list_head rad_ready_conns;/* connections ready to tx/rx */
- struct list_head rad_new_conns; /* new connections to complete */
- wait_queue_head_t rad_waitq; /* scheduler waits here */
- spinlock_t rad_lock; /* serialise */
- void *rad_scheduler; /* scheduling thread */
- unsigned int rad_nphysmap; /* # phys mappings */
- unsigned int rad_nppphysmap; /* # phys pages mapped */
- unsigned int rad_nvirtmap; /* # virt mappings */
- unsigned long rad_nobvirtmap; /* # virt bytes mapped */
+ RAP_PVOID rad_handle; /* device handle */
+ RAP_PVOID rad_fma_cqh; /* FMA completion queue handle */
+ RAP_PVOID rad_rdma_cqh; /* rdma completion queue handle */
+ int rad_id; /* device id */
+ int rad_idx; /* index in kra_devices */
+ int rad_ready; /* set by device callback */
+ cfs_list_t rad_ready_conns;/* connections ready to tx/rx */
+ cfs_list_t rad_new_conns; /* new connections to complete */
+ cfs_waitq_t rad_waitq; /* scheduler waits here */
+ cfs_spinlock_t rad_lock; /* serialise */
+ void *rad_scheduler; /* scheduling thread */
+ unsigned int rad_nphysmap; /* # phys mappings */
+ unsigned int rad_nppphysmap;/* # phys pages mapped */
+ unsigned int rad_nvirtmap; /* # virt mappings */
+ unsigned long rad_nobvirtmap;/* # virt bytes mapped */
} kra_device_t;
typedef struct
{
- int kra_init; /* initialisation state */
- int kra_shutdown; /* shut down? */
- atomic_t kra_nthreads; /* # live threads */
- lnet_ni_t *kra_ni; /* _the_ nal instance */
-
- kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq etc */
- int kra_ndevs; /* # devices */
-
- rwlock_t kra_global_lock; /* stabilize peer/conn ops */
-
- struct list_head *kra_peers; /* hash table of all my known peers */
- int kra_peer_hash_size; /* size of kra_peers */
- atomic_t kra_npeers; /* # peers extant */
- int kra_nonewpeers; /* prevent new peers */
-
- struct list_head *kra_conns; /* conns hashed by cqid */
- int kra_conn_hash_size; /* size of kra_conns */
- __u64 kra_peerstamp; /* when I started up */
- __u64 kra_connstamp; /* conn stamp generator */
- int kra_next_cqid; /* cqid generator */
- atomic_t kra_nconns; /* # connections extant */
-
- long kra_new_min_timeout; /* minimum timeout on any new conn */
- wait_queue_head_t kra_reaper_waitq; /* reaper sleeps here */
- spinlock_t kra_reaper_lock; /* serialise */
-
- struct list_head kra_connd_peers; /* peers waiting for a connection */
- struct list_head kra_connd_acceptq; /* accepted sockets to handshake */
- wait_queue_head_t kra_connd_waitq; /* connection daemons sleep here */
- spinlock_t kra_connd_lock; /* serialise */
-
- struct list_head kra_idle_txs; /* idle tx descriptors */
- __u64 kra_next_tx_cookie; /* RDMA completion cookie */
- spinlock_t kra_tx_lock; /* serialise */
+ int kra_init; /* initialisation state */
+ int kra_shutdown; /* shut down? */
+ cfs_atomic_t kra_nthreads; /* # live threads */
+ lnet_ni_t *kra_ni; /* _the_ nal instance */
+
+ kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
+ int kra_ndevs; /* # devices */
+
+ cfs_rwlock_t kra_global_lock; /* stabilize peer/conn ops */
+
+ cfs_list_t *kra_peers; /* hash table of all my known peers */
+ int kra_peer_hash_size; /* size of kra_peers */
+ cfs_atomic_t kra_npeers; /* # peers extant */
+ int kra_nonewpeers; /* prevent new peers */
+
+ cfs_list_t *kra_conns; /* conns hashed by cqid */
+ int kra_conn_hash_size; /* size of kra_conns */
+ __u64 kra_peerstamp; /* when I started up */
+ __u64 kra_connstamp; /* conn stamp generator */
+ int kra_next_cqid; /* cqid generator */
+ cfs_atomic_t kra_nconns; /* # connections extant */
+
+ long kra_new_min_timeout; /* minimum timeout on any new conn */
+ cfs_waitq_t kra_reaper_waitq; /* reaper sleeps here */
+ cfs_spinlock_t kra_reaper_lock; /* serialise */
+
+ cfs_list_t kra_connd_peers; /* peers waiting for a connection */
+ cfs_list_t kra_connd_acceptq; /* accepted sockets to handshake */
+ cfs_waitq_t kra_connd_waitq; /* connection daemons sleep here */
+ cfs_spinlock_t kra_connd_lock; /* serialise */
+
+ cfs_list_t kra_idle_txs; /* idle tx descriptors */
+ __u64 kra_next_tx_cookie; /* RDMA completion cookie */
+ cfs_spinlock_t kra_tx_lock; /* serialise */
} kra_data_t;
#define RANAL_INIT_NOTHING 0
#define RANAL_INIT_DATA 1
#define RANAL_INIT_ALL 2
-typedef struct kra_acceptsock /* accepted socket queued for connd */
+typedef struct kra_acceptsock /* accepted socket queued for connd */
{
- struct list_head ras_list; /* queue for attention */
+ cfs_list_t ras_list; /* queue for attention */
struct socket *ras_sock; /* the accepted socket */
} kra_acceptsock_t;
typedef struct kra_tx /* message descriptor */
{
- struct list_head tx_list; /* queue on idle_txs/rac_sendq/rac_waitq */
- struct kra_conn *tx_conn; /* owning conn */
- lnet_msg_t *tx_lntmsg[2]; /* ptl msgs to finalize on completion */
- unsigned long tx_qtime; /* when tx started to wait for something (jiffies) */
- int tx_nob; /* # bytes of payload */
- int tx_buftype; /* payload buffer type */
- void *tx_buffer; /* source/sink buffer */
- int tx_phys_offset; /* first page offset (if phys) */
- int tx_phys_npages; /* # physical pages */
- RAP_PHYS_REGION *tx_phys; /* page descriptors */
- RAP_MEM_KEY tx_map_key; /* mapping key */
- RAP_RDMA_DESCRIPTOR tx_rdma_desc; /* rdma descriptor */
- __u64 tx_cookie; /* identify this tx to peer */
- kra_msg_t tx_msg; /* FMA message buffer */
+ cfs_list_t tx_list; /* queue on idle_txs/rac_sendq/rac_waitq */
+ struct kra_conn *tx_conn; /* owning conn */
+ lnet_msg_t *tx_lntmsg[2]; /* ptl msgs to finalize on completion */
+ unsigned long tx_qtime; /* when tx started to wait for something (jiffies) */
+ int tx_nob; /* # bytes of payload */
+ int tx_buftype; /* payload buffer type */
+ void *tx_buffer; /* source/sink buffer */
+ int tx_phys_offset; /* first page offset (if phys) */
+ int tx_phys_npages; /* # physical pages */
+ RAP_PHYS_REGION *tx_phys; /* page descriptors */
+ RAP_MEM_KEY tx_map_key; /* mapping key */
+ RAP_RDMA_DESCRIPTOR tx_rdma_desc; /* rdma descriptor */
+ __u64 tx_cookie; /* identify this tx to peer */
+ kra_msg_t tx_msg; /* FMA message buffer */
} kra_tx_t;
#define RANAL_BUF_NONE 0 /* buffer type not set */
typedef struct kra_conn
{
struct kra_peer *rac_peer; /* owning peer */
- struct list_head rac_list; /* stash on peer's conn list */
- struct list_head rac_hashlist; /* stash in connection hash table */
- struct list_head rac_schedlist; /* schedule (on rad_???_conns) for attention */
- struct list_head rac_fmaq; /* txs queued for FMA */
- struct list_head rac_rdmaq; /* txs awaiting RDMA completion */
- struct list_head rac_replyq; /* txs awaiting replies */
- __u64 rac_peerstamp; /* peer's unique stamp */
- __u64 rac_peer_connstamp; /* peer's unique connection stamp */
- __u64 rac_my_connstamp; /* my unique connection stamp */
- unsigned long rac_last_tx; /* when I last sent an FMA message (jiffies) */
- unsigned long rac_last_rx; /* when I last received an FMA messages (jiffies) */
- long rac_keepalive; /* keepalive interval (seconds) */
- long rac_timeout; /* infer peer death if no rx for this many seconds */
- __u32 rac_cqid; /* my completion callback id (non-unique) */
- __u32 rac_tx_seq; /* tx msg sequence number */
- __u32 rac_rx_seq; /* rx msg sequence number */
- atomic_t rac_refcount; /* # users */
- unsigned int rac_close_sent; /* I've sent CLOSE */
- unsigned int rac_close_recvd; /* I've received CLOSE */
- unsigned int rac_state; /* connection state */
- unsigned int rac_scheduled; /* being attented to */
- spinlock_t rac_lock; /* serialise */
- kra_device_t *rac_device; /* which device */
- RAP_PVOID rac_rihandle; /* RA endpoint */
- kra_msg_t *rac_rxmsg; /* incoming message (FMA prefix) */
- kra_msg_t rac_msg; /* keepalive/CLOSE message buffer */
+ cfs_list_t rac_list; /* stash on peer's conn list */
+ cfs_list_t rac_hashlist; /* stash in connection hash table */
+ cfs_list_t rac_schedlist; /* schedule (on rad_???_conns) for attention */
+ cfs_list_t rac_fmaq; /* txs queued for FMA */
+ cfs_list_t rac_rdmaq; /* txs awaiting RDMA completion */
+ cfs_list_t rac_replyq; /* txs awaiting replies */
+ __u64 rac_peerstamp; /* peer's unique stamp */
+ __u64 rac_peer_connstamp;/* peer's unique connection stamp */
+ __u64 rac_my_connstamp; /* my unique connection stamp */
+ unsigned long rac_last_tx; /* when I last sent an FMA message (jiffies) */
+ unsigned long rac_last_rx; /* when I last received an FMA messages (jiffies) */
+ long rac_keepalive; /* keepalive interval (seconds) */
+ long rac_timeout; /* infer peer death if no rx for this many seconds */
+ __u32 rac_cqid; /* my completion callback id (non-unique) */
+ __u32 rac_tx_seq; /* tx msg sequence number */
+ __u32 rac_rx_seq; /* rx msg sequence number */
+ cfs_atomic_t rac_refcount; /* # users */
+ unsigned int rac_close_sent; /* I've sent CLOSE */
+ unsigned int rac_close_recvd; /* I've received CLOSE */
+ unsigned int rac_state; /* connection state */
+ unsigned int rac_scheduled; /* being attented to */
+ cfs_spinlock_t rac_lock; /* serialise */
+ kra_device_t *rac_device; /* which device */
+ RAP_PVOID rac_rihandle; /* RA endpoint */
+ kra_msg_t *rac_rxmsg; /* incoming message (FMA prefix) */
+ kra_msg_t rac_msg; /* keepalive/CLOSE message buffer */
} kra_conn_t;
#define RANAL_CONN_ESTABLISHED 0
typedef struct kra_peer
{
- struct list_head rap_list; /* stash on global peer list */
- struct list_head rap_connd_list; /* schedule on kra_connd_peers */
- struct list_head rap_conns; /* all active connections */
- struct list_head rap_tx_queue; /* msgs waiting for a conn */
- lnet_nid_t rap_nid; /* who's on the other end(s) */
- __u32 rap_ip; /* IP address of peer */
- int rap_port; /* port on which peer listens */
- atomic_t rap_refcount; /* # users */
- int rap_persistence; /* "known" peer refs */
- int rap_connecting; /* connection forming */
+ cfs_list_t rap_list; /* stash on global peer list */
+ cfs_list_t rap_connd_list; /* schedule on kra_connd_peers */
+ cfs_list_t rap_conns; /* all active connections */
+ cfs_list_t rap_tx_queue; /* msgs waiting for a conn */
+ lnet_nid_t rap_nid; /* who's on the other end(s) */
+ __u32 rap_ip; /* IP address of peer */
+ int rap_port; /* port on which peer listens */
+ cfs_atomic_t rap_refcount; /* # users */
+ int rap_persistence; /* "known" peer refs */
+ int rap_connecting; /* connection forming */
unsigned long rap_reconnect_time; /* CURRENT_SECONDS when reconnect OK */
unsigned long rap_reconnect_interval; /* exponential backoff */
} kra_peer_t;
kranal_peer_addref(kra_peer_t *peer)
{
CDEBUG(D_NET, "%p->%s\n", peer, libcfs_nid2str(peer->rap_nid));
- LASSERT(atomic_read(&peer->rap_refcount) > 0);
- atomic_inc(&peer->rap_refcount);
+ LASSERT(cfs_atomic_read(&peer->rap_refcount) > 0);
+ cfs_atomic_inc(&peer->rap_refcount);
}
static inline void
kranal_peer_decref(kra_peer_t *peer)
{
CDEBUG(D_NET, "%p->%s\n", peer, libcfs_nid2str(peer->rap_nid));
- LASSERT(atomic_read(&peer->rap_refcount) > 0);
- if (atomic_dec_and_test(&peer->rap_refcount))
+ LASSERT(cfs_atomic_read(&peer->rap_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&peer->rap_refcount))
kranal_destroy_peer(peer);
}
-static inline struct list_head *
+static inline cfs_list_t *
kranal_nid2peerlist (lnet_nid_t nid)
{
unsigned int hash = ((unsigned int)nid) % kranal_data.kra_peer_hash_size;
kranal_peer_active(kra_peer_t *peer)
{
/* Am I in the peer hash table? */
- return (!list_empty(&peer->rap_list));
+ return (!cfs_list_empty(&peer->rap_list));
}
static inline void
{
CDEBUG(D_NET, "%p->%s\n", conn,
libcfs_nid2str(conn->rac_peer->rap_nid));
- LASSERT(atomic_read(&conn->rac_refcount) > 0);
- atomic_inc(&conn->rac_refcount);
+ LASSERT(cfs_atomic_read(&conn->rac_refcount) > 0);
+ cfs_atomic_inc(&conn->rac_refcount);
}
static inline void
{
CDEBUG(D_NET, "%p->%s\n", conn,
libcfs_nid2str(conn->rac_peer->rap_nid));
- LASSERT(atomic_read(&conn->rac_refcount) > 0);
- if (atomic_dec_and_test(&conn->rac_refcount))
+ LASSERT(cfs_atomic_read(&conn->rac_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&conn->rac_refcount))
kranal_destroy_conn(conn);
}
-static inline struct list_head *
+static inline cfs_list_t *
kranal_cqid2connlist (__u32 cqid)
{
unsigned int hash = cqid % kranal_data.kra_conn_hash_size;
static inline kra_conn_t *
kranal_cqid2conn_locked (__u32 cqid)
{
- struct list_head *conns = kranal_cqid2connlist(cqid);
- struct list_head *tmp;
+ cfs_list_t *conns = kranal_cqid2connlist(cqid);
+ cfs_list_t *tmp;
kra_conn_t *conn;
- list_for_each(tmp, conns) {
- conn = list_entry(tmp, kra_conn_t, rac_hashlist);
+ cfs_list_for_each(tmp, conns) {
+ conn = cfs_list_entry(tmp, kra_conn_t, rac_hashlist);
if (conn->rac_cqid == cqid)
return conn;
void kranal_shutdown (lnet_ni_t *ni);
int kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
int kranal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
-int kranal_eager_recv(lnet_ni_t *ni, void *private,
- lnet_msg_t *lntmsg, void **new_private);
-int kranal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
- int delayed, unsigned int niov,
+int kranal_eager_recv(lnet_ni_t *ni, void *private,
+ lnet_msg_t *lntmsg, void **new_private);
+int kranal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+ int delayed, unsigned int niov,
struct iovec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
int kranal_accept(lnet_ni_t *ni, struct socket *sock);
if (dev->rad_id != devid)
continue;
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
if (!dev->rad_ready) {
dev->rad_ready = 1;
- wake_up(&dev->rad_waitq);
+ cfs_waitq_signal(&dev->rad_waitq);
}
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
return;
}
kra_device_t *dev = conn->rac_device;
unsigned long flags;
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
if (!conn->rac_scheduled) {
kranal_conn_addref(conn); /* +1 ref for scheduler */
conn->rac_scheduled = 1;
- list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
- wake_up(&dev->rad_waitq);
+ cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
+ cfs_waitq_signal(&dev->rad_waitq);
}
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
}
kra_tx_t *
unsigned long flags;
kra_tx_t *tx;
- spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
- if (list_empty(&kranal_data.kra_idle_txs)) {
- spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+ if (cfs_list_empty(&kranal_data.kra_idle_txs)) {
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
return NULL;
}
- tx = list_entry(kranal_data.kra_idle_txs.next, kra_tx_t, tx_list);
- list_del(&tx->tx_list);
+ tx = cfs_list_entry(kranal_data.kra_idle_txs.next, kra_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
/* Allocate a new completion cookie. It might not be needed, but we've
* got a lock right now... */
tx->tx_cookie = kranal_data.kra_next_tx_cookie++;
- spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
LASSERT (tx->tx_buftype == RANAL_BUF_NONE);
LASSERT (tx->tx_msg.ram_type == RANAL_MSG_NONE);
unsigned long flags;
int i;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
kranal_unmap_buffer(tx);
tx->tx_msg.ram_type = RANAL_MSG_NONE;
tx->tx_conn = NULL;
- spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
- list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);
+ cfs_list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);
- spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
/* finalize AFTER freeing lnet msgs */
for (i = 0; i < 2; i++) {
kra_conn_t *
kranal_find_conn_locked (kra_peer_t *peer)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
/* just return the first connection */
- list_for_each (tmp, &peer->rap_conns) {
- return list_entry(tmp, kra_conn_t, rac_list);
+ cfs_list_for_each (tmp, &peer->rap_conns) {
+ return cfs_list_entry(tmp, kra_conn_t, rac_list);
}
return NULL;
tx->tx_conn = conn;
- spin_lock_irqsave(&conn->rac_lock, flags);
- list_add_tail(&tx->tx_list, &conn->rac_fmaq);
+ cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ cfs_list_add_tail(&tx->tx_list, &conn->rac_fmaq);
tx->tx_qtime = jiffies;
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
kranal_schedule_conn(conn);
}
kra_conn_t *conn;
int rc;
int retry;
- rwlock_t *g_lock = &kranal_data.kra_global_lock;
+ cfs_rwlock_t *g_lock = &kranal_data.kra_global_lock;
/* If I get here, I've committed to send, so I complete the tx with
* failure on any problems */
- LASSERT (tx->tx_conn == NULL); /* only set when assigned a conn */
+ LASSERT (tx->tx_conn == NULL); /* only set when assigned a conn */
for (retry = 0; ; retry = 1) {
- read_lock(g_lock);
+ cfs_read_lock(g_lock);
peer = kranal_find_peer_locked(nid);
if (peer != NULL) {
conn = kranal_find_conn_locked(peer);
if (conn != NULL) {
kranal_post_fma(conn, tx);
- read_unlock(g_lock);
+ cfs_read_unlock(g_lock);
return;
}
}
/* Making connections; I'll need a write lock... */
- read_unlock(g_lock);
- write_lock_irqsave(g_lock, flags);
+ cfs_read_unlock(g_lock);
+ cfs_write_lock_irqsave(g_lock, flags);
peer = kranal_find_peer_locked(nid);
if (peer != NULL)
break;
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
if (retry) {
CERROR("Can't find peer %s\n", libcfs_nid2str(nid));
if (conn != NULL) {
/* Connection exists; queue message on it */
kranal_post_fma(conn, tx);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
return;
}
LASSERT (peer->rap_persistence > 0);
if (!peer->rap_connecting) {
- LASSERT (list_empty(&peer->rap_tx_queue));
+ LASSERT (cfs_list_empty(&peer->rap_tx_queue));
if (!(peer->rap_reconnect_interval == 0 || /* first attempt */
- time_after_eq(jiffies, peer->rap_reconnect_time))) {
- write_unlock_irqrestore(g_lock, flags);
+ cfs_time_aftereq(jiffies, peer->rap_reconnect_time))) {
+ cfs_write_unlock_irqrestore(g_lock, flags);
kranal_tx_done(tx, -EHOSTUNREACH);
return;
}
peer->rap_connecting = 1;
kranal_peer_addref(peer); /* extra ref for connd */
- spin_lock(&kranal_data.kra_connd_lock);
+ cfs_spin_lock(&kranal_data.kra_connd_lock);
- list_add_tail(&peer->rap_connd_list,
+ cfs_list_add_tail(&peer->rap_connd_list,
&kranal_data.kra_connd_peers);
- wake_up(&kranal_data.kra_connd_waitq);
+ cfs_waitq_signal(&kranal_data.kra_connd_waitq);
- spin_unlock(&kranal_data.kra_connd_lock);
+ cfs_spin_unlock(&kranal_data.kra_connd_lock);
}
/* A connection is being established; queue the message... */
- list_add_tail(&tx->tx_list, &peer->rap_tx_queue);
+ cfs_list_add_tail(&tx->tx_list, &peer->rap_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
+ cfs_write_unlock_irqrestore(g_lock, flags);
}
void
rrc = RapkPostRdma(conn->rac_rihandle, &tx->tx_rdma_desc);
LASSERT (rrc == RAP_SUCCESS);
- spin_lock_irqsave(&conn->rac_lock, flags);
- list_add_tail(&tx->tx_list, &conn->rac_rdmaq);
+ cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ cfs_list_add_tail(&tx->tx_list, &conn->rac_rdmaq);
tx->tx_qtime = jiffies;
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
}
int
LASSERT (nob == 0 || niov > 0);
LASSERT (niov <= LNET_MAX_IOV);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
/* payload is either all vaddrs or all pages */
LASSERT (!(kiov != NULL && iov != NULL));
int rc;
LASSERT (mlen <= rlen);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
/* Either all pages or all vaddrs */
LASSERT (!(kiov != NULL && iov != NULL));
/* No match */
tx = kranal_new_tx_msg(RANAL_MSG_GET_NAK);
if (tx != NULL) {
- tx->tx_msg.ram_u.completion.racm_cookie =
+ tx->tx_msg.ram_u.completion.racm_cookie =
rxmsg->ram_u.get.ragm_cookie;
kranal_post_fma(conn, tx);
}
int
kranal_thread_start (int(*fn)(void *arg), void *arg)
{
- long pid = kernel_thread(fn, arg, 0);
+ long pid = cfs_kernel_thread(fn, arg, 0);
if (pid < 0)
return(int)pid;
- atomic_inc(&kranal_data.kra_nthreads);
+ cfs_atomic_inc(&kranal_data.kra_nthreads);
return 0;
}
void
kranal_thread_fini (void)
{
- atomic_dec(&kranal_data.kra_nthreads);
+ cfs_atomic_dec(&kranal_data.kra_nthreads);
}
int
kranal_check_conn_timeouts (kra_conn_t *conn)
{
kra_tx_t *tx;
- struct list_head *ttmp;
+ cfs_list_t *ttmp;
unsigned long flags;
long timeout;
unsigned long now = jiffies;
conn->rac_state == RANAL_CONN_CLOSING);
if (!conn->rac_close_sent &&
- time_after_eq(now, conn->rac_last_tx + conn->rac_keepalive * HZ)) {
+ cfs_time_aftereq(now, conn->rac_last_tx + conn->rac_keepalive *
+ CFS_HZ)) {
/* not sent in a while; schedule conn so scheduler sends a keepalive */
CDEBUG(D_NET, "Scheduling keepalive %p->%s\n",
conn, libcfs_nid2str(conn->rac_peer->rap_nid));
kranal_schedule_conn(conn);
}
- timeout = conn->rac_timeout * HZ;
+ timeout = conn->rac_timeout * CFS_HZ;
if (!conn->rac_close_recvd &&
- time_after_eq(now, conn->rac_last_rx + timeout)) {
+ cfs_time_aftereq(now, conn->rac_last_rx + timeout)) {
CERROR("%s received from %s within %lu seconds\n",
(conn->rac_state == RANAL_CONN_ESTABLISHED) ?
"Nothing" : "CLOSE not",
libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - conn->rac_last_rx)/HZ);
+ (now - conn->rac_last_rx)/CFS_HZ);
return -ETIMEDOUT;
}
* in case of hardware/software errors that make this conn seem
* responsive even though it isn't progressing its message queues. */
- spin_lock_irqsave(&conn->rac_lock, flags);
+ cfs_spin_lock_irqsave(&conn->rac_lock, flags);
- list_for_each (ttmp, &conn->rac_fmaq) {
- tx = list_entry(ttmp, kra_tx_t, tx_list);
+ cfs_list_for_each (ttmp, &conn->rac_fmaq) {
+ tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
- if (time_after_eq(now, tx->tx_qtime + timeout)) {
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on fmaq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/HZ);
+ (now - tx->tx_qtime)/CFS_HZ);
return -ETIMEDOUT;
}
}
- list_for_each (ttmp, &conn->rac_rdmaq) {
- tx = list_entry(ttmp, kra_tx_t, tx_list);
+ cfs_list_for_each (ttmp, &conn->rac_rdmaq) {
+ tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
- if (time_after_eq(now, tx->tx_qtime + timeout)) {
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on rdmaq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/HZ);
+ (now - tx->tx_qtime)/CFS_HZ);
return -ETIMEDOUT;
}
}
- list_for_each (ttmp, &conn->rac_replyq) {
- tx = list_entry(ttmp, kra_tx_t, tx_list);
+ cfs_list_for_each (ttmp, &conn->rac_replyq) {
+ tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
- if (time_after_eq(now, tx->tx_qtime + timeout)) {
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on replyq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/HZ);
+ (now - tx->tx_qtime)/CFS_HZ);
return -ETIMEDOUT;
}
}
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
return 0;
}
void
kranal_reaper_check (int idx, unsigned long *min_timeoutp)
{
- struct list_head *conns = &kranal_data.kra_conns[idx];
- struct list_head *ctmp;
+ cfs_list_t *conns = &kranal_data.kra_conns[idx];
+ cfs_list_t *ctmp;
kra_conn_t *conn;
unsigned long flags;
int rc;
again:
/* NB. We expect to check all the conns and not find any problems, so
* we just use a shared lock while we take a look... */
- read_lock(&kranal_data.kra_global_lock);
+ cfs_read_lock(&kranal_data.kra_global_lock);
- list_for_each (ctmp, conns) {
- conn = list_entry(ctmp, kra_conn_t, rac_hashlist);
+ cfs_list_for_each (ctmp, conns) {
+ conn = cfs_list_entry(ctmp, kra_conn_t, rac_hashlist);
if (conn->rac_timeout < *min_timeoutp )
*min_timeoutp = conn->rac_timeout;
continue;
kranal_conn_addref(conn);
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
CERROR("Conn to %s, cqid %d timed out\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
conn->rac_cqid);
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
switch (conn->rac_state) {
default:
break;
}
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
kranal_conn_decref(conn);
goto again;
}
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
}
int
{
long id = (long)arg;
char name[16];
- wait_queue_t wait;
+ cfs_waitlink_t wait;
unsigned long flags;
kra_peer_t *peer;
kra_acceptsock_t *ras;
cfs_daemonize(name);
cfs_block_allsigs();
- init_waitqueue_entry(&wait, current);
+ cfs_waitlink_init(&wait);
- spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
while (!kranal_data.kra_shutdown) {
did_something = 0;
- if (!list_empty(&kranal_data.kra_connd_acceptq)) {
- ras = list_entry(kranal_data.kra_connd_acceptq.next,
- kra_acceptsock_t, ras_list);
- list_del(&ras->ras_list);
+ if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+ ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
+ kra_acceptsock_t, ras_list);
+ cfs_list_del(&ras->ras_list);
- spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+ flags);
CDEBUG(D_NET,"About to handshake someone\n");
CDEBUG(D_NET,"Finished handshaking someone\n");
- spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+ flags);
did_something = 1;
}
- if (!list_empty(&kranal_data.kra_connd_peers)) {
- peer = list_entry(kranal_data.kra_connd_peers.next,
- kra_peer_t, rap_connd_list);
+ if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
+ peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
+ kra_peer_t, rap_connd_list);
- list_del_init(&peer->rap_connd_list);
- spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ cfs_list_del_init(&peer->rap_connd_list);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+ flags);
kranal_connect(peer);
kranal_peer_decref(peer);
- spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+ flags);
did_something = 1;
}
if (did_something)
continue;
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&kranal_data.kra_connd_waitq, &wait);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add_exclusive(&kranal_data.kra_connd_waitq, &wait);
- spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
- schedule ();
+ cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&kranal_data.kra_connd_waitq, &wait);
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&kranal_data.kra_connd_waitq, &wait);
- spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
}
- spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
kranal_thread_fini();
return 0;
LASSERT (timeout > 0);
- spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
if (timeout < kranal_data.kra_new_min_timeout)
kranal_data.kra_new_min_timeout = timeout;
- spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
}
int
kranal_reaper (void *arg)
{
- wait_queue_t wait;
+ cfs_waitlink_t wait;
unsigned long flags;
long timeout;
int i;
int conn_index = 0;
int base_index = conn_entries - 1;
unsigned long next_check_time = jiffies;
- long next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ long next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
long current_min_timeout = 1;
cfs_daemonize("kranal_reaper");
cfs_block_allsigs();
- init_waitqueue_entry(&wait, current);
+ cfs_waitlink_init(&wait);
- spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
while (!kranal_data.kra_shutdown) {
/* I wake up every 'p' seconds to check for timeouts on some
/* careful with the jiffy wrap... */
timeout = (long)(next_check_time - jiffies);
if (timeout > 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add(&kranal_data.kra_reaper_waitq, &wait);
- spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
+ flags);
- schedule_timeout(timeout);
+ cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+ timeout);
- spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock,
+ flags);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&kranal_data.kra_reaper_waitq, &wait);
continue;
}
- if (kranal_data.kra_new_min_timeout != MAX_SCHEDULE_TIMEOUT) {
+ if (kranal_data.kra_new_min_timeout !=
+ CFS_MAX_SCHEDULE_TIMEOUT) {
/* new min timeout set: restart min timeout scan */
- next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
base_index = conn_index - 1;
if (base_index < 0)
base_index = conn_entries - 1;
- if (kranal_data.kra_new_min_timeout < current_min_timeout) {
- current_min_timeout = kranal_data.kra_new_min_timeout;
+ if (kranal_data.kra_new_min_timeout <
+ current_min_timeout) {
+ current_min_timeout =
+ kranal_data.kra_new_min_timeout;
CDEBUG(D_NET, "Set new min timeout %ld\n",
current_min_timeout);
}
- kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ kranal_data.kra_new_min_timeout =
+ CFS_MAX_SCHEDULE_TIMEOUT;
}
min_timeout = current_min_timeout;
- spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+ cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
LASSERT (min_timeout > 0);
conn_index = (conn_index + 1) % conn_entries;
}
- next_check_time += p * HZ;
+ next_check_time += p * CFS_HZ;
- spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
if (((conn_index - chunk <= base_index &&
base_index < conn_index) ||
}
/* ...and restart min timeout scan */
- next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
base_index = conn_index - 1;
if (base_index < 0)
base_index = conn_entries - 1;
LASSERT (rrc == RAP_SUCCESS);
LASSERT ((event_type & RAPK_CQ_EVENT_OVERRUN) == 0);
- read_lock(&kranal_data.kra_global_lock);
+ cfs_read_lock(&kranal_data.kra_global_lock);
conn = kranal_cqid2conn_locked(cqid);
if (conn == NULL) {
/* Conn was destroyed? */
CDEBUG(D_NET, "RDMA CQID lookup %d failed\n", cqid);
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
continue;
}
LASSERT (rrc == RAP_SUCCESS);
CDEBUG(D_NET, "Completed %p\n",
- list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list));
+ cfs_list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list));
- spin_lock_irqsave(&conn->rac_lock, flags);
+ cfs_spin_lock_irqsave(&conn->rac_lock, flags);
- LASSERT (!list_empty(&conn->rac_rdmaq));
- tx = list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list);
- list_del(&tx->tx_list);
+ LASSERT (!cfs_list_empty(&conn->rac_rdmaq));
+ tx = cfs_list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
LASSERT(desc->AppPtr == (void *)tx);
LASSERT(tx->tx_msg.ram_type == RANAL_MSG_PUT_DONE ||
tx->tx_msg.ram_type == RANAL_MSG_GET_DONE);
- list_add_tail(&tx->tx_list, &conn->rac_fmaq);
+ cfs_list_add_tail(&tx->tx_list, &conn->rac_fmaq);
tx->tx_qtime = jiffies;
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
/* Get conn's fmaq processed, now I've just put something
* there */
kranal_schedule_conn(conn);
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
}
}
RAP_RETURN rrc;
__u32 cqid;
__u32 event_type;
- struct list_head *conns;
- struct list_head *tmp;
+ cfs_list_t *conns;
+ cfs_list_t *tmp;
int i;
for (;;) {
if ((event_type & RAPK_CQ_EVENT_OVERRUN) == 0) {
- read_lock(&kranal_data.kra_global_lock);
+ cfs_read_lock(&kranal_data.kra_global_lock);
conn = kranal_cqid2conn_locked(cqid);
if (conn == NULL) {
kranal_schedule_conn(conn);
}
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
continue;
}
for (i = 0; i < kranal_data.kra_conn_hash_size; i++) {
- read_lock(&kranal_data.kra_global_lock);
+ cfs_read_lock(&kranal_data.kra_global_lock);
conns = &kranal_data.kra_conns[i];
- list_for_each (tmp, conns) {
- conn = list_entry(tmp, kra_conn_t,
- rac_hashlist);
+ cfs_list_for_each (tmp, conns) {
+ conn = cfs_list_entry(tmp, kra_conn_t,
+ rac_hashlist);
if (conn->rac_device == dev)
kranal_schedule_conn(conn);
}
/* don't block write lockers for too long... */
- read_unlock(&kranal_data.kra_global_lock);
+ cfs_read_unlock(&kranal_data.kra_global_lock);
}
}
}
return 0;
case RAP_NOT_DONE:
- if (time_after_eq(jiffies,
- conn->rac_last_tx + conn->rac_keepalive*HZ))
+ if (cfs_time_aftereq(jiffies,
+ conn->rac_last_tx + conn->rac_keepalive *
+ CFS_HZ))
CWARN("EAGAIN sending %02x (idle %lu secs)\n",
- msg->ram_type, (jiffies - conn->rac_last_tx)/HZ);
+ msg->ram_type,
+ (jiffies - conn->rac_last_tx)/CFS_HZ);
return -EAGAIN;
}
}
LASSERT (current == conn->rac_device->rad_scheduler);
if (conn->rac_state != RANAL_CONN_ESTABLISHED) {
- if (!list_empty(&conn->rac_rdmaq)) {
+ if (!cfs_list_empty(&conn->rac_rdmaq)) {
/* RDMAs in progress */
LASSERT (!conn->rac_close_sent);
- if (time_after_eq(jiffies,
- conn->rac_last_tx +
- conn->rac_keepalive * HZ)) {
+ if (cfs_time_aftereq(jiffies,
+ conn->rac_last_tx +
+ conn->rac_keepalive * CFS_HZ)) {
CDEBUG(D_NET, "sending NOOP (rdma in progress)\n");
kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
if (!conn->rac_close_recvd)
return;
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (conn->rac_state == RANAL_CONN_CLOSING)
kranal_terminate_conn_locked(conn);
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
return;
}
- spin_lock_irqsave(&conn->rac_lock, flags);
+ cfs_spin_lock_irqsave(&conn->rac_lock, flags);
- if (list_empty(&conn->rac_fmaq)) {
+ if (cfs_list_empty(&conn->rac_fmaq)) {
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
- if (time_after_eq(jiffies,
- conn->rac_last_tx + conn->rac_keepalive * HZ)) {
+ if (cfs_time_aftereq(jiffies,
+ conn->rac_last_tx + conn->rac_keepalive *
+ CFS_HZ)) {
CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%ld))\n",
libcfs_nid2str(conn->rac_peer->rap_nid), conn,
- (jiffies - conn->rac_last_tx)/HZ, conn->rac_keepalive);
+ (jiffies - conn->rac_last_tx)/CFS_HZ,
+ conn->rac_keepalive);
kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
}
return;
}
- tx = list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
- list_del(&tx->tx_list);
- more_to_do = !list_empty(&conn->rac_fmaq);
+ tx = cfs_list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
+ more_to_do = !cfs_list_empty(&conn->rac_fmaq);
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
expect_reply = 0;
CDEBUG(D_NET, "sending regular msg: %p, type %02x, cookie "LPX64"\n",
/* I need credits to send this. Replace tx at the head of the
* fmaq and I'll get rescheduled when credits appear */
CDEBUG(D_NET, "EAGAIN on %p\n", conn);
- spin_lock_irqsave(&conn->rac_lock, flags);
- list_add(&tx->tx_list, &conn->rac_fmaq);
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ cfs_list_add(&tx->tx_list, &conn->rac_fmaq);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
return;
}
} else {
/* LASSERT(current) above ensures this doesn't race with reply
* processing */
- spin_lock_irqsave(&conn->rac_lock, flags);
- list_add_tail(&tx->tx_list, &conn->rac_replyq);
+ cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ cfs_list_add_tail(&tx->tx_list, &conn->rac_replyq);
tx->tx_qtime = jiffies;
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
}
if (more_to_do) {
kra_tx_t *
kranal_match_reply(kra_conn_t *conn, int type, __u64 cookie)
{
- struct list_head *ttmp;
+ cfs_list_t *ttmp;
kra_tx_t *tx;
unsigned long flags;
- spin_lock_irqsave(&conn->rac_lock, flags);
+ cfs_spin_lock_irqsave(&conn->rac_lock, flags);
- list_for_each(ttmp, &conn->rac_replyq) {
- tx = list_entry(ttmp, kra_tx_t, tx_list);
+ cfs_list_for_each(ttmp, &conn->rac_replyq) {
+ tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
CDEBUG(D_NET,"Checking %p %02x/"LPX64"\n",
tx, tx->tx_msg.ram_type, tx->tx_cookie);
continue;
if (tx->tx_msg.ram_type != type) {
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
CWARN("Unexpected type %x (%x expected) "
"matched reply from %s\n",
tx->tx_msg.ram_type, type,
return NULL;
}
- list_del(&tx->tx_list);
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_list_del(&tx->tx_list);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
return tx;
}
- spin_unlock_irqrestore(&conn->rac_lock, flags);
+ cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
CWARN("Unmatched reply %02x/"LPX64" from %s\n",
type, cookie, libcfs_nid2str(conn->rac_peer->rap_nid));
return NULL;
if (msg->ram_type == RANAL_MSG_CLOSE) {
CWARN("RX CLOSE from %s\n", libcfs_nid2str(conn->rac_peer->rap_nid));
conn->rac_close_recvd = 1;
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (conn->rac_state == RANAL_CONN_ESTABLISHED)
kranal_close_conn_locked(conn, 0);
conn->rac_close_sent)
kranal_terminate_conn_locked(conn);
- write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
goto out;
}
int nreplies;
LASSERT (conn->rac_state == RANAL_CONN_CLOSED);
- LASSERT (list_empty(&conn->rac_list));
- LASSERT (list_empty(&conn->rac_hashlist));
+ LASSERT (cfs_list_empty(&conn->rac_list));
+ LASSERT (cfs_list_empty(&conn->rac_hashlist));
- for (nfma = 0; !list_empty(&conn->rac_fmaq); nfma++) {
- tx = list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
+ for (nfma = 0; !cfs_list_empty(&conn->rac_fmaq); nfma++) {
+ tx = cfs_list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
kranal_tx_done(tx, -ECONNABORTED);
}
- LASSERT (list_empty(&conn->rac_rdmaq));
+ LASSERT (cfs_list_empty(&conn->rac_rdmaq));
- for (nreplies = 0; !list_empty(&conn->rac_replyq); nreplies++) {
- tx = list_entry(conn->rac_replyq.next, kra_tx_t, tx_list);
+ for (nreplies = 0; !cfs_list_empty(&conn->rac_replyq); nreplies++) {
+ tx = cfs_list_entry(conn->rac_replyq.next, kra_tx_t, tx_list);
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
kranal_tx_done(tx, -ECONNABORTED);
}
kranal_process_new_conn (kra_conn_t *conn)
{
RAP_RETURN rrc;
-
+
rrc = RapkCompleteSync(conn->rac_rihandle, 1);
if (rrc == RAP_SUCCESS)
return 0;
LASSERT (rrc == RAP_NOT_DONE);
- if (!time_after_eq(jiffies, conn->rac_last_tx +
- conn->rac_timeout * HZ))
+ if (!cfs_time_aftereq(jiffies, conn->rac_last_tx +
+ conn->rac_timeout * CFS_HZ))
return -EAGAIN;
/* Too late */
kranal_scheduler (void *arg)
{
kra_device_t *dev = (kra_device_t *)arg;
- wait_queue_t wait;
+ cfs_waitlink_t wait;
char name[16];
kra_conn_t *conn;
unsigned long flags;
unsigned long soonest;
int nsoonest;
long timeout;
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
int rc;
int dropped_lock;
int busy_loops = 0;
cfs_block_allsigs();
dev->rad_scheduler = current;
- init_waitqueue_entry(&wait, current);
+ cfs_waitlink_init(&wait);
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
while (!kranal_data.kra_shutdown) {
/* Safe: kra_shutdown only set when quiescent */
if (busy_loops++ >= RANAL_RESCHED) {
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
- our_cond_resched();
+ cfs_cond_resched();
busy_loops = 0;
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
}
dropped_lock = 0;
if (dev->rad_ready) {
/* Device callback fired since I last checked it */
dev->rad_ready = 0;
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
dropped_lock = 1;
kranal_check_rdma_cq(dev);
kranal_check_fma_cq(dev);
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
}
- list_for_each_safe(tmp, nxt, &dev->rad_ready_conns) {
- conn = list_entry(tmp, kra_conn_t, rac_schedlist);
+ cfs_list_for_each_safe(tmp, nxt, &dev->rad_ready_conns) {
+ conn = cfs_list_entry(tmp, kra_conn_t, rac_schedlist);
- list_del_init(&conn->rac_schedlist);
+ cfs_list_del_init(&conn->rac_schedlist);
LASSERT (conn->rac_scheduled);
conn->rac_scheduled = 0;
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
dropped_lock = 1;
kranal_check_fma_rx(conn);
kranal_complete_closed_conn(conn);
kranal_conn_decref(conn);
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
}
nsoonest = 0;
soonest = jiffies;
- list_for_each_safe(tmp, nxt, &dev->rad_new_conns) {
- conn = list_entry(tmp, kra_conn_t, rac_schedlist);
-
+ cfs_list_for_each_safe(tmp, nxt, &dev->rad_new_conns) {
+ conn = cfs_list_entry(tmp, kra_conn_t, rac_schedlist);
+
deadline = conn->rac_last_tx + conn->rac_keepalive;
- if (time_after_eq(jiffies, deadline)) {
+ if (cfs_time_aftereq(jiffies, deadline)) {
/* Time to process this new conn */
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock,
+ flags);
dropped_lock = 1;
rc = kranal_process_new_conn(conn);
if (rc != -EAGAIN) {
/* All done with this conn */
- spin_lock_irqsave(&dev->rad_lock, flags);
- list_del_init(&conn->rac_schedlist);
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock,
+ flags);
+ cfs_list_del_init(&conn->rac_schedlist);
+ cfs_spin_unlock_irqrestore(&dev-> \
+ rad_lock,
+ flags);
kranal_conn_decref(conn);
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock,
+ flags);
continue;
}
/* retry with exponential backoff until HZ */
if (conn->rac_keepalive == 0)
conn->rac_keepalive = 1;
- else if (conn->rac_keepalive <= HZ)
+ else if (conn->rac_keepalive <= CFS_HZ)
conn->rac_keepalive *= 2;
else
- conn->rac_keepalive += HZ;
+ conn->rac_keepalive += CFS_HZ;
deadline = conn->rac_last_tx + conn->rac_keepalive;
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
}
/* Does this conn need attention soonest? */
if (nsoonest++ == 0 ||
- !time_after_eq(deadline, soonest))
+ !cfs_time_aftereq(deadline, soonest))
soonest = deadline;
}
if (dropped_lock) /* may sleep iff I didn't drop the lock */
continue;
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&dev->rad_waitq, &wait);
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
if (nsoonest == 0) {
busy_loops = 0;
- schedule();
+ cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
} else {
timeout = (long)(soonest - jiffies);
if (timeout > 0) {
busy_loops = 0;
- schedule_timeout(timeout);
+ cfs_waitq_timedwait(&wait,
+ CFS_TASK_INTERRUPTIBLE,
+ timeout);
}
}
- remove_wait_queue(&dev->rad_waitq, &wait);
- set_current_state(TASK_RUNNING);
- spin_lock_irqsave(&dev->rad_lock, flags);
+ cfs_waitq_del(&dev->rad_waitq, &wait);
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_spin_lock_irqsave(&dev->rad_lock, flags);
}
- spin_unlock_irqrestore(&dev->rad_lock, flags);
+ cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
dev->rad_scheduler = NULL;
kranal_thread_fini();
LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
LASSERT (peer->ksnp_accepting == 0);
- LASSERT (list_empty (&peer->ksnp_conns));
- LASSERT (list_empty (&peer->ksnp_routes));
- LASSERT (list_empty (&peer->ksnp_tx_queue));
- LASSERT (list_empty (&peer->ksnp_zc_req_list));
+ LASSERT (cfs_list_empty (&peer->ksnp_conns));
+ LASSERT (cfs_list_empty (&peer->ksnp_routes));
+ LASSERT (cfs_list_empty (&peer->ksnp_tx_queue));
+ LASSERT (cfs_list_empty (&peer->ksnp_zc_req_list));
LIBCFS_FREE (peer, sizeof (*peer));
ksock_peer_t *
ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
{
- struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
- struct list_head *tmp;
+ cfs_list_t *peer_list = ksocknal_nid2peerlist(id.nid);
+ cfs_list_t *tmp;
ksock_peer_t *peer;
- list_for_each (tmp, peer_list) {
+ cfs_list_for_each (tmp, peer_list) {
- peer = list_entry (tmp, ksock_peer_t, ksnp_list);
+ peer = cfs_list_entry (tmp, ksock_peer_t, ksnp_list);
LASSERT (!peer->ksnp_closing);
iface->ksni_npeers--;
}
- LASSERT (list_empty(&peer->ksnp_conns));
- LASSERT (list_empty(&peer->ksnp_routes));
+ LASSERT (cfs_list_empty(&peer->ksnp_conns));
+ LASSERT (cfs_list_empty(&peer->ksnp_routes));
LASSERT (!peer->ksnp_closing);
peer->ksnp_closing = 1;
- list_del (&peer->ksnp_list);
+ cfs_list_del (&peer->ksnp_list);
/* lose peerlist's ref */
ksocknal_peer_decref(peer);
}
int
ksocknal_get_peer_info (lnet_ni_t *ni, int index,
- lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, int *port,
- int *conn_count, int *share_count)
+ lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
+ int *port, int *conn_count, int *share_count)
{
ksock_peer_t *peer;
- struct list_head *ptmp;
+ cfs_list_t *ptmp;
ksock_route_t *route;
- struct list_head *rtmp;
+ cfs_list_t *rtmp;
int i;
int j;
int rc = -ENOENT;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+ cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
if (peer->ksnp_n_passive_ips == 0 &&
- list_empty(&peer->ksnp_routes)) {
+ cfs_list_empty(&peer->ksnp_routes)) {
if (index-- > 0)
continue;
goto out;
}
- list_for_each (rtmp, &peer->ksnp_routes) {
+ cfs_list_for_each (rtmp, &peer->ksnp_routes) {
if (index-- > 0)
continue;
- route = list_entry(rtmp, ksock_route_t,
- ksnr_list);
+ route = cfs_list_entry(rtmp, ksock_route_t,
+ ksnr_list);
*id = peer->ksnp_id;
*myip = route->ksnr_myipaddr;
void
ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
ksock_conn_t *conn;
ksock_route_t *route2;
LASSERT (route->ksnr_connected == 0);
/* LASSERT(unique) */
- list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each(tmp, &peer->ksnp_routes) {
+ route2 = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR ("Duplicate route %s %u.%u.%u.%u\n",
route->ksnr_peer = peer;
ksocknal_peer_addref(peer);
/* peer's routelist takes over my ref on 'route' */
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+ cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
- list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each(tmp, &peer->ksnp_conns) {
+ conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
continue;
ksock_peer_t *peer = route->ksnr_peer;
ksock_interface_t *iface;
ksock_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
+ cfs_list_t *ctmp;
+ cfs_list_t *cnxt;
LASSERT (!route->ksnr_deleted);
/* Close associated conns */
- list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
+ conn = cfs_list_entry(ctmp, ksock_conn_t, ksnc_list);
if (conn->ksnc_route != route)
continue;
}
route->ksnr_deleted = 1;
- list_del (&route->ksnr_list);
+ cfs_list_del (&route->ksnr_list);
ksocknal_route_decref(route); /* drop peer's ref */
- if (list_empty (&peer->ksnp_routes) &&
- list_empty (&peer->ksnp_conns)) {
+ if (cfs_list_empty (&peer->ksnp_routes) &&
+ cfs_list_empty (&peer->ksnp_conns)) {
/* I've just removed the last route to a peer with no active
* connections */
ksocknal_unlink_peer_locked (peer);
int
ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
ksock_peer_t *peer;
ksock_peer_t *peer2;
ksock_route_t *route;
peer = peer2;
} else {
/* peer table takes my ref on peer */
- list_add_tail (&peer->ksnp_list,
- ksocknal_nid2peerlist (id.nid));
+ cfs_list_add_tail (&peer->ksnp_list,
+ ksocknal_nid2peerlist (id.nid));
}
route2 = NULL;
- list_for_each (tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each (tmp, &peer->ksnp_routes) {
+ route2 = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
if (route2->ksnr_ipaddr == ipaddr)
break;
{
ksock_conn_t *conn;
ksock_route_t *route;
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
int nshared;
LASSERT (!peer->ksnp_closing);
/* Extra ref prevents peer disappearing until I'm done with it */
ksocknal_peer_addref(peer);
- list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+ route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
/* no match */
if (!(ip == 0 || route->ksnr_ipaddr == ip))
}
nshared = 0;
- list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+ route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
nshared += route->ksnr_share_count;
}
/* remove everything else if there are no explicit entries
* left */
- list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+ route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
/* we should only be removing auto-entries */
LASSERT(route->ksnr_share_count == 0);
ksocknal_del_route_locked (route);
}
- list_for_each_safe (tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_conns) {
+ conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
ksocknal_close_conn_locked(conn, 0);
}
ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
{
CFS_LIST_HEAD (zombies);
- struct list_head *ptmp;
- struct list_head *pnxt;
+ cfs_list_t *ptmp;
+ cfs_list_t *pnxt;
ksock_peer_t *peer;
int lo;
int hi;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+ cfs_list_for_each_safe (ptmp, pnxt,
+ &ksocknal_data.ksnd_peers[i]) {
+ peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
ksocknal_del_peer_locked (peer, ip);
- if (peer->ksnp_closing && !list_empty(&peer->ksnp_tx_queue)) {
- LASSERT (list_empty(&peer->ksnp_conns));
- LASSERT (list_empty(&peer->ksnp_routes));
+ if (peer->ksnp_closing &&
+ !cfs_list_empty(&peer->ksnp_tx_queue)) {
+ LASSERT (cfs_list_empty(&peer->ksnp_conns));
+ LASSERT (cfs_list_empty(&peer->ksnp_routes));
- list_splice_init(&peer->ksnp_tx_queue, &zombies);
+ cfs_list_splice_init(&peer->ksnp_tx_queue,
+ &zombies);
}
ksocknal_peer_decref(peer); /* ...till here */
ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
{
ksock_peer_t *peer;
- struct list_head *ptmp;
+ cfs_list_t *ptmp;
ksock_conn_t *conn;
- struct list_head *ctmp;
+ cfs_list_t *ctmp;
int i;
cfs_read_lock (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+ cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
LASSERT (!peer->ksnp_closing);
if (peer->ksnp_ni != ni)
continue;
- list_for_each (ctmp, &peer->ksnp_conns) {
+ cfs_list_for_each (ctmp, &peer->ksnp_conns) {
if (index-- > 0)
continue;
- conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+ conn = cfs_list_entry (ctmp, ksock_conn_t,
+ ksnc_list);
ksocknal_conn_addref(conn);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data. \
+ ksnd_global_lock);
return (conn);
}
}
ksock_irqinfo_t *info;
int i;
- LASSERT (irq < NR_IRQS);
+ LASSERT (irq < CFS_NR_IRQS);
info = &ksocknal_data.ksnd_irqinfo[irq];
if (irq != 0 && /* hardware NIC */
ksocknal_create_routes(ksock_peer_t *peer, int port,
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
- ksock_route_t *newroute = NULL;
- cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- lnet_ni_t *ni = peer->ksnp_ni;
- ksock_net_t *net = ni->ni_data;
- struct list_head *rtmp;
- ksock_route_t *route;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
- int best_netmatch;
- int this_netmatch;
- int best_nroutes;
- int i;
- int j;
+ ksock_route_t *newroute = NULL;
+ cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ lnet_ni_t *ni = peer->ksnp_ni;
+ ksock_net_t *net = ni->ni_data;
+ cfs_list_t *rtmp;
+ ksock_route_t *route;
+ ksock_interface_t *iface;
+ ksock_interface_t *best_iface;
+ int best_netmatch;
+ int this_netmatch;
+ int best_nroutes;
+ int i;
+ int j;
/* CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
/* Already got a route? */
route = NULL;
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each(rtmp, &peer->ksnp_routes) {
+ route = cfs_list_entry(rtmp, ksock_route_t, ksnr_list);
if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
break;
iface = &net->ksnn_interfaces[j];
/* Using this interface already? */
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each(rtmp, &peer->ksnp_routes) {
+ route = cfs_list_entry(rtmp, ksock_route_t,
+ ksnr_list);
if (route->ksnr_myipaddr == iface->ksni_ipaddr)
break;
cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
- list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
+ cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
CFS_LIST_HEAD (zombies);
lnet_process_id_t peerid;
- struct list_head *tmp;
+ cfs_list_t *tmp;
__u64 incarnation;
ksock_conn_t *conn;
ksock_conn_t *conn2;
if (peer2 == NULL) {
/* NB this puts an "empty" peer in the peer
* table (which takes my ref) */
- list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(peerid.nid));
+ cfs_list_add_tail(&peer->ksnp_list,
+ ksocknal_nid2peerlist(peerid.nid));
} else {
ksocknal_peer_decref(peer);
peer = peer2;
* NB recv_hello may have returned EPROTO to signal my peer
* wants a different protocol than the one I asked for.
*/
- LASSERT (list_empty(&peer->ksnp_conns));
+ LASSERT (cfs_list_empty(&peer->ksnp_conns));
peer->ksnp_proto = conn->ksnc_proto;
peer->ksnp_incarnation = incarnation;
/* Refuse to duplicate an existing connection, unless this is a
* loopback connection */
if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
- list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each(tmp, &peer->ksnp_conns) {
+ conn2 = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
* create an association. This allows incoming connections created
* by routes in my peer to match my own route entries so I don't
* continually create duplicate routes. */
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each (tmp, &peer->ksnp_routes) {
+ route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
continue;
conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
cfs_mb(); /* order with adding to peer's conn list */
- list_add (&conn->ksnc_list, &peer->ksnp_conns);
+ cfs_list_add (&conn->ksnc_list, &peer->ksnp_conns);
ksocknal_conn_addref(conn);
ksocknal_new_packet(conn, 0);
conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
/* Take packets blocking for this connection. */
- list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
+ cfs_list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
continue;
- list_del (&tx->tx_list);
+ cfs_list_del (&tx->tx_list);
ksocknal_queue_tx_locked (tx, conn);
}
failed_2:
if (!peer->ksnp_closing &&
- list_empty (&peer->ksnp_conns) &&
- list_empty (&peer->ksnp_routes)) {
- list_add(&zombies, &peer->ksnp_tx_queue);
- list_del_init(&peer->ksnp_tx_queue);
+ cfs_list_empty (&peer->ksnp_conns) &&
+ cfs_list_empty (&peer->ksnp_routes)) {
+ cfs_list_add(&zombies, &peer->ksnp_tx_queue);
+ cfs_list_del_init(&peer->ksnp_tx_queue);
ksocknal_unlink_peer_locked(peer);
}
ksock_peer_t *peer = conn->ksnc_peer;
ksock_route_t *route;
ksock_conn_t *conn2;
- struct list_head *tmp;
+ cfs_list_t *tmp;
LASSERT (peer->ksnp_error == 0);
LASSERT (!conn->ksnc_closing);
conn->ksnc_closing = 1;
/* ksnd_deathrow_conns takes over peer's ref */
- list_del (&conn->ksnc_list);
+ cfs_list_del (&conn->ksnc_list);
route = conn->ksnc_route;
if (route != NULL) {
LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
conn2 = NULL;
- list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each(tmp, &peer->ksnp_conns) {
+ conn2 = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
if (conn2->ksnc_route == route &&
conn2->ksnc_type == conn->ksnc_type)
conn->ksnc_route = NULL;
#if 0 /* irrelevent with only eager routes */
- list_del (&route->ksnr_list); /* make route least favourite */
- list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
+ /* make route least favourite */
+ cfs_list_del (&route->ksnr_list);
+ cfs_list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
#endif
ksocknal_route_decref(route); /* drop conn's ref on route */
}
- if (list_empty (&peer->ksnp_conns)) {
+ if (cfs_list_empty (&peer->ksnp_conns)) {
/* No more connections to this peer */
- if (!list_empty(&peer->ksnp_tx_queue)) {
+ if (!cfs_list_empty(&peer->ksnp_tx_queue)) {
ksock_tx_t *tx;
LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
/* throw them to the last connection...,
* these TXs will be send to /dev/null by scheduler */
- list_for_each_entry(tx, &peer->ksnp_tx_queue, tx_list)
+ cfs_list_for_each_entry(tx, &peer->ksnp_tx_queue,
+ tx_list)
ksocknal_tx_prep(conn, tx);
- spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
- list_splice_init(&peer->ksnp_tx_queue, &conn->ksnc_tx_queue);
- spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
+ cfs_spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+ cfs_list_splice_init(&peer->ksnp_tx_queue,
+ &conn->ksnc_tx_queue);
+ cfs_spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
peer->ksnp_proto = NULL; /* renegotiate protocol version */
peer->ksnp_error = error; /* stash last conn close reason */
- if (list_empty (&peer->ksnp_routes)) {
+ if (cfs_list_empty (&peer->ksnp_routes)) {
/* I've just closed last conn belonging to a
* peer with no routes to it */
ksocknal_unlink_peer_locked (peer);
cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
- list_add_tail (&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
+ cfs_list_add_tail (&conn->ksnc_list,
+ &ksocknal_data.ksnd_deathrow_conns);
cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
cfs_read_lock (&ksocknal_data.ksnd_global_lock);
if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
- list_empty(&peer->ksnp_conns) &&
+ cfs_list_empty(&peer->ksnp_conns) &&
peer->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer) == NULL) {
notify = 1;
LASSERT (tx->tx_msg.ksm_zc_cookies[0] != 0);
tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
+ cfs_list_del(&tx->tx_zc_list);
+ cfs_list_add(&tx->tx_zc_list, &zlist);
}
cfs_spin_unlock(&peer->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ while (!cfs_list_empty(&zlist)) {
+ tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
- list_del(&tx->tx_zc_list);
+ cfs_list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
}
conn->ksnc_tx_ready = 1;
if (!conn->ksnc_tx_scheduled &&
- !list_empty(&conn->ksnc_tx_queue)){
- list_add_tail (&conn->ksnc_tx_list,
+ !cfs_list_empty(&conn->ksnc_tx_queue)){
+ cfs_list_add_tail (&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
if (peer->ksnp_error != 0) {
/* peer's last conn closed in error */
- LASSERT (list_empty (&peer->ksnp_conns));
+ LASSERT (cfs_list_empty (&peer->ksnp_conns));
failed = 1;
peer->ksnp_error = 0; /* avoid multiple notifications */
}
LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
- list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
+ cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
LASSERT (conn->ksnc_route == NULL);
LASSERT (!conn->ksnc_tx_scheduled);
LASSERT (!conn->ksnc_rx_scheduled);
- LASSERT (list_empty(&conn->ksnc_tx_queue));
+ LASSERT (cfs_list_empty(&conn->ksnc_tx_queue));
/* complete current receive if any */
switch (conn->ksnc_rx_state) {
ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
{
ksock_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
+ cfs_list_t *ctmp;
+ cfs_list_t *cnxt;
int count = 0;
- list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
+ conn = cfs_list_entry (ctmp, ksock_conn_t, ksnc_list);
if (ipaddr == 0 ||
conn->ksnc_ipaddr == ipaddr) {
ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
{
ksock_peer_t *peer;
- struct list_head *ptmp;
- struct list_head *pnxt;
+ cfs_list_t *ptmp;
+ cfs_list_t *pnxt;
int lo;
int hi;
int i;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
+ cfs_list_for_each_safe (ptmp, pnxt,
+ &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+ peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
(id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
int connect = 1;
cfs_time_t last_alive = 0;
ksock_peer_t *peer = NULL;
- rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
+ cfs_rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
- read_lock(glock);
+ cfs_read_lock(glock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL) {
- struct list_head *tmp;
+ cfs_list_t *tmp;
ksock_conn_t *conn;
int bufnob;
- list_for_each (tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each (tmp, &peer->ksnp_conns) {
+ conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
if (bufnob < conn->ksnc_tx_bufnob) {
connect = 0;
}
- read_unlock(glock);
+ cfs_read_unlock(glock);
if (last_alive != 0)
*when = last_alive;
ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
- write_lock_bh(glock);
+ cfs_write_lock_bh(glock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL)
ksocknal_launch_all_connections_locked(peer);
- write_unlock_bh(glock);
+ cfs_write_unlock_bh(glock);
return;
}
{
int index;
int i;
- struct list_head *tmp;
+ cfs_list_t *tmp;
ksock_conn_t *conn;
for (index = 0; ; index++) {
i = 0;
conn = NULL;
- list_for_each (tmp, &peer->ksnp_conns) {
+ cfs_list_for_each (tmp, &peer->ksnp_conns) {
if (i++ == index) {
- conn = list_entry (tmp, ksock_conn_t, ksnc_list);
+ conn = cfs_list_entry (tmp, ksock_conn_t,
+ ksnc_list);
ksocknal_conn_addref(conn);
break;
}
ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
{
ksock_peer_t *peer;
- struct list_head *tmp;
+ cfs_list_t *tmp;
int index;
int i;
int j;
index = 0;
peer = NULL;
- list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(tmp, ksock_peer_t,
- ksnp_list);
+ cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = cfs_list_entry(tmp, ksock_peer_t,
+ ksnp_list);
if (!((id.nid == LNET_NID_ANY ||
id.nid == peer->ksnp_id.nid) &&
int rc;
int i;
int j;
- struct list_head *ptmp;
+ cfs_list_t *ptmp;
ksock_peer_t *peer;
- struct list_head *rtmp;
+ cfs_list_t *rtmp;
ksock_route_t *route;
if (ipaddress == 0 ||
iface->ksni_npeers = 0;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ cfs_list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = cfs_list_entry(ptmp, ksock_peer_t,
+ ksnp_list);
for (j = 0; j < peer->ksnp_n_passive_ips; j++)
if (peer->ksnp_passive_ips[j] == ipaddress)
iface->ksni_npeers++;
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each(rtmp, &peer->ksnp_routes) {
+ route = cfs_list_entry(rtmp,
+ ksock_route_t,
+ ksnr_list);
if (route->ksnr_myipaddr == ipaddress)
iface->ksni_nroutes++;
void
ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
{
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
ksock_route_t *route;
ksock_conn_t *conn;
int i;
break;
}
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
+ route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
if (route->ksnr_myipaddr != ipaddr)
continue;
}
}
- list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
+ conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
if (conn->ksnc_myipaddr == ipaddr)
ksocknal_close_conn_locked (conn, 0);
{
ksock_net_t *net = ni->ni_data;
int rc = -ENOENT;
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
ksock_peer_t *peer;
__u32 this_ip;
int i;
net->ksnn_ninterfaces--;
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
- list_for_each_safe(tmp, nxt, &ksocknal_data.ksnd_peers[j]) {
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+ cfs_list_for_each_safe(tmp, nxt,
+ &ksocknal_data.ksnd_peers[j]) {
+ peer = cfs_list_entry(tmp, ksock_peer_t,
+ ksnp_list);
if (peer->ksnp_ni != ni)
continue;
sizeof (ksock_sched_t) * ksocknal_data.ksnd_nschedulers);
LIBCFS_FREE (ksocknal_data.ksnd_peers,
- sizeof (struct list_head) *
+ sizeof (cfs_list_t) *
ksocknal_data.ksnd_peer_hash_size);
cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- struct list_head zlist;
+ if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ cfs_list_t zlist;
ksock_tx_t *tx;
- list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
- list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+ cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
+ cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
- while(!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_list);
- list_del(&tx->tx_list);
+ while(!cfs_list_empty(&zlist)) {
+ tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
} else {
case SOCKNAL_INIT_DATA:
LASSERT (ksocknal_data.ksnd_peers != NULL);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- LASSERT (list_empty (&ksocknal_data.ksnd_peers[i]));
+ LASSERT (cfs_list_empty (&ksocknal_data.ksnd_peers[i]));
}
- LASSERT (list_empty (&ksocknal_data.ksnd_enomem_conns));
- LASSERT (list_empty (&ksocknal_data.ksnd_zombie_conns));
- LASSERT (list_empty (&ksocknal_data.ksnd_connd_connreqs));
- LASSERT (list_empty (&ksocknal_data.ksnd_connd_routes));
+ LASSERT (cfs_list_empty (&ksocknal_data.ksnd_enomem_conns));
+ LASSERT (cfs_list_empty (&ksocknal_data.ksnd_zombie_conns));
+ LASSERT (cfs_list_empty (&ksocknal_data.ksnd_connd_connreqs));
+ LASSERT (cfs_list_empty (&ksocknal_data.ksnd_connd_routes));
if (ksocknal_data.ksnd_schedulers != NULL)
for (i = 0; i < ksocknal_data.ksnd_nschedulers; i++) {
ksock_sched_t *kss =
&ksocknal_data.ksnd_schedulers[i];
- LASSERT (list_empty (&kss->kss_tx_conns));
- LASSERT (list_empty (&kss->kss_rx_conns));
- LASSERT (list_empty (&kss->kss_zombie_noop_txs));
+ LASSERT (cfs_list_empty (&kss->kss_tx_conns));
+ LASSERT (cfs_list_empty (&kss->kss_rx_conns));
+ LASSERT (cfs_list_empty (&kss-> \
+ kss_zombie_noop_txs));
LASSERT (kss->kss_nconns == 0);
}
* we won't be able to reboot more frequently than 1MHz for the
* forseeable future :) */
- cfs_do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
}
ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
LIBCFS_ALLOC (ksocknal_data.ksnd_peers,
- sizeof (struct list_head) * ksocknal_data.ksnd_peer_hash_size);
+ sizeof (cfs_list_t) *
+ ksocknal_data.ksnd_peer_hash_size);
if (ksocknal_data.ksnd_peers == NULL)
return -ENOMEM;
ksocknal_debug_peerhash (lnet_ni_t *ni)
{
ksock_peer_t *peer = NULL;
- struct list_head *tmp;
+ cfs_list_t *tmp;
int i;
cfs_read_lock (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry (tmp, ksock_peer_t, ksnp_list);
+ cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = cfs_list_entry (tmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni == ni) break;
peer->ksnp_sharecount, peer->ksnp_closing,
peer->ksnp_accepting, peer->ksnp_error,
peer->ksnp_zc_next_cookie,
- !list_empty(&peer->ksnp_tx_queue),
- !list_empty(&peer->ksnp_zc_req_list));
+ !cfs_list_empty(&peer->ksnp_tx_queue),
+ !cfs_list_empty(&peer->ksnp_zc_req_list));
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each (tmp, &peer->ksnp_routes) {
+ route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
"del %d\n", cfs_atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
route->ksnr_connected, route->ksnr_deleted);
}
- list_for_each (tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each (tmp, &peer->ksnp_conns) {
+ conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
cfs_atomic_read(&conn->ksnc_conn_refcount),
cfs_atomic_read(&conn->ksnc_sock_refcount),
typedef struct /* per scheduler state */
{
cfs_spinlock_t kss_lock; /* serialise */
- struct list_head kss_rx_conns; /* conn waiting to be read */
- struct list_head kss_tx_conns; /* conn waiting to be written */
- struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
+ cfs_list_t kss_rx_conns; /* conn waiting to be read */
+ cfs_list_t kss_tx_conns; /* conn waiting to be written */
+ cfs_list_t kss_zombie_noop_txs; /* zombie noop tx list */
cfs_waitq_t kss_waitq; /* where scheduler sleeps */
int kss_nconns; /* # connections assigned to this scheduler */
#if !SOCKNAL_SINGLE_FRAG_RX
typedef struct
{
- int ksnd_init; /* initialisation state */
- int ksnd_nnets; /* # networks set up */
-
- cfs_rwlock_t ksnd_global_lock; /* stabilize peer/conn ops */
- struct list_head *ksnd_peers; /* hash table of all my known peers */
- int ksnd_peer_hash_size; /* size of ksnd_peers */
-
- int ksnd_nthreads; /* # live threads */
- int ksnd_shuttingdown; /* tell threads to exit */
- int ksnd_nschedulers; /* # schedulers */
- ksock_sched_t *ksnd_schedulers; /* their state */
-
- cfs_atomic_t ksnd_nactive_txs; /* #active txs */
-
- struct list_head ksnd_deathrow_conns; /* conns to close: reaper_lock*/
- struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */
- struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock*/
- cfs_waitq_t ksnd_reaper_waitq; /* reaper sleeps here */
- cfs_time_t ksnd_reaper_waketime; /* when reaper will wake */
- cfs_spinlock_t ksnd_reaper_lock; /* serialise */
-
- int ksnd_enomem_tx; /* test ENOMEM sender */
- int ksnd_stall_tx; /* test sluggish sender */
- int ksnd_stall_rx; /* test sluggish receiver */
-
- struct list_head ksnd_connd_connreqs; /* incoming connection requests */
- struct list_head ksnd_connd_routes; /* routes waiting to be connected */
- cfs_waitq_t ksnd_connd_waitq; /* connds sleep here */
+ int ksnd_init; /* initialisation state */
+ int ksnd_nnets; /* # networks set up */
+
+ cfs_rwlock_t ksnd_global_lock; /* stabilize peer/conn ops */
+ cfs_list_t *ksnd_peers; /* hash table of all my known peers */
+ int ksnd_peer_hash_size; /* size of ksnd_peers */
+
+ int ksnd_nthreads; /* # live threads */
+ int ksnd_shuttingdown; /* tell threads to exit */
+ int ksnd_nschedulers; /* # schedulers */
+ ksock_sched_t *ksnd_schedulers; /* their state */
+
+ cfs_atomic_t ksnd_nactive_txs; /* #active txs */
+
+ cfs_list_t ksnd_deathrow_conns; /* conns to close: reaper_lock*/
+ cfs_list_t ksnd_zombie_conns; /* conns to free: reaper_lock */
+ cfs_list_t ksnd_enomem_conns; /* conns to retry: reaper_lock*/
+ cfs_waitq_t ksnd_reaper_waitq; /* reaper sleeps here */
+ cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
+ cfs_spinlock_t ksnd_reaper_lock; /* serialise */
+
+ int ksnd_enomem_tx; /* test ENOMEM sender */
+ int ksnd_stall_tx; /* test sluggish sender */
+ int ksnd_stall_rx; /* test sluggish receiver */
+
+ cfs_list_t ksnd_connd_connreqs; /* incoming connection requests */
+ cfs_list_t ksnd_connd_routes; /* routes waiting to be connected */
+ cfs_waitq_t ksnd_connd_waitq; /* connds sleep here */
int ksnd_connd_connecting;/* # connds connecting */
- cfs_spinlock_t ksnd_connd_lock; /* serialise */
+ cfs_spinlock_t ksnd_connd_lock; /* serialise */
- struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */
- cfs_spinlock_t ksnd_tx_lock; /* serialise, NOT safe in g_lock */
+ cfs_list_t ksnd_idle_noop_txs; /* list head for freed noop tx */
+ cfs_spinlock_t ksnd_tx_lock; /* serialise, NOT safe in g_lock */
- ksock_irqinfo_t ksnd_irqinfo[NR_IRQS];/* irq->scheduler lookup */
+ ksock_irqinfo_t ksnd_irqinfo[CFS_NR_IRQS];/* irq->scheduler lookup */
} ksock_nal_data_t;
typedef struct /* transmit packet */
{
- struct list_head tx_list; /* queue on conn for transmission etc */
- struct list_head tx_zc_list; /* queue on peer for ZC request */
- cfs_atomic_t tx_refcount; /* tx reference count */
- int tx_nob; /* # packet bytes */
- int tx_resid; /* residual bytes */
- int tx_niov; /* # packet iovec frags */
- struct iovec *tx_iov; /* packet iovec frags */
- int tx_nkiov; /* # packet page frags */
- unsigned int tx_zc_capable:1; /* payload is large enough for ZC */
- unsigned int tx_zc_checked:1; /* Have I checked if I should ZC? */
- unsigned int tx_nonblk:1; /* it's a non-blocking ACK */
- lnet_kiov_t *tx_kiov; /* packet page frags */
- struct ksock_conn *tx_conn; /* owning conn */
- lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
- cfs_time_t tx_deadline; /* when (in jiffies) tx times out */
- ksock_msg_t tx_msg; /* socklnd message buffer */
- int tx_desc_size; /* size of this descriptor */
+ cfs_list_t tx_list; /* queue on conn for transmission etc */
+ cfs_list_t tx_zc_list; /* queue on peer for ZC request */
+ cfs_atomic_t tx_refcount; /* tx reference count */
+ int tx_nob; /* # packet bytes */
+ int tx_resid; /* residual bytes */
+ int tx_niov; /* # packet iovec frags */
+ struct iovec *tx_iov; /* packet iovec frags */
+ int tx_nkiov; /* # packet page frags */
+ unsigned int tx_zc_capable:1; /* payload is large enough for ZC */
+ unsigned int tx_zc_checked:1; /* Have I checked if I should ZC? */
+ unsigned int tx_nonblk:1; /* it's a non-blocking ACK */
+ lnet_kiov_t *tx_kiov; /* packet page frags */
+ struct ksock_conn *tx_conn; /* owning conn */
+ lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
+ cfs_time_t tx_deadline; /* when (in jiffies) tx times out */
+ ksock_msg_t tx_msg; /* socklnd message buffer */
+ int tx_desc_size; /* size of this descriptor */
union {
struct {
struct iovec iov; /* virt hdr */
typedef struct ksock_conn
{
- struct ksock_peer *ksnc_peer; /* owning peer */
- struct ksock_route *ksnc_route; /* owning route */
- struct list_head ksnc_list; /* stash on peer's conn list */
- cfs_socket_t *ksnc_sock; /* actual socket */
+ struct ksock_peer *ksnc_peer; /* owning peer */
+ struct ksock_route *ksnc_route; /* owning route */
+ cfs_list_t ksnc_list; /* stash on peer's conn list */
+ cfs_socket_t *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
void *ksnc_saved_write_space; /* socket's original write_space() callback */
cfs_atomic_t ksnc_conn_refcount; /* conn refcount */
cfs_atomic_t ksnc_sock_refcount; /* sock refcount */
- ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
- __u32 ksnc_myipaddr; /* my IP */
- __u32 ksnc_ipaddr; /* peer's IP */
- int ksnc_port; /* peer's port */
- int ksnc_type:3; /* type of connection, should be signed value */
- int ksnc_closing:1; /* being shut down */
- int ksnc_flip:1; /* flip or not, only for V2.x */
- int ksnc_zc_capable:1; /* enable to ZC */
- struct ksock_proto *ksnc_proto; /* protocol for the connection */
+ ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
+ __u32 ksnc_myipaddr; /* my IP */
+ __u32 ksnc_ipaddr; /* peer's IP */
+ int ksnc_port; /* peer's port */
+ int ksnc_type:3; /* type of connection, should be signed value */
+ int ksnc_closing:1; /* being shut down */
+ int ksnc_flip:1; /* flip or not, only for V2.x */
+ int ksnc_zc_capable:1; /* enable to ZC */
+ struct ksock_proto *ksnc_proto; /* protocol for the connection */
/* reader */
- struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
- cfs_time_t ksnc_rx_deadline; /* when (in jiffies) receive times out */
- __u8 ksnc_rx_started; /* started receiving a message */
- __u8 ksnc_rx_ready; /* data ready to read */
- __u8 ksnc_rx_scheduled; /* being progressed */
- __u8 ksnc_rx_state; /* what is being read */
- int ksnc_rx_nob_left; /* # bytes to next hdr/body */
- int ksnc_rx_nob_wanted; /* bytes actually wanted */
- int ksnc_rx_niov; /* # iovec frags */
- struct iovec *ksnc_rx_iov; /* the iovec frags */
- int ksnc_rx_nkiov; /* # page frags */
- lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
- ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */
- __u32 ksnc_rx_csum; /* partial checksum for incoming data */
- void *ksnc_cookie; /* rx lnet_finalize passthru arg */
- ksock_msg_t ksnc_msg; /* incoming message buffer:
- * V2.x message takes the whole struct
- * V1.x message is a bare lnet_hdr_t, it's stored
- * in ksnc_msg.ksm_u.lnetmsg */
+ cfs_list_t ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
+ cfs_time_t ksnc_rx_deadline; /* when (in jiffies) receive times out */
+ __u8 ksnc_rx_started; /* started receiving a message */
+ __u8 ksnc_rx_ready; /* data ready to read */
+ __u8 ksnc_rx_scheduled;/* being progressed */
+ __u8 ksnc_rx_state; /* what is being read */
+ int ksnc_rx_nob_left; /* # bytes to next hdr/body */
+ int ksnc_rx_nob_wanted; /* bytes actually wanted */
+ int ksnc_rx_niov; /* # iovec frags */
+ struct iovec *ksnc_rx_iov; /* the iovec frags */
+ int ksnc_rx_nkiov; /* # page frags */
+ lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
+ ksock_rxiovspace_t ksnc_rx_iov_space;/* space for frag descriptors */
+ __u32 ksnc_rx_csum; /* partial checksum for incoming data */
+ void *ksnc_cookie; /* rx lnet_finalize passthru arg */
+ ksock_msg_t ksnc_msg; /* incoming message buffer:
+ * V2.x message takes the
+ * whole struct
+ * V1.x message is a bare
+ * lnet_hdr_t, it's stored in
+ * ksnc_msg.ksm_u.lnetmsg */
/* WRITER */
- struct list_head ksnc_tx_list; /* where I enq waiting for output space */
- struct list_head ksnc_tx_queue; /* packets waiting to be sent */
- ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
- cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
- int ksnc_tx_bufnob; /* send buffer marker */
- cfs_atomic_t ksnc_tx_nob; /* # bytes queued */
- int ksnc_tx_ready; /* write space */
- int ksnc_tx_scheduled; /* being progressed */
- cfs_time_t ksnc_tx_last_post; /* time stamp of the last posted TX */
+ cfs_list_t ksnc_tx_list; /* where I enq waiting for output space */
+ cfs_list_t ksnc_tx_queue; /* packets waiting to be sent */
+ ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
+ cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
+ int ksnc_tx_bufnob; /* send buffer marker */
+ cfs_atomic_t ksnc_tx_nob; /* # bytes queued */
+ int ksnc_tx_ready; /* write space */
+ int ksnc_tx_scheduled; /* being progressed */
+ cfs_time_t ksnc_tx_last_post; /* time stamp of the last posted TX */
} ksock_conn_t;
typedef struct ksock_route
{
- struct list_head ksnr_list; /* chain on peer route list */
- struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
- struct ksock_peer *ksnr_peer; /* owning peer */
- cfs_atomic_t ksnr_refcount; /* # users */
- cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
- cfs_duration_t ksnr_retry_interval; /* how long between retries */
- __u32 ksnr_myipaddr; /* my IP */
- __u32 ksnr_ipaddr; /* IP address to connect to */
- int ksnr_port; /* port to connect to */
- unsigned int ksnr_scheduled:1; /* scheduled for attention */
- unsigned int ksnr_connecting:1; /* connection establishment in progress */
- unsigned int ksnr_connected:4; /* connections established by type */
- unsigned int ksnr_deleted:1; /* been removed from peer? */
- unsigned int ksnr_share_count; /* created explicitly? */
- int ksnr_conn_count; /* # conns established by this route */
+ cfs_list_t ksnr_list; /* chain on peer route list */
+ cfs_list_t ksnr_connd_list; /* chain on ksnr_connd_routes */
+ struct ksock_peer *ksnr_peer; /* owning peer */
+ cfs_atomic_t ksnr_refcount; /* # users */
+ cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
+ cfs_duration_t ksnr_retry_interval; /* how long between retries */
+ __u32 ksnr_myipaddr; /* my IP */
+ __u32 ksnr_ipaddr; /* IP address to connect to */
+ int ksnr_port; /* port to connect to */
+ unsigned int ksnr_scheduled:1; /* scheduled for attention */
+ unsigned int ksnr_connecting:1;/* connection establishment in progress */
+ unsigned int ksnr_connected:4; /* connections established by type */
+ unsigned int ksnr_deleted:1; /* been removed from peer? */
+ unsigned int ksnr_share_count; /* created explicitly? */
+ int ksnr_conn_count; /* # conns established by this route */
} ksock_route_t;
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
typedef struct ksock_peer
{
- struct list_head ksnp_list; /* stash on global peer list */
- cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
- lnet_process_id_t ksnp_id; /* who's on the other end(s) */
- cfs_atomic_t ksnp_refcount; /* # users */
- int ksnp_sharecount; /* lconf usage counter */
- int ksnp_closing; /* being closed */
- int ksnp_accepting; /* # passive connections pending */
- int ksnp_error; /* errno on closing last conn */
- __u64 ksnp_zc_next_cookie;/* ZC completion cookie */
- __u64 ksnp_incarnation; /* latest known peer incarnation */
- struct ksock_proto *ksnp_proto; /* latest known peer protocol */
- struct list_head ksnp_conns; /* all active connections */
- struct list_head ksnp_routes; /* routes */
- struct list_head ksnp_tx_queue; /* waiting packets */
- cfs_spinlock_t ksnp_lock; /* serialize, NOT safe in g_lock */
- struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */
- cfs_time_t ksnp_send_keepalive; /* time to send keepalive */
- lnet_ni_t *ksnp_ni; /* which network */
- int ksnp_n_passive_ips; /* # of... */
- __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
+ cfs_list_t ksnp_list; /* stash on global peer list */
+ cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
+ lnet_process_id_t ksnp_id; /* who's on the other end(s) */
+ cfs_atomic_t ksnp_refcount; /* # users */
+ int ksnp_sharecount; /* lconf usage counter */
+ int ksnp_closing; /* being closed */
+ int ksnp_accepting;/* # passive connections pending */
+ int ksnp_error; /* errno on closing last conn */
+ __u64 ksnp_zc_next_cookie;/* ZC completion cookie */
+ __u64 ksnp_incarnation; /* latest known peer incarnation */
+ struct ksock_proto *ksnp_proto; /* latest known peer protocol */
+ cfs_list_t ksnp_conns; /* all active connections */
+ cfs_list_t ksnp_routes; /* routes */
+ cfs_list_t ksnp_tx_queue; /* waiting packets */
+ cfs_spinlock_t ksnp_lock; /* serialize, NOT safe in g_lock */
+ cfs_list_t ksnp_zc_req_list; /* zero copy requests wait for ACK */
+ cfs_time_t ksnp_send_keepalive; /* time to send keepalive */
+ lnet_ni_t *ksnp_ni; /* which network */
+ int ksnp_n_passive_ips; /* # of... */
+ __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
} ksock_peer_t;
typedef struct ksock_connreq
{
- struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
- lnet_ni_t *ksncr_ni; /* chosen NI */
- cfs_socket_t *ksncr_sock; /* accepted socket */
+ cfs_list_t ksncr_list; /* stash on ksnd_connd_connreqs */
+ lnet_ni_t *ksncr_ni; /* chosen NI */
+ cfs_socket_t *ksncr_sock; /* accepted socket */
} ksock_connreq_t;
extern ksock_nal_data_t ksocknal_data;
extern ksock_tunables_t ksocknal_tunables;
-#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
-#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
-#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */
+#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
+#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
+#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */
typedef struct ksock_proto
{
(1 << SOCKLND_CONN_BULK_OUT));
}
-static inline struct list_head *
+static inline cfs_list_t *
ksocknal_nid2peerlist (lnet_nid_t nid)
{
unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
-extern void ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error);
+extern void ksocknal_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
+ int error);
extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
extern int ksocknal_thread_start (int (*fn)(void *arg), void *arg);
extern int ksocknal_reaper (void *arg);
extern int ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-extern int ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+extern int ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
ksock_hello_msg_t *hello, lnet_process_id_t *id,
__u64 *incarnation);
extern void ksocknal_read_callback(ksock_conn_t *conn);
/* searching for a noop tx in free list */
cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
- ksock_tx_t, tx_list);
+ if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ tx = cfs_list_entry(ksocknal_data.ksnd_idle_noop_txs. \
+ next, ksock_tx_t, tx_list);
LASSERT(tx->tx_desc_size == size);
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
}
cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
/* it's a noop tx */
cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
- list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
+ cfs_list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
} else {
}
void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int error)
{
ksock_tx_t *tx;
- while (!list_empty (txlist)) {
- tx = list_entry (txlist->next, ksock_tx_t, tx_list);
+ while (!cfs_list_empty (txlist)) {
+ tx = cfs_list_entry (txlist->next, ksock_tx_t, tx_list);
if (error && tx->tx_lnetmsg != NULL) {
CDEBUG (D_NETERROR, "Deleting packet type %d len %d %s->%s\n",
CDEBUG (D_NETERROR, "Deleting noop packet\n");
}
- list_del (&tx->tx_list);
+ cfs_list_del (&tx->tx_list);
LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done (ni, tx);
if (peer->ksnp_zc_next_cookie == 0)
peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
- list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
+ cfs_list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
cfs_spin_unlock(&peer->ksnp_lock);
}
}
tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
+ cfs_list_del(&tx->tx_zc_list);
cfs_spin_unlock(&peer->ksnp_lock);
/* enomem list takes over scheduler's ref... */
LASSERT (conn->ksnc_tx_scheduled);
- list_add_tail(&conn->ksnc_tx_list,
- &ksocknal_data.ksnd_enomem_conns);
+ cfs_list_add_tail(&conn->ksnc_tx_list,
+ &ksocknal_data.ksnd_enomem_conns);
if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
SOCKNAL_ENOMEM_RETRY),
ksocknal_data.ksnd_reaper_waketime))
cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
- list_add_tail (&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
+ cfs_list_add_tail (&route->ksnr_connd_list,
+ &ksocknal_data.ksnd_connd_routes);
cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
ksock_conn_t *
ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
ksock_conn_t *conn;
ksock_conn_t *typed = NULL;
ksock_conn_t *fallback = NULL;
int tnob = 0;
int fnob = 0;
- list_for_each (tmp, &peer->ksnp_conns) {
- ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
+ cfs_list_for_each (tmp, &peer->ksnp_conns) {
+ ksock_conn_t *c = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
int nob = cfs_atomic_read(&c->ksnc_tx_nob) +
libcfs_sock_wmem_queued(c->ksnc_sock);
int rc;
bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
cfs_spin_lock_bh (&sched->kss_lock);
- if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+ if (cfs_list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
if (ztx != NULL) {
cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
- list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
+ cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
if (conn->ksnc_tx_ready && /* able to send */
!conn->ksnc_tx_scheduled) { /* not scheduled to send */
/* +1 ref for scheduler */
ksocknal_conn_addref(conn);
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ cfs_list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
cfs_waitq_signal (&sched->kss_waitq);
}
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
ksock_route_t *route;
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each (tmp, &peer->ksnp_routes) {
+ route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
ksock_route_t *
ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
ksock_route_t *route;
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ cfs_list_for_each (tmp, &peer->ksnp_routes) {
+ route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
/* Queue the message until a connection is established */
- list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+ cfs_list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
cfs_write_unlock_bh (g_lock);
return 0;
}
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_PARSE_WAIT:
- list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+ cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
cfs_waitq_signal (&sched->kss_waitq);
LASSERT (conn->ksnc_rx_ready);
break;
cfs_spin_lock_bh (&sched->kss_lock);
rc = (!ksocknal_data.ksnd_shuttingdown &&
- list_empty(&sched->kss_rx_conns) &&
- list_empty(&sched->kss_tx_conns));
+ cfs_list_empty(&sched->kss_rx_conns) &&
+ cfs_list_empty(&sched->kss_tx_conns));
cfs_spin_unlock_bh (&sched->kss_lock);
return (rc);
/* Ensure I progress everything semi-fairly */
- if (!list_empty (&sched->kss_rx_conns)) {
- conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
- list_del(&conn->ksnc_rx_list);
+ if (!cfs_list_empty (&sched->kss_rx_conns)) {
+ conn = cfs_list_entry(sched->kss_rx_conns.next,
+ ksock_conn_t, ksnc_rx_list);
+ cfs_list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
LASSERT(conn->ksnc_rx_ready);
conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
} else if (conn->ksnc_rx_ready) {
/* reschedule for rx */
- list_add_tail (&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
+ cfs_list_add_tail (&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
} else {
conn->ksnc_rx_scheduled = 0;
/* drop my ref */
did_something = 1;
}
- if (!list_empty (&sched->kss_tx_conns)) {
+ if (!cfs_list_empty (&sched->kss_tx_conns)) {
CFS_LIST_HEAD (zlist);
- if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist, &sched->kss_zombie_noop_txs);
- list_del_init(&sched->kss_zombie_noop_txs);
+ if (!cfs_list_empty(&sched->kss_zombie_noop_txs)) {
+ cfs_list_add(&zlist,
+ &sched->kss_zombie_noop_txs);
+ cfs_list_del_init(&sched->kss_zombie_noop_txs);
}
- conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
+ conn = cfs_list_entry(sched->kss_tx_conns.next,
+ ksock_conn_t, ksnc_tx_list);
+ cfs_list_del (&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
- LASSERT(!list_empty(&conn->ksnc_tx_queue));
+ LASSERT(!cfs_list_empty(&conn->ksnc_tx_queue));
- tx = list_entry(conn->ksnc_tx_queue.next,
- ksock_tx_t, tx_list);
+ tx = cfs_list_entry(conn->ksnc_tx_queue.next,
+ ksock_tx_t, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
/* dequeue now so empty list => more to send */
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
/* Clear tx_ready in case send isn't complete. Do
* it BEFORE we call process_transmit, since
conn->ksnc_tx_ready = 0;
cfs_spin_unlock_bh (&sched->kss_lock);
- if (!list_empty(&zlist)) {
+ if (!cfs_list_empty(&zlist)) {
/* free zombie noop txs, it's fast because
* noop txs are just put in freelist */
ksocknal_txlist_done(NULL, &zlist, 0);
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
cfs_spin_lock_bh (&sched->kss_lock);
- list_add (&tx->tx_list, &conn->ksnc_tx_queue);
+ cfs_list_add (&tx->tx_list,
+ &conn->ksnc_tx_queue);
} else {
/* Complete send; tx -ref */
ksocknal_tx_decref (tx);
/* Do nothing; after a short timeout, this
* conn will be reposted on kss_tx_conns. */
} else if (conn->ksnc_tx_ready &&
- !list_empty (&conn->ksnc_tx_queue)) {
+ !cfs_list_empty (&conn->ksnc_tx_queue)) {
/* reschedule for tx */
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ cfs_list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
} else {
conn->ksnc_tx_scheduled = 0;
/* drop my ref */
!ksocknal_sched_cansleep(sched), rc);
LASSERT (rc == 0);
} else {
- our_cond_resched();
+ cfs_cond_resched();
}
cfs_spin_lock_bh (&sched->kss_lock);
conn->ksnc_rx_ready = 1;
if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
+ cfs_list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
conn->ksnc_rx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
conn->ksnc_tx_ready = 1;
- if (!conn->ksnc_tx_scheduled && // not being progressed
- !list_empty(&conn->ksnc_tx_queue)){//packets to send
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ if (!conn->ksnc_tx_scheduled && // not being progressed
+ !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
+ cfs_list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
route->ksnr_timeout = cfs_time_add(cfs_time_current(),
route->ksnr_retry_interval);
- if (!list_empty(&peer->ksnp_tx_queue) &&
+ if (!cfs_list_empty(&peer->ksnp_tx_queue) &&
peer->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer) == NULL) {
ksock_conn_t *conn;
/* ksnp_tx_queue is queued on a conn on successful
* connection for V1.x and V2.x */
- if (!list_empty (&peer->ksnp_conns)) {
- conn = list_entry(peer->ksnp_conns.next, ksock_conn_t, ksnc_list);
+ if (!cfs_list_empty (&peer->ksnp_conns)) {
+ conn = cfs_list_entry(peer->ksnp_conns.next,
+ ksock_conn_t, ksnc_list);
LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
}
/* take all the blocked packets while I've got the lock and
* complete below... */
- list_splice_init(&peer->ksnp_tx_queue, &zombies);
+ cfs_list_splice_init(&peer->ksnp_tx_queue, &zombies);
}
#if 0 /* irrelevent with only eager routes */
if (!route->ksnr_deleted) {
/* make this route least-favourite for re-selection */
- list_del(&route->ksnr_list);
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+ cfs_list_del(&route->ksnr_list);
+ cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
}
#endif
cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
now = cfs_time_current();
/* connd_routes can contain both pending and ordinary routes */
- list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
+ cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
if (route->ksnr_retry_interval == 0 ||
cfs_time_aftereq(now, route->ksnr_timeout))
dropped_lock = 0;
- if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+ if (!cfs_list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
- ksock_connreq_t, ksncr_list);
+ cr = cfs_list_entry(ksocknal_data.ksnd_connd_connreqs. \
+ next, ksock_connreq_t, ksncr_list);
- list_del(&cr->ksncr_list);
+ cfs_list_del(&cr->ksncr_list);
cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
dropped_lock = 1;
route = ksocknal_connd_get_route_locked(&timeout);
if (route != NULL) {
- list_del (&route->ksnr_connd_list);
+ cfs_list_del (&route->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
dropped_lock = 1;
/* Nothing to do for 'timeout' */
cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive (&ksocknal_data.ksnd_connd_waitq, &wait);
+ cfs_waitq_add_exclusive (&ksocknal_data.ksnd_connd_waitq,
+ &wait);
cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
{
/* We're called with a shared lock on ksnd_global_lock */
ksock_conn_t *conn;
- struct list_head *ctmp;
+ cfs_list_t *ctmp;
- list_for_each (ctmp, &peer->ksnp_conns) {
+ cfs_list_for_each (ctmp, &peer->ksnp_conns) {
int error;
- conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+ conn = cfs_list_entry (ctmp, ksock_conn_t, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
return (conn);
}
- if ((!list_empty(&conn->ksnc_tx_queue) ||
+ if ((!cfs_list_empty(&conn->ksnc_tx_queue) ||
libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
cfs_time_aftereq(cfs_time_current(),
conn->ksnc_tx_deadline)) {
{
ksock_tx_t *tx;
CFS_LIST_HEAD (stale_txs);
-
+
cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
- while (!list_empty (&peer->ksnp_tx_queue)) {
- tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ while (!cfs_list_empty (&peer->ksnp_tx_queue)) {
+ tx = cfs_list_entry (peer->ksnp_tx_queue.next,
+ ksock_tx_t, tx_list);
if (!cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline))
break;
-
- list_del (&tx->tx_list);
- list_add_tail (&tx->tx_list, &stale_txs);
+
+ cfs_list_del (&tx->tx_list);
+ cfs_list_add_tail (&tx->tx_list, &stale_txs);
}
cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
ksock_conn_t *conn;
ksock_tx_t *tx;
- if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+ if (cfs_list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
return 0;
if (peer->ksnp_proto != &ksocknal_protocol_v3x)
if (conn != NULL) {
sched = conn->ksnc_scheduler;
- spin_lock_bh (&sched->kss_lock);
- if (!list_empty(&conn->ksnc_tx_queue)) {
- spin_unlock_bh(&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
+ if (!cfs_list_empty(&conn->ksnc_tx_queue)) {
+ cfs_spin_unlock_bh(&sched->kss_lock);
/* there is an queued ACK, don't need keepalive */
return 0;
}
- spin_unlock_bh(&sched->kss_lock);
+ cfs_spin_unlock_bh(&sched->kss_lock);
}
- read_unlock(&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock(&ksocknal_data.ksnd_global_lock);
/* cookie = 1 is reserved for keepalive PING */
tx = ksocknal_alloc_tx_noop(1, 1);
if (tx == NULL) {
- read_lock(&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock(&ksocknal_data.ksnd_global_lock);
return -ENOMEM;
}
if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
- read_lock(&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock(&ksocknal_data.ksnd_global_lock);
return 1;
}
ksocknal_free_tx(tx);
- read_lock(&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock(&ksocknal_data.ksnd_global_lock);
return -EIO;
}
void
ksocknal_check_peer_timeouts (int idx)
{
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+ cfs_list_t *peers = &ksocknal_data.ksnd_peers[idx];
ksock_peer_t *peer;
ksock_conn_t *conn;
cfs_list_for_each_entry_typed(peer, peers, ksock_peer_t, ksnp_list) {
if (ksocknal_send_keepalive_locked(peer) != 0) {
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
goto again;
}
/* we can't process stale txs right here because we're
* holding only shared lock */
- if (!list_empty (&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ if (!cfs_list_empty (&peer->ksnp_tx_queue)) {
+ ksock_tx_t *tx =
+ cfs_list_entry (peer->ksnp_tx_queue.next,
+ ksock_tx_t, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline)) {
}
if (n != 0) {
- tx = list_entry (peer->ksnp_zc_req_list.next,
- ksock_tx_t, tx_zc_list);
+ tx = cfs_list_entry (peer->ksnp_zc_req_list.next,
+ ksock_tx_t, tx_zc_list);
CWARN("Stale ZC_REQs for peer %s detected: %d; the "
"oldest (%p) timed out %ld secs ago\n",
libcfs_nid2str(peer->ksnp_id.nid), n, tx,
cfs_waitlink_t wait;
ksock_conn_t *conn;
ksock_sched_t *sched;
- struct list_head enomem_conns;
+ cfs_list_t enomem_conns;
int nenomem_conns;
cfs_duration_t timeout;
int i;
while (!ksocknal_data.ksnd_shuttingdown) {
- if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry (ksocknal_data.ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
+ if (!cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
+ conn = cfs_list_entry (ksocknal_data. \
+ ksnd_deathrow_conns.next,
+ ksock_conn_t, ksnc_list);
+ cfs_list_del (&conn->ksnc_list);
cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry (ksocknal_data.ksnd_zombie_conns.next,
- ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
+ if (!cfs_list_empty (&ksocknal_data.ksnd_zombie_conns)) {
+ conn = cfs_list_entry (ksocknal_data.ksnd_zombie_conns.\
+ next, ksock_conn_t, ksnc_list);
+ cfs_list_del (&conn->ksnc_list);
cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
- list_add(&enomem_conns, &ksocknal_data.ksnd_enomem_conns);
- list_del_init(&ksocknal_data.ksnd_enomem_conns);
+ if (!cfs_list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+ cfs_list_add(&enomem_conns,
+ &ksocknal_data.ksnd_enomem_conns);
+ cfs_list_del_init(&ksocknal_data.ksnd_enomem_conns);
}
cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
- while (!list_empty (&enomem_conns)) {
- conn = list_entry (enomem_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
+ while (!cfs_list_empty (&enomem_conns)) {
+ conn = cfs_list_entry (enomem_conns.next,
+ ksock_conn_t, ksnc_tx_list);
+ cfs_list_del (&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
LASSERT (conn->ksnc_tx_scheduled);
conn->ksnc_tx_ready = 1;
- list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+ cfs_list_add_tail(&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
cfs_waitq_signal (&sched->kss_waitq);
cfs_spin_unlock_bh (&sched->kss_lock);
cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
- list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
- list_empty (&ksocknal_data.ksnd_zombie_conns))
- cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
+ cfs_list_empty (&ksocknal_data.ksnd_zombie_conns))
+ cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE,
+ timeout);
cfs_set_current_state (CFS_TASK_RUNNING);
cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
nob += scratchiov[i].iov_len;
}
- if (!list_empty(&conn->ksnc_tx_queue) ||
+ if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
CDEBUG(D_NET, "page %p + offset %x for %d\n",
page, offset, kiov->kiov_len);
- if (!list_empty(&conn->ksnc_tx_queue) ||
+ if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
fragsize < tx->tx_resid)
msgflg |= MSG_MORE;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
- if (!list_empty(&conn->ksnc_tx_queue) ||
+ if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
" ready" : " blocked"),
(conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
" scheduled" : " idle"),
- (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
+ (conn == NULL) ? "" : (cfs_list_empty (&conn->ksnc_tx_queue) ?
" empty" : " queued"));
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
sched = conn->ksnc_scheduler;
cfs_spin_lock_bh (&sched->kss_lock);
-
+
if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
!conn->ksnc_tx_ready) {
/* SOCK_NOSPACE is set when the socket fills
* after a timeout */
rc = -ENOMEM;
}
-
+
cfs_spin_unlock_bh (&sched->kss_lock);
return rc;
ksocknal_lib_csum_tx(tx);
nob = ks_query_iovs_length(tx->tx_iov, tx->tx_niov);
- flags = (!list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
+ flags = (!cfs_list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
(MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
rc = ks_send_iovs(sock, tx->tx_iov, tx->tx_niov, flags, 0);
nkiov = tx->tx_nkiov;
nob = ks_query_kiovs_length(tx->tx_kiov, nkiov);
- flags = (!list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
+ flags = (!cfs_list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
(MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
rc = ks_send_kiovs(sock, tx->tx_kiov, nkiov, flags, 0);
ks_get_tconn(tconn);
- spin_lock(&tconn->kstc_lock);
+ cfs_spin_lock(&tconn->kstc_lock);
if (tconn->kstc_type == kstt_sender) {
nagle = tconn->sender.kstc_info.nagle;
tconn->sender.kstc_info.nagle = 0;
tconn->child.kstc_info.nagle = 0;
}
- spin_unlock(&tconn->kstc_lock);
+ cfs_spin_unlock(&tconn->kstc_lock);
val = 1;
rc = ks_set_tcp_option(
);
LASSERT (rc == 0);
- spin_lock(&tconn->kstc_lock);
+ cfs_spin_lock(&tconn->kstc_lock);
if (tconn->kstc_type == kstt_sender) {
tconn->sender.kstc_info.nagle = nagle;
LASSERT(tconn->kstc_type == kstt_child);
tconn->child.kstc_info.nagle = nagle;
}
- spin_unlock(&tconn->kstc_lock);
+ cfs_spin_unlock(&tconn->kstc_lock);
ks_put_tconn(tconn);
}
{
ksock_conn_t * conn = (ksock_conn_t *) sock->kstc_conn;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
if (mode) {
ksocknal_write_callback(conn);
} else {
ksocknal_read_callback(conn);
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
}
void
ksocknal_tx_fini_callback(ksock_conn_t * conn, ksock_tx_t * tx)
{
/* remove tx/conn from conn's outgoing queue */
- spin_lock_bh (&conn->ksnc_scheduler->kss_lock);
- list_del(&tx->tx_list);
- if (list_empty(&conn->ksnc_tx_queue)) {
- list_del (&conn->ksnc_tx_list);
+ cfs_spin_lock_bh (&conn->ksnc_scheduler->kss_lock);
+ cfs_list_del(&tx->tx_list);
+ if (cfs_list_empty(&conn->ksnc_tx_queue)) {
+ cfs_list_del (&conn->ksnc_tx_list);
}
- spin_unlock_bh (&conn->ksnc_scheduler->kss_lock);
+ cfs_spin_unlock_bh (&conn->ksnc_scheduler->kss_lock);
/* complete send; tx -ref */
ksocknal_tx_decref (tx);
static inline int
ksocknal_nsched(void)
{
- return num_online_cpus();
+ return cfs_num_online_cpus();
}
static inline int
ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
{
/* V1.x, just enqueue it */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
return NULL;
}
ksock_tx_t *tx = conn->ksnc_tx_carrier;
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
- LASSERT (!list_empty(&conn->ksnc_tx_queue));
+ LASSERT (!cfs_list_empty(&conn->ksnc_tx_queue));
LASSERT (tx != NULL);
/* Next TX that can carry ZC-ACK or LNet message */
/* no more packets queued */
conn->ksnc_tx_carrier = NULL;
} else {
- conn->ksnc_tx_carrier = list_entry(tx->tx_list.next, ksock_tx_t, tx_list);
+ conn->ksnc_tx_carrier = cfs_list_entry(tx->tx_list.next,
+ ksock_tx_t, tx_list);
LASSERT (conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
}
}
*/
if (tx == NULL) {
if (tx_ack != NULL) {
- list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+ cfs_list_add_tail(&tx_ack->tx_list,
+ &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_ack;
}
return 0;
if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
/* tx is noop zc-ack, can't piggyback zc-ack cookie */
if (tx_ack != NULL)
- list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+ cfs_list_add_tail(&tx_ack->tx_list,
+ &conn->ksnc_tx_queue);
return 0;
}
* and replace the NOOP tx, and return the NOOP tx.
*/
if (tx == NULL) { /* nothing on queue */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_msg;
return NULL;
}
if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
return NULL;
}
ksocknal_next_tx_carrier(conn);
/* use new_tx to replace the noop zc-ack packet */
- list_add(&tx_msg->tx_list, &tx->tx_list);
- list_del(&tx->tx_list);
+ cfs_list_add(&tx_msg->tx_list, &tx->tx_list);
+ cfs_list_del(&tx->tx_list);
return tx;
}
if ((tx = conn->ksnc_tx_carrier) == NULL) {
if (tx_ack != NULL) {
- list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+ cfs_list_add_tail(&tx_ack->tx_list,
+ &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_ack;
}
return 0;
/* failed to piggyback ZC-ACK */
if (tx_ack != NULL) {
- list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+ cfs_list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
/* the next tx can piggyback at least 1 ACK */
ksocknal_next_tx_carrier(conn);
}
cfs_spin_unlock_bh (&sched->kss_lock);
if (rc) { /* piggybacked */
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return 0;
}
}
cfs_spin_lock(&peer->ksnp_lock);
- list_for_each_entry_safe(tx, tmp,
- &peer->ksnp_zc_req_list, tx_zc_list) {
+ cfs_list_for_each_entry_safe(tx, tmp,
+ &peer->ksnp_zc_req_list, tx_zc_list) {
__u64 c = tx->tx_msg.ksm_zc_cookies[0];
if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
+ cfs_list_del(&tx->tx_zc_list);
+ cfs_list_add(&tx->tx_zc_list, &zlist);
if (--count == 0)
break;
cfs_spin_unlock(&peer->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
- list_del(&tx->tx_zc_list);
+ while (!cfs_list_empty(&zlist)) {
+ tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ cfs_list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
int pta_shutdown;
cfs_socket_t *pta_sock;
#ifdef __KERNEL__
- struct semaphore pta_signal;
+ cfs_completion_t pta_signal;
#else
- struct cfs_completion pta_signal;
+ cfs_mt_completion_t pta_signal;
#endif
} lnet_acceptor_state;
#ifdef __KERNEL__
-#define cfs_init_completion(c) init_mutex_locked(c)
-#define cfs_wait_for_completion(c) mutex_down(c)
-#define cfs_complete(c) mutex_up(c)
-#define cfs_fini_completion(c) do { } while (0)
-#define cfs_create_thread(func, a) cfs_kernel_thread(func, a, 0)
+#define cfs_mt_init_completion(c) cfs_init_completion(c)
+#define cfs_mt_wait_for_completion(c) cfs_wait_for_completion(c)
+#define cfs_mt_complete(c) cfs_complete(c)
+#define cfs_mt_fini_completion(c) cfs_fini_completion(c)
+#define cfs_create_thread(func, a) cfs_kernel_thread(func, a, 0)
EXPORT_SYMBOL(lnet_acceptor_port);
/* set init status and unblock parent */
lnet_acceptor_state.pta_shutdown = rc;
- cfs_complete(&lnet_acceptor_state.pta_signal);
+ cfs_mt_complete(&lnet_acceptor_state.pta_signal);
if (rc != 0)
return rc;
LCONSOLE(0, "Acceptor stopping\n");
/* unblock lnet_acceptor_stop() */
- cfs_complete(&lnet_acceptor_state.pta_signal);
+ cfs_mt_complete(&lnet_acceptor_state.pta_signal);
return 0;
}
if ((the_lnet.ln_pid & LNET_PID_USERFLAG) != 0)
return 0;
#endif
- cfs_init_completion(&lnet_acceptor_state.pta_signal);
+ cfs_mt_init_completion(&lnet_acceptor_state.pta_signal);
rc = accept2secure(accept_type, &secure);
if (rc <= 0) {
- cfs_fini_completion(&lnet_acceptor_state.pta_signal);
+ cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
return rc;
}
rc2 = cfs_create_thread(lnet_acceptor, (void *)(ulong_ptr_t)secure);
if (rc2 < 0) {
CERROR("Can't start acceptor thread: %d\n", rc);
- cfs_fini_completion(&lnet_acceptor_state.pta_signal);
+ cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+
return -ESRCH;
}
/* wait for acceptor to startup */
- cfs_wait_for_completion(&lnet_acceptor_state.pta_signal);
+ cfs_mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
if (!lnet_acceptor_state.pta_shutdown) {
/* started OK */
}
LASSERT (lnet_acceptor_state.pta_sock == NULL);
- cfs_fini_completion(&lnet_acceptor_state.pta_signal);
+ cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+
return -ENETDOWN;
}
libcfs_sock_abort_accept(lnet_acceptor_state.pta_sock);
/* block until acceptor signals exit */
- cfs_wait_for_completion(&lnet_acceptor_state.pta_signal);
+ cfs_mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
- cfs_fini_completion(&lnet_acceptor_state.pta_signal);
+ cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
}
#else /* single-threaded user-space */
void
lnet_init_locks(void)
{
- spin_lock_init (&the_lnet.ln_lock);
+ cfs_spin_lock_init (&the_lnet.ln_lock);
cfs_waitq_init (&the_lnet.ln_waitq);
- init_mutex(&the_lnet.ln_lnd_mutex);
- init_mutex(&the_lnet.ln_api_mutex);
+ cfs_init_mutex(&the_lnet.ln_lnd_mutex);
+ cfs_init_mutex(&the_lnet.ln_api_mutex);
}
void
int len;
int nob;
int rc;
- struct list_head *tmp;
+ cfs_list_t *tmp;
#ifdef NOT_YET
if (networks != NULL && ip2nets != NULL) {
*str = 0;
sep = "";
- list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd_t *lnd = list_entry(tmp, lnd_t, lnd_list);
+ cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
+ lnd_t *lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
nob = snprintf(str, len, "%s%s", sep,
libcfs_lnd2str(lnd->lnd_type));
lnet_find_lnd_by_type (int type)
{
lnd_t *lnd;
- struct list_head *tmp;
+ cfs_list_t *tmp;
/* holding lnd mutex */
- list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd = list_entry(tmp, lnd_t, lnd_list);
+ cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
+ lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
if ((int)lnd->lnd_type == type)
return lnd;
LASSERT (libcfs_isknown_lnd(lnd->lnd_type));
LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
- list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
+ cfs_list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
lnd->lnd_refcount = 0;
CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
LASSERT (lnd->lnd_refcount == 0);
- list_del (&lnd->lnd_list);
+ cfs_list_del (&lnd->lnd_list);
CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
do
{
memset (space, 0, size);
- list_add ((struct list_head *)space, &fl->fl_list);
+ cfs_list_add ((cfs_list_t *)space, &fl->fl_list);
space += size;
} while (--n != 0);
void
lnet_freelist_fini (lnet_freelist_t *fl)
{
- struct list_head *el;
+ cfs_list_t *el;
int count;
if (fl->fl_nobjs == 0)
int rc = gettimeofday (&tv, NULL);
LASSERT (rc == 0);
#else
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
#endif
cookie = tv.tv_sec;
cookie *= 1000000;
/* Arbitrary choice of hash table size */
#ifdef __KERNEL__
- the_lnet.ln_lh_hash_size = CFS_PAGE_SIZE / sizeof (struct list_head);
+ the_lnet.ln_lh_hash_size =
+ CFS_PAGE_SIZE / sizeof (cfs_list_t);
#else
the_lnet.ln_lh_hash_size = (MAX_MES + MAX_MDS + MAX_EQS)/4;
#endif
LIBCFS_ALLOC(the_lnet.ln_lh_hash_table,
- the_lnet.ln_lh_hash_size * sizeof (struct list_head));
+ the_lnet.ln_lh_hash_size * sizeof (cfs_list_t));
if (the_lnet.ln_lh_hash_table == NULL)
return (-ENOMEM);
return;
LIBCFS_FREE(the_lnet.ln_lh_hash_table,
- the_lnet.ln_lh_hash_size * sizeof (struct list_head));
+ the_lnet.ln_lh_hash_size * sizeof (cfs_list_t));
}
lnet_libhandle_t *
lnet_lookup_cookie (__u64 cookie, int type)
{
/* ALWAYS called with LNET_LOCK held */
- struct list_head *list;
- struct list_head *el;
+ cfs_list_t *list;
+ cfs_list_t *el;
unsigned int hash;
if ((cookie & (LNET_COOKIE_TYPES - 1)) != type)
hash = ((unsigned int)cookie) % the_lnet.ln_lh_hash_size;
list = &the_lnet.ln_lh_hash_table[hash];
- list_for_each (el, list) {
- lnet_libhandle_t *lh = list_entry (el, lnet_libhandle_t,
- lh_hash_chain);
+ cfs_list_for_each (el, list) {
+ lnet_libhandle_t *lh = cfs_list_entry (el, lnet_libhandle_t,
+ lh_hash_chain);
if (lh->lh_cookie == cookie)
return (lh);
the_lnet.ln_next_object_cookie += LNET_COOKIE_TYPES;
hash = ((unsigned int)lh->lh_cookie) % the_lnet.ln_lh_hash_size;
- list_add (&lh->lh_hash_chain, &the_lnet.ln_lh_hash_table[hash]);
+ cfs_list_add (&lh->lh_hash_chain, &the_lnet.ln_lh_hash_table[hash]);
}
void
lnet_invalidate_handle (lnet_libhandle_t *lh)
{
/* ALWAYS called with LNET_LOCK held */
- list_del (&lh->lh_hash_chain);
+ cfs_list_del (&lh->lh_hash_chain);
}
int
#ifdef __KERNEL__
int i;
- the_lnet.ln_nfinalizers = (int) num_online_cpus();
+ the_lnet.ln_nfinalizers = (int) cfs_num_online_cpus();
LIBCFS_ALLOC(the_lnet.ln_finalizers,
the_lnet.ln_nfinalizers *
#else
LASSERT (!the_lnet.ln_finalizing);
#endif
- LASSERT (list_empty(&the_lnet.ln_finalizeq));
+ LASSERT (cfs_list_empty(&the_lnet.ln_finalizeq));
}
#ifndef __KERNEL__
lnet_fail_nid(LNET_NID_ANY, 0);
- LASSERT (list_empty(&the_lnet.ln_test_peers));
+ LASSERT (cfs_list_empty(&the_lnet.ln_test_peers));
LASSERT (the_lnet.ln_refcount == 0);
- LASSERT (list_empty(&the_lnet.ln_nis));
- LASSERT (list_empty(&the_lnet.ln_zombie_nis));
+ LASSERT (cfs_list_empty(&the_lnet.ln_nis));
+ LASSERT (cfs_list_empty(&the_lnet.ln_zombie_nis));
LASSERT (the_lnet.ln_nzombie_nis == 0);
for (idx = 0; idx < the_lnet.ln_nportals; idx++) {
- LASSERT (list_empty(&the_lnet.ln_portals[idx].ptl_msgq));
+ LASSERT (cfs_list_empty(&the_lnet.ln_portals[idx].ptl_msgq));
- while (!list_empty (&the_lnet.ln_portals[idx].ptl_ml)) {
- lnet_me_t *me = list_entry (the_lnet.ln_portals[idx].ptl_ml.next,
- lnet_me_t, me_list);
+ while (!cfs_list_empty (&the_lnet.ln_portals[idx].ptl_ml)) {
+ lnet_me_t *me = cfs_list_entry (the_lnet.ln_portals[idx].ptl_ml.next,
+ lnet_me_t, me_list);
CERROR ("Active me %p on exit\n", me);
- list_del (&me->me_list);
+ cfs_list_del (&me->me_list);
lnet_me_free (me);
}
}
- while (!list_empty (&the_lnet.ln_active_mds)) {
- lnet_libmd_t *md = list_entry (the_lnet.ln_active_mds.next,
- lnet_libmd_t, md_list);
+ while (!cfs_list_empty (&the_lnet.ln_active_mds)) {
+ lnet_libmd_t *md = cfs_list_entry (the_lnet.ln_active_mds.next,
+ lnet_libmd_t, md_list);
CERROR ("Active md %p on exit\n", md);
- list_del_init (&md->md_list);
+ cfs_list_del_init (&md->md_list);
lnet_md_free (md);
}
- while (!list_empty (&the_lnet.ln_active_eqs)) {
- lnet_eq_t *eq = list_entry (the_lnet.ln_active_eqs.next,
- lnet_eq_t, eq_list);
+ while (!cfs_list_empty (&the_lnet.ln_active_eqs)) {
+ lnet_eq_t *eq = cfs_list_entry (the_lnet.ln_active_eqs.next,
+ lnet_eq_t, eq_list);
CERROR ("Active eq %p on exit\n", eq);
- list_del (&eq->eq_list);
+ cfs_list_del (&eq->eq_list);
lnet_eq_free (eq);
}
- while (!list_empty (&the_lnet.ln_active_msgs)) {
- lnet_msg_t *msg = list_entry (the_lnet.ln_active_msgs.next,
- lnet_msg_t, msg_activelist);
+ while (!cfs_list_empty (&the_lnet.ln_active_msgs)) {
+ lnet_msg_t *msg = cfs_list_entry (the_lnet.ln_active_msgs.next,
+ lnet_msg_t, msg_activelist);
CERROR ("Active msg %p on exit\n", msg);
LASSERT (msg->msg_onactivelist);
msg->msg_onactivelist = 0;
- list_del (&msg->msg_activelist);
+ cfs_list_del (&msg->msg_activelist);
lnet_msg_free (msg);
}
lnet_ni_t *
lnet_net2ni_locked (__u32 net)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
lnet_ni_t *ni;
- list_for_each (tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ cfs_list_for_each (tmp, &the_lnet.ln_nis) {
+ ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net) {
lnet_ni_addref_locked(ni);
lnet_ni_t *
lnet_nid2ni_locked (lnet_nid_t nid)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
lnet_ni_t *ni;
- list_for_each (tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ cfs_list_for_each (tmp, &the_lnet.ln_nis) {
+ ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
if (ni->ni_nid == nid) {
lnet_ni_addref_locked(ni);
lnet_count_acceptor_nis (void)
{
/* Return the # of NIs that need the acceptor. */
- int count = 0;
+ int count = 0;
#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
- struct list_head *tmp;
- lnet_ni_t *ni;
+ cfs_list_t *tmp;
+ lnet_ni_t *ni;
LNET_LOCK();
- list_for_each (tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ cfs_list_for_each (tmp, &the_lnet.ln_nis) {
+ ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
if (ni->ni_lnd->lnd_accept != NULL)
count++;
/* All quiet on the API front */
LASSERT (!the_lnet.ln_shutdown);
LASSERT (the_lnet.ln_refcount == 0);
- LASSERT (list_empty(&the_lnet.ln_zombie_nis));
+ LASSERT (cfs_list_empty(&the_lnet.ln_zombie_nis));
LASSERT (the_lnet.ln_nzombie_nis == 0);
- LASSERT (list_empty(&the_lnet.ln_remote_nets));
+ LASSERT (cfs_list_empty(&the_lnet.ln_remote_nets));
LNET_LOCK();
the_lnet.ln_shutdown = 1; /* flag shutdown */
/* Unlink NIs from the global table */
- while (!list_empty(&the_lnet.ln_nis)) {
- ni = list_entry(the_lnet.ln_nis.next,
- lnet_ni_t, ni_list);
- list_del (&ni->ni_list);
+ while (!cfs_list_empty(&the_lnet.ln_nis)) {
+ ni = cfs_list_entry(the_lnet.ln_nis.next,
+ lnet_ni_t, ni_list);
+ cfs_list_del (&ni->ni_list);
the_lnet.ln_nzombie_nis++;
lnet_ni_decref_locked(ni); /* drop apini's ref */
i = 2;
while (the_lnet.ln_nzombie_nis != 0) {
- while (list_empty(&the_lnet.ln_zombie_nis)) {
+ while (cfs_list_empty(&the_lnet.ln_zombie_nis)) {
LNET_UNLOCK();
++i;
if ((i & (-i)) == i)
LNET_LOCK();
}
- ni = list_entry(the_lnet.ln_zombie_nis.next,
- lnet_ni_t, ni_list);
- list_del(&ni->ni_list);
+ ni = cfs_list_entry(the_lnet.ln_zombie_nis.next,
+ lnet_ni_t, ni_list);
+ cfs_list_del(&ni->ni_list);
ni->ni_lnd->lnd_refcount--;
LNET_UNLOCK();
islo = ni->ni_lnd->lnd_type == LOLND;
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
(ni->ni_lnd->lnd_shutdown)(ni);
/* can't deref lnd anymore now; it might have unregistered
{
lnd_t *lnd;
lnet_ni_t *ni;
- struct list_head nilist;
+ cfs_list_t nilist;
int rc = 0;
int lnd_type;
int nicount = 0;
if (rc != 0)
goto failed;
- while (!list_empty(&nilist)) {
- ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+ while (!cfs_list_empty(&nilist)) {
+ ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
LASSERT (libcfs_isknown_lnd(lnd_type));
#ifdef __KERNEL__
if (lnd == NULL) {
LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
- rc = request_module("%s", libcfs_lnd2modname(lnd_type));
+ rc = cfs_request_module("%s",
+ libcfs_lnd2modname(lnd_type));
LNET_MUTEX_DOWN(&the_lnet.ln_lnd_mutex);
lnd = lnet_find_lnd_by_type(lnd_type);
LASSERT (ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
- list_del(&ni->ni_list);
+ cfs_list_del(&ni->ni_list);
LNET_LOCK();
- list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
+ cfs_list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
LNET_UNLOCK();
if (lnd->lnd_type == LOLND) {
failed:
lnet_shutdown_lndnis();
- while (!list_empty(&nilist)) {
- ni = list_entry(nilist.next, lnet_ni_t, ni_list);
- list_del(&ni->ni_list);
+ while (!cfs_list_empty(&nilist)) {
+ ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
+ cfs_list_del(&ni->ni_list);
LIBCFS_FREE(ni, sizeof(*ni));
}
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount == 0);
- while (!list_empty(&the_lnet.ln_lnds))
- lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
- lnd_t, lnd_list));
+ while (!cfs_list_empty(&the_lnet.ln_lnds))
+ lnet_unregister_lnd(cfs_list_entry(the_lnet.ln_lnds.next,
+ lnd_t, lnd_list));
lnet_fini_locks();
the_lnet.ln_init = 0;
LNetGetId(unsigned int index, lnet_process_id_t *id)
{
lnet_ni_t *ni;
- struct list_head *tmp;
+ cfs_list_t *tmp;
int rc = -ENOENT;
LASSERT (the_lnet.ln_init);
LNET_LOCK();
- list_for_each(tmp, &the_lnet.ln_nis) {
+ cfs_list_for_each(tmp, &the_lnet.ln_nis) {
if (index-- != 0)
continue;
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
id->nid = ni->ni_nid;
id->pid = the_lnet.ln_pid;
LNET_LOCK();
- list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
+ cfs_list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
ni->ni_status = NULL;
}
tmpid.pid = info->pi_pid;
tmpid.nid = info->pi_ni[i].ns_nid;
#ifdef __KERNEL__
- if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
+ if (cfs_copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
goto out_1;
#else
ids[i] = tmpid;
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
-typedef struct { /* tmp struct for parsing routes */
- struct list_head ltb_list; /* stash on lists */
- int ltb_size; /* allocated size */
- char ltb_text[0]; /* text buffer */
+typedef struct { /* tmp struct for parsing routes */
+ cfs_list_t ltb_list; /* stash on lists */
+ int ltb_size; /* allocated size */
+ char ltb_text[0]; /* text buffer */
} lnet_text_buf_t;
static int lnet_tbnob = 0; /* track text buf allocation */
#define LNET_SINGLE_TEXTBUF_NOB (4<<10)
typedef struct {
- struct list_head lre_list; /* stash in a list */
+ cfs_list_t lre_list; /* stash in a list */
int lre_min; /* min value */
int lre_max; /* max value */
int lre_stride; /* stride */
static int lnet_re_alloc = 0; /* track expr allocation */
-void
+void
lnet_syntax(char *name, char *str, int offset, int width)
{
static char dots[LNET_SINGLE_TEXTBUF_NOB];
lnet_trimwhite(char *str)
{
char *end;
-
+
while (cfs_iswhite(*str))
str++;
-
+
end = str + strlen(str);
while (end > str) {
if (!cfs_iswhite(end[-1]))
}
int
-lnet_net_unique(__u32 net, struct list_head *nilist)
+lnet_net_unique(__u32 net, cfs_list_t *nilist)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
lnet_ni_t *ni;
- list_for_each (tmp, nilist) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ cfs_list_for_each (tmp, nilist) {
+ ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net)
return 0;
}
lnet_ni_t *
-lnet_new_ni(__u32 net, struct list_head *nilist)
+lnet_new_ni(__u32 net, cfs_list_t *nilist)
{
lnet_ni_t *ni;
CFS_INIT_LIST_HEAD(&ni->ni_txq);
ni->ni_last_alive = cfs_time_current();
- list_add_tail(&ni->ni_list, nilist);
+ cfs_list_add_tail(&ni->ni_list, nilist);
return ni;
}
int
-lnet_parse_networks(struct list_head *nilist, char *networks)
+lnet_parse_networks(cfs_list_t *nilist, char *networks)
{
int tokensize = strlen(networks) + 1;
char *tokens;
}
}
- LASSERT (!list_empty(nilist));
+ LASSERT (!cfs_list_empty(nilist));
return 0;
failed:
- while (!list_empty(nilist)) {
- ni = list_entry(nilist->next, lnet_ni_t, ni_list);
+ while (!cfs_list_empty(nilist)) {
+ ni = cfs_list_entry(nilist->next, lnet_ni_t, ni_list);
- list_del(&ni->ni_list);
+ cfs_list_del(&ni->ni_list);
LIBCFS_FREE(ni, sizeof(*ni));
}
LIBCFS_FREE(tokens, tokensize);
}
void
-lnet_free_text_bufs(struct list_head *tbs)
+lnet_free_text_bufs(cfs_list_t *tbs)
{
lnet_text_buf_t *ltb;
-
- while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
-
- list_del(<b->ltb_list);
+
+ while (!cfs_list_empty(tbs)) {
+ ltb = cfs_list_entry(tbs->next, lnet_text_buf_t, ltb_list);
+
+ cfs_list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
}
}
void
-lnet_print_text_bufs(struct list_head *tbs)
+lnet_print_text_bufs(cfs_list_t *tbs)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
lnet_text_buf_t *ltb;
- list_for_each (tmp, tbs) {
- ltb = list_entry(tmp, lnet_text_buf_t, ltb_list);
+ cfs_list_for_each (tmp, tbs) {
+ ltb = cfs_list_entry(tmp, lnet_text_buf_t, ltb_list);
CDEBUG(D_WARNING, "%s\n", ltb->ltb_text);
}
}
int
-lnet_str2tbs_sep (struct list_head *tbs, char *str)
+lnet_str2tbs_sep (cfs_list_t *tbs, char *str)
{
- struct list_head pending;
+ cfs_list_t pending;
char *sep;
int nob;
int i;
ltb->ltb_text[nob] = 0;
- list_add_tail(<b->ltb_list, &pending);
+ cfs_list_add_tail(<b->ltb_list, &pending);
}
if (*sep == '#') {
str = sep + 1;
}
- list_splice(&pending, tbs->prev);
+ cfs_list_splice(&pending, tbs->prev);
return 0;
}
int
-lnet_expand1tb (struct list_head *list,
+lnet_expand1tb (cfs_list_t *list,
char *str, char *sep1, char *sep2,
char *item, int itemlen)
{
ltb = lnet_new_text_buf(len1 + itemlen + len2);
if (ltb == NULL)
return -ENOMEM;
-
+
memcpy(ltb->ltb_text, str, len1);
memcpy(<b->ltb_text[len1], item, itemlen);
memcpy(<b->ltb_text[len1+itemlen], sep2 + 1, len2);
ltb->ltb_text[len1 + itemlen + len2] = 0;
-
- list_add_tail(<b->ltb_list, list);
+
+ cfs_list_add_tail(<b->ltb_list, list);
return 0;
}
int
-lnet_str2tbs_expand (struct list_head *tbs, char *str)
+lnet_str2tbs_expand (cfs_list_t *tbs, char *str)
{
char num[16];
- struct list_head pending;
+ cfs_list_t pending;
char *sep;
char *sep2;
char *parsed;
int scanned;
CFS_INIT_LIST_HEAD(&pending);
-
+
sep = strchr(str, '[');
if (sep == NULL) /* nothing to expand */
return 0;
}
}
- list_splice(&pending, tbs->prev);
+ cfs_list_splice(&pending, tbs->prev);
return 1;
failed:
{
int len = strlen(str);
int nob = len;
-
+
return (sscanf(str, "%u%n", hops, &nob) >= 1 &&
nob == len &&
*hops > 0 && *hops < 256);
/* static scratch buffer OK (single threaded) */
static char cmd[LNET_SINGLE_TEXTBUF_NOB];
- struct list_head nets;
- struct list_head gateways;
- struct list_head *tmp1;
- struct list_head *tmp2;
+ cfs_list_t nets;
+ cfs_list_t gateways;
+ cfs_list_t *tmp1;
+ cfs_list_t *tmp2;
__u32 net;
lnet_nid_t nid;
lnet_text_buf_t *ltb;
strcpy(ltb->ltb_text, token);
tmp1 = <b->ltb_list;
- list_add_tail(tmp1, tmp2);
+ cfs_list_add_tail(tmp1, tmp2);
while (tmp1 != tmp2) {
- ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
+ ltb = cfs_list_entry(tmp1, lnet_text_buf_t, ltb_list);
rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
if (rc < 0)
tmp1 = tmp1->next;
if (rc > 0) { /* expanded! */
- list_del(<b->ltb_list);
+ cfs_list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
continue;
}
if (!got_hops)
hops = 1;
- LASSERT (!list_empty(&nets));
- LASSERT (!list_empty(&gateways));
+ LASSERT (!cfs_list_empty(&nets));
+ LASSERT (!cfs_list_empty(&gateways));
- list_for_each (tmp1, &nets) {
- ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
+ cfs_list_for_each (tmp1, &nets) {
+ ltb = cfs_list_entry(tmp1, lnet_text_buf_t, ltb_list);
net = libcfs_str2net(ltb->ltb_text);
LASSERT (net != LNET_NIDNET(LNET_NID_ANY));
- list_for_each (tmp2, &gateways) {
- ltb = list_entry(tmp2, lnet_text_buf_t, ltb_list);
+ cfs_list_for_each (tmp2, &gateways) {
+ ltb = cfs_list_entry(tmp2, lnet_text_buf_t, ltb_list);
nid = libcfs_str2nid(ltb->ltb_text);
LASSERT (nid != LNET_NID_ANY);
}
int
-lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
+lnet_parse_route_tbs(cfs_list_t *tbs, int *im_a_router)
{
lnet_text_buf_t *ltb;
- while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
+ while (!cfs_list_empty(tbs)) {
+ ltb = cfs_list_entry(tbs->next, lnet_text_buf_t, ltb_list);
if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
lnet_free_text_bufs(tbs);
return -EINVAL;
}
- list_del(<b->ltb_list);
+ cfs_list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
}
int
lnet_parse_routes (char *routes, int *im_a_router)
{
- struct list_head tbs;
+ cfs_list_t tbs;
int rc = 0;
*im_a_router = 0;
}
void
-lnet_print_range_exprs(struct list_head *exprs)
+lnet_print_range_exprs(cfs_list_t *exprs)
{
- struct list_head *e;
+ cfs_list_t *e;
lnet_range_expr_t *lre;
-
- list_for_each(e, exprs) {
- lre = list_entry(exprs->next, lnet_range_expr_t, lre_list);
+
+ cfs_list_for_each(e, exprs) {
+ lre = cfs_list_entry(exprs->next, lnet_range_expr_t, lre_list);
CDEBUG(D_WARNING, "%d-%d/%d\n",
lre->lre_min, lre->lre_max, lre->lre_stride);
}
int
-lnet_new_range_expr(struct list_head *exprs, int min, int max, int stride)
+lnet_new_range_expr(cfs_list_t *exprs, int min, int max, int stride)
{
lnet_range_expr_t *lre;
lre->lre_max = max;
lre->lre_stride = stride;
- list_add(&lre->lre_list, exprs);
+ cfs_list_add(&lre->lre_list, exprs);
return 0;
}
void
-lnet_destroy_range_exprs(struct list_head *exprs)
+lnet_destroy_range_exprs(cfs_list_t *exprs)
{
lnet_range_expr_t *lre;
- while (!list_empty(exprs)) {
- lre = list_entry(exprs->next, lnet_range_expr_t, lre_list);
+ while (!cfs_list_empty(exprs)) {
+ lre = cfs_list_entry(exprs->next, lnet_range_expr_t, lre_list);
- list_del(&lre->lre_list);
+ cfs_list_del(&lre->lre_list);
LIBCFS_FREE(lre, sizeof(*lre));
lnet_re_alloc--;
}
}
int
-lnet_parse_range_expr(struct list_head *exprs, char *str)
+lnet_parse_range_expr(cfs_list_t *exprs, char *str)
{
int nob = strlen(str);
char *sep;
int
lnet_match_network_token(char *token, __u32 *ipaddrs, int nip)
{
- struct list_head exprs[4];
- struct list_head *e;
+ cfs_list_t exprs[4];
+ cfs_list_t *e;
lnet_range_expr_t *re;
char *str;
int i;
for (match = i = 0; !match && i < nip; i++) {
ip = ipaddrs[i];
-
+
for (match = 1, j = 0; match && j < 4; j++) {
n = (ip >> (8 * (3 - j))) & 0xff;
match = 0;
- list_for_each(e, &exprs[j]) {
- re = list_entry(e, lnet_range_expr_t, lre_list);
+ cfs_list_for_each(e, &exprs[j]) {
+ re = cfs_list_entry(e, lnet_range_expr_t,
+ lre_list);
if (re->lre_min <= n &&
re->lre_max >= n &&
return 1;
}
-__u32
+__u32
lnet_netspec2net(char *netspec)
{
char *bracket = strchr(netspec, '(');
if (bracket != NULL)
*bracket = '(';
-
+
return net;
}
int
-lnet_splitnets(char *source, struct list_head *nets)
+lnet_splitnets(char *source, cfs_list_t *nets)
{
int offset = 0;
int offset2;
int len;
lnet_text_buf_t *tb;
lnet_text_buf_t *tb2;
- struct list_head *t;
+ cfs_list_t *t;
char *sep;
char *bracket;
__u32 net;
- LASSERT (!list_empty(nets));
+ LASSERT (!cfs_list_empty(nets));
LASSERT (nets->next == nets->prev); /* single entry */
-
- tb = list_entry(nets->next, lnet_text_buf_t, ltb_list);
+
+ tb = cfs_list_entry(nets->next, lnet_text_buf_t, ltb_list);
for (;;) {
sep = strchr(tb->ltb_text, ',');
return -EINVAL;
}
- list_for_each(t, nets) {
- tb2 = list_entry(t, lnet_text_buf_t, ltb_list);
+ cfs_list_for_each(t, nets) {
+ tb2 = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
if (tb2 == tb)
continue;
return -EINVAL;
}
}
-
+
if (sep == NULL)
return 0;
tb2 = lnet_new_text_buf(strlen(sep));
if (tb2 == NULL)
return -ENOMEM;
-
+
strcpy(tb2->ltb_text, sep);
- list_add_tail(&tb2->ltb_list, nets);
+ cfs_list_add_tail(&tb2->ltb_list, nets);
tb = tb2;
}
int
lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
{
- static char networks[LNET_SINGLE_TEXTBUF_NOB];
- static char source[LNET_SINGLE_TEXTBUF_NOB];
-
- struct list_head raw_entries;
- struct list_head matched_nets;
- struct list_head current_nets;
- struct list_head *t;
- struct list_head *t2;
+ static char networks[LNET_SINGLE_TEXTBUF_NOB];
+ static char source[LNET_SINGLE_TEXTBUF_NOB];
+
+ cfs_list_t raw_entries;
+ cfs_list_t matched_nets;
+ cfs_list_t current_nets;
+ cfs_list_t *t;
+ cfs_list_t *t2;
lnet_text_buf_t *tb;
lnet_text_buf_t *tb2;
__u32 net1;
len = 0;
rc = 0;
- while (!list_empty(&raw_entries)) {
- tb = list_entry(raw_entries.next, lnet_text_buf_t, ltb_list);
+ while (!cfs_list_empty(&raw_entries)) {
+ tb = cfs_list_entry(raw_entries.next, lnet_text_buf_t,
+ ltb_list);
strncpy(source, tb->ltb_text, sizeof(source)-1);
source[sizeof(source)-1] = 0;
if (rc < 0)
break;
- list_del(&tb->ltb_list);
+ cfs_list_del(&tb->ltb_list);
if (rc == 0) { /* no match */
lnet_free_text_buf(tb);
/* split into separate networks */
CFS_INIT_LIST_HEAD(¤t_nets);
- list_add(&tb->ltb_list, ¤t_nets);
+ cfs_list_add(&tb->ltb_list, ¤t_nets);
rc = lnet_splitnets(source, ¤t_nets);
if (rc < 0)
break;
dup = 0;
- list_for_each (t, ¤t_nets) {
- tb = list_entry(t, lnet_text_buf_t, ltb_list);
+ cfs_list_for_each (t, ¤t_nets) {
+ tb = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
net1 = lnet_netspec2net(tb->ltb_text);
LASSERT (net1 != LNET_NIDNET(LNET_NID_ANY));
- list_for_each(t2, &matched_nets) {
- tb2 = list_entry(t2, lnet_text_buf_t, ltb_list);
+ cfs_list_for_each(t2, &matched_nets) {
+ tb2 = cfs_list_entry(t2, lnet_text_buf_t,
+ ltb_list);
net2 = lnet_netspec2net(tb2->ltb_text);
LASSERT (net2 != LNET_NIDNET(LNET_NID_ANY));
continue;
}
- list_for_each_safe(t, t2, ¤t_nets) {
- tb = list_entry(t, lnet_text_buf_t, ltb_list);
+ cfs_list_for_each_safe(t, t2, ¤t_nets) {
+ tb = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
- list_del(&tb->ltb_list);
- list_add_tail(&tb->ltb_list, &matched_nets);
+ cfs_list_del(&tb->ltb_list);
+ cfs_list_add_tail(&tb->ltb_list, &matched_nets);
len += snprintf(networks + len, sizeof(networks) - len,
"%s%s", (len == 0) ? "" : ",",
if (rc < 0)
return rc;
-
+
*networksp = networks;
return count;
}
if (nif <= 0)
return nif;
-
+
LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
if (ipaddrs == NULL) {
CERROR("Can't allocate ipaddrs[%d]\n", nif);
for (i = nip = 0; i < nif; i++) {
if (!strcmp(ifnames[i], "lo"))
continue;
-
- rc = libcfs_ipif_query(ifnames[i], &up,
+
+ rc = libcfs_ipif_query(ifnames[i], &up,
&ipaddrs[nip], &netmask);
if (rc != 0) {
CWARN("Can't query interface %s: %d\n",
}
int
-lnet_set_ip_niaddr (lnet_ni_t *ni)
+lnet_set_ip_niaddr (lnet_ni_t *ni)
{
__u32 net = LNET_NIDNET(ni->ni_nid);
char **names;
libcfs_net2str(net));
return -EPERM;
}
-
+
rc = libcfs_ipif_query(ni->ni_interfaces[0],
&up, &ip, &netmask);
if (rc != 0) {
libcfs_net2str(net), ni->ni_interfaces[0]);
return -ENETDOWN;
}
-
+
ni->ni_nid = LNET_MKNID(net, ip);
return 0;
}
n = libcfs_ipif_enumerate(&names);
if (n <= 0) {
- CERROR("Net %s can't enumerate interfaces: %d\n",
+ CERROR("Net %s can't enumerate interfaces: %d\n",
libcfs_net2str(net), n);
return 0;
}
for (i = 0; i < n; i++) {
if (!strcmp(names[i], "lo")) /* skip the loopback IF */
continue;
-
+
rc = libcfs_ipif_query(names[i], &up, &ip, &netmask);
-
+
if (rc != 0) {
CWARN("Net %s can't query interface %s: %d\n",
libcfs_net2str(net), names[i], rc);
continue;
}
-
+
if (!up) {
CWARN("Net %s ignoring interface %s (down)\n",
libcfs_net2str(net), names[i]);
LNET_LOCK();
lnet_initialise_handle (&eq->eq_lh, LNET_COOKIE_TYPE_EQ);
- list_add (&eq->eq_list, &the_lnet.ln_active_eqs);
+ cfs_list_add (&eq->eq_list, &the_lnet.ln_active_eqs);
LNET_UNLOCK();
size = eq->eq_size;
lnet_invalidate_handle (&eq->eq_lh);
- list_del (&eq->eq_list);
+ cfs_list_del (&eq->eq_list);
lnet_eq_free (eq);
LNET_UNLOCK();
}
cfs_waitlink_init(&wl);
- set_current_state(TASK_INTERRUPTIBLE);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add(&the_lnet.ln_waitq, &wl);
LNET_UNLOCK();
LASSERT (md->md_eq->eq_refcount >= 0);
}
- LASSERT (!list_empty(&md->md_list));
- list_del_init (&md->md_list);
+ LASSERT (!cfs_list_empty(&md->md_list));
+ cfs_list_del_init (&md->md_list);
lnet_md_free(md);
}
/* It's good; let handle2md succeed and add to active mds */
lnet_initialise_handle (&lmd->md_lh, LNET_COOKIE_TYPE_MD);
- LASSERT (list_empty(&lmd->md_list));
- list_add (&lmd->md_list, &the_lnet.ln_active_mds);
+ LASSERT (cfs_list_empty(&lmd->md_list));
+ cfs_list_add (&lmd->md_list, &the_lnet.ln_active_mds);
return 0;
}
lnet_initialise_handle (&me->me_lh, LNET_COOKIE_TYPE_ME);
if (pos == LNET_INS_AFTER)
- list_add_tail(&me->me_list, &(the_lnet.ln_portals[portal].ptl_ml));
+ cfs_list_add_tail(&me->me_list,
+ &(the_lnet.ln_portals[portal].ptl_ml));
else
- list_add(&me->me_list, &(the_lnet.ln_portals[portal].ptl_ml));
+ cfs_list_add(&me->me_list,
+ &(the_lnet.ln_portals[portal].ptl_ml));
lnet_me2handle(handle, me);
lnet_initialise_handle (&new_me->me_lh, LNET_COOKIE_TYPE_ME);
if (pos == LNET_INS_AFTER)
- list_add(&new_me->me_list, ¤t_me->me_list);
+ cfs_list_add(&new_me->me_list, ¤t_me->me_list);
else
- list_add_tail(&new_me->me_list, ¤t_me->me_list);
+ cfs_list_add_tail(&new_me->me_list, ¤t_me->me_list);
lnet_me2handle(handle, new_me);
void
lnet_me_unlink(lnet_me_t *me)
{
- list_del (&me->me_list);
+ cfs_list_del (&me->me_list);
if (me->me_md != NULL) {
me->me_md->md_me = NULL;
CWARN("\tMD\t= %p\n", me->md);
CWARN("\tprev\t= %p\n",
- list_entry(me->me_list.prev, lnet_me_t, me_list));
+ cfs_list_entry(me->me_list.prev, lnet_me_t, me_list));
CWARN("\tnext\t= %p\n",
- list_entry(me->me_list.next, lnet_me_t, me_list));
+ cfs_list_entry(me->me_list.next, lnet_me_t, me_list));
}
#endif
int
lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
{
- lnet_test_peer_t *tp;
- struct list_head *el;
- struct list_head *next;
- struct list_head cull;
+ lnet_test_peer_t *tp;
+ cfs_list_t *el;
+ cfs_list_t *next;
+ cfs_list_t cull;
LASSERT (the_lnet.ln_init);
tp->tp_threshold = threshold;
LNET_LOCK();
- list_add_tail (&tp->tp_list, &the_lnet.ln_test_peers);
+ cfs_list_add_tail (&tp->tp_list, &the_lnet.ln_test_peers);
LNET_UNLOCK();
return 0;
}
LNET_LOCK();
- list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = list_entry (el, lnet_test_peer_t, tp_list);
+ cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
+ tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);
if (tp->tp_threshold == 0 || /* needs culling anyway */
nid == LNET_NID_ANY || /* removing all entries */
tp->tp_nid == nid) /* matched this one */
{
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ cfs_list_del (&tp->tp_list);
+ cfs_list_add (&tp->tp_list, &cull);
}
}
LNET_UNLOCK();
- while (!list_empty (&cull)) {
- tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
+ while (!cfs_list_empty (&cull)) {
+ tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
- list_del (&tp->tp_list);
+ cfs_list_del (&tp->tp_list);
LIBCFS_FREE(tp, sizeof (*tp));
}
return 0;
static int
fail_peer (lnet_nid_t nid, int outgoing)
{
- lnet_test_peer_t *tp;
- struct list_head *el;
- struct list_head *next;
- struct list_head cull;
+ lnet_test_peer_t *tp;
+ cfs_list_t *el;
+ cfs_list_t *next;
+ cfs_list_t cull;
int fail = 0;
CFS_INIT_LIST_HEAD (&cull);
LNET_LOCK();
- list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = list_entry (el, lnet_test_peer_t, tp_list);
+ cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
+ tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);
if (tp->tp_threshold == 0) {
/* zombie entry */
/* only cull zombies on outgoing tests,
* since we may be at interrupt priority on
* incoming messages. */
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ cfs_list_del (&tp->tp_list);
+ cfs_list_add (&tp->tp_list, &cull);
}
continue;
}
if (outgoing &&
tp->tp_threshold == 0) {
/* see above */
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ cfs_list_del (&tp->tp_list);
+ cfs_list_add (&tp->tp_list, &cull);
}
}
break;
LNET_UNLOCK ();
- while (!list_empty (&cull)) {
- tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
- list_del (&tp->tp_list);
+ while (!cfs_list_empty (&cull)) {
+ tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
+ cfs_list_del (&tp->tp_list);
LIBCFS_FREE(tp, sizeof (*tp));
}
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
LASSERT (ndiov > 0);
while (doffset >= diov->kiov_len) {
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
LASSERT (niov > 0);
while (iovoffset >= iov->iov_len) {
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
LASSERT (nkiov > 0);
while (kiovoffset >= kiov->kiov_len) {
lnet_kiov_t *kiov = NULL;
int rc;
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
LASSERT (mlen == 0 || msg != NULL);
if (msg != NULL) {
void *priv = msg->msg_private;
int rc;
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
(msg->msg_txcredit && msg->msg_peertxcredit));
}
if (!msg->msg_peertxcredit) {
- LASSERT ((lp->lp_txcredits < 0) == !list_empty(&lp->lp_txq));
+ LASSERT ((lp->lp_txcredits < 0) ==
+ !cfs_list_empty(&lp->lp_txq));
msg->msg_peertxcredit = 1;
lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
if (lp->lp_txcredits < 0) {
msg->msg_delayed = 1;
- list_add_tail (&msg->msg_list, &lp->lp_txq);
+ cfs_list_add_tail (&msg->msg_list, &lp->lp_txq);
return EAGAIN;
}
}
if (!msg->msg_txcredit) {
- LASSERT ((ni->ni_txcredits < 0) == !list_empty(&ni->ni_txq));
+ LASSERT ((ni->ni_txcredits < 0) ==
+ !cfs_list_empty(&ni->ni_txq));
msg->msg_txcredit = 1;
ni->ni_txcredits--;
if (ni->ni_txcredits < 0) {
msg->msg_delayed = 1;
- list_add_tail (&msg->msg_list, &ni->ni_txq);
+ cfs_list_add_tail (&msg->msg_list, &ni->ni_txq);
return EAGAIN;
}
}
LASSERT (!msg->msg_onactivelist);
msg->msg_onactivelist = 1;
- list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
+ cfs_list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
}
lnet_rtrbufpool_t *
LASSERT (!do_recv || msg->msg_delayed);
if (!msg->msg_peerrtrcredit) {
- LASSERT ((lp->lp_rtrcredits < 0) == !list_empty(&lp->lp_rtrq));
+ LASSERT ((lp->lp_rtrcredits < 0) ==
+ !cfs_list_empty(&lp->lp_rtrq));
msg->msg_peerrtrcredit = 1;
lp->lp_rtrcredits--;
if (lp->lp_rtrcredits < 0) {
/* must have checked eager_recv before here */
LASSERT (msg->msg_delayed);
- list_add_tail(&msg->msg_list, &lp->lp_rtrq);
+ cfs_list_add_tail(&msg->msg_list, &lp->lp_rtrq);
return EAGAIN;
}
}
rbp = lnet_msg2bufpool(msg);
if (!msg->msg_rtrcredit) {
- LASSERT ((rbp->rbp_credits < 0) == !list_empty(&rbp->rbp_msgs));
+ LASSERT ((rbp->rbp_credits < 0) ==
+ !cfs_list_empty(&rbp->rbp_msgs));
msg->msg_rtrcredit = 1;
rbp->rbp_credits--;
if (rbp->rbp_credits < 0) {
/* must have checked eager_recv before here */
LASSERT (msg->msg_delayed);
- list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
+ cfs_list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
return EAGAIN;
}
}
- LASSERT (!list_empty(&rbp->rbp_bufs));
- rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
- list_del(&rb->rb_list);
+ LASSERT (!cfs_list_empty(&rbp->rbp_bufs));
+ rb = cfs_list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
+ cfs_list_del(&rb->rb_list);
msg->msg_niov = rbp->rbp_npages;
msg->msg_kiov = &rb->rb_kiov[0];
msg->msg_txcredit = 0;
ni = txpeer->lp_ni;
- LASSERT((ni->ni_txcredits < 0) == !list_empty(&ni->ni_txq));
+ LASSERT((ni->ni_txcredits < 0) == !cfs_list_empty(&ni->ni_txq));
ni->ni_txcredits++;
if (ni->ni_txcredits <= 0) {
- msg2 = list_entry(ni->ni_txq.next, lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
+ msg2 = cfs_list_entry(ni->ni_txq.next, lnet_msg_t,
+ msg_list);
+ cfs_list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer->lp_ni == ni);
LASSERT(msg2->msg_delayed);
/* give back peer txcredits */
msg->msg_peertxcredit = 0;
- LASSERT((txpeer->lp_txcredits < 0) == !list_empty(&txpeer->lp_txq));
+ LASSERT((txpeer->lp_txcredits < 0) ==
+ !cfs_list_empty(&txpeer->lp_txq));
txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
LASSERT (txpeer->lp_txqnob >= 0);
txpeer->lp_txcredits++;
if (txpeer->lp_txcredits <= 0) {
- msg2 = list_entry(txpeer->lp_txq.next,
- lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
+ msg2 = cfs_list_entry(txpeer->lp_txq.next,
+ lnet_msg_t, msg_list);
+ cfs_list_del(&msg2->msg_list);
LASSERT (msg2->msg_txpeer == txpeer);
LASSERT (msg2->msg_delayed);
* itself */
LASSERT (msg->msg_kiov != NULL);
- rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
+ rb = cfs_list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
rbp = rb->rb_pool;
LASSERT (rbp == lnet_msg2bufpool(msg));
msg->msg_kiov = NULL;
msg->msg_rtrcredit = 0;
- LASSERT((rbp->rbp_credits < 0) == !list_empty(&rbp->rbp_msgs));
- LASSERT((rbp->rbp_credits > 0) == !list_empty(&rbp->rbp_bufs));
+ LASSERT((rbp->rbp_credits < 0) ==
+ !cfs_list_empty(&rbp->rbp_msgs));
+ LASSERT((rbp->rbp_credits > 0) ==
+ !cfs_list_empty(&rbp->rbp_bufs));
- list_add(&rb->rb_list, &rbp->rbp_bufs);
+ cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
rbp->rbp_credits++;
if (rbp->rbp_credits <= 0) {
- msg2 = list_entry(rbp->rbp_msgs.next,
- lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
+ msg2 = cfs_list_entry(rbp->rbp_msgs.next,
+ lnet_msg_t, msg_list);
+ cfs_list_del(&msg2->msg_list);
(void) lnet_post_routed_recv_locked(msg2, 1);
}
/* give back peer router credits */
msg->msg_peerrtrcredit = 0;
- LASSERT((rxpeer->lp_rtrcredits < 0) == !list_empty(&rxpeer->lp_rtrq));
+ LASSERT((rxpeer->lp_rtrcredits < 0) ==
+ !cfs_list_empty(&rxpeer->lp_rtrq));
rxpeer->lp_rtrcredits++;
if (rxpeer->lp_rtrcredits <= 0) {
- msg2 = list_entry(rxpeer->lp_rtrq.next,
- lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
+ msg2 = cfs_list_entry(rxpeer->lp_rtrq.next,
+ lnet_msg_t, msg_list);
+ cfs_list_del(&msg2->msg_list);
(void) lnet_post_routed_recv_locked(msg2, 1);
}
lnet_remotenet_t *rnet;
lnet_route_t *route;
lnet_route_t *best_route;
- struct list_head *tmp;
+ cfs_list_t *tmp;
lnet_peer_t *lp;
lnet_peer_t *lp2;
int rc;
/* Find the best gateway I can use */
lp = NULL;
best_route = NULL;
- list_for_each(tmp, &rnet->lrn_routes) {
- route = list_entry(tmp, lnet_route_t, lr_list);
+ cfs_list_for_each(tmp, &rnet->lrn_routes) {
+ route = cfs_list_entry(tmp, lnet_route_t, lr_list);
lp2 = route->lr_gateway;
if (lp2->lp_alive &&
/* Place selected route at the end of the route list to ensure
* fairness; everything else being equal... */
- list_del(&best_route->lr_list);
- list_add_tail(&best_route->lr_list, &rnet->lrn_routes);
+ cfs_list_del(&best_route->lr_list);
+ cfs_list_add_tail(&best_route->lr_list, &rnet->lrn_routes);
if (src_ni == NULL) {
src_ni = lp->lp_ni;
LASSERT (!msg->msg_onactivelist);
msg->msg_onactivelist = 1;
- list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
+ cfs_list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
}
static void
int
LNetClearLazyPortal(int portal)
{
- struct list_head zombies;
+ cfs_list_t zombies;
lnet_portal_t *ptl = &the_lnet.ln_portals[portal];
lnet_msg_t *msg;
CDEBUG (D_NET, "clearing portal %d lazy\n", portal);
/* grab all the blocked messages atomically */
- list_add(&zombies, &ptl->ptl_msgq);
- list_del_init(&ptl->ptl_msgq);
+ cfs_list_add(&zombies, &ptl->ptl_msgq);
+ cfs_list_del_init(&ptl->ptl_msgq);
ptl->ptl_msgq_version++;
ptl->ptl_options &= ~LNET_PTL_LAZY;
LNET_UNLOCK();
- while (!list_empty(&zombies)) {
- msg = list_entry(zombies.next, lnet_msg_t, msg_list);
- list_del(&msg->msg_list);
+ while (!cfs_list_empty(&zombies)) {
+ msg = cfs_list_entry(zombies.next, lnet_msg_t, msg_list);
+ cfs_list_del(&msg->msg_list);
lnet_drop_delayed_put(msg, "Clearing lazy portal attr");
}
{
CFS_LIST_HEAD (drops);
CFS_LIST_HEAD (matches);
- struct list_head *tmp;
- struct list_head *entry;
+ cfs_list_t *tmp;
+ cfs_list_t *entry;
lnet_msg_t *msg;
lnet_me_t *me = md->md_me;
lnet_portal_t *ptl = &the_lnet.ln_portals[me->me_portal];
LASSERT (me->me_portal < (unsigned int)the_lnet.ln_nportals);
if ((ptl->ptl_options & LNET_PTL_LAZY) == 0) {
- LASSERT (list_empty(&ptl->ptl_msgq));
+ LASSERT (cfs_list_empty(&ptl->ptl_msgq));
return;
}
LASSERT (md->md_refcount == 0); /* a brand new MD */
- list_for_each_safe (entry, tmp, &ptl->ptl_msgq) {
+ cfs_list_for_each_safe (entry, tmp, &ptl->ptl_msgq) {
int rc;
int index;
unsigned int mlength;
lnet_hdr_t *hdr;
lnet_process_id_t src;
- msg = list_entry(entry, lnet_msg_t, msg_list);
+ msg = cfs_list_entry(entry, lnet_msg_t, msg_list);
LASSERT (msg->msg_delayed);
continue;
/* Hurrah! This _is_ a match */
- list_del(&msg->msg_list);
+ cfs_list_del(&msg->msg_list);
ptl->ptl_msgq_version++;
if (rc == LNET_MATCHMD_OK) {
- list_add_tail(&msg->msg_list, &matches);
+ cfs_list_add_tail(&msg->msg_list, &matches);
CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
"match "LPU64" offset %d length %d.\n",
} else {
LASSERT (rc == LNET_MATCHMD_DROP);
- list_add_tail(&msg->msg_list, &drops);
+ cfs_list_add_tail(&msg->msg_list, &drops);
}
if (lnet_md_exhausted(md))
LNET_UNLOCK();
- list_for_each_safe (entry, tmp, &drops) {
- msg = list_entry(entry, lnet_msg_t, msg_list);
+ cfs_list_for_each_safe (entry, tmp, &drops) {
+ msg = cfs_list_entry(entry, lnet_msg_t, msg_list);
- list_del(&msg->msg_list);
+ cfs_list_del(&msg->msg_list);
lnet_drop_delayed_put(msg, "Bad match");
}
- list_for_each_safe (entry, tmp, &matches) {
- msg = list_entry(entry, lnet_msg_t, msg_list);
+ cfs_list_for_each_safe (entry, tmp, &matches) {
+ msg = cfs_list_entry(entry, lnet_msg_t, msg_list);
- list_del(&msg->msg_list);
+ cfs_list_del(&msg->msg_list);
/* md won't disappear under me, since each msg
* holds a ref on it */
if (version != ptl->ptl_ml_version)
goto again;
- list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
+ cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
ptl->ptl_msgq_version++;
LNET_UNLOCK();
__u32 payload_length;
__u32 type;
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
type = le32_to_cpu(hdr->type);
src_nid = le64_to_cpu(hdr->src_nid);
/* Message looks OK; we're not going to return an error, so we MUST
* call back lnd_recv() come what may... */
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+ if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
fail_peer (src_nid, 0)) /* shall we now? */
{
CERROR("%s, src %s: Dropping %s to simulate failure\n",
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+ if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
fail_peer (target.nid, 1)) /* shall we now? */
{
CERROR("Dropping PUT to %s: simulated failure\n",
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+ if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
fail_peer (target.nid, 1)) /* shall we now? */
{
CERROR("Dropping GET to %s: simulated failure\n",
int
LNetDist (lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
- struct list_head *e;
+ cfs_list_t *e;
lnet_ni_t *ni;
lnet_remotenet_t *rnet;
__u32 dstnet = LNET_NIDNET(dstnid);
LNET_LOCK();
- list_for_each (e, &the_lnet.ln_nis) {
- ni = list_entry(e, lnet_ni_t, ni_list);
+ cfs_list_for_each (e, &the_lnet.ln_nis) {
+ ni = cfs_list_entry(e, lnet_ni_t, ni_list);
if (ni->ni_nid == dstnid) {
if (srcnidp != NULL)
order++;
}
- list_for_each (e, &the_lnet.ln_remote_nets) {
- rnet = list_entry(e, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (e, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(e, lnet_remotenet_t, lrn_list);
if (rnet->lrn_net == dstnet) {
lnet_route_t *route;
lnet_route_t *shortest = NULL;
- LASSERT (!list_empty(&rnet->lrn_routes));
+ LASSERT (!cfs_list_empty(&rnet->lrn_routes));
- list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
+ cfs_list_for_each_entry(route, &rnet->lrn_routes,
+ lr_list) {
if (shortest == NULL ||
route->lr_hops < shortest->lr_hops)
shortest = route;
#else
lnet_ni_t *ni;
lnet_remotenet_t *rnet;
- struct list_head *tmp;
+ cfs_list_t *tmp;
lnet_route_t *route;
lnet_nid_t *nids;
int nnids;
int rc = 0;
int rc2;
- /* Target on a local network? */
+ /* Target on a local network? */
ni = lnet_net2ni(LNET_NIDNET(id.nid));
if (ni != NULL) {
LNET_LOCK();
rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
if (rnet != NULL) {
- list_for_each(tmp, &rnet->lrn_routes) {
+ cfs_list_for_each(tmp, &rnet->lrn_routes) {
if (nnids == maxnids) {
LNET_UNLOCK();
LIBCFS_FREE(nids, maxnids * sizeof(*nids));
goto again;
}
- route = list_entry(tmp, lnet_route_t, lr_list);
+ route = cfs_list_entry(tmp, lnet_route_t, lr_list);
nids[nnids++] = route->lr_gateway->lp_nid;
}
}
LASSERT (msg->msg_onactivelist);
msg->msg_onactivelist = 0;
- list_del (&msg->msg_activelist);
+ cfs_list_del (&msg->msg_activelist);
the_lnet.ln_counters.msgs_alloc--;
lnet_msg_free(msg);
}
#endif
lnet_libmd_t *md;
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
if (msg == NULL)
return;
msg->msg_md = NULL;
}
- list_add_tail (&msg->msg_list, &the_lnet.ln_finalizeq);
+ cfs_list_add_tail (&msg->msg_list, &the_lnet.ln_finalizeq);
/* Recursion breaker. Don't complete the message here if I am (or
* enough other threads are) already completing messages */
the_lnet.ln_finalizing = 1;
#endif
- while (!list_empty(&the_lnet.ln_finalizeq)) {
- msg = list_entry(the_lnet.ln_finalizeq.next,
- lnet_msg_t, msg_list);
-
- list_del(&msg->msg_list);
+ while (!cfs_list_empty(&the_lnet.ln_finalizeq)) {
+ msg = cfs_list_entry(the_lnet.ln_finalizeq.next,
+ lnet_msg_t, msg_list);
+
+ cfs_list_del(&msg->msg_list);
/* NB drops and regains the lnet lock if it actually does
* anything, so my finalizing friends can chomp along too */
CFS_MODULE_PARM(config_on_load, "i", int, 0444,
"configure network at module load");
-static struct semaphore lnet_config_mutex;
+static cfs_semaphore_t lnet_config_mutex;
int
lnet_configure (void *arg)
int rc;
ENTRY;
- init_mutex(&lnet_config_mutex);
+ cfs_init_mutex(&lnet_config_mutex);
rc = LNetInit();
if (rc != 0) {
int
lnet_create_peer_table(void)
{
- struct list_head *hash;
- int i;
+ cfs_list_t *hash;
+ int i;
LASSERT (the_lnet.ln_peer_hash == NULL);
- LIBCFS_ALLOC(hash, LNET_PEER_HASHSIZE * sizeof(struct list_head));
+ LIBCFS_ALLOC(hash, LNET_PEER_HASHSIZE * sizeof(cfs_list_t));
if (hash == NULL) {
CERROR("Can't allocate peer hash table\n");
return;
for (i = 0; i < LNET_PEER_HASHSIZE; i++)
- LASSERT (list_empty(&the_lnet.ln_peer_hash[i]));
-
+ LASSERT (cfs_list_empty(&the_lnet.ln_peer_hash[i]));
+
LIBCFS_FREE(the_lnet.ln_peer_hash,
- LNET_PEER_HASHSIZE * sizeof (struct list_head));
+ LNET_PEER_HASHSIZE * sizeof (cfs_list_t));
the_lnet.ln_peer_hash = NULL;
}
int i;
LASSERT (the_lnet.ln_shutdown); /* i.e. no new peers */
-
+
for (i = 0; i < LNET_PEER_HASHSIZE; i++) {
- struct list_head *peers = &the_lnet.ln_peer_hash[i];
+ cfs_list_t *peers = &the_lnet.ln_peer_hash[i];
LNET_LOCK();
- while (!list_empty(peers)) {
- lnet_peer_t *lp = list_entry(peers->next,
- lnet_peer_t, lp_hashlist);
-
- list_del(&lp->lp_hashlist);
+ while (!cfs_list_empty(peers)) {
+ lnet_peer_t *lp = cfs_list_entry(peers->next,
+ lnet_peer_t,
+ lp_hashlist);
+
+ cfs_list_del(&lp->lp_hashlist);
lnet_peer_decref_locked(lp); /* lose hash table's ref */
}
LNET_UNLOCK();
LASSERT (lp->lp_refcount == 0);
LASSERT (lp->lp_rtr_refcount == 0);
- LASSERT (list_empty(&lp->lp_txq));
+ LASSERT (cfs_list_empty(&lp->lp_txq));
LASSERT (lp->lp_txqnob == 0);
LASSERT (lp->lp_rcd == NULL);
lnet_find_peer_locked (lnet_nid_t nid)
{
unsigned int idx = LNET_NIDADDR(nid) % LNET_PEER_HASHSIZE;
- struct list_head *peers = &the_lnet.ln_peer_hash[idx];
- struct list_head *tmp;
+ cfs_list_t *peers = &the_lnet.ln_peer_hash[idx];
+ cfs_list_t *tmp;
lnet_peer_t *lp;
if (the_lnet.ln_shutdown)
return NULL;
- list_for_each (tmp, peers) {
- lp = list_entry(tmp, lnet_peer_t, lp_hashlist);
-
+ cfs_list_for_each (tmp, peers) {
+ lp = cfs_list_entry(tmp, lnet_peer_t, lp_hashlist);
+
if (lp->lp_nid == nid) {
lnet_peer_addref_locked(lp);
return lp;
}
}
-
+
return NULL;
}
/* can't add peers after shutdown starts */
LASSERT (!the_lnet.ln_shutdown);
- list_add_tail(&lp->lp_hashlist, lnet_nid2peerhash(nid));
+ cfs_list_add_tail(&lp->lp_hashlist, lnet_nid2peerhash(nid));
the_lnet.ln_npeers++;
the_lnet.ln_peertable_version++;
*lpp = lp;
lp->lp_rtr_refcount++;
if (lp->lp_rtr_refcount == 1) {
- struct list_head *pos;
+ cfs_list_t *pos;
/* a simple insertion sort */
- list_for_each_prev(pos, &the_lnet.ln_routers) {
- lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
- lp_rtr_list);
+ cfs_list_for_each_prev(pos, &the_lnet.ln_routers) {
+ lnet_peer_t *rtr = cfs_list_entry(pos, lnet_peer_t,
+ lp_rtr_list);
if (rtr->lp_nid < lp->lp_nid)
break;
}
- list_add(&lp->lp_rtr_list, pos);
+ cfs_list_add(&lp->lp_rtr_list, pos);
/* addref for the_lnet.ln_routers */
lnet_peer_addref_locked(lp);
the_lnet.ln_routers_version++;
lp->lp_rtr_refcount--;
if (lp->lp_rtr_refcount == 0) {
if (lp->lp_rcd != NULL) {
- list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_zombie_rcd);
+ cfs_list_add(&lp->lp_rcd->rcd_list,
+ &the_lnet.ln_zombie_rcd);
lp->lp_rcd = NULL;
}
- list_del(&lp->lp_rtr_list);
+ cfs_list_del(&lp->lp_rtr_list);
/* decref for the_lnet.ln_routers */
lnet_peer_decref_locked(lp);
the_lnet.ln_routers_version++;
lnet_find_net_locked (__u32 net)
{
lnet_remotenet_t *rnet;
- struct list_head *tmp;
+ cfs_list_t *tmp;
LASSERT (!the_lnet.ln_shutdown);
- list_for_each (tmp, &the_lnet.ln_remote_nets) {
- rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (tmp, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(tmp, lnet_remotenet_t, lrn_list);
if (rnet->lrn_net == net)
return rnet;
{
unsigned int len = 0;
unsigned int offset = 0;
- struct list_head *e;
+ cfs_list_t *e;
extern __u64 lnet_create_interface_cookie(void);
- list_for_each (e, &rnet->lrn_routes) {
+ cfs_list_for_each (e, &rnet->lrn_routes) {
len++;
}
* See bug 18751 */
/* len+1 positions to add a new entry, also prevents division by 0 */
offset = ((unsigned int) lnet_create_interface_cookie()) % (len + 1);
- list_for_each (e, &rnet->lrn_routes) {
+ cfs_list_for_each (e, &rnet->lrn_routes) {
if (offset == 0)
break;
offset--;
}
- list_add(&route->lr_list, e);
+ cfs_list_add(&route->lr_list, e);
the_lnet.ln_remote_nets_version++;
lnet_rtr_addref_locked(route->lr_gateway);
int
lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
{
- struct list_head *e;
+ cfs_list_t *e;
lnet_remotenet_t *rnet;
lnet_remotenet_t *rnet2;
lnet_route_t *route;
rnet2 = lnet_find_net_locked(net);
if (rnet2 == NULL) {
/* new network */
- list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
+ cfs_list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
rnet2 = rnet;
}
/* Search for a duplicate route (it's a NOOP if it is) */
add_route = 1;
- list_for_each (e, &rnet2->lrn_routes) {
- lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
+ cfs_list_for_each (e, &rnet2->lrn_routes) {
+ lnet_route_t *route2 = cfs_list_entry(e, lnet_route_t, lr_list);
if (route2->lr_gateway == route->lr_gateway) {
add_route = 0;
lnet_remotenet_t *rnet;
lnet_route_t *route;
lnet_route_t *route2;
- struct list_head *e1;
- struct list_head *e2;
+ cfs_list_t *e1;
+ cfs_list_t *e2;
LNET_LOCK();
- list_for_each (e1, &the_lnet.ln_remote_nets) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
route2 = NULL;
- list_for_each (e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
+ cfs_list_for_each (e2, &rnet->lrn_routes) {
+ route = cfs_list_entry(e2, lnet_route_t, lr_list);
if (route2 == NULL)
route2 = route;
{
lnet_remotenet_t *rnet;
lnet_route_t *route;
- struct list_head *e1;
- struct list_head *e2;
+ cfs_list_t *e1;
+ cfs_list_t *e2;
int rc = -ENOENT;
CDEBUG(D_NET, "Del route: net %s : gw %s\n",
again:
LNET_LOCK();
- list_for_each (e1, &the_lnet.ln_remote_nets) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
net == rnet->lrn_net))
continue;
- list_for_each (e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
+ cfs_list_for_each (e2, &rnet->lrn_routes) {
+ route = cfs_list_entry(e2, lnet_route_t, lr_list);
if (!(gw_nid == LNET_NID_ANY ||
gw_nid == route->lr_gateway->lp_nid))
continue;
- list_del(&route->lr_list);
+ cfs_list_del(&route->lr_list);
the_lnet.ln_remote_nets_version++;
- if (list_empty(&rnet->lrn_routes))
- list_del(&rnet->lrn_list);
+ if (cfs_list_empty(&rnet->lrn_routes))
+ cfs_list_del(&rnet->lrn_list);
else
rnet = NULL;
lnet_get_route (int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive)
{
- struct list_head *e1;
- struct list_head *e2;
+ cfs_list_t *e1;
+ cfs_list_t *e2;
lnet_remotenet_t *rnet;
lnet_route_t *route;
LNET_LOCK();
- list_for_each (e1, &the_lnet.ln_remote_nets) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
+ rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
- list_for_each (e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
+ cfs_list_for_each (e2, &rnet->lrn_routes) {
+ route = cfs_list_entry(e2, lnet_route_t, lr_list);
if (idx-- == 0) {
*net = rnet->lrn_net;
lnet_wait_known_routerstate(void)
{
lnet_peer_t *rtr;
- struct list_head *entry;
+ cfs_list_t *entry;
int all_known;
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
LNET_LOCK();
all_known = 1;
- list_for_each (entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+ cfs_list_for_each (entry, &the_lnet.ln_routers) {
+ rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
if (rtr->lp_alive_count == 0) {
all_known = 0;
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
#ifdef __KERNEL__
- mutex_up(&the_lnet.ln_rc_signal);
+ cfs_mutex_up(&the_lnet.ln_rc_signal);
#endif
return;
}
LNET_LOCK();
- list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
+ cfs_list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
lnet_ni_status_t *ns = ni->ni_status;
LASSERT (ns != NULL);
void
lnet_destroy_rc_data (lnet_rc_data_t *rcd)
{
- LASSERT (list_empty(&rcd->rcd_list));
+ LASSERT (cfs_list_empty(&rcd->rcd_list));
/* detached from network */
LASSERT (LNetHandleIsInvalid(rcd->rcd_mdh));
if (!lnet_isrouter(rtr)) {
lnet_peer_decref_locked(rtr);
if (rcd != NULL)
- list_add(&rcd->rcd_list, &the_lnet.ln_zombie_rcd);
+ cfs_list_add(&rcd->rcd_list, &the_lnet.ln_zombie_rcd);
return; /* router table changed! */
}
* outstanding events as it is allowed outstanding sends */
eqsz = 0;
version = the_lnet.ln_routers_version;
- list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
+ cfs_list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
lnet_ni_t *ni = rtr->lp_ni;
lnet_process_id_t id;
return 0;
#ifdef __KERNEL__
- init_mutex_locked(&the_lnet.ln_rc_signal);
+ cfs_init_mutex_locked(&the_lnet.ln_rc_signal);
/* EQ size doesn't matter; the callback is guaranteed to get every
* event */
eqsz = 1;
rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
LASSERT (rc == 0);
/* block until event callback signals exit */
- mutex_down(&the_lnet.ln_rc_signal);
+ cfs_mutex_down(&the_lnet.ln_rc_signal);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT (rc == 0);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
#ifdef __KERNEL__
/* block until event callback signals exit */
- mutex_down(&the_lnet.ln_rc_signal);
+ cfs_mutex_down(&the_lnet.ln_rc_signal);
#else
while (the_lnet.ln_rc_state != LNET_RC_STATE_UNLINKED) {
lnet_router_checker();
{
lnet_rc_data_t *rcd;
lnet_rc_data_t *tmp;
- struct list_head free_rcd;
+ cfs_list_t free_rcd;
int i;
__u64 version;
LNET_LOCK();
rescan:
version = the_lnet.ln_routers_version;
- list_for_each_entry_safe (rcd, tmp, &the_lnet.ln_zombie_rcd, rcd_list) {
+ cfs_list_for_each_entry_safe (rcd, tmp, &the_lnet.ln_zombie_rcd,
+ rcd_list) {
if (LNetHandleIsInvalid(rcd->rcd_mdh)) {
- list_del(&rcd->rcd_list);
- list_add(&rcd->rcd_list, &free_rcd);
+ cfs_list_del(&rcd->rcd_list);
+ cfs_list_add(&rcd->rcd_list, &free_rcd);
continue;
}
}
i = 2;
- while (wait_unlink && !list_empty(&the_lnet.ln_zombie_rcd)) {
- rcd = list_entry(the_lnet.ln_zombie_rcd.next,
- lnet_rc_data_t, rcd_list);
+ while (wait_unlink && !cfs_list_empty(&the_lnet.ln_zombie_rcd)) {
+ rcd = cfs_list_entry(the_lnet.ln_zombie_rcd.next,
+ lnet_rc_data_t, rcd_list);
if (LNetHandleIsInvalid(rcd->rcd_mdh)) {
- list_del(&rcd->rcd_list);
- list_add(&rcd->rcd_list, &free_rcd);
+ cfs_list_del(&rcd->rcd_list);
+ cfs_list_add(&rcd->rcd_list, &free_rcd);
continue;
}
LNET_UNLOCK();
- while (!list_empty(&free_rcd)) {
- rcd = list_entry(free_rcd.next, lnet_rc_data_t, rcd_list);
- list_del_init(&rcd->rcd_list);
+ while (!cfs_list_empty(&free_rcd)) {
+ rcd = cfs_list_entry(free_rcd.next, lnet_rc_data_t, rcd_list);
+ cfs_list_del_init(&rcd->rcd_list);
lnet_destroy_rc_data(rcd);
}
return;
{
int rc;
lnet_peer_t *rtr;
- struct list_head *entry;
+ cfs_list_t *entry;
lnet_process_id_t rtr_id;
cfs_daemonize("router_checker");
rescan:
version = the_lnet.ln_routers_version;
- list_for_each (entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+ cfs_list_for_each (entry, &the_lnet.ln_routers) {
+ rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
lnet_ping_router_locked(rtr);
/* NB dropped lock */
/* Call cfs_pause() here always adds 1 to load average
* because kernel counts # active tasks as nr_running
* + nr_uninterruptible. */
- cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
}
LNET_LOCK();
- list_for_each (entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+ cfs_list_for_each (entry, &the_lnet.ln_routers) {
+ rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
if (rtr->lp_rcd == NULL)
continue;
- LASSERT (list_empty(&rtr->lp_rcd->rcd_list));
- list_add(&rtr->lp_rcd->rcd_list, &the_lnet.ln_zombie_rcd);
+ LASSERT (cfs_list_empty(&rtr->lp_rcd->rcd_list));
+ cfs_list_add(&rtr->lp_rcd->rcd_list, &the_lnet.ln_zombie_rcd);
rtr->lp_rcd = NULL;
}
int nbuffers = 0;
lnet_rtrbuf_t *rb;
- LASSERT (list_empty(&rbp->rbp_msgs));
+ LASSERT (cfs_list_empty(&rbp->rbp_msgs));
LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
- while (!list_empty(&rbp->rbp_bufs)) {
+ while (!cfs_list_empty(&rbp->rbp_bufs)) {
LASSERT (rbp->rbp_credits > 0);
- rb = list_entry(rbp->rbp_bufs.next,
- lnet_rtrbuf_t, rb_list);
- list_del(&rb->rb_list);
+ rb = cfs_list_entry(rbp->rbp_bufs.next,
+ lnet_rtrbuf_t, rb_list);
+ cfs_list_del(&rb->rb_list);
lnet_destroy_rtrbuf(rb, npages);
nbuffers++;
}
rbp->rbp_nbuffers++;
rbp->rbp_credits++;
rbp->rbp_mincredits++;
- list_add(&rb->rb_list, &rbp->rbp_bufs);
+ cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
/* No allocation "under fire" */
/* Otherwise we'd need code to schedule blocked msgs etc */
lnet_peer_t *lp = NULL;
cfs_time_t now = cfs_time_current();
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
CDEBUG (D_NET, "%s notifying %s: %s\n",
(ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
LNET_LOCK();
version = the_lnet.ln_routers_version;
- list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
+ cfs_list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
lnet_ping_router_locked(rtr);
LASSERT (version == the_lnet.ln_routers_version);
}
if (pos >= min_t(int, len, strlen(tmpstr)))
rc = 0;
else
- rc = trace_copyout_string(buffer, nob,
- tmpstr + pos, "\n");
+ rc = cfs_trace_copyout_string(buffer, nob,
+ tmpstr + pos, "\n");
LIBCFS_FREE(tmpstr, tmpsiz);
LIBCFS_FREE(ctrs, sizeof(*ctrs));
*ver_p = (unsigned int)the_lnet.ln_remote_nets_version;
LNET_UNLOCK();
} else {
- struct list_head *n;
- struct list_head *r;
+ cfs_list_t *n;
+ cfs_list_t *r;
lnet_route_t *route = NULL;
lnet_remotenet_t *rnet = NULL;
int skip = *ppos - 1;
n = the_lnet.ln_remote_nets.next;
while (n != &the_lnet.ln_remote_nets && route == NULL) {
- rnet = list_entry(n, lnet_remotenet_t, lrn_list);
+ rnet = cfs_list_entry(n, lnet_remotenet_t, lrn_list);
r = rnet->lrn_routes.next;
while (r != &rnet->lrn_routes) {
- lnet_route_t *re = list_entry(r, lnet_route_t,
- lr_list);
+ lnet_route_t *re =
+ cfs_list_entry(r, lnet_route_t,
+ lr_list);
if (skip == 0) {
route = re;
break;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
+ if (cfs_copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos += 1;
*ver_p = (unsigned int)the_lnet.ln_routers_version;
LNET_UNLOCK();
} else {
- struct list_head *r;
+ cfs_list_t *r;
lnet_peer_t *peer = NULL;
int skip = *ppos - 1;
r = the_lnet.ln_routers.next;
while (r != &the_lnet.ln_routers) {
- lnet_peer_t *lp = list_entry(r, lnet_peer_t,
- lp_rtr_list);
+ lnet_peer_t *lp = cfs_list_entry(r, lnet_peer_t,
+ lp_rtr_list);
if (skip == 0) {
peer = lp;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
+ if (cfs_copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos += 1;
num++;
} else {
- struct list_head *p = NULL;
+ cfs_list_t *p = NULL;
lnet_peer_t *peer = NULL;
int skip = num - 1;
p = the_lnet.ln_peer_hash[idx].next;
while (p != &the_lnet.ln_peer_hash[idx]) {
- lnet_peer_t *lp = list_entry(p, lnet_peer_t,
- lp_hashlist);
+ lnet_peer_t *lp = cfs_list_entry(p, lnet_peer_t,
+ lp_hashlist);
if (skip == 0) {
peer = lp;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
+ if (cfs_copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos = LNET_PHASH_POS_MAKE(idx, num);
if (pos >= min_t(int, len, strlen(tmpstr)))
rc = 0;
else
- rc = trace_copyout_string(buffer, nob,
- tmpstr + pos, NULL);
+ rc = cfs_trace_copyout_string(buffer, nob,
+ tmpstr + pos, NULL);
LIBCFS_FREE(tmpstr, tmpsiz);
return rc;
"rtr", "max", "tx", "min");
LASSERT (tmpstr + tmpsiz - s > 0);
} else {
- struct list_head *n;
+ cfs_list_t *n;
lnet_ni_t *ni = NULL;
int skip = *ppos - 1;
n = the_lnet.ln_nis.next;
while (n != &the_lnet.ln_nis) {
- lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
+ lnet_ni_t *a_ni = cfs_list_entry(n, lnet_ni_t, ni_list);
if (skip == 0) {
ni = a_ni;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
+ if (cfs_copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos += 1;
#ifndef __KERNEL__
gettimeofday(&tv, NULL);
#else
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
#endif
if ((tv.tv_usec & 1) == 0) return 0;
CERROR ("BRW RPC to %s failed with %d\n",
libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
if (!tsi->tsi_stopping) /* rpc could have been aborted */
- atomic_inc(&sn->sn_brw_errors);
+ cfs_atomic_inc(&sn->sn_brw_errors);
goto out;
}
libcfs_id2str(rpc->crpc_dest), reply->brw_status);
if (reply->brw_status != 0) {
- atomic_inc(&sn->sn_brw_errors);
+ cfs_atomic_inc(&sn->sn_brw_errors);
rpc->crpc_status = -(int)reply->brw_status;
goto out;
}
if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
CERROR ("Bulk data from %s is corrupted!\n",
libcfs_id2str(rpc->crpc_dest));
- atomic_inc(&sn->sn_brw_errors);
+ cfs_atomic_inc(&sn->sn_brw_errors);
rpc->crpc_status = -EBADMSG;
}
#include "console.h"
int
-lst_session_new_ioctl(lstio_session_new_args_t *args)
+lst_session_new_ioctl(lstio_session_new_args_t *args)
{
char *name;
int rc;
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
-
+
LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1);
if (name == NULL)
return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_ses_namep,
- args->lstio_ses_nmlen)) {
+
+ if (cfs_copy_from_user(name,
+ args->lstio_ses_namep,
+ args->lstio_ses_nmlen)) {
LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
return -EFAULT;
}
-
+
name[args->lstio_ses_nmlen] = 0;
-
+
rc = lstcon_session_new(name,
args->lstio_ses_key,
args->lstio_ses_timeout,
args->lstio_ses_force,
args->lstio_ses_idp);
-
+
LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
return rc;
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name, args->lstio_dbg_namep,
- args->lstio_dbg_nmlen)) {
+ if (cfs_copy_from_user(name, args->lstio_dbg_namep,
+ args->lstio_dbg_nmlen)) {
LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
return -EFAULT;
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (cfs_copy_from_user(name,
+ args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen);
return -EFAULT;
}
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
-
+
if (args->lstio_grp_namep == NULL ||
- args->lstio_grp_nmlen <= 0 ||
+ args->lstio_grp_nmlen <= 0 ||
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (cfs_copy_from_user(name,
+ args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
+ if (cfs_copy_from_user(name,
args->lstio_grp_namep,
args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
}
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
+
return rc;
}
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (cfs_copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
if (args->lstio_grp_idxp == NULL || /* node index */
args->lstio_grp_ndentp == NULL) /* # of node entry */
return -EINVAL;
-
- if (copy_from_user(&ndent,
- args->lstio_grp_ndentp, sizeof(ndent)) ||
- copy_from_user(&index, args->lstio_grp_idxp, sizeof(index)))
+
+ if (cfs_copy_from_user(&ndent, args->lstio_grp_ndentp,
+ sizeof(ndent)) ||
+ cfs_copy_from_user(&index, args->lstio_grp_idxp,
+ sizeof(index)))
return -EFAULT;
if (ndent <= 0 || index < 0)
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (cfs_copy_from_user(name,
+ args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
return rc;
if (args->lstio_grp_dentsp != NULL &&
- (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
- copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
+ (cfs_copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
+ cfs_copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
rc = -EFAULT;
return 0;
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
+ if (cfs_copy_from_user(name,
+ args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
+ if (cfs_copy_from_user(name,
+ args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
+ if (cfs_copy_from_user(name,
+ args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
+ if (cfs_copy_from_user(name,
+ args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
if (args->lstio_bat_idxp == NULL || /* node index */
args->lstio_bat_ndentp == NULL) /* # of node entry */
return -EINVAL;
-
- if (copy_from_user(&index, args->lstio_bat_idxp, sizeof(index)) ||
- copy_from_user(&ndent, args->lstio_bat_ndentp, sizeof(ndent)))
+
+ if (cfs_copy_from_user(&index, args->lstio_bat_idxp,
+ sizeof(index)) ||
+ cfs_copy_from_user(&ndent, args->lstio_bat_ndentp,
+ sizeof(ndent)))
return -EFAULT;
if (ndent <= 0 || index < 0)
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+ if (cfs_copy_from_user(name,
+ args->lstio_bat_namep, args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
return rc;
if (args->lstio_bat_dentsp != NULL &&
- (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
- copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
+ (cfs_copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
+ cfs_copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
rc = -EFAULT;
return rc;
if (name == NULL)
return -ENOMEM;
- if (copy_from_user(name, args->lstio_sta_namep,
- args->lstio_sta_nmlen)) {
+ if (cfs_copy_from_user(name, args->lstio_sta_namep,
+ args->lstio_sta_nmlen)) {
LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
return -EFAULT;
}
}
rc = -EFAULT;
- if (copy_from_user(name,
- args->lstio_tes_bat_name,
- args->lstio_tes_bat_nmlen) ||
- copy_from_user(srcgrp,
- args->lstio_tes_sgrp_name,
- args->lstio_tes_sgrp_nmlen) ||
- copy_from_user(dstgrp,
- args->lstio_tes_dgrp_name,
- args->lstio_tes_dgrp_nmlen) ||
- copy_from_user(param, args->lstio_tes_param,
- args->lstio_tes_param_len))
+ if (cfs_copy_from_user(name,
+ args->lstio_tes_bat_name,
+ args->lstio_tes_bat_nmlen) ||
+ cfs_copy_from_user(srcgrp,
+ args->lstio_tes_sgrp_name,
+ args->lstio_tes_sgrp_nmlen) ||
+ cfs_copy_from_user(dstgrp,
+ args->lstio_tes_dgrp_name,
+ args->lstio_tes_dgrp_nmlen) ||
+ cfs_copy_from_user(param, args->lstio_tes_param,
+ args->lstio_tes_param_len))
goto out;
rc = lstcon_test_add(name,
&ret, args->lstio_tes_resultp);
if (ret != 0)
- rc = (copy_to_user(args->lstio_tes_retp, &ret, sizeof(ret))) ?
- -EFAULT : 0;
+ rc = (cfs_copy_to_user(args->lstio_tes_retp, &ret,
+ sizeof(ret))) ? -EFAULT : 0;
out:
if (name != NULL)
LIBCFS_FREE(name, args->lstio_tes_bat_nmlen + 1);
return -ENOMEM;
/* copy in parameter */
- if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
+ if (cfs_copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
LIBCFS_FREE(buf, data->ioc_plen1);
return -EFAULT;
}
- mutex_down(&console_session.ses_mutex);
+ cfs_mutex_down(&console_session.ses_mutex);
console_session.ses_laststamp = cfs_time_current_sec();
}
memset(&console_session.ses_trans_stat, 0, sizeof(lstcon_trans_stat_t));
-
+
switch (opc) {
case LSTIO_SESSION_NEW:
rc = lst_session_new_ioctl((lstio_session_new_args_t *)buf);
rc = -EINVAL;
}
- if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
- sizeof(lstcon_trans_stat_t)))
+ if (cfs_copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
+ sizeof(lstcon_trans_stat_t)))
rc = -EFAULT;
out:
- mutex_up(&console_session.ses_mutex);
+ cfs_mutex_up(&console_session.ses_mutex);
LIBCFS_FREE(buf, data->ioc_plen1);
LASSERT (crpc != NULL && rpc == crpc->crp_rpc);
LASSERT (crpc->crp_posted && !crpc->crp_finished);
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
if (crpc->crp_trans == NULL) {
/* Orphan RPC is not in any transaction,
* I'm just a poor body and nobody loves me */
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
/* release it */
lstcon_rpc_put(crpc);
}
/* wakeup (transaction)thread if I'm the last RPC in the transaction */
- if (atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+ if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
cfs_waitq_signal(&crpc->crp_trans->tas_waitq);
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
}
int
crpc->crp_static = !cached;
CFS_INIT_LIST_HEAD(&crpc->crp_link);
- atomic_inc(&console_session.ses_rpc_counter);
+ cfs_atomic_inc(&console_session.ses_rpc_counter);
return 0;
}
lstcon_rpc_t *crpc = NULL;
int rc;
- spin_lock(&console_session.ses_rpc_lock);
+ cfs_spin_lock(&console_session.ses_rpc_lock);
- if (!list_empty(&console_session.ses_rpc_freelist)) {
- crpc = list_entry(console_session.ses_rpc_freelist.next,
- lstcon_rpc_t, crp_link);
- list_del_init(&crpc->crp_link);
+ if (!cfs_list_empty(&console_session.ses_rpc_freelist)) {
+ crpc = cfs_list_entry(console_session.ses_rpc_freelist.next,
+ lstcon_rpc_t, crp_link);
+ cfs_list_del_init(&crpc->crp_link);
}
- spin_unlock(&console_session.ses_rpc_lock);
+ cfs_spin_unlock(&console_session.ses_rpc_lock);
if (crpc == NULL) {
LIBCFS_ALLOC(crpc, sizeof(*crpc));
srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
int i;
- LASSERT (list_empty(&crpc->crp_link));
+ LASSERT (cfs_list_empty(&crpc->crp_link));
for (i = 0; i < bulk->bk_niov; i++) {
if (bulk->bk_iovs[i].kiov_page == NULL)
crpc->crp_static = 1;
} else {
- spin_lock(&console_session.ses_rpc_lock);
+ cfs_spin_lock(&console_session.ses_rpc_lock);
- list_add(&crpc->crp_link, &console_session.ses_rpc_freelist);
+ cfs_list_add(&crpc->crp_link,
+ &console_session.ses_rpc_freelist);
- spin_unlock(&console_session.ses_rpc_lock);
+ cfs_spin_unlock(&console_session.ses_rpc_lock);
}
/* RPC is not alive now */
- atomic_dec(&console_session.ses_rpc_counter);
+ cfs_atomic_dec(&console_session.ses_rpc_counter);
}
void
LASSERT (trans != NULL);
- atomic_inc(&trans->tas_remaining);
+ cfs_atomic_inc(&trans->tas_remaining);
crpc->crp_posted = 1;
sfw_post_rpc(crpc->crp_rpc);
}
int
-lstcon_rpc_trans_prep(struct list_head *translist,
+lstcon_rpc_trans_prep(cfs_list_t *translist,
int transop, lstcon_rpc_trans_t **transpp)
{
lstcon_rpc_trans_t *trans;
if (translist == NULL)
CFS_INIT_LIST_HEAD(&trans->tas_olink);
else
- list_add_tail(&trans->tas_olink, translist);
+ cfs_list_add_tail(&trans->tas_olink, translist);
- list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
+ cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
- atomic_set(&trans->tas_remaining, 0);
+ cfs_atomic_set(&trans->tas_remaining, 0);
cfs_waitq_init(&trans->tas_waitq);
*transpp = trans;
void
lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc)
{
- list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
+ cfs_list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
crpc->crp_trans = trans;
}
lstcon_rpc_t, crp_link) {
rpc = crpc->crp_rpc;
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
if (!crpc->crp_posted || crpc->crp_stamp != 0) {
/* rpc done or aborted already */
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
continue;
}
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = error;
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
sfw_abort_rpc(rpc);
lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
{
if (console_session.ses_shutdown &&
- !list_empty(&trans->tas_olink)) /* It's not an end session RPC */
+ !cfs_list_empty(&trans->tas_olink)) /* Not an end session RPC */
return 1;
- return (atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
+ return (cfs_atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
}
int
lstcon_rpc_t *crpc;
int rc;
- if (list_empty(&trans->tas_rpcs_list))
+ if (cfs_list_empty(&trans->tas_rpcs_list))
return 0;
if (timeout < LST_TRANS_MIN_TIMEOUT)
lstcon_rpc_post(crpc);
}
- mutex_up(&console_session.ses_mutex);
+ cfs_mutex_up(&console_session.ses_mutex);
cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
lstcon_rpc_trans_check(trans),
rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
- mutex_down(&console_session.ses_mutex);
+ cfs_mutex_down(&console_session.ses_mutex);
if (console_session.ses_shutdown)
rc = -ESHUTDOWN;
int
lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
- struct list_head *head_up,
+ cfs_list_t *head_up,
lstcon_rpc_readent_func_t readent)
{
- struct list_head tmp;
- struct list_head *next;
+ cfs_list_t tmp;
+ cfs_list_t *next;
lstcon_rpc_ent_t *ent;
srpc_generic_reply_t *rep;
srpc_client_rpc_t *rpc;
cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
lstcon_rpc_t, crp_link) {
- if (copy_from_user(&tmp, next, sizeof(struct list_head)))
+ if (cfs_copy_from_user(&tmp, next,
+ sizeof(cfs_list_t)))
return -EFAULT;
if (tmp.next == head_up)
next = tmp.next;
- ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
+ ent = cfs_list_entry(next, lstcon_rpc_ent_t, rpe_link);
rpc = crpc->crp_rpc;
(cfs_time_t)console_session.ses_id.ses_stamp);
cfs_duration_usec(dur, &tv);
- if (copy_to_user(&ent->rpe_peer,
- &nd->nd_id, sizeof(lnet_process_id_t)) ||
- copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
- copy_to_user(&ent->rpe_state,
- &nd->nd_state, sizeof(nd->nd_state)) ||
- copy_to_user(&ent->rpe_rpc_errno, &error, sizeof(error)))
+ if (cfs_copy_to_user(&ent->rpe_peer,
+ &nd->nd_id, sizeof(lnet_process_id_t)) ||
+ cfs_copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
+ cfs_copy_to_user(&ent->rpe_state,
+ &nd->nd_state, sizeof(nd->nd_state)) ||
+ cfs_copy_to_user(&ent->rpe_rpc_errno, &error,
+ sizeof(error)))
return -EFAULT;
if (error != 0)
/* RPC is done */
rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
- if (copy_to_user(&ent->rpe_sid,
- &rep->sid, sizeof(lst_sid_t)) ||
- copy_to_user(&ent->rpe_fwk_errno,
- &rep->status, sizeof(rep->status)))
+ if (cfs_copy_to_user(&ent->rpe_sid,
+ &rep->sid, sizeof(lst_sid_t)) ||
+ cfs_copy_to_user(&ent->rpe_fwk_errno,
+ &rep->status, sizeof(rep->status)))
return -EFAULT;
if (readent == NULL)
lstcon_rpc_t *crpc;
lstcon_rpc_t *tmp;
int count = 0;
-
+
cfs_list_for_each_entry_safe_typed(crpc, tmp,
&trans->tas_rpcs_list,
lstcon_rpc_t, crp_link) {
rpc = crpc->crp_rpc;
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
/* free it if not posted or finished already */
if (!crpc->crp_posted || crpc->crp_finished) {
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
- list_del_init(&crpc->crp_link);
+ cfs_list_del_init(&crpc->crp_link);
lstcon_rpc_put(crpc);
continue;
/* rpcs can be still not callbacked (even LNetMDUnlink is called)
* because huge timeout for inaccessible network, don't make
- * user wait for them, just abandon them, they will be recycled
+ * user wait for them, just abandon them, they will be recycled
* in callback */
LASSERT (crpc->crp_status != 0);
crpc->crp_node = NULL;
crpc->crp_trans = NULL;
- list_del_init(&crpc->crp_link);
+ cfs_list_del_init(&crpc->crp_link);
count ++;
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
- atomic_dec(&trans->tas_remaining);
+ cfs_atomic_dec(&trans->tas_remaining);
}
- LASSERT (atomic_read(&trans->tas_remaining) == 0);
+ LASSERT (cfs_atomic_read(&trans->tas_remaining) == 0);
- list_del(&trans->tas_link);
- if (!list_empty(&trans->tas_olink))
- list_del(&trans->tas_olink);
+ cfs_list_del(&trans->tas_link);
+ if (!cfs_list_empty(&trans->tas_olink))
+ cfs_list_del(&trans->tas_olink);
CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n",
lstcon_rpc_trans_name(trans->tas_opc), count);
}
int
-lstcon_rpc_trans_ndlist(struct list_head *ndlist,
- struct list_head *translist, int transop,
+lstcon_rpc_trans_ndlist(cfs_list_t *ndlist,
+ cfs_list_t *translist, int transop,
void *arg, lstcon_rpc_cond_func_t condition,
lstcon_rpc_trans_t **transpp)
{
/* RPC pinger is a special case of transaction,
* it's called by timer at 8 seconds interval.
*/
- mutex_down(&console_session.ses_mutex);
+ cfs_mutex_down(&console_session.ses_mutex);
if (console_session.ses_shutdown || console_session.ses_expired) {
- mutex_up(&console_session.ses_mutex);
+ cfs_mutex_up(&console_session.ses_mutex);
return;
}
if (crpc->crp_rpc != NULL) {
LASSERT (crpc->crp_trans == trans);
- LASSERT (!list_empty(&crpc->crp_link));
+ LASSERT (!cfs_list_empty(&crpc->crp_link));
- spin_lock(&crpc->crp_rpc->crpc_lock);
+ cfs_spin_lock(&crpc->crp_rpc->crpc_lock);
LASSERT (crpc->crp_posted);
if (!crpc->crp_finished) {
/* in flight */
- spin_unlock(&crpc->crp_rpc->crpc_lock);
+ cfs_spin_unlock(&crpc->crp_rpc->crpc_lock);
continue;
}
- spin_unlock(&crpc->crp_rpc->crpc_lock);
+ cfs_spin_unlock(&crpc->crp_rpc->crpc_lock);
lstcon_rpc_get_reply(crpc, &rep);
- list_del_init(&crpc->crp_link);
+ cfs_list_del_init(&crpc->crp_link);
lstcon_rpc_put(crpc);
}
}
if (console_session.ses_expired) {
- mutex_up(&console_session.ses_mutex);
+ cfs_mutex_up(&console_session.ses_mutex);
return;
}
ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
- mutex_up(&console_session.ses_mutex);
+ cfs_mutex_up(&console_session.ses_mutex);
}
int
stt_timer_t *ptimer;
int rc;
- LASSERT (list_empty(&console_session.ses_rpc_freelist));
- LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
+ LASSERT (cfs_atomic_read(&console_session.ses_rpc_counter) == 0);
rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
&console_session.ses_ping);
{
lstcon_rpc_trans_t *trans;
lstcon_rpc_t *crpc;
- struct list_head *pacer;
- struct list_head zlist;
+ cfs_list_t *pacer;
+ cfs_list_t zlist;
/* Called with hold of global mutex */
LASSERT (console_session.ses_shutdown);
- while (!list_empty(&console_session.ses_trans_list)) {
- list_for_each(pacer, &console_session.ses_trans_list) {
- trans = list_entry(pacer, lstcon_rpc_trans_t, tas_link);
+ while (!cfs_list_empty(&console_session.ses_trans_list)) {
+ cfs_list_for_each(pacer, &console_session.ses_trans_list) {
+ trans = cfs_list_entry(pacer, lstcon_rpc_trans_t,
+ tas_link);
CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
lstcon_rpc_trans_name(trans->tas_opc));
cfs_waitq_signal(&trans->tas_waitq);
}
- mutex_up(&console_session.ses_mutex);
+ cfs_mutex_up(&console_session.ses_mutex);
CWARN("Session is shutting down, "
"waiting for termination of transactions\n");
cfs_pause(cfs_time_seconds(1));
- mutex_down(&console_session.ses_mutex);
+ cfs_mutex_down(&console_session.ses_mutex);
}
- spin_lock(&console_session.ses_rpc_lock);
+ cfs_spin_lock(&console_session.ses_rpc_lock);
- lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
+ lst_wait_until((cfs_atomic_read(&console_session.ses_rpc_counter) == 0),
console_session.ses_rpc_lock,
"Network is not accessable or target is down, "
"waiting for %d console RPCs to being recycled\n",
- atomic_read(&console_session.ses_rpc_counter));
+ cfs_atomic_read(&console_session.ses_rpc_counter));
- list_add(&zlist, &console_session.ses_rpc_freelist);
- list_del_init(&console_session.ses_rpc_freelist);
+ cfs_list_add(&zlist, &console_session.ses_rpc_freelist);
+ cfs_list_del_init(&console_session.ses_rpc_freelist);
- spin_unlock(&console_session.ses_rpc_lock);
+ cfs_spin_unlock(&console_session.ses_rpc_lock);
- while (!list_empty(&zlist)) {
- crpc = list_entry(zlist.next, lstcon_rpc_t, crp_link);
+ while (!cfs_list_empty(&zlist)) {
+ crpc = cfs_list_entry(zlist.next, lstcon_rpc_t, crp_link);
- list_del(&crpc->crp_link);
+ cfs_list_del(&crpc->crp_link);
LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
}
}
console_session.ses_ping = NULL;
- spin_lock_init(&console_session.ses_rpc_lock);
- atomic_set(&console_session.ses_rpc_counter, 0);
+ cfs_spin_lock_init(&console_session.ses_rpc_lock);
+ cfs_atomic_set(&console_session.ses_rpc_counter, 0);
CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
return 0;
void
lstcon_rpc_module_fini(void)
{
- LASSERT (list_empty(&console_session.ses_rpc_freelist));
- LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
+ LASSERT (cfs_atomic_read(&console_session.ses_rpc_counter) == 0);
}
#endif
struct lstcon_node;
typedef struct lstcon_rpc {
- struct list_head crp_link; /* chain on rpc transaction */
- srpc_client_rpc_t *crp_rpc; /* client rpc */
- struct lstcon_node *crp_node; /* destination node */
+ cfs_list_t crp_link; /* chain on rpc transaction */
+ srpc_client_rpc_t *crp_rpc; /* client rpc */
+ struct lstcon_node *crp_node; /* destination node */
struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
- int crp_posted:1; /* rpc is posted */
- int crp_finished:1; /* rpc is finished */
- int crp_unpacked:1; /* reply is unpacked */
- int crp_static:1; /* not from RPC buffer */
- int crp_status; /* console rpc errors */
- cfs_time_t crp_stamp; /* replied time stamp */
+ int crp_posted:1; /* rpc is posted */
+ int crp_finished:1; /* rpc is finished */
+ int crp_unpacked:1; /* reply is unpacked */
+ int crp_static:1; /* not from RPC buffer */
+ int crp_status; /* console rpc errors */
+ cfs_time_t crp_stamp; /* replied time stamp */
} lstcon_rpc_t;
typedef struct lstcon_rpc_trans {
- struct list_head tas_olink; /* link chain on owner list */
- struct list_head tas_link; /* link chain on global list */
- int tas_opc; /* operation code of transaction */
- cfs_waitq_t tas_waitq; /* wait queue head */
- atomic_t tas_remaining; /* # of un-scheduled rpcs */
- struct list_head tas_rpcs_list; /* queued requests */
+ cfs_list_t tas_olink; /* link chain on owner list */
+ cfs_list_t tas_link; /* link chain on global list */
+ int tas_opc; /* operation code of transaction */
+ cfs_waitq_t tas_waitq; /* wait queue head */
+ cfs_atomic_t tas_remaining; /* # of un-scheduled rpcs */
+ cfs_list_t tas_rpcs_list; /* queued requests */
} lstcon_rpc_trans_t;
#define LST_TRANS_PRIVATE 0x1000
struct lstcon_test *test, lstcon_rpc_t **crpc);
int lstcon_statrpc_prep(struct lstcon_node *nd, lstcon_rpc_t **crpc);
void lstcon_rpc_put(lstcon_rpc_t *crpc);
-int lstcon_rpc_trans_prep(struct list_head *translist,
+int lstcon_rpc_trans_prep(cfs_list_t *translist,
int transop, lstcon_rpc_trans_t **transpp);
-int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
- struct list_head *translist, int transop,
+int lstcon_rpc_trans_ndlist(cfs_list_t *ndlist,
+ cfs_list_t *translist, int transop,
void *arg, lstcon_rpc_cond_func_t condition,
lstcon_rpc_trans_t **transpp);
void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
lstcon_trans_stat_t *stat);
int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
- struct list_head *head_up,
+ cfs_list_t *head_up,
lstcon_rpc_readent_func_t readent);
void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
/* queued in global hash & list, no refcount is taken by
* global hash & list, if caller release his refcount,
* node will be released */
- list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
- list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
+ cfs_list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
+ cfs_list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
return 0;
}
ndl = (lstcon_ndlink_t *)(nd + 1);
- LASSERT (!list_empty(&ndl->ndl_link));
- LASSERT (!list_empty(&ndl->ndl_hlink));
+ LASSERT (!cfs_list_empty(&ndl->ndl_link));
+ LASSERT (!cfs_list_empty(&ndl->ndl_hlink));
/* remove from session */
- list_del(&ndl->ndl_link);
- list_del(&ndl->ndl_hlink);
+ cfs_list_del(&ndl->ndl_link);
+ cfs_list_del(&ndl->ndl_hlink);
LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
}
static int
-lstcon_ndlink_find(struct list_head *hash,
+lstcon_ndlink_find(cfs_list_t *hash,
lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
ndl->ndl_node = nd;
CFS_INIT_LIST_HEAD(&ndl->ndl_link);
- list_add_tail(&ndl->ndl_hlink, &hash[idx]);
+ cfs_list_add_tail(&ndl->ndl_hlink, &hash[idx]);
return 0;
}
static void
lstcon_ndlink_release(lstcon_ndlink_t *ndl)
{
- LASSERT (list_empty(&ndl->ndl_link));
- LASSERT (!list_empty(&ndl->ndl_hlink));
+ LASSERT (cfs_list_empty(&ndl->ndl_link));
+ LASSERT (!cfs_list_empty(&ndl->ndl_hlink));
- list_del(&ndl->ndl_hlink); /* delete from hash */
+ cfs_list_del(&ndl->ndl_hlink); /* delete from hash */
lstcon_node_put(ndl->ndl_node);
LIBCFS_FREE(ndl, sizeof(*ndl));
if (--grp->grp_ref > 0)
return;
- if (!list_empty(&grp->grp_link))
- list_del(&grp->grp_link);
+ if (!cfs_list_empty(&grp->grp_link))
+ cfs_list_del(&grp->grp_link);
lstcon_group_drain(grp, 0);
for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT (list_empty(&grp->grp_ndl_hash[i]));
+ LASSERT (cfs_list_empty(&grp->grp_ndl_hash[i]));
}
LIBCFS_FREE(grp, offsetof(lstcon_group_t,
if (rc != 0)
return rc;
- if (!list_empty(&(*ndlpp)->ndl_link))
+ if (!cfs_list_empty(&(*ndlpp)->ndl_link))
return 0;
- list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
+ cfs_list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
grp->grp_nnode ++;
return 0;
static void
lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
{
- list_del_init(&ndl->ndl_link);
+ cfs_list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
grp->grp_nnode --;
}
unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
LST_NODE_HASHSIZE;
- list_del(&ndl->ndl_hlink);
- list_del(&ndl->ndl_link);
+ cfs_list_del(&ndl->ndl_hlink);
+ cfs_list_del(&ndl->ndl_link);
old->grp_nnode --;
- list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
- list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
+ cfs_list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
+ cfs_list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
new->grp_nnode ++;
return;
{
lstcon_ndlink_t *ndl;
- while (!list_empty(&old->grp_ndl_list)) {
- ndl = list_entry(old->grp_ndl_list.next,
- lstcon_ndlink_t, ndl_link);
+ while (!cfs_list_empty(&old->grp_ndl_list)) {
+ ndl = cfs_list_entry(old->grp_ndl_list.next,
+ lstcon_ndlink_t, ndl_link);
lstcon_group_ndlink_move(old, new, ndl);
}
}
case LST_TRANS_SESQRY:
rep = &msg->msg_body.dbg_reply;
- if (copy_to_user(&ent_up->rpe_priv[0],
- &rep->dbg_timeout, sizeof(int)) ||
- copy_to_user(&ent_up->rpe_payload[0],
- &rep->dbg_name, LST_NAME_SIZE))
+ if (cfs_copy_to_user(&ent_up->rpe_priv[0],
+ &rep->dbg_timeout, sizeof(int)) ||
+ cfs_copy_to_user(&ent_up->rpe_payload[0],
+ &rep->dbg_name, LST_NAME_SIZE))
return -EFAULT;
return 0;
static int
lstcon_group_nodes_add(lstcon_group_t *grp, int count,
- lnet_process_id_t *ids_up, struct list_head *result_up)
+ lnet_process_id_t *ids_up,
+ cfs_list_t *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_ndlink_t *ndl;
}
for (i = 0 ; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
break;
}
/* post all RPCs */
lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
+
rc = lstcon_rpc_trans_interpreter(trans, result_up,
lstcon_sesrpc_readent);
/* destroy all RPGs */
static int
lstcon_group_nodes_remove(lstcon_group_t *grp,
int count, lnet_process_id_t *ids_up,
- struct list_head *result_up)
+ cfs_list_t *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_ndlink_t *ndl;
}
for (i = 0; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
goto error;
}
return -ENOMEM;
}
- list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
+ cfs_list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
return rc;
}
int
lstcon_nodes_add(char *name, int count,
- lnet_process_id_t *ids_up, struct list_head *result_up)
+ lnet_process_id_t *ids_up, cfs_list_t *result_up)
{
lstcon_group_t *grp;
int rc;
lstcon_group_put(grp);
/* release empty group */
- if (list_empty(&grp->grp_ndl_list))
+ if (cfs_list_empty(&grp->grp_ndl_list))
lstcon_group_put(grp);
return 0;
int
lstcon_nodes_remove(char *name, int count,
- lnet_process_id_t *ids_up, struct list_head *result_up)
+ lnet_process_id_t *ids_up, cfs_list_t *result_up)
{
lstcon_group_t *grp = NULL;
int rc;
lstcon_group_put(grp);
/* release empty group */
- if (list_empty(&grp->grp_ndl_list))
+ if (cfs_list_empty(&grp->grp_ndl_list))
lstcon_group_put(grp);
return rc;
}
int
-lstcon_group_refresh(char *name, struct list_head *result_up)
+lstcon_group_refresh(char *name, cfs_list_t *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_group_t *grp;
cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
lstcon_group_t, grp_link) {
if (index-- == 0) {
- return copy_to_user(name_up, grp->grp_name, len) ?
+ return cfs_copy_to_user(name_up, grp->grp_name, len) ?
-EFAULT : 0;
}
}
}
static int
-lstcon_nodes_getent(struct list_head *head, int *index_p,
+lstcon_nodes_getent(cfs_list_t *head, int *index_p,
int *count_p, lstcon_node_ent_t *dents_up)
{
lstcon_ndlink_t *ndl;
break;
nd = ndl->ndl_node;
- if (copy_to_user(&dents_up[count].nde_id,
- &nd->nd_id, sizeof(nd->nd_id)) ||
- copy_to_user(&dents_up[count].nde_state,
- &nd->nd_state, sizeof(nd->nd_state)))
+ if (cfs_copy_to_user(&dents_up[count].nde_id,
+ &nd->nd_id, sizeof(nd->nd_id)) ||
+ cfs_copy_to_user(&dents_up[count].nde_state,
+ &nd->nd_state, sizeof(nd->nd_state)))
return -EFAULT;
count ++;
lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
- rc = copy_to_user(gents_p, gentp,
- sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
+ rc = cfs_copy_to_user(gents_p, gentp,
+ sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
-
+
lstcon_group_put(grp);
return 0;
}
LIBCFS_ALLOC(bat->bat_cli_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
if (bat->bat_cli_hash == NULL) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
}
LIBCFS_ALLOC(bat->bat_srv_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
if (bat->bat_srv_hash == NULL) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
CFS_INIT_LIST_HEAD(&bat->bat_srv_hash[i]);
}
- list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
+ cfs_list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
return rc;
}
cfs_list_for_each_entry_typed(bat, &console_session.ses_bat_list,
lstcon_batch_t, bat_link) {
if (index-- == 0) {
- return copy_to_user(name_up,bat->bat_name, len) ?
+ return cfs_copy_to_user(name_up,bat->bat_name, len) ?
-EFAULT: 0;
}
}
lstcon_node_ent_t *dents_up)
{
lstcon_test_batch_ent_t *entp;
- struct list_head *clilst;
- struct list_head *srvlst;
+ cfs_list_t *clilst;
+ cfs_list_t *srvlst;
lstcon_test_t *test = NULL;
lstcon_batch_t *bat;
lstcon_ndlink_t *ndl;
cfs_list_for_each_entry_typed(ndl, srvlst, lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
- rc = copy_to_user(ent_up, entp,
- sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
+ rc = cfs_copy_to_user(ent_up, entp,
+ sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
}
static int
-lstcon_batch_op(lstcon_batch_t *bat, int transop, struct list_head *result_up)
+lstcon_batch_op(lstcon_batch_t *bat, int transop,
+ cfs_list_t *result_up)
{
lstcon_rpc_trans_t *trans;
int rc;
}
int
-lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
+lstcon_batch_run(char *name, int timeout, cfs_list_t *result_up)
{
lstcon_batch_t *bat;
int rc;
}
int
-lstcon_batch_stop(char *name, int force, struct list_head *result_up)
+lstcon_batch_stop(char *name, int force, cfs_list_t *result_up)
{
lstcon_batch_t *bat;
int rc;
lstcon_test_t *test;
int i;
- list_del(&bat->bat_link);
+ cfs_list_del(&bat->bat_link);
- while (!list_empty(&bat->bat_test_list)) {
- test = list_entry(bat->bat_test_list.next,
- lstcon_test_t, tes_link);
- LASSERT (list_empty(&test->tes_trans_list));
+ while (!cfs_list_empty(&bat->bat_test_list)) {
+ test = cfs_list_entry(bat->bat_test_list.next,
+ lstcon_test_t, tes_link);
+ LASSERT (cfs_list_empty(&test->tes_trans_list));
- list_del(&test->tes_link);
+ cfs_list_del(&test->tes_link);
lstcon_group_put(test->tes_src_grp);
lstcon_group_put(test->tes_dst_grp);
tes_param[test->tes_paramlen]));
}
- LASSERT (list_empty(&bat->bat_trans_list));
+ LASSERT (cfs_list_empty(&bat->bat_trans_list));
- while (!list_empty(&bat->bat_cli_list)) {
- ndl = list_entry(bat->bat_cli_list.next,
- lstcon_ndlink_t, ndl_link);
- list_del_init(&ndl->ndl_link);
+ while (!cfs_list_empty(&bat->bat_cli_list)) {
+ ndl = cfs_list_entry(bat->bat_cli_list.next,
+ lstcon_ndlink_t, ndl_link);
+ cfs_list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
}
- while (!list_empty(&bat->bat_srv_list)) {
- ndl = list_entry(bat->bat_srv_list.next,
- lstcon_ndlink_t, ndl_link);
- list_del_init(&ndl->ndl_link);
+ while (!cfs_list_empty(&bat->bat_srv_list)) {
+ ndl = cfs_list_entry(bat->bat_srv_list.next,
+ lstcon_ndlink_t, ndl_link);
+ cfs_list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
}
for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT (list_empty(&bat->bat_cli_hash[i]));
- LASSERT (list_empty(&bat->bat_srv_hash[i]));
+ LASSERT (cfs_list_empty(&bat->bat_cli_hash[i]));
+ LASSERT (cfs_list_empty(&bat->bat_srv_hash[i]));
}
LIBCFS_FREE(bat->bat_cli_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
LIBCFS_FREE(bat->bat_srv_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
}
lstcon_test_t *test;
lstcon_batch_t *batch;
lstcon_ndlink_t *ndl;
- struct list_head *hash;
- struct list_head *head;
+ cfs_list_t *hash;
+ cfs_list_t *head;
test = (lstcon_test_t *)arg;
LASSERT (test != NULL);
if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
return -ENOMEM;
- if (list_empty(&ndl->ndl_link))
- list_add_tail(&ndl->ndl_link, head);
+ if (cfs_list_empty(&ndl->ndl_link))
+ cfs_list_add_tail(&ndl->ndl_link, head);
return 1;
}
static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up)
+lstcon_test_nodes_add(lstcon_test_t *test, cfs_list_t *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_group_t *grp;
int
lstcon_test_add(char *name, int type, int loop, int concur,
int dist, int span, char *src_name, char * dst_name,
- void *param, int paramlen, int *retp, struct list_head *result_up)
-
+ void *param, int paramlen, int *retp,
+ cfs_list_t *result_up)
{
lstcon_group_t *src_grp = NULL;
lstcon_group_t *dst_grp = NULL;
CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type, name);
/* add to test list anyway, so user can check what's going on */
- list_add_tail(&test->tes_link, &batch->bat_test_list);
+ cfs_list_add_tail(&test->tes_link, &batch->bat_test_list);
batch->bat_ntest ++;
test->tes_hdr.tsb_index = batch->bat_ntest;
transop == LST_TRANS_TSBSRVQRY);
/* positive errno, framework error code */
- if (copy_to_user(&ent_up->rpe_priv[0],
- &rep->bar_active, sizeof(rep->bar_active)))
+ if (cfs_copy_to_user(&ent_up->rpe_priv[0],
+ &rep->bar_active, sizeof(rep->bar_active)))
return -EFAULT;
return 0;
int
lstcon_test_batch_query(char *name, int testidx, int client,
- int timeout, struct list_head *result_up)
+ int timeout, cfs_list_t *result_up)
{
lstcon_rpc_trans_t *trans;
- struct list_head *translist;
- struct list_head *ndlist;
+ cfs_list_t *translist;
+ cfs_list_t *ndlist;
lstcon_tsb_hdr_t *hdr;
lstcon_batch_t *batch;
lstcon_test_t *test = NULL;
srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat));
lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat));
- if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
- copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
- copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
+ if (cfs_copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
+ cfs_copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
+ cfs_copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
return -EFAULT;
return 0;
}
int
-lstcon_ndlist_stat(struct list_head *ndlist,
- int timeout, struct list_head *result_up)
+lstcon_ndlist_stat(cfs_list_t *ndlist,
+ int timeout, cfs_list_t *result_up)
{
- struct list_head head;
+ cfs_list_t head;
lstcon_rpc_trans_t *trans;
int rc;
}
int
-lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
+lstcon_group_stat(char *grp_name, int timeout, cfs_list_t *result_up)
{
lstcon_group_t *grp;
int rc;
int
lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, struct list_head *result_up)
+ int timeout, cfs_list_t *result_up)
{
lstcon_ndlink_t *ndl;
lstcon_group_t *tmp;
}
for (i = 0 ; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
break;
}
}
int
-lstcon_debug_ndlist(struct list_head *ndlist,
- struct list_head *translist,
- int timeout, struct list_head *result_up)
+lstcon_debug_ndlist(cfs_list_t *ndlist,
+ cfs_list_t *translist,
+ int timeout, cfs_list_t *result_up)
{
lstcon_rpc_trans_t *trans;
int rc;
}
int
-lstcon_session_debug(int timeout, struct list_head *result_up)
+lstcon_session_debug(int timeout, cfs_list_t *result_up)
{
return lstcon_debug_ndlist(&console_session.ses_ndl_list,
NULL, timeout, result_up);
int
lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head *result_up)
+ int client, cfs_list_t *result_up)
{
lstcon_batch_t *bat;
int rc;
int
lstcon_group_debug(int timeout, char *name,
- struct list_head *result_up)
+ cfs_list_t *result_up)
{
lstcon_group_t *grp;
int rc;
int
lstcon_nodes_debug(int timeout,
int count, lnet_process_id_t *ids_up,
- struct list_head *result_up)
+ cfs_list_t *result_up)
{
lnet_process_id_t id;
lstcon_ndlink_t *ndl;
}
for (i = 0; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
break;
}
}
for (i = 0; i < LST_GLOBAL_HASHSIZE; i++) {
- LASSERT (list_empty(&console_session.ses_ndl_hash[i]));
+ LASSERT (cfs_list_empty(&console_session.ses_ndl_hash[i]));
}
rc = lstcon_batch_add(LST_DEFAULT_BATCH);
timeout;
strcpy(console_session.ses_name, name);
- if (copy_to_user(sid_up, &console_session.ses_id,
- sizeof(lst_sid_t)) == 0)
+ if (cfs_copy_to_user(sid_up, &console_session.ses_id,
+ sizeof(lst_sid_t)) == 0)
return rc;
lstcon_session_end();
lstcon_ndlist_ent_t *entp;
lstcon_ndlink_t *ndl;
int rc = 0;
-
+
if (console_session.ses_state != LST_SESSION_ACTIVE)
return -ESRCH;
lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
- if (copy_to_user(sid_up, &console_session.ses_id, sizeof(lst_sid_t)) ||
- copy_to_user(key_up, &console_session.ses_key, sizeof(int)) ||
- copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
- copy_to_user(name_up, console_session.ses_name, len))
+ if (cfs_copy_to_user(sid_up, &console_session.ses_id,
+ sizeof(lst_sid_t)) ||
+ cfs_copy_to_user(key_up, &console_session.ses_key, sizeof(int)) ||
+ cfs_copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
+ cfs_copy_to_user(name_up, console_session.ses_name, len))
rc = -EFAULT;
LIBCFS_FREE(entp, sizeof(*entp));
console_session.ses_force = 0;
/* destroy all batches */
- while (!list_empty(&console_session.ses_bat_list)) {
- bat = list_entry(console_session.ses_bat_list.next,
- lstcon_batch_t, bat_link);
+ while (!cfs_list_empty(&console_session.ses_bat_list)) {
+ bat = cfs_list_entry(console_session.ses_bat_list.next,
+ lstcon_batch_t, bat_link);
lstcon_batch_destroy(bat);
}
/* destroy all groups */
- while (!list_empty(&console_session.ses_grp_list)) {
- grp = list_entry(console_session.ses_grp_list.next,
- lstcon_group_t, grp_link);
+ while (!cfs_list_empty(&console_session.ses_grp_list)) {
+ grp = cfs_list_entry(console_session.ses_grp_list.next,
+ lstcon_group_t, grp_link);
LASSERT (grp->grp_ref == 1);
lstcon_group_put(grp);
}
/* all nodes should be released */
- LASSERT (list_empty(&console_session.ses_ndl_list));
+ LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
console_session.ses_shutdown = 0;
console_session.ses_expired = 0;
sfw_unpack_message(req);
- mutex_down(&console_session.ses_mutex);
+ cfs_mutex_down(&console_session.ses_mutex);
jrep->join_sid = console_session.ses_id;
goto out;
}
- list_add_tail(&grp->grp_link,
- &console_session.ses_grp_list);
+ cfs_list_add_tail(&grp->grp_link,
+ &console_session.ses_grp_list);
lstcon_group_addref(grp);
}
if (grp != NULL)
lstcon_group_put(grp);
- mutex_up(&console_session.ses_mutex);
+ cfs_mutex_up(&console_session.ses_mutex);
return rc;
}
console_session.ses_expired = 0;
console_session.ses_laststamp = cfs_time_current_sec();
- init_mutex(&console_session.ses_mutex);
+ cfs_init_mutex(&console_session.ses_mutex);
CFS_INIT_LIST_HEAD(&console_session.ses_ndl_list);
CFS_INIT_LIST_HEAD(&console_session.ses_grp_list);
CFS_INIT_LIST_HEAD(&console_session.ses_trans_list);
LIBCFS_ALLOC(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
if (console_session.ses_ndl_hash == NULL)
return -ENOMEM;
LASSERT (rc != -EBUSY);
if (rc != 0) {
LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
return rc;
}
srpc_remove_service(&lstcon_acceptor_service);
LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
srpc_wait_service_shutdown(&lstcon_acceptor_service);
libcfs_deregister_ioctl(&lstcon_ioctl_handler);
- mutex_down(&console_session.ses_mutex);
+ cfs_mutex_down(&console_session.ses_mutex);
srpc_shutdown_service(&lstcon_acceptor_service);
srpc_remove_service(&lstcon_acceptor_service);
- if (console_session.ses_state != LST_SESSION_NONE)
+ if (console_session.ses_state != LST_SESSION_NONE)
lstcon_session_end();
lstcon_rpc_module_fini();
- mutex_up(&console_session.ses_mutex);
+ cfs_mutex_up(&console_session.ses_mutex);
- LASSERT (list_empty(&console_session.ses_ndl_list));
- LASSERT (list_empty(&console_session.ses_grp_list));
- LASSERT (list_empty(&console_session.ses_bat_list));
- LASSERT (list_empty(&console_session.ses_trans_list));
+ LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
+ LASSERT (cfs_list_empty(&console_session.ses_grp_list));
+ LASSERT (cfs_list_empty(&console_session.ses_bat_list));
+ LASSERT (cfs_list_empty(&console_session.ses_trans_list));
for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT (list_empty(&console_session.ses_ndl_hash[i]));
+ LASSERT (cfs_list_empty(&console_session.ses_ndl_hash[i]));
}
LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
srpc_wait_service_shutdown(&lstcon_acceptor_service);
#include "conrpc.h"
typedef struct lstcon_node {
- lnet_process_id_t nd_id; /* id of the node */
- int nd_ref; /* reference count */
- int nd_state; /* state of the node */
- int nd_timeout; /* session timeout */
- cfs_time_t nd_stamp; /* timestamp of last replied RPC */
- struct lstcon_rpc nd_ping; /* ping rpc */
+ lnet_process_id_t nd_id; /* id of the node */
+ int nd_ref; /* reference count */
+ int nd_state; /* state of the node */
+ int nd_timeout; /* session timeout */
+ cfs_time_t nd_stamp; /* timestamp of last replied RPC */
+ struct lstcon_rpc nd_ping; /* ping rpc */
} lstcon_node_t; /*** node descriptor */
typedef struct {
- struct list_head ndl_link; /* chain on list */
- struct list_head ndl_hlink; /* chain on hash */
- lstcon_node_t *ndl_node; /* pointer to node */
+ cfs_list_t ndl_link; /* chain on list */
+ cfs_list_t ndl_hlink; /* chain on hash */
+ lstcon_node_t *ndl_node; /* pointer to node */
} lstcon_ndlink_t; /*** node link descriptor */
typedef struct {
- struct list_head grp_link; /* chain on global group list */
- int grp_ref; /* reference count */
- int grp_userland; /* has userland nodes */
- int grp_nnode; /* # of nodes */
- char grp_name[LST_NAME_SIZE]; /* group name */
+ cfs_list_t grp_link; /* chain on global group list */
+ int grp_ref; /* reference count */
+ int grp_userland; /* has userland nodes */
+ int grp_nnode; /* # of nodes */
+ char grp_name[LST_NAME_SIZE]; /* group name */
- struct list_head grp_trans_list; /* transaction list */
- struct list_head grp_ndl_list; /* nodes list */
- struct list_head grp_ndl_hash[0];/* hash table for nodes */
-} lstcon_group_t; /*** (alias of nodes) group descriptor */
+ cfs_list_t grp_trans_list; /* transaction list */
+ cfs_list_t grp_ndl_list; /* nodes list */
+ cfs_list_t grp_ndl_hash[0];/* hash table for nodes */
+} lstcon_group_t; /*** (alias of nodes) group descriptor */
#define LST_BATCH_IDLE 0xB0 /* idle batch */
#define LST_BATCH_RUNNING 0xB1 /* running batch */
typedef struct {
lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
- struct list_head bat_link; /* chain on session's batches list */
+ cfs_list_t bat_link; /* chain on session's batches list */
int bat_ntest; /* # of test */
int bat_state; /* state of the batch */
int bat_arg; /* parameter for run|stop, timeout for run, force for stop */
char bat_name[LST_NAME_SIZE]; /* name of batch */
- struct list_head bat_test_list; /* list head of tests (lstcon_test_t) */
- struct list_head bat_trans_list; /* list head of transaction */
- struct list_head bat_cli_list; /* list head of client nodes (lstcon_node_t) */
- struct list_head *bat_cli_hash; /* hash table of client nodes */
- struct list_head bat_srv_list; /* list head of server nodes */
- struct list_head *bat_srv_hash; /* hash table of server nodes */
-} lstcon_batch_t; /*** (tests ) batch descritptor */
+ cfs_list_t bat_test_list; /* list head of tests (lstcon_test_t) */
+ cfs_list_t bat_trans_list; /* list head of transaction */
+ cfs_list_t bat_cli_list; /* list head of client nodes (lstcon_node_t) */
+ cfs_list_t *bat_cli_hash; /* hash table of client nodes */
+ cfs_list_t bat_srv_list; /* list head of server nodes */
+ cfs_list_t *bat_srv_hash; /* hash table of server nodes */
+} lstcon_batch_t; /*** (tests ) batch descritptor */
typedef struct lstcon_test {
- lstcon_tsb_hdr_t tes_hdr; /* test batch header */
- struct list_head tes_link; /* chain on batch's tests list */
- lstcon_batch_t *tes_batch; /* pointer to batch */
-
- int tes_type; /* type of the test, i.e: bulk, ping */
- int tes_stop_onerr; /* stop on error */
- int tes_oneside; /* one-sided test */
- int tes_concur; /* concurrency */
- int tes_loop; /* loop count */
- int tes_dist; /* nodes distribution of target group */
- int tes_span; /* nodes span of target group */
- int tes_cliidx; /* client index, used for RPC creating */
-
- struct list_head tes_trans_list; /* transaction list */
- lstcon_group_t *tes_src_grp; /* group run the test */
- lstcon_group_t *tes_dst_grp; /* target group */
-
- int tes_paramlen; /* test parameter length */
- char tes_param[0]; /* test parameter */
-} lstcon_test_t; /*** a single test descriptor */
+ lstcon_tsb_hdr_t tes_hdr; /* test batch header */
+ cfs_list_t tes_link; /* chain on batch's tests list */
+ lstcon_batch_t *tes_batch; /* pointer to batch */
+
+ int tes_type; /* type of the test, i.e: bulk, ping */
+ int tes_stop_onerr; /* stop on error */
+ int tes_oneside; /* one-sided test */
+ int tes_concur; /* concurrency */
+ int tes_loop; /* loop count */
+ int tes_dist; /* nodes distribution of target group */
+ int tes_span; /* nodes span of target group */
+ int tes_cliidx; /* client index, used for RPC creating */
+
+ cfs_list_t tes_trans_list; /* transaction list */
+ lstcon_group_t *tes_src_grp; /* group run the test */
+ lstcon_group_t *tes_dst_grp; /* target group */
+
+ int tes_paramlen; /* test parameter length */
+ char tes_param[0]; /* test parameter */
+} lstcon_test_t; /*** a single test descriptor */
#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */
#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */
typedef struct {
- struct semaphore ses_mutex; /* lock for session, only one thread can enter session */
+ cfs_semaphore_t ses_mutex; /* lock for session, only one thread can enter session */
lst_sid_t ses_id; /* global session id */
int ses_key; /* local session key */
int ses_state; /* state of session */
stt_timer_t ses_ping_timer; /* timer for pinger */
lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
- struct list_head ses_trans_list; /* global list of transaction */
- struct list_head ses_grp_list; /* global list of groups */
- struct list_head ses_bat_list; /* global list of batches */
- struct list_head ses_ndl_list; /* global list of nodes */
- struct list_head *ses_ndl_hash; /* hash table of nodes */
+ cfs_list_t ses_trans_list; /* global list of transaction */
+ cfs_list_t ses_grp_list; /* global list of groups */
+ cfs_list_t ses_bat_list; /* global list of batches */
+ cfs_list_t ses_ndl_list; /* global list of nodes */
+ cfs_list_t *ses_ndl_hash; /* hash table of nodes */
- spinlock_t ses_rpc_lock; /* serialize */
- atomic_t ses_rpc_counter;/* # of initialized RPCs */
- struct list_head ses_rpc_freelist; /* idle console rpc */
+ cfs_spinlock_t ses_rpc_lock; /* serialize */
+ cfs_atomic_t ses_rpc_counter;/* # of initialized RPCs */
+ cfs_list_t ses_rpc_freelist; /* idle console rpc */
} lstcon_session_t; /*** session descriptor */
extern lstcon_session_t console_session;
return &console_session.ses_trans_stat;
}
-static inline struct list_head *
-lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
+static inline cfs_list_t *
+lstcon_id2hash (lnet_process_id_t id, cfs_list_t *hash)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
extern int lstcon_session_info(lst_sid_t *sid_up, int *key,
lstcon_ndlist_ent_t *entp, char *name_up, int len);
extern int lstcon_session_end(void);
-extern int lstcon_session_debug(int timeout, struct list_head *result_up);
+extern int lstcon_session_debug(int timeout, cfs_list_t *result_up);
extern int lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head *result_up);
+ int client, cfs_list_t *result_up);
extern int lstcon_group_debug(int timeout, char *name,
- struct list_head *result_up);
+ cfs_list_t *result_up);
extern int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
+ cfs_list_t *result_up);
extern int lstcon_group_add(char *name);
extern int lstcon_group_del(char *name);
extern int lstcon_group_clean(char *name, int args);
-extern int lstcon_group_refresh(char *name, struct list_head *result_up);
+extern int lstcon_group_refresh(char *name, cfs_list_t *result_up);
extern int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
+ cfs_list_t *result_up);
extern int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
+ cfs_list_t *result_up);
extern int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
extern int lstcon_group_list(int idx, int len, char *name_up);
extern int lstcon_batch_add(char *name);
-extern int lstcon_batch_run(char *name, int timeout, struct list_head *result_up);
-extern int lstcon_batch_stop(char *name, int force, struct list_head *result_up);
+extern int lstcon_batch_run(char *name, int timeout,
+ cfs_list_t *result_up);
+extern int lstcon_batch_stop(char *name, int force,
+ cfs_list_t *result_up);
extern int lstcon_test_batch_query(char *name, int testidx,
int client, int timeout,
- struct list_head *result_up);
+ cfs_list_t *result_up);
extern int lstcon_batch_del(char *name);
extern int lstcon_batch_list(int idx, int namelen, char *name_up);
extern int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
int server, int testidx, int *index_p,
int *ndent_p, lstcon_node_ent_t *dents_up);
extern int lstcon_group_stat(char *grp_name, int timeout,
- struct list_head *result_up);
+ cfs_list_t *result_up);
extern int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, struct list_head *result_up);
+ int timeout, cfs_list_t *result_up);
extern int lstcon_test_add(char *name, int type, int loop, int concur,
int dist, int span, char *src_name, char * dst_name,
- void *param, int paramlen, int *retp, struct list_head *result_up);
+ void *param, int paramlen, int *retp,
+ cfs_list_t *result_up);
#endif
-#endif
+#endif
__swab64s(&(lc).route_length); \
} while (0)
-#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
+#define sfw_test_active(t) (cfs_atomic_read(&(t)->tsi_nactive) != 0)
+#define sfw_batch_active(b) (cfs_atomic_read(&(b)->bat_nactive) != 0)
struct smoketest_framework {
- struct list_head fw_zombie_rpcs; /* RPCs to be recycled */
- struct list_head fw_zombie_sessions; /* stopping sessions */
- struct list_head fw_tests; /* registered test cases */
- atomic_t fw_nzombies; /* # zombie sessions */
- spinlock_t fw_lock; /* serialise */
+ cfs_list_t fw_zombie_rpcs; /* RPCs to be recycled */
+ cfs_list_t fw_zombie_sessions; /* stopping sessions */
+ cfs_list_t fw_tests; /* registered test cases */
+ cfs_atomic_t fw_nzombies; /* # zombie sessions */
+ cfs_spinlock_t fw_lock; /* serialise */
sfw_session_t *fw_session; /* _the_ session */
int fw_shuttingdown; /* shutdown in progress */
srpc_server_rpc_t *fw_active_srpc; /* running RPC */
tsc->tsc_cli_ops = cliops;
tsc->tsc_srv_service = service;
- list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
+ cfs_list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
return 0;
}
LASSERT (!sn->sn_timer_active);
sfw_data.fw_session = NULL;
- atomic_inc(&sfw_data.fw_nzombies);
- list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
+ cfs_atomic_inc(&sfw_data.fw_nzombies);
+ cfs_list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
sfw_test_case_t, tsc_list) {
srpc_abort_service(tsc->tsc_srv_service);
}
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
sfw_batch_t, bat_list) {
if (nactive != 0)
return; /* wait for active batches to stop */
- list_del_init(&sn->sn_list);
- spin_unlock(&sfw_data.fw_lock);
+ cfs_list_del_init(&sn->sn_list);
+ cfs_spin_unlock(&sfw_data.fw_lock);
sfw_destroy_session(sn);
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
return;
}
{
sfw_session_t *sn = data;
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
LASSERT (sn->sn_timer_active);
LASSERT (sn == sfw_data.fw_session);
sn->sn_timer_active = 0;
sfw_deactivate_session();
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return;
}
memset(sn, 0, sizeof(sfw_session_t));
CFS_INIT_LIST_HEAD(&sn->sn_list);
CFS_INIT_LIST_HEAD(&sn->sn_batches);
- atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
- atomic_set(&sn->sn_brw_errors, 0);
- atomic_set(&sn->sn_ping_errors, 0);
+ cfs_atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
+ cfs_atomic_set(&sn->sn_brw_errors, 0);
+ cfs_atomic_set(&sn->sn_ping_errors, 0);
strncpy(&sn->sn_name[0], name, LST_NAME_SIZE);
sn->sn_timer_active = 0;
sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
{
LASSERT (rpc->crpc_bulk.bk_niov == 0);
- LASSERT (list_empty(&rpc->crpc_list));
- LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT (cfs_list_empty(&rpc->crpc_list));
+ LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif
swi_state2str(rpc->crpc_wi.wi_state),
rpc->crpc_aborted, rpc->crpc_status);
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
/* my callers must finish all RPCs before shutting me down */
LASSERT (!sfw_data.fw_shuttingdown);
- list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
+ cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return;
}
bat->bat_error = 0;
bat->bat_session = sn;
bat->bat_id = bid;
- atomic_set(&bat->bat_nactive, 0);
+ cfs_atomic_set(&bat->bat_nactive, 0);
CFS_INIT_LIST_HEAD(&bat->bat_tests);
- list_add_tail(&bat->bat_list, &sn->sn_batches);
+ cfs_list_add_tail(&bat->bat_list, &sn->sn_batches);
return bat;
}
srpc_get_counters(&reply->str_rpc);
- cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
- cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
- cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
+ cnt->brw_errors = cfs_atomic_read(&sn->sn_brw_errors);
+ cnt->ping_errors = cfs_atomic_read(&sn->sn_ping_errors);
+ cnt->zombie_sessions = cfs_atomic_read(&sfw_data.fw_nzombies);
cnt->active_tests = cnt->active_batches = 0;
cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
sfw_batch_t, bat_list) {
- int n = atomic_read(&bat->bat_nactive);
+ int n = cfs_atomic_read(&bat->bat_nactive);
if (n > 0) {
cnt->active_batches++;
reply->mksn_timeout = sn->sn_timeout;
if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
- atomic_inc(&sn->sn_refcount);
+ cfs_atomic_inc(&sn->sn_refcount);
return 0;
}
sfw_init_session(sn, request->mksn_sid, &request->mksn_name[0]);
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
sfw_deactivate_session();
LASSERT (sfw_data.fw_session == NULL);
sfw_data.fw_session = sn;
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
reply->mksn_status = 0;
reply->mksn_sid = sn->sn_id;
return 0;
}
- if (!atomic_dec_and_test(&sn->sn_refcount)) {
+ if (!cfs_atomic_dec_and_test(&sn->sn_refcount)) {
reply->rmsn_status = 0;
return 0;
}
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
sfw_deactivate_session();
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
reply->rmsn_status = 0;
reply->rmsn_sid = LST_INVALID_SID;
sfw_test_instance_t *tsi = tsu->tsu_instance;
/* Called with hold of tsi->tsi_lock */
- LASSERT (list_empty(&rpc->crpc_list));
- list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+ LASSERT (cfs_list_empty(&rpc->crpc_list));
+ cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
}
int
tsi->tsi_ops->tso_fini(tsi);
LASSERT (!tsi->tsi_stopping);
- LASSERT (list_empty(&tsi->tsi_active_rpcs));
+ LASSERT (cfs_list_empty(&tsi->tsi_active_rpcs));
LASSERT (!sfw_test_active(tsi));
- while (!list_empty(&tsi->tsi_units)) {
- tsu = list_entry(tsi->tsi_units.next,
- sfw_test_unit_t, tsu_list);
- list_del(&tsu->tsu_list);
+ while (!cfs_list_empty(&tsi->tsi_units)) {
+ tsu = cfs_list_entry(tsi->tsi_units.next,
+ sfw_test_unit_t, tsu_list);
+ cfs_list_del(&tsu->tsu_list);
LIBCFS_FREE(tsu, sizeof(*tsu));
}
- while (!list_empty(&tsi->tsi_free_rpcs)) {
- rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- list_del(&rpc->crpc_list);
+ while (!cfs_list_empty(&tsi->tsi_free_rpcs)) {
+ rpc = cfs_list_entry(tsi->tsi_free_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ cfs_list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
}
sfw_test_instance_t *tsi;
LASSERT (!sfw_batch_active(tsb));
- LASSERT (list_empty(&tsb->bat_list));
+ LASSERT (cfs_list_empty(&tsb->bat_list));
- while (!list_empty(&tsb->bat_tests)) {
- tsi = list_entry(tsb->bat_tests.next,
- sfw_test_instance_t, tsi_list);
- list_del_init(&tsi->tsi_list);
+ while (!cfs_list_empty(&tsb->bat_tests)) {
+ tsi = cfs_list_entry(tsb->bat_tests.next,
+ sfw_test_instance_t, tsi_list);
+ cfs_list_del_init(&tsi->tsi_list);
sfw_destroy_test_instance(tsi);
}
{
sfw_batch_t *batch;
- LASSERT (list_empty(&sn->sn_list));
+ LASSERT (cfs_list_empty(&sn->sn_list));
LASSERT (sn != sfw_data.fw_session);
- while (!list_empty(&sn->sn_batches)) {
- batch = list_entry(sn->sn_batches.next,
- sfw_batch_t, bat_list);
- list_del_init(&batch->bat_list);
+ while (!cfs_list_empty(&sn->sn_batches)) {
+ batch = cfs_list_entry(sn->sn_batches.next,
+ sfw_batch_t, bat_list);
+ cfs_list_del_init(&batch->bat_list);
sfw_destroy_batch(batch);
}
LIBCFS_FREE(sn, sizeof(*sn));
- atomic_dec(&sfw_data.fw_nzombies);
+ cfs_atomic_dec(&sfw_data.fw_nzombies);
return;
}
}
memset(tsi, 0, sizeof(*tsi));
- spin_lock_init(&tsi->tsi_lock);
- atomic_set(&tsi->tsi_nactive, 0);
+ cfs_spin_lock_init(&tsi->tsi_lock);
+ cfs_atomic_set(&tsi->tsi_nactive, 0);
CFS_INIT_LIST_HEAD(&tsi->tsi_units);
CFS_INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
CFS_INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
if (!tsi->tsi_is_client) {
/* it's test server, just add it to tsb */
- list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
+ cfs_list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
return 0;
}
tsu->tsu_dest = id;
tsu->tsu_instance = tsi;
tsu->tsu_private = NULL;
- list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
+ cfs_list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
}
}
rc = tsi->tsi_ops->tso_init(tsi);
if (rc == 0) {
- list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
+ cfs_list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
return 0;
}
LASSERT (sfw_test_active(tsi));
- if (!atomic_dec_and_test(&tsi->tsi_nactive))
+ if (!cfs_atomic_dec_and_test(&tsi->tsi_nactive))
return;
-
+
/* the test instance is done */
- spin_lock(&tsi->tsi_lock);
+ cfs_spin_lock(&tsi->tsi_lock);
tsi->tsi_stopping = 0;
- spin_unlock(&tsi->tsi_lock);
+ cfs_spin_unlock(&tsi->tsi_lock);
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
- if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
+ if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
sn == sfw_data.fw_session) { /* sn also active */
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return;
}
-
- LASSERT (!list_empty(&sn->sn_list)); /* I'm a zombie! */
+
+ LASSERT (!cfs_list_empty(&sn->sn_list)); /* I'm a zombie! */
cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
sfw_batch_t, bat_list) {
if (sfw_batch_active(tsb)) {
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return;
}
}
- list_del_init(&sn->sn_list);
- spin_unlock(&sfw_data.fw_lock);
+ cfs_list_del_init(&sn->sn_list);
+ cfs_spin_unlock(&sfw_data.fw_lock);
sfw_destroy_session(sn);
return;
tsi->tsi_ops->tso_done_rpc(tsu, rpc);
- spin_lock(&tsi->tsi_lock);
+ cfs_spin_lock(&tsi->tsi_lock);
LASSERT (sfw_test_active(tsi));
- LASSERT (!list_empty(&rpc->crpc_list));
+ LASSERT (!cfs_list_empty(&rpc->crpc_list));
- list_del_init(&rpc->crpc_list);
+ cfs_list_del_init(&rpc->crpc_list);
/* batch is stopping or loop is done or get error */
if (tsi->tsi_stopping ||
/* dec ref for poster */
srpc_client_rpc_decref(rpc);
- spin_unlock(&tsi->tsi_lock);
+ cfs_spin_unlock(&tsi->tsi_lock);
if (!done) {
swi_schedule_workitem(&tsu->tsu_worker);
{
srpc_client_rpc_t *rpc = NULL;
sfw_test_instance_t *tsi = tsu->tsu_instance;
-
- spin_lock(&tsi->tsi_lock);
+
+ cfs_spin_lock(&tsi->tsi_lock);
LASSERT (sfw_test_active(tsi));
- if (!list_empty(&tsi->tsi_free_rpcs)) {
+ if (!cfs_list_empty(&tsi->tsi_free_rpcs)) {
/* pick request from buffer */
- rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ rpc = cfs_list_entry(tsi->tsi_free_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
LASSERT (nblk == rpc->crpc_bulk.bk_niov);
- list_del_init(&rpc->crpc_list);
+ cfs_list_del_init(&rpc->crpc_list);
srpc_init_client_rpc(rpc, peer, tsi->tsi_service, nblk,
blklen, sfw_test_rpc_done,
sfw_test_rpc_fini, tsu);
}
- spin_unlock(&tsi->tsi_lock);
+ cfs_spin_unlock(&tsi->tsi_lock);
if (rpc == NULL)
rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
LASSERT (rpc != NULL);
- spin_lock(&tsi->tsi_lock);
+ cfs_spin_lock(&tsi->tsi_lock);
if (tsi->tsi_stopping) {
- list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
- spin_unlock(&tsi->tsi_lock);
+ cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+ cfs_spin_unlock(&tsi->tsi_lock);
goto test_done;
}
if (tsu->tsu_loop > 0)
tsu->tsu_loop--;
- list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
- spin_unlock(&tsi->tsi_lock);
+ cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
+ cfs_spin_unlock(&tsi->tsi_lock);
rpc->crpc_timeout = rpc_timeout;
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
srpc_post_rpc(rpc);
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
return 0;
test_done:
if (sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch already active: "LPU64" (%d)\n",
- tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
+ tsb->bat_id.bat_id, cfs_atomic_read(&tsb->bat_nactive));
return 0;
}
LASSERT (!tsi->tsi_stopping);
LASSERT (!sfw_test_active(tsi));
- atomic_inc(&tsb->bat_nactive);
+ cfs_atomic_inc(&tsb->bat_nactive);
cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
sfw_test_unit_t, tsu_list) {
- atomic_inc(&tsi->tsi_nactive);
+ cfs_atomic_inc(&tsi->tsi_nactive);
tsu->tsu_loop = tsi->tsi_loop;
wi = &tsu->tsu_worker;
swi_init_workitem(wi, tsu, sfw_run_test);
cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
sfw_test_instance_t, tsi_list) {
- spin_lock(&tsi->tsi_lock);
+ cfs_spin_lock(&tsi->tsi_lock);
if (!tsi->tsi_is_client ||
!sfw_test_active(tsi) || tsi->tsi_stopping) {
- spin_unlock(&tsi->tsi_lock);
+ cfs_spin_unlock(&tsi->tsi_lock);
continue;
}
tsi->tsi_stopping = 1;
if (!force) {
- spin_unlock(&tsi->tsi_lock);
+ cfs_spin_unlock(&tsi->tsi_lock);
continue;
}
/* abort launched rpcs in the test */
cfs_list_for_each_entry_typed (rpc, &tsi->tsi_active_rpcs,
srpc_client_rpc_t, crpc_list) {
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
srpc_abort_rpc(rpc, -EINTR);
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
}
- spin_unlock(&tsi->tsi_lock);
+ cfs_spin_unlock(&tsi->tsi_lock);
}
return 0;
return -EINVAL;
if (testidx == 0) {
- reply->bar_active = atomic_read(&tsb->bat_nactive);
+ reply->bar_active = cfs_atomic_read(&tsb->bat_nactive);
return 0;
}
if (testidx-- > 1)
continue;
- reply->bar_active = atomic_read(&tsi->tsi_nactive);
+ reply->bar_active = cfs_atomic_read(&tsi->tsi_nactive);
return 0;
}
LASSERT (sfw_data.fw_active_srpc == NULL);
LASSERT (sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
if (sfw_data.fw_shuttingdown) {
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return -ESHUTDOWN;
}
if (sfw_del_session_timer() != 0) {
CERROR ("Dropping RPC (%s) from %s: racing with expiry timer.",
sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return -EAGAIN;
}
sfw_data.fw_active_srpc = rpc;
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
sfw_unpack_message(request);
LASSERT (request->msg_type == srpc_service2request(sv->sv_id));
}
rpc->srpc_done = sfw_server_rpc_done;
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
#ifdef __KERNEL__
if (!sfw_data.fw_shuttingdown)
#endif
sfw_data.fw_active_srpc = NULL;
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return rc;
}
LASSERT (sfw_data.fw_active_srpc == NULL);
LASSERT (rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
if (status != 0) {
CERROR ("Bulk transfer failed for RPC: "
"service %s, peer %s, status %d\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return -EIO;
}
if (sfw_data.fw_shuttingdown) {
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return -ESHUTDOWN;
}
if (sfw_del_session_timer() != 0) {
CERROR ("Dropping RPC (%s) from %s: racing with expiry timer",
sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return -EAGAIN;
}
sfw_data.fw_active_srpc = rpc;
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
rc = sfw_add_test(rpc);
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
#ifdef __KERNEL__
if (!sfw_data.fw_shuttingdown)
#endif
sfw_data.fw_active_srpc = NULL;
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
return rc;
}
{
srpc_client_rpc_t *rpc;
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
LASSERT (!sfw_data.fw_shuttingdown);
LASSERT (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
- rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- list_del(&rpc->crpc_list);
- spin_unlock(&sfw_data.fw_lock);
+ if (nbulkiov == 0 && !cfs_list_empty(&sfw_data.fw_zombie_rpcs)) {
+ rpc = cfs_list_entry(sfw_data.fw_zombie_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ cfs_list_del(&rpc->crpc_list);
+ cfs_spin_unlock(&sfw_data.fw_lock);
srpc_init_client_rpc(rpc, peer, service, 0, 0,
done, sfw_client_rpc_fini, priv);
return rpc;
}
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
rpc = srpc_create_client_rpc(peer, service, nbulkiov, bulklen, done,
nbulkiov != 0 ? NULL : sfw_client_rpc_fini,
void
sfw_abort_rpc (srpc_client_rpc_t *rpc)
{
- LASSERT (atomic_read(&rpc->crpc_refcount) > 0);
+ LASSERT (cfs_atomic_read(&rpc->crpc_refcount) > 0);
LASSERT (rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
srpc_abort_rpc(rpc, -EINTR);
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
return;
}
void
sfw_post_rpc (srpc_client_rpc_t *rpc)
{
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
LASSERT (!rpc->crpc_closed);
LASSERT (!rpc->crpc_aborted);
- LASSERT (list_empty(&rpc->crpc_list));
+ LASSERT (cfs_list_empty(&rpc->crpc_list));
LASSERT (!sfw_data.fw_shuttingdown);
rpc->crpc_timeout = rpc_timeout;
srpc_post_rpc(rpc);
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
return;
}
sfw_data.fw_session = NULL;
sfw_data.fw_active_srpc = NULL;
- spin_lock_init(&sfw_data.fw_lock);
- atomic_set(&sfw_data.fw_nzombies, 0);
+ cfs_spin_lock_init(&sfw_data.fw_lock);
+ cfs_atomic_set(&sfw_data.fw_nzombies, 0);
CFS_INIT_LIST_HEAD(&sfw_data.fw_tests);
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
}
/* about to sfw_shutdown, no need to add buffer */
- if (error) continue;
+ if (error) continue;
rc = srpc_service_add_buffers(sv, SFW_POST_BUFFERS);
if (rc != SFW_POST_BUFFERS) {
sfw_test_case_t *tsc;
int i;
- spin_lock(&sfw_data.fw_lock);
+ cfs_spin_lock(&sfw_data.fw_lock);
sfw_data.fw_shuttingdown = 1;
#ifdef __KERNEL__
"waiting for session timer to explode.\n");
sfw_deactivate_session();
- lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
+ lst_wait_until(cfs_atomic_read(&sfw_data.fw_nzombies) == 0,
sfw_data.fw_lock,
"waiting for %d zombie sessions to die.\n",
- atomic_read(&sfw_data.fw_nzombies));
+ cfs_atomic_read(&sfw_data.fw_nzombies));
- spin_unlock(&sfw_data.fw_lock);
+ cfs_spin_unlock(&sfw_data.fw_lock);
for (i = 0; ; i++) {
sv = &sfw_services[i];
srpc_remove_service(sv);
}
- while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
+ while (!cfs_list_empty(&sfw_data.fw_zombie_rpcs)) {
srpc_client_rpc_t *rpc;
- rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- list_del(&rpc->crpc_list);
+ rpc = cfs_list_entry(sfw_data.fw_zombie_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ cfs_list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
}
srpc_wait_service_shutdown(sv);
}
- while (!list_empty(&sfw_data.fw_tests)) {
- tsc = list_entry(sfw_data.fw_tests.next,
- sfw_test_case_t, tsc_list);
-
+ while (!cfs_list_empty(&sfw_data.fw_tests)) {
+ tsc = cfs_list_entry(sfw_data.fw_tests.next,
+ sfw_test_case_t, tsc_list);
+
srpc_wait_service_shutdown(tsc->tsc_srv_service);
- list_del(&tsc->tsc_list);
+ cfs_list_del(&tsc->tsc_list);
LIBCFS_FREE(tsc, sizeof(*tsc));
}
#define LST_PING_TEST_MAGIC 0xbabeface
typedef struct {
- spinlock_t pnd_lock; /* serialize */
+ cfs_spinlock_t pnd_lock; /* serialize */
int pnd_counter; /* sequence counter */
} lst_ping_data_t;
{
LASSERT (tsi->tsi_is_client);
- spin_lock_init(&lst_ping_data.pnd_lock);
+ cfs_spin_lock_init(&lst_ping_data.pnd_lock);
lst_ping_data.pnd_counter = 0;
return 0;
LASSERT (sn != NULL);
LASSERT (tsi->tsi_is_client);
- errors = atomic_read(&sn->sn_ping_errors);
+ errors = cfs_atomic_read(&sn->sn_ping_errors);
if (errors)
CWARN ("%d pings have failed.\n", errors);
else
req->pnr_magic = LST_PING_TEST_MAGIC;
- spin_lock(&lst_ping_data.pnd_lock);
+ cfs_spin_lock(&lst_ping_data.pnd_lock);
req->pnr_seq = lst_ping_data.pnd_counter ++;
- spin_unlock(&lst_ping_data.pnd_lock);
+ cfs_spin_unlock(&lst_ping_data.pnd_lock);
cfs_fs_timeval(&tv);
req->pnr_time_sec = tv.tv_sec;
if (rpc->crpc_status != 0) {
if (!tsi->tsi_stopping) /* rpc could have been aborted */
- atomic_inc(&sn->sn_ping_errors);
+ cfs_atomic_inc(&sn->sn_ping_errors);
CERROR ("Unable to ping %s (%d): %d\n",
libcfs_id2str(rpc->crpc_dest),
reqst->pnr_seq, rpc->crpc_status);
if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
rpc->crpc_status = -EBADMSG;
- atomic_inc(&sn->sn_ping_errors);
+ cfs_atomic_inc(&sn->sn_ping_errors);
CERROR ("Bad magic %u from %s, %u expected.\n",
reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
LST_PING_TEST_MAGIC);
if (reply->pnr_seq != reqst->pnr_seq) {
rpc->crpc_status = -EBADMSG;
- atomic_inc(&sn->sn_ping_errors);
+ cfs_atomic_inc(&sn->sn_ping_errors);
CERROR ("Bad seq %u from %s, %u expected.\n",
reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
reqst->pnr_seq);
} srpc_state_t;
struct smoketest_rpc {
- spinlock_t rpc_glock; /* global lock */
+ cfs_spinlock_t rpc_glock; /* global lock */
srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
srpc_state_t rpc_state;
void srpc_get_counters (srpc_counters_t *cnt)
{
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
*cnt = srpc_data.rpc_counters;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
}
void srpc_set_counters (const srpc_counters_t *cnt)
{
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters = *cnt;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
}
void
{
__u64 id;
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
id = srpc_data.rpc_matchbits++;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
return id;
}
LASSERT (sv->sv_concur > 0);
LASSERT (0 <= id && id <= SRPC_SERVICE_MAX_ID);
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
if (srpc_data.rpc_services[id] != NULL) {
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
return -EBUSY;
}
srpc_data.rpc_services[id] = sv;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
sv->sv_nprune = 0;
sv->sv_nposted_msg = 0;
sv->sv_shuttingdown = 0;
- spin_lock_init(&sv->sv_lock);
+ cfs_spin_lock_init(&sv->sv_lock);
CFS_INIT_LIST_HEAD(&sv->sv_free_rpcq);
CFS_INIT_LIST_HEAD(&sv->sv_active_rpcq);
CFS_INIT_LIST_HEAD(&sv->sv_posted_msgq);
LIBCFS_ALLOC(rpc, sizeof(*rpc));
if (rpc == NULL) goto enomem;
- list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
+ cfs_list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
}
CDEBUG (D_NET, "Adding service: id %d, name %s, concurrency %d\n",
return 0;
enomem:
- while (!list_empty(&sv->sv_free_rpcq)) {
- rpc = list_entry(sv->sv_free_rpcq.next,
- srpc_server_rpc_t, srpc_list);
- list_del(&rpc->srpc_list);
+ while (!cfs_list_empty(&sv->sv_free_rpcq)) {
+ rpc = cfs_list_entry(sv->sv_free_rpcq.next,
+ srpc_server_rpc_t, srpc_list);
+ cfs_list_del(&rpc->srpc_list);
LIBCFS_FREE(rpc, sizeof(*rpc));
}
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_services[id] = NULL;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
return -ENOMEM;
}
{
int id = sv->sv_id;
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
if (srpc_data.rpc_services[id] != sv) {
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
return -ENOENT;
}
srpc_data.rpc_services[id] = NULL;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
return 0;
}
LASSERT (!sv->sv_shuttingdown);
LNetInvalidateHandle(&buf->buf_mdh);
- list_add(&buf->buf_list, &sv->sv_posted_msgq);
+ cfs_list_add(&buf->buf_list, &sv->sv_posted_msgq);
sv->sv_nposted_msg++;
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
rc = srpc_post_passive_rqtbuf(sv->sv_id, msg, sizeof(*msg),
&buf->buf_mdh, &sv->sv_ev);
* msg and its event handler has been called. So we must add
* buf to sv_posted_msgq _before_ dropping sv_lock */
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
if (rc == 0) {
if (sv->sv_shuttingdown) {
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
/* srpc_shutdown_service might have tried to unlink me
* when my buf_mdh was still invalid */
LNetMDUnlink(buf->buf_mdh);
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
}
return 0;
}
sv->sv_nposted_msg--;
if (sv->sv_shuttingdown) return rc;
- list_del(&buf->buf_list);
+ cfs_list_del(&buf->buf_list);
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
LIBCFS_FREE(buf, sizeof(*buf));
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
return rc;
}
LIBCFS_ALLOC(buf, sizeof(*buf));
if (buf == NULL) break;
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
rc = srpc_service_post_buffer(sv, buf);
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
if (rc != 0) break;
}
LASSERTF (nbuffer > 0,
"nbuffer must be positive: %d\n", nbuffer);
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
LASSERT (sv->sv_nprune >= 0);
LASSERT (!sv->sv_shuttingdown);
sv->sv_nprune += nbuffer;
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
return;
}
srpc_server_rpc_t *rpc;
srpc_buffer_t *buf;
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
LASSERT (sv->sv_shuttingdown); /* srpc_shutdown_service called */
- if (sv->sv_nposted_msg != 0 || !list_empty(&sv->sv_active_rpcq)) {
+ if (sv->sv_nposted_msg != 0 || !cfs_list_empty(&sv->sv_active_rpcq)) {
CDEBUG (D_NET,
"waiting for %d posted buffers to unlink and "
"in-flight RPCs to die.\n",
sv->sv_nposted_msg);
- if (!list_empty(&sv->sv_active_rpcq)) {
- rpc = list_entry(sv->sv_active_rpcq.next,
- srpc_server_rpc_t, srpc_list);
+ if (!cfs_list_empty(&sv->sv_active_rpcq)) {
+ rpc = cfs_list_entry(sv->sv_active_rpcq.next,
+ srpc_server_rpc_t, srpc_list);
CDEBUG (D_NETERROR,
"Active RPC %p on shutdown: sv %s, peer %s, "
"wi %s scheduled %d running %d, "
rpc->srpc_ev.ev_lnet);
}
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
return 0;
}
- spin_unlock(&sv->sv_lock); /* no lock needed from now on */
+ cfs_spin_unlock(&sv->sv_lock); /* no lock needed from now on */
for (;;) {
- struct list_head *q;
+ cfs_list_t *q;
- if (!list_empty(&sv->sv_posted_msgq))
+ if (!cfs_list_empty(&sv->sv_posted_msgq))
q = &sv->sv_posted_msgq;
- else if (!list_empty(&sv->sv_blocked_msgq))
+ else if (!cfs_list_empty(&sv->sv_blocked_msgq))
q = &sv->sv_blocked_msgq;
else
break;
- buf = list_entry(q->next, srpc_buffer_t, buf_list);
- list_del(&buf->buf_list);
+ buf = cfs_list_entry(q->next, srpc_buffer_t, buf_list);
+ cfs_list_del(&buf->buf_list);
LIBCFS_FREE(buf, sizeof(*buf));
}
- while (!list_empty(&sv->sv_free_rpcq)) {
- rpc = list_entry(sv->sv_free_rpcq.next,
- srpc_server_rpc_t, srpc_list);
- list_del(&rpc->srpc_list);
+ while (!cfs_list_empty(&sv->sv_free_rpcq)) {
+ rpc = cfs_list_entry(sv->sv_free_rpcq.next,
+ srpc_server_rpc_t, srpc_list);
+ cfs_list_del(&rpc->srpc_list);
LIBCFS_FREE(rpc, sizeof(*rpc));
}
sv->sv_nprune--;
free:
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
LIBCFS_FREE(buf, sizeof(*buf));
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
}
/* called with srpc_service_t::sv_lock held */
{
srpc_server_rpc_t *rpc;
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
sv->sv_id, sv->sv_name);
/* schedule in-flight RPCs to notice the abort, NB:
* racing with incoming RPCs; complete fix should make test
* RPCs carry session ID in its headers */
- list_for_each_entry (rpc, &sv->sv_active_rpcq, srpc_list) {
+ cfs_list_for_each_entry (rpc, &sv->sv_active_rpcq, srpc_list) {
rpc->srpc_aborted = 1;
srpc_schedule_server_rpc(rpc);
}
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
return;
}
srpc_server_rpc_t *rpc;
srpc_buffer_t *buf;
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
sv->sv_id, sv->sv_name);
srpc_schedule_server_rpc(rpc);
}
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
/* OK to traverse sv_posted_msgq without lock, since no one
* touches sv_posted_msgq now */
swi_state2str(rpc->srpc_wi.wi_state), status);
if (status != 0) {
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters.rpcs_dropped++;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
}
if (rpc->srpc_done != NULL)
(*rpc->srpc_done) (rpc);
LASSERT (rpc->srpc_bulk == NULL);
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
if (rpc->srpc_reqstbuf != NULL) {
/* NB might drop sv_lock in srpc_service_recycle_buffer, but
rpc->srpc_reqstbuf = NULL;
}
- list_del(&rpc->srpc_list); /* from sv->sv_active_rpcq */
+ cfs_list_del(&rpc->srpc_list); /* from sv->sv_active_rpcq */
/*
* No one can schedule me now since:
LASSERT (rpc->srpc_ev.ev_fired);
swi_kill_workitem(&rpc->srpc_wi);
- if (!sv->sv_shuttingdown && !list_empty(&sv->sv_blocked_msgq)) {
- buffer = list_entry(sv->sv_blocked_msgq.next,
+ if (!sv->sv_shuttingdown && !cfs_list_empty(&sv->sv_blocked_msgq)) {
+ buffer = cfs_list_entry(sv->sv_blocked_msgq.next,
srpc_buffer_t, buf_list);
- list_del(&buffer->buf_list);
+ cfs_list_del(&buffer->buf_list);
srpc_init_server_rpc(rpc, sv, buffer);
- list_add_tail(&rpc->srpc_list, &sv->sv_active_rpcq);
+ cfs_list_add_tail(&rpc->srpc_list, &sv->sv_active_rpcq);
srpc_schedule_server_rpc(rpc);
} else {
- list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
+ cfs_list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
}
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
return;
}
LASSERT (wi == &rpc->srpc_wi);
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
if (sv->sv_shuttingdown || rpc->srpc_aborted) {
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
if (rpc->srpc_bulk != NULL)
LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
return 0;
}
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
switch (wi->wi_state) {
default:
rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
rpc->crpc_timeout);
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
rpc->crpc_timeout = 0;
srpc_abort_rpc(rpc, -ETIMEDOUT);
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters.rpcs_expired++;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
return;
}
#ifdef __KERNEL__
/* timer detonated, wait for it to explode */
while (rpc->crpc_timeout != 0) {
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
cfs_schedule();
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
}
#else
LBUG(); /* impossible in single-threaded runtime */
LASSERT (status != 0 || wi->wi_state == SWI_STATE_DONE);
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
rpc->crpc_closed = 1;
if (rpc->crpc_status == 0)
LASSERT (!srpc_event_pending(rpc));
swi_kill_workitem(wi);
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
(*rpc->crpc_done) (rpc);
return;
LASSERT (rpc != NULL);
LASSERT (wi == &rpc->crpc_wi);
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
if (rpc->crpc_aborted) {
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
goto abort;
}
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
switch (wi->wi_state) {
default:
}
if (rc != 0) {
- spin_lock(&rpc->crpc_lock);
+ cfs_spin_lock(&rpc->crpc_lock);
srpc_abort_rpc(rpc, rc);
- spin_unlock(&rpc->crpc_lock);
+ cfs_spin_unlock(&rpc->crpc_lock);
}
abort:
LASSERT (buffer != NULL);
rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
if (!sv->sv_shuttingdown &&
sv->sv_id > SRPC_FRAMEWORK_SERVICE_MAX_ID) {
rpc->srpc_reqstbuf = NULL;
}
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
ev->ev_fired = 0;
ev->ev_data = rpc;
srpc_msg_t *msg;
srpc_msg_type_t type;
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
if (ev->status != 0) {
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters.errors++;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
}
rpcev->ev_lnet = ev->type;
LBUG ();
case SRPC_REQUEST_SENT:
if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters.rpcs_sent++;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
}
case SRPC_REPLY_RCVD:
case SRPC_BULK_REQ_RCVD:
LBUG ();
}
- spin_lock(&crpc->crpc_lock);
+ cfs_spin_lock(&crpc->crpc_lock);
LASSERT (rpcev->ev_fired == 0);
rpcev->ev_fired = 1;
-EINTR : ev->status;
swi_schedule_workitem(&crpc->crpc_wi);
- spin_unlock(&crpc->crpc_lock);
+ cfs_spin_unlock(&crpc->crpc_lock);
break;
case SRPC_REQUEST_RCVD:
LASSERT (rpcev == &sv->sv_ev);
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
LASSERT (ev->unlinked);
LASSERT (ev->type == LNET_EVENT_PUT ||
if (sv->sv_shuttingdown) {
/* Leave buffer on sv->sv_posted_msgq since
* srpc_finish_service needs to traverse it. */
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
break;
}
- list_del(&buffer->buf_list); /* from sv->sv_posted_msgq */
+ cfs_list_del(&buffer->buf_list); /* from sv->sv_posted_msgq */
msg = &buffer->buf_msg;
type = srpc_service2request(sv->sv_id);
msg->msg_magic = 0;
}
- if (!list_empty(&sv->sv_free_rpcq)) {
- srpc = list_entry(sv->sv_free_rpcq.next,
- srpc_server_rpc_t, srpc_list);
- list_del(&srpc->srpc_list);
+ if (!cfs_list_empty(&sv->sv_free_rpcq)) {
+ srpc = cfs_list_entry(sv->sv_free_rpcq.next,
+ srpc_server_rpc_t, srpc_list);
+ cfs_list_del(&srpc->srpc_list);
srpc_init_server_rpc(srpc, sv, buffer);
- list_add_tail(&srpc->srpc_list, &sv->sv_active_rpcq);
+ cfs_list_add_tail(&srpc->srpc_list,
+ &sv->sv_active_rpcq);
srpc_schedule_server_rpc(srpc);
} else {
- list_add_tail(&buffer->buf_list, &sv->sv_blocked_msgq);
+ cfs_list_add_tail(&buffer->buf_list,
+ &sv->sv_blocked_msgq);
}
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters.rpcs_rcvd++;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
break;
case SRPC_BULK_GET_RPLD:
case SRPC_BULK_PUT_SENT:
if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
srpc_data.rpc_counters.bulk_get += ev->mlength;
else
srpc_data.rpc_counters.bulk_put += ev->mlength;
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
}
case SRPC_REPLY_SENT:
srpc = rpcev->ev_data;
LASSERT (rpcev == &srpc->srpc_ev);
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
rpcev->ev_fired = 1;
rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
-EINTR : ev->status;
srpc_schedule_server_rpc(srpc);
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
break;
}
int rc;
memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
- spin_lock_init(&srpc_data.rpc_glock);
+ cfs_spin_lock_init(&srpc_data.rpc_glock);
/* 1 second pause to avoid timestamp reuse */
cfs_pause(cfs_time_seconds(1));
default:
LBUG ();
case SRPC_STATE_RUNNING:
- spin_lock(&srpc_data.rpc_glock);
+ cfs_spin_lock(&srpc_data.rpc_glock);
for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
srpc_service_t *sv = srpc_data.rpc_services[i];
i, sv->sv_name);
}
- spin_unlock(&srpc_data.rpc_glock);
+ cfs_spin_unlock(&srpc_data.rpc_glock);
stt_shutdown();
*/
typedef int (*swi_action_t) (struct swi_workitem *);
typedef struct swi_workitem {
- struct list_head wi_list; /* chain on runq */
+ cfs_list_t wi_list; /* chain on runq */
int wi_state;
swi_action_t wi_action;
void *wi_data;
/* message buffer descriptor */
typedef struct {
- struct list_head buf_list; /* chain on srpc_service::*_msgq */
+ cfs_list_t buf_list; /* chain on srpc_service::*_msgq */
srpc_msg_t buf_msg;
lnet_handle_md_t buf_mdh;
lnet_nid_t buf_self;
/* server-side state of a RPC */
typedef struct srpc_server_rpc {
- struct list_head srpc_list; /* chain on srpc_service::*_rpcq */
+ cfs_list_t srpc_list; /* chain on srpc_service::*_rpcq */
struct srpc_service *srpc_service;
swi_workitem_t srpc_wi;
srpc_event_t srpc_ev; /* bulk/reply event */
/* client-side state of a RPC */
typedef struct srpc_client_rpc {
- struct list_head crpc_list; /* chain on user's lists */
- spinlock_t crpc_lock; /* serialize */
+ cfs_list_t crpc_list; /* chain on user's lists */
+ cfs_spinlock_t crpc_lock; /* serialize */
int crpc_service;
- atomic_t crpc_refcount;
+ cfs_atomic_t crpc_refcount;
int crpc_timeout; /* # seconds to wait for reply */
stt_timer_t crpc_timer;
swi_workitem_t crpc_wi;
do { \
CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
(rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- atomic_inc(&(rpc)->crpc_refcount); \
+ cfs_atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0); \
+ cfs_atomic_inc(&(rpc)->crpc_refcount); \
} while (0)
#define srpc_client_rpc_decref(rpc) \
do { \
CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
(rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
+ cfs_atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0); \
+ if (cfs_atomic_dec_and_test(&(rpc)->crpc_refcount)) \
srpc_destroy_client_rpc(rpc); \
} while (0)
int sv_nprune; /* # posted RPC to be pruned */
int sv_concur; /* max # concurrent RPCs */
- spinlock_t sv_lock;
+ cfs_spinlock_t sv_lock;
int sv_shuttingdown;
srpc_event_t sv_ev; /* LNet event */
int sv_nposted_msg; /* # posted message buffers */
- struct list_head sv_free_rpcq; /* free RPC descriptors */
- struct list_head sv_active_rpcq; /* in-flight RPCs */
- struct list_head sv_posted_msgq; /* posted message buffers */
- struct list_head sv_blocked_msgq; /* blocked for RPC descriptor */
+ cfs_list_t sv_free_rpcq; /* free RPC descriptors */
+ cfs_list_t sv_active_rpcq; /* in-flight RPCs */
+ cfs_list_t sv_posted_msgq; /* posted message buffers */
+ cfs_list_t sv_blocked_msgq; /* blocked for RPC descriptor */
/* Service callbacks:
* - sv_handler: process incoming RPC request
* - sv_bulk_ready: notify bulk data
*/
- int (*sv_handler) (srpc_server_rpc_t *);
- int (*sv_bulk_ready) (srpc_server_rpc_t *, int);
+ int (*sv_handler) (srpc_server_rpc_t *);
+ int (*sv_bulk_ready) (srpc_server_rpc_t *, int);
} srpc_service_t;
#define SFW_POST_BUFFERS 256
#define SFW_SERVICE_CONCURRENCY (SFW_POST_BUFFERS/2)
typedef struct {
- struct list_head sn_list; /* chain on fw_zombie_sessions */
+ cfs_list_t sn_list; /* chain on fw_zombie_sessions */
lst_sid_t sn_id; /* unique identifier */
unsigned int sn_timeout; /* # seconds' inactivity to expire */
int sn_timer_active;
stt_timer_t sn_timer;
- struct list_head sn_batches; /* list of batches */
+ cfs_list_t sn_batches; /* list of batches */
char sn_name[LST_NAME_SIZE];
- atomic_t sn_refcount;
- atomic_t sn_brw_errors;
- atomic_t sn_ping_errors;
+ cfs_atomic_t sn_refcount;
+ cfs_atomic_t sn_brw_errors;
+ cfs_atomic_t sn_ping_errors;
} sfw_session_t;
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
(sid0).ses_stamp == (sid1).ses_stamp)
typedef struct {
- struct list_head bat_list; /* chain on sn_batches */
+ cfs_list_t bat_list; /* chain on sn_batches */
lst_bid_t bat_id; /* batch id */
int bat_error; /* error code of batch */
sfw_session_t *bat_session; /* batch's session */
- atomic_t bat_nactive; /* # of active tests */
- struct list_head bat_tests; /* test instances */
+ cfs_atomic_t bat_nactive; /* # of active tests */
+ cfs_list_t bat_tests; /* test instances */
} sfw_batch_t;
typedef struct {
} sfw_test_client_ops_t;
typedef struct sfw_test_instance {
- struct list_head tsi_list; /* chain on batch */
+ cfs_list_t tsi_list; /* chain on batch */
int tsi_service; /* test type */
sfw_batch_t *tsi_batch; /* batch */
sfw_test_client_ops_t *tsi_ops; /* test client operations */
int tsi_loop; /* loop count */
/* status of test instance */
- spinlock_t tsi_lock; /* serialize */
+ cfs_spinlock_t tsi_lock; /* serialize */
int tsi_stopping:1; /* test is stopping */
- atomic_t tsi_nactive; /* # of active test unit */
- struct list_head tsi_units; /* test units */
- struct list_head tsi_free_rpcs; /* free rpcs */
- struct list_head tsi_active_rpcs; /* active rpcs */
+ cfs_atomic_t tsi_nactive; /* # of active test unit */
+ cfs_list_t tsi_units; /* test units */
+ cfs_list_t tsi_free_rpcs; /* free rpcs */
+ cfs_list_t tsi_active_rpcs; /* active rpcs */
union {
test_bulk_req_t bulk; /* bulk parameter */
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
typedef struct sfw_test_unit {
- struct list_head tsu_list; /* chain on lst_test_instance */
- lnet_process_id_t tsu_dest; /* id of dest node */
- int tsu_loop; /* loop count of the test */
- sfw_test_instance_t *tsu_instance; /* pointer to test instance */
- void *tsu_private; /* private data */
- swi_workitem_t tsu_worker; /* workitem of the test unit */
+ cfs_list_t tsu_list; /* chain on lst_test_instance */
+ lnet_process_id_t tsu_dest; /* id of dest node */
+ int tsu_loop; /* loop count of the test */
+ sfw_test_instance_t *tsu_instance; /* pointer to test instance */
+ void *tsu_private; /* private data */
+ swi_workitem_t tsu_worker; /* workitem of the test unit */
} sfw_test_unit_t;
typedef struct {
- struct list_head tsc_list; /* chain on fw_tests */
+ cfs_list_t tsc_list; /* chain on fw_tests */
srpc_service_t *tsc_srv_service; /* test service */
sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
} sfw_test_case_t;
{
LASSERT (rpc != NULL);
LASSERT (!srpc_event_pending(rpc));
- LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif
CFS_INIT_LIST_HEAD(&rpc->crpc_list);
swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc);
- spin_lock_init(&rpc->crpc_lock);
- atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
+ cfs_spin_lock_init(&rpc->crpc_lock);
+ cfs_atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
rpc->crpc_dest = peer;
rpc->crpc_priv = priv;
while (!(cond)) { \
CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \
fmt, ## __VA_ARGS__); \
- spin_unlock(&(lock)); \
+ cfs_spin_unlock(&(lock)); \
\
selftest_wait_events(); \
\
- spin_lock(&(lock)); \
+ cfs_spin_lock(&(lock)); \
} \
} while (0)
{
int i = 2;
- spin_lock(&sv->sv_lock);
+ cfs_spin_lock(&sv->sv_lock);
LASSERT (sv->sv_shuttingdown);
- spin_unlock(&sv->sv_lock);
+ cfs_spin_unlock(&sv->sv_lock);
while (srpc_finish_service(sv) == 0) {
i++;
(STTIMER_NSLOTS - 1))])
struct st_timer_data {
- spinlock_t stt_lock;
+ cfs_spinlock_t stt_lock;
/* start time of the slot processed previously */
- cfs_time_t stt_prev_slot;
- struct list_head stt_hash[STTIMER_NSLOTS];
+ cfs_time_t stt_prev_slot;
+ cfs_list_t stt_hash[STTIMER_NSLOTS];
int stt_shuttingdown;
#ifdef __KERNEL__
cfs_waitq_t stt_waitq;
void
stt_add_timer (stt_timer_t *timer)
{
- struct list_head *pos;
+ cfs_list_t *pos;
- spin_lock(&stt_data.stt_lock);
+ cfs_spin_lock(&stt_data.stt_lock);
#ifdef __KERNEL__
LASSERT (stt_data.stt_nthreads > 0);
#endif
LASSERT (!stt_data.stt_shuttingdown);
LASSERT (timer->stt_func != NULL);
- LASSERT (list_empty(&timer->stt_list));
+ LASSERT (cfs_list_empty(&timer->stt_list));
LASSERT (cfs_time_after(timer->stt_expires, cfs_time_current_sec()));
/* a simple insertion sort */
- list_for_each_prev (pos, STTIMER_SLOT(timer->stt_expires)) {
- stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list);
+ cfs_list_for_each_prev (pos, STTIMER_SLOT(timer->stt_expires)) {
+ stt_timer_t *old = cfs_list_entry(pos, stt_timer_t, stt_list);
if (cfs_time_aftereq(timer->stt_expires, old->stt_expires))
break;
}
- list_add(&timer->stt_list, pos);
+ cfs_list_add(&timer->stt_list, pos);
- spin_unlock(&stt_data.stt_lock);
+ cfs_spin_unlock(&stt_data.stt_lock);
}
/*
{
int ret = 0;
- spin_lock(&stt_data.stt_lock);
+ cfs_spin_lock(&stt_data.stt_lock);
#ifdef __KERNEL__
LASSERT (stt_data.stt_nthreads > 0);
#endif
LASSERT (!stt_data.stt_shuttingdown);
- if (!list_empty(&timer->stt_list)) {
+ if (!cfs_list_empty(&timer->stt_list)) {
ret = 1;
- list_del_init(&timer->stt_list);
+ cfs_list_del_init(&timer->stt_list);
}
- spin_unlock(&stt_data.stt_lock);
+ cfs_spin_unlock(&stt_data.stt_lock);
return ret;
}
/* called with stt_data.stt_lock held */
int
-stt_expire_list (struct list_head *slot, cfs_time_t now)
+stt_expire_list (cfs_list_t *slot, cfs_time_t now)
{
int expired = 0;
stt_timer_t *timer;
- while (!list_empty(slot)) {
- timer = list_entry(slot->next, stt_timer_t, stt_list);
+ while (!cfs_list_empty(slot)) {
+ timer = cfs_list_entry(slot->next, stt_timer_t, stt_list);
if (cfs_time_after(timer->stt_expires, now))
break;
- list_del_init(&timer->stt_list);
- spin_unlock(&stt_data.stt_lock);
+ cfs_list_del_init(&timer->stt_list);
+ cfs_spin_unlock(&stt_data.stt_lock);
expired++;
(*timer->stt_func) (timer->stt_data);
- spin_lock(&stt_data.stt_lock);
+ cfs_spin_lock(&stt_data.stt_lock);
}
return expired;
now = cfs_time_current_sec();
this_slot = now & STTIMER_SLOTTIMEMASK;
- spin_lock(&stt_data.stt_lock);
+ cfs_spin_lock(&stt_data.stt_lock);
while (cfs_time_aftereq(this_slot, *last)) {
expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
}
*last = now & STTIMER_SLOTTIMEMASK;
- spin_unlock(&stt_data.stt_lock);
+ cfs_spin_unlock(&stt_data.stt_lock);
return expired;
}
rc);
}
- spin_lock(&stt_data.stt_lock);
+ cfs_spin_lock(&stt_data.stt_lock);
stt_data.stt_nthreads--;
- spin_unlock(&stt_data.stt_lock);
+ cfs_spin_unlock(&stt_data.stt_lock);
return 0;
}
if (pid < 0)
return (int)pid;
- spin_lock(&stt_data.stt_lock);
+ cfs_spin_lock(&stt_data.stt_lock);
stt_data.stt_nthreads++;
- spin_unlock(&stt_data.stt_lock);
+ cfs_spin_unlock(&stt_data.stt_lock);
return 0;
}
stt_data.stt_shuttingdown = 0;
stt_data.stt_prev_slot = cfs_time_current_sec() & STTIMER_SLOTTIMEMASK;
- spin_lock_init(&stt_data.stt_lock);
+ cfs_spin_lock_init(&stt_data.stt_lock);
for (i = 0; i < STTIMER_NSLOTS; i++)
CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
{
int i;
- spin_lock(&stt_data.stt_lock);
+ cfs_spin_lock(&stt_data.stt_lock);
for (i = 0; i < STTIMER_NSLOTS; i++)
- LASSERT (list_empty(&stt_data.stt_hash[i]));
+ LASSERT (cfs_list_empty(&stt_data.stt_hash[i]));
stt_data.stt_shuttingdown = 1;
stt_data.stt_nthreads);
#endif
- spin_unlock(&stt_data.stt_lock);
+ cfs_spin_unlock(&stt_data.stt_lock);
return;
}
#define __SELFTEST_TIMER_H__
typedef struct {
- struct list_head stt_list;
+ cfs_list_t stt_list;
cfs_time_t stt_expires;
void (*stt_func) (void *);
void *stt_data;
* module info
*/
-struct module libcfs_global_module = {"selftest"};
+cfs_module_t libcfs_global_module = {"selftest"};
/*
* structure definitions
struct smoketest_workitem {
- struct list_head wi_runq; /* concurrent workitems */
- struct list_head wi_serial_runq; /* serialised workitems */
+ cfs_list_t wi_runq; /* concurrent workitems */
+ cfs_list_t wi_serial_runq; /* serialised workitems */
cfs_waitq_t wi_waitq; /* where schedulers sleep */
cfs_waitq_t wi_serial_waitq; /* where serial scheduler sleep */
- spinlock_t wi_lock; /* serialize */
+ cfs_spinlock_t wi_lock; /* serialize */
int wi_shuttingdown;
int wi_nthreads;
} swi_data;
static inline int
-swi_sched_cansleep (struct list_head *q)
+swi_sched_cansleep (cfs_list_t *q)
{
int rc;
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
- rc = !swi_data.wi_shuttingdown && list_empty(q);
+ rc = !swi_data.wi_shuttingdown && cfs_list_empty(q);
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
return rc;
}
void
swi_kill_workitem (swi_workitem_t *wi)
{
- LASSERT (!in_interrupt()); /* because we use plain spinlock */
+ LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
LASSERT (!swi_data.wi_shuttingdown);
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
#ifdef __KERNEL__
LASSERT (wi->wi_running);
#endif
if (wi->wi_scheduled) { /* cancel pending schedules */
- LASSERT (!list_empty(&wi->wi_list));
- list_del_init(&wi->wi_list);
+ LASSERT (!cfs_list_empty(&wi->wi_list));
+ cfs_list_del_init(&wi->wi_list);
}
- LASSERT (list_empty(&wi->wi_list));
+ LASSERT (cfs_list_empty(&wi->wi_list));
wi->wi_scheduled = 1; /* LBUG future schedule attempts */
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
return;
}
void
swi_schedule_workitem (swi_workitem_t *wi)
{
- LASSERT (!in_interrupt()); /* because we use plain spinlock */
+ LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
LASSERT (!swi_data.wi_shuttingdown);
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
if (!wi->wi_scheduled) {
- LASSERT (list_empty(&wi->wi_list));
+ LASSERT (cfs_list_empty(&wi->wi_list));
wi->wi_scheduled = 1;
- list_add_tail(&wi->wi_list, &swi_data.wi_runq);
+ cfs_list_add_tail(&wi->wi_list, &swi_data.wi_runq);
cfs_waitq_signal(&swi_data.wi_waitq);
}
- LASSERT (!list_empty(&wi->wi_list));
- spin_unlock(&swi_data.wi_lock);
+ LASSERT (!cfs_list_empty(&wi->wi_list));
+ cfs_spin_unlock(&swi_data.wi_lock);
return;
}
void
swi_schedule_serial_workitem (swi_workitem_t *wi)
{
- LASSERT (!in_interrupt()); /* because we use plain spinlock */
+ LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
LASSERT (!swi_data.wi_shuttingdown);
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
if (!wi->wi_scheduled) {
- LASSERT (list_empty(&wi->wi_list));
+ LASSERT (cfs_list_empty(&wi->wi_list));
wi->wi_scheduled = 1;
- list_add_tail(&wi->wi_list, &swi_data.wi_serial_runq);
+ cfs_list_add_tail(&wi->wi_list, &swi_data.wi_serial_runq);
cfs_waitq_signal(&swi_data.wi_serial_waitq);
}
- LASSERT (!list_empty(&wi->wi_list));
- spin_unlock(&swi_data.wi_lock);
+ LASSERT (!cfs_list_empty(&wi->wi_list));
+ cfs_spin_unlock(&swi_data.wi_lock);
return;
}
cfs_daemonize(name);
cfs_block_allsigs();
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
while (!swi_data.wi_shuttingdown) {
int nloops = 0;
int rc;
swi_workitem_t *wi;
- while (!list_empty(&swi_data.wi_runq) &&
+ while (!cfs_list_empty(&swi_data.wi_runq) &&
nloops < SWI_RESCHED) {
- wi = list_entry(swi_data.wi_runq.next,
- swi_workitem_t, wi_list);
- list_del_init(&wi->wi_list);
+ wi = cfs_list_entry(swi_data.wi_runq.next,
+ swi_workitem_t, wi_list);
+ cfs_list_del_init(&wi->wi_list);
LASSERT (wi->wi_scheduled);
nloops++;
if (wi->wi_running) {
- list_add_tail(&wi->wi_list, &swi_data.wi_runq);
+ cfs_list_add_tail(&wi->wi_list,
+ &swi_data.wi_runq);
continue;
}
wi->wi_running = 1;
wi->wi_scheduled = 0;
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
rc = (*wi->wi_action) (wi);
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
if (rc == 0) /* wi still active */
wi->wi_running = 0;
}
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
if (nloops < SWI_RESCHED)
cfs_wait_event_interruptible_exclusive(
- swi_data.wi_waitq,
- !swi_sched_cansleep(&swi_data.wi_runq), rc);
+ swi_data.wi_waitq,
+ !swi_sched_cansleep(&swi_data.wi_runq), rc);
else
- our_cond_resched();
+ cfs_cond_resched();
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
}
swi_data.wi_nthreads--;
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
return 0;
}
cfs_daemonize("swi_serial_sd");
cfs_block_allsigs();
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
while (!swi_data.wi_shuttingdown) {
int nloops = 0;
int rc;
swi_workitem_t *wi;
- while (!list_empty(&swi_data.wi_serial_runq) &&
+ while (!cfs_list_empty(&swi_data.wi_serial_runq) &&
nloops < SWI_RESCHED) {
- wi = list_entry(swi_data.wi_serial_runq.next,
- swi_workitem_t, wi_list);
- list_del_init(&wi->wi_list);
+ wi = cfs_list_entry(swi_data.wi_serial_runq.next,
+ swi_workitem_t, wi_list);
+ cfs_list_del_init(&wi->wi_list);
LASSERTF (!wi->wi_running && wi->wi_scheduled,
"wi %p running %d scheduled %d\n",
nloops++;
wi->wi_running = 1;
wi->wi_scheduled = 0;
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
rc = (*wi->wi_action) (wi);
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
if (rc == 0) /* wi still active */
wi->wi_running = 0;
}
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
if (nloops < SWI_RESCHED)
cfs_wait_event_interruptible_exclusive(
- swi_data.wi_serial_waitq,
- !swi_sched_cansleep(&swi_data.wi_serial_runq), rc);
+ swi_data.wi_serial_waitq,
+ !swi_sched_cansleep(&swi_data.wi_serial_runq),
+ rc);
else
- our_cond_resched();
+ cfs_cond_resched();
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
}
swi_data.wi_nthreads--;
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
return 0;
}
if (pid < 0)
return (int)pid;
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
swi_data.wi_nthreads++;
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
return 0;
}
{
int n = 0;
swi_workitem_t *wi;
- struct list_head *q;
+ cfs_list_t *q;
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
for (;;) {
- if (!list_empty(&swi_data.wi_serial_runq))
+ if (!cfs_list_empty(&swi_data.wi_serial_runq))
q = &swi_data.wi_serial_runq;
- else if (!list_empty(&swi_data.wi_runq))
+ else if (!cfs_list_empty(&swi_data.wi_runq))
q = &swi_data.wi_runq;
else
break;
- wi = list_entry(q->next, swi_workitem_t, wi_list);
- list_del_init(&wi->wi_list);
+ wi = cfs_list_entry(q->next, swi_workitem_t, wi_list);
+ cfs_list_del_init(&wi->wi_list);
LASSERT (wi->wi_scheduled);
wi->wi_scheduled = 0;
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
n++;
(*wi->wi_action) (wi);
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
}
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
return n;
}
swi_data.wi_nthreads = 0;
swi_data.wi_shuttingdown = 0;
- spin_lock_init(&swi_data.wi_lock);
+ cfs_spin_lock_init(&swi_data.wi_lock);
cfs_waitq_init(&swi_data.wi_waitq);
cfs_waitq_init(&swi_data.wi_serial_waitq);
CFS_INIT_LIST_HEAD(&swi_data.wi_runq);
return rc;
}
- for (i = 0; i < num_online_cpus(); i++) {
+ for (i = 0; i < cfs_num_online_cpus(); i++) {
rc = swi_start_thread(swi_scheduler_main,
(void *) (long_ptr_t) i);
if (rc != 0) {
void
swi_shutdown (void)
{
- spin_lock(&swi_data.wi_lock);
+ cfs_spin_lock(&swi_data.wi_lock);
- LASSERT (list_empty(&swi_data.wi_runq));
- LASSERT (list_empty(&swi_data.wi_serial_runq));
+ LASSERT (cfs_list_empty(&swi_data.wi_runq));
+ LASSERT (cfs_list_empty(&swi_data.wi_serial_runq));
swi_data.wi_shuttingdown = 1;
swi_data.wi_nthreads);
#endif
- spin_unlock(&swi_data.wi_lock);
+ cfs_spin_unlock(&swi_data.wi_lock);
return;
}
static int ptllnd_ni_count = 0;
-static struct list_head ptllnd_idle_history;
-static struct list_head ptllnd_history_list;
+static cfs_list_t ptllnd_idle_history;
+static cfs_list_t ptllnd_history_list;
void
ptllnd_history_fini(void)
{
ptllnd_he_t *he;
- while (!list_empty(&ptllnd_idle_history)) {
- he = list_entry(ptllnd_idle_history.next,
- ptllnd_he_t, he_list);
+ while (!cfs_list_empty(&ptllnd_idle_history)) {
+ he = cfs_list_entry(ptllnd_idle_history.next,
+ ptllnd_he_t, he_list);
- list_del(&he->he_list);
+ cfs_list_del(&he->he_list);
LIBCFS_FREE(he, sizeof(*he));
}
- while (!list_empty(&ptllnd_history_list)) {
- he = list_entry(ptllnd_history_list.next,
- ptllnd_he_t, he_list);
+ while (!cfs_list_empty(&ptllnd_history_list)) {
+ he = cfs_list_entry(ptllnd_history_list.next,
+ ptllnd_he_t, he_list);
- list_del(&he->he_list);
+ cfs_list_del(&he->he_list);
LIBCFS_FREE(he, sizeof(*he));
}
}
return -ENOMEM;
}
- list_add(&he->he_list, &ptllnd_idle_history);
+ cfs_list_add(&he->he_list, &ptllnd_idle_history);
}
PTLLND_HISTORY("Init");
va_list ap;
ptllnd_he_t *he;
- if (!list_empty(&ptllnd_idle_history)) {
- he = list_entry(ptllnd_idle_history.next,
- ptllnd_he_t, he_list);
- } else if (!list_empty(&ptllnd_history_list)) {
- he = list_entry(ptllnd_history_list.next,
- ptllnd_he_t, he_list);
+ if (!cfs_list_empty(&ptllnd_idle_history)) {
+ he = cfs_list_entry(ptllnd_idle_history.next,
+ ptllnd_he_t, he_list);
+ } else if (!cfs_list_empty(&ptllnd_history_list)) {
+ he = cfs_list_entry(ptllnd_history_list.next,
+ ptllnd_he_t, he_list);
} else {
return;
}
- list_del(&he->he_list);
- list_add_tail(&he->he_list, &ptllnd_history_list);
+ cfs_list_del(&he->he_list);
+ cfs_list_add_tail(&he->he_list, &ptllnd_history_list);
he->he_seq = seq++;
he->he_fn = fn;
PTLLND_HISTORY("dumping...");
- while (!list_empty(&ptllnd_history_list)) {
- he = list_entry(ptllnd_history_list.next,
+ while (!cfs_list_empty(&ptllnd_history_list)) {
+ he = cfs_list_entry(ptllnd_history_list.next,
ptllnd_he_t, he_list);
- list_del(&he->he_list);
+ cfs_list_del(&he->he_list);
CDEBUG(D_WARNING, "%d %d.%06d (%s:%d:%s()) %s\n", he->he_seq,
(int)he->he_time.tv_sec, (int)he->he_time.tv_usec,
he->he_file, he->he_line, he->he_fn, he->he_msg);
- list_add_tail(&he->he_list, &ptllnd_idle_history);
+ cfs_list_add_tail(&he->he_list, &ptllnd_idle_history);
}
PTLLND_HISTORY("complete");
return NULL;
}
- list_add(&buf->plb_list, &plni->plni_buffers);
+ cfs_list_add(&buf->plb_list, &plni->plni_buffers);
plni->plni_nbuffers++;
return buf;
LASSERT (!buf->plb_posted);
plni->plni_nbuffers--;
- list_del(&buf->plb_list);
+ cfs_list_del(&buf->plb_list);
LIBCFS_FREE(buf->plb_buffer, plni->plni_buffer_size);
LIBCFS_FREE(buf, sizeof(*buf));
}
{
ptllnd_ni_t *plni = ni->ni_data;
ptllnd_buffer_t *buf;
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
CDEBUG(D_NET, "nposted_buffers = %d (before)\n",plni->plni_nposted_buffers);
CDEBUG(D_NET, "nbuffers = %d (before)\n",plni->plni_nbuffers);
- list_for_each_safe(tmp, nxt, &plni->plni_buffers) {
- buf = list_entry(tmp, ptllnd_buffer_t, plb_list);
+ cfs_list_for_each_safe(tmp, nxt, &plni->plni_buffers) {
+ buf = cfs_list_entry(tmp, ptllnd_buffer_t, plb_list);
//CDEBUG(D_NET, "buf=%p posted=%d\n",buf,buf->plb_posted);
LASSERT( plni->plni_npeers == 0);
for (i = 0; i < plni->plni_peer_hash_size; i++)
- LASSERT (list_empty(&plni->plni_peer_hash[i]));
+ LASSERT (cfs_list_empty(&plni->plni_peer_hash[i]));
LIBCFS_FREE(plni->plni_peer_hash,
plni->plni_peer_hash_size * sizeof(*plni->plni_peer_hash));
int i;
for (i = 0; i < plni->plni_peer_hash_size; i++)
- while (!list_empty(&plni->plni_peer_hash[i])) {
- plp = list_entry(plni->plni_peer_hash[i].next,
- ptllnd_peer_t, plp_list);
+ while (!cfs_list_empty(&plni->plni_peer_hash[i])) {
+ plp = cfs_list_entry(plni->plni_peer_hash[i].next,
+ ptllnd_peer_t, plp_list);
ptllnd_close_peer(plp, 0);
}
#include <lnet/ptllnd.h> /* Depends on portals/p30.h */
#include <stdarg.h>
-/* Hack to record history
+/* Hack to record history
* This should really be done by CDEBUG(D_NETTRACE... */
typedef struct {
- struct list_head he_list;
+ cfs_list_t he_list;
struct timeval he_time;
const char *he_fn;
const char *he_file;
#define PTLLND_HISTORY(fmt, a...) \
ptllnd_history(__FUNCTION__, __FILE__, __LINE__, fmt, ## a)
-
+
#define PTLLND_MD_OPTIONS (PTL_MD_LUSTRE_COMPLETION_SEMANTICS |\
PTL_MD_EVENT_START_DISABLE)
typedef struct
int plni_timeout;
__u64 plni_stamp;
- struct list_head plni_active_txs;
- struct list_head plni_zombie_txs;
+ cfs_list_t plni_active_txs;
+ cfs_list_t plni_zombie_txs;
int plni_ntxs;
int plni_nrxs;
ptl_handle_eq_t plni_eqh;
ptl_process_id_t plni_portals_id; /* Portals ID of interface */
- struct list_head *plni_peer_hash;
+ cfs_list_t *plni_peer_hash;
int plni_npeers;
int plni_watchdog_nextt;
int plni_watchdog_peeridx;
- struct list_head plni_tx_history;
+ cfs_list_t plni_tx_history;
int plni_ntx_history;
- struct list_head plni_buffers;
+ cfs_list_t plni_buffers;
int plni_nbuffers;
int plni_nposted_buffers;
int plni_nmsgs;
typedef struct
{
- struct list_head plp_list;
+ cfs_list_t plp_list;
lnet_ni_t *plp_ni;
lnet_process_id_t plp_id;
ptl_process_id_t plp_ptlid;
int plp_closing:1;
__u64 plp_match;
__u64 plp_stamp;
- struct list_head plp_txq;
- struct list_head plp_noopq;
- struct list_head plp_activeq;
+ cfs_list_t plp_txq;
+ cfs_list_t plp_noopq;
+ cfs_list_t plp_activeq;
} ptllnd_peer_t;
typedef struct
{
- struct list_head plb_list;
+ cfs_list_t plb_list;
lnet_ni_t *plb_ni;
int plb_posted;
ptl_handle_md_t plb_md;
typedef struct
{
- struct list_head tx_list;
+ cfs_list_t tx_list;
int tx_type;
int tx_status;
ptllnd_peer_t *tx_peer;
LASSERT (tx->tx_type != PTLLND_MSG_TYPE_NOOP);
ptllnd_set_tx_deadline(tx);
- list_add_tail(&tx->tx_list, &peer->plp_txq);
+ cfs_list_add_tail(&tx->tx_list, &peer->plp_txq);
ptllnd_check_sends(peer);
}
LASSERT (peer->plp_closing);
LASSERT (plni->plni_npeers > 0);
- LASSERT (list_empty(&peer->plp_txq));
- LASSERT (list_empty(&peer->plp_noopq));
- LASSERT (list_empty(&peer->plp_activeq));
+ LASSERT (cfs_list_empty(&peer->plp_txq));
+ LASSERT (cfs_list_empty(&peer->plp_noopq));
+ LASSERT (cfs_list_empty(&peer->plp_activeq));
plni->plni_npeers--;
LIBCFS_FREE(peer, sizeof(*peer));
}
void
-ptllnd_abort_txs(ptllnd_ni_t *plni, struct list_head *q)
+ptllnd_abort_txs(ptllnd_ni_t *plni, cfs_list_t *q)
{
- while (!list_empty(q)) {
- ptllnd_tx_t *tx = list_entry(q->next, ptllnd_tx_t, tx_list);
+ while (!cfs_list_empty(q)) {
+ ptllnd_tx_t *tx = cfs_list_entry(q->next, ptllnd_tx_t, tx_list);
tx->tx_status = -ESHUTDOWN;
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
+ cfs_list_del(&tx->tx_list);
+ cfs_list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
}
}
peer->plp_closing = 1;
- if (!list_empty(&peer->plp_txq) ||
- !list_empty(&peer->plp_noopq) ||
- !list_empty(&peer->plp_activeq) ||
+ if (!cfs_list_empty(&peer->plp_txq) ||
+ !cfs_list_empty(&peer->plp_noopq) ||
+ !cfs_list_empty(&peer->plp_activeq) ||
error != 0) {
CWARN("Closing %s: %d\n", libcfs_id2str(peer->plp_id), error);
if (plni->plni_debug)
ptllnd_abort_txs(plni, &peer->plp_noopq);
ptllnd_abort_txs(plni, &peer->plp_activeq);
- list_del(&peer->plp_list);
+ cfs_list_del(&peer->plp_list);
ptllnd_peer_decref(peer);
}
LASSERT (LNET_NIDNET(id.nid) == LNET_NIDNET(ni->ni_nid));
- list_for_each_entry (plp, &plni->plni_peer_hash[hash], plp_list) {
+ cfs_list_for_each_entry (plp, &plni->plni_peer_hash[hash], plp_list) {
if (plp->plp_id.nid == id.nid &&
plp->plp_id.pid == id.pid) {
ptllnd_peer_addref(plp);
CFS_INIT_LIST_HEAD(&plp->plp_activeq);
ptllnd_peer_addref(plp);
- list_add_tail(&plp->plp_list, &plni->plni_peer_hash[hash]);
+ cfs_list_add_tail(&plp->plp_list, &plni->plni_peer_hash[hash]);
tx = ptllnd_new_tx(plp, PTLLND_MSG_TYPE_HELLO, 0);
if (tx == NULL) {
}
int
-ptllnd_count_q(struct list_head *q)
+ptllnd_count_q(cfs_list_t *q)
{
- struct list_head *e;
- int n = 0;
+ cfs_list_t *e;
+ int n = 0;
- list_for_each(e, q) {
+ cfs_list_for_each(e, q) {
n++;
}
plni->plni_peer_credits + plp->plp_lazy_credits);
CDEBUG(D_WARNING, "txq:\n");
- list_for_each_entry (tx, &plp->plp_txq, tx_list) {
+ cfs_list_for_each_entry (tx, &plp->plp_txq, tx_list) {
ptllnd_debug_tx(tx);
}
CDEBUG(D_WARNING, "noopq:\n");
- list_for_each_entry (tx, &plp->plp_noopq, tx_list) {
+ cfs_list_for_each_entry (tx, &plp->plp_noopq, tx_list) {
ptllnd_debug_tx(tx);
}
CDEBUG(D_WARNING, "activeq:\n");
- list_for_each_entry (tx, &plp->plp_activeq, tx_list) {
+ cfs_list_for_each_entry (tx, &plp->plp_activeq, tx_list) {
ptllnd_debug_tx(tx);
}
CDEBUG(D_WARNING, "zombies:\n");
- list_for_each_entry (tx, &plni->plni_zombie_txs, tx_list) {
+ cfs_list_for_each_entry (tx, &plni->plni_zombie_txs, tx_list) {
if (tx->tx_peer->plp_id.nid == id.nid &&
tx->tx_peer->plp_id.pid == id.pid)
ptllnd_debug_tx(tx);
}
CDEBUG(D_WARNING, "history:\n");
- list_for_each_entry (tx, &plni->plni_tx_history, tx_list) {
+ cfs_list_for_each_entry (tx, &plni->plni_tx_history, tx_list) {
if (tx->tx_peer->plp_id.nid == id.nid &&
tx->tx_peer->plp_id.pid == id.pid)
ptllnd_debug_tx(tx);
int max = plni->plni_max_tx_history;
while (plni->plni_ntx_history > max) {
- ptllnd_tx_t *tx = list_entry(plni->plni_tx_history.next,
- ptllnd_tx_t, tx_list);
- list_del(&tx->tx_list);
+ ptllnd_tx_t *tx = cfs_list_entry(plni->plni_tx_history.next,
+ ptllnd_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
ptllnd_peer_decref(tx->tx_peer);
tx->tx_completing = 1;
- if (!list_empty(&tx->tx_list))
- list_del_init(&tx->tx_list);
+ if (!cfs_list_empty(&tx->tx_list))
+ cfs_list_del_init(&tx->tx_list);
if (tx->tx_status != 0) {
if (plni->plni_debug) {
}
plni->plni_ntx_history++;
- list_add_tail(&tx->tx_list, &plni->plni_tx_history);
+ cfs_list_add_tail(&tx->tx_list, &plni->plni_tx_history);
ptllnd_cull_tx_history(plni);
}
if (!peer->plp_sent_hello ||
peer->plp_credits == 0 ||
- !list_empty(&peer->plp_noopq) ||
+ !cfs_list_empty(&peer->plp_noopq) ||
peer->plp_outstanding_credits < PTLLND_CREDIT_HIGHWATER(plni))
return 0;
/* No tx to piggyback NOOP onto or no credit to send a tx */
- return (list_empty(&peer->plp_txq) || peer->plp_credits == 1);
+ return (cfs_list_empty(&peer->plp_txq) || peer->plp_credits == 1);
}
void
libcfs_id2str(peer->plp_id));
} else {
ptllnd_set_tx_deadline(tx);
- list_add_tail(&tx->tx_list, &peer->plp_noopq);
+ cfs_list_add_tail(&tx->tx_list, &peer->plp_noopq);
}
}
for (;;) {
- if (!list_empty(&peer->plp_noopq)) {
+ if (!cfs_list_empty(&peer->plp_noopq)) {
LASSERT (peer->plp_sent_hello);
- tx = list_entry(peer->plp_noopq.next,
- ptllnd_tx_t, tx_list);
- } else if (!list_empty(&peer->plp_txq)) {
- tx = list_entry(peer->plp_txq.next,
- ptllnd_tx_t, tx_list);
+ tx = cfs_list_entry(peer->plp_noopq.next,
+ ptllnd_tx_t, tx_list);
+ } else if (!cfs_list_empty(&peer->plp_txq)) {
+ tx = cfs_list_entry(peer->plp_txq.next,
+ ptllnd_tx_t, tx_list);
} else {
/* nothing to send right now */
break;
/* say HELLO first */
if (!peer->plp_sent_hello) {
- LASSERT (list_empty(&peer->plp_noopq));
+ LASSERT (cfs_list_empty(&peer->plp_noopq));
LASSERT (tx->tx_type == PTLLND_MSG_TYPE_HELLO);
peer->plp_sent_hello = 1;
break;
}
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &peer->plp_activeq);
+ cfs_list_del(&tx->tx_list);
+ cfs_list_add_tail(&tx->tx_list, &peer->plp_activeq);
CDEBUG(D_NET, "Sending at TX=%p type=%s (%d)\n",tx,
ptllnd_msgtype2str(tx->tx_type),tx->tx_type);
tx->tx_lnetmsg = msg;
ptllnd_set_tx_deadline(tx);
- list_add_tail(&tx->tx_list, &peer->plp_activeq);
+ cfs_list_add_tail(&tx->tx_list, &peer->plp_activeq);
gettimeofday(&tx->tx_bulk_posted, NULL);
if (type == PTLLND_RDMA_READ)
PtlHandleIsEqual(tx->tx_reqmdh, PTL_INVALID_HANDLE))) {
if (error)
tx->tx_status = -EIO;
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
+ cfs_list_del(&tx->tx_list);
+ cfs_list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
}
}
time_t now = cfs_time_current_sec();
ptllnd_tx_t *tx;
- list_for_each_entry (tx, &peer->plp_txq, tx_list) {
+ cfs_list_for_each_entry (tx, &peer->plp_txq, tx_list) {
if (tx->tx_deadline < now)
return tx;
}
- list_for_each_entry (tx, &peer->plp_noopq, tx_list) {
+ cfs_list_for_each_entry (tx, &peer->plp_noopq, tx_list) {
if (tx->tx_deadline < now)
return tx;
}
- list_for_each_entry (tx, &peer->plp_activeq, tx_list) {
+ cfs_list_for_each_entry (tx, &peer->plp_activeq, tx_list) {
if (tx->tx_deadline < now)
return tx;
}
int chunk = plni->plni_peer_hash_size;
int interval = now - (plni->plni_watchdog_nextt - p);
int i;
- struct list_head *hashlist;
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *hashlist;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
- /* Time to check for RDMA timeouts on a few more peers:
+ /* Time to check for RDMA timeouts on a few more peers:
* I try to do checks every 'p' seconds on a proportion of the peer
* table and I need to check every connection 'n' times within a
* timeout interval, to ensure I detect a timeout on any connection
for (i = 0; i < chunk; i++) {
hashlist = &plni->plni_peer_hash[plni->plni_watchdog_peeridx];
- list_for_each_safe(tmp, nxt, hashlist) {
- ptllnd_check_peer(list_entry(tmp, ptllnd_peer_t, plp_list));
+ cfs_list_for_each_safe(tmp, nxt, hashlist) {
+ ptllnd_check_peer(cfs_list_entry(tmp, ptllnd_peer_t,
+ plp_list));
}
plni->plni_watchdog_peeridx = (plni->plni_watchdog_peeridx + 1) %
}
}
- while (!list_empty(&plni->plni_zombie_txs)) {
- tx = list_entry(plni->plni_zombie_txs.next,
+ while (!cfs_list_empty(&plni->plni_zombie_txs)) {
+ tx = cfs_list_entry(plni->plni_zombie_txs.next,
ptllnd_tx_t, tx_list);
- list_del_init(&tx->tx_list);
+ cfs_list_del_init(&tx->tx_list);
ptllnd_tx_done(tx);
}
}
/* we cannot finilize txs right now (bug #18844) */
- list_splice_init(&conn->uc_tx_list, &zombie_txs);
+ cfs_list_splice_init(&conn->uc_tx_list, &zombie_txs);
peer->up_conns[idx] = NULL;
conn->uc_peer = NULL;
return;
}
- if (cfs_atomic_read(&peer->up_refcount) == 2) {
+ if (cfs_mt_atomic_read(&peer->up_refcount) == 2) {
int i;
for (i = 0; i < N_CONN_TYPES; i++)
LASSERT (peer->up_conns[i] == NULL);
- list_del(&peer->up_list);
+ cfs_list_del(&peer->up_list);
if (peer->up_errored &&
(peer->up_peerid.pid & LNET_PID_USERFLAG) == 0)
CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
pthread_mutex_init(&conn->uc_lock, NULL);
- cfs_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+ cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
*connp = conn;
return 0;
CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
pthread_mutex_init(&conn->uc_lock, NULL);
- cfs_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+ cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
*connp = conn;
return 0;
}
void
-usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist)
+usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist)
{
usock_tx_t *tx;
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, usock_tx_t, tx_list);
- list_del(&tx->tx_list);
+ while (!cfs_list_empty(txlist)) {
+ tx = cfs_list_entry(txlist->next, usock_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
usocklnd_destroy_tx(ni, tx);
}
}
void
-usocklnd_destroy_zcack_list(struct list_head *zcack_list)
+usocklnd_destroy_zcack_list(cfs_list_t *zcack_list)
{
usock_zc_ack_t *zcack;
- while (!list_empty(zcack_list)) {
- zcack = list_entry(zcack_list->next, usock_zc_ack_t, zc_list);
- list_del(&zcack->zc_list);
+ while (!cfs_list_empty(zcack_list)) {
+ zcack = cfs_list_entry(zcack_list->next, usock_zc_ack_t,
+ zc_list);
+ cfs_list_del(&zcack->zc_list);
LIBCFS_FREE (zcack, sizeof(*zcack));
}
lnet_finalize(conn->uc_peer->up_ni, conn->uc_rx_lnetmsg, -EIO);
}
- if (!list_empty(&conn->uc_tx_list)) {
+ if (!cfs_list_empty(&conn->uc_tx_list)) {
LASSERT (conn->uc_peer != NULL);
usocklnd_destroy_txlist(conn->uc_peer->up_ni, &conn->uc_tx_list);
}
usock_peer_t *
usocklnd_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
- struct list_head *peer_list = usocklnd_nid2peerlist(id.nid);
- struct list_head *tmp;
+ cfs_list_t *peer_list = usocklnd_nid2peerlist(id.nid);
+ cfs_list_t *tmp;
usock_peer_t *peer;
- list_for_each (tmp, peer_list) {
+ cfs_list_for_each (tmp, peer_list) {
- peer = list_entry (tmp, usock_peer_t, up_list);
+ peer = cfs_list_entry (tmp, usock_peer_t, up_list);
if (peer->up_ni != ni)
continue;
peer->up_incrn_is_set = 0;
peer->up_errored = 0;
peer->up_last_alive = 0;
- cfs_atomic_set (&peer->up_refcount, 1); /* 1 ref for caller */
+ cfs_mt_atomic_set (&peer->up_refcount, 1); /* 1 ref for caller */
pthread_mutex_init(&peer->up_lock, NULL);
pthread_mutex_lock(&net->un_lock);
/* peer table will take 1 of my refs on peer */
usocklnd_peer_addref(peer);
- list_add_tail (&peer->up_list,
- usocklnd_nid2peerlist(id.nid));
+ cfs_list_add_tail (&peer->up_list,
+ usocklnd_nid2peerlist(id.nid));
} else {
usocklnd_peer_decref(peer); /* should destroy peer */
peer = peer2;
usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
{
if (conn->uc_state == UC_READY &&
- list_empty(&conn->uc_tx_list) &&
- list_empty(&conn->uc_zcack_list) &&
+ cfs_list_empty(&conn->uc_tx_list) &&
+ cfs_list_empty(&conn->uc_zcack_list) &&
!conn->uc_sending) {
int rc = usocklnd_add_pollrequest(conn, POLL_TX_SET_REQUEST,
POLLOUT);
return rc;
}
- list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
+ cfs_list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
return 0;
}
int *send_immediately)
{
if (conn->uc_state == UC_READY &&
- list_empty(&conn->uc_tx_list) &&
- list_empty(&conn->uc_zcack_list) &&
+ cfs_list_empty(&conn->uc_tx_list) &&
+ cfs_list_empty(&conn->uc_zcack_list) &&
!conn->uc_sending) {
conn->uc_sending = 1;
*send_immediately = 1;
}
*send_immediately = 0;
- list_add_tail(&tx->tx_list, &conn->uc_tx_list);
+ cfs_list_add_tail(&tx->tx_list, &conn->uc_tx_list);
}
/* Safely create new conn if needed. Save result in *connp.
* Don't try to link it to peer because the conn
* has already had a chance to proceed at the beginning */
if (peer == NULL) {
- LASSERT(list_empty(&conn->uc_tx_list) &&
- list_empty(&conn->uc_zcack_list));
+ LASSERT(cfs_list_empty(&conn->uc_tx_list) &&
+ cfs_list_empty(&conn->uc_zcack_list));
usocklnd_conn_kill(conn);
return 0;
* make us zombie soon and take care of our txs and
* zc_acks */
- struct list_head tx_list, zcack_list;
+ cfs_list_t tx_list, zcack_list;
usock_conn_t *conn2;
int idx = usocklnd_type2idx(conn->uc_type);
conn2->uc_peer = peer;
/* unlink txs and zcack from the conn */
- list_add(&tx_list, &conn->uc_tx_list);
- list_del_init(&conn->uc_tx_list);
- list_add(&zcack_list, &conn->uc_zcack_list);
- list_del_init(&conn->uc_zcack_list);
+ cfs_list_add(&tx_list, &conn->uc_tx_list);
+ cfs_list_del_init(&conn->uc_tx_list);
+ cfs_list_add(&zcack_list, &conn->uc_zcack_list);
+ cfs_list_del_init(&conn->uc_zcack_list);
/* link they to the conn2 */
- list_add(&conn2->uc_tx_list, &tx_list);
- list_del_init(&tx_list);
- list_add(&conn2->uc_zcack_list, &zcack_list);
- list_del_init(&zcack_list);
+ cfs_list_add(&conn2->uc_tx_list, &tx_list);
+ cfs_list_del_init(&tx_list);
+ cfs_list_add(&conn2->uc_zcack_list, &zcack_list);
+ cfs_list_del_init(&zcack_list);
/* make conn zombie */
conn->uc_peer = NULL;
* received hello, but maybe we've smth. to
* send? */
LASSERT (conn->uc_sending == 0);
- if ( !list_empty(&conn->uc_tx_list) ||
- !list_empty(&conn->uc_zcack_list) ) {
+ if ( !cfs_list_empty(&conn->uc_tx_list) ||
+ !cfs_list_empty(&conn->uc_zcack_list) ) {
conn->uc_tx_deadline =
cfs_time_shift(usock_tuns.ut_timeout);
LASSERT (peer != NULL);
ni = peer->up_ni;
- if (list_empty(&conn->uc_tx_list) &&
- list_empty(&conn->uc_zcack_list)) {
+ if (cfs_list_empty(&conn->uc_tx_list) &&
+ cfs_list_empty(&conn->uc_zcack_list)) {
LASSERT(usock_tuns.ut_fair_limit > 1);
pthread_mutex_unlock(&conn->uc_lock);
return 0;
rc = usocklnd_send_tx(conn, tx);
if (rc == 0) { /* partial send or connection closed */
pthread_mutex_lock(&conn->uc_lock);
- list_add(&tx->tx_list, &conn->uc_tx_list);
+ cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
conn->uc_sending = 0;
pthread_mutex_unlock(&conn->uc_lock);
break;
pthread_mutex_lock(&conn->uc_lock);
conn->uc_sending = 0;
if (conn->uc_state != UC_DEAD &&
- list_empty(&conn->uc_tx_list) &&
- list_empty(&conn->uc_zcack_list)) {
+ cfs_list_empty(&conn->uc_tx_list) &&
+ cfs_list_empty(&conn->uc_zcack_list)) {
conn->uc_tx_flag = 0;
ret = usocklnd_add_pollrequest(conn,
POLL_TX_SET_REQUEST, 0);
* brand new noop tx for zc_ack from zcack_list. Return NULL
* if an error happened */
usock_tx_t *
-usocklnd_try_piggyback(struct list_head *tx_list_p,
- struct list_head *zcack_list_p)
+usocklnd_try_piggyback(cfs_list_t *tx_list_p,
+ cfs_list_t *zcack_list_p)
{
usock_tx_t *tx;
usock_zc_ack_t *zc_ack;
/* assign tx and zc_ack */
- if (list_empty(tx_list_p))
+ if (cfs_list_empty(tx_list_p))
tx = NULL;
else {
- tx = list_entry(tx_list_p->next, usock_tx_t, tx_list);
- list_del(&tx->tx_list);
+ tx = cfs_list_entry(tx_list_p->next, usock_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
/* already piggybacked or partially send */
if (tx->tx_msg.ksm_zc_cookies[1] != 0 ||
return tx;
}
- if (list_empty(zcack_list_p)) {
+ if (cfs_list_empty(zcack_list_p)) {
/* nothing to piggyback */
return tx;
} else {
- zc_ack = list_entry(zcack_list_p->next,
- usock_zc_ack_t, zc_list);
- list_del(&zc_ack->zc_list);
+ zc_ack = cfs_list_entry(zcack_list_p->next,
+ usock_zc_ack_t, zc_list);
+ cfs_list_del(&zc_ack->zc_list);
}
if (tx != NULL)
{
usock_conn_t *conn2;
usock_peer_t *peer;
- struct list_head tx_list;
- struct list_head zcack_list;
+ cfs_list_t tx_list;
+ cfs_list_t zcack_list;
int idx;
int rc = 0;
/* conn is passive and isn't linked to any peer,
so its tx and zc_ack lists have to be empty */
- LASSERT (list_empty(&conn->uc_tx_list) &&
- list_empty(&conn->uc_zcack_list) &&
+ LASSERT (cfs_list_empty(&conn->uc_tx_list) &&
+ cfs_list_empty(&conn->uc_zcack_list) &&
conn->uc_sending == 0);
rc = usocklnd_find_or_create_peer(conn->uc_ni, conn->uc_peerid, &peer);
* We're sure that nobody but us can access to conn,
* nevertheless we use mutex (if we're wrong yet,
* deadlock is easy to see that corrupted list */
- list_add(&tx_list, &conn2->uc_tx_list);
- list_del_init(&conn2->uc_tx_list);
- list_add(&zcack_list, &conn2->uc_zcack_list);
- list_del_init(&conn2->uc_zcack_list);
+ cfs_list_add(&tx_list, &conn2->uc_tx_list);
+ cfs_list_del_init(&conn2->uc_tx_list);
+ cfs_list_add(&zcack_list, &conn2->uc_zcack_list);
+ cfs_list_del_init(&conn2->uc_zcack_list);
pthread_mutex_lock(&conn->uc_lock);
- list_add_tail(&conn->uc_tx_list, &tx_list);
- list_del_init(&tx_list);
- list_add_tail(&conn->uc_zcack_list, &zcack_list);
- list_del_init(&zcack_list);
+ cfs_list_add_tail(&conn->uc_tx_list, &tx_list);
+ cfs_list_del_init(&tx_list);
+ cfs_list_add_tail(&conn->uc_zcack_list, &zcack_list);
+ cfs_list_del_init(&zcack_list);
conn->uc_peer = peer;
pthread_mutex_unlock(&conn->uc_lock);
/* we're ready to recive incoming packets and maybe
already have smth. to transmit */
LASSERT (conn->uc_sending == 0);
- if ( list_empty(&conn->uc_tx_list) &&
- list_empty(&conn->uc_zcack_list) ) {
+ if ( cfs_list_empty(&conn->uc_tx_list) &&
+ cfs_list_empty(&conn->uc_zcack_list) ) {
conn->uc_tx_flag = 0;
rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
POLLIN);
void
usocklnd_process_stale_list(usock_pollthread_t *pt_data)
{
- while (!list_empty(&pt_data->upt_stale_list)) {
+ while (!cfs_list_empty(&pt_data->upt_stale_list)) {
usock_conn_t *conn;
- conn = list_entry(pt_data->upt_stale_list.next,
- usock_conn_t, uc_stale_list);
+ conn = cfs_list_entry(pt_data->upt_stale_list.next,
+ usock_conn_t, uc_stale_list);
- list_del(&conn->uc_stale_list);
+ cfs_list_del(&conn->uc_stale_list);
usocklnd_tear_peer_conn(conn);
usocklnd_conn_decref(conn); /* -1 for idx2conn[idx] or pr */
/* Process all enqueued poll requests */
pthread_mutex_lock(&pt_data->upt_pollrequests_lock);
- while (!list_empty(&pt_data->upt_pollrequests)) {
+ while (!cfs_list_empty(&pt_data->upt_pollrequests)) {
usock_pollrequest_t *pr;
- pr = list_entry(pt_data->upt_pollrequests.next,
- usock_pollrequest_t, upr_list);
+ pr = cfs_list_entry(pt_data->upt_pollrequests.next,
+ usock_pollrequest_t, upr_list);
- list_del(&pr->upr_list);
+ cfs_list_del(&pr->upr_list);
rc = usocklnd_process_pollrequest(pr, pt_data);
if (rc)
break;
/* Block new poll requests to be enqueued */
pt_data->upt_errno = rc;
- while (!list_empty(&pt_data->upt_pollrequests)) {
+ while (!cfs_list_empty(&pt_data->upt_pollrequests)) {
usock_pollrequest_t *pr;
- pr = list_entry(pt_data->upt_pollrequests.next,
+ pr = cfs_list_entry(pt_data->upt_pollrequests.next,
usock_pollrequest_t, upr_list);
- list_del(&pr->upr_list);
+ cfs_list_del(&pr->upr_list);
if (pr->upr_type == POLL_ADD_REQUEST) {
libcfs_sock_release(pr->upr_conn->uc_sock);
- list_add_tail(&pr->upr_conn->uc_stale_list,
- &pt_data->upt_stale_list);
+ cfs_list_add_tail(&pr->upr_conn->uc_stale_list,
+ &pt_data->upt_stale_list);
} else {
usocklnd_conn_decref(pr->upr_conn);
}
}
/* unblock usocklnd_shutdown() */
- cfs_complete(&pt_data->upt_completion);
+ cfs_mt_complete(&pt_data->upt_completion);
return 0;
}
return rc;
}
- list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
+ cfs_list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
pthread_mutex_unlock(&pt->upt_pollrequests_lock);
return 0;
}
return; /* conn will be killed in poll thread anyway */
}
- list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
+ cfs_list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
pthread_mutex_unlock(&pt->upt_pollrequests_lock);
conn->uc_preq = NULL;
}
libcfs_sock_release(conn->uc_sock);
- list_add_tail(&conn->uc_stale_list, &pt_data->upt_stale_list);
+ cfs_list_add_tail(&conn->uc_stale_list,
+ &pt_data->upt_stale_list);
break;
case POLL_RX_SET_REQUEST:
pollfd[idx].events = (pollfd[idx].events & ~POLLIN) | value;
libcfs_sock_release(pt->upt_notifier[1]);
pthread_mutex_destroy(&pt->upt_pollrequests_lock);
- cfs_fini_completion(&pt->upt_completion);
+ cfs_mt_fini_completion(&pt->upt_completion);
LIBCFS_FREE (pt->upt_pollfd,
sizeof(struct pollfd) * pt->upt_npollfd);
CFS_INIT_LIST_HEAD (&pt->upt_pollrequests);
CFS_INIT_LIST_HEAD (&pt->upt_stale_list);
pthread_mutex_init(&pt->upt_pollrequests_lock, NULL);
- cfs_init_completion(&pt->upt_completion);
+ cfs_mt_init_completion(&pt->upt_completion);
}
/* Initialize peer hash list */
for (i = 0; i < n; i++) {
usock_pollthread_t *pt = &usock_data.ud_pollthreads[i];
usocklnd_wakeup_pollthread(i);
- cfs_wait_for_completion(&pt->upt_completion);
+ cfs_mt_wait_for_completion(&pt->upt_completion);
}
pthread_rwlock_destroy(&usock_data.ud_peers_lock);
void
usocklnd_del_all_peers(lnet_ni_t *ni)
{
- struct list_head *ptmp;
- struct list_head *pnxt;
+ cfs_list_t *ptmp;
+ cfs_list_t *pnxt;
usock_peer_t *peer;
int i;
pthread_rwlock_wrlock(&usock_data.ud_peers_lock);
for (i = 0; i < UD_PEER_HASH_SIZE; i++) {
- list_for_each_safe (ptmp, pnxt, &usock_data.ud_peers[i]) {
- peer = list_entry (ptmp, usock_peer_t, up_list);
+ cfs_list_for_each_safe (ptmp, pnxt, &usock_data.ud_peers[i]) {
+ peer = cfs_list_entry (ptmp, usock_peer_t, up_list);
if (peer->up_ni != ni)
continue;
pthread_mutex_unlock(&peer->up_lock);
/* peer hash list is still protected by the caller */
- list_del(&peer->up_list);
+ cfs_list_del(&peer->up_list);
usocklnd_peer_decref(peer); /* peer isn't in hash list anymore */
}
#include <lnet/socklnd.h>
typedef struct {
- struct list_head tx_list; /* neccessary to form tx list */
+ cfs_list_t tx_list; /* neccessary to form tx list */
lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
ksock_msg_t tx_msg; /* buffer for wire header of ksock msg */
int tx_resid; /* # of residual bytes */
struct usock_preq_s *uc_preq; /* preallocated request */
__u32 uc_peer_ip; /* IP address of the peer */
__u16 uc_peer_port; /* port of the peer */
- struct list_head uc_stale_list; /* orphaned connections */
+ cfs_list_t uc_stale_list; /* orphaned connections */
/* Receive state */
int uc_rx_state; /* message or hello state */
ksock_msg_t uc_rx_msg; /* message buffer */
/* Send state */
- struct list_head uc_tx_list; /* pending txs */
- struct list_head uc_zcack_list; /* pending zc_acks */
+ cfs_list_t uc_tx_list; /* pending txs */
+ cfs_list_t uc_zcack_list; /* pending zc_acks */
cfs_time_t uc_tx_deadline; /* when to time out */
int uc_tx_flag; /* deadline valid? */
int uc_sending; /* send op is in progress */
usock_tx_t *uc_tx_hello; /* fake tx with hello */
- cfs_atomic_t uc_refcount; /* # of users */
+ cfs_mt_atomic_t uc_refcount; /* # of users */
pthread_mutex_t uc_lock; /* serialize */
int uc_errored; /* a flag for lnet_notify() */
} usock_conn_t;
#define N_CONN_TYPES 3 /* CONTROL, BULK_IN and BULK_OUT */
typedef struct usock_peer_s {
- struct list_head up_list; /* neccessary to form peer list */
- lnet_process_id_t up_peerid; /* id of remote peer */
+ cfs_list_t up_list; /* neccessary to form peer list */
+ lnet_process_id_t up_peerid; /* id of remote peer */
usock_conn_t *up_conns[N_CONN_TYPES]; /* conns that connect us
- * us with the peer */
- lnet_ni_t *up_ni; /* pointer to parent NI */
- __u64 up_incarnation; /* peer's incarnation */
- int up_incrn_is_set; /* 0 if peer's incarnation
- * hasn't been set so far */
- cfs_atomic_t up_refcount; /* # of users */
- pthread_mutex_t up_lock; /* serialize */
- int up_errored; /* a flag for lnet_notify() */
- cfs_time_t up_last_alive; /* when the peer was last alive */
+ * us with the peer */
+ lnet_ni_t *up_ni; /* pointer to parent NI */
+ __u64 up_incarnation; /* peer's incarnation */
+ int up_incrn_is_set;/* 0 if peer's incarnation
+ * hasn't been set so far */
+ cfs_mt_atomic_t up_refcount; /* # of users */
+ pthread_mutex_t up_lock; /* serialize */
+ int up_errored; /* a flag for lnet_notify() */
+ cfs_time_t up_last_alive; /* when the peer was last alive */
} usock_peer_t;
typedef struct {
- cfs_socket_t *upt_notifier[2]; /* notifier sockets: 1st for
- writing, 2nd for reading */
- struct pollfd *upt_pollfd; /* poll fds */
- int upt_nfds; /* active poll fds */
- int upt_npollfd; /* allocated poll fds */
- usock_conn_t **upt_idx2conn; /* conns corresponding to
- * upt_pollfd[idx] */
- int *upt_skip; /* skip chain */
- int *upt_fd2idx; /* index into upt_pollfd[]
- * by fd */
- int upt_nfd2idx; /* # of allocated elements
- * of upt_fd2idx[] */
- struct list_head upt_stale_list; /* list of orphaned conns */
- struct list_head upt_pollrequests; /* list of poll requests */
- pthread_mutex_t upt_pollrequests_lock; /* serialize */
- int upt_errno; /* non-zero if errored */
- struct cfs_completion upt_completion; /* wait/signal facility for
- * syncronizing shutdown */
+ cfs_socket_t *upt_notifier[2]; /* notifier sockets: 1st for
+ * writing, 2nd for reading */
+ struct pollfd *upt_pollfd; /* poll fds */
+ int upt_nfds; /* active poll fds */
+ int upt_npollfd; /* allocated poll fds */
+ usock_conn_t **upt_idx2conn; /* conns corresponding to
+ * upt_pollfd[idx] */
+ int *upt_skip; /* skip chain */
+ int *upt_fd2idx; /* index into upt_pollfd[]
+ * by fd */
+ int upt_nfd2idx; /* # of allocated elements
+ * of upt_fd2idx[] */
+ cfs_list_t upt_stale_list; /* list of orphaned conns */
+ cfs_list_t upt_pollrequests; /* list of poll requests */
+ pthread_mutex_t upt_pollrequests_lock; /* serialize */
+ int upt_errno; /* non-zero if errored */
+ cfs_mt_completion_t upt_completion; /* wait/signal facility for
+ * syncronizing shutdown */
} usock_pollthread_t;
/* Number of elements in upt_pollfd[], upt_idx2conn[] and upt_fd2idx[]
usock_pollthread_t *ud_pollthreads; /* their state */
int ud_shutdown; /* shutdown flag */
int ud_nets_count; /* # of instances */
- struct list_head ud_peers[UD_PEER_HASH_SIZE]; /* peer hash table */
+ cfs_list_t ud_peers[UD_PEER_HASH_SIZE]; /* peer hash table */
pthread_rwlock_t ud_peers_lock; /* serialize */
} usock_data_t;
extern usock_tunables_t usock_tuns;
typedef struct usock_preq_s {
- int upr_type; /* type of requested action */
+ int upr_type; /* type of requested action */
short upr_value; /* bitmask of POLLIN and POLLOUT bits */
usock_conn_t * upr_conn; /* a conn for the sake of which
* action will be performed */
- struct list_head upr_list; /* neccessary to form list */
+ cfs_list_t upr_list; /* neccessary to form list */
} usock_pollrequest_t;
/* Allowable poll request types are: */
#define POLL_SET_REQUEST 5
typedef struct {
- struct list_head zc_list; /* neccessary to form zc_ack list */
+ cfs_list_t zc_list; /* neccessary to form zc_ack list */
__u64 zc_cookie; /* zero-copy cookie */
} usock_zc_ack_t;
static inline void
usocklnd_conn_addref(usock_conn_t *conn)
{
- LASSERT (cfs_atomic_read(&conn->uc_refcount) > 0);
- cfs_atomic_inc(&conn->uc_refcount);
+ LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
+ cfs_mt_atomic_inc(&conn->uc_refcount);
}
void usocklnd_destroy_conn(usock_conn_t *conn);
static inline void
usocklnd_conn_decref(usock_conn_t *conn)
{
- LASSERT (cfs_atomic_read(&conn->uc_refcount) > 0);
- if (cfs_atomic_dec_and_test(&conn->uc_refcount))
+ LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
+ if (cfs_mt_atomic_dec_and_test(&conn->uc_refcount))
usocklnd_destroy_conn(conn);
}
static inline void
usocklnd_peer_addref(usock_peer_t *peer)
{
- LASSERT (cfs_atomic_read(&peer->up_refcount) > 0);
- cfs_atomic_inc(&peer->up_refcount);
+ LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
+ cfs_mt_atomic_inc(&peer->up_refcount);
}
void usocklnd_destroy_peer(usock_peer_t *peer);
static inline void
usocklnd_peer_decref(usock_peer_t *peer)
{
- LASSERT (cfs_atomic_read(&peer->up_refcount) > 0);
- if (cfs_atomic_dec_and_test(&peer->up_refcount))
+ LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
+ if (cfs_mt_atomic_dec_and_test(&peer->up_refcount))
usocklnd_destroy_peer(peer);
}
return ip % usock_data.ud_npollthreads;
}
-static inline struct list_head *
+static inline cfs_list_t *
usocklnd_nid2peerlist(lnet_nid_t nid)
{
unsigned int hash = ((unsigned int)nid) % UD_PEER_HASH_SIZE;
int usocklnd_activeconn_hellorecv(usock_conn_t *conn);
int usocklnd_passiveconn_hellorecv(usock_conn_t *conn);
int usocklnd_write_handler(usock_conn_t *conn);
-usock_tx_t * usocklnd_try_piggyback(struct list_head *tx_list_p,
- struct list_head *zcack_list_p);
+usock_tx_t * usocklnd_try_piggyback(cfs_list_t *tx_list_p,
+ cfs_list_t *zcack_list_p);
int usocklnd_activeconn_hellosent(usock_conn_t *conn);
int usocklnd_passiveconn_hellosent(usock_conn_t *conn);
int usocklnd_send_tx(usock_conn_t *conn, usock_tx_t *tx);
usock_tx_t *usocklnd_create_cr_hello_tx(lnet_ni_t *ni,
int type, lnet_nid_t peer_nid);
void usocklnd_destroy_tx(lnet_ni_t *ni, usock_tx_t *tx);
-void usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist);
-void usocklnd_destroy_zcack_list(struct list_head *zcack_list);
+void usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist);
+void usocklnd_destroy_zcack_list(cfs_list_t *zcack_list);
void usocklnd_destroy_peer (usock_peer_t *peer);
int usocklnd_get_conn_type(lnet_msg_t *lntmsg);
int usocklnd_type2idx(int type);
rc = usocklnd_send_tx(conn, tx);
if (rc == 0) { /* partial send or connection closed */
pthread_mutex_lock(&conn->uc_lock);
- list_add(&tx->tx_list, &conn->uc_tx_list);
+ cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
conn->uc_sending = 0;
pthread_mutex_unlock(&conn->uc_lock);
partial_send = 1;
/* schedule write handler */
if (partial_send ||
(conn->uc_state == UC_READY &&
- (!list_empty(&conn->uc_tx_list) ||
- !list_empty(&conn->uc_zcack_list)))) {
+ (!cfs_list_empty(&conn->uc_tx_list) ||
+ !cfs_list_empty(&conn->uc_zcack_list)))) {
conn->uc_tx_deadline =
cfs_time_shift(usock_tuns.ut_timeout);
conn->uc_tx_flag = 1;
strcpy(filename, argv[1]);
else
sprintf(filename, "%s"CFS_TIME_T".%u",
- DEBUG_FILE_PATH_DEFAULT, time(NULL), getpid());
+ LIBCFS_DEBUG_FILE_PATH_DEFAULT, time(NULL), getpid());
if (stat(filename, &st) == 0 && S_ISREG(st.st_mode))
unlink(filename);
}
void
-lst_free_rpcent(struct list_head *head)
+lst_free_rpcent(cfs_list_t *head)
{
lstcon_rpc_ent_t *ent;
- while (!list_empty(head)) {
- ent = list_entry(head->next, lstcon_rpc_ent_t, rpe_link);
+ while (!cfs_list_empty(head)) {
+ ent = cfs_list_entry(head->next, lstcon_rpc_ent_t, rpe_link);
- list_del(&ent->rpe_link);
+ cfs_list_del(&ent->rpe_link);
free(ent);
}
}
void
-lst_reset_rpcent(struct list_head *head)
+lst_reset_rpcent(cfs_list_t *head)
{
lstcon_rpc_ent_t *ent;
}
int
-lst_alloc_rpcent(struct list_head *head, int count, int offset)
+lst_alloc_rpcent(cfs_list_t *head, int count, int offset)
{
lstcon_rpc_ent_t *ent;
int i;
ent->rpe_sid = LST_INVALID_SID;
ent->rpe_peer.nid = LNET_NID_ANY;
ent->rpe_peer.pid = LNET_PID_ANY;
- list_add(&ent->rpe_link, head);
+ cfs_list_add(&ent->rpe_link, head);
}
return 0;
}
void
-lst_print_transerr(struct list_head *head, char *optstr)
+lst_print_transerr(cfs_list_t *head, char *optstr)
{
lstcon_rpc_ent_t *ent;
int *idx, int *count, lstcon_node_ent_t *dents);
int lst_query_batch_ioctl(char *batch, int test, int server,
- int timeout, struct list_head *head);
+ int timeout, cfs_list_t *head);
int
lst_ioctl(unsigned int opc, void *buf, int len)
int
lst_ping_ioctl(char *str, int type, int timeout,
- int count, lnet_process_id_t *ids, struct list_head *head)
+ int count, lnet_process_id_t *ids, cfs_list_t *head)
{
lstio_debug_args_t args = {0};
int
jt_lst_ping(int argc, char **argv)
{
- struct list_head head;
+ cfs_list_t head;
lnet_process_id_t *ids = NULL;
lstcon_rpc_ent_t *ent = NULL;
char *str = NULL;
int
lst_add_nodes_ioctl (char *name, int count, lnet_process_id_t *ids,
- struct list_head *resultp)
+ cfs_list_t *resultp)
{
lstio_group_nodes_args_t args = {0};
int
jt_lst_add_group(int argc, char **argv)
{
- struct list_head head;
+ cfs_list_t head;
lnet_process_id_t *ids;
char *name;
int count;
int
lst_update_group_ioctl(int opc, char *name, int clean, int count,
- lnet_process_id_t *ids, struct list_head *resultp)
+ lnet_process_id_t *ids, cfs_list_t *resultp)
{
lstio_group_update_args_t args = {0};
int
jt_lst_update_group(int argc, char **argv)
{
- struct list_head head;
+ cfs_list_t head;
lnet_process_id_t *ids = NULL;
char *str = NULL;
char *grp = NULL;
int
lst_stat_ioctl (char *name, int count, lnet_process_id_t *idsp,
- int timeout, struct list_head *resultp)
+ int timeout, cfs_list_t *resultp)
{
lstio_stat_args_t args = {0};
}
typedef struct {
- struct list_head srp_link;
+ cfs_list_t srp_link;
int srp_count;
char *srp_name;
lnet_process_id_t *srp_ids;
- struct list_head srp_result[2];
+ cfs_list_t srp_result[2];
} lst_stat_req_param_t;
static void
}
void
-lst_print_stat(char *name, struct list_head *resultp,
+lst_print_stat(char *name, cfs_list_t *resultp,
int idx, int lnet, int bwrt, int rdwr, int type)
{
- struct list_head tmp[2];
+ cfs_list_t tmp[2];
lstcon_rpc_ent_t *new;
lstcon_rpc_ent_t *old;
sfw_counters_t *sfwk_new;
memset(&lnet_stat_result, 0, sizeof(lnet_stat_result));
- while (!list_empty(&resultp[idx])) {
- if (list_empty(&resultp[1 - idx])) {
+ while (!cfs_list_empty(&resultp[idx])) {
+ if (cfs_list_empty(&resultp[1 - idx])) {
fprintf(stderr, "Group is changed, re-run stat\n");
break;
}
- new = list_entry(resultp[idx].next, lstcon_rpc_ent_t, rpe_link);
- old = list_entry(resultp[1 - idx].next, lstcon_rpc_ent_t, rpe_link);
+ new = cfs_list_entry(resultp[idx].next, lstcon_rpc_ent_t,
+ rpe_link);
+ old = cfs_list_entry(resultp[1 - idx].next, lstcon_rpc_ent_t,
+ rpe_link);
/* first time get stats result, can't calculate diff */
if (new->rpe_peer.nid == LNET_NID_ANY)
break;
}
- list_del(&new->rpe_link);
- list_add_tail(&new->rpe_link, &tmp[idx]);
+ cfs_list_del(&new->rpe_link);
+ cfs_list_add_tail(&new->rpe_link, &tmp[idx]);
- list_del(&old->rpe_link);
- list_add_tail(&old->rpe_link, &tmp[1 - idx]);
+ cfs_list_del(&old->rpe_link);
+ cfs_list_add_tail(&old->rpe_link, &tmp[1 - idx]);
if (new->rpe_rpc_errno != 0 || new->rpe_fwk_errno != 0 ||
old->rpe_rpc_errno != 0 || old->rpe_fwk_errno != 0) {
lst_cal_lnet_stat(delta, lnet_new, lnet_old);
}
- list_splice(&tmp[idx], &resultp[idx]);
- list_splice(&tmp[1 - idx], &resultp[1 - idx]);
+ cfs_list_splice(&tmp[idx], &resultp[idx]);
+ cfs_list_splice(&tmp[1 - idx], &resultp[1 - idx]);
if (errcount > 0)
fprintf(stdout, "Failed to stat on %d nodes\n", errcount);
int
jt_lst_stat(int argc, char **argv)
{
- struct list_head head;
+ cfs_list_t head;
lst_stat_req_param_t *srp;
time_t last = 0;
int optidx = 0;
if (rc != 0)
goto out;
- list_add_tail(&srp->srp_link, &head);
+ cfs_list_add_tail(&srp->srp_link, &head);
}
while (1) {
}
out:
- while (!list_empty(&head)) {
- srp = list_entry(head.next, lst_stat_req_param_t, srp_link);
+ while (!cfs_list_empty(&head)) {
+ srp = cfs_list_entry(head.next, lst_stat_req_param_t, srp_link);
- list_del(&srp->srp_link);
+ cfs_list_del(&srp->srp_link);
lst_stat_req_param_free(srp);
}
int
jt_lst_show_error(int argc, char **argv)
{
- struct list_head head;
+ cfs_list_t head;
lst_stat_req_param_t *srp;
lstcon_rpc_ent_t *ent;
sfw_counters_t *sfwk;
if (rc != 0)
goto out;
- list_add_tail(&srp->srp_link, &head);
+ cfs_list_add_tail(&srp->srp_link, &head);
}
cfs_list_for_each_entry_typed(srp, &head, lst_stat_req_param_t,
fprintf(stdout, "Total %d error nodes in %s\n", ecount, srp->srp_name);
}
out:
- while (!list_empty(&head)) {
- srp = list_entry(head.next, lst_stat_req_param_t, srp_link);
+ while (!cfs_list_empty(&head)) {
+ srp = cfs_list_entry(head.next, lst_stat_req_param_t, srp_link);
- list_del(&srp->srp_link);
+ cfs_list_del(&srp->srp_link);
lst_stat_req_param_free(srp);
}
}
int
-lst_start_batch_ioctl (char *name, int timeout, struct list_head *resultp)
+lst_start_batch_ioctl (char *name, int timeout, cfs_list_t *resultp)
{
lstio_batch_run_args_t args = {0};
int
jt_lst_start_batch(int argc, char **argv)
{
- struct list_head head;
+ cfs_list_t head;
char *batch;
int optidx = 0;
int timeout = 0;
}
int
-lst_stop_batch_ioctl(char *name, int force, struct list_head *resultp)
+lst_stop_batch_ioctl(char *name, int force, cfs_list_t *resultp)
{
lstio_batch_stop_args_t args = {0};
int
jt_lst_stop_batch(int argc, char **argv)
{
- struct list_head head;
+ cfs_list_t head;
char *batch;
int force = 0;
int optidx;
int
lst_query_batch_ioctl(char *batch, int test, int server,
- int timeout, struct list_head *head)
+ int timeout, cfs_list_t *head)
{
lstio_batch_query_args_t args = {0};
}
void
-lst_print_tsb_verbose(struct list_head *head,
+lst_print_tsb_verbose(cfs_list_t *head,
int active, int idle, int error)
{
lstcon_rpc_ent_t *ent;
jt_lst_query_batch(int argc, char **argv)
{
lstcon_test_batch_ent_t ent;
- struct list_head head;
- char *batch = NULL;
- time_t last = 0;
- int optidx = 0;
- int verbose = 0;
- int server = 0;
- int timeout = 5; /* default 5 seconds */
- int delay = 5; /* default 5 seconds */
- int loop = 1; /* default 1 loop */
- int active = 0;
- int error = 0;
- int idle = 0;
- int count = 0;
- int test = 0;
- int rc = 0;
- int c = 0;
- int i;
+ cfs_list_t head;
+ char *batch = NULL;
+ time_t last = 0;
+ int optidx = 0;
+ int verbose = 0;
+ int server = 0;
+ int timeout = 5; /* default 5 seconds */
+ int delay = 5; /* default 5 seconds */
+ int loop = 1; /* default 1 loop */
+ int active = 0;
+ int error = 0;
+ int idle = 0;
+ int count = 0;
+ int test = 0;
+ int rc = 0;
+ int c = 0;
+ int i;
static struct option query_batch_opts[] =
{
int
lst_add_test_ioctl(char *batch, int type, int loop, int concur,
int dist, int span, char *sgrp, char *dgrp,
- void *param, int plen, int *retp, struct list_head *resultp)
+ void *param, int plen, int *retp, cfs_list_t *resultp)
{
lstio_test_args_t args = {0};
int
jt_lst_add_test(int argc, char **argv)
{
- struct list_head head;
- char *batch = NULL;
- char *test = NULL;
- char *dstr = NULL;
- char *from = NULL;
- char *to = NULL;
- void *param = NULL;
- int optidx = 0;
- int concur = 1;
- int loop = -1;
- int dist = 1;
- int span = 1;
- int plen = 0;
- int fcount = 0;
- int tcount = 0;
- int ret = 0;
- int type;
- int rc;
- int c;
+ cfs_list_t head;
+ char *batch = NULL;
+ char *test = NULL;
+ char *dstr = NULL;
+ char *from = NULL;
+ char *to = NULL;
+ void *param = NULL;
+ int optidx = 0;
+ int concur = 1;
+ int loop = -1;
+ int dist = 1;
+ int span = 1;
+ int plen = 0;
+ int fcount = 0;
+ int tcount = 0;
+ int ret = 0;
+ int type;
+ int rc;
+ int c;
static struct option add_test_opts[] =
{
}
static int
-lwt_snapshot(cycles_t *now, int *ncpu, int *totalsize,
+lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *totalsize,
lwt_event_t *events, int size)
{
struct libcfs_ioctl_data data;
}
static int
-lwt_print(FILE *f, cycles_t t0, cycles_t tlast, double mhz, int cpu, lwt_event_t *e)
+lwt_print(FILE *f, cfs_cycles_t t0, cfs_cycles_t tlast, double mhz, int cpu,
+ lwt_event_t *e)
{
#ifndef __WORDSIZE
# error "__WORDSIZE not defined"
int rc;
int i;
double mhz;
- cycles_t t0;
- cycles_t tlast;
- cycles_t tnow;
+ cfs_cycles_t t0;
+ cfs_cycles_t tlast;
+ cfs_cycles_t tnow;
struct timeval tvnow;
int printed_date = 0;
int nlines = 0;
if (t0 <= next_event[cpu]->lwte_when) {
/* on or after the first event */
if (!printed_date) {
- cycles_t du = (tnow - t0) / mhz;
+ cfs_cycles_t du = (tnow - t0) / mhz;
time_t then = tvnow.tv_sec - du/1000000;
if (du % 1000000 > tvnow.tv_usec)
}
static int cmm_statfs(const struct lu_env *env, struct md_device *md,
- struct kstatfs *sfs)
+ cfs_kstatfs_t *sfs)
{
struct cmm_device *cmm_dev = md2cmm_dev(md);
int rc;
/* get the max mdsize and cookiesize from lower layer */
rc = cmm_maxsize_get(env, &cmm->cmm_md_dev, &max_mdsize,
- &max_cookiesize);
+ &max_cookiesize);
if (rc)
RETURN(rc);
- spin_lock(&cmm->cmm_tgt_guard);
- list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
- mc_linkage) {
+ cfs_spin_lock(&cmm->cmm_tgt_guard);
+ cfs_list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
+ mc_linkage) {
cmm_mdc_init_ea_size(env, mc, max_mdsize, max_cookiesize);
}
- spin_unlock(&cmm->cmm_tgt_guard);
+ cfs_spin_unlock(&cmm->cmm_tgt_guard);
RETURN(rc);
}
RETURN(-EINVAL);
}
- spin_lock(&cm->cmm_tgt_guard);
- list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
- mc_linkage) {
+ cfs_spin_lock(&cm->cmm_tgt_guard);
+ cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
+ mc_linkage) {
if (mc->mc_num == mdc_num) {
- spin_unlock(&cm->cmm_tgt_guard);
+ cfs_spin_unlock(&cm->cmm_tgt_guard);
RETURN(-EEXIST);
}
}
- spin_unlock(&cm->cmm_tgt_guard);
+ cfs_spin_unlock(&cm->cmm_tgt_guard);
ld = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
if (IS_ERR(ld))
RETURN(PTR_ERR(ld));
RETURN(rc);
}
- spin_lock(&cm->cmm_tgt_guard);
- list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
- mc_linkage) {
+ cfs_spin_lock(&cm->cmm_tgt_guard);
+ cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
+ mc_linkage) {
if (mc->mc_num == mdc_num) {
- spin_unlock(&cm->cmm_tgt_guard);
+ cfs_spin_unlock(&cm->cmm_tgt_guard);
ldt->ldt_ops->ldto_device_fini(env, ld);
ldt->ldt_ops->ldto_device_free(env, ld);
RETURN(-EEXIST);
}
}
mc = lu2mdc_dev(ld);
- list_add_tail(&mc->mc_linkage, &cm->cmm_targets);
+ cfs_list_add_tail(&mc->mc_linkage, &cm->cmm_targets);
cm->cmm_tgt_count++;
#ifdef HAVE_QUOTA_SUPPORT
first = cm->cmm_tgt_count;
#endif
- spin_unlock(&cm->cmm_tgt_guard);
+ cfs_spin_unlock(&cm->cmm_tgt_guard);
lu_device_get(cmm_lu);
lu_ref_add(&cmm_lu->ld_reference, "mdc-child", ld);
fld_client_del_target(cm->cmm_fld, cm->cmm_local_num);
/* Finish all mdc devices. */
- spin_lock(&cm->cmm_tgt_guard);
- list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
+ cfs_spin_lock(&cm->cmm_tgt_guard);
+ cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
struct lu_device *ld_m = mdc2lu_dev(mc);
fld_client_del_target(cm->cmm_fld, mc->mc_num);
ld_m->ld_ops->ldo_process_config(env, ld_m, cfg);
}
- spin_unlock(&cm->cmm_tgt_guard);
+ cfs_spin_unlock(&cm->cmm_tgt_guard);
/* remove upcall device*/
md_upcall_fini(&cm->cmm_md_dev);
ENTRY;
LASSERT(m->cmm_tgt_count == 0);
- LASSERT(list_empty(&m->cmm_targets));
+ LASSERT(cfs_list_empty(&m->cmm_targets));
if (m->cmm_fld != NULL) {
OBD_FREE_PTR(m->cmm_fld);
m->cmm_fld = NULL;
int err = 0;
ENTRY;
- spin_lock_init(&m->cmm_tgt_guard);
+ cfs_spin_lock_init(&m->cmm_tgt_guard);
CFS_INIT_LIST_HEAD(&m->cmm_targets);
m->cmm_tgt_count = 0;
m->cmm_child = lu2md_dev(next);
ENTRY;
/* Finish all mdc devices */
- spin_lock(&cm->cmm_tgt_guard);
- list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
+ cfs_spin_lock(&cm->cmm_tgt_guard);
+ cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
struct lu_device *ld_m = mdc2lu_dev(mc);
struct lu_device *ld_c = cmm2lu_dev(cm);
- list_del_init(&mc->mc_linkage);
+ cfs_list_del_init(&mc->mc_linkage);
lu_ref_del(&ld_c->ld_reference, "mdc-child", ld_m);
lu_device_put(ld_c);
ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m);
ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m);
cm->cmm_tgt_count--;
}
- spin_unlock(&cm->cmm_tgt_guard);
+ cfs_spin_unlock(&cm->cmm_tgt_guard);
fld_client_fini(cm->cmm_fld);
ls = cmm2lu_dev(cm)->ld_site;
/* other MD servers in cluster */
mdsno_t cmm_local_num;
__u32 cmm_tgt_count;
- struct list_head cmm_targets;
- spinlock_t cmm_tgt_guard;
+ cfs_list_t cmm_targets;
+ cfs_spinlock_t cmm_tgt_guard;
cfs_proc_dir_entry_t *cmm_proc_entry;
struct lprocfs_stats *cmm_stats;
};
struct lu_device *next = NULL;
struct mdc_device *mdc;
- spin_lock(&d->cmm_tgt_guard);
- list_for_each_entry(mdc, &d->cmm_targets, mc_linkage) {
+ cfs_spin_lock(&d->cmm_tgt_guard);
+ cfs_list_for_each_entry(mdc, &d->cmm_targets, mc_linkage) {
if (mdc->mc_num == num) {
next = mdc2lu_dev(mdc);
break;
}
}
- spin_unlock(&d->cmm_tgt_guard);
+ cfs_spin_unlock(&d->cmm_tgt_guard);
return next;
}
LASSERT(cmm != NULL && mc != NULL && fid != NULL);
- down(&mc->mc_fid_sem);
+ cfs_down(&mc->mc_fid_sem);
/* Alloc new fid on @mc. */
rc = obd_fid_alloc(mc->mc_desc.cl_exp, fid, NULL);
if (rc > 0)
rc = 0;
- up(&mc->mc_fid_sem);
+ cfs_up(&mc->mc_fid_sem);
RETURN(rc);
}
slave_lmv->mea_magic = MEA_MAGIC_HASH_SEGMENT;
slave_lmv->mea_count = 0;
- list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets, mc_linkage) {
+ cfs_list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets, mc_linkage) {
rc = cmm_split_slave_create(env, cmm, mc, &lmv->mea_ids[i],
ma, slave_lmv, sizeof(*slave_lmv));
if (rc)
mc->mc_md_dev.md_ops = &mdc_md_ops;
ld = mdc2lu_dev(mc);
ld->ld_ops = &mdc_lu_ops;
- sema_init(&mc->mc_fid_sem, 1);
+ cfs_sema_init(&mc->mc_fid_sem, 1);
}
RETURN (ld);
{
struct mdc_device *mc = lu2mdc_dev(ld);
- LASSERTF(atomic_read(&ld->ld_ref) == 0,
- "Refcount = %i\n", atomic_read(&ld->ld_ref));
- LASSERT(list_empty(&mc->mc_linkage));
+ LASSERTF(cfs_atomic_read(&ld->ld_ref) == 0,
+ "Refcount = %i\n", cfs_atomic_read(&ld->ld_ref));
+ LASSERT(cfs_list_empty(&mc->mc_linkage));
md_device_fini(&mc->mc_md_dev);
OBD_FREE_PTR(mc);
return NULL;
struct mdc_device {
struct md_device mc_md_dev;
/* other MD servers in cluster */
- struct list_head mc_linkage;
+ cfs_list_t mc_linkage;
mdsno_t mc_num;
struct mdc_cli_desc mc_desc;
- struct semaphore mc_fid_sem;
+ cfs_semaphore_t mc_fid_sem;
};
struct mdc_thread_info {
* Ask client for new range, assign that range to ->seq_space and write
* seq state to backing store should be atomic.
*/
- down(&seq->lss_sem);
+ cfs_down(&seq->lss_sem);
if (cli == NULL) {
CDEBUG(D_INFO, "%s: Detached sequence client %s\n",
cli->lcs_space.lsr_mdt = seq->lss_site->ms_node_id;
EXIT;
out_up:
- up(&seq->lss_sem);
+ cfs_up(&seq->lss_sem);
return rc;
}
EXPORT_SYMBOL(seq_server_set_cli);
int rc;
ENTRY;
- down(&seq->lss_sem);
+ cfs_down(&seq->lss_sem);
rc = __seq_server_alloc_super(seq, in, out, env);
- up(&seq->lss_sem);
+ cfs_up(&seq->lss_sem);
RETURN(rc);
}
int rc;
ENTRY;
- down(&seq->lss_sem);
+ cfs_down(&seq->lss_sem);
rc = __seq_server_alloc_meta(seq, in, out, env);
- up(&seq->lss_sem);
+ cfs_up(&seq->lss_sem);
RETURN(rc);
}
seq->lss_type = type;
seq->lss_site = ms;
range_init(&seq->lss_space);
- sema_init(&seq->lss_sem, 1);
+ cfs_sema_init(&seq->lss_sem, 1);
seq->lss_width = is_srv ?
LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
int rc;
ENTRY;
- down(&seq->lcs_sem);
+ cfs_down(&seq->lcs_sem);
#ifdef __KERNEL__
if (seq->lcs_srv) {
#ifdef __KERNEL__
}
#endif
- up(&seq->lcs_sem);
+ cfs_up(&seq->lcs_sem);
RETURN(rc);
}
LASSERT(seq != NULL);
LASSERT(fid != NULL);
- down(&seq->lcs_sem);
+ cfs_down(&seq->lcs_sem);
if (fid_is_zero(&seq->lcs_fid) ||
fid_oid(&seq->lcs_fid) >= seq->lcs_width)
if (rc) {
CERROR("%s: Can't allocate new sequence, "
"rc %d\n", seq->lcs_name, rc);
- up(&seq->lcs_sem);
+ cfs_up(&seq->lcs_sem);
RETURN(rc);
}
}
*fid = seq->lcs_fid;
- up(&seq->lcs_sem);
+ cfs_up(&seq->lcs_sem);
CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
RETURN(rc);
void seq_client_flush(struct lu_client_seq *seq)
{
LASSERT(seq != NULL);
- down(&seq->lcs_sem);
+ cfs_down(&seq->lcs_sem);
fid_zero(&seq->lcs_fid);
/**
* this id shld not be used for seq range allocation.
seq->lcs_space.lsr_mdt = -1;
range_init(&seq->lcs_space);
- up(&seq->lcs_sem);
+ cfs_up(&seq->lcs_sem);
}
EXPORT_SYMBOL(seq_client_flush);
seq->lcs_exp = exp;
seq->lcs_srv = srv;
seq->lcs_type = type;
- sema_init(&seq->lcs_sem, 1);
+ cfs_sema_init(&seq->lcs_sem, 1);
seq->lcs_width = LUSTRE_SEQ_MAX_WIDTH;
/* Make sure that things are clear before work is started. */
LASSERT(seq != NULL);
- down(&seq->lss_sem);
+ cfs_down(&seq->lss_sem);
rc = seq_proc_write_common(file, buffer, count,
data, &seq->lss_space);
if (rc == 0) {
CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
seq->lss_name, PRANGE(&seq->lss_space));
}
-
- up(&seq->lss_sem);
-
+
+ cfs_up(&seq->lss_sem);
+
RETURN(count);
}
LASSERT(seq != NULL);
- down(&seq->lss_sem);
+ cfs_down(&seq->lss_sem);
rc = seq_proc_read_common(page, start, off, count, eof,
data, &seq->lss_space);
- up(&seq->lss_sem);
-
+ cfs_up(&seq->lss_sem);
+
RETURN(rc);
}
} else {
rc = snprintf(page, count, "<none>\n");
}
-
+
RETURN(rc);
}
LASSERT(seq != NULL);
- down(&seq->lss_sem);
+ cfs_down(&seq->lss_sem);
rc = lprocfs_write_helper(buffer, count, &val);
if (rc)
CDEBUG(D_INFO, "%s: Width: "LPU64"\n",
seq->lss_name, seq->lss_width);
}
-
- up(&seq->lss_sem);
-
+
+ cfs_up(&seq->lss_sem);
+
RETURN(count);
}
LASSERT(seq != NULL);
- down(&seq->lss_sem);
+ cfs_down(&seq->lss_sem);
rc = snprintf(page, count, LPU64"\n", seq->lss_width);
- up(&seq->lss_sem);
-
+ cfs_up(&seq->lss_sem);
+
RETURN(rc);
}
LASSERT(seq != NULL);
- down(&seq->lcs_sem);
+ cfs_down(&seq->lcs_sem);
rc = seq_proc_write_common(file, buffer, count,
data, &seq->lcs_space);
CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
seq->lcs_name, PRANGE(&seq->lcs_space));
}
-
- up(&seq->lcs_sem);
-
+
+ cfs_up(&seq->lcs_sem);
+
RETURN(count);
}
LASSERT(seq != NULL);
- down(&seq->lcs_sem);
+ cfs_down(&seq->lcs_sem);
rc = seq_proc_read_common(page, start, off, count, eof,
data, &seq->lcs_space);
- up(&seq->lcs_sem);
-
+ cfs_up(&seq->lcs_sem);
+
RETURN(rc);
}
LASSERT(seq != NULL);
- down(&seq->lcs_sem);
+ cfs_down(&seq->lcs_sem);
rc = lprocfs_write_helper(buffer, count, &val);
if (rc)
seq->lcs_name, seq->lcs_width);
}
}
-
- up(&seq->lcs_sem);
-
+
+ cfs_up(&seq->lcs_sem);
+
RETURN(count);
}
LASSERT(seq != NULL);
- down(&seq->lcs_sem);
+ cfs_down(&seq->lcs_sem);
rc = snprintf(page, count, LPU64"\n", seq->lcs_width);
- up(&seq->lcs_sem);
-
+ cfs_up(&seq->lcs_sem);
+
RETURN(rc);
}
LASSERT(seq != NULL);
- down(&seq->lcs_sem);
+ cfs_down(&seq->lcs_sem);
rc = snprintf(page, count, DFID"\n", PFID(&seq->lcs_fid));
- up(&seq->lcs_sem);
-
+ cfs_up(&seq->lcs_sem);
+
RETURN(rc);
}
CFS_INIT_LIST_HEAD(&cache->fci_lru);
cache->fci_cache_count = 0;
- spin_lock_init(&cache->fci_lock);
+ cfs_spin_lock_init(&cache->fci_lock);
strncpy(cache->fci_name, name,
sizeof(cache->fci_name));
static inline void fld_cache_entry_delete(struct fld_cache *cache,
struct fld_cache_entry *node)
{
- list_del(&node->fce_list);
- list_del(&node->fce_lru);
+ cfs_list_del(&node->fce_list);
+ cfs_list_del(&node->fce_lru);
cache->fci_cache_count--;
OBD_FREE_PTR(node);
}
struct fld_cache_entry *f_next;
struct lu_seq_range *c_range;
struct lu_seq_range *n_range;
- struct list_head *head = &cache->fci_entries_head;
+ cfs_list_t *head = &cache->fci_entries_head;
ENTRY;
restart_fixup:
- list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+ cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
c_range = &f_curr->fce_range;
n_range = &f_next->fce_range;
*/
static inline void fld_cache_entry_add(struct fld_cache *cache,
struct fld_cache_entry *f_new,
- struct list_head *pos)
+ cfs_list_t *pos)
{
- list_add(&f_new->fce_list, pos);
- list_add(&f_new->fce_lru, &cache->fci_lru);
+ cfs_list_add(&f_new->fce_list, pos);
+ cfs_list_add(&f_new->fce_lru, &cache->fci_lru);
cache->fci_cache_count++;
fld_fix_new_list(cache);
static int fld_cache_shrink(struct fld_cache *cache)
{
struct fld_cache_entry *flde;
- struct list_head *curr;
+ cfs_list_t *curr;
int num = 0;
ENTRY;
while (cache->fci_cache_count + cache->fci_threshold >
cache->fci_cache_size && curr != &cache->fci_lru) {
- flde = list_entry(curr, struct fld_cache_entry, fce_lru);
+ flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru);
curr = curr->prev;
fld_cache_entry_delete(cache, flde);
num++;
{
ENTRY;
- spin_lock(&cache->fci_lock);
+ cfs_spin_lock(&cache->fci_lock);
cache->fci_cache_size = 0;
fld_cache_shrink(cache);
- spin_unlock(&cache->fci_lock);
+ cfs_spin_unlock(&cache->fci_lock);
EXIT;
}
struct fld_cache_entry *f_new;
struct fld_cache_entry *f_curr;
struct fld_cache_entry *n;
- struct list_head *head;
- struct list_head *prev = NULL;
+ cfs_list_t *head;
+ cfs_list_t *prev = NULL;
const seqno_t new_start = range->lsr_start;
const seqno_t new_end = range->lsr_end;
ENTRY;
* So we don't need to search new entry before starting insertion loop.
*/
- spin_lock(&cache->fci_lock);
+ cfs_spin_lock(&cache->fci_lock);
fld_cache_shrink(cache);
head = &cache->fci_entries_head;
- list_for_each_entry_safe(f_curr, n, head, fce_list) {
+ cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
/* add list if next is end of list */
if (new_end < f_curr->fce_range.lsr_start)
break;
/* Add new entry to cache and lru list. */
fld_cache_entry_add(cache, f_new, prev);
out:
- spin_unlock(&cache->fci_lock);
+ cfs_spin_unlock(&cache->fci_lock);
EXIT;
}
const seqno_t seq, struct lu_seq_range *range)
{
struct fld_cache_entry *flde;
- struct list_head *head;
+ cfs_list_t *head;
ENTRY;
- spin_lock(&cache->fci_lock);
+ cfs_spin_lock(&cache->fci_lock);
head = &cache->fci_entries_head;
cache->fci_stat.fst_count++;
- list_for_each_entry(flde, head, fce_list) {
+ cfs_list_for_each_entry(flde, head, fce_list) {
if (flde->fce_range.lsr_start > seq)
break;
*range = flde->fce_range;
/* update position of this entry in lru list. */
- list_move(&flde->fce_lru, &cache->fci_lru);
+ cfs_list_move(&flde->fce_lru, &cache->fci_lru);
cache->fci_stat.fst_cache++;
- spin_unlock(&cache->fci_lock);
+ cfs_spin_unlock(&cache->fci_lock);
RETURN(0);
}
}
- spin_unlock(&cache->fci_lock);
+ cfs_spin_unlock(&cache->fci_lock);
RETURN(-ENOENT);
}
ENTRY;
info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- mutex_lock(&fld->lsf_lock);
+ cfs_mutex_lock(&fld->lsf_lock);
erange = &info->fti_lrange;
new = &info->fti_irange;
if (rc == 0)
fld_cache_insert(fld->lsf_cache, new);
- mutex_unlock(&fld->lsf_lock);
+ cfs_mutex_unlock(&fld->lsf_lock);
CDEBUG((rc != 0 ? D_ERROR : D_INFO),
"%s: FLD create: given range : "DRANGE
cache_threshold = cache_size *
FLD_SERVER_CACHE_THRESHOLD / 100;
- mutex_init(&fld->lsf_lock);
+ cfs_mutex_init(&fld->lsf_lock);
fld->lsf_cache = fld_cache_init(fld->lsf_name,
cache_size, cache_threshold);
if (IS_ERR(fld->lsf_cache)) {
};
struct fld_cache_entry {
- struct list_head fce_lru;
- struct list_head fce_list;
+ cfs_list_t fce_lru;
+ cfs_list_t fce_list;
/**
* fld cache entries are sorted on range->lsr_start field. */
struct lu_seq_range fce_range;
* Cache guard, protects fci_hash mostly because others immutable after
* init is finished.
*/
- spinlock_t fci_lock;
+ cfs_spinlock_t fci_lock;
/**
* Cache shrink threshold */
/**
* LRU list fld entries. */
- struct list_head fci_lru;
+ cfs_list_t fci_lru;
/**
* sorted fld entries. */
- struct list_head fci_entries_head;
+ cfs_list_t fci_entries_head;
/**
* Cache statistics. */
int rc;
ENTRY;
client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&mcw->mcw_entry);
+ rc = cfs_list_empty(&mcw->mcw_entry);
client_obd_list_unlock(&cli->cl_loi_list_lock);
RETURN(rc);
};
client_obd_list_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+ cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
cfs_waitq_init(&mcw.mcw_waitq);
client_obd_list_unlock(&cli->cl_loi_list_lock);
l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
static void fld_exit_request(struct client_obd *cli)
{
- struct list_head *l, *tmp;
+ cfs_list_t *l, *tmp;
struct mdc_cache_waiter *mcw;
client_obd_list_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+ cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
/* No free request slots anymore */
break;
}
- mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
- list_del_init(&mcw->mcw_entry);
+ mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+ cfs_list_del_init(&mcw->mcw_entry);
cli->cl_r_in_flight++;
cfs_waitq_signal(&mcw->mcw_waitq);
}
hash = fld_rrb_hash(fld, seq);
- list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+ cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == hash)
RETURN(target);
}
"Targets (%d):\n", fld->lcf_name, hash, seq,
fld->lcf_count);
- list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+ cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
const char *srv_name = target->ft_srv != NULL ?
target->ft_srv->lsf_name : "<null>";
const char *exp_name = target->ft_exp != NULL ?
LASSERT(fld->lcf_hash != NULL);
- spin_lock(&fld->lcf_lock);
+ cfs_spin_lock(&fld->lcf_lock);
target = fld->lcf_hash->fh_scan_func(fld, seq);
- spin_unlock(&fld->lcf_lock);
+ cfs_spin_unlock(&fld->lcf_lock);
if (target != NULL) {
CDEBUG(D_INFO, "%s: Found target (idx "LPU64
if (target == NULL)
RETURN(-ENOMEM);
- spin_lock(&fld->lcf_lock);
- list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
+ cfs_spin_lock(&fld->lcf_lock);
+ cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
if (tmp->ft_idx == tar->ft_idx) {
- spin_unlock(&fld->lcf_lock);
+ cfs_spin_unlock(&fld->lcf_lock);
OBD_FREE_PTR(target);
CERROR("Target %s exists in FLD and known as %s:#"LPU64"\n",
name, fld_target_name(tmp), tmp->ft_idx);
target->ft_srv = tar->ft_srv;
target->ft_idx = tar->ft_idx;
- list_add_tail(&target->ft_chain,
- &fld->lcf_targets);
+ cfs_list_add_tail(&target->ft_chain,
+ &fld->lcf_targets);
fld->lcf_count++;
- spin_unlock(&fld->lcf_lock);
+ cfs_spin_unlock(&fld->lcf_lock);
RETURN(0);
}
struct lu_fld_target *target, *tmp;
ENTRY;
- spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ cfs_spin_lock(&fld->lcf_lock);
+ cfs_list_for_each_entry_safe(target, tmp,
+ &fld->lcf_targets, ft_chain) {
if (target->ft_idx == idx) {
fld->lcf_count--;
- list_del(&target->ft_chain);
- spin_unlock(&fld->lcf_lock);
+ cfs_list_del(&target->ft_chain);
+ cfs_spin_unlock(&fld->lcf_lock);
if (target->ft_exp != NULL)
class_export_put(target->ft_exp);
RETURN(0);
}
}
- spin_unlock(&fld->lcf_lock);
+ cfs_spin_unlock(&fld->lcf_lock);
RETURN(-ENOENT);
}
EXPORT_SYMBOL(fld_client_del_target);
}
fld->lcf_count = 0;
- spin_lock_init(&fld->lcf_lock);
+ cfs_spin_lock_init(&fld->lcf_lock);
fld->lcf_hash = &fld_hash[hash];
fld->lcf_flags = LUSTRE_FLD_INIT;
CFS_INIT_LIST_HEAD(&fld->lcf_targets);
fld_client_proc_fini(fld);
- spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ cfs_spin_lock(&fld->lcf_lock);
+ cfs_list_for_each_entry_safe(target, tmp,
+ &fld->lcf_targets, ft_chain) {
fld->lcf_count--;
- list_del(&target->ft_chain);
+ cfs_list_del(&target->ft_chain);
if (target->ft_exp != NULL)
class_export_put(target->ft_exp);
OBD_FREE_PTR(target);
}
- spin_unlock(&fld->lcf_lock);
+ cfs_spin_unlock(&fld->lcf_lock);
if (fld->lcf_cache != NULL) {
if (!IS_ERR(fld->lcf_cache))
LASSERT(fld != NULL);
- spin_lock(&fld->lcf_lock);
- list_for_each_entry(target,
- &fld->lcf_targets, ft_chain)
+ cfs_spin_lock(&fld->lcf_lock);
+ cfs_list_for_each_entry(target,
+ &fld->lcf_targets, ft_chain)
{
rc = snprintf(page, count, "%s\n",
fld_target_name(target));
if (count == 0)
break;
}
- spin_unlock(&fld->lcf_lock);
+ cfs_spin_unlock(&fld->lcf_lock);
RETURN(total);
}
LASSERT(fld != NULL);
- spin_lock(&fld->lcf_lock);
+ cfs_spin_lock(&fld->lcf_lock);
rc = snprintf(page, count, "%s\n",
fld->lcf_hash->fh_name);
- spin_unlock(&fld->lcf_lock);
+ cfs_spin_unlock(&fld->lcf_lock);
RETURN(rc);
}
}
if (hash != NULL) {
- spin_lock(&fld->lcf_lock);
+ cfs_spin_lock(&fld->lcf_lock);
fld->lcf_hash = hash;
- spin_unlock(&fld->lcf_lock);
+ cfs_spin_unlock(&fld->lcf_lock);
CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
fld->lcf_name, hash->fh_name);
*/
/** @{ */
/** Lock protecting page tree. */
- spinlock_t coh_page_guard;
+ cfs_spinlock_t coh_page_guard;
/** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
+ cfs_spinlock_t coh_lock_guard;
/** @} locks */
/** Radix tree of cl_page's, cached for this object. */
struct radix_tree_root coh_tree;
/** # of pages in radix tree. */
unsigned long coh_pages;
/** List of cl_lock's granted for this object. */
- struct list_head coh_locks;
+ cfs_list_t coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
*
* \todo XXX this can be read/write lock if needed.
*/
- spinlock_t coh_attr_guard;
+ cfs_spinlock_t coh_attr_guard;
/**
* Number of objects above this one: 0 for a top-object, 1 for its
* sub-object, etc.
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer top-to-bottom to \a slice.
*/
-#define cl_object_for_each(slice, obj) \
- list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each(slice, obj) \
+ cfs_list_for_each_entry((slice), \
+ &(obj)->co_lu.lo_header->loh_layers, \
+ co_lu.lo_linkage)
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer bottom-to-top to \a slice.
*/
-#define cl_object_for_each_reverse(slice, obj) \
- list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each_reverse(slice, obj) \
+ cfs_list_for_each_entry_reverse((slice), \
+ &(obj)->co_lu.lo_header->loh_layers, \
+ co_lu.lo_linkage)
/** @} cl_object */
#ifndef pgoff_t
*/
struct cl_page {
/** Reference counter. */
- atomic_t cp_ref;
+ cfs_atomic_t cp_ref;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
/** Logical page index within the object. Immutable after creation. */
pgoff_t cp_index;
/** List of slices. Immutable after creation. */
- struct list_head cp_layers;
+ cfs_list_t cp_layers;
/** Parent page, NULL for top-level page. Immutable after creation. */
struct cl_page *cp_parent;
/** Lower-layer page. NULL for bottommost page. Immutable after
/**
* Linkage of pages within some group. Protected by
* cl_page::cp_mutex. */
- struct list_head cp_batch;
+ cfs_list_t cp_batch;
/** Mutex serializing membership of a page in a batch. */
- struct mutex cp_mutex;
+ cfs_mutex_t cp_mutex;
/** Linkage of pages within cl_req. */
- struct list_head cp_flight;
+ cfs_list_t cp_flight;
/** Transfer error. */
int cp_error;
struct cl_object *cpl_obj;
const struct cl_page_operations *cpl_ops;
/** Linkage into cl_page::cp_layers. Immutable after creation. */
- struct list_head cpl_linkage;
+ cfs_list_t cpl_linkage;
};
/**
do { \
static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
cl_page_print(env, &__info, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
do { \
static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
cl_page_header_print(env, &__info, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
* List of enclosed locks, so far. Locks are linked here through
* cl_lock::cll_inclosure.
*/
- struct list_head clc_list;
+ cfs_list_t clc_list;
/**
* True iff closure is in a `wait' mode. This determines what
* cl_lock_enclosure() does when a lock L to be added to the closure
*/
struct cl_lock {
/** Reference counter. */
- atomic_t cll_ref;
+ cfs_atomic_t cll_ref;
/** List of slices. Immutable after creation. */
- struct list_head cll_layers;
+ cfs_list_t cll_layers;
/**
* Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
* by cl_lock::cll_descr::cld_obj::coh_lock_guard.
*/
- struct list_head cll_linkage;
+ cfs_list_t cll_linkage;
/**
* Parameters of this lock. Protected by
* cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
*
* \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
*/
- struct mutex cll_guard;
+ cfs_mutex_t cll_guard;
cfs_task_t *cll_guarder;
int cll_depth;
*
* \see cl_lock_closure
*/
- struct list_head cll_inclosure;
+ cfs_list_t cll_inclosure;
/**
* Confict lock at queuing time.
*/
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
- struct list_head cls_linkage;
+ cfs_list_t cls_linkage;
};
/**
do { \
static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
cl_lock_print(env, &__info, lu_cdebug_printer, lock); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- struct list_head pl_pages;
- cfs_task_t *pl_owner;
+ unsigned pl_nr;
+ cfs_list_t pl_pages;
+ cfs_task_t *pl_owner;
};
/**
* linkage into a list of all slices for a given cl_io, hanging off
* cl_io::ci_layers. Immutable after creation.
*/
- struct list_head cis_linkage;
+ cfs_list_t cis_linkage;
};
*/
struct cl_io_lock_link {
/** linkage into one of cl_lockset lists. */
- struct list_head cill_linkage;
+ cfs_list_t cill_linkage;
struct cl_lock_descr cill_descr;
struct cl_lock *cill_lock;
/** optional destructor */
*/
struct cl_lockset {
/** locks to be acquired. */
- struct list_head cls_todo;
+ cfs_list_t cls_todo;
/** locks currently being processed. */
- struct list_head cls_curr;
+ cfs_list_t cls_curr;
/** locks acquired. */
- struct list_head cls_done;
+ cfs_list_t cls_done;
};
/**
*/
struct cl_io *ci_parent;
/** List of slices. Immutable after creation. */
- struct list_head ci_layers;
+ cfs_list_t ci_layers;
/** list of locks (to be) acquired by this io. */
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
* req's pages.
*/
struct cl_req {
- enum cl_req_type crq_type;
+ enum cl_req_type crq_type;
/** A list of pages being transfered */
- struct list_head crq_pages;
+ cfs_list_t crq_pages;
/** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
+ unsigned crq_nrpages;
/** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
+ struct cl_req_obj *crq_o;
/** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- struct list_head crq_layers;
+ unsigned crq_nrobjs;
+ cfs_list_t crq_layers;
};
/**
struct cl_req_slice {
struct cl_req *crs_req;
struct cl_device *crs_dev;
- struct list_head crs_linkage;
+ cfs_list_t crs_linkage;
const struct cl_req_operations *crs_ops;
};
struct cache_stats {
const char *cs_name;
/** how many entities were created at all */
- atomic_t cs_created;
+ cfs_atomic_t cs_created;
/** how many cache lookups were performed */
- atomic_t cs_lookup;
+ cfs_atomic_t cs_lookup;
/** how many times cache lookup resulted in a hit */
- atomic_t cs_hit;
+ cfs_atomic_t cs_hit;
/** how many entities are in the cache right now */
- atomic_t cs_total;
+ cfs_atomic_t cs_total;
/** how many entities in the cache are actively used (and cannot be
* evicted) right now */
- atomic_t cs_busy;
+ cfs_atomic_t cs_busy;
};
/** These are not exported so far */
*/
struct cache_stats cs_pages;
struct cache_stats cs_locks;
- atomic_t cs_pages_state[CPS_NR];
- atomic_t cs_locks_state[CLS_NR];
+ cfs_atomic_t cs_pages_state[CPS_NR];
+ cfs_atomic_t cs_locks_state[CLS_NR];
};
int cl_site_init (struct cl_site *s, struct cl_device *top);
int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state);
-int cl_queue_match (const struct list_head *queue,
+int cl_queue_match (const cfs_list_t *queue,
const struct cl_lock_descr *need);
void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
* Iterate over pages in a page list.
*/
#define cl_page_list_for_each(page, list) \
- list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+ cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
/**
* Iterate over pages in a page list, taking possible removals into account.
*/
#define cl_page_list_for_each_safe(page, temp, list) \
- list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
+ cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
void cl_page_list_init (struct cl_page_list *plist);
void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
*/
struct cl_sync_io {
/** number of pages yet to be transferred. */
- atomic_t csi_sync_nr;
+ cfs_atomic_t csi_sync_nr;
/** completion to be signaled when transfer is complete. */
cfs_waitq_t csi_waitq;
/** error code. */
- int csi_sync_rc;
+ int csi_sync_rc;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
* Return device-wide statistics.
*/
int (*dt_statfs)(const struct lu_env *env,
- struct dt_device *dev, struct kstatfs *sfs);
+ struct dt_device *dev, cfs_kstatfs_t *sfs);
/**
* Start transaction, described by \a param.
*/
void (*do_ah_init)(const struct lu_env *env,
struct dt_allocation_hint *ah,
struct dt_object *parent,
- umode_t child_mode);
+ cfs_umode_t child_mode);
/**
* Create new object on this device.
*
* way, because callbacks are supposed to be added/deleted only during
* single-threaded start-up shut-down procedures.
*/
- struct list_head dd_txn_callbacks;
+ cfs_list_t dd_txn_callbacks;
};
int dt_device_init(struct dt_device *dev, struct lu_device_type *t);
struct thandle *txn, void *cookie);
int (*dtc_txn_commit)(const struct lu_env *env,
struct thandle *txn, void *cookie);
- void *dtc_cookie;
- __u32 dtc_tag;
- struct list_head dtc_linkage;
+ void *dtc_cookie;
+ __u32 dtc_tag;
+ cfs_list_t dtc_linkage;
};
void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb);
*
* \see ccc_page::cpg_pending_linkage
*/
- struct list_head cob_pending_list;
+ cfs_list_t cob_pending_list;
/**
* Access this counter is protected by inode->i_sem. Now that
*
* \see ll_vm_open(), ll_vm_close().
*/
- atomic_t cob_mmap_cnt;
+ cfs_atomic_t cob_mmap_cnt;
};
/**
* that is, never iterated through, only checked for list_empty(), but
* having a list is useful for debugging.
*/
- struct list_head cpg_pending_linkage;
+ cfs_list_t cpg_pending_linkage;
/** VM page */
cfs_page_t *cpg_page;
};
#define loff_t long long
#define ERESTART 2001
-typedef unsigned short umode_t;
+typedef unsigned short cfs_umode_t;
#endif
static __inline__ int ext2_set_bit(int nr, void *addr)
{
#ifdef __BIG_ENDIAN
- return set_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
+ return cfs_set_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
#else
- return set_bit(nr, addr);
+ return cfs_set_bit(nr, addr);
#endif
}
static __inline__ int ext2_clear_bit(int nr, void *addr)
{
#ifdef __BIG_ENDIAN
- return clear_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
+ return cfs_clear_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
#else
- return clear_bit(nr, addr);
+ return cfs_clear_bit(nr, addr);
#endif
}
__const__ unsigned char *tmp = (__const__ unsigned char *) addr;
return (tmp[nr >> 3] >> (nr & 7)) & 1;
#else
- return test_bit(nr, addr);
+ return cfs_test_bit(nr, addr);
#endif
}
#define EXPORT_SYMBOL(S)
-struct rcu_head { };
+typedef struct cfs_rcu_head { } cfs_rcu_head_t;
typedef __u64 kdev_t;
#ifndef ERESTARTSYS
#define ERESTARTSYS ERESTART
#endif
-#define HZ 1
+#define CFS_HZ 1
/* random */
-void get_random_bytes(void *ptr, int size);
+void cfs_get_random_bytes(void *ptr, int size);
/* memory */
/* memory size: used for some client tunables */
-#define num_physpages (256 * 1024) /* 1GB */
-#define CFS_NUM_CACHEPAGES num_physpages
+#define cfs_num_physpages (256 * 1024) /* 1GB */
+#define CFS_NUM_CACHEPAGES cfs_num_physpages
/* VFS stuff */
struct iattr {
unsigned int ia_valid;
- umode_t ia_mode;
+ cfs_umode_t ia_mode;
uid_t ia_uid;
gid_t ia_gid;
loff_t ia_size;
#define cfs_curproc_comm() (current->comm)
extern struct task_struct *current;
-int in_group_p(gid_t gid);
+int cfs_curproc_is_in_groups(gid_t gid);
-#define set_current_state(foo) do { current->state = foo; } while (0)
+#define cfs_set_current_state(foo) do { current->state = foo; } while (0)
-#define wait_event_interruptible(wq, condition) \
-({ \
+#define cfs_wait_event_interruptible(wq, condition, ret) \
+{ \
struct l_wait_info lwi; \
int timeout = 100000000;/* for ever */ \
int ret; \
ret = l_wait_event(NULL, condition, &lwi); \
\
ret; \
-})
+}
-#define lock_kernel() do {} while (0)
-#define unlock_kernel() do {} while (0)
+#define cfs_lock_kernel() do {} while (0)
+#define cfs_unlock_kernel() do {} while (0)
#define daemonize(l) do {} while (0)
#define sigfillset(l) do {} while (0)
#define recalc_sigpending(l) do {} while (0)
-#define kernel_thread(l,m,n) LBUG()
+#define cfs_kernel_thread(l,m,n) LBUG()
#define USERMODEHELPER(path, argv, envp) (0)
#define SIGNAL_MASK_ASSERT()
-#define KERN_INFO
+#define CFS_KERN_INFO
-#if HZ != 1
+#if CFS_HZ != 1
#error "liblustre's jiffies currently expects HZ to be 1"
#endif
#define jiffies \
#define unlikely(exp) (exp)
#endif
-#define might_sleep()
+#define cfs_might_sleep()
#define might_sleep_if(c)
#define smp_mb()
-#define libcfs_memory_pressure_get() (0)
-#define libcfs_memory_pressure_put() do {} while (0)
+#define libcfs_memory_pressure_get() (0)
+#define libcfs_memory_pressure_put() do {} while (0)
#define libcfs_memory_pressure_clr() do {} while (0)
/* FIXME sys/capability will finally included linux/fs.h thus
struct liblustre_wait_callback {
- struct list_head llwc_list;
- const char *llwc_name;
- int (*llwc_fn)(void *arg);
- void *llwc_arg;
+ cfs_list_t llwc_list;
+ const char *llwc_name;
+ int (*llwc_fn)(void *arg);
+ void *llwc_arg;
};
void *liblustre_register_wait_callback(const char *name,
};
typedef struct file_lock {
- struct file_lock *fl_next; /* singly linked list for this inode */
- struct list_head fl_link; /* doubly linked list of all locks */
- struct list_head fl_block; /* circular list of blocked processes */
+ struct file_lock *fl_next; /* singly linked list for this inode */
+ cfs_list_t fl_link; /* doubly linked list of all locks */
+ cfs_list_t fl_block; /* circular list of blocked processes */
void *fl_owner;
unsigned int fl_pid;
cfs_waitq_t fl_wait;
};
struct posix_acl {
- atomic_t a_refcount;
+ cfs_atomic_t a_refcount;
unsigned int a_count;
struct posix_acl_entry a_entries[0];
};
#include <libcfs/libcfs.h>
#include <linux/statfs.h>
-#else
-# define kstatfs statfs
+#else
+typedef struct statfs cfs_kstatfs_t;
#endif
#endif /* LPROCFS_SNMP_H */
struct dentry *old_pwd;
struct vfsmount *old_pwdmnt;
- write_lock(&fs->lock);
+ cfs_write_lock(&fs->lock);
old_pwd = fs->pwd;
old_pwdmnt = fs->pwdmnt;
fs->pwdmnt = mntget(mnt);
fs->pwd = dget(dentry);
- write_unlock(&fs->lock);
+ cfs_write_unlock(&fs->lock);
if (old_pwd) {
dput(old_pwd);
#define ATTR_BLOCKS (1 << 27)
#if HAVE_INODE_I_MUTEX
-#define UNLOCK_INODE_MUTEX(inode) do {mutex_unlock(&(inode)->i_mutex); } while(0)
-#define LOCK_INODE_MUTEX(inode) do {mutex_lock(&(inode)->i_mutex); } while(0)
+#define UNLOCK_INODE_MUTEX(inode) \
+do {cfs_mutex_unlock(&(inode)->i_mutex); } while(0)
+#define LOCK_INODE_MUTEX(inode) \
+do {cfs_mutex_lock(&(inode)->i_mutex); } while(0)
#define LOCK_INODE_MUTEX_PARENT(inode) \
-do {mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
-#define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
+do {cfs_mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
+#define TRYLOCK_INODE_MUTEX(inode) cfs_mutex_trylock(&(inode)->i_mutex)
#else
-#define UNLOCK_INODE_MUTEX(inode) do {up(&(inode)->i_sem); } while(0)
-#define LOCK_INODE_MUTEX(inode) do {down(&(inode)->i_sem); } while(0)
+#define UNLOCK_INODE_MUTEX(inode) do {cfs_up(&(inode)->i_sem); } while(0)
+#define LOCK_INODE_MUTEX(inode) do {cfs_down(&(inode)->i_sem); } while(0)
#define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
#define LOCK_INODE_MUTEX_PARENT(inode) LOCK_INODE_MUTEX(inode)
#endif /* HAVE_INODE_I_MUTEX */
#ifdef HAVE_SEQ_LOCK
-#define LL_SEQ_LOCK(seq) mutex_lock(&(seq)->lock)
-#define LL_SEQ_UNLOCK(seq) mutex_unlock(&(seq)->lock)
+#define LL_SEQ_LOCK(seq) cfs_mutex_lock(&(seq)->lock)
+#define LL_SEQ_UNLOCK(seq) cfs_mutex_unlock(&(seq)->lock)
#else
-#define LL_SEQ_LOCK(seq) down(&(seq)->sem)
-#define LL_SEQ_UNLOCK(seq) up(&(seq)->sem)
+#define LL_SEQ_LOCK(seq) cfs_down(&(seq)->sem)
+#define LL_SEQ_UNLOCK(seq) cfs_up(&(seq)->sem)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
#endif
#ifdef HAVE_DQUOTOFF_MUTEX
-#define UNLOCK_DQONOFF_MUTEX(dqopt) do {mutex_unlock(&(dqopt)->dqonoff_mutex); } while(0)
-#define LOCK_DQONOFF_MUTEX(dqopt) do {mutex_lock(&(dqopt)->dqonoff_mutex); } while(0)
+#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_unlock(&(dqopt)->dqonoff_mutex)
+#define LOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_lock(&(dqopt)->dqonoff_mutex)
#else
-#define UNLOCK_DQONOFF_MUTEX(dqopt) do {up(&(dqopt)->dqonoff_sem); } while(0)
-#define LOCK_DQONOFF_MUTEX(dqopt) do {down(&(dqopt)->dqonoff_sem); } while(0)
+#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_up(&(dqopt)->dqonoff_sem)
+#define LOCK_DQONOFF_MUTEX(dqopt) cfs_down(&(dqopt)->dqonoff_sem)
#endif /* HAVE_DQUOTOFF_MUTEX */
#define current_ngroups current->group_info->ngroups
#define gfp_t int
#endif
-#define lock_dentry(___dentry) spin_lock(&(___dentry)->d_lock)
-#define unlock_dentry(___dentry) spin_unlock(&(___dentry)->d_lock)
+#define lock_dentry(___dentry) cfs_spin_lock(&(___dentry)->d_lock)
+#define unlock_dentry(___dentry) cfs_spin_unlock(&(___dentry)->d_lock)
#define ll_kernel_locked() kernel_locked()
#define ll_path_lookup path_lookup
#define ll_permission(inode,mask,nd) permission(inode,mask,nd)
-#define ll_pgcache_lock(mapping) spin_lock(&mapping->page_lock)
-#define ll_pgcache_unlock(mapping) spin_unlock(&mapping->page_lock)
+#define ll_pgcache_lock(mapping) cfs_spin_lock(&mapping->page_lock)
+#define ll_pgcache_unlock(mapping) cfs_spin_unlock(&mapping->page_lock)
#define ll_call_writepage(inode, page) \
(inode)->i_mapping->a_ops->writepage(page, NULL)
#define ll_invalidate_inode_pages(inode) \
#include <linux/writeback.h>
-static inline int cleanup_group_info(void)
+static inline int cfs_cleanup_group_info(void)
{
struct group_info *ginfo;
int rc = 1;
ll_pgcache_lock(mapping);
- if (list_empty(&mapping->dirty_pages) &&
- list_empty(&mapping->clean_pages) &&
- list_empty(&mapping->locked_pages)) {
+ if (cfs_list_empty(&mapping->dirty_pages) &&
+ cfs_list_empty(&mapping->clean_pages) &&
+ cfs_list_empty(&mapping->locked_pages)) {
rc = 0;
}
ll_pgcache_unlock(mapping);
#define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
#else
#define ll_set_dflags(dentry, flags) do { \
- spin_lock(&dentry->d_lock); \
+ cfs_spin_lock(&dentry->d_lock); \
dentry->d_flags |= flags; \
- spin_unlock(&dentry->d_lock); \
+ cfs_spin_unlock(&dentry->d_lock); \
} while(0)
#endif
if (!type)
return ERR_PTR(-ENODEV);
mnt = vfs_kern_mount(type, flags, name, data);
- module_put(type->owner);
+ cfs_module_put(type->owner);
return mnt;
}
#else
#define TREE_READ_LOCK_IRQ(mapping) read_lock_irq(&(mapping)->tree_lock)
#define TREE_READ_UNLOCK_IRQ(mapping) read_unlock_irq(&(mapping)->tree_lock)
#else
-#define TREE_READ_LOCK_IRQ(mapping) spin_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) spin_unlock_irq(&(mapping)->tree_lock)
+#define TREE_READ_LOCK_IRQ(mapping) cfs_spin_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping) cfs_spin_unlock_irq(&(mapping)->tree_lock)
#endif
#ifdef HAVE_UNREGISTER_BLKDEV_RETURN_INT
vfs_rename(old,old_dir,new,new_dir)
#endif /* HAVE_SECURITY_PLUG */
-#ifndef for_each_possible_cpu
-#define for_each_possible_cpu(i) for_each_cpu(i)
+#ifdef for_each_possible_cpu
+#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#elif defined(for_each_cpu)
+#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
#endif
#ifndef cpu_to_node
#endif
#ifdef HAVE_REGISTER_SHRINKER
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
+typedef int (*cfs_shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
static inline
-struct shrinker *set_shrinker(int seek, shrinker_t func)
+struct shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
{
struct shrinker *s;
}
static inline
-void remove_shrinker(struct shrinker *shrinker)
+void cfs_remove_shrinker(struct shrinker *shrinker)
{
if (shrinker == NULL)
return;
}
#endif /* HAVE_REGISTER_SHRINKER */
-/* Using kernel fls(). Userspace will use one defined in user-bitops.h. */
-#ifndef __fls
-#define __fls fls
-#endif
-
#ifdef HAVE_INVALIDATE_INODE_PAGES
#define invalidate_mapping_pages(mapping,s,e) invalidate_inode_pages(mapping)
#endif
#endif
#ifndef SLAB_DESTROY_BY_RCU
-#define SLAB_DESTROY_BY_RCU 0
+#define CFS_SLAB_DESTROY_BY_RCU 0
+#else
+#define CFS_SLAB_DESTROY_BY_RCU SLAB_DESTROY_BY_RCU
#endif
#ifdef HAVE_SB_HAS_QUOTA_ACTIVE
struct lustre_dquot;
struct fsfilt_operations {
- struct list_head fs_list;
- struct module *fs_owner;
+ cfs_list_t fs_list;
+ cfs_module_t *fs_owner;
char *fs_type;
char *(* fs_getlabel)(struct super_block *sb);
int (* fs_setlabel)(struct super_block *sb, char *label);
int (* fs_map_inode_pages)(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int *created, int create,
- struct semaphore *sem);
+ cfs_semaphore_t *sem);
int (* fs_write_record)(struct file *, void *, int size, loff_t *,
int force_sync);
int (* fs_read_record)(struct file *, void *, int size, loff_t *);
int (* fs_quotainfo)(struct lustre_quota_info *lqi, int type,
int cmd);
int (* fs_qids)(struct file *file, struct inode *inode, int type,
- struct list_head *list);
+ cfs_list_t *list);
int (* fs_get_mblk)(struct super_block *sb, int *count,
struct inode *inode, int frags);
int (* fs_dquot)(struct lustre_dquot *dquot, int cmd);
#define FSFILT_OP_UNLINK_PARTIAL_PARENT 22
#define FSFILT_OP_CREATE_PARTIAL_CHILD 23
-#define __fsfilt_check_slow(obd, start, msg) \
-do { \
- if (time_before(jiffies, start + 15 * HZ)) \
- break; \
- else if (time_before(jiffies, start + 30 * HZ)) \
- CDEBUG(D_VFSTRACE, "%s: slow %s %lus\n", obd->obd_name, \
- msg, (jiffies-start) / HZ); \
- else if (time_before(jiffies, start + DISK_TIMEOUT * HZ)) \
- CWARN("%s: slow %s %lus\n", obd->obd_name, msg, \
- (jiffies - start) / HZ); \
- else \
- CERROR("%s: slow %s %lus\n", obd->obd_name, msg, \
- (jiffies - start) / HZ); \
+#define __fsfilt_check_slow(obd, start, msg) \
+do { \
+ if (cfs_time_before(jiffies, start + 15 * CFS_HZ)) \
+ break; \
+ else if (cfs_time_before(jiffies, start + 30 * CFS_HZ)) \
+ CDEBUG(D_VFSTRACE, "%s: slow %s %lus\n", obd->obd_name, \
+ msg, (jiffies-start) / CFS_HZ); \
+ else if (cfs_time_before(jiffies, start + DISK_TIMEOUT * CFS_HZ)) \
+ CWARN("%s: slow %s %lus\n", obd->obd_name, msg, \
+ (jiffies - start) / CFS_HZ); \
+ else \
+ CERROR("%s: slow %s %lus\n", obd->obd_name, msg, \
+ (jiffies - start) / CFS_HZ); \
} while (0)
#define fsfilt_check_slow(obd, start, msg) \
static inline int fsfilt_qids(struct obd_device *obd, struct file *file,
struct inode *inode, int type,
- struct list_head *list)
+ cfs_list_t *list)
{
if (obd->obd_fsops->fs_qids)
return obd->obd_fsops->fs_qids(file, inode, type, list);
struct inode *inode,
struct page **page, int pages,
unsigned long *blocks, int *created,
- int create, struct semaphore *sem)
+ int create, cfs_semaphore_t *sem)
{
return obd->obd_fsops->fs_map_inode_pages(inode, page, pages, blocks,
created, create, sem);
# ifdef HAVE_RCU
# include <linux/rcupdate.h> /* for rcu_head{} */
+typedef struct rcu_head cfs_rcu_head_t;
# else
-struct rcu_head { };
+typedef struct cfs_rcu_head { } cfs_rcu_head_t;
# endif
#endif /* ifdef __KERNEL__ */
#define lcounter_destroy(counter) percpu_counter_destroy(counter)
#else
-typedef struct { atomic_t count; } lcounter_t;
+typedef struct { cfs_atomic_t count; } lcounter_t;
-#define lcounter_read(counter) atomic_read(&counter->count)
-#define lcounter_inc(counter) atomic_inc(&counter->count)
-#define lcounter_dec(counter) atomic_dec(&counter->count)
-#define lcounter_init(counter) atomic_set(&counter->count, 0)
+#define lcounter_read(counter) cfs_atomic_read(&counter->count)
+#define lcounter_inc(counter) cfs_atomic_inc(&counter->count)
+#define lcounter_dec(counter) cfs_atomic_dec(&counter->count)
+#define lcounter_init(counter) cfs_atomic_set(&counter->count, 0)
#define lcounter_destroy(counter)
#endif /* if defined HAVE_PERCPU_COUNTER */
#ifdef HAVE_RW_TREE_LOCK
write_lock_irq(&mapping->tree_lock);
#else
- spin_lock_irq(&mapping->tree_lock);
+ cfs_spin_lock_irq(&mapping->tree_lock);
#endif
radix_tree_delete(&mapping->page_tree, page->index);
page->mapping = NULL;
mapping->nrpages--;
#ifdef HAVE_NR_PAGECACHE
- atomic_add(-1, &nr_pagecache); // XXX pagecache_acct(-1);
+ cfs_atomic_add(-1, &nr_pagecache); // XXX pagecache_acct(-1);
#else
__dec_zone_page_state(page, NR_FILE_PAGES);
#endif
#ifdef HAVE_RW_TREE_LOCK
write_unlock_irq(&mapping->tree_lock);
#else
- spin_unlock_irq(&mapping->tree_lock);
+ cfs_spin_unlock_irq(&mapping->tree_lock);
#endif
}
#include <linux/lustre_compat25.h>
#include <linux/lvfs_linux.h>
#else
-struct group_info { /* unused */ };
#include <liblustre.h>
#endif
int lustre_fread(struct file *file, void *buf, int len, loff_t *off);
int lustre_fwrite(struct file *file, const void *buf, int len, loff_t *off);
int lustre_fsync(struct file *file);
-long l_readdir(struct file * file, struct list_head *dentry_list);
+long l_readdir(struct file * file, cfs_list_t *dentry_list);
int l_notify_change(struct vfsmount *mnt, struct dentry *dchild,
struct iattr *newattrs);
int simple_truncate(struct dentry *dir, struct vfsmount *mnt,
if (!de || IS_ERR(de))
return;
//shrink_dcache_parent(de);
- LASSERT(atomic_read(&de->d_count) > 0);
+ LASSERT(cfs_atomic_read(&de->d_count) > 0);
dput(de);
}
static inline void ll_sleep(int t)
{
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(t * HZ);
- set_current_state(TASK_RUNNING);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_schedule_timeout(t * CFS_HZ);
+ cfs_set_current_state(CFS_TASK_RUNNING);
}
#endif
int flags);
struct l_linux_dirent {
- struct list_head lld_list;
+ cfs_list_t lld_list;
ino_t lld_ino;
unsigned long lld_off;
char lld_name[LL_FID_NAMELEN];
};
struct l_readdir_callback {
struct l_linux_dirent *lrc_dirent;
- struct list_head *lrc_list;
+ cfs_list_t *lrc_list;
};
#define LVFS_DENTRY_PARAM_MAGIC 20070216UL
#endif
typedef struct {
- spinlock_t lock;
+ cfs_spinlock_t lock;
#ifdef CLIENT_OBD_LIST_LOCK_DEBUG
unsigned long time;
{
unsigned long cur = jiffies;
while (1) {
- if (spin_trylock(&lock->lock)) {
+ if (cfs_spin_trylock(&lock->lock)) {
LASSERT(lock->task == NULL);
lock->task = current;
lock->func = func;
break;
}
- if ((jiffies - cur > 5 * HZ) &&
- (jiffies - lock->time > 5 * HZ)) {
+ if ((jiffies - cur > 5 * CFS_HZ) &&
+ (jiffies - lock->time > 5 * CFS_HZ)) {
LCONSOLE_WARN("LOCK UP! the lock %p was acquired"
" by <%s:%d:%s:%d> %lu time, I'm %s:%d\n",
lock, lock->task->comm, lock->task->pid,
LCONSOLE_WARN("====== for current process =====\n");
libcfs_debug_dumpstack(NULL);
LCONSOLE_WARN("====== end =======\n");
- cfs_pause(1000* HZ);
+ cfs_pause(1000 * CFS_HZ);
}
}
}
LASSERT(lock->task != NULL);
lock->task = NULL;
lock->time = jiffies;
- spin_unlock(&lock->lock);
+ cfs_spin_unlock(&lock->lock);
}
#else /* ifdef CLIENT_OBD_LIST_LOCK_DEBUG */
static inline void client_obd_list_lock(client_obd_lock_t *lock)
{
- spin_lock(&lock->lock);
+ cfs_spin_lock(&lock->lock);
}
static inline void client_obd_list_unlock(client_obd_lock_t *lock)
{
- spin_unlock(&lock->lock);
+ cfs_spin_unlock(&lock->lock);
}
#endif /* ifdef CLIENT_OBD_LIST_LOCK_DEBUG */
static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
{
- spin_lock_init(&lock->lock);
+ cfs_spin_lock_init(&lock->lock);
}
static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
/* if we find more consumers this could be generalized */
#define OBD_HIST_MAX 32
struct obd_histogram {
- spinlock_t oh_lock;
+ cfs_spinlock_t oh_lock;
unsigned long oh_buckets[OBD_HIST_MAX];
};
};
struct lprocfs_atomic {
- atomic_t la_entry;
- atomic_t la_exit;
+ cfs_atomic_t la_entry;
+ cfs_atomic_t la_exit;
};
#define LC_MIN_INIT ((~(__u64)0) >> 1)
struct lprocfs_stats {
unsigned int ls_num; /* # of counters */
int ls_flags; /* See LPROCFS_STATS_FLAG_* */
- spinlock_t ls_lock; /* Lock used only when there are
+ cfs_spinlock_t ls_lock; /* Lock used only when there are
* no percpu stats areas */
struct lprocfs_percpu *ls_percpu[0];
};
rc = 1;
if (type & LPROCFS_GET_SMP_ID)
rc = 0;
- spin_lock(&stats->ls_lock);
+ cfs_spin_lock(&stats->ls_lock);
} else {
if (type & LPROCFS_GET_NUM_CPU)
- rc = num_possible_cpus();
+ rc = cfs_num_possible_cpus();
if (type & LPROCFS_GET_SMP_ID) {
stats->ls_flags |= LPROCFS_STATS_GET_SMP_ID;
rc = cfs_get_cpu();
static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats)
{
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
- spin_unlock(&stats->ls_lock);
+ cfs_spin_unlock(&stats->ls_lock);
else if (stats->ls_flags & LPROCFS_STATS_GET_SMP_ID)
cfs_put_cpu();
}
int i;
LASSERT(stats != NULL);
- for (i = 0; i < num_possible_cpus(); i++)
+ for (i = 0; i < cfs_num_possible_cpus(); i++)
ret += lprocfs_read_helper(&(stats->ls_percpu[i]->lp_cntr[idx]),
field);
return ret;
extern int lprocfs_seq_release(struct inode *, struct file *);
/* in lprocfs_stat.c, to protect the private data for proc entries */
-extern struct rw_semaphore _lprocfs_lock;
+extern cfs_rw_semaphore_t _lprocfs_lock;
#define LPROCFS_ENTRY() do { \
- down_read(&_lprocfs_lock); \
+ cfs_down_read(&_lprocfs_lock); \
} while(0)
#define LPROCFS_EXIT() do { \
- up_read(&_lprocfs_lock); \
+ cfs_up_read(&_lprocfs_lock); \
} while(0)
#ifdef HAVE_PROCFS_DELETED
#endif
#define LPROCFS_WRITE_ENTRY() do { \
- down_write(&_lprocfs_lock); \
+ cfs_down_write(&_lprocfs_lock); \
} while(0)
#define LPROCFS_WRITE_EXIT() do { \
- up_write(&_lprocfs_lock); \
+ cfs_up_write(&_lprocfs_lock); \
} while(0)
* the import in a client obd_device for a lprocfs entry */
#define LPROCFS_CLIMP_CHECK(obd) do { \
typecheck(struct obd_device *, obd); \
- down_read(&(obd)->u.cli.cl_sem); \
+ cfs_down_read(&(obd)->u.cli.cl_sem); \
if ((obd)->u.cli.cl_import == NULL) { \
- up_read(&(obd)->u.cli.cl_sem); \
+ cfs_up_read(&(obd)->u.cli.cl_sem); \
return -ENODEV; \
} \
} while(0)
#define LPROCFS_CLIMP_EXIT(obd) \
- up_read(&(obd)->u.cli.cl_sem);
+ cfs_up_read(&(obd)->u.cli.cl_sem);
/* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
*
* \todo XXX which means that atomic_t is probably too small.
*/
- atomic_t ld_ref;
+ cfs_atomic_t ld_ref;
/**
* Pointer to device type. Never modified once set.
*/
*
* \see lu_device_types.
*/
- struct list_head ldt_linkage;
+ cfs_list_t ldt_linkage;
};
/**
/**
* Linkage into list of all layers.
*/
- struct list_head lo_linkage;
+ cfs_list_t lo_linkage;
/**
* Depth. Top level layer depth is 0.
*/
* Object flags from enum lu_object_header_flags. Set and checked
* atomically.
*/
- unsigned long loh_flags;
+ unsigned long loh_flags;
/**
* Object reference count. Protected by lu_site::ls_guard.
*/
- atomic_t loh_ref;
+ cfs_atomic_t loh_ref;
/**
* Fid, uniquely identifying this object.
*/
- struct lu_fid loh_fid;
+ struct lu_fid loh_fid;
/**
* Common object attributes, cached for efficiency. From enum
* lu_object_header_attr.
*/
- __u32 loh_attr;
+ __u32 loh_attr;
/**
* Linkage into per-site hash table. Protected by lu_site::ls_guard.
*/
- struct hlist_node loh_hash;
+ cfs_hlist_node_t loh_hash;
/**
* Linkage into per-site LRU list. Protected by lu_site::ls_guard.
*/
- struct list_head loh_lru;
+ cfs_list_t loh_lru;
/**
* Linkage into list of layers. Never modified once set (except lately
* during object destruction). No locking is necessary.
*/
- struct list_head loh_layers;
+ cfs_list_t loh_layers;
/**
* A list of references to this object, for debugging.
*/
- struct lu_ref loh_reference;
+ struct lu_ref loh_reference;
};
struct fld;
*
* yes, it's heavy.
*/
- rwlock_t ls_guard;
+ cfs_rwlock_t ls_guard;
/**
* Hash-table where objects are indexed by fid.
*/
- struct hlist_head *ls_hash;
+ cfs_hlist_head_t *ls_hash;
/**
* Bit-mask for hash-table size.
*/
- int ls_hash_mask;
+ int ls_hash_mask;
/**
* Order of hash-table.
*/
- int ls_hash_bits;
+ int ls_hash_bits;
/**
* Number of buckets in the hash-table.
*/
- int ls_hash_size;
+ int ls_hash_size;
/**
* LRU list, updated on each access to object. Protected by
* moved to the lu_site::ls_lru.prev (this is due to the non-existence
* of list_for_each_entry_safe_reverse()).
*/
- struct list_head ls_lru;
+ cfs_list_t ls_lru;
/**
* Total number of objects in this site. Protected by
* lu_site::ls_guard.
*/
- unsigned ls_total;
+ unsigned ls_total;
/**
* Total number of objects in this site with reference counter greater
* than 0. Protected by lu_site::ls_guard.
*/
- unsigned ls_busy;
+ unsigned ls_busy;
/**
* Top-level device for this stack.
*/
- struct lu_device *ls_top_dev;
+ struct lu_device *ls_top_dev;
/**
* Wait-queue signaled when an object in this site is ultimately
*
* \see htable_lookup().
*/
- cfs_waitq_t ls_marche_funebre;
+ cfs_waitq_t ls_marche_funebre;
/** statistical counters. Protected by nothing, races are accepted. */
struct {
/**
* Linkage into global list of sites.
*/
- struct list_head ls_linkage;
- struct lprocfs_stats *ls_time_stats;
+ cfs_list_t ls_linkage;
+ struct lprocfs_stats *ls_time_stats;
};
/** \name ctors
*/
static inline void lu_object_get(struct lu_object *o)
{
- LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
- atomic_inc(&o->lo_header->loh_ref);
+ LASSERT(cfs_atomic_read(&o->lo_header->loh_ref) > 0);
+ cfs_atomic_inc(&o->lo_header->loh_ref);
}
/**
*/
static inline int lu_object_is_dying(const struct lu_object_header *h)
{
- return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
+ return cfs_test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
}
void lu_object_put(const struct lu_env *env, struct lu_object *o);
*/
static inline struct lu_object *lu_object_top(struct lu_object_header *h)
{
- LASSERT(!list_empty(&h->loh_layers));
+ LASSERT(!cfs_list_empty(&h->loh_layers));
return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
}
do { \
static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
lu_object_print(env, &__info, lu_cdebug_printer, object); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
do { \
static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
lu_object_header_print(env, &__info, lu_cdebug_printer, \
(object)->lo_header); \
lu_cdebug_printer(env, &__info, "\n"); \
* `non-transient' contexts, i.e., ones created for service threads
* are placed here.
*/
- struct list_head lc_remember;
+ cfs_list_t lc_remember;
/**
* Version counter used to skip calls to lu_context_refill() when no
* keys were registered.
* Internal implementation detail: number of values created for this
* key.
*/
- atomic_t lct_used;
+ cfs_atomic_t lct_used;
/**
* Internal implementation detail: module for this key.
*/
- struct module *lct_owner;
+ cfs_module_t *lct_owner;
/**
* References to this key. For debugging.
*/
/**
* Spin-lock protecting lu_ref::lf_list.
*/
- spinlock_t lf_guard;
+ cfs_spinlock_t lf_guard;
/**
* List of all outstanding references (each represented by struct
* lu_ref_link), pointing to this object.
*/
- struct list_head lf_list;
+ cfs_list_t lf_list;
/**
* # of links.
*/
- short lf_refs;
+ short lf_refs;
/**
* Flag set when lu_ref_add() failed to allocate lu_ref_link. It is
* used to mask spurious failure of the following lu_ref_del().
*/
- short lf_failed;
+ short lf_failed;
/**
* flags - attribute for the lu_ref, for pad and future use.
*/
- short lf_flags;
+ short lf_flags;
/**
* Where was I initialized?
*/
- short lf_line;
- const char *lf_func;
+ short lf_line;
+ const char *lf_func;
/**
* Linkage into a global list of all lu_ref's (lu_ref_refs).
*/
- struct list_head lf_linkage;
+ cfs_list_t lf_linkage;
};
void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line);
/** Server last transaction number */
__u64 lut_last_transno;
/** Lock protecting last transaction number */
- spinlock_t lut_translock;
+ cfs_spinlock_t lut_translock;
/** Lock protecting client bitmap */
- spinlock_t lut_client_bitmap_lock;
+ cfs_spinlock_t lut_client_bitmap_lock;
/** Bitmap of known clients */
unsigned long lut_client_bitmap[LR_CLIENT_BITMAP_SIZE];
/** Number of mounts */
__u64 lut_mount_count;
__u32 lut_stale_export_age;
- spinlock_t lut_trans_table_lock;
+ cfs_spinlock_t lut_trans_table_lock;
};
typedef void (*lut_cb_t)(struct lu_target *lut, __u64 transno,
static __inline__ struct hsm_action_item * hai_zero(struct hsm_action_list *hal)
{
return (struct hsm_action_item *)(hal->hal_fsname +
- size_round(strlen(hal->hal_fsname)));
+ cfs_size_round(strlen(hal-> \
+ hal_fsname)));
}
/* Return pointer to next hai */
static __inline__ struct hsm_action_item * hai_next(struct hsm_action_item *hai)
{
return (struct hsm_action_item *)((char *)hai +
- size_round(hai->hai_len));
+ cfs_size_round(hai->hai_len));
}
}
struct client_capa {
- struct inode *inode;
- struct list_head lli_list; /* link to lli_oss_capas */
+ struct inode *inode;
+ cfs_list_t lli_list; /* link to lli_oss_capas */
};
struct target_capa {
- struct hlist_node c_hash; /* link to capa hash */
+ cfs_hlist_node_t c_hash; /* link to capa hash */
};
struct obd_capa {
- struct list_head c_list; /* link to capa_list */
+ cfs_list_t c_list; /* link to capa_list */
struct lustre_capa c_capa; /* capa */
- atomic_t c_refc; /* ref count */
+ cfs_atomic_t c_refc; /* ref count */
cfs_time_t c_expiry; /* jiffies */
- spinlock_t c_lock; /* protect capa content */
+ cfs_spinlock_t c_lock; /* protect capa content */
int c_site;
union {
typedef int (* renew_capa_cb_t)(struct obd_capa *, struct lustre_capa *);
/* obdclass/capa.c */
-extern struct list_head capa_list[];
-extern spinlock_t capa_lock;
+extern cfs_list_t capa_list[];
+extern cfs_spinlock_t capa_lock;
extern int capa_count[];
extern cfs_mem_cache_t *capa_cachep;
-struct hlist_head *init_capa_hash(void);
-void cleanup_capa_hash(struct hlist_head *hash);
+cfs_hlist_head_t *init_capa_hash(void);
+void cleanup_capa_hash(cfs_hlist_head_t *hash);
-struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa);
-struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
- int alive);
+struct obd_capa *capa_add(cfs_hlist_head_t *hash,
+ struct lustre_capa *capa);
+struct obd_capa *capa_lookup(cfs_hlist_head_t *hash,
+ struct lustre_capa *capa, int alive);
int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key);
int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen);
return ERR_PTR(-ENOMEM);
CFS_INIT_LIST_HEAD(&ocapa->c_list);
- atomic_set(&ocapa->c_refc, 1);
- spin_lock_init(&ocapa->c_lock);
+ cfs_atomic_set(&ocapa->c_refc, 1);
+ cfs_spin_lock_init(&ocapa->c_lock);
ocapa->c_site = site;
if (ocapa->c_site == CAPA_SITE_CLIENT)
CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
if (!ocapa)
return NULL;
- atomic_inc(&ocapa->c_refc);
+ cfs_atomic_inc(&ocapa->c_refc);
return ocapa;
}
if (!ocapa)
return;
- if (atomic_read(&ocapa->c_refc) == 0) {
+ if (cfs_atomic_read(&ocapa->c_refc) == 0) {
DEBUG_CAPA(D_ERROR, &ocapa->c_capa, "refc is 0 for");
LBUG();
}
- if (atomic_dec_and_test(&ocapa->c_refc)) {
- LASSERT(list_empty(&ocapa->c_list));
+ if (cfs_atomic_dec_and_test(&ocapa->c_refc)) {
+ LASSERT(cfs_list_empty(&ocapa->c_list));
if (ocapa->c_site == CAPA_SITE_CLIENT) {
- LASSERT(list_empty(&ocapa->u.cli.lli_list));
+ LASSERT(cfs_list_empty(&ocapa->u.cli.lli_list));
} else {
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
hnode = &ocapa->u.tgt.c_hash;
LASSERT(!hnode->next && !hnode->pprev);
}
struct filter_capa_key {
- struct list_head k_list;
+ cfs_list_t k_list;
struct lustre_capa_key k_key;
};
#define LUSTRE_CFG_MAX_BUFCOUNT 8
#define LCFG_HDR_SIZE(count) \
- size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)]))
+ cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)]))
/* If the LCFG_REQUIRED bit is set in a configuration command,
* then the client is required to understand this parameter
offset = LCFG_HDR_SIZE(lcfg->lcfg_bufcount);
for (i = 0; i < index; i++)
- offset += size_round(lcfg->lcfg_buflens[i]);
+ offset += cfs_size_round(lcfg->lcfg_buflens[i]);
return (char *)lcfg + offset;
}
*/
if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
int last = min((int)lcfg->lcfg_buflens[index],
- size_round(lcfg->lcfg_buflens[index]) - 1);
+ cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
char lost = s[last];
s[last] = '\0';
if (lost != '\0') {
len = LCFG_HDR_SIZE(bufcount);
for (i = 0; i < bufcount; i++)
- len += size_round(buflens[i]);
+ len += cfs_size_round(buflens[i]);
- RETURN(size_round(len));
+ RETURN(cfs_size_round(len));
}
struct lustre_disk_data *lsi_ldd; /* mount info on-disk */
struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */
struct vfsmount *lsi_srv_mnt; /* the one server mount */
- atomic_t lsi_mounts; /* references to the srv_mnt */
+ cfs_atomic_t lsi_mounts; /* references to the srv_mnt */
};
#define LSI_SERVER 0x00000001
/****************** mount lookup info *********************/
struct lustre_mount_info {
- char *lmi_name;
- struct super_block *lmi_sb;
- struct vfsmount *lmi_mnt;
- struct list_head lmi_list_chain;
+ char *lmi_name;
+ struct super_block *lmi_sb;
+ struct vfsmount *lmi_mnt;
+ cfs_list_t lmi_list_chain;
};
/****************** prototypes *********************/
/* 1.5 times the maximum 128 tasks available in VN mode */
#define LDLM_DEFAULT_LRU_SIZE 196
#else
-#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
+#define LDLM_DEFAULT_LRU_SIZE (100 * cfs_num_online_cpus())
#endif
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
#define LDLM_CTIME_AGE_LIMIT (10)
/**
* Lock for protecting slv/clv updates.
*/
- spinlock_t pl_lock;
+ cfs_spinlock_t pl_lock;
/**
* Number of allowed locks in in pool, both, client and server side.
*/
- atomic_t pl_limit;
+ cfs_atomic_t pl_limit;
/**
* Number of granted locks in
*/
- atomic_t pl_granted;
+ cfs_atomic_t pl_granted;
/**
* Grant rate per T.
*/
- atomic_t pl_grant_rate;
+ cfs_atomic_t pl_grant_rate;
/**
* Cancel rate per T.
*/
- atomic_t pl_cancel_rate;
+ cfs_atomic_t pl_cancel_rate;
/**
* Grant speed (GR-CR) per T.
*/
- atomic_t pl_grant_speed;
+ cfs_atomic_t pl_grant_speed;
/**
* Server lock volume. Protected by pl_lock.
*/
* Lock volume factor. SLV on client is calculated as following:
* server_slv * lock_volume_factor.
*/
- atomic_t pl_lock_volume_factor;
+ cfs_atomic_t pl_lock_volume_factor;
/**
* Time when last slv from server was obtained.
*/
/**
* Hash table for namespace.
*/
- struct list_head *ns_hash;
- spinlock_t ns_hash_lock;
+ cfs_list_t *ns_hash;
+ cfs_spinlock_t ns_hash_lock;
/**
* Count of resources in the hash.
/**
* All root resources in namespace.
*/
- struct list_head ns_root_list;
+ cfs_list_t ns_root_list;
/**
* Position in global namespace list.
*/
- struct list_head ns_list_chain;
+ cfs_list_t ns_list_chain;
/**
* All root resources in namespace.
*/
- struct list_head ns_unused_list;
+ cfs_list_t ns_unused_list;
int ns_nr_unused;
- spinlock_t ns_unused_lock;
+ cfs_spinlock_t ns_unused_lock;
unsigned int ns_max_unused;
unsigned int ns_max_age;
*/
cfs_time_t ns_next_dump;
- atomic_t ns_locks;
+ cfs_atomic_t ns_locks;
__u64 ns_resources;
ldlm_res_policy ns_policy;
struct ldlm_valblock_ops *ns_lvbo;
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
struct interval_node li_node; /* node for tree mgmt */
- struct list_head li_group; /* the locks which have the same
+ cfs_list_t li_group; /* the locks which have the same
* policy - group of the policy */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
/**
* Lock reference count.
*/
- atomic_t l_refc;
+ cfs_atomic_t l_refc;
/**
* Internal spinlock protects l_resource. we should hold this lock
* first before grabbing res_lock.
*/
- spinlock_t l_lock;
+ cfs_spinlock_t l_lock;
/**
* ldlm_lock_change_resource() can change this.
*/
/**
* Protected by ns_hash_lock. List item for client side lru list.
*/
- struct list_head l_lru;
+ cfs_list_t l_lru;
/**
* Protected by lr_lock, linkage to resource's lock queues.
*/
- struct list_head l_res_link;
+ cfs_list_t l_res_link;
/**
* Tree node for ldlm_extent.
*/
* Protected by per-bucket exp->exp_lock_hash locks. Per export hash
* of locks.
*/
- struct hlist_node l_exp_hash;
+ cfs_hlist_node_t l_exp_hash;
/**
* Protected by lr_lock. Requested mode.
*/
void *l_lvb_data;
void *l_ast_data;
- spinlock_t l_extents_list_lock;
- struct list_head l_extents_list;
+ cfs_spinlock_t l_extents_list_lock;
+ cfs_list_t l_extents_list;
- struct list_head l_cache_locks_list;
+ cfs_list_t l_cache_locks_list;
/*
* Server-side-only members.
/**
* Protected by elt_lock. Callbacks pending.
*/
- struct list_head l_pending_chain;
+ cfs_list_t l_pending_chain;
cfs_time_t l_callback_timeout;
/**
* For ldlm_add_ast_work_item().
*/
- struct list_head l_bl_ast;
+ cfs_list_t l_bl_ast;
/**
* For ldlm_add_ast_work_item().
*/
- struct list_head l_cp_ast;
+ cfs_list_t l_cp_ast;
/**
* For ldlm_add_ast_work_item().
*/
- struct list_head l_rk_ast;
+ cfs_list_t l_rk_ast;
struct ldlm_lock *l_blocking_lock;
int l_bl_ast_run;
/**
* Protected by lr_lock, linkages to "skip lists".
*/
- struct list_head l_sl_mode;
- struct list_head l_sl_policy;
+ cfs_list_t l_sl_mode;
+ cfs_list_t l_sl_policy;
struct lu_ref l_reference;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
/* Debugging stuff for bug 20498, for tracking export
/** number of export references taken */
int l_exp_refs_nr;
/** link all locks referencing one export */
- struct list_head l_exp_refs_link;
+ cfs_list_t l_exp_refs_link;
/** referenced export object */
struct obd_export *l_exp_refs_target;
#endif
struct ldlm_namespace *lr_namespace;
/* protected by ns_hash_lock */
- struct list_head lr_hash;
+ cfs_list_t lr_hash;
struct ldlm_resource *lr_parent; /* 0 for a root resource */
- struct list_head lr_children; /* list head for child resources */
- struct list_head lr_childof; /* part of ns_root_list if root res,
+ cfs_list_t lr_children; /* list head for child resources */
+ cfs_list_t lr_childof; /* part of ns_root_list if root res,
* part of lr_children if child */
- spinlock_t lr_lock;
+ cfs_spinlock_t lr_lock;
/* protected by lr_lock */
- struct list_head lr_granted;
- struct list_head lr_converting;
- struct list_head lr_waiting;
+ cfs_list_t lr_granted;
+ cfs_list_t lr_converting;
+ cfs_list_t lr_waiting;
ldlm_mode_t lr_most_restr;
ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK} */
struct ldlm_res_id lr_name;
- atomic_t lr_refcount;
+ cfs_atomic_t lr_refcount;
struct ldlm_interval_tree lr_itree[LCK_MODE_NUM]; /* interval trees*/
/* Server-side-only lock value block elements */
- struct semaphore lr_lvb_sem;
+ cfs_semaphore_t lr_lvb_sem;
__u32 lr_lvb_len;
void *lr_lvb_data;
};
struct ldlm_ast_work {
- struct ldlm_lock *w_lock;
- int w_blocking;
- struct ldlm_lock_desc w_desc;
- struct list_head w_list;
- int w_flags;
- void *w_data;
- int w_datalen;
+ struct ldlm_lock *w_lock;
+ int w_blocking;
+ struct ldlm_lock_desc w_desc;
+ cfs_list_t w_list;
+ int w_flags;
+ void *w_data;
+ int w_datalen;
};
/* ldlm_enqueue parameters common */
extern char *ldlm_it2str(int it);
#ifdef LIBCFS_DEBUG
#define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
- CHECK_STACK(); \
+ CFS_CHECK_STACK(); \
\
if (((level) & D_CANTMASK) != 0 || \
((libcfs_debug & (level)) != 0 && \
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
int first_enq, ldlm_error_t *err,
- struct list_head *work_list);
+ cfs_list_t *work_list);
/*
* Iterators.
lock; \
})
-#define ldlm_lock_list_put(head, member, count) \
-({ \
- struct ldlm_lock *_lock, *_next; \
- int c = count; \
- list_for_each_entry_safe(_lock, _next, head, member) { \
- if (c-- == 0) \
- break; \
- list_del_init(&_lock->member); \
- LDLM_LOCK_RELEASE(_lock); \
- } \
- LASSERT(c <= 0); \
+#define ldlm_lock_list_put(head, member, count) \
+({ \
+ struct ldlm_lock *_lock, *_next; \
+ int c = count; \
+ cfs_list_for_each_entry_safe(_lock, _next, head, member) { \
+ if (c-- == 0) \
+ break; \
+ cfs_list_del_init(&_lock->member); \
+ LDLM_LOCK_RELEASE(_lock); \
+ } \
+ LASSERT(c <= 0); \
})
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
ldlm_type_t type, int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res,
+ cfs_list_t *head,
struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
struct ptlrpc_request *req,
- struct list_head *cancels,
+ cfs_list_t *cancels,
int count);
int ldlm_prep_elc_req(struct obd_export *exp,
struct ptlrpc_request *req,
int version, int opc, int canceloff,
- struct list_head *cancels, int count);
+ cfs_list_t *cancels, int count);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
const struct ldlm_request *dlm_req,
const struct ldlm_callback_suite *cbs);
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, int flags, void *opaque);
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
+int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
int count, int flags);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
- struct list_head *cancels,
+ cfs_list_t *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, int lock_flags,
int cancel_flags, void *opaque);
-int ldlm_cli_cancel_list(struct list_head *head, int count,
+int ldlm_cli_cancel_list(cfs_list_t *head, int count,
struct ptlrpc_request *req, int flags);
/* mds/handler.c */
static inline void lock_res(struct ldlm_resource *res)
{
- spin_lock(&res->lr_lock);
+ cfs_spin_lock(&res->lr_lock);
}
static inline void lock_res_nested(struct ldlm_resource *res,
enum lock_res_type mode)
{
- spin_lock_nested(&res->lr_lock, mode);
+ cfs_spin_lock_nested(&res->lr_lock, mode);
}
static inline void unlock_res(struct ldlm_resource *res)
{
- spin_unlock(&res->lr_lock);
+ cfs_spin_unlock(&res->lr_lock);
}
static inline void check_res_locked(struct ldlm_resource *res)
struct lu_export_data {
/** Protects led_lcd below */
- struct semaphore led_lcd_lock;
+ cfs_semaphore_t led_lcd_lock;
/** Per-client data for each export */
struct lsd_client_data *led_lcd;
/** Offset of record in last_rcvd file */
struct mdt_export_data {
struct lu_export_data med_led;
- struct list_head med_open_head;
- spinlock_t med_open_lock; /* lock med_open_head, mfd_list*/
+ cfs_list_t med_open_head;
+ cfs_spinlock_t med_open_lock; /* lock med_open_head, mfd_list*/
__u64 med_ibits_known;
- struct semaphore med_idmap_sem;
+ cfs_semaphore_t med_idmap_sem;
struct lustre_idmap_table *med_idmap;
};
#define med_lr_idx med_led.led_lr_idx
struct osc_creator {
- spinlock_t oscc_lock;
- struct list_head oscc_wait_create_list;
- struct obd_device *oscc_obd;
+ cfs_spinlock_t oscc_lock;
+ cfs_list_t oscc_wait_create_list;
+ struct obd_device *oscc_obd;
obd_id oscc_last_id;//last available pre-created object
obd_id oscc_next_id;// what object id to give out next
int oscc_grow_count;
};
struct ec_export_data { /* echo client */
- struct list_head eced_locks;
+ cfs_list_t eced_locks;
};
/* In-memory access to client data from OST struct */
struct filter_export_data {
struct lu_export_data fed_led;
- spinlock_t fed_lock; /**< protects fed_mod_list */
+ cfs_spinlock_t fed_lock; /**< protects fed_mod_list */
long fed_dirty; /* in bytes */
long fed_grant; /* in bytes */
- struct list_head fed_mod_list; /* files being modified */
+ cfs_list_t fed_mod_list; /* files being modified */
int fed_mod_count;/* items in fed_writing list */
long fed_pending; /* bytes just being written */
__u32 fed_group;
typedef struct nid_stat {
lnet_nid_t nid;
- struct hlist_node nid_hash;
- struct list_head nid_list;
+ cfs_hlist_node_t nid_hash;
+ cfs_list_t nid_list;
struct obd_device *nid_obd;
struct proc_dir_entry *nid_proc;
struct lprocfs_stats *nid_stats;
struct lprocfs_stats *nid_ldlm_stats;
struct brw_stats *nid_brw_stats;
- atomic_t nid_exp_ref_count; /* for obd_nid_stats_hash
+ cfs_atomic_t nid_exp_ref_count; /* for obd_nid_stats_hash
exp_nid_stats */
}nid_stat_t;
#define nidstat_getref(nidstat) \
do { \
- atomic_inc(&(nidstat)->nid_exp_ref_count); \
+ cfs_atomic_inc(&(nidstat)->nid_exp_ref_count); \
} while(0)
#define nidstat_putref(nidstat) \
do { \
- atomic_dec(&(nidstat)->nid_exp_ref_count); \
- LASSERTF(atomic_read(&(nidstat)->nid_exp_ref_count) >= 0, \
+ cfs_atomic_dec(&(nidstat)->nid_exp_ref_count); \
+ LASSERTF(cfs_atomic_read(&(nidstat)->nid_exp_ref_count) >= 0, \
"stat %p nid_exp_ref_count < 0\n", nidstat); \
} while(0)
struct obd_export {
struct portals_handle exp_handle;
- atomic_t exp_refcount;
+ cfs_atomic_t exp_refcount;
/**
* Set of counters below is to track where export references are
* kept. The exp_rpc_count is used for reconnect handling also,
* the cb_count and locks_count are for debug purposes only for now.
* The sum of them should be less than exp_refcount by 3
*/
- atomic_t exp_rpc_count; /** RPC references */
- atomic_t exp_cb_count; /** Commit callback references */
- atomic_t exp_locks_count; /** Lock references */
+ cfs_atomic_t exp_rpc_count; /* RPC references */
+ cfs_atomic_t exp_cb_count; /* Commit callback references */
+ cfs_atomic_t exp_locks_count; /** Lock references */
#if LUSTRE_TRACKS_LOCK_EXP_REFS
- struct list_head exp_locks_list;
- spinlock_t exp_locks_list_guard;
+ cfs_list_t exp_locks_list;
+ cfs_spinlock_t exp_locks_list_guard;
#endif
- atomic_t exp_replay_count;
+ cfs_atomic_t exp_replay_count;
struct obd_uuid exp_client_uuid;
- struct list_head exp_obd_chain;
- struct hlist_node exp_uuid_hash; /* uuid-export hash*/
- struct hlist_node exp_nid_hash; /* nid-export hash */
+ cfs_list_t exp_obd_chain;
+ cfs_hlist_node_t exp_uuid_hash; /* uuid-export hash*/
+ cfs_hlist_node_t exp_nid_hash; /* nid-export hash */
/* exp_obd_chain_timed fo ping evictor, protected by obd_dev_lock */
- struct list_head exp_obd_chain_timed;
+ cfs_list_t exp_obd_chain_timed;
struct obd_device *exp_obd;
struct obd_import *exp_imp_reverse; /* to make RPCs backwards */
struct nid_stat *exp_nid_stats;
struct ptlrpc_connection *exp_connection;
__u32 exp_conn_cnt;
cfs_hash_t *exp_lock_hash; /* existing lock hash */
- spinlock_t exp_lock_hash_lock;
- struct list_head exp_outstanding_replies;
- struct list_head exp_uncommitted_replies;
- spinlock_t exp_uncommitted_replies_lock;
+ cfs_spinlock_t exp_lock_hash_lock;
+ cfs_list_t exp_outstanding_replies;
+ cfs_list_t exp_uncommitted_replies;
+ cfs_spinlock_t exp_uncommitted_replies_lock;
__u64 exp_last_committed;
cfs_time_t exp_last_request_time;
- struct list_head exp_req_replay_queue;
- spinlock_t exp_lock; /* protects flags int below */
+ cfs_list_t exp_req_replay_queue;
+ cfs_spinlock_t exp_lock; /* protects flags int below */
/* ^ protects exp_outstanding_replies too */
__u64 exp_connect_flags;
enum obd_option exp_flags;
/* client timed out and tried to reconnect,
* but couldn't because of active rpcs */
exp_abort_active_req:1;
- struct list_head exp_queued_rpc; /* RPC to be handled */
+ cfs_list_t exp_queued_rpc; /* RPC to be handled */
/* also protected by exp_lock */
enum lustre_sec_part exp_sp_peer;
struct sptlrpc_flavor exp_flvr; /* current */
struct lu_client_seq {
/* Sequence-controller export. */
struct obd_export *lcs_exp;
- struct semaphore lcs_sem;
+ cfs_semaphore_t lcs_sem;
/*
* Range of allowed for allocation sequeces. When using lu_client_seq on
struct lu_client_seq *lss_cli;
/* Semaphore for protecting allocation */
- struct semaphore lss_sem;
+ cfs_semaphore_t lss_sem;
/*
* Service uuid, passed from MDT + seq name to form unique seq name to
struct lu_fld_target {
- struct list_head ft_chain;
+ cfs_list_t ft_chain;
struct obd_export *ft_exp;
struct lu_server_fld *ft_srv;
__u64 ft_idx;
/**
* Protect index modifications */
- struct mutex lsf_lock;
+ cfs_mutex_t lsf_lock;
/**
* Fld service name in form "fld-srv-lustre-MDTXXX" */
/**
* List of exports client FLD knows about. */
- struct list_head lcf_targets;
+ cfs_list_t lcf_targets;
/**
* Current hash to be used to chose an export. */
/**
* Lock protecting exports list and fld_hash. */
- spinlock_t lcf_lock;
+ cfs_spinlock_t lcf_lock;
/**
* Client FLD cache. */
#error Unsupported operating system.
#endif
+#include <libcfs/libcfs.h>
+
typedef void (*portals_handle_addref_cb)(void *object);
/* These handles are most easily used by having them appear at the very top of
* uses some offsetof() magic. */
struct portals_handle {
- struct list_head h_link;
+ cfs_list_t h_link;
__u64 h_cookie;
portals_handle_addref_cb h_addref;
/* newly added fields to handle the RCU issue. -jxiong */
- spinlock_t h_lock;
+ cfs_spinlock_t h_lock;
void *h_ptr;
void (*h_free_cb)(void *, size_t);
- struct rcu_head h_rcu;
+ cfs_rcu_head_t h_rcu;
unsigned int h_size;
__u8 h_in:1;
__u8 h_unused[3];
void class_handle_unhash(struct portals_handle *);
void class_handle_hash_back(struct portals_handle *);
void *class_handle2object(__u64 cookie);
-void class_handle_free_cb(struct rcu_head *);
+void class_handle_free_cb(cfs_rcu_head_t *);
int class_handle_init(void);
void class_handle_cleanup(void);
};
struct lustre_idmap_table {
- spinlock_t lit_lock;
- struct list_head lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
+ cfs_spinlock_t lit_lock;
+ cfs_list_t lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
};
-extern void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist);
-extern void lustre_groups_sort(struct group_info *group_info);
+extern void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist);
+extern void lustre_groups_sort(cfs_group_info_t *group_info);
extern int lustre_in_group_p(struct md_ucred *mu, gid_t grp);
extern int lustre_idmap_add(struct lustre_idmap_table *t,
#define AT_FLG_NOHIST 0x1 /* use last reported value only */
struct adaptive_timeout {
- time_t at_binstart; /* bin start time */
- unsigned int at_hist[AT_BINS]; /* timeout history bins */
- unsigned int at_flags;
- unsigned int at_current; /* current timeout value */
- unsigned int at_worst_ever; /* worst-ever timeout value */
- time_t at_worst_time; /* worst-ever timeout timestamp */
- spinlock_t at_lock;
+ time_t at_binstart; /* bin start time */
+ unsigned int at_hist[AT_BINS]; /* timeout history bins */
+ unsigned int at_flags;
+ unsigned int at_current; /* current timeout value */
+ unsigned int at_worst_ever; /* worst-ever timeout value */
+ time_t at_worst_time; /* worst-ever timeout timestamp */
+ cfs_spinlock_t at_lock;
};
enum lustre_imp_state {
};
struct ptlrpc_at_array {
- struct list_head *paa_reqs_array; /* array to hold requests */
+ cfs_list_t *paa_reqs_array; /* array to hold requests */
__u32 paa_size; /* the size of array */
__u32 paa_count; /* the total count of reqs */
- time_t paa_deadline; /* the earliest deadline of reqs */
- __u32 *paa_reqs_count; /* the count of reqs in each entry */
+ time_t paa_deadline; /* earliest deadline of reqs */
+ __u32 *paa_reqs_count; /* count of reqs in each entry */
};
static inline char * ptlrpc_import_state_name(enum lustre_imp_state state)
};
struct obd_import_conn {
- struct list_head oic_item;
+ cfs_list_t oic_item;
struct ptlrpc_connection *oic_conn;
struct obd_uuid oic_uuid;
__u64 oic_last_attempt; /* jiffies, 64-bit */
struct obd_import {
struct portals_handle imp_handle;
- atomic_t imp_refcount;
+ cfs_atomic_t imp_refcount;
struct lustre_handle imp_dlm_handle; /* client's ldlm export */
struct ptlrpc_connection *imp_connection;
struct ptlrpc_client *imp_client;
- struct list_head imp_pinger_chain;
- struct list_head imp_zombie_chain; /* queue for destruction */
+ cfs_list_t imp_pinger_chain;
+ cfs_list_t imp_zombie_chain; /* queue for destruction */
/* Lists of requests that are retained for replay, waiting for a reply,
* or waiting for recovery to complete, respectively.
*/
- struct list_head imp_replay_list;
- struct list_head imp_sending_list;
- struct list_head imp_delayed_list;
+ cfs_list_t imp_replay_list;
+ cfs_list_t imp_sending_list;
+ cfs_list_t imp_delayed_list;
struct obd_device *imp_obd;
struct ptlrpc_sec *imp_sec;
- struct semaphore imp_sec_mutex;
+ cfs_semaphore_t imp_sec_mutex;
cfs_time_t imp_sec_expire;
cfs_waitq_t imp_recovery_waitq;
- atomic_t imp_inflight;
- atomic_t imp_unregistering;
- atomic_t imp_replay_inflight;
- atomic_t imp_inval_count; /* in-progress invalidations */
- atomic_t imp_timeouts;
+ cfs_atomic_t imp_inflight;
+ cfs_atomic_t imp_unregistering;
+ cfs_atomic_t imp_replay_inflight;
+ cfs_atomic_t imp_inval_count; /* in-progress invalidations */
+ cfs_atomic_t imp_timeouts;
enum lustre_imp_state imp_state;
struct import_state_hist imp_state_hist[IMP_STATE_HIST_LEN];
int imp_state_hist_idx;
__u64 imp_last_success_conn; /* jiffies, 64-bit */
/* all available obd_import_conn linked here */
- struct list_head imp_conn_list;
+ cfs_list_t imp_conn_list;
struct obd_import_conn *imp_conn_current;
/* Protects flags, level, generation, conn_cnt, *_list */
- spinlock_t imp_lock;
+ cfs_spinlock_t imp_lock;
/* flags */
unsigned long imp_no_timeout:1, /* timeouts are disabled */
int event, void *event_arg, void *cb_data);
struct obd_import_observer {
- struct list_head oio_chain;
+ cfs_list_t oio_chain;
obd_import_callback oio_cb;
void *oio_cb_data;
};
at->at_worst_ever = val;
at->at_worst_time = cfs_time_current_sec();
at->at_flags = flags;
- spin_lock_init(&at->at_lock);
+ cfs_spin_lock_init(&at->at_lock);
}
static inline int at_get(struct adaptive_timeout *at) {
return at->at_current;
#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
/* statfs_pack.c */
-void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
+void statfs_pack(struct obd_statfs *osfs, cfs_kstatfs_t *sfs);
+void statfs_unpack(cfs_kstatfs_t *sfs, struct obd_statfs *osfs);
/* l_lock.c */
struct lustre_lock {
int l_depth;
cfs_task_t *l_owner;
- struct semaphore l_sem;
- spinlock_t l_spin;
+ cfs_semaphore_t l_sem;
+ cfs_spinlock_t l_spin;
};
void l_lock_init(struct lustre_lock *);
static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
{
- int len = size_round(sizeof(struct obd_ioctl_data));
- len += size_round(data->ioc_inllen1);
- len += size_round(data->ioc_inllen2);
- len += size_round(data->ioc_inllen3);
- len += size_round(data->ioc_inllen4);
+ int len = cfs_size_round(sizeof(struct obd_ioctl_data));
+ len += cfs_size_round(data->ioc_inllen1);
+ len += cfs_size_round(data->ioc_inllen2);
+ len += cfs_size_round(data->ioc_inllen3);
+ len += cfs_size_round(data->ioc_inllen4);
return len;
}
int offset = 0;
ENTRY;
- err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
+ err = cfs_copy_from_user(&hdr, (void *)arg, sizeof(hdr));
if (err)
RETURN(err);
*len = hdr.ioc_len;
data = (struct obd_ioctl_data *)*buf;
- err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
+ err = cfs_copy_from_user(*buf, (void *)arg, hdr.ioc_len);
if (err) {
OBD_VFREE(*buf, hdr.ioc_len);
RETURN(err);
if (data->ioc_inllen1) {
data->ioc_inlbuf1 = &data->ioc_bulk[0];
- offset += size_round(data->ioc_inllen1);
+ offset += cfs_size_round(data->ioc_inllen1);
}
if (data->ioc_inllen2) {
data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
- offset += size_round(data->ioc_inllen2);
+ offset += cfs_size_round(data->ioc_inllen2);
}
if (data->ioc_inllen3) {
data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
- offset += size_round(data->ioc_inllen3);
+ offset += cfs_size_round(data->ioc_inllen3);
}
if (data->ioc_inllen4) {
static inline int obd_ioctl_popdata(void *arg, void *data, int len)
{
- int err = copy_to_user(arg, data, len);
+ int err = cfs_copy_to_user(arg, data, len);
if (err)
err = -EFAULT;
return err;
__blocked = l_w_e_set_sigs(0); \
\
for (;;) { \
- set_current_state(TASK_INTERRUPTIBLE); \
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE); \
\
if (condition) \
break; \
\
cfs_block_sigs(__blocked); \
\
- set_current_state(TASK_RUNNING); \
+ cfs_set_current_state(CFS_TASK_RUNNING); \
cfs_waitq_del(&wq, &__wait); \
} while (0)
__ret; \
})
-#define cfs_wait_event(wq, condition) \
+#define l_cfs_wait_event(wq, condition) \
({ \
struct l_wait_info lwi = { 0 }; \
l_wait_event(wq, condition, &lwi); \
* mount is connected to. This field is updated by ll_ocd_update()
* under ->lco_lock.
*/
- __u64 lco_flags;
- struct semaphore lco_lock;
+ __u64 lco_flags;
+ cfs_semaphore_t lco_lock;
struct obd_export *lco_md_exp;
struct obd_export *lco_dt_exp;
};
#define LLOG_EEMPTY 4711
struct plain_handle_data {
- struct list_head phd_entry;
+ cfs_list_t phd_entry;
struct llog_handle *phd_cat_handle;
struct llog_cookie phd_cookie; /* cookie of this log in its cat */
int phd_last_idx;
};
struct cat_handle_data {
- struct list_head chd_head;
+ cfs_list_t chd_head;
struct llog_handle *chd_current_log; /* currently open log */
};
/* In-memory descriptor for a log object or log catalog */
struct llog_handle {
- struct rw_semaphore lgh_lock;
+ cfs_rw_semaphore_t lgh_lock;
struct llog_logid lgh_id; /* id of this log */
struct llog_log_hdr *lgh_hdr;
struct file *lgh_file;
struct llog_handle *loc_handle;
struct llog_commit_master *loc_lcm;
struct llog_canceld_ctxt *loc_llcd;
- struct semaphore loc_sem; /* protects loc_llcd and loc_imp */
- atomic_t loc_refcount;
+ cfs_semaphore_t loc_sem; /* protects loc_llcd and loc_imp */
+ cfs_atomic_t loc_refcount;
void *llog_proc_cb;
long loc_flags; /* flags, see above defines */
};
/**
* Number of llcds onthis lcm.
*/
- atomic_t lcm_count;
+ cfs_atomic_t lcm_count;
/**
* The refcount for lcm
*/
- atomic_t lcm_refcount;
+ cfs_atomic_t lcm_refcount;
/**
* Thread control structure. Used for control commit thread.
*/
/**
* Lock protecting list of llcds.
*/
- spinlock_t lcm_lock;
+ cfs_spinlock_t lcm_lock;
/**
* Llcds in flight for debugging purposes.
*/
- struct list_head lcm_llcds;
+ cfs_list_t lcm_llcds;
/**
* Commit thread name buffer. Only used for thread start.
*/
static inline struct llog_commit_master
*lcm_get(struct llog_commit_master *lcm)
{
- LASSERT(atomic_read(&lcm->lcm_refcount) > 0);
- atomic_inc(&lcm->lcm_refcount);
+ LASSERT(cfs_atomic_read(&lcm->lcm_refcount) > 0);
+ cfs_atomic_inc(&lcm->lcm_refcount);
return lcm;
}
static inline void
lcm_put(struct llog_commit_master *lcm)
{
- if (!atomic_dec_and_test(&lcm->lcm_refcount)) {
+ if (!cfs_atomic_dec_and_test(&lcm->lcm_refcount)) {
return ;
}
OBD_FREE_PTR(lcm);
/**
* Link to lcm llcds list.
*/
- struct list_head llcd_list;
+ cfs_list_t llcd_list;
/**
* Current llcd size while gathering cookies. This should not be
* more than ->llcd_size. Used for determining if we need to
static inline int llog_data_len(int len)
{
- return size_round(len);
+ return cfs_size_round(len);
}
static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
{
- LASSERT(atomic_read(&ctxt->loc_refcount) > 0);
- atomic_inc(&ctxt->loc_refcount);
+ LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 0);
+ cfs_atomic_inc(&ctxt->loc_refcount);
CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt,
- atomic_read(&ctxt->loc_refcount));
+ cfs_atomic_read(&ctxt->loc_refcount));
return ctxt;
}
{
if (ctxt == NULL)
return;
- LASSERT(atomic_read(&ctxt->loc_refcount) > 0);
- LASSERT(atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
+ LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
- atomic_read(&ctxt->loc_refcount) - 1);
+ cfs_atomic_read(&ctxt->loc_refcount) - 1);
__llog_ctxt_put(ctxt);
}
static inline void llog_group_init(struct obd_llog_group *olg, int group)
{
cfs_waitq_init(&olg->olg_waitq);
- spin_lock_init(&olg->olg_lock);
- sema_init(&olg->olg_cat_processing, 1);
+ cfs_spin_lock_init(&olg->olg_lock);
+ cfs_sema_init(&olg->olg_cat_processing, 1);
olg->olg_group = group;
}
{
LASSERT(exp != NULL);
- spin_lock(&olg->olg_lock);
+ cfs_spin_lock(&olg->olg_lock);
if (olg->olg_exp != NULL && olg->olg_exp != exp)
CWARN("%s: export for group %d is changed: 0x%p -> 0x%p\n",
exp->exp_obd->obd_name, olg->olg_group,
olg->olg_exp, exp);
olg->olg_exp = exp;
- spin_unlock(&olg->olg_lock);
+ cfs_spin_unlock(&olg->olg_lock);
}
static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
{
LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
- spin_lock(&olg->olg_lock);
+ cfs_spin_lock(&olg->olg_lock);
if (olg->olg_ctxts[index] != NULL) {
- spin_unlock(&olg->olg_lock);
+ cfs_spin_unlock(&olg->olg_lock);
return -EEXIST;
}
olg->olg_ctxts[index] = ctxt;
- spin_unlock(&olg->olg_lock);
+ cfs_spin_unlock(&olg->olg_lock);
return 0;
}
LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
- spin_lock(&olg->olg_lock);
+ cfs_spin_lock(&olg->olg_lock);
if (olg->olg_ctxts[index] == NULL) {
ctxt = NULL;
} else {
ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
}
- spin_unlock(&olg->olg_lock);
+ cfs_spin_unlock(&olg->olg_lock);
return ctxt;
}
+ sizeof(struct llog_rec_tail);
else
buflen = rec->lrh_len;
- LASSERT(size_round(buflen) == buflen);
+ LASSERT(cfs_size_round(buflen) == buflen);
raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
if (!raised)
struct obd_device;
struct mdc_rpc_lock {
- struct semaphore rpcl_sem;
+ cfs_semaphore_t rpcl_sem;
struct lookup_intent *rpcl_it;
};
static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
{
- sema_init(&lck->rpcl_sem, 1);
+ cfs_sema_init(&lck->rpcl_sem, 1);
lck->rpcl_it = NULL;
}
{
ENTRY;
if (!it || (it->it_op != IT_GETATTR && it->it_op != IT_LOOKUP)) {
- down(&lck->rpcl_sem);
+ cfs_down(&lck->rpcl_sem);
LASSERT(lck->rpcl_it == NULL);
lck->rpcl_it = it;
}
if (!it || (it->it_op != IT_GETATTR && it->it_op != IT_LOOKUP)) {
LASSERT(it == lck->rpcl_it);
lck->rpcl_it = NULL;
- up(&lck->rpcl_sem);
+ cfs_up(&lck->rpcl_sem);
}
EXIT;
}
struct mdc_cache_waiter {
- struct list_head mcw_entry;
+ cfs_list_t mcw_entry;
cfs_waitq_t mcw_waitq;
};
*/
#define LDLM_THREADS_AUTO_MIN (2)
-#define LDLM_THREADS_AUTO_MAX min(num_online_cpus()*num_online_cpus()*32, 128)
+#define LDLM_THREADS_AUTO_MAX min(cfs_num_online_cpus() * \
+ cfs_num_online_cpus() * 32, 128)
#define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
-#define LDLM_NBUFS (64 * num_online_cpus())
+#define LDLM_NBUFS (64 * cfs_num_online_cpus())
#define LDLM_BUFSIZE (8 * 1024)
#define LDLM_MAXREQSIZE (5 * 1024)
#define LDLM_MAXREPSIZE (1024)
#define MDT_MIN_THREADS 2UL
#define MDT_MAX_THREADS 512UL
#define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
- num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
+ cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
+ 2UL)
#define FLD_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
- num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
+ cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
+ 2UL)
#define SEQ_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
- num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
+ cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
+ 2UL)
/* Absolute limits */
#define MDS_THREADS_MIN 2
#define MDS_THREADS_MAX 512
#define MDS_THREADS_MIN_READPAGE 2
-#define MDS_NBUFS (64 * num_online_cpus())
+#define MDS_NBUFS (64 * cfs_num_online_cpus())
#define MDS_BUFSIZE (8 * 1024)
/* Assume file name length = FNAME_MAX = 256 (true for ext3).
* path name length = PATH_MAX = 4096
#define MGS_THREADS_AUTO_MIN 2
#define MGS_THREADS_AUTO_MAX 32
-#define MGS_NBUFS (64 * num_online_cpus())
+#define MGS_NBUFS (64 * cfs_num_online_cpus())
#define MGS_BUFSIZE (8 * 1024)
#define MGS_MAXREQSIZE (7 * 1024)
#define MGS_MAXREPSIZE (9 * 1024)
/* Absolute limits */
#define OSS_THREADS_MIN 3 /* difficult replies, HPQ, others */
#define OSS_THREADS_MAX 512
-#define OST_NBUFS (64 * num_online_cpus())
+#define OST_NBUFS (64 * cfs_num_online_cpus())
#define OST_BUFSIZE (8 * 1024)
/* OST_MAXREQSIZE ~= 4768 bytes =
* lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
struct ptlrpc_connection {
- struct hlist_node c_hash;
+ cfs_hlist_node_t c_hash;
lnet_nid_t c_self;
lnet_process_id_t c_peer;
struct obd_uuid c_remote_uuid;
- atomic_t c_refcount;
+ cfs_atomic_t c_refcount;
};
struct ptlrpc_client {
typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
struct ptlrpc_request_set {
- int set_remaining; /* # uncompleted requests */
- cfs_waitq_t set_waitq;
- cfs_waitq_t *set_wakeup_ptr;
- struct list_head set_requests;
- struct list_head set_cblist; /* list of completion callbacks */
- set_interpreter_func set_interpret; /* completion callback */
- void *set_arg; /* completion context */
+ int set_remaining; /* # uncompleted requests */
+ cfs_waitq_t set_waitq;
+ cfs_waitq_t *set_wakeup_ptr;
+ cfs_list_t set_requests;
+ cfs_list_t set_cblist; /* list of completion callbacks */
+ set_interpreter_func set_interpret; /* completion callback */
+ void *set_arg; /* completion context */
/* locked so that any old caller can communicate requests to
* the set holder who can then fold them into the lock-free set */
- spinlock_t set_new_req_lock;
- struct list_head set_new_requests;
+ cfs_spinlock_t set_new_req_lock;
+ cfs_list_t set_new_requests;
};
struct ptlrpc_set_cbdata {
- struct list_head psc_item;
+ cfs_list_t psc_item;
set_interpreter_func psc_interpret;
void *psc_data;
};
struct ptlrpc_reply_state {
struct ptlrpc_cb_id rs_cb_id;
- struct list_head rs_list;
- struct list_head rs_exp_list;
- struct list_head rs_obd_list;
+ cfs_list_t rs_list;
+ cfs_list_t rs_exp_list;
+ cfs_list_t rs_obd_list;
#if RS_DEBUG
- struct list_head rs_debug_list;
+ cfs_list_t rs_debug_list;
#endif
/* A spinlock to protect the reply state flags */
- spinlock_t rs_lock;
+ cfs_spinlock_t rs_lock;
/* Reply state flags */
unsigned long rs_difficult:1; /* ACK/commit stuff */
unsigned long rs_no_ack:1; /* no ACK, even for
struct obd_export *rs_export;
struct ptlrpc_service *rs_service;
lnet_handle_md_t rs_md_h;
- atomic_t rs_refcount;
+ cfs_atomic_t rs_refcount;
struct ptlrpc_svc_ctx *rs_svc_ctx;
struct lustre_msg *rs_repbuf; /* wrapper */
void *arg, int rc);
struct ptlrpc_request_pool {
- spinlock_t prp_lock;
- struct list_head prp_req_list; /* list of ptlrpc_request structs */
+ cfs_spinlock_t prp_lock;
+ cfs_list_t prp_req_list; /* list of ptlrpc_request structs */
int prp_rq_size;
void (*prp_populate)(struct ptlrpc_request_pool *, int);
};
*/
struct ptlrpc_request {
int rq_type; /* one of PTL_RPC_MSG_* */
- struct list_head rq_list;
- struct list_head rq_timed_list; /* server-side early replies */
- struct list_head rq_history_list; /* server-side history */
- struct list_head rq_exp_list; /* server-side per-export list */
- struct ptlrpc_hpreq_ops *rq_ops; /* server-side hp handlers */
- __u64 rq_history_seq; /* history sequence # */
+ cfs_list_t rq_list;
+ cfs_list_t rq_timed_list; /* server-side early replies */
+ cfs_list_t rq_history_list; /* server-side history */
+ cfs_list_t rq_exp_list; /* server-side per-export list */
+ struct ptlrpc_hpreq_ops *rq_ops; /* server-side hp handlers */
+ __u64 rq_history_seq; /* history sequence # */
/* the index of service's srv_at_array into which request is linked */
time_t rq_at_index;
int rq_status;
- spinlock_t rq_lock;
+ cfs_spinlock_t rq_lock;
/* client-side flags are serialized by rq_lock */
unsigned long rq_intr:1, rq_replied:1, rq_err:1,
rq_timedout:1, rq_resend:1, rq_restart:1,
enum rq_phase rq_phase; /* one of RQ_PHASE_* */
enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
- atomic_t rq_refcount; /* client-side refcount for SENT race,
- server-side refcounf for multiple replies */
+ cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
+ server-side refcount for multiple replies */
struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */
struct lustre_msg *rq_repmsg;
__u64 rq_transno;
__u64 rq_xid;
- struct list_head rq_replay_list;
+ cfs_list_t rq_replay_list;
struct ptlrpc_cli_ctx *rq_cli_ctx; /* client's half ctx */
struct ptlrpc_svc_ctx *rq_svc_ctx; /* server's half ctx */
- struct list_head rq_ctx_chain; /* link to waited ctx */
+ cfs_list_t rq_ctx_chain; /* link to waited ctx */
struct sptlrpc_flavor rq_flvr; /* client & server */
enum lustre_sec_part rq_sp_from;
int rq_timeout; /* service time estimate (secs) */
/* Multi-rpc bits */
- struct list_head rq_set_chain;
+ cfs_list_t rq_set_chain;
struct ptlrpc_request_set *rq_set;
/** Async completion handler */
ptlrpc_interpterer_t rq_interpret_reply;
#define debug_req(cdls, level, req, file, func, line, fmt, a...) \
do { \
- CHECK_STACK(); \
+ CFS_CHECK_STACK(); \
\
if (((level) & D_CANTMASK) != 0 || \
((libcfs_debug & (level)) != 0 && \
} while (0)
struct ptlrpc_bulk_page {
- struct list_head bp_link;
+ cfs_list_t bp_link;
int bp_buflen;
int bp_pageoffset; /* offset within a page */
struct page *bp_page;
unsigned long bd_network_rw:1; /* accessible to the network */
unsigned long bd_type:2; /* {put,get}{source,sink} */
unsigned long bd_registered:1; /* client side */
- spinlock_t bd_lock; /* serialise with callback */
+ cfs_spinlock_t bd_lock; /* serialise with callback */
int bd_import_generation;
struct obd_export *bd_export;
struct obd_import *bd_import;
/**
* active threads in svc->srv_threads
*/
- struct list_head t_link;
+ cfs_list_t t_link;
/**
* thread-private data (preallocated memory)
*/
};
struct ptlrpc_request_buffer_desc {
- struct list_head rqbd_list;
- struct list_head rqbd_reqs;
+ cfs_list_t rqbd_list;
+ cfs_list_t rqbd_reqs;
struct ptlrpc_service *rqbd_service;
lnet_handle_md_t rqbd_md_h;
int rqbd_refcount;
#define PTLRPC_SVC_HP_RATIO 10
struct ptlrpc_service {
- struct list_head srv_list; /* chain thru all services */
+ cfs_list_t srv_list; /* chain thru all services */
int srv_max_req_size; /* biggest request to receive */
int srv_max_reply_size; /* biggest reply to send */
int srv_buf_size; /* size of individual buffers */
int srv_threads_max; /* thread upper limit */
int srv_threads_started; /* index of last started thread */
int srv_threads_running; /* # running threads */
- atomic_t srv_n_difficult_replies; /* # 'difficult' replies */
+ cfs_atomic_t srv_n_difficult_replies; /* # 'difficult' replies */
int srv_n_active_reqs; /* # reqs being served */
int srv_n_hpreq; /* # HPreqs being served */
cfs_duration_t srv_rqbd_timeout; /* timeout before re-posting reqs, in tick */
/* AT stuff */
struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
- spinlock_t srv_at_lock;
+ cfs_spinlock_t srv_at_lock;
struct ptlrpc_at_array srv_at_array; /* reqs waiting for replies */
- cfs_timer_t srv_at_timer; /* early reply timer */
-
- int srv_n_queued_reqs; /* # reqs in either of the queues below */
- int srv_hpreq_count; /* # hp requests handled */
- int srv_hpreq_ratio; /* # hp per lp reqs to handle */
- struct list_head srv_req_in_queue; /* incoming reqs */
- struct list_head srv_request_queue; /* reqs waiting for service */
- struct list_head srv_request_hpq; /* high priority queue */
-
- struct list_head srv_request_history; /* request history */
- __u64 srv_request_seq; /* next request sequence # */
- __u64 srv_request_max_cull_seq; /* highest seq culled from history */
- svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
-
- struct list_head srv_idle_rqbds; /* request buffers to be reposted */
- struct list_head srv_active_rqbds; /* req buffers receiving */
- struct list_head srv_history_rqbds; /* request buffer history */
- int srv_nrqbd_receiving; /* # posted request buffers */
- int srv_n_history_rqbds; /* # request buffers in history */
- int srv_max_history_rqbds;/* max # request buffers in history */
-
- atomic_t srv_outstanding_replies;
- struct list_head srv_active_replies; /* all the active replies */
+ cfs_timer_t srv_at_timer; /* early reply timer */
+
+ int srv_n_queued_reqs; /* # reqs in either of the queues below */
+ int srv_hpreq_count; /* # hp requests handled */
+ int srv_hpreq_ratio; /* # hp per lp reqs to handle */
+ cfs_list_t srv_req_in_queue; /* incoming reqs */
+ cfs_list_t srv_request_queue; /* reqs waiting for service */
+ cfs_list_t srv_request_hpq; /* high priority queue */
+
+ cfs_list_t srv_request_history; /* request history */
+ __u64 srv_request_seq; /* next request sequence # */
+ __u64 srv_request_max_cull_seq; /* highest seq culled from history */
+ svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
+
+ cfs_list_t srv_idle_rqbds; /* request buffers to be reposted */
+ cfs_list_t srv_active_rqbds; /* req buffers receiving */
+ cfs_list_t srv_history_rqbds; /* request buffer history */
+ int srv_nrqbd_receiving; /* # posted request buffers */
+ int srv_n_history_rqbds; /* # request buffers in history */
+ int srv_max_history_rqbds;/* max # request buffers in history */
+
+ cfs_atomic_t srv_outstanding_replies;
+ cfs_list_t srv_active_replies; /* all the active replies */
#ifndef __KERNEL__
- struct list_head srv_reply_queue; /* replies waiting for service */
+ cfs_list_t srv_reply_queue; /* replies waiting for service */
#endif
- cfs_waitq_t srv_waitq; /* all threads sleep on this. This
- * wait-queue is signalled when new
- * incoming request arrives and when
- * difficult reply has to be handled. */
+ cfs_waitq_t srv_waitq; /* all threads sleep on this. This
+ * wait-queue is signalled when new
+ * incoming request arrives and when
+ * difficult reply has to be handled. */
- struct list_head srv_threads; /* service thread list */
- svc_handler_t srv_handler;
- svc_hpreq_handler_t srv_hpreq_handler; /* hp request handler */
+ cfs_list_t srv_threads; /* service thread list */
+ svc_handler_t srv_handler;
+ svc_hpreq_handler_t srv_hpreq_handler; /* hp request handler */
char *srv_name; /* only statically allocated strings here; we don't clean them */
char *srv_thread_name; /* only statically allocated strings here; we don't clean them */
- spinlock_t srv_lock;
+ cfs_spinlock_t srv_lock;
- cfs_proc_dir_entry_t *srv_procroot;
- struct lprocfs_stats *srv_stats;
+ cfs_proc_dir_entry_t *srv_procroot;
+ struct lprocfs_stats *srv_stats;
/* List of free reply_states */
- struct list_head srv_free_rs_list;
+ cfs_list_t srv_free_rs_list;
/* waitq to run, when adding stuff to srv_free_rs_list */
- cfs_waitq_t srv_free_rs_waitq;
+ cfs_waitq_t srv_free_rs_waitq;
/*
* Tags for lu_context associated with this thread, see struct
* lu_context.
*/
- __u32 srv_ctx_tags;
+ __u32 srv_ctx_tags;
/*
* if non-NULL called during thread creation (ptlrpc_start_thread())
* to initialize service specific per-thread state.
/**
* Thread lock protecting structure fields.
*/
- spinlock_t pc_lock;
+ cfs_spinlock_t pc_lock;
/**
* Start completion.
*/
- struct completion pc_starting;
+ cfs_completion_t pc_starting;
/**
* Stop completion.
*/
- struct completion pc_finishing;
+ cfs_completion_t pc_finishing;
/**
* Thread requests set.
*/
LASSERT(desc != NULL);
- spin_lock(&desc->bd_lock);
+ cfs_spin_lock(&desc->bd_lock);
rc = desc->bd_network_rw;
- spin_unlock(&desc->bd_lock);
+ cfs_spin_unlock(&desc->bd_lock);
return rc;
}
if (!desc)
return 0;
- spin_lock(&desc->bd_lock);
+ cfs_spin_lock(&desc->bd_lock);
rc = desc->bd_network_rw;
- spin_unlock(&desc->bd_lock);
+ cfs_spin_unlock(&desc->bd_lock);
return rc;
}
if (new_phase == RQ_PHASE_UNREGISTERING) {
req->rq_next_phase = req->rq_phase;
if (req->rq_import)
- atomic_inc(&req->rq_import->imp_unregistering);
+ cfs_atomic_inc(&req->rq_import->imp_unregistering);
}
if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
if (req->rq_import)
- atomic_dec(&req->rq_import->imp_unregistering);
+ cfs_atomic_dec(&req->rq_import->imp_unregistering);
}
DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
{
int rc;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > cfs_time_current_sec()) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
return 1;
}
rc = req->rq_receiving_reply || req->rq_must_unlink;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
return rc;
}
static inline void
ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
{
- LASSERT(atomic_read(&rs->rs_refcount) > 0);
- atomic_inc(&rs->rs_refcount);
+ LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
+ cfs_atomic_inc(&rs->rs_refcount);
}
static inline void
ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
{
- LASSERT(atomic_read(&rs->rs_refcount) > 0);
- if (atomic_dec_and_test(&rs->rs_refcount))
+ LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&rs->rs_refcount))
lustre_free_reply_state(rs);
}
int ptlrpc_pinger_del_import(struct obd_import *imp);
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
timeout_cb_t cb, void *data,
- struct list_head *obd_list);
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
+ cfs_list_t *obd_list);
+int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
enum timeout_event event);
struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
struct lustre_dquot {
/** Hash list in memory, protect by dquot_hash_lock */
- struct list_head dq_hash;
+ cfs_list_t dq_hash;
/** Protect the data in lustre_dquot */
- struct semaphore dq_sem;
+ cfs_semaphore_t dq_sem;
/** Use count */
int dq_refcnt;
/** Pointer of quota info it belongs to */
};
struct dquot_id {
- struct list_head di_link;
+ cfs_list_t di_link;
__u32 di_id;
__u32 di_flag;
};
int lustre_commit_dquot(struct lustre_dquot *dquot);
int lustre_init_quota_info(struct lustre_quota_info *lqi, int type);
int lustre_get_qids(struct file *file, struct inode *inode, int type,
- struct list_head *list);
+ cfs_list_t *list);
int lustre_quota_convert(struct lustre_quota_info *lqi, int type);
typedef int (*dqacq_handler_t) (struct obd_device * obd, struct qunit_data * qd,
*/
int lqc_sync_blk;
/** guard lqc_imp_valid now */
- spinlock_t lqc_lock;
+ cfs_spinlock_t lqc_lock;
/**
* when mds isn't connected, threads
* on osts who send the quota reqs
/** lquota statistics */
struct lprocfs_stats *lqc_stats;
/** the number of used hashed lqs */
- atomic_t lqc_lqs;
+ cfs_atomic_t lqc_lqs;
/** no lqs are in use */
cfs_waitq_t lqc_lqs_waitq;
};
#define QUOTA_MASTER_UNREADY(qctxt) (qctxt)->lqc_setup = 0
struct lustre_qunit_size {
- struct hlist_node lqs_hash; /** the hash entry */
+ cfs_hlist_node_t lqs_hash; /** the hash entry */
unsigned int lqs_id; /** id of user/group */
unsigned long lqs_flags; /** 31st bit is QB_SET, 30th bit is QI_SET
* other bits are same as LQUOTA_FLAGS_*
long long lqs_ino_rec;
/** when blocks are allocated/released, this value will record it */
long long lqs_blk_rec;
- atomic_t lqs_refcount;
+ cfs_atomic_t lqs_refcount;
cfs_time_t lqs_last_bshrink; /** time of last block shrink */
cfs_time_t lqs_last_ishrink; /** time of last inode shrink */
- spinlock_t lqs_lock;
+ cfs_spinlock_t lqs_lock;
unsigned long long lqs_key; /** hash key */
struct lustre_quota_ctxt *lqs_ctxt; /** quota ctxt */
};
static inline void __lqs_getref(struct lustre_qunit_size *lqs)
{
- int count = atomic_inc_return(&lqs->lqs_refcount);
+ int count = cfs_atomic_inc_return(&lqs->lqs_refcount);
if (count == 2) /* quota_create_lqs */
- atomic_inc(&lqs->lqs_ctxt->lqc_lqs);
+ cfs_atomic_inc(&lqs->lqs_ctxt->lqc_lqs);
CDEBUG(D_INFO, "lqs=%p refcount %d\n", lqs, count);
}
static inline void __lqs_putref(struct lustre_qunit_size *lqs)
{
- LASSERT(atomic_read(&lqs->lqs_refcount) > 0);
+ LASSERT(cfs_atomic_read(&lqs->lqs_refcount) > 0);
- if (atomic_dec_return(&lqs->lqs_refcount) == 1)
- if (atomic_dec_and_test(&lqs->lqs_ctxt->lqc_lqs))
+ if (cfs_atomic_dec_return(&lqs->lqs_refcount) == 1)
+ if (cfs_atomic_dec_and_test(&lqs->lqs_ctxt->lqc_lqs))
cfs_waitq_signal(&lqs->lqs_ctxt->lqc_lqs_waitq);
CDEBUG(D_INFO, "lqs=%p refcount %d\n",
- lqs, atomic_read(&lqs->lqs_refcount));
+ lqs, cfs_atomic_read(&lqs->lqs_refcount));
}
static inline void lqs_putref(struct lustre_qunit_size *lqs)
static inline void lqs_initref(struct lustre_qunit_size *lqs)
{
- atomic_set(&lqs->lqs_refcount, 0);
+ cfs_atomic_set(&lqs->lqs_refcount, 0);
}
#else
struct obd_device *qta_obd; /** obd device */
struct obd_quotactl qta_oqctl; /** obd_quotactl args */
struct super_block *qta_sb; /** obd super block */
- struct semaphore *qta_sem; /** obt_quotachecking */
+ cfs_semaphore_t *qta_sem; /** obt_quotachecking */
};
struct obd_trans_info;
PTLRPC_CTX_ERROR)
struct ptlrpc_cli_ctx {
- struct hlist_node cc_cache; /* linked into ctx cache */
- atomic_t cc_refcount;
+ cfs_hlist_node_t cc_cache; /* linked into ctx cache */
+ cfs_atomic_t cc_refcount;
struct ptlrpc_sec *cc_sec;
struct ptlrpc_ctx_ops *cc_ops;
cfs_time_t cc_expire; /* in seconds */
unsigned int cc_early_expire:1;
unsigned long cc_flags;
struct vfs_cred cc_vcred;
- spinlock_t cc_lock;
- struct list_head cc_req_list; /* waiting reqs linked here */
- struct list_head cc_gc_chain; /* linked to gc chain */
+ cfs_spinlock_t cc_lock;
+ cfs_list_t cc_req_list; /* waiting reqs linked here */
+ cfs_list_t cc_gc_chain; /* linked to gc chain */
};
struct ptlrpc_sec_cops {
};
struct ptlrpc_sec_policy {
- struct module *sp_owner;
+ cfs_module_t *sp_owner;
char *sp_name;
__u16 sp_policy; /* policy number */
struct ptlrpc_sec_cops *sp_cops; /* client ops */
struct ptlrpc_sec {
struct ptlrpc_sec_policy *ps_policy;
- atomic_t ps_refcount;
- atomic_t ps_nctx; /* statistic only */
+ cfs_atomic_t ps_refcount;
+ cfs_atomic_t ps_nctx; /* statistic only */
int ps_id; /* unique identifier */
struct sptlrpc_flavor ps_flvr; /* flavor */
enum lustre_sec_part ps_part;
unsigned int ps_dying:1;
struct obd_import *ps_import; /* owning import */
- spinlock_t ps_lock; /* protect ccache */
+ cfs_spinlock_t ps_lock; /* protect ccache */
/*
* garbage collection
*/
- struct list_head ps_gc_list;
+ cfs_list_t ps_gc_list;
cfs_time_t ps_gc_interval; /* in seconds */
cfs_time_t ps_gc_next; /* in seconds */
};
struct ptlrpc_svc_ctx {
- atomic_t sc_refcount;
+ cfs_atomic_t sc_refcount;
struct ptlrpc_sec_policy *sc_policy;
};
static inline
struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
{
- __module_get(policy->sp_owner);
+ __cfs_module_get(policy->sp_owner);
return policy;
}
static inline
void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
{
- module_put(policy->sp_owner);
+ cfs_module_put(policy->sp_owner);
}
/*
struct upcall_cache_entry *mi_uc_entry;
uid_t mi_uid;
gid_t mi_gid;
- struct group_info *mi_ginfo;
+ cfs_group_info_t *mi_ginfo;
int mi_nperms;
struct md_perm *mi_perms;
};
struct upcall_cache_entry {
- struct list_head ue_hash;
+ cfs_list_t ue_hash;
__u64 ue_key;
- atomic_t ue_refcount;
+ cfs_atomic_t ue_refcount;
int ue_flags;
cfs_waitq_t ue_waitq;
cfs_time_t ue_acquire_expire;
};
struct upcall_cache {
- struct list_head uc_hashtable[UC_CACHE_HASH_SIZE];
- spinlock_t uc_lock;
- rwlock_t uc_upcall_rwlock;
+ cfs_list_t uc_hashtable[UC_CACHE_HASH_SIZE];
+ cfs_spinlock_t uc_lock;
+ cfs_rwlock_t uc_upcall_rwlock;
char uc_name[40]; /* for upcall */
char uc_upcall[UC_CACHE_UPCALL_MAXPATH];
__u32 mu_suppgids[2];
cfs_cap_t mu_cap;
__u32 mu_umask;
- struct group_info *mu_ginfo;
+ cfs_group_info_t *mu_ginfo;
struct md_identity *mu_identity;
};
int *md_size, int *cookie_size);
int (*mdo_statfs)(const struct lu_env *env, struct md_device *m,
- struct kstatfs *sfs);
+ cfs_kstatfs_t *sfs);
int (*mdo_init_capa_ctxt)(const struct lu_env *env, struct md_device *m,
int mode, unsigned long timeout, __u32 alg,
struct md_upcall {
/** this lock protects upcall using against its removal
* read lock is for usage the upcall, write - for init/fini */
- struct rw_semaphore mu_upcall_sem;
+ cfs_rw_semaphore_t mu_upcall_sem;
/** device to call, upper layer normally */
struct md_device *mu_upcall_dev;
/** upcall function */
static inline void md_upcall_init(struct md_device *m, void *upcl)
{
- init_rwsem(&m->md_upcall.mu_upcall_sem);
+ cfs_init_rwsem(&m->md_upcall.mu_upcall_sem);
m->md_upcall.mu_upcall_dev = NULL;
m->md_upcall.mu_upcall = upcl;
}
static inline void md_upcall_dev_set(struct md_device *m, struct md_device *up)
{
- down_write(&m->md_upcall.mu_upcall_sem);
+ cfs_down_write(&m->md_upcall.mu_upcall_sem);
m->md_upcall.mu_upcall_dev = up;
- up_write(&m->md_upcall.mu_upcall_sem);
+ cfs_up_write(&m->md_upcall.mu_upcall_sem);
}
static inline void md_upcall_fini(struct md_device *m)
{
- down_write(&m->md_upcall.mu_upcall_sem);
+ cfs_down_write(&m->md_upcall.mu_upcall_sem);
m->md_upcall.mu_upcall_dev = NULL;
m->md_upcall.mu_upcall = NULL;
- up_write(&m->md_upcall.mu_upcall_sem);
+ cfs_up_write(&m->md_upcall.mu_upcall_sem);
}
static inline int md_do_upcall(const struct lu_env *env, struct md_device *m,
enum md_upcall_event ev, void *data)
{
int rc = 0;
- down_read(&m->md_upcall.mu_upcall_sem);
+ cfs_down_read(&m->md_upcall.mu_upcall_sem);
if (m->md_upcall.mu_upcall_dev != NULL &&
m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall != NULL) {
rc = m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall(env,
m->md_upcall.mu_upcall_dev,
ev, data);
}
- up_read(&m->md_upcall.mu_upcall_sem);
+ cfs_up_read(&m->md_upcall.mu_upcall_sem);
return rc;
}
const char *llod_name;
__u32 llod_oid;
int llod_is_index;
- const struct dt_index_features * llod_feat;
- struct list_head llod_linkage;
+ const struct dt_index_features *llod_feat;
+ cfs_list_t llod_linkage;
};
struct md_object *llo_store_resolve(const struct lu_env *env,
/* this is really local to the OSC */
struct loi_oap_pages {
- struct list_head lop_pending;
- struct list_head lop_urgent;
- struct list_head lop_pending_group;
+ cfs_list_t lop_pending;
+ cfs_list_t lop_urgent;
+ cfs_list_t lop_pending_group;
int lop_num_pending;
};
/* used by the osc to keep track of what objects to build into rpcs */
struct loi_oap_pages loi_read_lop;
struct loi_oap_pages loi_write_lop;
- struct list_head loi_ready_item;
- struct list_head loi_hp_ready_item;
- struct list_head loi_write_item;
- struct list_head loi_read_item;
+ cfs_list_t loi_ready_item;
+ cfs_list_t loi_hp_ready_item;
+ cfs_list_t loi_write_item;
+ cfs_list_t loi_read_item;
unsigned long loi_kms_valid:1;
__u64 loi_kms; /* known minimum size */
}
struct lov_stripe_md {
- spinlock_t lsm_lock;
+ cfs_spinlock_t lsm_lock;
pid_t lsm_lock_owner; /* debugging */
struct {
void lov_stripe_unlock(struct lov_stripe_md *md);
struct obd_type {
- struct list_head typ_chain;
+ cfs_list_t typ_chain;
struct obd_ops *typ_dt_ops;
struct md_ops *typ_md_ops;
cfs_proc_dir_entry_t *typ_procroot;
char *typ_name;
int typ_refcnt;
struct lu_device_type *typ_lu;
- spinlock_t obd_type_lock;
+ cfs_spinlock_t obd_type_lock;
};
struct brw_page {
/** server data in last_rcvd file */
struct lr_server_data *obt_lsd;
/** Lock protecting client bitmap */
- spinlock_t obt_client_bitmap_lock;
+ cfs_spinlock_t obt_client_bitmap_lock;
/** Bitmap of known clients */
unsigned long *obt_client_bitmap;
/** Server last transaction number */
__u64 obt_last_transno;
/** Lock protecting last transaction number */
- spinlock_t obt_translock;
+ cfs_spinlock_t obt_translock;
/** Number of mounts */
__u64 obt_mount_count;
- struct semaphore obt_quotachecking;
+ cfs_semaphore_t obt_quotachecking;
struct lustre_quota_ctxt obt_qctxt;
lustre_quota_version_t obt_qfmt;
- struct rw_semaphore obt_rwsem;
+ cfs_rw_semaphore_t obt_rwsem;
struct vfsmount *obt_vfsmnt;
struct file *obt_health_check_filp;
};
cfs_dentry_t *fo_dentry_O;
cfs_dentry_t **fo_dentry_O_groups;
struct filter_subdirs *fo_dentry_O_sub;
- struct semaphore fo_init_lock; /* group initialization lock */
+ cfs_semaphore_t fo_init_lock; /* group initialization lock */
int fo_committed_group;
#define CLIENT_QUOTA_DEFAULT_RESENDS 10
- spinlock_t fo_objidlock; /* protect fo_lastobjid */
+ cfs_spinlock_t fo_objidlock; /* protect fo_lastobjid */
unsigned long fo_destroys_in_progress;
- struct semaphore fo_create_locks[FILTER_SUBDIR_COUNT];
+ cfs_semaphore_t fo_create_locks[FILTER_SUBDIR_COUNT];
- struct list_head fo_export_list;
+ cfs_list_t fo_export_list;
int fo_subdir_count;
obd_size fo_tot_dirty; /* protected by obd_osfs_lock */
__u64 *fo_last_objids; /* last created objid for groups,
* protected by fo_objidlock */
- struct semaphore fo_alloc_lock;
+ cfs_semaphore_t fo_alloc_lock;
- atomic_t fo_r_in_flight;
- atomic_t fo_w_in_flight;
+ cfs_atomic_t fo_r_in_flight;
+ cfs_atomic_t fo_w_in_flight;
/*
* per-filter pool of kiobuf's allocated by filter_common_setup() and
struct filter_iobuf **fo_iobuf_pool;
int fo_iobuf_count;
- struct list_head fo_llog_list;
- spinlock_t fo_llog_list_lock;
+ cfs_list_t fo_llog_list;
+ cfs_spinlock_t fo_llog_list_lock;
struct brw_stats fo_filter_stats;
struct lustre_quota_ctxt fo_quota_ctxt;
- spinlock_t fo_quotacheck_lock;
- atomic_t fo_quotachecking;
+ cfs_spinlock_t fo_quotacheck_lock;
+ cfs_atomic_t fo_quotachecking;
int fo_fmd_max_num; /* per exp filter_mod_data */
int fo_fmd_max_age; /* jiffies to fmd expiry */
/* sptlrpc stuff */
- rwlock_t fo_sptlrpc_lock;
+ cfs_rwlock_t fo_sptlrpc_lock;
struct sptlrpc_rule_set fo_sptlrpc_rset;
/* capability related */
unsigned int fo_fl_oss_capa;
- struct list_head fo_capa_keys;
- struct hlist_head *fo_capa_hash;
+ cfs_list_t fo_capa_keys;
+ cfs_hlist_head_t *fo_capa_hash;
struct llog_commit_master *fo_lcm;
int fo_sec_level;
};
cfs_time_t ti_timeout;
timeout_cb_t ti_cb;
void *ti_cb_data;
- struct list_head ti_obd_list;
- struct list_head ti_chain;
+ cfs_list_t ti_obd_list;
+ cfs_list_t ti_chain;
};
#define OSC_MAX_RIF_DEFAULT 8
struct mdc_rpc_lock;
struct obd_import;
struct client_obd {
- struct rw_semaphore cl_sem;
+ cfs_rw_semaphore_t cl_sem;
struct obd_uuid cl_target_uuid;
struct obd_import *cl_import; /* ptlrpc connection state */
int cl_conn_count;
long cl_dirty_transit; /* dirty synchronous */
long cl_avail_grant; /* bytes of credit for ost */
long cl_lost_grant; /* lost credits (trunc) */
- struct list_head cl_cache_waiters; /* waiting for cache/grant */
+ cfs_list_t cl_cache_waiters; /* waiting for cache/grant */
cfs_time_t cl_next_shrink_grant; /* jiffies */
- struct list_head cl_grant_shrink_list; /* Timeout event list */
- struct semaphore cl_grant_sem; /*grant shrink list semaphore*/
+ cfs_list_t cl_grant_shrink_list; /* Timeout event list */
+ cfs_semaphore_t cl_grant_sem; /*grant shrink list cfs_semaphore*/
int cl_grant_shrink_interval; /* seconds */
/* keep track of objects that have lois that contain pages which
* client_obd_list_lock_{init,done}() functions.
*/
client_obd_lock_t cl_loi_list_lock;
- struct list_head cl_loi_ready_list;
- struct list_head cl_loi_hp_ready_list;
- struct list_head cl_loi_write_list;
- struct list_head cl_loi_read_list;
+ cfs_list_t cl_loi_ready_list;
+ cfs_list_t cl_loi_hp_ready_list;
+ cfs_list_t cl_loi_write_list;
+ cfs_list_t cl_loi_read_list;
int cl_r_in_flight;
int cl_w_in_flight;
/* just a sum of the loi/lop pending numbers to be exported by /proc */
struct obd_histogram cl_write_offset_hist;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
- atomic_t cl_destroy_in_flight;
+ cfs_atomic_t cl_destroy_in_flight;
cfs_waitq_t cl_destroy_waitq;
struct mdc_rpc_lock *cl_rpc_lock;
struct osc_creator cl_oscc;
/* mgc datastruct */
- struct semaphore cl_mgc_sem;
+ cfs_semaphore_t cl_mgc_sem;
struct vfsmount *cl_mgc_vfsmnt;
struct dentry *cl_mgc_configs_dir;
- atomic_t cl_mgc_refcount;
+ cfs_atomic_t cl_mgc_refcount;
struct obd_export *cl_mgc_mgsexp;
/* checksumming for data sent over the network */
/* sequence manager */
struct lu_client_seq *cl_seq;
- atomic_t cl_resends; /* resend count */
- atomic_t cl_quota_resends; /* quota related resend count */
+ cfs_atomic_t cl_resends; /* resend count */
+ cfs_atomic_t cl_quota_resends; /* quota related resend count */
};
#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
struct super_block *mgs_sb;
struct dentry *mgs_configs_dir;
struct dentry *mgs_fid_de;
- struct list_head mgs_fs_db_list;
- struct semaphore mgs_sem;
+ cfs_list_t mgs_fs_db_list;
+ cfs_semaphore_t mgs_sem;
cfs_proc_dir_entry_t *mgs_proc_live;
};
int mds_max_cookiesize;
__u64 mds_io_epoch;
unsigned long mds_atime_diff;
- struct semaphore mds_epoch_sem;
+ cfs_semaphore_t mds_epoch_sem;
struct ll_fid mds_rootfid;
cfs_dentry_t *mds_pending_dir;
cfs_dentry_t *mds_logs_dir;
__u32 mds_id;
/* mark pages dirty for write. */
- bitmap_t *mds_lov_page_dirty;
+ cfs_bitmap_t *mds_lov_page_dirty;
/* array for store pages with obd_id */
void **mds_lov_page_array;
/* file for store objid */
struct lustre_quota_info mds_quota_info;
- struct semaphore mds_qonoff_sem;
- struct semaphore mds_health_sem;
+ cfs_semaphore_t mds_qonoff_sem;
+ cfs_semaphore_t mds_health_sem;
unsigned long mds_fl_user_xattr:1,
mds_fl_acl:1,
mds_evict_ost_nids:1,
/* for capability keys update */
struct lustre_capa_key *mds_capa_keys;
- struct rw_semaphore mds_notify_lock;
+ cfs_rw_semaphore_t mds_notify_lock;
};
#define mds_transno_lock mds_obt.obt_translock
struct echo_obd {
struct obdo eo_oa;
- spinlock_t eo_lock;
+ cfs_spinlock_t eo_lock;
__u64 eo_lastino;
struct lustre_handle eo_nl_lock;
- atomic_t eo_prep;
+ cfs_atomic_t eo_prep;
};
struct ost_obd {
struct ptlrpc_service *ost_service;
struct ptlrpc_service *ost_create_service;
struct ptlrpc_service *ost_io_service;
- struct semaphore ost_health_sem;
+ cfs_semaphore_t ost_health_sem;
};
struct echo_client_obd {
struct obd_export *ec_exp; /* the local connection to osc/lov */
- spinlock_t ec_lock;
- struct list_head ec_objects;
- struct list_head ec_locks;
+ cfs_spinlock_t ec_lock;
+ cfs_list_t ec_objects;
+ cfs_list_t ec_locks;
int ec_nstripes;
__u64 ec_unique;
};
struct lov_qos_oss {
struct obd_uuid lqo_uuid; /* ptlrpc's c_remote_uuid */
- struct list_head lqo_oss_list; /* link to lov_qos */
+ cfs_list_t lqo_oss_list; /* link to lov_qos */
__u64 lqo_bavail; /* total bytes avail on OSS */
__u64 lqo_penalty; /* current penalty */
- __u64 lqo_penalty_per_obj; /* penalty decrease every obj*/
+ __u64 lqo_penalty_per_obj;/* penalty decrease every obj*/
time_t lqo_used; /* last used time, seconds */
__u32 lqo_ost_count; /* number of osts on this oss */
};
/* Generic subset of OSTs */
struct ost_pool {
- __u32 *op_array; /* array of index of
- lov_obd->lov_tgts */
- unsigned int op_count; /* number of OSTs in the array */
- unsigned int op_size; /* allocated size of lp_array */
- struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
+ __u32 *op_array; /* array of index of
+ lov_obd->lov_tgts */
+ unsigned int op_count; /* number of OSTs in the array */
+ unsigned int op_size; /* allocated size of lp_array */
+ cfs_rw_semaphore_t op_rw_sem; /* to protect ost_pool use */
};
/* Round-robin allocator data */
};
/* Stripe placement optimization */
struct lov_qos {
- struct list_head lq_oss_list; /* list of OSSs that targets use */
- struct rw_semaphore lq_rw_sem;
+ cfs_list_t lq_oss_list; /* list of OSSs that targets use */
+ cfs_rw_semaphore_t lq_rw_sem;
__u32 lq_active_oss_count;
unsigned int lq_prio_free; /* priority for free space */
unsigned int lq_threshold_rr;/* priority for rr */
lq_same_space:1,/* the ost's all have approx.
the same space avail */
lq_reset:1, /* zero current penalties */
- lq_statfs_in_progress:1; /* statfs op in progress */
+ lq_statfs_in_progress:1; /* statfs op in
+ progress */
/* qos statfs data */
struct lov_statfs_data *lq_statfs_data;
cfs_waitq_t lq_statfs_waitq; /* waitqueue to notify statfs
};
struct lov_tgt_desc {
- struct list_head ltd_kill;
+ cfs_list_t ltd_kill;
struct obd_uuid ltd_uuid;
struct obd_device *ltd_obd;
struct obd_export *ltd_exp;
__u32 ltd_gen;
__u32 ltd_index; /* index in lov_obd->tgts */
unsigned long ltd_active:1,/* is this target up for requests */
- ltd_activate:1,/* should this target be activated */
+ ltd_activate:1,/* should target be activated */
ltd_reap:1; /* should this target be deleted */
};
struct pool_desc {
char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
struct ost_pool pool_obds; /* pool members */
- atomic_t pool_refcount; /* pool ref. counter */
+ cfs_atomic_t pool_refcount; /* pool ref. counter */
struct lov_qos_rr pool_rr; /* round robin qos */
- struct hlist_node pool_hash; /* access by poolname */
- struct list_head pool_list; /* serial access */
+ cfs_hlist_node_t pool_hash; /* access by poolname */
+ cfs_list_t pool_list; /* serial access */
cfs_proc_dir_entry_t *pool_proc_entry; /* file in /proc */
struct lov_obd *pool_lov; /* lov obd to which this
pool belong */
struct lov_tgt_desc **lov_tgts; /* sparse array */
struct ost_pool lov_packed; /* all OSTs in a packed
array */
- struct semaphore lov_lock;
+ cfs_semaphore_t lov_lock;
struct obd_connect_data lov_ocd;
struct lov_qos lov_qos; /* qos info per lov */
- atomic_t lov_refcount;
+ cfs_atomic_t lov_refcount;
__u32 lov_tgt_count; /* how many OBD's */
__u32 lov_active_tgt_count; /* how many active */
__u32 lov_death_row;/* tgts scheduled to be deleted */
int lov_connects;
int lov_pool_count;
cfs_hash_t *lov_pools_hash_body; /* used for key access */
- struct list_head lov_pool_list; /* used for sequential access */
+ cfs_list_t lov_pool_list; /* used for sequential access */
cfs_proc_dir_entry_t *lov_pool_proc_entry;
enum lustre_sec_part lov_sp_me;
};
struct lmv_tgt_desc {
struct obd_uuid ltd_uuid;
struct obd_export *ltd_exp;
- int ltd_active; /* is this target up for requests */
+ int ltd_active; /* is this target up for requests */
int ltd_idx;
- struct semaphore ltd_fid_sem;
+ cfs_semaphore_t ltd_fid_sem;
};
enum placement_policy {
struct lmv_obd {
int refcount;
struct lu_client_fld lmv_fld;
- spinlock_t lmv_lock;
+ cfs_spinlock_t lmv_lock;
placement_policy_t lmv_placement;
struct lmv_desc desc;
struct obd_uuid cluuid;
int max_def_easize;
int max_cookiesize;
int server_timeout;
- struct semaphore init_sem;
+ cfs_semaphore_t init_sem;
struct lmv_tgt_desc *tgts;
int tgts_size;
struct target_recovery_data {
svc_handler_t trd_recovery_handler;
pid_t trd_processing_task;
- struct completion trd_starting;
- struct completion trd_finishing;
+ cfs_completion_t trd_starting;
+ cfs_completion_t trd_finishing;
};
enum filter_groups {
}
struct obd_llog_group {
- struct list_head olg_list;
+ cfs_list_t olg_list;
int olg_group;
struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
cfs_waitq_t olg_waitq;
- spinlock_t olg_lock;
+ cfs_spinlock_t olg_lock;
struct obd_export *olg_exp;
int olg_initializing;
- struct semaphore olg_cat_processing;
+ cfs_semaphore_t olg_cat_processing;
};
/* corresponds to one of the obd's */
cfs_hash_t *obd_nid_hash;
/* nid stats body */
cfs_hash_t *obd_nid_stats_hash;
- struct list_head obd_nid_stats;
- atomic_t obd_refcount;
+ cfs_list_t obd_nid_stats;
+ cfs_atomic_t obd_refcount;
cfs_waitq_t obd_refcount_waitq;
- struct list_head obd_exports;
- struct list_head obd_unlinked_exports;
- struct list_head obd_delayed_exports;
+ cfs_list_t obd_exports;
+ cfs_list_t obd_unlinked_exports;
+ cfs_list_t obd_delayed_exports;
int obd_num_exports;
- spinlock_t obd_nid_lock;
+ cfs_spinlock_t obd_nid_lock;
struct ldlm_namespace *obd_namespace;
struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
/* a spinlock is OK for what we do now, may need a semaphore later */
- spinlock_t obd_dev_lock;
- struct semaphore obd_dev_sem;
+ cfs_spinlock_t obd_dev_lock;
+ cfs_semaphore_t obd_dev_sem;
__u64 obd_last_committed;
struct fsfilt_operations *obd_fsops;
- spinlock_t obd_osfs_lock;
+ cfs_spinlock_t obd_osfs_lock;
struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
__u64 obd_osfs_age;
struct lvfs_run_ctxt obd_lvfs_ctxt;
struct obd_llog_group obd_olg; /* default llog group */
- struct obd_device *obd_observer;
- struct rw_semaphore obd_observer_link_sem;
+ struct obd_device *obd_observer;
+ cfs_rw_semaphore_t obd_observer_link_sem;
struct obd_notify_upcall obd_upcall;
struct obd_export *obd_self_export;
/* list of exports in LRU order, for ping evictor, with obd_dev_lock */
- struct list_head obd_exports_timed;
+ cfs_list_t obd_exports_timed;
time_t obd_eviction_timer; /* for ping evictor */
int obd_max_recoverable_clients;
int obd_connected_clients;
int obd_stale_clients;
int obd_delayed_clients;
- spinlock_t obd_processing_task_lock; /* BH lock (timer) */
+ cfs_spinlock_t obd_processing_task_lock; /* BH lock (timer) */
__u64 obd_next_recovery_transno;
int obd_replayed_requests;
int obd_requests_queued_for_recovery;
/* new recovery stuff from CMD2 */
struct target_recovery_data obd_recovery_data;
int obd_replayed_locks;
- atomic_t obd_req_replay_clients;
- atomic_t obd_lock_replay_clients;
- struct list_head obd_req_replay_queue;
- struct list_head obd_lock_replay_queue;
- struct list_head obd_final_req_queue;
+ cfs_atomic_t obd_req_replay_clients;
+ cfs_atomic_t obd_lock_replay_clients;
+ cfs_list_t obd_req_replay_queue;
+ cfs_list_t obd_lock_replay_queue;
+ cfs_list_t obd_final_req_queue;
int obd_recovery_stage;
union {
cfs_proc_dir_entry_t *obd_proc_exports_entry;
cfs_proc_dir_entry_t *obd_svc_procroot;
struct lprocfs_stats *obd_svc_stats;
- atomic_t obd_evict_inprogress;
+ cfs_atomic_t obd_evict_inprogress;
cfs_waitq_t obd_evict_inprogress_waitq;
- struct list_head obd_evict_list; /* protected with pet_lock */
+ cfs_list_t obd_evict_list; /* protected with pet_lock */
/**
* Ldlm pool part. Save last calculated SLV and Limit.
*/
- rwlock_t obd_pool_lock;
+ cfs_rwlock_t obd_pool_lock;
int obd_pool_limit;
__u64 obd_pool_slv;
};
struct obd_ops {
- struct module *o_owner;
+ cfs_module_t *o_owner;
int (*o_iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg);
int (*o_get_info)(struct obd_export *, __u32 keylen, void *key,
struct obd_client_handle *mod_och;
struct ptlrpc_request *mod_open_req;
struct ptlrpc_request *mod_close_req;
- atomic_t mod_refcount;
+ cfs_atomic_t mod_refcount;
};
struct lookup_intent;
OBD_ALLOC_PTR(mod);
if (mod == NULL)
return NULL;
- atomic_set(&mod->mod_refcount, 1);
+ cfs_atomic_set(&mod->mod_refcount, 1);
return mod;
}
-#define obd_mod_get(mod) atomic_inc(&(mod)->mod_refcount)
+#define obd_mod_get(mod) cfs_atomic_inc(&(mod)->mod_refcount)
#define obd_mod_put(mod) \
({ \
- if (atomic_dec_and_test(&(mod)->mod_refcount)) { \
+ if (cfs_atomic_dec_and_test(&(mod)->mod_refcount)) { \
if ((mod)->mod_open_req) \
ptlrpc_req_finished((mod)->mod_open_req); \
OBD_FREE_PTR(mod); \
/* OBD Device Declarations */
extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
-extern spinlock_t obd_dev_lock;
+extern cfs_spinlock_t obd_dev_lock;
/* OBD Operations Declarations */
extern struct obd_device *class_conn2obd(struct lustre_handle *);
/* list of active configuration logs */
struct config_llog_data {
- char *cld_logname;
- struct ldlm_res_id cld_resid;
+ char *cld_logname;
+ struct ldlm_res_id cld_resid;
struct config_llog_instance cld_cfg;
- struct list_head cld_list_chain;
- atomic_t cld_refcount;
- struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */
- struct obd_export *cld_mgcexp;
- unsigned int cld_stopping:1, /* we were told to stop watching */
- cld_lostlock:1, /* lock not requeued */
- cld_is_sptlrpc:1;
+ cfs_list_t cld_list_chain;
+ cfs_atomic_t cld_refcount;
+ struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */
+ struct obd_export *cld_mgcexp;
+ unsigned int cld_stopping:1, /* we were told to stop
+ * watching */
+ cld_lostlock:1, /* lock not requeued */
+ cld_is_sptlrpc:1;
};
struct lustre_profile {
- struct list_head lp_list;
+ cfs_list_t lp_list;
char *lp_profile;
char *lp_dt;
char *lp_md;
#define class_export_rpc_get(exp) \
({ \
- atomic_inc(&(exp)->exp_rpc_count); \
+ cfs_atomic_inc(&(exp)->exp_rpc_count); \
CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n", \
- (exp), atomic_read(&(exp)->exp_rpc_count)); \
+ (exp), cfs_atomic_read(&(exp)->exp_rpc_count)); \
class_export_get(exp); \
})
#define class_export_rpc_put(exp) \
({ \
- LASSERT(atomic_read(&exp->exp_rpc_count) > 0); \
- atomic_dec(&(exp)->exp_rpc_count); \
+ LASSERT(cfs_atomic_read(&exp->exp_rpc_count) > 0); \
+ cfs_atomic_dec(&(exp)->exp_rpc_count); \
CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n", \
- (exp), atomic_read(&(exp)->exp_rpc_count)); \
+ (exp), cfs_atomic_read(&(exp)->exp_rpc_count)); \
class_export_put(exp); \
})
#define class_export_lock_get(exp, lock) \
({ \
- atomic_inc(&(exp)->exp_locks_count); \
+ cfs_atomic_inc(&(exp)->exp_locks_count); \
__class_export_add_lock_ref(exp, lock); \
CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n", \
- (exp), atomic_read(&(exp)->exp_locks_count)); \
+ (exp), cfs_atomic_read(&(exp)->exp_locks_count)); \
class_export_get(exp); \
})
#define class_export_lock_put(exp, lock) \
({ \
- LASSERT(atomic_read(&exp->exp_locks_count) > 0); \
- atomic_dec(&(exp)->exp_locks_count); \
+ LASSERT(cfs_atomic_read(&exp->exp_locks_count) > 0); \
+ cfs_atomic_dec(&(exp)->exp_locks_count); \
__class_export_del_lock_ref(exp, lock); \
CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", \
- (exp), atomic_read(&(exp)->exp_locks_count)); \
+ (exp), cfs_atomic_read(&(exp)->exp_locks_count)); \
class_export_put(exp); \
})
#define class_export_cb_get(exp) \
({ \
- atomic_inc(&(exp)->exp_cb_count); \
+ cfs_atomic_inc(&(exp)->exp_cb_count); \
CDEBUG(D_INFO, "callback GETting export %p : new cb_count %d\n",\
- (exp), atomic_read(&(exp)->exp_cb_count)); \
+ (exp), cfs_atomic_read(&(exp)->exp_cb_count)); \
class_export_get(exp); \
})
#define class_export_cb_put(exp) \
({ \
- LASSERT(atomic_read(&exp->exp_cb_count) > 0); \
- atomic_dec(&(exp)->exp_cb_count); \
+ LASSERT(cfs_atomic_read(&exp->exp_cb_count) > 0); \
+ cfs_atomic_dec(&(exp)->exp_cb_count); \
CDEBUG(D_INFO, "callback PUTting export %p : new cb_count %d\n",\
- (exp), atomic_read(&(exp)->exp_cb_count)); \
+ (exp), cfs_atomic_read(&(exp)->exp_cb_count)); \
class_export_put(exp); \
})
return 0;
}
-#ifndef time_before
-#define time_before(t1, t2) ((long)t2 - (long)t1 > 0)
-#endif
-
/* @max_age is the oldest time in jiffies that we accept using a cached data.
* If the cache is older than @max_age we will get a new value from the
* target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
obd->obd_name, &obd->obd_osfs,
obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
if (oinfo->oi_cb_up)
oinfo->oi_cb_up(oinfo, 0);
if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
rc = OBP(obd, statfs)(obd, osfs, max_age, flags);
if (rc == 0) {
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
obd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
}
} else {
CDEBUG(D_SUPER,"%s: use %p cache blocks "LPU64"/"LPU64
obd->obd_name, &obd->obd_osfs,
obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
}
RETURN(rc);
}
#if defined(LPROCFS) && defined(HAVE_QUOTA_SUPPORT)
if (qctxt)
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
#endif
EXP_CHECK_DT_OP(exp, quota_adjust_qunit);
EXP_COUNTER_INCREMENT(exp, quota_adjust_qunit);
#if defined(LPROCFS) && defined(HAVE_QUOTA_SUPPORT)
if (qctxt) {
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_ADJUST_QUNIT,
timediff);
{
ENTRY;
OBD_CHECK_DEV(obd);
- down_write(&obd->obd_observer_link_sem);
+ cfs_down_write(&obd->obd_observer_link_sem);
if (obd->obd_observer && observer) {
- up_write(&obd->obd_observer_link_sem);
+ cfs_up_write(&obd->obd_observer_link_sem);
RETURN(-EALREADY);
}
obd->obd_observer = observer;
- up_write(&obd->obd_observer_link_sem);
+ cfs_up_write(&obd->obd_observer_link_sem);
RETURN(0);
}
struct obd_device **observer)
{
ENTRY;
- down_read(&obd->obd_observer_link_sem);
+ cfs_down_read(&obd->obd_observer_link_sem);
if (!obd->obd_observer) {
*observer = NULL;
- up_read(&obd->obd_observer_link_sem);
+ cfs_up_read(&obd->obd_observer_link_sem);
RETURN(-ENOENT);
}
*observer = obd->obd_observer;
static inline int obd_unpin_observer(struct obd_device *obd)
{
ENTRY;
- up_read(&obd->obd_observer_link_sem);
+ cfs_up_read(&obd->obd_observer_link_sem);
RETURN(0);
}
int aa_resends;
struct brw_page **aa_ppga;
struct client_obd *aa_cli;
- struct list_head aa_oaps;
+ cfs_list_t aa_oaps;
struct obd_capa *aa_ocapa;
struct cl_req *aa_clerq;
};
extern int at_extra;
extern unsigned int obd_sync_filter;
extern unsigned int obd_max_dirty_pages;
-extern atomic_t obd_dirty_pages;
-extern atomic_t obd_dirty_transit_pages;
+extern cfs_atomic_t obd_dirty_pages;
+extern cfs_atomic_t obd_dirty_transit_pages;
extern cfs_waitq_t obd_race_waitq;
extern int obd_race_state;
extern unsigned int obd_alloc_fail_rate;
} else {
CERROR("obd_fail_race id %x waking\n", id);
obd_race_state = 1;
- wake_up(&obd_race_waitq);
+ cfs_waitq_signal(&obd_race_waitq);
}
}
}
#define fixme() CDEBUG(D_OTHER, "FIXME\n");
-extern atomic_t libcfs_kmemory;
+extern cfs_atomic_t libcfs_kmemory;
#ifdef LPROCFS
#define obd_memory_add(size) \
CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n", \
(int)(size)); \
CERROR(LPU64" total bytes allocated by Lustre, %d by LNET\n", \
- obd_memory_sum(), atomic_read(&libcfs_kmemory)); \
+ obd_memory_sum(), cfs_atomic_read(&libcfs_kmemory)); \
} else { \
memset(ptr, 0, size); \
OBD_ALLOC_POST(ptr, size, "vmalloced"); \
})
#define OBD_SLAB_ALLOC(ptr, slab, type, size) \
do { \
- LASSERT(ergo(type != CFS_ALLOC_ATOMIC, !in_interrupt())); \
+ LASSERT(ergo(type != CFS_ALLOC_ATOMIC, !cfs_in_interrupt())); \
(ptr) = cfs_mem_cache_alloc(slab, (type)); \
if (likely((ptr) != NULL && \
(!HAS_FAIL_ALLOC_FLAG || obd_alloc_fail_rate == 0 || \
obd_memory_sum(), \
obd_pages_sum() << CFS_PAGE_SHIFT, \
obd_pages_sum(), \
- atomic_read(&libcfs_kmemory)); \
+ cfs_atomic_read(&libcfs_kmemory)); \
} else { \
obd_pages_add(order); \
CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / " \
* A mutex serializing calls to slp_inode_fini() under extreme memory
* pressure, when environments cannot be allocated.
*/
-static DEFINE_MUTEX(ccc_inode_fini_guard);
+static CFS_DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;
int ccc_global_init(struct lu_device_type *device_type)
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
if (emergency) {
- mutex_lock(&ccc_inode_fini_guard);
+ cfs_mutex_lock(&ccc_inode_fini_guard);
LASSERT(ccc_inode_fini_env != NULL);
cl_env_implant(ccc_inode_fini_env, &refcheck);
env = ccc_inode_fini_env;
lli->lli_clob = NULL;
if (emergency) {
cl_env_unplant(ccc_inode_fini_env, &refcheck);
- mutex_unlock(&ccc_inode_fini_guard);
+ cfs_mutex_unlock(&ccc_inode_fini_guard);
} else
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
CDEBUG(D_SUPER, "Changing connect_flags: "LPX64" -> "LPX64"\n",
lco->lco_flags, flags);
- mutex_down(&lco->lco_lock);
+ cfs_mutex_down(&lco->lco_lock);
lco->lco_flags &= flags;
/* for each osc event update ea size */
if (lco->lco_dt_exp)
cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
- mutex_up(&lco->lco_lock);
+ cfs_mutex_up(&lco->lco_lock);
result = 0;
} else {
CERROR("unexpected notification from %s %s!\n",
{
struct ldlm_resource *res = NULL;
- spin_lock(&lock->l_lock);
+ cfs_spin_lock(&lock->l_lock);
res = lock->l_resource;
if (ns_is_server(res->lr_namespace))
/* on server-side resource of lock doesn't change */
- spin_unlock(&lock->l_lock);
+ cfs_spin_unlock(&lock->l_lock);
lock_res(res);
return res;
}
unlock_res(res);
- spin_unlock(&lock->l_lock);
+ cfs_spin_unlock(&lock->l_lock);
}
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
struct ldlm_extent *new_ex)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_resource *res = req->l_resource;
ldlm_mode_t req_mode = req->l_req_mode;
__u64 req_start = req->l_req_extent.start;
lockmode_verify(req_mode);
/* for waiting locks */
- list_for_each(tmp, &res->lr_waiting) {
+ cfs_list_for_each(tmp, &res->lr_waiting) {
struct ldlm_lock *lock;
struct ldlm_extent *l_extent;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
l_extent = &lock->l_policy_data.l_extent;
/* We already hit the minimum requested size, search no more */
}
struct ldlm_extent_compat_args {
- struct list_head *work_list;
+ cfs_list_t *work_list;
struct ldlm_lock *lock;
ldlm_mode_t mode;
int *locks;
struct ldlm_extent_compat_args *priv = data;
struct ldlm_interval *node = to_ldlm_interval(n);
struct ldlm_extent *extent;
- struct list_head *work_list = priv->work_list;
+ cfs_list_t *work_list = priv->work_list;
struct ldlm_lock *lock, *enq = priv->lock;
ldlm_mode_t mode = priv->mode;
int count = 0;
ENTRY;
- LASSERT(!list_empty(&node->li_group));
+ LASSERT(!cfs_list_empty(&node->li_group));
- list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+ cfs_list_for_each_entry(lock, &node->li_group, l_sl_policy) {
/* interval tree is for granted lock */
LASSERTF(mode == lock->l_granted_mode,
"mode = %s, lock->l_granted_mode = %s\n",
* negative error, such as EWOULDBLOCK for group locks
*/
static int
-ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
int *flags, ldlm_error_t *err,
- struct list_head *work_list, int *contended_locks)
+ cfs_list_t *work_list, int *contended_locks)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lock;
struct ldlm_resource *res = req->l_resource;
ldlm_mode_t req_mode = req->l_req_mode;
} else {
interval_search(tree->lit_root, &ex,
ldlm_extent_compat_cb, &data);
- if (!list_empty(work_list) && compat)
+ if (!cfs_list_empty(work_list) && compat)
compat = 0;
}
}
} else { /* for waiting queue */
- list_for_each(tmp, queue) {
+ cfs_list_for_each(tmp, queue) {
check_contention = 1;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
if (req == lock)
break;
* front of first non-GROUP lock */
ldlm_resource_insert_lock_after(lock, req);
- list_del_init(&lock->l_res_link);
+ cfs_list_del_init(&lock->l_res_link);
ldlm_resource_insert_lock_after(req, lock);
compat = 0;
break;
first non-GROUP lock */
ldlm_resource_insert_lock_after(lock, req);
- list_del_init(&lock->l_res_link);
+ cfs_list_del_init(&lock->l_res_link);
ldlm_resource_insert_lock_after(req, lock);
break;
}
RETURN(compat);
destroylock:
- list_del_init(&req->l_res_link);
+ cfs_list_del_init(&req->l_res_link);
ldlm_lock_destroy_nolock(req);
*err = compat;
RETURN(compat);
}
-static void discard_bl_list(struct list_head *bl_list)
+static void discard_bl_list(cfs_list_t *bl_list)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
ENTRY;
- list_for_each_safe(pos, tmp, bl_list) {
+ cfs_list_for_each_safe(pos, tmp, bl_list) {
struct ldlm_lock *lock =
- list_entry(pos, struct ldlm_lock, l_bl_ast);
+ cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
- list_del_init(&lock->l_bl_ast);
+ cfs_list_del_init(&lock->l_bl_ast);
LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
lock->l_flags &= ~LDLM_FL_AST_SENT;
LASSERT(lock->l_bl_ast_run == 0);
* - blocking ASTs have not been sent
* - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list)
+ ldlm_error_t *err, cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
CFS_LIST_HEAD(rpc_list);
int contended_locks = 0;
ENTRY;
- LASSERT(list_empty(&res->lr_converting));
+ LASSERT(cfs_list_empty(&res->lr_converting));
LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
!(lock->l_flags & LDLM_AST_DISCARD_DATA));
check_res_locked(res);
* bug 2322: we used to unlink and re-add here, which was a
* terrible folly -- if we goto restart, we could get
* re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (list_empty(&lock->l_res_link))
+ if (cfs_list_empty(&lock->l_res_link))
ldlm_resource_add_lock(res, &res->lr_waiting, lock);
unlock_res(res);
rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
}
RETURN(0);
out:
- if (!list_empty(&rpc_list)) {
+ if (!cfs_list_empty(&rpc_list)) {
LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
discard_bl_list(&rpc_list);
}
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
struct ldlm_resource *res = lock->l_resource;
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lck;
__u64 kms = 0;
ENTRY;
* calculation of the kms */
lock->l_flags |= LDLM_FL_KMS_IGNORE;
- list_for_each(tmp, &res->lr_granted) {
- lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, &res->lr_granted) {
+ lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (lck->l_flags & LDLM_FL_KMS_IGNORE)
continue;
void ldlm_interval_free(struct ldlm_interval *node)
{
if (node) {
- LASSERT(list_empty(&node->li_group));
+ LASSERT(cfs_list_empty(&node->li_group));
LASSERT(!interval_is_intree(&node->li_node));
OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
}
LASSERT(l->l_tree_node == NULL);
LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
- list_add_tail(&l->l_sl_policy, &n->li_group);
+ cfs_list_add_tail(&l->l_sl_policy, &n->li_group);
l->l_tree_node = n;
}
if (n == NULL)
return NULL;
- LASSERT(!list_empty(&n->li_group));
+ LASSERT(!cfs_list_empty(&n->li_group));
l->l_tree_node = NULL;
- list_del_init(&l->l_sl_policy);
+ cfs_list_del_init(&l->l_sl_policy);
- return (list_empty(&n->li_group) ? n : NULL);
+ return (cfs_list_empty(&n->li_group) ? n : NULL);
}
static inline int lock_mode_to_index(ldlm_mode_t mode)
/**
* Lock protecting access to ldlm_flock_waitq.
*/
-spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t ldlm_flock_waitq_lock = CFS_SPIN_LOCK_UNLOCKED;
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag);
mode, flags);
/* Safe to not lock here, since it should be empty anyway */
- LASSERT(list_empty(&lock->l_flock_waitq));
+ LASSERT(cfs_list_empty(&lock->l_flock_waitq));
- list_del_init(&lock->l_res_link);
+ cfs_list_del_init(&lock->l_res_link);
if (flags == LDLM_FL_WAIT_NOREPROC &&
!(lock->l_flags & LDLM_FL_FAILED)) {
/* client side - set a flag to prevent sending a CANCEL */
pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
struct ldlm_lock *lock;
- spin_lock(&ldlm_flock_waitq_lock);
+ cfs_spin_lock(&ldlm_flock_waitq_lock);
restart:
- list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
+ cfs_list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
(lock->l_export != blocking_export))
continue;
blocking_export = (struct obd_export *)(long)
lock->l_policy_data.l_flock.blocking_export;
if (blocking_pid == req_pid && blocking_export == req_export) {
- spin_unlock(&ldlm_flock_waitq_lock);
+ cfs_spin_unlock(&ldlm_flock_waitq_lock);
return 1;
}
goto restart;
}
- spin_unlock(&ldlm_flock_waitq_lock);
+ cfs_spin_unlock(&ldlm_flock_waitq_lock);
return 0;
}
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list)
+ ldlm_error_t *err, cfs_list_t *work_list)
{
struct ldlm_resource *res = req->l_resource;
struct ldlm_namespace *ns = res->lr_namespace;
- struct list_head *tmp;
- struct list_head *ownlocks = NULL;
+ cfs_list_t *tmp;
+ cfs_list_t *ownlocks = NULL;
struct ldlm_lock *lock = NULL;
struct ldlm_lock *new = req;
struct ldlm_lock *new2 = NULL;
if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
/* This loop determines where this processes locks start
* in the resource lr_granted list. */
- list_for_each(tmp, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, &res->lr_granted) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
ownlocks = tmp;
break;
/* This loop determines if there are existing locks
* that conflict with the new lock request. */
- list_for_each(tmp, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, &res->lr_granted) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
if (!ownlocks)
req->l_policy_data.l_flock.blocking_export =
(long)(void *)lock->l_export;
- LASSERT(list_empty(&req->l_flock_waitq));
- spin_lock(&ldlm_flock_waitq_lock);
- list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
- spin_unlock(&ldlm_flock_waitq_lock);
+ LASSERT(cfs_list_empty(&req->l_flock_waitq));
+ cfs_spin_lock(&ldlm_flock_waitq_lock);
+ cfs_list_add_tail(&req->l_flock_waitq,
+ &ldlm_flock_waitq);
+ cfs_spin_unlock(&ldlm_flock_waitq_lock);
ldlm_resource_add_lock(res, &res->lr_waiting, req);
*flags |= LDLM_FL_BLOCK_GRANTED;
/* In case we had slept on this lock request take it off of the
* deadlock detection waitq. */
- spin_lock(&ldlm_flock_waitq_lock);
- list_del_init(&req->l_flock_waitq);
- spin_unlock(&ldlm_flock_waitq_lock);
+ cfs_spin_lock(&ldlm_flock_waitq_lock);
+ cfs_list_del_init(&req->l_flock_waitq);
+ cfs_spin_unlock(&ldlm_flock_waitq_lock);
/* Scan the locks owned by this process that overlap this request.
* We may have to merge or split existing locks. */
ownlocks = &res->lr_granted;
list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
- lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
if (!ldlm_same_flock_owner(lock, new))
break;
if (lock->l_export != NULL) {
new2->l_export = class_export_lock_get(lock->l_export, new2);
if (new2->l_export->exp_lock_hash &&
- hlist_unhashed(&new2->l_exp_hash))
+ cfs_hlist_unhashed(&new2->l_exp_hash))
cfs_hash_add(new2->l_export->exp_lock_hash,
&new2->l_remote_handle,
&new2->l_exp_hash);
/* Add req to the granted queue before calling ldlm_reprocess_all(). */
if (!added) {
- list_del_init(&req->l_res_link);
+ cfs_list_del_init(&req->l_res_link);
/* insert new lock before ownlocks in list. */
ldlm_resource_add_lock(res, ownlocks, req);
}
lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
/* take lock off the deadlock detection waitq. */
- spin_lock(&ldlm_flock_waitq_lock);
- list_del_init(&lock->l_flock_waitq);
- spin_unlock(&ldlm_flock_waitq_lock);
+ cfs_spin_lock(&ldlm_flock_waitq_lock);
+ cfs_list_del_init(&lock->l_flock_waitq);
+ cfs_spin_unlock(&ldlm_flock_waitq_lock);
/* client side - set flag to prevent lock from being put on lru list */
lock->l_flags |= LDLM_FL_CBPENDING;
imp = obd->u.cli.cl_import;
if (NULL != imp) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
fwd.fwd_generation = imp->imp_generation;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
LDLM_DEBUG(lock, "client-side enqueue granted");
/* take lock off the deadlock detection waitq. */
- spin_lock(&ldlm_flock_waitq_lock);
- list_del_init(&lock->l_flock_waitq);
- spin_unlock(&ldlm_flock_waitq_lock);
+ cfs_spin_lock(&ldlm_flock_waitq_lock);
+ cfs_list_del_init(&lock->l_flock_waitq);
+ cfs_spin_unlock(&ldlm_flock_waitq_lock);
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
- list_del_init(&lock->l_res_link);
+ cfs_list_del_init(&lock->l_res_link);
if (flags & LDLM_FL_TEST_LOCK) {
/* fcntl(F_GETLK) request */
ns = lock->l_resource->lr_namespace;
/* take lock off the deadlock detection waitq. */
- spin_lock(&ldlm_flock_waitq_lock);
- list_del_init(&lock->l_flock_waitq);
- spin_unlock(&ldlm_flock_waitq_lock);
+ cfs_spin_lock(&ldlm_flock_waitq_lock);
+ cfs_list_del_init(&lock->l_flock_waitq);
+ cfs_spin_unlock(&ldlm_flock_waitq_lock);
RETURN(0);
}
/* Determine if the lock is compatible with all locks on the queue. */
static int
-ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
- struct list_head *work_list)
+ldlm_inodebits_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
+ cfs_list_t *work_list)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lock;
ldlm_mode_t req_mode = req->l_req_mode;
__u64 req_bits = req->l_policy_data.l_inodebits.bits;
I think. Also such a lock would be compatible
with any other bit lock */
- list_for_each(tmp, queue) {
- struct list_head *mode_tail;
+ cfs_list_for_each(tmp, queue) {
+ cfs_list_t *mode_tail;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (req == lock)
RETURN(compat);
/* last lock in mode group */
LASSERT(lock->l_sl_mode.prev != NULL);
- mode_tail = &list_entry(lock->l_sl_mode.prev,
- struct ldlm_lock,
- l_sl_mode)->l_res_link;
+ mode_tail = &cfs_list_entry(lock->l_sl_mode.prev,
+ struct ldlm_lock,
+ l_sl_mode)->l_res_link;
/* locks are compatible, bits don't matter */
if (lockmode_compat(lock->l_req_mode, req_mode)) {
}
for (;;) {
- struct list_head *head;
+ cfs_list_t *head;
/* last lock in policy group */
- tmp = &list_entry(lock->l_sl_policy.prev,
- struct ldlm_lock,
- l_sl_policy)->l_res_link;
+ tmp = &cfs_list_entry(lock->l_sl_policy.prev,
+ struct ldlm_lock,
+ l_sl_policy)->l_res_link;
/* locks with bits overlapped are conflicting locks */
if (lock->l_policy_data.l_inodebits.bits & req_bits) {
ldlm_add_ast_work_item(lock, req,
work_list);
head = &lock->l_sl_policy;
- list_for_each_entry(lock, head, l_sl_policy)
+ cfs_list_for_each_entry(lock, head, l_sl_policy)
if (lock->l_blocking_ast)
ldlm_add_ast_work_item(lock, req,
work_list);
break;
tmp = tmp->next;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
} /* loop over policy groups within one mode group */
} /* loop over mode groups within @queue */
* - must call this function with the ns lock held once */
int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags,
int first_enq, ldlm_error_t *err,
- struct list_head *work_list)
+ cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
CFS_LIST_HEAD(rpc_list);
int rc;
ENTRY;
- LASSERT(list_empty(&res->lr_converting));
+ LASSERT(cfs_list_empty(&res->lr_converting));
check_res_locked(res);
if (!first_enq) {
* bug 2322: we used to unlink and re-add here, which was a
* terrible folly -- if we goto restart, we could get
* re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (list_empty(&lock->l_res_link))
+ if (cfs_list_empty(&lock->l_res_link))
ldlm_resource_add_lock(res, &res->lr_waiting, lock);
unlock_res(res);
rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
#define MAX_STRING_SIZE 128
-extern atomic_t ldlm_srv_namespace_nr;
-extern atomic_t ldlm_cli_namespace_nr;
-extern struct semaphore ldlm_srv_namespace_lock;
-extern struct list_head ldlm_srv_namespace_list;
-extern struct semaphore ldlm_cli_namespace_lock;
-extern struct list_head ldlm_cli_namespace_list;
-
-static inline atomic_t *ldlm_namespace_nr(ldlm_side_t client)
+extern cfs_atomic_t ldlm_srv_namespace_nr;
+extern cfs_atomic_t ldlm_cli_namespace_nr;
+extern cfs_semaphore_t ldlm_srv_namespace_lock;
+extern cfs_list_t ldlm_srv_namespace_list;
+extern cfs_semaphore_t ldlm_cli_namespace_lock;
+extern cfs_list_t ldlm_cli_namespace_list;
+
+static inline cfs_atomic_t *ldlm_namespace_nr(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr;
}
-static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
+static inline cfs_list_t *ldlm_namespace_list(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
}
-static inline struct semaphore *ldlm_namespace_lock(ldlm_side_t client)
+static inline cfs_semaphore_t *ldlm_namespace_lock(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
int flags);
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
- int count, int max, int cancel_flags, int flags);
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
+ cfs_list_t *cancels, int count, int max,
+ int cancel_flags, int flags);
extern int ldlm_enqueue_min;
int ldlm_get_enq_timeout(struct ldlm_lock *lock);
struct ldlm_cb_set_arg {
struct ptlrpc_request_set *set;
- atomic_t restart;
+ cfs_atomic_t restart;
__u32 type; /* LDLM_BL_CALLBACK or LDLM_CP_CALLBACK */
};
LDLM_WORK_REVOKE_AST
} ldlm_desc_ast_t;
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
+void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list);
struct ldlm_lock *
ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
ldlm_type_t type, ldlm_mode_t,
void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list);
-int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
- struct list_head *work_list);
-int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type);
+ cfs_list_t *work_list);
+int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
+ cfs_list_t *work_list);
+int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type);
int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
struct ldlm_lock *lock);
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct list_head *cancels, int count);
+ cfs_list_t *cancels, int count);
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
/* ldlm_plain.c */
int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list);
+ ldlm_error_t *err, cfs_list_t *work_list);
/* ldlm_extent.c */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list);
+ ldlm_error_t *err, cfs_list_t *work_list);
void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
/* ldlm_flock.c */
int ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list);
+ ldlm_error_t *err, cfs_list_t *work_list);
/* ldlm_inodebits.c */
int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags,
int first_enq, ldlm_error_t *err,
- struct list_head *work_list);
+ cfs_list_t *work_list);
/* l_lock.c */
void l_check_ns_lock(struct ldlm_namespace *ns);
ldlm_interval_extent(struct ldlm_interval *node)
{
struct ldlm_lock *lock;
- LASSERT(!list_empty(&node->li_group));
+ LASSERT(!cfs_list_empty(&node->li_group));
- lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy);
+ lock = cfs_list_entry(node->li_group.next, struct ldlm_lock,
+ l_sl_policy);
return &lock->l_policy_data.l_extent;
}
struct ldlm_pool *pl = data; \
type tmp; \
\
- spin_lock(&pl->pl_lock); \
+ cfs_spin_lock(&pl->pl_lock); \
tmp = pl->pl_##var; \
- spin_unlock(&pl->pl_lock); \
+ cfs_spin_unlock(&pl->pl_lock); \
\
return lprocfs_rd_uint(page, start, off, count, eof, &tmp); \
} \
return rc; \
} \
\
- spin_lock(&pl->pl_lock); \
+ cfs_spin_lock(&pl->pl_lock); \
pl->pl_##var = tmp; \
- spin_unlock(&pl->pl_lock); \
+ cfs_spin_unlock(&pl->pl_lock); \
\
return rc; \
} \
}
}
- spin_lock(&imp->imp_lock);
- list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
+ cfs_spin_lock(&imp->imp_lock);
+ cfs_list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
if (obd_uuid_equals(uuid, &item->oic_uuid)) {
if (priority) {
- list_del(&item->oic_item);
- list_add(&item->oic_item, &imp->imp_conn_list);
+ cfs_list_del(&item->oic_item);
+ cfs_list_add(&item->oic_item,
+ &imp->imp_conn_list);
item->oic_last_attempt = 0;
}
CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
imp, imp->imp_obd->obd_name, uuid->uuid,
(priority ? ", moved to head" : ""));
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
GOTO(out_free, rc = 0);
}
}
imp_conn->oic_uuid = *uuid;
imp_conn->oic_last_attempt = 0;
if (priority)
- list_add(&imp_conn->oic_item, &imp->imp_conn_list);
+ cfs_list_add(&imp_conn->oic_item, &imp->imp_conn_list);
else
- list_add_tail(&imp_conn->oic_item, &imp->imp_conn_list);
+ cfs_list_add_tail(&imp_conn->oic_item,
+ &imp->imp_conn_list);
CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
imp, imp->imp_obd->obd_name, uuid->uuid,
(priority ? "head" : "tail"));
} else {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
GOTO(out_free, rc = -ENOENT);
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(0);
out_free:
if (imp_conn)
int rc = -ENOENT;
ENTRY;
- spin_lock(&imp->imp_lock);
- if (list_empty(&imp->imp_conn_list)) {
+ cfs_spin_lock(&imp->imp_lock);
+ if (cfs_list_empty(&imp->imp_conn_list)) {
LASSERT(!imp->imp_connection);
GOTO(out, rc);
}
- list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
+ cfs_list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
continue;
LASSERT(imp_conn->oic_conn);
}
}
- list_del(&imp_conn->oic_item);
+ cfs_list_del(&imp_conn->oic_item);
ptlrpc_connection_put(imp_conn->oic_conn);
OBD_FREE(imp_conn, sizeof(*imp_conn));
CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
break;
}
out:
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (rc == -ENOENT)
CERROR("connection %s not found\n", uuid->uuid);
RETURN(rc);
RETURN(-EINVAL);
}
- init_rwsem(&cli->cl_sem);
- sema_init(&cli->cl_mgc_sem, 1);
+ cfs_init_rwsem(&cli->cl_sem);
+ cfs_sema_init(&cli->cl_mgc_sem, 1);
cli->cl_conn_count = 0;
memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
cli->cl_avail_grant = 0;
/* FIXME: should limit this for the sum of all cl_dirty_max */
cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> CFS_PAGE_SHIFT > num_physpages / 8)
- cli->cl_dirty_max = num_physpages << (CFS_PAGE_SHIFT - 3);
+ if (cli->cl_dirty_max >> CFS_PAGE_SHIFT > cfs_num_physpages / 8)
+ cli->cl_dirty_max = cfs_num_physpages << (CFS_PAGE_SHIFT - 3);
CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters);
CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list);
CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
cli->cl_r_in_flight = 0;
cli->cl_w_in_flight = 0;
- spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
- spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
- spin_lock_init(&cli->cl_read_page_hist.oh_lock);
- spin_lock_init(&cli->cl_write_page_hist.oh_lock);
- spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
- spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
+ cfs_spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
+ cfs_spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
+ cfs_spin_lock_init(&cli->cl_read_page_hist.oh_lock);
+ cfs_spin_lock_init(&cli->cl_write_page_hist.oh_lock);
+ cfs_spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
+ cfs_spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
cfs_waitq_init(&cli->cl_destroy_waitq);
- atomic_set(&cli->cl_destroy_in_flight, 0);
+ cfs_atomic_set(&cli->cl_destroy_in_flight, 0);
#ifdef ENABLE_CHECKSUM
/* Turn on checksumming by default. */
cli->cl_checksum = 1;
*/
cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
#endif
- atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
- atomic_set(&cli->cl_quota_resends, CLIENT_QUOTA_DEFAULT_RESENDS);
+ cfs_atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
+ cfs_atomic_set(&cli->cl_quota_resends, CLIENT_QUOTA_DEFAULT_RESENDS);
/* This value may be changed at connect time in
ptlrpc_connect_interpret. */
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
- } else if (num_physpages >> (20 - CFS_PAGE_SHIFT) <= 128 /* MB */) {
+ } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
- } else if (num_physpages >> (20 - CFS_PAGE_SHIFT) <= 256 /* MB */) {
+ } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 256 /* MB */) {
cli->cl_max_rpcs_in_flight = 3;
- } else if (num_physpages >> (20 - CFS_PAGE_SHIFT) <= 512 /* MB */) {
+ } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
name, obddev->obd_name,
cli->cl_target_uuid.uuid);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
}
ENTRY;
*exp = NULL;
- down_write(&cli->cl_sem);
+ cfs_down_write(&cli->cl_sem);
if (cli->cl_conn_count > 0 )
GOTO(out_sem, rc = -EALREADY);
*exp = NULL;
}
out_sem:
- up_write(&cli->cl_sem);
+ cfs_up_write(&cli->cl_sem);
return rc;
}
cli = &obd->u.cli;
imp = cli->cl_import;
- down_write(&cli->cl_sem);
+ cfs_down_write(&cli->cl_sem);
CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
cli->cl_conn_count);
/* Mark import deactivated now, so we don't try to reconnect if any
* of the cleanup RPCs fails (e.g. ldlm cancel, etc). We don't
* fully deactivate the import, or that would drop all requests. */
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
/* Some non-replayable imports (MDS's OSCs) are pinged, so just
* delete it regardless. (It's safe to delete an import that was
* there's no need to hold sem during disconnecting an import,
* and actually it may cause deadlock in gss.
*/
- up_write(&cli->cl_sem);
+ cfs_up_write(&cli->cl_sem);
rc = ptlrpc_disconnect_import(imp, 0);
- down_write(&cli->cl_sem);
+ cfs_down_write(&cli->cl_sem);
ptlrpc_invalidate_import(imp);
if (!rc && err)
rc = err;
- up_write(&cli->cl_sem);
+ cfs_up_write(&cli->cl_sem);
RETURN(rc);
}
ldlm_cancel_locks_for_export(exp);
/* complete all outstanding replies */
- spin_lock(&exp->exp_lock);
- while (!list_empty(&exp->exp_outstanding_replies)) {
+ cfs_spin_lock(&exp->exp_lock);
+ while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
struct ptlrpc_reply_state *rs =
- list_entry(exp->exp_outstanding_replies.next,
- struct ptlrpc_reply_state, rs_exp_list);
+ cfs_list_entry(exp->exp_outstanding_replies.next,
+ struct ptlrpc_reply_state, rs_exp_list);
struct ptlrpc_service *svc = rs->rs_service;
- spin_lock(&svc->srv_lock);
- list_del_init(&rs->rs_exp_list);
- spin_lock(&rs->rs_lock);
+ cfs_spin_lock(&svc->srv_lock);
+ cfs_list_del_init(&rs->rs_exp_list);
+ cfs_spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
- spin_unlock(&rs->rs_lock);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&rs->rs_lock);
+ cfs_spin_unlock(&svc->srv_lock);
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
/* release nid stat refererence */
lprocfs_exp_cleanup(exp);
CDEBUG(D_RPCTRACE, "%s: committing for initial connect of %s\n",
obd->obd_name, exp->exp_client_uuid.uuid);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_need_sync = 0;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
class_export_cb_put(exp);
}
EXPORT_SYMBOL(target_client_add_cb);
class_export_put(export);
export = NULL;
} else {
- spin_lock(&export->exp_lock);
+ cfs_spin_lock(&export->exp_lock);
export->exp_connecting = 1;
- spin_unlock(&export->exp_lock);
+ cfs_spin_unlock(&export->exp_lock);
class_export_put(export);
LASSERT(export->exp_obd == target);
no_export:
OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_CONNECT, 2 * obd_timeout);
} else if (req->rq_export == NULL &&
- atomic_read(&export->exp_rpc_count) > 0) {
+ cfs_atomic_read(&export->exp_rpc_count) > 0) {
CWARN("%s: refuse connection from %s/%s to 0x%p/%d\n",
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid),
- export, atomic_read(&export->exp_refcount));
+ export, cfs_atomic_read(&export->exp_refcount));
GOTO(out, rc = -EBUSY);
} else if (req->rq_export != NULL &&
- (atomic_read(&export->exp_rpc_count) > 1)) {
+ (cfs_atomic_read(&export->exp_rpc_count) > 1)) {
/* the current connect rpc has increased exp_rpc_count */
CWARN("%s: refuse reconnection from %s@%s to 0x%p/%d\n",
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid),
- export, atomic_read(&export->exp_rpc_count) - 1);
- spin_lock(&export->exp_lock);
+ export, cfs_atomic_read(&export->exp_rpc_count) - 1);
+ cfs_spin_lock(&export->exp_lock);
if (req->rq_export->exp_conn_cnt <
lustre_msg_get_conn_cnt(req->rq_reqmsg))
/* try to abort active requests */
req->rq_export->exp_abort_active_req = 1;
- spin_unlock(&export->exp_lock);
+ cfs_spin_unlock(&export->exp_lock);
GOTO(out, rc = -EBUSY);
} else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
CERROR("%s: NID %s (%s) reconnected with 1 conn_cnt; "
"%d clients in recovery for "CFS_TIME_T"s\n",
target->obd_name,
libcfs_nid2str(req->rq_peer.nid), cluuid.uuid,
- atomic_read(&target->obd_lock_replay_clients),
+ cfs_atomic_read(&target-> \
+ obd_lock_replay_clients),
cfs_duration_sec(t));
rc = -EBUSY;
} else {
req->rq_export = export;
- spin_lock(&export->exp_lock);
+ cfs_spin_lock(&export->exp_lock);
if (export->exp_conn_cnt >= lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
- spin_unlock(&export->exp_lock);
+ cfs_spin_unlock(&export->exp_lock);
CERROR("%s: %s already connected at higher conn_cnt: %d > %d\n",
cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
export->exp_conn_cnt,
/* request from liblustre? Don't evict it for not pinging. */
if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
export->exp_libclient = 1;
- spin_unlock(&export->exp_lock);
+ cfs_spin_unlock(&export->exp_lock);
- spin_lock(&target->obd_dev_lock);
- list_del_init(&export->exp_obd_chain_timed);
- spin_unlock(&target->obd_dev_lock);
+ cfs_spin_lock(&target->obd_dev_lock);
+ cfs_list_del_init(&export->exp_obd_chain_timed);
+ cfs_spin_unlock(&target->obd_dev_lock);
} else {
- spin_unlock(&export->exp_lock);
+ cfs_spin_unlock(&export->exp_lock);
}
if (export->exp_connection != NULL) {
/* Check to see if connection came from another NID */
if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
- !hlist_unhashed(&export->exp_nid_hash))
+ !cfs_hlist_unhashed(&export->exp_nid_hash))
cfs_hash_del(export->exp_obd->obd_nid_hash,
&export->exp_connection->c_peer.nid,
&export->exp_nid_hash);
export->exp_connection = ptlrpc_connection_get(req->rq_peer,
req->rq_self,
&remote_uuid);
- if (hlist_unhashed(&export->exp_nid_hash)) {
+ if (cfs_hlist_unhashed(&export->exp_nid_hash)) {
cfs_hash_add_unique(export->exp_obd->obd_nid_hash,
&export->exp_connection->c_peer.nid,
&export->exp_nid_hash);
}
- spin_lock_bh(&target->obd_processing_task_lock);
+ cfs_spin_lock_bh(&target->obd_processing_task_lock);
if (target->obd_recovering && !export->exp_in_recovery) {
- spin_lock(&export->exp_lock);
+ cfs_spin_lock(&export->exp_lock);
export->exp_in_recovery = 1;
export->exp_req_replay_needed = 1;
export->exp_lock_replay_needed = 1;
- spin_unlock(&export->exp_lock);
+ cfs_spin_unlock(&export->exp_lock);
if ((lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_TRANSNO)
&& (data->ocd_transno == 0))
CWARN("Connect with zero transno!\n");
&& data->ocd_transno < target->obd_next_recovery_transno)
target->obd_next_recovery_transno = data->ocd_transno;
target->obd_connected_clients++;
- atomic_inc(&target->obd_req_replay_clients);
- atomic_inc(&target->obd_lock_replay_clients);
+ cfs_atomic_inc(&target->obd_req_replay_clients);
+ cfs_atomic_inc(&target->obd_lock_replay_clients);
if (target->obd_connected_clients ==
target->obd_max_recoverable_clients)
cfs_waitq_signal(&target->obd_next_transno_waitq);
}
- spin_unlock_bh(&target->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&target->obd_processing_task_lock);
tmp = req_capsule_client_get(&req->rq_pill, &RMF_CONN);
conn = *tmp;
class_import_put(revimp);
out:
if (export) {
- spin_lock(&export->exp_lock);
+ cfs_spin_lock(&export->exp_lock);
export->exp_connecting = 0;
- spin_unlock(&export->exp_lock);
+ cfs_spin_unlock(&export->exp_lock);
}
if (targref)
class_decref(targref, __FUNCTION__, cfs_current());
if (exp->exp_imp_reverse != NULL)
client_destroy_import(exp->exp_imp_reverse);
- LASSERT(atomic_read(&exp->exp_locks_count) == 0);
- LASSERT(atomic_read(&exp->exp_rpc_count) == 0);
- LASSERT(atomic_read(&exp->exp_cb_count) == 0);
- LASSERT(atomic_read(&exp->exp_replay_count) == 0);
+ LASSERT(cfs_atomic_read(&exp->exp_locks_count) == 0);
+ LASSERT(cfs_atomic_read(&exp->exp_rpc_count) == 0);
+ LASSERT(cfs_atomic_read(&exp->exp_cb_count) == 0);
+ LASSERT(cfs_atomic_read(&exp->exp_replay_count) == 0);
}
/*
static void target_request_copy_get(struct ptlrpc_request *req)
{
class_export_rpc_get(req->rq_export);
- LASSERT(list_empty(&req->rq_list));
+ LASSERT(cfs_list_empty(&req->rq_list));
CFS_INIT_LIST_HEAD(&req->rq_replay_list);
/* increase refcount to keep request in queue */
- LASSERT(atomic_read(&req->rq_refcount));
- atomic_inc(&req->rq_refcount);
+ LASSERT(cfs_atomic_read(&req->rq_refcount));
+ cfs_atomic_inc(&req->rq_refcount);
/** let export know it has replays to be handled */
- atomic_inc(&req->rq_export->exp_replay_count);
+ cfs_atomic_inc(&req->rq_export->exp_replay_count);
}
static void target_request_copy_put(struct ptlrpc_request *req)
{
- LASSERT(list_empty(&req->rq_replay_list));
- LASSERT(atomic_read(&req->rq_export->exp_replay_count) > 0);
- atomic_dec(&req->rq_export->exp_replay_count);
+ LASSERT(cfs_list_empty(&req->rq_replay_list));
+ LASSERT(cfs_atomic_read(&req->rq_export->exp_replay_count) > 0);
+ cfs_atomic_dec(&req->rq_export->exp_replay_count);
class_export_rpc_put(req->rq_export);
ptlrpc_server_drop_request(req);
}
LASSERT(exp);
- spin_lock(&exp->exp_lock);
- list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
- rq_replay_list) {
+ cfs_spin_lock(&exp->exp_lock);
+ cfs_list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
+ rq_replay_list) {
if (lustre_msg_get_transno(reqiter->rq_reqmsg) == transno) {
dup = 1;
break;
CERROR("invalid flags %x of resent replay\n",
lustre_msg_get_flags(req->rq_reqmsg));
} else {
- list_add_tail(&req->rq_replay_list, &exp->exp_req_replay_queue);
+ cfs_list_add_tail(&req->rq_replay_list,
+ &exp->exp_req_replay_queue);
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return dup;
}
static void target_exp_dequeue_req_replay(struct ptlrpc_request *req)
{
- LASSERT(!list_empty(&req->rq_replay_list));
+ LASSERT(!cfs_list_empty(&req->rq_replay_list));
LASSERT(req->rq_export);
- spin_lock(&req->rq_export->exp_lock);
- list_del_init(&req->rq_replay_list);
- spin_unlock(&req->rq_export->exp_lock);
+ cfs_spin_lock(&req->rq_export->exp_lock);
+ cfs_list_del_init(&req->rq_replay_list);
+ cfs_spin_unlock(&req->rq_export->exp_lock);
}
#ifdef __KERNEL__
obd->obd_name);
ldlm_reprocess_all_ns(obd->obd_namespace);
- spin_lock_bh(&obd->obd_processing_task_lock);
- if (!list_empty(&obd->obd_req_replay_queue) ||
- !list_empty(&obd->obd_lock_replay_queue) ||
- !list_empty(&obd->obd_final_req_queue)) {
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ if (!cfs_list_empty(&obd->obd_req_replay_queue) ||
+ !cfs_list_empty(&obd->obd_lock_replay_queue) ||
+ !cfs_list_empty(&obd->obd_final_req_queue)) {
CERROR("%s: Recovery queues ( %s%s%s) are not empty\n",
obd->obd_name,
- list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
- list_empty(&obd->obd_lock_replay_queue) ? "" : "lock ",
- list_empty(&obd->obd_final_req_queue) ? "" : "final ");
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
+ cfs_list_empty(&obd->obd_lock_replay_queue) ? \
+ "" : "lock ",
+ cfs_list_empty(&obd->obd_final_req_queue) ? \
+ "" : "final ");
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
LBUG();
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
obd->obd_recovery_end = cfs_time_current_sec();
static void abort_req_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- struct list_head abort_list;
+ cfs_list_t abort_list;
CFS_INIT_LIST_HEAD(&abort_list);
- spin_lock_bh(&obd->obd_processing_task_lock);
- list_splice_init(&obd->obd_req_replay_queue, &abort_list);
- spin_unlock_bh(&obd->obd_processing_task_lock);
- list_for_each_entry_safe(req, n, &abort_list, rq_list) {
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list) {
DEBUG_REQ(D_WARNING, req, "aborted:");
req->rq_status = -ENOTCONN;
if (ptlrpc_error(req)) {
static void abort_lock_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- struct list_head abort_list;
+ cfs_list_t abort_list;
CFS_INIT_LIST_HEAD(&abort_list);
- spin_lock_bh(&obd->obd_processing_task_lock);
- list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
- spin_unlock_bh(&obd->obd_processing_task_lock);
- list_for_each_entry_safe(req, n, &abort_list, rq_list){
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list){
DEBUG_REQ(D_ERROR, req, "aborted:");
req->rq_status = -ENOTCONN;
if (ptlrpc_error(req)) {
void target_cleanup_recovery(struct obd_device *obd)
{
struct ptlrpc_request *req, *n;
- struct list_head clean_list;
+ cfs_list_t clean_list;
ENTRY;
CFS_INIT_LIST_HEAD(&clean_list);
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
if (!obd->obd_recovering) {
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
EXIT;
return;
}
obd->obd_recovering = obd->obd_abort_recovery = 0;
target_cancel_recovery_timer(obd);
- list_splice_init(&obd->obd_req_replay_queue, &clean_list);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
- list_for_each_entry_safe(req, n, &clean_list, rq_list) {
+ cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
LASSERT(req->rq_reply_state == 0);
target_exp_dequeue_req_replay(req);
target_request_copy_put(req);
}
- spin_lock_bh(&obd->obd_processing_task_lock);
- list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
- list_splice_init(&obd->obd_final_req_queue, &clean_list);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
+ cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
- list_for_each_entry_safe(req, n, &clean_list, rq_list){
+ cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list){
LASSERT(req->rq_reply_state == 0);
target_request_copy_put(req);
}
cfs_time_t now = cfs_time_current_sec();
cfs_duration_t left;
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
if (!obd->obd_recovering || obd->obd_abort_recovery) {
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
return;
}
left = cfs_time_sub(obd->obd_recovery_end, now);
cfs_timer_arm(&obd->obd_recovery_timer, cfs_time_shift(left));
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
obd->obd_name, (unsigned)left);
}
static void check_and_start_recovery_timer(struct obd_device *obd)
{
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
if (cfs_timer_is_armed(&obd->obd_recovery_timer)) {
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
return;
}
CDEBUG(D_HA, "%s: starting recovery timer\n", obd->obd_name);
obd->obd_recovery_start = cfs_time_current_sec();
/* minimum */
obd->obd_recovery_timeout = OBD_RECOVERY_FACTOR * obd_timeout;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
reset_recovery_timer(obd, obd->obd_recovery_timeout, 0);
}
static inline int exp_req_replay_healthy(struct obd_export *exp)
{
return (!exp->exp_req_replay_needed ||
- atomic_read(&exp->exp_replay_count) > 0);
+ cfs_atomic_read(&exp->exp_replay_count) > 0);
}
/** if export done lock_replay or has replay in queue */
static inline int exp_lock_replay_healthy(struct obd_export *exp)
{
return (!exp->exp_lock_replay_needed ||
- atomic_read(&exp->exp_replay_count) > 0);
+ cfs_atomic_read(&exp->exp_replay_count) > 0);
}
static inline int exp_vbr_healthy(struct obd_export *exp)
int wake_up = 0, connected, completed, queue_len;
__u64 next_transno, req_transno;
ENTRY;
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
- if (!list_empty(&obd->obd_req_replay_queue)) {
- req = list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
+ if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
+ req = cfs_list_entry(obd->obd_req_replay_queue.next,
+ struct ptlrpc_request, rq_list);
req_transno = lustre_msg_get_transno(req->rq_reqmsg);
} else {
req_transno = 0;
}
connected = obd->obd_connected_clients;
- completed = connected - atomic_read(&obd->obd_req_replay_clients);
+ completed = connected - cfs_atomic_read(&obd->obd_req_replay_clients);
queue_len = obd->obd_requests_queued_for_recovery;
next_transno = obd->obd_next_recovery_transno;
} else if (obd->obd_recovery_expired) {
CDEBUG(D_HA, "waking for expired recovery\n");
wake_up = 1;
- } else if (atomic_read(&obd->obd_req_replay_clients) == 0) {
+ } else if (cfs_atomic_read(&obd->obd_req_replay_clients) == 0) {
CDEBUG(D_HA, "waking for completed recovery\n");
wake_up = 1;
} else if (req_transno == next_transno) {
CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
wake_up = 1;
- } else if (queue_len == atomic_read(&obd->obd_req_replay_clients)) {
+ } else if (queue_len == cfs_atomic_read(&obd->obd_req_replay_clients)) {
int d_lvl = D_HA;
/** handle gaps occured due to lost reply or VBR */
LASSERTF(req_transno >= next_transno,
obd->obd_next_recovery_transno = req_transno;
wake_up = 1;
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
return wake_up;
}
{
int wake_up = 0;
- spin_lock_bh(&obd->obd_processing_task_lock);
- if (!list_empty(&obd->obd_lock_replay_queue)) {
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
CDEBUG(D_HA, "waking for next lock\n");
wake_up = 1;
- } else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
+ } else if (cfs_atomic_read(&obd->obd_lock_replay_clients) == 0) {
CDEBUG(D_HA, "waking for completed lock replay\n");
wake_up = 1;
} else if (obd->obd_abort_recovery) {
CDEBUG(D_HA, "waking for expired recovery\n");
wake_up = 1;
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
return wake_up;
}
do {
cfs_wait_event(obd->obd_next_transno_waitq, check_routine(obd));
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
abort = obd->obd_abort_recovery;
expired = obd->obd_recovery_expired;
obd->obd_recovery_expired = 0;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
if (abort) {
CWARN("recovery is aborted, evict exports in recovery\n");
/** evict exports which didn't finish recovery yet */
/** evict cexports with no replay in queue, they are stalled */
class_disconnect_stale_exports(obd, health_check);
/** continue with VBR */
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
obd->obd_version_recov = 1;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
/**
* reset timer, recovery will proceed with versions now,
* timeout is set just to handle reconnection delays
abort_lock_replay_queue(obd);
}
- spin_lock_bh(&obd->obd_processing_task_lock);
- if (!list_empty(&obd->obd_req_replay_queue)) {
- req = list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
- list_del_init(&req->rq_list);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
+ req = cfs_list_entry(obd->obd_req_replay_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
obd->obd_requests_queued_for_recovery--;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
} else {
- spin_unlock_bh(&obd->obd_processing_task_lock);
- LASSERT(list_empty(&obd->obd_req_replay_queue));
- LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+ LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
+ LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
/** evict exports failed VBR */
class_disconnect_stale_exports(obd, exp_vbr_healthy);
}
exp_lock_replay_healthy))
abort_lock_replay_queue(obd);
- spin_lock_bh(&obd->obd_processing_task_lock);
- if (!list_empty(&obd->obd_lock_replay_queue)) {
- req = list_entry(obd->obd_lock_replay_queue.next,
- struct ptlrpc_request, rq_list);
- list_del_init(&req->rq_list);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
+ req = cfs_list_entry(obd->obd_lock_replay_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
} else {
- spin_unlock_bh(&obd->obd_processing_task_lock);
- LASSERT(list_empty(&obd->obd_lock_replay_queue));
- LASSERT(atomic_read(&obd->obd_lock_replay_clients) == 0);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+ LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
+ LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients) == 0);
/** evict exports failed VBR */
class_disconnect_stale_exports(obd, exp_vbr_healthy);
}
{
struct ptlrpc_request *req = NULL;
- spin_lock_bh(&obd->obd_processing_task_lock);
- if (!list_empty(&obd->obd_final_req_queue)) {
- req = list_entry(obd->obd_final_req_queue.next,
- struct ptlrpc_request, rq_list);
- list_del_init(&req->rq_list);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ if (!cfs_list_empty(&obd->obd_final_req_queue)) {
+ req = cfs_list_entry(obd->obd_final_req_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
if (req->rq_export->exp_in_recovery) {
- spin_lock(&req->rq_export->exp_lock);
+ cfs_spin_lock(&req->rq_export->exp_lock);
req->rq_export->exp_in_recovery = 0;
- spin_unlock(&req->rq_export->exp_lock);
+ cfs_spin_unlock(&req->rq_export->exp_lock);
}
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
return req;
}
trd->trd_processing_task = cfs_curproc_pid();
obd->obd_recovering = 1;
- complete(&trd->trd_starting);
+ cfs_complete(&trd->trd_starting);
/* first of all, we have to know the first transno to replay */
if (target_recovery_overseer(obd, check_for_clients,
delta = jiffies;
obd->obd_req_replaying = 1;
CDEBUG(D_INFO, "1: request replay stage - %d clients from t"LPU64"\n",
- atomic_read(&obd->obd_req_replay_clients),
+ cfs_atomic_read(&obd->obd_req_replay_clients),
obd->obd_next_recovery_transno);
while ((req = target_next_replay_req(obd))) {
LASSERT(trd->trd_processing_task == cfs_curproc_pid());
* bz18031: increase next_recovery_transno before
* target_request_copy_put() will drop exp_rpc reference
*/
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
obd->obd_next_recovery_transno++;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
target_exp_dequeue_req_replay(req);
target_request_copy_put(req);
obd->obd_replayed_requests++;
* The second stage: replay locks
*/
CDEBUG(D_INFO, "2: lock replay stage - %d clients\n",
- atomic_read(&obd->obd_lock_replay_clients));
+ cfs_atomic_read(&obd->obd_lock_replay_clients));
while ((req = target_next_replay_lock(obd))) {
LASSERT(trd->trd_processing_task == cfs_curproc_pid());
DEBUG_REQ(D_HA, req, "processing lock from %s: ",
lut_boot_epoch_update(lut);
/* We drop recoverying flag to forward all new requests
* to regular mds_handle() since now */
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
obd->obd_recovering = obd->obd_abort_recovery = 0;
target_cancel_recovery_timer(obd);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
while ((req = target_next_final_ping(obd))) {
LASSERT(trd->trd_processing_task == cfs_curproc_pid());
DEBUG_REQ(D_HA, req, "processing final ping from %s: ",
target_request_copy_put(req);
}
- delta = (jiffies - delta) / HZ;
+ delta = (jiffies - delta) / CFS_HZ;
CDEBUG(D_INFO,"4: recovery completed in %lus - %d/%d reqs/locks\n",
delta, obd->obd_replayed_requests, obd->obd_replayed_locks);
if (delta > obd_timeout * OBD_RECOVERY_FACTOR) {
lu_context_fini(&env.le_ctx);
trd->trd_processing_task = 0;
- complete(&trd->trd_finishing);
+ cfs_complete(&trd->trd_finishing);
RETURN(rc);
}
struct target_recovery_data *trd = &obd->obd_recovery_data;
memset(trd, 0, sizeof(*trd));
- init_completion(&trd->trd_starting);
- init_completion(&trd->trd_finishing);
+ cfs_init_completion(&trd->trd_starting);
+ cfs_init_completion(&trd->trd_finishing);
trd->trd_recovery_handler = handler;
- if (kernel_thread(target_recovery_thread, lut, 0) > 0) {
- wait_for_completion(&trd->trd_starting);
+ if (cfs_kernel_thread(target_recovery_thread, lut, 0) > 0) {
+ cfs_wait_for_completion(&trd->trd_starting);
LASSERT(obd->obd_recovering != 0);
} else
rc = -ECHILD;
void target_stop_recovery_thread(struct obd_device *obd)
{
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
if (obd->obd_recovery_data.trd_processing_task > 0) {
struct target_recovery_data *trd = &obd->obd_recovery_data;
/** recovery can be done but postrecovery is not yet */
obd->obd_abort_recovery = 1;
cfs_waitq_signal(&obd->obd_next_transno_waitq);
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
- wait_for_completion(&trd->trd_finishing);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_wait_for_completion(&trd->trd_finishing);
} else {
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
}
}
struct obd_device *obd = (struct obd_device *)castmeharder;
CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
" after %lds (%d clients connected)\n",
- obd->obd_name, atomic_read(&obd->obd_lock_replay_clients),
+ obd->obd_name, cfs_atomic_read(&obd->obd_lock_replay_clients),
cfs_time_current_sec()- obd->obd_recovery_start,
obd->obd_connected_clients);
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
obd->obd_recovery_expired = 1;
cfs_waitq_signal(&obd->obd_next_transno_waitq);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
}
void target_recovery_init(struct lu_target *lut, svc_handler_t handler)
LASSERT(exp != NULL);
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
/* client declares he's ready to replay locks */
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
if (exp->exp_req_replay_needed) {
- LASSERT(atomic_read(&obd->obd_req_replay_clients) > 0);
- spin_lock(&exp->exp_lock);
+ LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) >
+ 0);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_req_replay_needed = 0;
- spin_unlock(&exp->exp_lock);
- atomic_dec(&obd->obd_req_replay_clients);
+ cfs_spin_unlock(&exp->exp_lock);
+ cfs_atomic_dec(&obd->obd_req_replay_clients);
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
}
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
/* client declares he's ready to complete recovery
* so, we put the request on th final queue */
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
if (exp->exp_lock_replay_needed) {
- LASSERT(atomic_read(&obd->obd_lock_replay_clients) > 0);
- spin_lock(&exp->exp_lock);
+ LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients) >
+ 0);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_lock_replay_needed = 0;
- spin_unlock(&exp->exp_lock);
- atomic_dec(&obd->obd_lock_replay_clients);
+ cfs_spin_unlock(&exp->exp_lock);
+ cfs_atomic_dec(&obd->obd_lock_replay_clients);
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
}
return 0;
int target_queue_recovery_request(struct ptlrpc_request *req,
struct obd_device *obd)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
int inserted = 0;
__u64 transno = lustre_msg_get_transno(req->rq_reqmsg);
ENTRY;
* so, we put the request on th final queue */
target_request_copy_get(req);
DEBUG_REQ(D_HA, req, "queue final req");
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
cfs_waitq_signal(&obd->obd_next_transno_waitq);
if (obd->obd_recovering) {
- list_add_tail(&req->rq_list, &obd->obd_final_req_queue);
+ cfs_list_add_tail(&req->rq_list,
+ &obd->obd_final_req_queue);
} else {
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
target_request_copy_put(req);
RETURN(obd->obd_stopping ? -ENOTCONN : 1);
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
RETURN(0);
}
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
/* client declares he's ready to replay locks */
target_request_copy_get(req);
DEBUG_REQ(D_HA, req, "queue lock replay req");
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
cfs_waitq_signal(&obd->obd_next_transno_waitq);
LASSERT(obd->obd_recovering);
/* usually due to recovery abort */
if (!req->rq_export->exp_in_recovery) {
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
target_request_copy_put(req);
RETURN(-ENOTCONN);
}
LASSERT(req->rq_export->exp_lock_replay_needed);
- list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
RETURN(0);
}
RETURN(1);
}
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
/* If we're processing the queue, we want don't want to queue this
* message.
obd->obd_next_recovery_transno, transno, obd->obd_req_replaying);
if (transno < obd->obd_next_recovery_transno && obd->obd_req_replaying) {
/* Processing the queue right now, don't re-add. */
- LASSERT(list_empty(&req->rq_list));
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
RETURN(1);
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_TGT_REPLAY_DROP))
RETURN(0);
target_request_copy_get(req);
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
LASSERT(obd->obd_recovering);
if (!req->rq_export->exp_in_recovery) {
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
target_request_copy_put(req);
RETURN(-ENOTCONN);
}
LASSERT(req->rq_export->exp_req_replay_needed);
if (target_exp_enqueue_req_replay(req)) {
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
DEBUG_REQ(D_ERROR, req, "dropping resent queued req");
target_request_copy_put(req);
RETURN(0);
}
/* XXX O(n^2) */
- list_for_each(tmp, &obd->obd_req_replay_queue) {
+ cfs_list_for_each(tmp, &obd->obd_req_replay_queue) {
struct ptlrpc_request *reqiter =
- list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
if (lustre_msg_get_transno(reqiter->rq_reqmsg) > transno) {
- list_add_tail(&req->rq_list, &reqiter->rq_list);
+ cfs_list_add_tail(&req->rq_list, &reqiter->rq_list);
inserted = 1;
break;
}
transno)) {
DEBUG_REQ(D_ERROR, req, "dropping replay: transno "
"has been claimed by another client");
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
target_exp_dequeue_req_replay(req);
target_request_copy_put(req);
RETURN(0);
}
if (!inserted)
- list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
+ cfs_list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
obd->obd_requests_queued_for_recovery++;
cfs_waitq_signal(&obd->obd_next_transno_waitq);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
RETURN(0);
}
*/
obd = req->rq_export->exp_obd;
- read_lock(&obd->obd_pool_lock);
+ cfs_read_lock(&obd->obd_pool_lock);
lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv);
lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
- read_unlock(&obd->obd_pool_lock);
+ cfs_read_unlock(&obd->obd_pool_lock);
RETURN(0);
}
LASSERT (!rs->rs_handled);
LASSERT (!rs->rs_on_net);
LASSERT (rs->rs_export == NULL);
- LASSERT (list_empty(&rs->rs_obd_list));
- LASSERT (list_empty(&rs->rs_exp_list));
+ LASSERT (cfs_list_empty(&rs->rs_obd_list));
+ LASSERT (cfs_list_empty(&rs->rs_exp_list));
exp = class_export_get (req->rq_export);
obd = exp->exp_obd;
rs->rs_export = exp;
rs->rs_opc = lustre_msg_get_opc(rs->rs_msg);
- spin_lock(&exp->exp_uncommitted_replies_lock);
+ cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
rs->rs_transno, exp->exp_last_committed);
if (rs->rs_transno > exp->exp_last_committed) {
/* not committed already */
- list_add_tail(&rs->rs_obd_list,
- &exp->exp_uncommitted_replies);
+ cfs_list_add_tail(&rs->rs_obd_list,
+ &exp->exp_uncommitted_replies);
}
- spin_unlock (&exp->exp_uncommitted_replies_lock);
+ cfs_spin_unlock (&exp->exp_uncommitted_replies_lock);
- spin_lock(&exp->exp_lock);
- list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
- spin_unlock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
+ cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
+ cfs_spin_unlock(&exp->exp_lock);
netrc = target_send_reply_msg (req, rc, fail_id);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
- atomic_inc(&svc->srv_n_difficult_replies);
+ cfs_atomic_inc(&svc->srv_n_difficult_replies);
if (netrc != 0) {
/* error sending: reply is off the net. Also we need +1
* reply_out_callback leaves alone) */
rs->rs_on_net = 0;
ptlrpc_rs_addref(rs);
- atomic_inc (&svc->srv_outstanding_replies);
+ cfs_atomic_inc (&svc->srv_outstanding_replies);
}
- spin_lock(&rs->rs_lock);
+ cfs_spin_lock(&rs->rs_lock);
if (rs->rs_transno <= exp->exp_last_committed ||
(!rs->rs_on_net && !rs->rs_no_ack) ||
- list_empty(&rs->rs_exp_list) || /* completed already */
- list_empty(&rs->rs_obd_list)) {
+ cfs_list_empty(&rs->rs_exp_list) || /* completed already */
+ cfs_list_empty(&rs->rs_obd_list)) {
CDEBUG(D_HA, "Schedule reply immediately\n");
ptlrpc_dispatch_difficult_reply(rs);
} else {
- list_add (&rs->rs_list, &svc->srv_active_replies);
+ cfs_list_add (&rs->rs_list, &svc->srv_active_replies);
rs->rs_scheduled = 0; /* allow notifier to schedule */
}
- spin_unlock(&rs->rs_lock);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&rs->rs_lock);
+ cfs_spin_unlock(&svc->srv_lock);
EXIT;
}
GOTO(out, rc);
}
- down_read(&obt->obt_rwsem);
+ cfs_down_read(&obt->obt_rwsem);
if (qctxt->lqc_lqs_hash == NULL) {
- up_read(&obt->obt_rwsem);
+ cfs_up_read(&obt->obt_rwsem);
/* quota_type has not been processed yet, return EAGAIN
* until we know whether or not quotas are supposed to
* be enabled */
LASSERT(qctxt->lqc_handler);
rc = qctxt->lqc_handler(master_obd, qdata,
lustre_msg_get_opc(req->rq_reqmsg));
- up_read(&obt->obt_rwsem);
+ cfs_up_read(&obt->obt_rwsem);
if (rc && rc != -EDQUOT)
CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
"dqacq/dqrel failed! (rc:%d)\n", rc);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_dump_export_locks(struct obd_export *exp)
{
- spin_lock(&exp->exp_locks_list_guard);
- if (!list_empty(&exp->exp_locks_list)) {
+ cfs_spin_lock(&exp->exp_locks_list_guard);
+ if (!cfs_list_empty(&exp->exp_locks_list)) {
struct ldlm_lock *lock;
CERROR("dumping locks for export %p,"
"ignore if the unmount doesn't hang\n", exp);
- list_for_each_entry(lock, &exp->exp_locks_list, l_exp_refs_link)
+ cfs_list_for_each_entry(lock, &exp->exp_locks_list, l_exp_refs_link)
ldlm_lock_dump(D_ERROR, lock, 0);
}
- spin_unlock(&exp->exp_locks_list_guard);
+ cfs_spin_unlock(&exp->exp_locks_list_guard);
}
#endif
*/
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
- atomic_inc(&lock->l_refc);
+ cfs_atomic_inc(&lock->l_refc);
return lock;
}
ENTRY;
LASSERT(lock->l_resource != LP_POISON);
- LASSERT(atomic_read(&lock->l_refc) > 0);
- if (atomic_dec_and_test(&lock->l_refc)) {
+ LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
+ if (cfs_atomic_dec_and_test(&lock->l_refc)) {
struct ldlm_resource *res;
LDLM_DEBUG(lock,
res = lock->l_resource;
LASSERT(lock->l_destroyed);
- LASSERT(list_empty(&lock->l_res_link));
- LASSERT(list_empty(&lock->l_pending_chain));
+ LASSERT(cfs_list_empty(&lock->l_res_link));
+ LASSERT(cfs_list_empty(&lock->l_pending_chain));
- atomic_dec(&res->lr_namespace->ns_locks);
+ cfs_atomic_dec(&res->lr_namespace->ns_locks);
lu_ref_del(&res->lr_reference, "lock", lock);
ldlm_resource_putref(res);
lock->l_resource = NULL;
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
int rc = 0;
- if (!list_empty(&lock->l_lru)) {
+ if (!cfs_list_empty(&lock->l_lru)) {
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- list_del_init(&lock->l_lru);
+ cfs_list_del_init(&lock->l_lru);
LASSERT(ns->ns_nr_unused > 0);
ns->ns_nr_unused--;
rc = 1;
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
int rc;
ENTRY;
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
rc = ldlm_lock_remove_from_lru_nolock(lock);
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
EXIT;
return rc;
}
{
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
lock->l_last_used = cfs_time_current();
- LASSERT(list_empty(&lock->l_lru));
+ LASSERT(cfs_list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+ cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
LASSERT(ns->ns_nr_unused >= 0);
ns->ns_nr_unused++;
}
{
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
ENTRY;
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
ldlm_lock_add_to_lru_nolock(lock);
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
EXIT;
}
{
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
ENTRY;
- spin_lock(&ns->ns_unused_lock);
- if (!list_empty(&lock->l_lru)) {
+ cfs_spin_lock(&ns->ns_unused_lock);
+ if (!cfs_list_empty(&lock->l_lru)) {
ldlm_lock_remove_from_lru_nolock(lock);
ldlm_lock_add_to_lru_nolock(lock);
}
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
EXIT;
}
LBUG();
}
- if (!list_empty(&lock->l_res_link)) {
+ if (!cfs_list_empty(&lock->l_res_link)) {
LDLM_ERROR(lock, "lock still on resource");
ldlm_lock_dump(D_ERROR, lock, 0);
LBUG();
}
if (lock->l_destroyed) {
- LASSERT(list_empty(&lock->l_lru));
+ LASSERT(cfs_list_empty(&lock->l_lru));
EXIT;
return 0;
}
lock->l_destroyed = 1;
if (lock->l_export && lock->l_export->exp_lock_hash &&
- !hlist_unhashed(&lock->l_exp_hash))
+ !cfs_hlist_unhashed(&lock->l_exp_hash))
cfs_hash_del(lock->l_export->exp_lock_hash,
&lock->l_remote_handle, &lock->l_exp_hash);
if (lock == NULL)
RETURN(NULL);
- spin_lock_init(&lock->l_lock);
+ cfs_spin_lock_init(&lock->l_lock);
lock->l_resource = ldlm_resource_getref(resource);
lu_ref_add(&resource->lr_reference, "lock", lock);
- atomic_set(&lock->l_refc, 2);
+ cfs_atomic_set(&lock->l_refc, 2);
CFS_INIT_LIST_HEAD(&lock->l_res_link);
CFS_INIT_LIST_HEAD(&lock->l_lru);
CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
- atomic_inc(&resource->lr_namespace->ns_locks);
+ cfs_atomic_inc(&resource->lr_namespace->ns_locks);
CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
class_handle_hash(&lock->l_handle, lock_handle_addref);
CFS_INIT_LIST_HEAD(&lock->l_extents_list);
- spin_lock_init(&lock->l_extents_list_lock);
+ cfs_spin_lock_init(&lock->l_extents_list_lock);
CFS_INIT_LIST_HEAD(&lock->l_cache_locks_list);
lu_ref_init(&lock->l_reference);
lu_ref_add(&lock->l_reference, "hash", lock);
LASSERT(new_resid->name[0] != 0);
/* This function assumes that the lock isn't on any lists */
- LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(cfs_list_empty(&lock->l_res_link));
type = oldres->lr_type;
unlock_res_and_lock(lock);
* lock->l_lock, and are taken in the memory address order to avoid
* dead-locks.
*/
- spin_lock(&lock->l_lock);
+ cfs_spin_lock(&lock->l_lock);
oldres = lock->l_resource;
if (oldres < newres) {
lock_res(oldres);
}
void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list)
+ cfs_list_t *work_list)
{
if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
* discard dirty data, rather than writing back. */
if (new->l_flags & LDLM_AST_DISCARD_DATA)
lock->l_flags |= LDLM_FL_DISCARD_DATA;
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, work_list);
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
LASSERT(lock->l_blocking_lock == NULL);
lock->l_blocking_lock = LDLM_LOCK_GET(new);
}
}
-void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
+void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
{
if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
lock->l_flags |= LDLM_FL_CP_REQD;
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
- LASSERT(list_empty(&lock->l_cp_ast));
- list_add(&lock->l_cp_ast, work_list);
+ LASSERT(cfs_list_empty(&lock->l_cp_ast));
+ cfs_list_add(&lock->l_cp_ast, work_list);
LDLM_LOCK_GET(lock);
}
}
/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list)
+ cfs_list_t *work_list)
{
ENTRY;
check_res_locked(lock->l_resource);
}
struct sl_insert_point {
- struct list_head *res_link;
- struct list_head *mode_link;
- struct list_head *policy_link;
+ cfs_list_t *res_link;
+ cfs_list_t *mode_link;
+ cfs_list_t *policy_link;
};
/*
* NOTE: called by
* - ldlm_grant_lock_with_skiplist
*/
-static void search_granted_lock(struct list_head *queue,
+static void search_granted_lock(cfs_list_t *queue,
struct ldlm_lock *req,
struct sl_insert_point *prev)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lock, *mode_end, *policy_end;
ENTRY;
- list_for_each(tmp, queue) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, queue) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
- mode_end = list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
- l_sl_mode);
+ mode_end = cfs_list_entry(lock->l_sl_mode.prev,
+ struct ldlm_lock, l_sl_mode);
if (lock->l_req_mode != req->l_req_mode) {
/* jump to last lock of mode group */
return;
} else if (lock->l_resource->lr_type == LDLM_IBITS) {
for (;;) {
- policy_end = list_entry(lock->l_sl_policy.prev,
- struct ldlm_lock,
- l_sl_policy);
+ policy_end =
+ cfs_list_entry(lock->l_sl_policy.prev,
+ struct ldlm_lock,
+ l_sl_policy);
if (lock->l_policy_data.l_inodebits.bits ==
req->l_policy_data.l_inodebits.bits) {
/* go to next policy group within mode group */
tmp = policy_end->l_res_link.next;
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
} /* loop over policy groups within the mode group */
/* insert point is last lock of the mode group,
return;
}
- LASSERT(list_empty(&lock->l_res_link));
- LASSERT(list_empty(&lock->l_sl_mode));
- LASSERT(list_empty(&lock->l_sl_policy));
+ LASSERT(cfs_list_empty(&lock->l_res_link));
+ LASSERT(cfs_list_empty(&lock->l_sl_mode));
+ LASSERT(cfs_list_empty(&lock->l_sl_policy));
- list_add(&lock->l_res_link, prev->res_link);
- list_add(&lock->l_sl_mode, prev->mode_link);
- list_add(&lock->l_sl_policy, prev->policy_link);
+ cfs_list_add(&lock->l_res_link, prev->res_link);
+ cfs_list_add(&lock->l_sl_mode, prev->mode_link);
+ cfs_list_add(&lock->l_sl_policy, prev->policy_link);
EXIT;
}
*
* must be called with lr_lock held
*/
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
+void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
ENTRY;
/* returns a referenced lock or NULL. See the flag descriptions below, in the
* comment above ldlm_lock_match */
-static struct ldlm_lock *search_queue(struct list_head *queue,
+static struct ldlm_lock *search_queue(cfs_list_t *queue,
ldlm_mode_t *mode,
ldlm_policy_data_t *policy,
struct ldlm_lock *old_lock,
int flags, int unref)
{
struct ldlm_lock *lock;
- struct list_head *tmp;
+ cfs_list_t *tmp;
- list_for_each(tmp, queue) {
+ cfs_list_for_each(tmp, queue) {
ldlm_mode_t match;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (lock == old_lock)
break;
}
/* Must be called with namespace taken: queue is waiting or converting. */
-int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
- struct list_head *work_list)
+int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
+ cfs_list_t *work_list)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
ldlm_processing_policy policy;
int flags;
int rc = LDLM_ITER_CONTINUE;
policy = ldlm_processing_policy_table[res->lr_type];
LASSERT(policy);
- list_for_each_safe(tmp, pos, queue) {
+ cfs_list_for_each_safe(tmp, pos, queue) {
struct ldlm_lock *pending;
- pending = list_entry(tmp, struct ldlm_lock, l_res_link);
+ pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
}
static int
-ldlm_work_bl_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_bl_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
struct ldlm_lock_desc d;
- struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_bl_ast);
+ struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_bl_ast);
ENTRY;
/* nobody should touch l_bl_ast */
lock_res_and_lock(lock);
- list_del_init(&lock->l_bl_ast);
+ cfs_list_del_init(&lock->l_bl_ast);
LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
LASSERT(lock->l_bl_ast_run == 0);
}
static int
-ldlm_work_cp_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_cp_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
- struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_cp_ast);
+ struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock, l_cp_ast);
ldlm_completion_callback completion_callback;
int rc = 0;
ENTRY;
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
- list_del_init(&lock->l_cp_ast);
+ cfs_list_del_init(&lock->l_cp_ast);
LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225 */
}
static int
-ldlm_work_revoke_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_revoke_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
struct ldlm_lock_desc desc;
- struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_rk_ast);
+ struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_rk_ast);
ENTRY;
- list_del_init(&lock->l_rk_ast);
+ cfs_list_del_init(&lock->l_rk_ast);
/* the desc just pretend to exclusive */
ldlm_lock2desc(lock, &desc);
RETURN(1);
}
-int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type)
+int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type)
{
struct ldlm_cb_set_arg arg;
- struct list_head *tmp, *pos;
- int (*work_ast_lock)(struct list_head *tmp,struct ldlm_cb_set_arg *arg);
+ cfs_list_t *tmp, *pos;
+ int (*work_ast_lock)(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg);
int ast_count;
ENTRY;
- if (list_empty(rpc_list))
+ if (cfs_list_empty(rpc_list))
RETURN(0);
arg.set = ptlrpc_prep_set();
if (NULL == arg.set)
RETURN(-ERESTART);
- atomic_set(&arg.restart, 0);
+ cfs_atomic_set(&arg.restart, 0);
switch (ast_type) {
case LDLM_WORK_BL_AST:
arg.type = LDLM_BL_CALLBACK;
}
ast_count = 0;
- list_for_each_safe(tmp, pos, rpc_list) {
+ cfs_list_for_each_safe(tmp, pos, rpc_list) {
ast_count += work_ast_lock(tmp, &arg);
/* Send the request set if it exceeds the PARALLEL_AST_LIMIT,
* write memory leaking. */
ptlrpc_set_destroy(arg.set);
- RETURN(atomic_read(&arg.restart) ? -ERESTART : 0);
+ RETURN(cfs_atomic_read(&arg.restart) ? -ERESTART : 0);
}
static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
int i, rc;
if (ns == NULL)
return;
ENTRY;
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
for (i = 0; i < RES_HASH_SIZE; i++) {
tmp = ns->ns_hash[i].next;
while (tmp != &(ns->ns_hash[i])) {
struct ldlm_resource *res =
- list_entry(tmp, struct ldlm_resource, lr_hash);
+ cfs_list_entry(tmp, struct ldlm_resource,
+ lr_hash);
ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
LDLM_RESOURCE_ADDREF(res);
rc = reprocess_one_queue(res, NULL);
LDLM_RESOURCE_DELREF(res);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
}
}
out:
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
EXIT;
}
rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
if (rc == -ERESTART) {
- LASSERT(list_empty(&rpc_list));
+ LASSERT(cfs_list_empty(&rpc_list));
goto restart;
}
EXIT;
req->l_resource->lr_type != LDLM_IBITS)
return;
- list_del_init(&req->l_sl_policy);
- list_del_init(&req->l_sl_mode);
+ cfs_list_del_init(&req->l_sl_policy);
+ cfs_list_del_init(&req->l_sl_mode);
}
void ldlm_lock_cancel(struct ldlm_lock *lock)
}
CDEBUG(level," -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
- lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock, lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
pos, lock->l_pid);
if (lock->l_conn_export != NULL)
obd = lock->l_conn_export->exp_obd;
CDEBUG(level, " Req mode: %s, grant mode: %s, rc: %u, read: %d, "
"write: %d flags: "LPX64"\n", ldlm_lockname[lock->l_req_mode],
ldlm_lockname[lock->l_granted_mode],
- atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
+ cfs_atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
lock->l_flags);
if (lock->l_resource->lr_type == LDLM_EXTENT)
CDEBUG(level, " Extent: "LPU64" -> "LPU64
" ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
"res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" remote: "
LPX64" expref: %d pid: %u timeout: %lu\n", lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
va_end(args);
return;
"] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
" expref: %d pid: %u timeout %lu\n",
lock->l_resource->lr_namespace->ns_name, lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_resource->lr_name.name[0],
lock->l_resource->lr_name.name[1],
- atomic_read(&lock->l_resource->lr_refcount),
+ cfs_atomic_read(&lock->l_resource->lr_refcount),
ldlm_typename[lock->l_resource->lr_type],
lock->l_policy_data.l_extent.start,
lock->l_policy_data.l_extent.end,
lock->l_req_extent.start, lock->l_req_extent.end,
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
"["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
" expref: %d pid: %u timeout: %lu\n",
lock->l_resource->lr_namespace->ns_name, lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_resource->lr_name.name[0],
lock->l_resource->lr_name.name[1],
- atomic_read(&lock->l_resource->lr_refcount),
+ cfs_atomic_read(&lock->l_resource->lr_refcount),
ldlm_typename[lock->l_resource->lr_type],
lock->l_policy_data.l_flock.pid,
lock->l_policy_data.l_flock.start,
lock->l_policy_data.l_flock.end,
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
"pid: %u timeout: %lu\n",
lock->l_resource->lr_namespace->ns_name,
lock, lock->l_handle.h_cookie,
- atomic_read (&lock->l_refc),
+ cfs_atomic_read (&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_resource->lr_name.name[0],
lock->l_resource->lr_name.name[1],
lock->l_policy_data.l_inodebits.bits,
- atomic_read(&lock->l_resource->lr_refcount),
+ cfs_atomic_read(&lock->l_resource->lr_refcount),
ldlm_typename[lock->l_resource->lr_type],
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
"remote: "LPX64" expref: %d pid: %u timeout %lu\n",
lock->l_resource->lr_namespace->ns_name,
lock, lock->l_handle.h_cookie,
- atomic_read (&lock->l_refc),
+ cfs_atomic_read (&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_resource->lr_name.name[0],
lock->l_resource->lr_name.name[1],
- atomic_read(&lock->l_resource->lr_refcount),
+ cfs_atomic_read(&lock->l_resource->lr_refcount),
ldlm_typename[lock->l_resource->lr_type],
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
}
extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
-static struct semaphore ldlm_ref_sem;
+static cfs_semaphore_t ldlm_ref_sem;
static int ldlm_refcount;
/* LDLM state */
#ifdef __KERNEL__
/* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
-static spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
-static struct list_head waiting_locks_list;
+static cfs_spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
+static cfs_list_t waiting_locks_list;
static cfs_timer_t waiting_locks_timer;
static struct expired_lock_thread {
cfs_waitq_t elt_waitq;
int elt_state;
int elt_dump;
- struct list_head elt_expired_locks;
+ cfs_list_t elt_expired_locks;
} expired_lock_thread;
#endif
#define ELT_TERMINATE 2
struct ldlm_bl_pool {
- spinlock_t blp_lock;
+ cfs_spinlock_t blp_lock;
/*
* blp_prio_list is used for callbacks that should be handled
* as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
* see bug 13843
*/
- struct list_head blp_prio_list;
+ cfs_list_t blp_prio_list;
/*
* blp_list is used for all other callbacks which are likely
* to take longer to process.
*/
- struct list_head blp_list;
+ cfs_list_t blp_list;
cfs_waitq_t blp_waitq;
- struct completion blp_comp;
- atomic_t blp_num_threads;
- atomic_t blp_busy_threads;
+ cfs_completion_t blp_comp;
+ cfs_atomic_t blp_num_threads;
+ cfs_atomic_t blp_busy_threads;
int blp_min_threads;
int blp_max_threads;
};
struct ldlm_bl_work_item {
- struct list_head blwi_entry;
- struct ldlm_namespace *blwi_ns;
+ cfs_list_t blwi_entry;
+ struct ldlm_namespace *blwi_ns;
struct ldlm_lock_desc blwi_ld;
- struct ldlm_lock *blwi_lock;
- struct list_head blwi_head;
+ struct ldlm_lock *blwi_lock;
+ cfs_list_t blwi_head;
int blwi_count;
};
int need_to_run;
ENTRY;
- spin_lock_bh(&waiting_locks_spinlock);
- need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
+ need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
RETURN(need_to_run);
}
static int expired_lock_main(void *arg)
{
- struct list_head *expired = &expired_lock_thread.elt_expired_locks;
+ cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
struct l_wait_info lwi = { 0 };
int do_dump;
expired_lock_thread.elt_state == ELT_TERMINATE,
&lwi);
- spin_lock_bh(&waiting_locks_spinlock);
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
if (expired_lock_thread.elt_dump) {
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
/* from waiting_locks_callback, but not in timer */
libcfs_debug_dumplog();
"waiting_locks_callback",
expired_lock_thread.elt_dump);
- spin_lock_bh(&waiting_locks_spinlock);
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
expired_lock_thread.elt_dump = 0;
}
do_dump = 0;
- while (!list_empty(expired)) {
+ while (!cfs_list_empty(expired)) {
struct obd_export *export;
struct ldlm_lock *lock;
- lock = list_entry(expired->next, struct ldlm_lock,
+ lock = cfs_list_entry(expired->next, struct ldlm_lock,
l_pending_chain);
if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
(void *)lock >= LP_POISON) {
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
CERROR("free lock on elt list %p\n", lock);
LBUG();
}
- list_del_init(&lock->l_pending_chain);
+ cfs_list_del_init(&lock->l_pending_chain);
if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
(void *)lock->l_export >= LP_POISON) {
CERROR("lock with free export on elt list %p\n",
continue;
}
export = class_export_lock_get(lock->l_export, lock);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
do_dump++;
class_fail_export(export);
/* release extra ref grabbed by ldlm_add_waiting_lock()
* or ldlm_failed_ast() */
LDLM_LOCK_RELEASE(lock);
-
- spin_lock_bh(&waiting_locks_spinlock);
+
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
}
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
if (do_dump && obd_dump_on_eviction) {
CERROR("dump the log upon eviction\n");
if (lock->l_export == NULL)
return 0;
- spin_lock(&lock->l_export->exp_lock);
- list_for_each_entry(req, &lock->l_export->exp_queued_rpc, rq_exp_list) {
+ cfs_spin_lock(&lock->l_export->exp_lock);
+ cfs_list_for_each_entry(req, &lock->l_export->exp_queued_rpc,
+ rq_exp_list) {
if (req->rq_ops->hpreq_lock_match) {
match = req->rq_ops->hpreq_lock_match(req, lock);
if (match)
break;
}
}
- spin_unlock(&lock->l_export->exp_lock);
+ cfs_spin_unlock(&lock->l_export->exp_lock);
RETURN(match);
}
struct ldlm_lock *lock, *last = NULL;
repeat:
- spin_lock_bh(&waiting_locks_spinlock);
- while (!list_empty(&waiting_locks_list)) {
- lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
- l_pending_chain);
- if (cfs_time_after(lock->l_callback_timeout, cfs_time_current()) ||
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
+ while (!cfs_list_empty(&waiting_locks_list)) {
+ lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
+ l_pending_chain);
+ if (cfs_time_after(lock->l_callback_timeout,
+ cfs_time_current()) ||
(lock->l_req_mode == LCK_GROUP))
break;
lock->l_export->exp_connection->c_remote_uuid.uuid,
libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
- list_del_init(&lock->l_pending_chain);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_list_del_init(&lock->l_pending_chain);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
ldlm_add_waiting_lock(lock);
goto repeat;
}
lock->l_export->exp_connection->c_remote_uuid.uuid,
libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
- list_del_init(&lock->l_pending_chain);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_list_del_init(&lock->l_pending_chain);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
ldlm_add_waiting_lock(lock);
goto repeat;
}
LDLM_LOCK_GET(lock);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
LDLM_DEBUG(lock, "prolong the busy lock");
ldlm_refresh_waiting_lock(lock,
ldlm_get_enq_timeout(lock));
- spin_lock_bh(&waiting_locks_spinlock);
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
if (!cont) {
LDLM_LOCK_RELEASE(lock);
/* no needs to take an extra ref on the lock since it was in
* the waiting_locks_list and ldlm_add_waiting_lock()
* already grabbed a ref */
- list_del(&lock->l_pending_chain);
- list_add(&lock->l_pending_chain,
- &expired_lock_thread.elt_expired_locks);
+ cfs_list_del(&lock->l_pending_chain);
+ cfs_list_add(&lock->l_pending_chain,
+ &expired_lock_thread.elt_expired_locks);
}
- if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
+ if (!cfs_list_empty(&expired_lock_thread.elt_expired_locks)) {
if (obd_dump_on_timeout)
expired_lock_thread.elt_dump = __LINE__;
* Make sure the timer will fire again if we have any locks
* left.
*/
- if (!list_empty(&waiting_locks_list)) {
+ if (!cfs_list_empty(&waiting_locks_list)) {
cfs_time_t timeout_rounded;
- lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
- l_pending_chain);
+ lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
+ l_pending_chain);
timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
}
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
}
/*
cfs_time_t timeout;
cfs_time_t timeout_rounded;
- if (!list_empty(&lock->l_pending_chain))
+ if (!cfs_list_empty(&lock->l_pending_chain))
return 0;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
}
/* if the new lock has a shorter timeout than something earlier on
the list, we'll wait the longer amount of time; no big deal. */
- list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
+ /* FIFO */
+ cfs_list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
return 1;
}
LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
- spin_lock_bh(&waiting_locks_spinlock);
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
if (lock->l_destroyed) {
static cfs_time_t next;
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
if (cfs_time_after(cfs_time_current(), next)) {
next = cfs_time_shift(14400);
/* grab ref on the lock if it has been added to the
* waiting list */
LDLM_LOCK_GET(lock);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
ret == 0 ? "not re-" : "", timeout,
*/
static int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
- struct list_head *list_next;
+ cfs_list_t *list_next;
- if (list_empty(&lock->l_pending_chain))
+ if (cfs_list_empty(&lock->l_pending_chain))
return 0;
list_next = lock->l_pending_chain.next;
cfs_timer_disarm(&waiting_locks_timer);
} else {
struct ldlm_lock *next;
- next = list_entry(list_next, struct ldlm_lock,
- l_pending_chain);
+ next = cfs_list_entry(list_next, struct ldlm_lock,
+ l_pending_chain);
cfs_timer_arm(&waiting_locks_timer,
round_timeout(next->l_callback_timeout));
}
}
- list_del_init(&lock->l_pending_chain);
+ cfs_list_del_init(&lock->l_pending_chain);
return 1;
}
return 0;
}
- spin_lock_bh(&waiting_locks_spinlock);
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
ret = __ldlm_del_waiting_lock(lock);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
if (ret)
/* release lock ref if it has indeed been removed
* from a list */
return 0;
}
- spin_lock_bh(&waiting_locks_spinlock);
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
- if (list_empty(&lock->l_pending_chain)) {
- spin_unlock_bh(&waiting_locks_spinlock);
+ if (cfs_list_empty(&lock->l_pending_chain)) {
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
LDLM_DEBUG(lock, "wasn't waiting");
return 0;
}
* release/take a lock reference */
__ldlm_del_waiting_lock(lock);
__ldlm_add_waiting_lock(lock, timeout);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
LDLM_DEBUG(lock, "refreshed");
return 1;
if (obd_dump_on_timeout)
libcfs_debug_dumplog();
#ifdef __KERNEL__
- spin_lock_bh(&waiting_locks_spinlock);
+ cfs_spin_lock_bh(&waiting_locks_spinlock);
if (__ldlm_del_waiting_lock(lock) == 0)
/* the lock was not in any list, grab an extra ref before adding
* the lock to the expired list */
LDLM_LOCK_GET(lock);
- list_add(&lock->l_pending_chain, &expired_lock_thread.elt_expired_locks);
+ cfs_list_add(&lock->l_pending_chain,
+ &expired_lock_thread.elt_expired_locks);
cfs_waitq_signal(&expired_lock_thread.elt_waitq);
- spin_unlock_bh(&waiting_locks_spinlock);
+ cfs_spin_unlock_bh(&waiting_locks_spinlock);
#else
class_fail_export(lock->l_export);
#endif
LDLM_LOCK_RELEASE(lock);
if (rc == -ERESTART)
- atomic_set(&arg->restart, 1);
+ cfs_atomic_set(&arg->restart, 1);
RETURN(0);
}
if (rc == 0)
/* If we cancelled the lock, we need to restart
* ldlm_reprocess_queue */
- atomic_set(&arg->restart, 1);
+ cfs_atomic_set(&arg->restart, 1);
} else {
LDLM_LOCK_GET(lock);
ptlrpc_set_add_req(arg->set, req);
RETURN_EXIT;
}
- spin_lock(&lock->l_export->exp_lock);
- list_for_each_entry(req, &lock->l_export->exp_queued_rpc, rq_exp_list) {
+ cfs_spin_lock(&lock->l_export->exp_lock);
+ cfs_list_for_each_entry(req, &lock->l_export->exp_queued_rpc,
+ rq_exp_list) {
if (!req->rq_hp && req->rq_ops->hpreq_lock_match &&
req->rq_ops->hpreq_lock_match(req, lock))
ptlrpc_hpreq_reorder(req);
}
- spin_unlock(&lock->l_export->exp_lock);
+ cfs_spin_unlock(&lock->l_export->exp_lock);
EXIT;
}
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
while (to > 0) {
- cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, to);
+ cfs_schedule_timeout_and_set_state(
+ CFS_TASK_INTERRUPTIBLE, to);
if (lock->l_granted_mode == lock->l_req_mode ||
lock->l_destroyed)
break;
#ifdef __KERNEL__
static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
- struct list_head *cancels, int count)
+ cfs_list_t *cancels, int count)
{
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
struct ldlm_bl_work_item *blwi;
if (ld != NULL)
blwi->blwi_ld = *ld;
if (count) {
- list_add(&blwi->blwi_head, cancels);
- list_del_init(cancels);
+ cfs_list_add(&blwi->blwi_head, cancels);
+ cfs_list_del_init(cancels);
blwi->blwi_count = count;
} else {
blwi->blwi_lock = lock;
}
- spin_lock(&blp->blp_lock);
+ cfs_spin_lock(&blp->blp_lock);
if (lock && lock->l_flags & LDLM_FL_DISCARD_DATA) {
/* add LDLM_FL_DISCARD_DATA requests to the priority list */
- list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+ cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
} else {
/* other blocking callbacks are added to the regular list */
- list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+ cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
}
cfs_waitq_signal(&blp->blp_waitq);
- spin_unlock(&blp->blp_lock);
+ cfs_spin_unlock(&blp->blp_lock);
RETURN(0);
}
}
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct list_head *cancels, int count)
+ cfs_list_t *cancels, int count)
{
#ifdef __KERNEL__
RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count));
void ldlm_revoke_lock_cb(void *obj, void *data)
{
- struct list_head *rpc_list = data;
+ cfs_list_t *rpc_list = data;
struct ldlm_lock *lock = obj;
lock_res_and_lock(lock);
lock->l_flags |= LDLM_FL_AST_SENT;
if (lock->l_export && lock->l_export->exp_lock_hash &&
- !hlist_unhashed(&lock->l_exp_hash))
+ !cfs_hlist_unhashed(&lock->l_exp_hash))
cfs_hash_del(lock->l_export->exp_lock_hash,
&lock->l_remote_handle, &lock->l_exp_hash);
- list_add_tail(&lock->l_rk_ast, rpc_list);
+ cfs_list_add_tail(&lock->l_rk_ast, rpc_list);
LDLM_LOCK_GET(lock);
unlock_res_and_lock(lock);
void ldlm_revoke_export_locks(struct obd_export *exp)
{
- struct list_head rpc_list;
+ cfs_list_t rpc_list;
ENTRY;
CFS_INIT_LIST_HEAD(&rpc_list);
struct ldlm_bl_work_item *blwi = NULL;
static unsigned int num_bl = 0;
- spin_lock(&blp->blp_lock);
+ cfs_spin_lock(&blp->blp_lock);
/* process a request from the blp_list at least every blp_num_threads */
- if (!list_empty(&blp->blp_list) &&
- (list_empty(&blp->blp_prio_list) || num_bl == 0))
- blwi = list_entry(blp->blp_list.next,
- struct ldlm_bl_work_item, blwi_entry);
+ if (!cfs_list_empty(&blp->blp_list) &&
+ (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
+ blwi = cfs_list_entry(blp->blp_list.next,
+ struct ldlm_bl_work_item, blwi_entry);
else
- if (!list_empty(&blp->blp_prio_list))
- blwi = list_entry(blp->blp_prio_list.next,
- struct ldlm_bl_work_item, blwi_entry);
+ if (!cfs_list_empty(&blp->blp_prio_list))
+ blwi = cfs_list_entry(blp->blp_prio_list.next,
+ struct ldlm_bl_work_item,
+ blwi_entry);
if (blwi) {
- if (++num_bl >= atomic_read(&blp->blp_num_threads))
+ if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
num_bl = 0;
- list_del(&blwi->blwi_entry);
+ cfs_list_del(&blwi->blwi_entry);
}
- spin_unlock(&blp->blp_lock);
+ cfs_spin_unlock(&blp->blp_lock);
return blwi;
}
struct ldlm_bl_thread_data {
char bltd_name[CFS_CURPROC_COMM_MAX];
struct ldlm_bl_pool *bltd_blp;
- struct completion bltd_comp;
+ cfs_completion_t bltd_comp;
int bltd_num;
};
struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
int rc;
- init_completion(&bltd.bltd_comp);
+ cfs_init_completion(&bltd.bltd_comp);
rc = cfs_kernel_thread(ldlm_bl_thread_main, &bltd, 0);
if (rc < 0) {
CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
- atomic_read(&blp->blp_num_threads), rc);
+ cfs_atomic_read(&blp->blp_num_threads), rc);
return rc;
}
- wait_for_completion(&bltd.bltd_comp);
+ cfs_wait_for_completion(&bltd.bltd_comp);
return 0;
}
blp = bltd->bltd_blp;
- bltd->bltd_num = atomic_inc_return(&blp->blp_num_threads) - 1;
- atomic_inc(&blp->blp_busy_threads);
+ bltd->bltd_num =
+ cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
+ cfs_atomic_inc(&blp->blp_busy_threads);
snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
"ldlm_bl_%02d", bltd->bltd_num);
cfs_daemonize(bltd->bltd_name);
- complete(&bltd->bltd_comp);
+ cfs_complete(&bltd->bltd_comp);
/* cannot use bltd after this, it is only on caller's stack */
}
if (blwi == NULL) {
int busy;
- atomic_dec(&blp->blp_busy_threads);
+ cfs_atomic_dec(&blp->blp_busy_threads);
l_wait_event_exclusive(blp->blp_waitq,
(blwi = ldlm_bl_get_work(blp)) != NULL,
&lwi);
- busy = atomic_inc_return(&blp->blp_busy_threads);
+ busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
if (blwi->blwi_ns == NULL)
/* added by ldlm_cleanup() */
/* Not fatal if racy and have a few too many threads */
if (unlikely(busy < blp->blp_max_threads &&
- busy >= atomic_read(&blp->blp_num_threads)))
+ busy >= cfs_atomic_read(&blp->blp_num_threads)))
/* discard the return value, we tried */
ldlm_bl_thread_start(blp);
} else {
OBD_FREE(blwi, sizeof(*blwi));
}
- atomic_dec(&blp->blp_busy_threads);
- atomic_dec(&blp->blp_num_threads);
- complete(&blp->blp_comp);
+ cfs_atomic_dec(&blp->blp_busy_threads);
+ cfs_atomic_dec(&blp->blp_num_threads);
+ cfs_complete(&blp->blp_comp);
RETURN(0);
}
{
int rc = 0;
ENTRY;
- mutex_down(&ldlm_ref_sem);
+ cfs_mutex_down(&ldlm_ref_sem);
if (++ldlm_refcount == 1) {
rc = ldlm_setup();
if (rc)
ldlm_refcount--;
}
- mutex_up(&ldlm_ref_sem);
+ cfs_mutex_up(&ldlm_ref_sem);
RETURN(rc);
}
void ldlm_put_ref(void)
{
ENTRY;
- mutex_down(&ldlm_ref_sem);
+ cfs_mutex_down(&ldlm_ref_sem);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup();
if (rc)
} else {
ldlm_refcount--;
}
- mutex_up(&ldlm_ref_sem);
+ cfs_mutex_up(&ldlm_ref_sem);
EXIT;
}
}
static void *
-ldlm_export_lock_key(struct hlist_node *hnode)
+ldlm_export_lock_key(cfs_hlist_node_t *hnode)
{
struct ldlm_lock *lock;
ENTRY;
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
RETURN(&lock->l_remote_handle);
}
static int
-ldlm_export_lock_compare(void *key, struct hlist_node *hnode)
+ldlm_export_lock_compare(void *key, cfs_hlist_node_t *hnode)
{
ENTRY;
RETURN(lustre_handle_equal(ldlm_export_lock_key(hnode), key));
}
static void *
-ldlm_export_lock_get(struct hlist_node *hnode)
+ldlm_export_lock_get(cfs_hlist_node_t *hnode)
{
struct ldlm_lock *lock;
ENTRY;
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
LDLM_LOCK_GET(lock);
RETURN(lock);
}
static void *
-ldlm_export_lock_put(struct hlist_node *hnode)
+ldlm_export_lock_put(cfs_hlist_node_t *hnode)
{
struct ldlm_lock *lock;
ENTRY;
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
LDLM_LOCK_RELEASE(lock);
RETURN(lock);
GOTO(out_proc, rc = -ENOMEM);
ldlm_state->ldlm_bl_pool = blp;
- spin_lock_init(&blp->blp_lock);
+ cfs_spin_lock_init(&blp->blp_lock);
CFS_INIT_LIST_HEAD(&blp->blp_list);
CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
cfs_waitq_init(&blp->blp_waitq);
- atomic_set(&blp->blp_num_threads, 0);
- atomic_set(&blp->blp_busy_threads, 0);
+ cfs_atomic_set(&blp->blp_num_threads, 0);
+ cfs_atomic_set(&blp->blp_busy_threads, 0);
blp->blp_min_threads = ldlm_min_threads;
blp->blp_max_threads = ldlm_max_threads;
cfs_waitq_init(&expired_lock_thread.elt_waitq);
CFS_INIT_LIST_HEAD(&waiting_locks_list);
- spin_lock_init(&waiting_locks_spinlock);
+ cfs_spin_lock_init(&waiting_locks_spinlock);
cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
rc = cfs_kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FILES);
GOTO(out_thread, rc);
}
- wait_event(expired_lock_thread.elt_waitq,
- expired_lock_thread.elt_state == ELT_READY);
+ cfs_wait_event(expired_lock_thread.elt_waitq,
+ expired_lock_thread.elt_state == ELT_READY);
#endif
#ifdef __KERNEL__
#endif
ENTRY;
- if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
- !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
+ if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
+ !cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
CERROR("ldlm still has namespaces; clean these up first.\n");
ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
#endif
#ifdef __KERNEL__
- while (atomic_read(&blp->blp_num_threads) > 0) {
+ while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
- init_completion(&blp->blp_comp);
+ cfs_init_completion(&blp->blp_comp);
- spin_lock(&blp->blp_lock);
- list_add_tail(&blwi.blwi_entry, &blp->blp_list);
+ cfs_spin_lock(&blp->blp_lock);
+ cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
cfs_waitq_signal(&blp->blp_waitq);
- spin_unlock(&blp->blp_lock);
+ cfs_spin_unlock(&blp->blp_lock);
- wait_for_completion(&blp->blp_comp);
+ cfs_wait_for_completion(&blp->blp_comp);
}
OBD_FREE(blp, sizeof(*blp));
expired_lock_thread.elt_state = ELT_TERMINATE;
cfs_waitq_signal(&expired_lock_thread.elt_waitq);
- wait_event(expired_lock_thread.elt_waitq,
- expired_lock_thread.elt_state == ELT_STOPPED);
+ cfs_wait_event(expired_lock_thread.elt_waitq,
+ expired_lock_thread.elt_state == ELT_STOPPED);
#else
ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
int __init ldlm_init(void)
{
- init_mutex(&ldlm_ref_sem);
- init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
- init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ cfs_init_mutex(&ldlm_ref_sem);
+ cfs_init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
+ cfs_init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
sizeof(struct ldlm_resource), 0,
- SLAB_HWCACHE_ALIGN);
+ CFS_SLAB_HWCACHE_ALIGN);
if (ldlm_resource_slab == NULL)
return -ENOMEM;
ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
- sizeof(struct ldlm_lock), 0,
- SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU);
+ sizeof(struct ldlm_lock), 0,
+ CFS_SLAB_HWCACHE_ALIGN | CFS_SLAB_DESTROY_BY_RCU);
if (ldlm_lock_slab == NULL) {
cfs_mem_cache_destroy(ldlm_resource_slab);
return -ENOMEM;
ldlm_interval_slab = cfs_mem_cache_create("interval_node",
sizeof(struct ldlm_interval),
- 0, SLAB_HWCACHE_ALIGN);
+ 0, CFS_SLAB_HWCACHE_ALIGN);
if (ldlm_interval_slab == NULL) {
cfs_mem_cache_destroy(ldlm_resource_slab);
cfs_mem_cache_destroy(ldlm_lock_slab);
#include "ldlm_internal.h"
static inline int
-ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
- struct list_head *work_list)
+ldlm_plain_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
+ cfs_list_t *work_list)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lock;
ldlm_mode_t req_mode = req->l_req_mode;
int compat = 1;
lockmode_verify(req_mode);
- list_for_each(tmp, queue) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, queue) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (req == lock)
RETURN(compat);
/* last lock in mode group */
- tmp = &list_entry(lock->l_sl_mode.prev,
- struct ldlm_lock,
- l_sl_mode)->l_res_link;
+ tmp = &cfs_list_entry(lock->l_sl_mode.prev,
+ struct ldlm_lock,
+ l_sl_mode)->l_res_link;
if (lockmode_compat(lock->l_req_mode, req_mode))
continue;
ldlm_add_ast_work_item(lock, req, work_list);
{
- struct list_head *head;
+ cfs_list_t *head;
head = &lock->l_sl_mode;
- list_for_each_entry(lock, head, l_sl_mode)
+ cfs_list_for_each_entry(lock, head, l_sl_mode)
if (lock->l_blocking_ast)
ldlm_add_ast_work_item(lock, req,
work_list);
* - blocking ASTs have not been sent
* - must call this function with the resource lock held */
int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list)
+ ldlm_error_t *err, cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
CFS_LIST_HEAD(rpc_list);
ENTRY;
check_res_locked(res);
- LASSERT(list_empty(&res->lr_converting));
+ LASSERT(cfs_list_empty(&res->lr_converting));
if (!first_enq) {
LASSERT(work_list != NULL);
* bug 2322: we used to unlink and re-add here, which was a
* terrible folly -- if we goto restart, we could get
* re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (list_empty(&lock->l_res_link))
+ if (cfs_list_empty(&lock->l_res_link))
ldlm_resource_add_lock(res, &res->lr_waiting, lock);
unlock_res(res);
rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
int granted, grant_step, limit;
limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
+ granted = cfs_atomic_read(&pl->pl_granted);
grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
grant_step = ((limit - granted) * grant_step) / 100;
slv = pl->pl_server_lock_volume;
grant_plan = pl->pl_grant_plan;
limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
+ granted = cfs_atomic_read(&pl->pl_granted);
grant_usage = limit - (granted - grant_plan);
if (grant_usage <= 0)
{
int grant_plan = pl->pl_grant_plan;
__u64 slv = pl->pl_server_lock_volume;
- int granted = atomic_read(&pl->pl_granted);
- int grant_rate = atomic_read(&pl->pl_grant_rate);
- int cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ int granted = cfs_atomic_read(&pl->pl_granted);
+ int grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+ int cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
slv);
*/
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL);
- write_lock(&obd->obd_pool_lock);
+ cfs_write_lock(&obd->obd_pool_lock);
obd->obd_pool_slv = pl->pl_server_lock_volume;
- write_unlock(&obd->obd_pool_lock);
+ cfs_write_unlock(&obd->obd_pool_lock);
}
/**
time_t recalc_interval_sec;
ENTRY;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec >= pl->pl_recalc_period) {
/*
recalc_interval_sec);
}
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
RETURN(0);
}
* VM is asking how many entries may be potentially freed.
*/
if (nr == 0)
- return atomic_read(&pl->pl_granted);
+ return cfs_atomic_read(&pl->pl_granted);
/*
* Client already canceled locks but server is already in shrinker
* and can't cancel anything. Let's catch this race.
*/
- if (atomic_read(&pl->pl_granted) == 0)
+ if (cfs_atomic_read(&pl->pl_granted) == 0)
RETURN(0);
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
/*
* We want shrinker to possibly cause cancelation of @nr locks from
* Make sure that pool informed obd of last SLV changes.
*/
ldlm_srv_pool_push_slv(pl);
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
/*
* We did not really free any memory here so far, it only will be
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL && obd != LP_POISON);
LASSERT(obd->obd_type != LP_POISON);
- write_lock(&obd->obd_pool_lock);
+ cfs_write_lock(&obd->obd_pool_lock);
obd->obd_pool_limit = limit;
- write_unlock(&obd->obd_pool_lock);
+ cfs_write_unlock(&obd->obd_pool_lock);
ldlm_pool_set_limit(pl, limit);
RETURN(0);
*/
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL);
- read_lock(&obd->obd_pool_lock);
+ cfs_read_lock(&obd->obd_pool_lock);
pl->pl_server_lock_volume = obd->obd_pool_slv;
ldlm_pool_set_limit(pl, obd->obd_pool_limit);
- read_unlock(&obd->obd_pool_lock);
+ cfs_read_unlock(&obd->obd_pool_lock);
}
/**
time_t recalc_interval_sec;
ENTRY;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
/*
* Check if we need to recalc lists now.
*/
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period) {
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
RETURN(0);
}
pl->pl_recalc_time = cfs_time_current_sec();
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
recalc_interval_sec);
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
/*
* Do not cancel locks in case lru resize is disabled for this ns.
*/
ldlm_cli_pool_pop_slv(pl);
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
unused = ns->ns_nr_unused;
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
if (nr) {
canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC,
time_t recalc_interval_sec;
int count;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
/*
/*
* Zero out all rates and speed for the last period.
*/
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
+ cfs_atomic_set(&pl->pl_grant_rate, 0);
+ cfs_atomic_set(&pl->pl_cancel_rate, 0);
+ cfs_atomic_set(&pl->pl_grant_speed, 0);
}
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
if (pl->pl_ops->po_recalc != NULL) {
count = pl->pl_ops->po_recalc(pl);
__u64 slv, clv;
__u32 limit;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
clv = pl->pl_client_lock_volume;
limit = ldlm_pool_get_limit(pl);
grant_plan = pl->pl_grant_plan;
- granted = atomic_read(&pl->pl_granted);
- grant_rate = atomic_read(&pl->pl_grant_rate);
- lvf = atomic_read(&pl->pl_lock_volume_factor);
- grant_speed = atomic_read(&pl->pl_grant_speed);
- cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ granted = cfs_atomic_read(&pl->pl_granted);
+ grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+ lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
+ grant_speed = cfs_atomic_read(&pl->pl_grant_speed);
+ cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
pl->pl_name);
int rc;
ENTRY;
- spin_lock_init(&pl->pl_lock);
- atomic_set(&pl->pl_granted, 0);
+ cfs_spin_lock_init(&pl->pl_lock);
+ cfs_atomic_set(&pl->pl_granted, 0);
pl->pl_recalc_time = cfs_time_current_sec();
- atomic_set(&pl->pl_lock_volume_factor, 1);
+ cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
+ cfs_atomic_set(&pl->pl_grant_rate, 0);
+ cfs_atomic_set(&pl->pl_cancel_rate, 0);
+ cfs_atomic_set(&pl->pl_grant_speed, 0);
pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
ENTRY;
LDLM_DEBUG(lock, "add lock to pool");
- atomic_inc(&pl->pl_granted);
- atomic_inc(&pl->pl_grant_rate);
- atomic_inc(&pl->pl_grant_speed);
+ cfs_atomic_inc(&pl->pl_granted);
+ cfs_atomic_inc(&pl->pl_grant_rate);
+ cfs_atomic_inc(&pl->pl_grant_speed);
lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
/*
ENTRY;
LDLM_DEBUG(lock, "del lock from pool");
- LASSERT(atomic_read(&pl->pl_granted) > 0);
- atomic_dec(&pl->pl_granted);
- atomic_inc(&pl->pl_cancel_rate);
- atomic_dec(&pl->pl_grant_speed);
+ LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
+ cfs_atomic_dec(&pl->pl_granted);
+ cfs_atomic_inc(&pl->pl_cancel_rate);
+ cfs_atomic_dec(&pl->pl_grant_speed);
lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
__u64 slv;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);
*/
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
pl->pl_server_lock_volume = slv;
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
__u64 slv;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
slv = pl->pl_client_lock_volume;
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_clv);
*/
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
pl->pl_client_lock_volume = clv;
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_clv);
*/
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
- return atomic_read(&pl->pl_limit);
+ return cfs_atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);
*/
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
- atomic_set(&pl->pl_limit, limit);
+ cfs_atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);
*/
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
- return atomic_read(&pl->pl_lock_volume_factor);
+ return cfs_atomic_read(&pl->pl_lock_volume_factor);
}
EXPORT_SYMBOL(ldlm_pool_get_lvf);
#ifdef __KERNEL__
static int ldlm_pool_granted(struct ldlm_pool *pl)
{
- return atomic_read(&pl->pl_granted);
+ return cfs_atomic_read(&pl->pl_granted);
}
static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
-static struct completion ldlm_pools_comp;
+static struct cfs_shrinker *ldlm_pools_srv_shrinker;
+static struct cfs_shrinker *ldlm_pools_cli_shrinker;
+static cfs_completion_t ldlm_pools_comp;
/*
* Cancel \a nr locks from all namespaces (if possible). Returns number of
/*
* Find out how many resources we may release.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
nr_ns > 0; nr_ns--)
{
- mutex_down(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ if (cfs_list_empty(ldlm_namespace_list(client))) {
+ cfs_mutex_up(ldlm_namespace_lock(client));
cl_env_reexit(cookie);
return 0;
}
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
ldlm_namespace_put(ns, 1);
}
/*
* Shrink at least ldlm_namespace_nr(client) namespaces.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
nr_ns > 0; nr_ns--)
{
int cancel, nr_locks;
/*
* Do not call shrink under ldlm_namespace_lock(client)
*/
- mutex_down(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ if (cfs_list_empty(ldlm_namespace_list(client))) {
+ cfs_mutex_up(ldlm_namespace_lock(client));
/*
* If list is empty, we can't return any @cached > 0,
* that probably would cause needless shrinker
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
cancel = 1 + nr_locks * nr / total;
/*
* Check all modest namespaces first.
*/
- mutex_down(ldlm_namespace_lock(client));
- list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
{
if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
continue;
/*
* The rest is given to greedy namespaces.
*/
- list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
+ cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
{
if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
continue;
* for _all_ pools.
*/
l = LDLM_POOL_HOST_L /
- atomic_read(ldlm_namespace_nr(client));
+ cfs_atomic_read(
+ ldlm_namespace_nr(client));
} else {
/*
* All the rest of greedy pools will have
* all locks in equal parts.
*/
l = (LDLM_POOL_HOST_L - nr_l) /
- (atomic_read(ldlm_namespace_nr(client)) -
+ (cfs_atomic_read(
+ ldlm_namespace_nr(client)) -
nr_p);
}
ldlm_pool_setup(&ns->ns_pool, l);
}
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
}
/*
* Recalc at least ldlm_namespace_nr(client) namespaces.
*/
- for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
+ for (nr = cfs_atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
int skip;
/*
* Lock the list, get first @ns in the list, getref, move it
* rid of potential deadlock on client nodes when canceling
* locks synchronously.
*/
- mutex_down(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ if (cfs_list_empty(ldlm_namespace_list(client))) {
+ cfs_mutex_up(ldlm_namespace_lock(client));
break;
}
ns = ldlm_namespace_first_locked(client);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
/*
* skip ns which is being freed, and we don't want to increase
* its refcount again, not even temporarily. bz21519.
skip = 0;
ldlm_namespace_get_locked(ns);
}
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
ldlm_namespace_move_locked(ns, client);
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
/*
* After setup is done - recalc the pool.
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
t_name, cfs_curproc_pid());
- complete_and_exit(&ldlm_pools_comp, 0);
+ cfs_complete_and_exit(&ldlm_pools_comp, 0);
}
static int ldlm_pools_thread_start(void)
if (ldlm_pools_thread == NULL)
RETURN(-ENOMEM);
- init_completion(&ldlm_pools_comp);
+ cfs_init_completion(&ldlm_pools_comp);
cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
/*
* This fixes possible race and oops due to accessing freed memory
* in pools thread.
*/
- wait_for_completion(&ldlm_pools_comp);
+ cfs_wait_for_completion(&ldlm_pools_comp);
OBD_FREE_PTR(ldlm_pools_thread);
ldlm_pools_thread = NULL;
EXIT;
rc = ldlm_pools_thread_start();
if (rc == 0) {
- ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_srv_shrink);
- ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_cli_shrink);
+ ldlm_pools_srv_shrinker =
+ cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+ ldlm_pools_srv_shrink);
+ ldlm_pools_cli_shrinker =
+ cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+ ldlm_pools_cli_shrink);
}
RETURN(rc);
}
void ldlm_pools_fini(void)
{
if (ldlm_pools_srv_shrinker != NULL) {
- remove_shrinker(ldlm_pools_srv_shrinker);
+ cfs_remove_shrinker(ldlm_pools_srv_shrinker);
ldlm_pools_srv_shrinker = NULL;
}
if (ldlm_pools_cli_shrinker != NULL) {
- remove_shrinker(ldlm_pools_cli_shrinker);
+ cfs_remove_shrinker(ldlm_pools_cli_shrinker);
ldlm_pools_cli_shrinker = NULL;
}
ldlm_pools_thread_stop();
}
if (imp != NULL) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
lwd.lwd_conn_cnt = imp->imp_conn_cnt;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
if (ns_is_client(lock->l_resource->lr_namespace) &&
* @count locks in @cancels. */
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
int version, int opc, int canceloff,
- struct list_head *cancels, int count)
+ cfs_list_t *cancels, int count)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
struct req_capsule *pill = &req->rq_pill;
}
int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
- struct list_head *cancels, int count)
+ cfs_list_t *cancels, int count)
{
return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
/* Pack @count locks in @head into ldlm_request buffer at the offset @off,
of the request @req. */
static void ldlm_cancel_pack(struct ptlrpc_request *req,
- struct list_head *head, int count)
+ cfs_list_t *head, int count)
{
struct ldlm_request *dlm;
struct ldlm_lock *lock;
/* XXX: it would be better to pack lock handles grouped by resource.
* so that the server cancel would call filter_lvbo_update() less
* frequently. */
- list_for_each_entry(lock, head, l_bl_ast) {
+ cfs_list_for_each_entry(lock, head, l_bl_ast) {
if (!count--)
break;
LASSERT(lock->l_conn_export);
/* Prepare and send a batched cancel rpc, it will include count lock handles
* of locks given in @head. */
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
+int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
int count, int flags)
{
struct ptlrpc_request *req = NULL;
* alive in cleanup time. Evil races are possible which may cause
* oops in that time.
*/
- write_lock(&obd->obd_pool_lock);
+ cfs_write_lock(&obd->obd_pool_lock);
old_slv = obd->obd_pool_slv;
obd->obd_pool_slv = new_slv;
obd->obd_pool_limit = new_limit;
- write_unlock(&obd->obd_pool_lock);
+ cfs_write_unlock(&obd->obd_pool_lock);
RETURN(0);
}
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* rpc which goes to canceld portal, so we can cancel other lru locks
* here and send them all as one LDLM_CANCEL rpc. */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, &cancels);
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, &cancels);
exp = lock->l_conn_export;
if (exp_connect_cancelset(exp)) {
/* XXX until we will have compound requests and can cut cancels from generic rpc
* we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
-static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
+static int ldlm_cancel_list(cfs_list_t *cancels, int count, int flags)
{
CFS_LIST_HEAD(head);
struct ldlm_lock *lock, *next;
int left = 0, bl_ast = 0, rc;
left = count;
- list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
+ cfs_list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
if (left-- == 0)
break;
}
if (!(flags & LDLM_FL_BL_AST) && (rc == LDLM_FL_BL_AST)) {
LDLM_DEBUG(lock, "Cancel lock separately");
- list_del_init(&lock->l_bl_ast);
- list_add(&lock->l_bl_ast, &head);
+ cfs_list_del_init(&lock->l_bl_ast);
+ cfs_list_add(&lock->l_bl_ast, &head);
bl_ast ++;
continue;
}
if (rc == LDLM_FL_LOCAL_ONLY) {
/* CANCEL RPC should not be sent to server. */
- list_del_init(&lock->l_bl_ast);
+ cfs_list_del_init(&lock->l_bl_ast);
LDLM_LOCK_RELEASE(lock);
count--;
}
*
* flags & LDLM_CANCEL_AGED - cancel alocks according to "aged policy".
*/
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
int count, int max, int cancel_flags, int flags)
{
ldlm_cancel_lru_policy_t pf;
int added = 0, unused;
ENTRY;
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
unused = ns->ns_nr_unused;
if (!ns_connect_lru_resize(ns))
pf = ldlm_cancel_lru_policy(ns, flags);
LASSERT(pf != NULL);
- while (!list_empty(&ns->ns_unused_list)) {
+ while (!cfs_list_empty(&ns->ns_unused_list)) {
/* For any flags, stop scanning if @max is reached. */
if (max && added >= max)
break;
- list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru){
+ cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
+ l_lru){
/* No locks which got blocking requests. */
LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
break;
LDLM_LOCK_GET(lock);
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
/* Pass the lock through the policy filter and see if it
lu_ref_del(&lock->l_reference,
__FUNCTION__, cfs_current());
LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
break;
}
lu_ref_del(&lock->l_reference,
__FUNCTION__, cfs_current());
LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
continue;
}
LASSERT(!lock->l_readers && !lock->l_writers);
* and can't use l_pending_chain as it is used both on
* server and client nevertheless bug 5666 says it is
* used only on server */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, cancels);
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
added++;
unused--;
}
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
}
* given policy, mode. GET the found locks and add them into the @cancels
* list. */
int ldlm_cancel_resource_local(struct ldlm_resource *res,
- struct list_head *cancels,
+ cfs_list_t *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, int lock_flags,
int cancel_flags, void *opaque)
ENTRY;
lock_res(res);
- list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+ cfs_list_for_each_entry(lock, &res->lr_granted, l_res_link) {
if (opaque != NULL && lock->l_ast_data != opaque) {
LDLM_ERROR(lock, "data %p doesn't match opaque %p",
lock->l_ast_data, opaque);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
lock_flags;
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, cancels);
LDLM_LOCK_GET(lock);
count++;
}
* If @req is not NULL, put handles of locks in @cancels into the request
* buffer at the offset @off.
* Destroy @cancels at the end. */
-int ldlm_cli_cancel_list(struct list_head *cancels, int count,
+int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
struct ptlrpc_request *req, int flags)
{
struct ldlm_lock *lock;
int res = 0;
ENTRY;
- if (list_empty(cancels) || count == 0)
+ if (cfs_list_empty(cancels) || count == 0)
RETURN(0);
/* XXX: requests (both batched and not) could be sent in parallel.
* It would also speed up the case when the server does not support
* the feature. */
while (count > 0) {
- LASSERT(!list_empty(cancels));
- lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
+ LASSERT(!cfs_list_empty(cancels));
+ lock = cfs_list_entry(cancels->next, struct ldlm_lock,
+ l_bl_ast);
LASSERT(lock->l_conn_export);
if (exp_connect_cancelset(lock->l_conn_export)) {
{
int no_resource = 0;
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
if (ns->ns_resources == 0)
no_resource = 1;
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
RETURN(no_resource);
}
LCK_MINMODE, flags,
opaque));
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
for (i = 0; i < RES_HASH_SIZE; i++) {
- struct list_head *tmp;
+ cfs_list_t *tmp;
tmp = ns->ns_hash[i].next;
while (tmp != &(ns->ns_hash[i])) {
struct ldlm_resource *res;
int rc;
- res = list_entry(tmp, struct ldlm_resource, lr_hash);
+ res = cfs_list_entry(tmp, struct ldlm_resource,
+ lr_hash);
ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
LDLM_RESOURCE_ADDREF(res);
rc = ldlm_cli_cancel_unused_resource(ns, &res->lr_name,
res->lr_name.name[0], rc);
LDLM_RESOURCE_DELREF(res);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
}
}
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
RETURN(ELDLM_OK);
}
int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
void *closure)
{
- struct list_head *tmp, *next;
+ cfs_list_t *tmp, *next;
struct ldlm_lock *lock;
int rc = LDLM_ITER_CONTINUE;
RETURN(LDLM_ITER_CONTINUE);
lock_res(res);
- list_for_each_safe(tmp, next, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each_safe(tmp, next, &res->lr_granted) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
}
- list_for_each_safe(tmp, next, &res->lr_converting) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each_safe(tmp, next, &res->lr_converting) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
}
- list_for_each_safe(tmp, next, &res->lr_waiting) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each_safe(tmp, next, &res->lr_waiting) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
{
int i, rc = LDLM_ITER_CONTINUE;
struct ldlm_resource *res;
- struct list_head *tmp;
+ cfs_list_t *tmp;
ENTRY;
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
for (i = 0; i < RES_HASH_SIZE; i++) {
tmp = ns->ns_hash[i].next;
while (tmp != &(ns->ns_hash[i])) {
- res = list_entry(tmp, struct ldlm_resource, lr_hash);
+ res = cfs_list_entry(tmp, struct ldlm_resource,
+ lr_hash);
ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
LDLM_RESOURCE_ADDREF(res);
rc = iter(res, closure);
LDLM_RESOURCE_DELREF(res);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
if (rc == LDLM_ITER_STOP)
}
}
out:
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
RETURN(rc);
}
static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
- struct list_head *list = closure;
+ cfs_list_t *list = closure;
/* we use l_pending_chain here, because it's unused on clients. */
- LASSERTF(list_empty(&lock->l_pending_chain),"lock %p next %p prev %p\n",
+ LASSERTF(cfs_list_empty(&lock->l_pending_chain),
+ "lock %p next %p prev %p\n",
lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
/* bug 9573: don't replay locks left after eviction, or
* bug 17614: locks being actively cancelled. Get a reference
* on a lock so that it does not disapear under us (e.g. due to cancel)
*/
if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
- list_add(&lock->l_pending_chain, list);
+ cfs_list_add(&lock->l_pending_chain, list);
LDLM_LOCK_GET(lock);
}
struct obd_export *exp;
ENTRY;
- atomic_dec(&req->rq_import->imp_replay_inflight);
+ cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
if (rc != ELDLM_OK)
GOTO(out, rc);
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
else if (lock->l_granted_mode)
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
- else if (!list_empty(&lock->l_res_link))
+ else if (!cfs_list_empty(&lock->l_res_link))
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
else
flags = LDLM_FL_REPLAY;
LDLM_DEBUG(lock, "replaying lock:");
- atomic_inc(&req->rq_import->imp_replay_inflight);
+ cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->lock_handle = body->lock_handle[0];
ENTRY;
- LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
+ LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
/* ensure this doesn't fall to 0 before all have been queued */
- atomic_inc(&imp->imp_replay_inflight);
+ cfs_atomic_inc(&imp->imp_replay_inflight);
(void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
- list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
- list_del_init(&lock->l_pending_chain);
+ cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+ cfs_list_del_init(&lock->l_pending_chain);
if (rc) {
LDLM_LOCK_PUT(lock);
continue; /* or try to do the rest? */
LDLM_LOCK_PUT(lock);
}
- atomic_dec(&imp->imp_replay_inflight);
+ cfs_atomic_dec(&imp->imp_replay_inflight);
RETURN(rc);
}
cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
-atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
-atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
+cfs_atomic_t ldlm_srv_namespace_nr = CFS_ATOMIC_INIT(0);
+cfs_atomic_t ldlm_cli_namespace_nr = CFS_ATOMIC_INIT(0);
-struct semaphore ldlm_srv_namespace_lock;
+cfs_semaphore_t ldlm_srv_namespace_lock;
CFS_LIST_HEAD(ldlm_srv_namespace_list);
-struct semaphore ldlm_cli_namespace_lock;
+cfs_semaphore_t ldlm_cli_namespace_lock;
CFS_LIST_HEAD(ldlm_cli_namespace_list);
cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
int lru_resize;
dummy[MAX_STRING_SIZE] = '\0';
- if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+ if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE))
return -EFAULT;
if (strncmp(dummy, "clear", 5) == 0) {
ldlm_side_t client, ldlm_appetite_t apt)
{
struct ldlm_namespace *ns = NULL;
- struct list_head *bucket;
+ cfs_list_t *bucket;
int rc, idx, namelen;
ENTRY;
CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
ns->ns_refcount = 0;
ns->ns_client = client;
- spin_lock_init(&ns->ns_hash_lock);
- atomic_set(&ns->ns_locks, 0);
+ cfs_spin_lock_init(&ns->ns_hash_lock);
+ cfs_atomic_set(&ns->ns_locks, 0);
ns->ns_resources = 0;
cfs_waitq_init(&ns->ns_waitq);
ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
ns->ns_timeouts = 0;
- spin_lock_init(&ns->ns_unused_lock);
+ cfs_spin_lock_init(&ns->ns_unused_lock);
ns->ns_orig_connect_flags = 0;
ns->ns_connect_flags = 0;
ldlm_proc_namespace(ns);
- idx = atomic_read(ldlm_namespace_nr(client));
+ idx = cfs_atomic_read(ldlm_namespace_nr(client));
rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
if (rc) {
CERROR("Can't initialize lock pool, rc %d\n", rc);
* as a result--notably, that we shouldn't cancel locks with refs. -phil
*
* Called with the ns_lock held. */
-static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
+static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
int flags)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
int rc = 0, client = ns_is_client(res->lr_namespace);
int local_only = (flags & LDLM_FL_LOCAL_ONLY);
ENTRY;
/* first, we look for non-cleaned-yet lock
* all cleaned locks are marked by CLEANED flag */
lock_res(res);
- list_for_each(tmp, q) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, q) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
if (lock->l_flags & LDLM_FL_CLEANED) {
lock = NULL;
continue;
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
int i;
if (ns == NULL) {
}
for (i = 0; i < RES_HASH_SIZE; i++) {
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
tmp = ns->ns_hash[i].next;
while (tmp != &(ns->ns_hash[i])) {
struct ldlm_resource *res;
- res = list_entry(tmp, struct ldlm_resource, lr_hash);
+ res = cfs_list_entry(tmp, struct ldlm_resource,
+ lr_hash);
ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
LDLM_RESOURCE_ADDREF(res);
cleanup_resource(res, &res->lr_granted, flags);
cleanup_resource(res, &res->lr_converting, flags);
cleanup_resource(res, &res->lr_waiting, flags);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
/* XXX: former stuff caused issues in case of race
LDLM_RESOURCE_DELREF(res);
if (!ldlm_resource_putref_locked(res)) {
CERROR("Namespace %s resource refcount nonzero "
- "(%d) after lock cleanup; forcing cleanup.\n",
- ns->ns_name, atomic_read(&res->lr_refcount));
+ "(%d) after lock cleanup; forcing "
+ "cleanup.\n",
+ ns->ns_name,
+ cfs_atomic_read(&res->lr_refcount));
CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
LPU64") (rc: %d)\n", res,
- res->lr_name.name[0], res->lr_name.name[1],
- res->lr_name.name[2], res->lr_name.name[3],
- atomic_read(&res->lr_refcount));
+ res->lr_name.name[0],
+ res->lr_name.name[1],
+ res->lr_name.name[2],
+ res->lr_name.name[3],
+ cfs_atomic_read(&res->lr_refcount));
}
}
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
}
return ELDLM_OK;
ns->ns_name, ns->ns_refcount);
force_wait:
if (force)
- lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
+ lwi = LWI_TIMEOUT(obd_timeout * CFS_HZ / 4, NULL, NULL);
rc = l_wait_event(ns->ns_waitq,
ns->ns_refcount == 0, &lwi);
* Namespace \a ns should be not on list in this time, otherwise this
* will cause issues realted to using freed \a ns in pools thread.
*/
- LASSERT(list_empty(&ns->ns_list_chain));
+ LASSERT(cfs_list_empty(&ns->ns_list_chain));
OBD_FREE_PTR(ns);
ldlm_put_ref();
EXIT;
void ldlm_namespace_get(struct ldlm_namespace *ns)
{
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
ldlm_namespace_get_locked(ns);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
}
void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup)
LASSERT(ns->ns_refcount > 0);
ns->ns_refcount--;
if (ns->ns_refcount == 0 && wakeup)
- wake_up(&ns->ns_waitq);
+ cfs_waitq_signal(&ns->ns_waitq);
}
void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup)
{
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
ldlm_namespace_put_locked(ns, wakeup);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
}
/* Register @ns in the list of namespaces */
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
{
- mutex_down(ldlm_namespace_lock(client));
- LASSERT(list_empty(&ns->ns_list_chain));
- list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
- atomic_inc(ldlm_namespace_nr(client));
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ LASSERT(cfs_list_empty(&ns->ns_list_chain));
+ cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
+ cfs_atomic_inc(ldlm_namespace_nr(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
}
/* Unregister @ns from the list of namespaces */
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
{
- mutex_down(ldlm_namespace_lock(client));
- LASSERT(!list_empty(&ns->ns_list_chain));
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ LASSERT(!cfs_list_empty(&ns->ns_list_chain));
/*
* Some asserts and possibly other parts of code still using
* list_empty(&ns->ns_list_chain). This is why it is important
* to use list_del_init() here.
*/
- list_del_init(&ns->ns_list_chain);
- atomic_dec(ldlm_namespace_nr(client));
- mutex_up(ldlm_namespace_lock(client));
+ cfs_list_del_init(&ns->ns_list_chain);
+ cfs_atomic_dec(ldlm_namespace_nr(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
}
/* Should be called under ldlm_namespace_lock(client) taken */
void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
{
- LASSERT(!list_empty(&ns->ns_list_chain));
+ LASSERT(!cfs_list_empty(&ns->ns_list_chain));
LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
- list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
+ cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}
/* Should be called under ldlm_namespace_lock(client) taken */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
- LASSERT(!list_empty(ldlm_namespace_list(client)));
+ LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
return container_of(ldlm_namespace_list(client)->next,
struct ldlm_namespace, ns_list_chain);
}
res->lr_itree[idx].lit_root = NULL;
}
- atomic_set(&res->lr_refcount, 1);
- spin_lock_init(&res->lr_lock);
+ cfs_atomic_set(&res->lr_refcount, 1);
+ cfs_spin_lock_init(&res->lr_lock);
lu_ref_init(&res->lr_reference);
/* one who creates the resource must unlock
* the semaphore after lvb initialization */
- init_MUTEX_LOCKED(&res->lr_lvb_sem);
+ cfs_init_mutex_locked(&res->lr_lvb_sem);
return res;
}
ldlm_resource_find(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
__u32 hash)
{
- struct list_head *bucket, *tmp;
+ cfs_list_t *bucket, *tmp;
struct ldlm_resource *res;
LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
bucket = ns->ns_hash + hash;
- list_for_each(tmp, bucket) {
- res = list_entry(tmp, struct ldlm_resource, lr_hash);
+ cfs_list_for_each(tmp, bucket) {
+ res = cfs_list_entry(tmp, struct ldlm_resource, lr_hash);
if (memcmp(&res->lr_name, name, sizeof(res->lr_name)) == 0)
return res;
}
ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
const struct ldlm_res_id *name, __u32 hash, ldlm_type_t type)
{
- struct list_head *bucket;
+ cfs_list_t *bucket;
struct ldlm_resource *res, *old_res;
ENTRY;
res->lr_type = type;
res->lr_most_restr = LCK_NL;
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
old_res = ldlm_resource_find(ns, name, hash);
if (old_res) {
/* someone won the race and added the resource before */
ldlm_resource_getref(old_res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
/* synchronize WRT resource creation */
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- down(&old_res->lr_lvb_sem);
- up(&old_res->lr_lvb_sem);
+ cfs_down(&old_res->lr_lvb_sem);
+ cfs_up(&old_res->lr_lvb_sem);
}
RETURN(old_res);
}
/* we won! let's add the resource */
bucket = ns->ns_hash + hash;
- list_add(&res->lr_hash, bucket);
+ cfs_list_add(&res->lr_hash, bucket);
ns->ns_resources++;
ldlm_namespace_get_locked(ns);
if (parent == NULL) {
- list_add(&res->lr_childof, &ns->ns_root_list);
+ cfs_list_add(&res->lr_childof, &ns->ns_root_list);
} else {
res->lr_parent = parent;
- list_add(&res->lr_childof, &parent->lr_children);
+ cfs_list_add(&res->lr_childof, &parent->lr_children);
}
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
int rc;
CERROR("lvbo_init failed for resource "
LPU64": rc %d\n", name->name[0], rc);
/* we create resource with locked lr_lvb_sem */
- up(&res->lr_lvb_sem);
+ cfs_up(&res->lr_lvb_sem);
}
RETURN(res);
LASSERT(ns->ns_hash != NULL);
LASSERT(name->name[0] != 0);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
res = ldlm_resource_find(ns, name, hash);
if (res) {
ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
/* synchronize WRT resource creation */
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- down(&res->lr_lvb_sem);
- up(&res->lr_lvb_sem);
+ cfs_down(&res->lr_lvb_sem);
+ cfs_up(&res->lr_lvb_sem);
}
RETURN(res);
}
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
if (create == 0)
RETURN(NULL);
{
LASSERT(res != NULL);
LASSERT(res != LP_POISON);
- atomic_inc(&res->lr_refcount);
+ cfs_atomic_inc(&res->lr_refcount);
CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
- atomic_read(&res->lr_refcount));
+ cfs_atomic_read(&res->lr_refcount));
return res;
}
LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
- if (!list_empty(&res->lr_granted)) {
+ if (!cfs_list_empty(&res->lr_granted)) {
ldlm_resource_dump(D_ERROR, res);
LBUG();
}
- if (!list_empty(&res->lr_converting)) {
+ if (!cfs_list_empty(&res->lr_converting)) {
ldlm_resource_dump(D_ERROR, res);
LBUG();
}
- if (!list_empty(&res->lr_waiting)) {
+ if (!cfs_list_empty(&res->lr_waiting)) {
ldlm_resource_dump(D_ERROR, res);
LBUG();
}
- if (!list_empty(&res->lr_children)) {
+ if (!cfs_list_empty(&res->lr_children)) {
ldlm_resource_dump(D_ERROR, res);
LBUG();
}
/* Pass 0 here to not wake ->ns_waitq up yet, we will do it few
* lines below when all children are freed. */
ldlm_namespace_put_locked(ns, 0);
- list_del_init(&res->lr_hash);
- list_del_init(&res->lr_childof);
+ cfs_list_del_init(&res->lr_hash);
+ cfs_list_del_init(&res->lr_childof);
lu_ref_fini(&res->lr_reference);
ns->ns_resources--;
if (ns->ns_resources == 0)
- wake_up(&ns->ns_waitq);
+ cfs_waitq_signal(&ns->ns_waitq);
}
/* Returns 1 if the resource was freed, 0 if it remains. */
ENTRY;
CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
- atomic_read(&res->lr_refcount) - 1);
- LASSERTF(atomic_read(&res->lr_refcount) > 0, "%d",
- atomic_read(&res->lr_refcount));
- LASSERTF(atomic_read(&res->lr_refcount) < LI_POISON, "%d",
- atomic_read(&res->lr_refcount));
+ cfs_atomic_read(&res->lr_refcount) - 1);
+ LASSERTF(cfs_atomic_read(&res->lr_refcount) > 0, "%d",
+ cfs_atomic_read(&res->lr_refcount));
+ LASSERTF(cfs_atomic_read(&res->lr_refcount) < LI_POISON, "%d",
+ cfs_atomic_read(&res->lr_refcount));
- if (atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
+ if (cfs_atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
__ldlm_resource_putref_final(res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
if (res->lr_lvb_data)
OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
ENTRY;
CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
- atomic_read(&res->lr_refcount) - 1);
- LASSERT(atomic_read(&res->lr_refcount) > 0);
- LASSERT(atomic_read(&res->lr_refcount) < LI_POISON);
+ cfs_atomic_read(&res->lr_refcount) - 1);
+ LASSERT(cfs_atomic_read(&res->lr_refcount) > 0);
+ LASSERT(cfs_atomic_read(&res->lr_refcount) < LI_POISON);
- LASSERT(atomic_read(&res->lr_refcount) >= 0);
- if (atomic_dec_and_test(&res->lr_refcount)) {
+ LASSERT(cfs_atomic_read(&res->lr_refcount) >= 0);
+ if (cfs_atomic_dec_and_test(&res->lr_refcount)) {
__ldlm_resource_putref_final(res);
if (res->lr_lvb_data)
OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
RETURN(rc);
}
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
struct ldlm_lock *lock)
{
check_res_locked(res);
return;
}
- LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(cfs_list_empty(&lock->l_res_link));
- list_add_tail(&lock->l_res_link, head);
+ cfs_list_add_tail(&lock->l_res_link, head);
}
void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
goto out;
}
- LASSERT(list_empty(&new->l_res_link));
+ LASSERT(cfs_list_empty(&new->l_res_link));
- list_add(&new->l_res_link, &original->l_res_link);
+ cfs_list_add(&new->l_res_link, &original->l_res_link);
out:;
}
ldlm_unlink_lock_skiplist(lock);
else if (type == LDLM_EXTENT)
ldlm_extent_unlink_lock(lock);
- list_del_init(&lock->l_res_link);
+ cfs_list_del_init(&lock->l_res_link);
}
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
if (!((libcfs_debug | D_ERROR) & level))
return;
- mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_down(ldlm_namespace_lock(client));
- list_for_each(tmp, ldlm_namespace_list(client)) {
+ cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
struct ldlm_namespace *ns;
- ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
+ ns = cfs_list_entry(tmp, struct ldlm_namespace, ns_list_chain);
ldlm_namespace_dump(level, ns);
}
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
}
void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
if (!((libcfs_debug | D_ERROR) & level))
return;
if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
return;
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
tmp = ns->ns_root_list.next;
while (tmp != &ns->ns_root_list) {
struct ldlm_resource *res;
- res = list_entry(tmp, struct ldlm_resource, lr_childof);
+ res = cfs_list_entry(tmp, struct ldlm_resource, lr_childof);
ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
LDLM_RESOURCE_ADDREF(res);
lock_res(res);
unlock_res(res);
LDLM_RESOURCE_DELREF(res);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
}
ns->ns_next_dump = cfs_time_shift(10);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
}
void ldlm_resource_dump(int level, struct ldlm_resource *res)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
int pos;
CLASSERT(RES_NAME_SIZE == 4);
CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
res->lr_name.name[2], res->lr_name.name[3],
- atomic_read(&res->lr_refcount));
+ cfs_atomic_read(&res->lr_refcount));
- if (!list_empty(&res->lr_granted)) {
+ if (!cfs_list_empty(&res->lr_granted)) {
pos = 0;
CDEBUG(level, "Granted locks:\n");
- list_for_each(tmp, &res->lr_granted) {
+ cfs_list_for_each(tmp, &res->lr_granted) {
struct ldlm_lock *lock;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
ldlm_lock_dump(level, lock, ++pos);
}
}
- if (!list_empty(&res->lr_converting)) {
+ if (!cfs_list_empty(&res->lr_converting)) {
pos = 0;
CDEBUG(level, "Converting locks:\n");
- list_for_each(tmp, &res->lr_converting) {
+ cfs_list_for_each(tmp, &res->lr_converting) {
struct ldlm_lock *lock;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
ldlm_lock_dump(level, lock, ++pos);
}
}
- if (!list_empty(&res->lr_waiting)) {
+ if (!cfs_list_empty(&res->lr_waiting)) {
pos = 0;
CDEBUG(level, "Waiting locks:\n");
- list_for_each(tmp, &res->lr_waiting) {
+ cfs_list_for_each(tmp, &res->lr_waiting) {
struct ldlm_lock *lock;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
ldlm_lock_dump(level, lock, ++pos);
}
}
#include <file.h>
#endif
-#undef LIST_HEAD
-
#ifdef HAVE_LINUX_UNISTD_H
#include <linux/unistd.h>
#elif defined(HAVE_UNISTD_H)
#include <file.h>
#endif
-#undef LIST_HEAD
-
#include "llite_lib.h"
/* Pack the required supplementary groups into the supplied groups array.
LASSERT(i1 != NULL);
LASSERT(suppgids != NULL);
- if (in_group_p(i1->i_stbuf.st_gid))
+ if (cfs_curproc_is_in_groups(i1->i_stbuf.st_gid))
suppgids[0] = i1->i_stbuf.st_gid;
else
suppgids[0] = -1;
if (i2) {
- if (in_group_p(i2->i_stbuf.st_gid))
+ if (cfs_curproc_is_in_groups(i2->i_stbuf.st_gid))
suppgids[1] = i2->i_stbuf.st_gid;
else
suppgids[1] = -1;
op_data->op_name = name;
op_data->op_mode = mode;
op_data->op_namelen = namelen;
- op_data->op_mod_time = CURRENT_TIME;
+ op_data->op_mod_time = CFS_CURRENT_TIME;
op_data->op_data = NULL;
}
#include <file.h>
#endif
-/* both sys/queue.h (libsysio require it) and portals/lists.h have definition
- * of 'LIST_HEAD'. undef it to suppress warnings
- */
-#undef LIST_HEAD
#include <liblustre.h>
#include <lnet/lnetctl.h> /* needed for parse_dump */
struct lu_fid ll_root_fid;
int ll_flags;
struct lustre_client_ocd ll_lco;
- struct list_head ll_conn_chain;
+ cfs_list_t ll_conn_chain;
struct obd_uuid ll_mds_uuid;
struct obd_uuid ll_mds_peer_uuid;
struct lov_stripe_md *lli_smd;
char *lli_symlink_name;
- struct semaphore lli_open_sem;
+ cfs_semaphore_t lli_open_sem;
__u64 lli_maxbytes;
unsigned long lli_flags;
__u64 lli_ioepoch;
#endif
}
-int in_group_p(gid_t gid)
+int cfs_curproc_is_in_groups(gid_t gid)
{
int i;
#include <file.h>
#endif
-#undef LIST_HEAD
-
#include "llite_lib.h"
void ll_intent_drop_lock(struct lookup_intent *it)
if (it_disposition(it, DISP_OPEN_CREATE) &&
!it_open_error(DISP_OPEN_CREATE, it)) {
LASSERT(request);
- LASSERT(atomic_read(&request->rq_refcount) > 1);
+ LASSERT(cfs_atomic_read(&request->rq_refcount) > 1);
CDEBUG(D_INODE, "dec a ref of req %p\n", request);
ptlrpc_req_finished(request);
}
#include <file.h>
#endif
-#undef LIST_HEAD
-
#include "llite_lib.h"
typedef ssize_t llu_file_piov_t(const struct iovec *iovec, int iovlen,
int ret;
/* BUG: 5972 */
- st->st_atime = CURRENT_TIME;
+ st->st_atime = CFS_CURRENT_TIME;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
int refcheck;
int ret;
- st->st_mtime = st->st_ctime = CURRENT_TIME;
+ st->st_mtime = st->st_ctime = CFS_CURRENT_TIME;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
#include <file.h>
#endif
-#undef LIST_HEAD
-
#include "llite_lib.h"
#ifndef MAY_EXEC
if (current->fsuid == st->st_uid)
mode >>= 6;
- else if (in_group_p(st->st_gid))
+ else if (cfs_curproc_is_in_groups(st->st_gid))
mode >>= 3;
if ((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask)
int next = 0;
ENTRY;
- list_del(&sbi->ll_conn_chain);
+ cfs_list_del(&sbi->ll_conn_chain);
cl_sb_fini(sbi);
obd_disconnect(sbi->ll_dt_exp);
obd_disconnect(sbi->ll_md_exp);
st->st_ctime = attr->ia_ctime;
if (ia_valid & ATTR_MODE) {
st->st_mode = attr->ia_mode;
- if (!in_group_p(st->st_gid) && !cfs_capable(CFS_CAP_FSETID))
+ if (!cfs_curproc_is_in_groups(st->st_gid) &&
+ !cfs_capable(CFS_CAP_FSETID))
st->st_mode &= ~S_ISGID;
}
/* mark_inode_dirty(inode); */
/* We mark all of the fields "set" so MDS/OST does not re-set them */
if (attr->ia_valid & ATTR_CTIME) {
- attr->ia_ctime = CURRENT_TIME;
+ attr->ia_ctime = CFS_CURRENT_TIME;
attr->ia_valid |= ATTR_CTIME_SET;
}
if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
- attr->ia_atime = CURRENT_TIME;
+ attr->ia_atime = CFS_CURRENT_TIME;
attr->ia_valid |= ATTR_ATIME_SET;
}
if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
- attr->ia_mtime = CURRENT_TIME;
+ attr->ia_mtime = CFS_CURRENT_TIME;
attr->ia_valid |= ATTR_MTIME_SET;
}
if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
CDEBUG(D_INODE, "setting mtime "CFS_TIME_T", ctime "CFS_TIME_T
", now = "CFS_TIME_T"\n",
LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
- LTIME_S(CURRENT_TIME));
+ LTIME_S(CFS_CURRENT_TIME));
/* NB: ATTR_SIZE will only be set after this point if the size
* resides on the MDS, ie, this file has no objects. */
}
iattr.ia_valid |= ATTR_RAW | ATTR_CTIME;
- iattr.ia_ctime = CURRENT_TIME;
+ iattr.ia_ctime = CFS_CURRENT_TIME;
rc = llu_setattr_raw(ino, &iattr);
liblustre_wait_idle();
/* For now we will always get up-to-date statfs values, but in the
* future we may allow some amount of caching on the client (e.g.
* from QOS or lprocfs updates). */
- rc = llu_statfs_internal(sbi, &osfs, cfs_time_current_64() - HZ);
+ rc = llu_statfs_internal(sbi, &osfs, cfs_time_current_64() - CFS_HZ);
if (rc)
return rc;
LASSERT(sizeof(lum) == sizeof(*lump));
LASSERT(sizeof(lum.lmm_objects[0]) ==
sizeof(lump->lmm_objects[0]));
- if (copy_from_user(&lum, lump, sizeof(lum)))
+ if (cfs_copy_from_user(&lum, lump, sizeof(lum)))
return(-EFAULT);
switch (lum.lmm_magic) {
LASSERT(sizeof(lum) == sizeof(*lump));
LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
- if (copy_from_user(&lum, lump, sizeof(lum)))
+ if (cfs_copy_from_user(&lum, lump, sizeof(lum)))
RETURN(-EFAULT);
rc = llu_lov_setstripe_ea_info(ino, flags, &lum, sizeof(lum));
/* Check for the proper lock. */
if (!ll_have_md_lock(inode, MDS_INODELOCK_LOOKUP))
goto do_lock;
- down(&lli->lli_och_sem);
+ cfs_down(&lli->lli_och_sem);
if (*och_p) { /* Everything is open already, do nothing */
/*(*och_usecount)++; Do not let them steal our open
handle from under us */
hope the lock won't be invalidated in between. But
if it would be, we'll reopen the open request to
MDS later during file open path */
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
ll_finish_md_op_data(op_data);
RETURN(1);
} else {
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
}
}
ENTRY;
LASSERT(ldd);
- lock_kernel();
+ cfs_lock_kernel();
/* Strictly speaking this introduces an additional race: the
* increments should wait until the rpc has returned.
* However, given that at present the function is void, this
* issue is moot. */
if (flag == 1 && (++ldd->lld_mnt_count) > 1) {
- unlock_kernel();
+ cfs_unlock_kernel();
EXIT;
return;
}
if (flag == 0 && (++ldd->lld_cwd_count) > 1) {
- unlock_kernel();
+ cfs_unlock_kernel();
EXIT;
return;
}
- unlock_kernel();
+ cfs_unlock_kernel();
handle = (flag) ? &ldd->lld_mnt_och : &ldd->lld_cwd_och;
oc = ll_mdscapa_get(inode);
rc = obd_pin(sbi->ll_md_exp, ll_inode2fid(inode), oc, handle, flag);
capa_put(oc);
if (rc) {
- lock_kernel();
+ cfs_lock_kernel();
memset(handle, 0, sizeof(*handle));
if (flag == 0)
ldd->lld_cwd_count--;
else
ldd->lld_mnt_count--;
- unlock_kernel();
+ cfs_unlock_kernel();
}
EXIT;
ENTRY;
LASSERT(ldd);
- lock_kernel();
+ cfs_lock_kernel();
/* Strictly speaking this introduces an additional race: the
* increments should wait until the rpc has returned.
* However, given that at present the function is void, this
handle = (flag) ? ldd->lld_mnt_och : ldd->lld_cwd_och;
if (handle.och_magic != OBD_CLIENT_HANDLE_MAGIC) {
/* the "pin" failed */
- unlock_kernel();
+ cfs_unlock_kernel();
EXIT;
return;
}
count = --ldd->lld_mnt_count;
else
count = --ldd->lld_cwd_count;
- unlock_kernel();
+ cfs_unlock_kernel();
if (count != 0) {
EXIT;
LASSERT(sizeof(lumv3.lmm_objects[0]) ==
sizeof(lumv3p->lmm_objects[0]));
/* first try with v1 which is smaller than v3 */
- if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
+ if (cfs_copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
RETURN(-EFAULT);
if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
- if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
+ if (cfs_copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
RETURN(-EFAULT);
}
lmdp = (struct lov_user_mds_data *)arg;
lump = &lmdp->lmd_lmm;
}
- if (copy_to_user(lump, lmm, lmmsize))
+ if (cfs_copy_to_user(lump, lmm, lmmsize))
GOTO(out_req, rc = -EFAULT);
skip_lmm:
if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
st.st_ino = inode->i_ino;
lmdp = (struct lov_user_mds_data *)arg;
- if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
+ if (cfs_copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
GOTO(out_req, rc = -EFAULT);
}
RETURN(rc);
OBD_ALLOC(lmm, lmmsize);
- if (copy_from_user(lmm, lum, lmmsize))
+ if (cfs_copy_from_user(lmm, lum, lmmsize))
GOTO(free_lmm, rc = -EFAULT);
switch (lmm->lmm_magic) {
if (rc)
GOTO(free_lsm, rc);
- if (copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
+ if (cfs_copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
GOTO(free_lsm, rc = -EFAULT);
EXIT;
if (!rc) {
str = req_capsule_server_get(&req->rq_pill,
&RMF_STRING);
- if (copy_to_user(data->ioc_pbuf1, str, data->ioc_plen1))
+ if (cfs_copy_to_user(data->ioc_pbuf1, str,
+ data->ioc_plen1))
rc = -EFAULT;
}
ptlrpc_req_finished(req);
NULL);
if (rc) {
CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void *)arg, check, sizeof(*check)))
+ if (cfs_copy_to_user((void *)arg, check,
+ sizeof(*check)))
rc = -EFAULT;
GOTO(out_poll, rc);
}
NULL);
if (rc) {
CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void *)arg, check, sizeof(*check)))
+ if (cfs_copy_to_user((void *)arg, check,
+ sizeof(*check)))
rc = -EFAULT;
GOTO(out_poll, rc);
}
if (!qctl)
RETURN(-ENOMEM);
- if (copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
+ if (cfs_copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
GOTO(out_quotactl, rc = -EFAULT);
cmd = qctl->qc_cmd;
}
}
- if (copy_to_user((void *)arg, qctl, sizeof(*qctl)))
+ if (cfs_copy_to_user((void *)arg, qctl, sizeof(*qctl)))
rc = -EFAULT;
out_quotactl:
struct obd_device *obd = class_exp2obd(sbi->ll_dt_exp);
if (!obd)
RETURN(-EFAULT);
- if (copy_to_user((void *)arg, obd->obd_name,
- strlen(obd->obd_name) + 1))
+ if (cfs_copy_to_user((void *)arg, obd->obd_name,
+ strlen(obd->obd_name) + 1))
RETURN (-EFAULT);
RETURN(0);
}
case LL_IOC_GETOBDCOUNT: {
int count;
- if (copy_from_user(&count, (int *)arg, sizeof(int)))
+ if (cfs_copy_from_user(&count, (int *)arg, sizeof(int)))
RETURN(-EFAULT);
if (!count) {
count = lmv->desc.ld_tgt_count;
}
- if (copy_to_user((int *)arg, &count, sizeof(int)))
+ if (cfs_copy_to_user((int *)arg, &count, sizeof(int)))
RETURN(-EFAULT);
RETURN(0);
}
case LL_IOC_PATH2FID:
- if (copy_to_user((void *)arg, ll_inode2fid(inode),
- sizeof(struct lu_fid)))
+ if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
+ sizeof(struct lu_fid)))
RETURN(-EFAULT);
RETURN(0);
case OBD_IOC_CHANGELOG_CLEAR: {
OBD_ALLOC_PTR(icc);
if (icc == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(icc, (void *)arg, sizeof(*icc)))
+ if (cfs_copy_from_user(icc, (void *)arg, sizeof(*icc)))
GOTO(icc_free, rc = -EFAULT);
rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(*icc), icc,NULL);
och_usecount = &lli->lli_open_fd_read_count;
}
- down(&lli->lli_och_sem);
+ cfs_down(&lli->lli_och_sem);
if (*och_usecount) { /* There are still users of this handle, so
skip freeing it. */
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
RETURN(0);
}
och=*och_p;
*och_p = NULL;
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
if (och) { /* There might be a race and somebody have freed this och
already */
struct inode *inode = file->f_dentry->d_inode;
ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
- down(&lli->lli_och_sem);
+ cfs_down(&lli->lli_och_sem);
if (fd->fd_omode & FMODE_WRITE) {
lockmode = LCK_CW;
LASSERT(lli->lli_open_fd_write_count);
LASSERT(lli->lli_open_fd_read_count);
lli->lli_open_fd_read_count--;
}
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode,
fd->fd_file = file;
if (S_ISDIR(inode->i_mode)) {
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (lli->lli_opendir_key == NULL && lli->lli_opendir_pid == 0) {
LASSERT(lli->lli_sai == NULL);
lli->lli_opendir_key = fd;
lli->lli_opendir_pid = cfs_curproc_pid();
opendir_set = 1;
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
if (inode->i_sb->s_root == file->f_dentry) {
och_usecount = &lli->lli_open_fd_read_count;
}
- down(&lli->lli_och_sem);
+ cfs_down(&lli->lli_och_sem);
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
/* Well, there's extra open request that we do not need,
let's close it somehow. This will decref request. */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
ll_file_data_put(fd);
GOTO(out_openerr, rc);
}
rc = ll_local_open(file, it, fd, NULL);
if (rc) {
(*och_usecount)--;
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
ll_file_data_put(fd);
GOTO(out_openerr, rc);
}
could be cancelled, and since blocking ast handler
would attempt to grab och_sem as well, that would
result in a deadlock */
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file, NULL, 0, it);
it->it_create_mode &= ~M_CHECK_STALE;
GOTO(out_och_free, rc);
}
}
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
/* Must do this outside lli_och_sem lock to prevent deadlock where
different kind of OPEN lock for this same inode gets cancelled
*och_p = NULL; /* OBD_FREE writes some magic there */
(*och_usecount)--;
}
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
out_openerr:
if (opendir_set != 0)
ll_stop_statahead(inode, lli->lli_opendir_key);
#endif
if ((iot == CIT_WRITE) &&
!(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- down(&lli->lli_write_sem);
+ cfs_down(&lli->lli_write_sem);
write_sem_locked = 1;
}
break;
}
result = cl_io_loop(env, io);
if (write_sem_locked)
- up(&lli->lli_write_sem);
+ cfs_up(&lli->lli_write_sem);
} else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
- if (copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
- sizeof(struct ll_recreate_obj)))
+ if (cfs_copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
+ sizeof(struct ll_recreate_obj)))
RETURN(-EFAULT);
OBDO_ALLOC(oa);
if (lump == NULL) {
RETURN(-ENOMEM);
}
- if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
+ if (cfs_copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
OBD_FREE(lump, lum_size);
RETURN(-EFAULT);
}
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(lumv1, lumv1p, lum_size))
+ if (cfs_copy_from_user(lumv1, lumv1p, lum_size))
RETURN(-EFAULT);
if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
lum_size = sizeof(struct lov_user_md_v3);
- if (copy_from_user(&lumv3, lumv3p, lum_size))
+ if (cfs_copy_from_user(&lumv3, lumv3p, lum_size))
RETURN(-EFAULT);
}
if (ll_file_nolock(file))
RETURN(-EOPNOTSUPP);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
CWARN("group lock already existed with gid %lu\n",
fd->fd_grouplock.cg_gid);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(-EINVAL);
}
LASSERT(fd->fd_grouplock.cg_lock == NULL);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
RETURN(rc);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
CERROR("another thread just won the race\n");
cl_put_grouplock(&grouplock);
RETURN(-EINVAL);
fd->fd_flags |= LL_FILE_GROUP_LOCKED;
fd->fd_grouplock = grouplock;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
RETURN(0);
struct ccc_grouplock grouplock;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
CWARN("no group lock held\n");
RETURN(-EINVAL);
}
if (fd->fd_grouplock.cg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
arg, fd->fd_grouplock.cg_gid);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(-EINVAL);
}
grouplock = fd->fd_grouplock;
memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cl_put_grouplock(&grouplock);
CDEBUG(D_INFO, "group lock %lu released\n", arg);
OBD_ALLOC_PTR(gfin);
if (gfin == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(gfin, arg, sizeof(*gfin))) {
+ if (cfs_copy_from_user(gfin, arg, sizeof(*gfin))) {
OBD_FREE_PTR(gfin);
RETURN(-EFAULT);
}
rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
if (rc)
GOTO(gf_free, rc);
- if (copy_to_user(arg, gfout, outsize))
+ if (cfs_copy_to_user(arg, gfout, outsize))
rc = -EFAULT;
gf_free:
if (fiemap_s == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(fiemap_s,(struct ll_user_fiemap __user *)arg,
- sizeof(*fiemap_s)))
+ if (cfs_copy_from_user(fiemap_s,
+ (struct ll_user_fiemap __user *)arg,
+ sizeof(*fiemap_s)))
GOTO(error, rc = -EFAULT);
if (fiemap_s->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
fiemap_s->fm_flags = fiemap_s->fm_flags &
~LUSTRE_FIEMAP_FLAGS_COMPAT;
- if (copy_to_user((char *)arg, fiemap_s,
- sizeof(*fiemap_s)))
+ if (cfs_copy_to_user((char *)arg, fiemap_s,
+ sizeof(*fiemap_s)))
GOTO(error, rc = -EFAULT);
GOTO(error, rc = -EBADR);
* it is used to calculate end_offset and device from previous
* fiemap call. */
if (extent_count) {
- if (copy_from_user(&fiemap_s->fm_extents[0],
+ if (cfs_copy_from_user(&fiemap_s->fm_extents[0],
(char __user *)arg + sizeof(*fiemap_s),
sizeof(struct ll_fiemap_extent)))
GOTO(error, rc = -EFAULT);
ret_bytes += (fiemap_s->fm_mapped_extents *
sizeof(struct ll_fiemap_extent));
- if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
+ if (cfs_copy_to_user((void *)arg, fiemap_s, ret_bytes))
rc = -EFAULT;
error:
case LL_IOC_FLUSHCTX:
RETURN(ll_flush_ctx(inode));
case LL_IOC_PATH2FID: {
- if (copy_to_user((void *)arg, ll_inode2fid(inode),
- sizeof(struct lu_fid)))
+ if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
+ sizeof(struct lu_fid)))
RETURN(-EFAULT);
RETURN(0);
int rc;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
if (!acl)
RETURN(-EAGAIN);
return rc;
} else {
check_groups:
- if (in_group_p(inode->i_gid))
+ if (cfs_curproc_is_in_groups(inode->i_gid))
mode >>= 3;
}
if ((mode & mask & S_IRWXO) == mask)
/* dynamic ioctl number support routins */
static struct llioc_ctl_data {
- struct rw_semaphore ioc_sem;
- struct list_head ioc_head;
+ cfs_rw_semaphore_t ioc_sem;
+ cfs_list_t ioc_head;
} llioc = {
__RWSEM_INITIALIZER(llioc.ioc_sem),
CFS_LIST_HEAD_INIT(llioc.ioc_head)
struct llioc_data {
- struct list_head iocd_list;
+ cfs_list_t iocd_list;
unsigned int iocd_size;
llioc_callback_t iocd_cb;
unsigned int iocd_count;
in_data->iocd_count = count;
memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
- down_write(&llioc.ioc_sem);
- list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
- up_write(&llioc.ioc_sem);
+ cfs_down_write(&llioc.ioc_sem);
+ cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
+ cfs_up_write(&llioc.ioc_sem);
RETURN(in_data);
}
if (magic == NULL)
return;
- down_write(&llioc.ioc_sem);
- list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
+ cfs_down_write(&llioc.ioc_sem);
+ cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
if (tmp == magic) {
unsigned int size = tmp->iocd_size;
- list_del(&tmp->iocd_list);
- up_write(&llioc.ioc_sem);
+ cfs_list_del(&tmp->iocd_list);
+ cfs_up_write(&llioc.ioc_sem);
OBD_FREE(tmp, size);
return;
}
}
- up_write(&llioc.ioc_sem);
+ cfs_up_write(&llioc.ioc_sem);
CWARN("didn't find iocontrol register block with magic: %p\n", magic);
}
struct llioc_data *data;
int rc = -EINVAL, i;
- down_read(&llioc.ioc_sem);
- list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
+ cfs_down_read(&llioc.ioc_sem);
+ cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
for (i = 0; i < data->iocd_count; i++) {
if (cmd != data->iocd_cmd[i])
continue;
if (ret == LLIOC_STOP)
break;
}
- up_read(&llioc.ioc_sem);
+ cfs_up_read(&llioc.ioc_sem);
if (rcp)
*rcp = rc;
*/
/* capas for oss writeback and those failed to renew */
-static LIST_HEAD(ll_idle_capas);
+static CFS_LIST_HEAD(ll_idle_capas);
static struct ptlrpc_thread ll_capa_thread;
-static struct list_head *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
+static cfs_list_t *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
/* llite capa renewal timer */
struct timer_list ll_capa_timer;
/* for debug: indicate whether capa on llite is enabled or not */
-static atomic_t ll_capa_debug = ATOMIC_INIT(0);
+static cfs_atomic_t ll_capa_debug = CFS_ATOMIC_INIT(0);
static unsigned long long ll_capa_renewed = 0;
static unsigned long long ll_capa_renewal_noent = 0;
static unsigned long long ll_capa_renewal_failed = 0;
static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
{
- if (time_before(expiry, ll_capa_timer.expires) ||
+ if (cfs_time_before(expiry, ll_capa_timer.expires) ||
!timer_pending(&ll_capa_timer)) {
mod_timer(&ll_capa_timer, expiry);
DEBUG_CAPA(D_SEC, &ocapa->c_capa,
/* if ll_capa_list has client capa to expire or ll_idle_capas has
* expired capa, return 1.
*/
- spin_lock(&capa_lock);
- if (!list_empty(ll_capa_list)) {
- ocapa = list_entry(ll_capa_list->next, struct obd_capa, c_list);
+ cfs_spin_lock(&capa_lock);
+ if (!cfs_list_empty(ll_capa_list)) {
+ ocapa = cfs_list_entry(ll_capa_list->next, struct obd_capa,
+ c_list);
expired = capa_is_to_expire(ocapa);
if (!expired)
update_capa_timer(ocapa, capa_renewal_time(ocapa));
- } else if (!list_empty(&ll_idle_capas)) {
- ocapa = list_entry(ll_idle_capas.next, struct obd_capa, c_list);
+ } else if (!cfs_list_empty(&ll_idle_capas)) {
+ ocapa = cfs_list_entry(ll_idle_capas.next, struct obd_capa,
+ c_list);
expired = capa_is_expired(ocapa);
if (!expired)
update_capa_timer(ocapa, ocapa->c_expiry);
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
if (expired)
DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
return (ll_capa_thread.t_flags & SVC_STOPPING) ? 1: 0;
}
-static void sort_add_capa(struct obd_capa *ocapa, struct list_head *head)
+static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
{
struct obd_capa *tmp;
- struct list_head *before = NULL;
+ cfs_list_t *before = NULL;
/* TODO: client capa is sorted by expiry, this could be optimized */
- list_for_each_entry_reverse(tmp, head, c_list) {
+ cfs_list_for_each_entry_reverse(tmp, head, c_list) {
if (cfs_time_aftereq(ocapa->c_expiry, tmp->c_expiry)) {
before = &tmp->c_list;
break;
}
LASSERT(&ocapa->c_list != before);
- list_add(&ocapa->c_list, before ?: head);
+ cfs_list_add(&ocapa->c_list, before ?: head);
}
static inline int obd_capa_open_count(struct obd_capa *oc)
{
struct ll_inode_info *lli = ll_i2info(oc->u.cli.inode);
- return atomic_read(&lli->lli_open_count);
+ return cfs_atomic_read(&lli->lli_open_count);
}
static void ll_delete_capa(struct obd_capa *ocapa)
LASSERT(lli->lli_mds_capa == ocapa);
lli->lli_mds_capa = NULL;
} else if (capa_for_oss(&ocapa->c_capa)) {
- list_del_init(&ocapa->u.cli.lli_list);
+ cfs_list_del_init(&ocapa->u.cli.lli_list);
}
DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free client");
- list_del_init(&ocapa->c_list);
+ cfs_list_del_init(&ocapa->c_list);
capa_count[CAPA_SITE_CLIENT]--;
/* release the ref when alloc */
capa_put(ocapa);
cfs_daemonize("ll_capa");
ll_capa_thread.t_flags = SVC_RUNNING;
- wake_up(&ll_capa_thread.t_ctl_waitq);
+ cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
while (1) {
l_wait_event(ll_capa_thread.t_ctl_waitq,
next = NULL;
- spin_lock(&capa_lock);
- list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
+ cfs_spin_lock(&capa_lock);
+ cfs_list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
LASSERT(ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC);
if (!capa_is_to_expire(ocapa)) {
break;
}
- list_del_init(&ocapa->c_list);
+ cfs_list_del_init(&ocapa->c_list);
/* for MDS capability, only renew those which belong to
* dir, or its inode is opened, or client holds LOOKUP
capa_get(ocapa);
ll_capa_renewed++;
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
ll_update_capa);
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
if (rc) {
DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
"renew failed: %d", rc);
if (next)
update_capa_timer(next, capa_renewal_time(next));
- list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas, c_list) {
+ cfs_list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
+ c_list) {
if (!capa_is_expired(ocapa)) {
if (!next)
update_capa_timer(ocapa, ocapa->c_expiry);
break;
}
- if (atomic_read(&ocapa->c_refc) > 1) {
+ if (cfs_atomic_read(&ocapa->c_refc) > 1) {
DEBUG_CAPA(D_SEC, &ocapa->c_capa,
"expired(c_refc %d), don't release",
- atomic_read(&ocapa->c_refc));
+ cfs_atomic_read(&ocapa->c_refc));
/* don't try to renew any more */
- list_del_init(&ocapa->c_list);
+ cfs_list_del_init(&ocapa->c_list);
continue;
}
ll_delete_capa(ocapa);
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
}
ll_capa_thread.t_flags = SVC_STOPPED;
- wake_up(&ll_capa_thread.t_ctl_waitq);
+ cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
RETURN(0);
}
void ll_capa_timer_callback(unsigned long unused)
{
- wake_up(&ll_capa_thread.t_ctl_waitq);
+ cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
}
int ll_capa_thread_start(void)
int rc;
ENTRY;
- init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
+ cfs_waitq_init(&ll_capa_thread.t_ctl_waitq);
- rc = kernel_thread(capa_thread_main, NULL, 0);
+ rc = cfs_kernel_thread(capa_thread_main, NULL, 0);
if (rc < 0) {
CERROR("cannot start expired capa thread: rc %d\n", rc);
RETURN(rc);
}
- wait_event(ll_capa_thread.t_ctl_waitq,
- ll_capa_thread.t_flags & SVC_RUNNING);
+ cfs_wait_event(ll_capa_thread.t_ctl_waitq,
+ ll_capa_thread.t_flags & SVC_RUNNING);
RETURN(0);
}
void ll_capa_thread_stop(void)
{
ll_capa_thread.t_flags = SVC_STOPPING;
- wake_up(&ll_capa_thread.t_ctl_waitq);
- wait_event(ll_capa_thread.t_ctl_waitq,
- ll_capa_thread.t_flags & SVC_STOPPED);
+ cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+ cfs_wait_event(ll_capa_thread.t_ctl_waitq,
+ ll_capa_thread.t_flags & SVC_STOPPED);
}
struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
opc == CAPA_OPC_OSS_TRUNC);
- spin_lock(&capa_lock);
- list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+ cfs_spin_lock(&capa_lock);
+ cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
if (capa_is_expired(ocapa))
continue;
if ((opc & CAPA_OPC_OSS_WRITE) &&
} else {
ocapa = NULL;
- if (atomic_read(&ll_capa_debug)) {
+ if (cfs_atomic_read(&ll_capa_debug)) {
CERROR("no capability for "DFID" opc "LPX64"\n",
PFID(&lli->lli_fid), opc);
- atomic_set(&ll_capa_debug, 0);
+ cfs_atomic_set(&ll_capa_debug, 0);
}
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
RETURN(ocapa);
}
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
RETURN(NULL);
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
ocapa = capa_get(lli->lli_mds_capa);
- spin_unlock(&capa_lock);
- if (!ocapa && atomic_read(&ll_capa_debug)) {
+ cfs_spin_unlock(&capa_lock);
+ if (!ocapa && cfs_atomic_read(&ll_capa_debug)) {
CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
- atomic_set(&ll_capa_debug, 0);
+ cfs_atomic_set(&ll_capa_debug, 0);
}
RETURN(ocapa);
DEBUG_CAPA(D_SEC, capa, "add MDS");
} else {
- spin_lock(&old->c_lock);
+ cfs_spin_lock(&old->c_lock);
old->c_capa = *capa;
- spin_unlock(&old->c_lock);
+ cfs_spin_unlock(&old->c_lock);
DEBUG_CAPA(D_SEC, capa, "update MDS");
struct obd_capa *ocapa;
/* inside capa_lock */
- list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+ cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
if ((capa_opc(&ocapa->c_capa) & opc) != opc)
continue;
{
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_capa *tmp;
- struct list_head *next = NULL;
+ cfs_list_t *next = NULL;
/* capa is sorted in lli_oss_capas so lookup can always find the
* latest one */
- list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
+ cfs_list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
next = &tmp->u.cli.lli_list;
break;
}
}
LASSERT(&ocapa->u.cli.lli_list != next);
- list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
+ cfs_list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
}
static struct obd_capa *do_add_oss_capa(struct inode *inode,
old = do_lookup_oss_capa(inode, capa_opc(capa) & CAPA_OPC_OSS_ONLY);
if (!old) {
ocapa->u.cli.inode = inode;
- INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
+ CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
capa_count[CAPA_SITE_CLIENT]++;
DEBUG_CAPA(D_SEC, capa, "add OSS");
} else {
- spin_lock(&old->c_lock);
+ cfs_spin_lock(&old->c_lock);
old->c_capa = *capa;
- spin_unlock(&old->c_lock);
+ cfs_spin_unlock(&old->c_lock);
DEBUG_CAPA(D_SEC, capa, "update OSS");
struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
{
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
do_add_oss_capa(inode, ocapa);
/* truncate capa won't renew */
if (ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC) {
set_capa_expiry(ocapa);
- list_del_init(&ocapa->c_list);
+ cfs_list_del_init(&ocapa->c_list);
sort_add_capa(ocapa, ll_capa_list);
update_capa_timer(ocapa, capa_renewal_time(ocapa));
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
- atomic_set(&ll_capa_debug, 1);
+ cfs_atomic_set(&ll_capa_debug, 1);
return ocapa;
}
if (IS_ERR(capa)) {
/* set error code */
rc = PTR_ERR(capa);
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
if (rc == -ENOENT) {
DEBUG_CAPA(D_SEC, &ocapa->c_capa,
"renewal canceled because object removed");
}
}
- list_del_init(&ocapa->c_list);
+ cfs_list_del_init(&ocapa->c_list);
sort_add_capa(ocapa, &ll_idle_capas);
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
capa_put(ocapa);
iput(inode);
RETURN(rc);
}
- spin_lock(&ocapa->c_lock);
+ cfs_spin_lock(&ocapa->c_lock);
LASSERT(!memcmp(&ocapa->c_capa, capa,
offsetof(struct lustre_capa, lc_opc)));
ocapa->c_capa = *capa;
set_capa_expiry(ocapa);
- spin_unlock(&ocapa->c_lock);
+ cfs_spin_unlock(&ocapa->c_lock);
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
if (capa_for_oss(capa))
inode_add_oss_capa(inode, ocapa);
DEBUG_CAPA(D_SEC, capa, "renew");
EXIT;
retry:
- list_del_init(&ocapa->c_list);
+ cfs_list_del_init(&ocapa->c_list);
sort_add_capa(ocapa, ll_capa_list);
update_capa_timer(ocapa, capa_renewal_time(ocapa));
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
capa_put(ocapa);
iput(inode);
if (!S_ISREG(inode->i_mode))
return;
- atomic_inc(&lli->lli_open_count);
+ cfs_atomic_inc(&lli->lli_open_count);
}
void ll_capa_close(struct inode *inode)
if (!S_ISREG(inode->i_mode))
return;
- atomic_dec(&lli->lli_open_count);
+ cfs_atomic_dec(&lli->lli_open_count);
}
/* delete CAPA_OPC_OSS_TRUNC only */
/* release ref when find */
capa_put(ocapa);
if (likely(ocapa->c_capa.lc_opc == CAPA_OPC_OSS_TRUNC)) {
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
ll_delete_capa(ocapa);
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
}
}
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_capa *ocapa, *tmp;
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
ocapa = lli->lli_mds_capa;
if (ocapa)
ll_delete_capa(ocapa);
- list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
- u.cli.lli_list)
+ cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
+ u.cli.lli_list)
ll_delete_capa(ocapa);
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
}
void ll_print_capa_stat(struct ll_sb_info *sbi)
struct ll_inode_info *lli = ll_i2info(club->cob_inode);
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page != NULL && list_empty(&page->cpg_pending_linkage))
- list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
- spin_unlock(&lli->lli_lock);
+ if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
+ cfs_list_add(&page->cpg_pending_linkage,
+ &club->cob_pending_list);
+ cfs_spin_unlock(&lli->lli_lock);
EXIT;
}
int rc = 0;
ENTRY;
- spin_lock(&lli->lli_lock);
- if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
- list_del_init(&page->cpg_pending_linkage);
+ cfs_spin_lock(&lli->lli_lock);
+ if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
+ cfs_list_del_init(&page->cpg_pending_linkage);
rc = 1;
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
if (rc)
ll_queue_done_writing(club->cob_inode, 0);
EXIT;
struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
lli->lli_flags |= flags;
if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- list_empty(&club->cob_pending_list)) {
+ cfs_list_empty(&club->cob_pending_list)) {
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
inode->i_ino, inode->i_generation,
lli->lli_flags);
/* DONE_WRITING is allowed and inode has no dirty page. */
- spin_lock(&lcq->lcq_lock);
+ cfs_spin_lock(&lcq->lcq_lock);
- LASSERT(list_empty(&lli->lli_close_list));
+ LASSERT(cfs_list_empty(&lli->lli_close_list));
CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
inode->i_ino, inode->i_generation);
- list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
+ cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
/* Avoid a concurrent insertion into the close thread queue:
* an inode is already in the close thread, open(), write(),
* it. */
lli->lli_flags &= ~LLIF_DONE_WRITING;
- wake_up(&lcq->lcq_waitq);
- spin_unlock(&lcq->lcq_lock);
+ cfs_waitq_signal(&lcq->lcq_waitq);
+ cfs_spin_unlock(&lcq->lcq_lock);
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
EXIT;
}
struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
ENTRY;
- spin_lock(&lli->lli_lock);
- if (!(list_empty(&club->cob_pending_list))) {
+ cfs_spin_lock(&lli->lli_lock);
+ if (!(cfs_list_empty(&club->cob_pending_list))) {
if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
LASSERT(*och != NULL);
LASSERT(lli->lli_pending_och == NULL);
* request yet, DONE_WRITE is to be sent later. */
lli->lli_flags |= LLIF_EPOCH_PENDING;
lli->lli_pending_och = *och;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
inode = igrab(inode);
LASSERT(inode);
* and try DONE_WRITE again later. */
LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
lli->lli_flags |= LLIF_DONE_WRITING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
inode = igrab(inode);
LASSERT(inode);
} else {
/* Pack Size-on-MDS inode attributes only if they has changed */
if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
GOTO(out, 0);
}
/* There is a pending DONE_WRITE -- close epoch with no
* attribute change. */
if (lli->lli_flags & LLIF_EPOCH_PENDING) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
GOTO(out, 0);
}
}
- LASSERT(list_empty(&club->cob_pending_list));
+ LASSERT(cfs_list_empty(&club->cob_pending_list));
lli->lli_flags &= ~LLIF_SOM_DIRTY;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
ll_done_writing_attr(inode, op_data);
EXIT;
{
struct ll_inode_info *lli = NULL;
- spin_lock(&lcq->lcq_lock);
+ cfs_spin_lock(&lcq->lcq_lock);
- if (!list_empty(&lcq->lcq_head)) {
- lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
- lli_close_list);
- list_del_init(&lli->lli_close_list);
- } else if (atomic_read(&lcq->lcq_stop))
+ if (!cfs_list_empty(&lcq->lcq_head)) {
+ lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
+ lli_close_list);
+ cfs_list_del_init(&lli->lli_close_list);
+ } else if (cfs_atomic_read(&lcq->lcq_stop))
lli = ERR_PTR(-EALREADY);
- spin_unlock(&lcq->lcq_lock);
+ cfs_spin_unlock(&lcq->lcq_lock);
return lli;
}
cfs_daemonize(name);
}
- complete(&lcq->lcq_comp);
+ cfs_complete(&lcq->lcq_comp);
while (1) {
struct l_wait_info lwi = { 0 };
}
CDEBUG(D_INFO, "ll_close exiting\n");
- complete(&lcq->lcq_comp);
+ cfs_complete(&lcq->lcq_comp);
RETURN(0);
}
if (lcq == NULL)
return -ENOMEM;
- spin_lock_init(&lcq->lcq_lock);
- INIT_LIST_HEAD(&lcq->lcq_head);
- init_waitqueue_head(&lcq->lcq_waitq);
- init_completion(&lcq->lcq_comp);
+ cfs_spin_lock_init(&lcq->lcq_lock);
+ CFS_INIT_LIST_HEAD(&lcq->lcq_head);
+ cfs_waitq_init(&lcq->lcq_waitq);
+ cfs_init_completion(&lcq->lcq_comp);
- pid = kernel_thread(ll_close_thread, lcq, 0);
+ pid = cfs_kernel_thread(ll_close_thread, lcq, 0);
if (pid < 0) {
OBD_FREE(lcq, sizeof(*lcq));
return pid;
}
- wait_for_completion(&lcq->lcq_comp);
+ cfs_wait_for_completion(&lcq->lcq_comp);
*lcq_ret = lcq;
return 0;
}
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
- init_completion(&lcq->lcq_comp);
- atomic_inc(&lcq->lcq_stop);
- wake_up(&lcq->lcq_waitq);
- wait_for_completion(&lcq->lcq_comp);
+ cfs_init_completion(&lcq->lcq_comp);
+ cfs_atomic_inc(&lcq->lcq_stop);
+ cfs_waitq_signal(&lcq->lcq_waitq);
+ cfs_wait_for_completion(&lcq->lcq_comp);
OBD_FREE(lcq, sizeof(*lcq));
}
/* llite setxid/access permission for user on remote client */
struct ll_remote_perm {
- struct hlist_node lrp_list;
+ cfs_hlist_node_t lrp_list;
uid_t lrp_uid;
gid_t lrp_gid;
uid_t lrp_fsuid;
struct ll_inode_info {
int lli_inode_magic;
- struct semaphore lli_size_sem; /* protect open and change size */
+ cfs_semaphore_t lli_size_sem; /* protect open and change size */
void *lli_size_sem_owner;
- struct semaphore lli_write_sem;
- struct semaphore lli_trunc_sem;
+ cfs_semaphore_t lli_write_sem;
+ cfs_semaphore_t lli_trunc_sem;
char *lli_symlink_name;
__u64 lli_maxbytes;
__u64 lli_ioepoch;
cfs_time_t lli_contention_time;
/* this lock protects posix_acl, pending_write_llaps, mmap_cnt */
- spinlock_t lli_lock;
- struct list_head lli_close_list;
+ cfs_spinlock_t lli_lock;
+ cfs_list_t lli_close_list;
/* handle is to be sent to MDS later on done_writing and setattr.
* Open handle data are needed for the recovery to reconstruct
* the inode state on the MDS. XXX: recovery is not ready yet. */
struct posix_acl *lli_posix_acl;
/* remote permission hash */
- struct hlist_head *lli_remote_perms;
+ cfs_hlist_head_t *lli_remote_perms;
unsigned long lli_rmtperm_utime;
- struct semaphore lli_rmtperm_sem;
+ cfs_semaphore_t lli_rmtperm_sem;
- struct list_head lli_dead_list;
+ cfs_list_t lli_dead_list;
- struct semaphore lli_och_sem; /* Protects access to och pointers
+ cfs_semaphore_t lli_och_sem; /* Protects access to och pointers
and their usage counters */
/* We need all three because every inode may be opened in different
modes */
/* fid capability */
/* open count currently used by capability only, indicate whether
* capability needs renewal */
- atomic_t lli_open_count;
+ cfs_atomic_t lli_open_count;
struct obd_capa *lli_mds_capa;
- struct list_head lli_oss_capas;
+ cfs_list_t lli_oss_capas;
/* metadata stat-ahead */
/*
};
struct ll_ra_info {
- atomic_t ra_cur_pages;
+ cfs_atomic_t ra_cur_pages;
unsigned long ra_max_pages;
unsigned long ra_max_pages_per_file;
unsigned long ra_max_read_ahead_whole_pages;
#define RCE_HASHES 32
struct rmtacl_ctl_entry {
- struct list_head rce_list;
+ cfs_list_t rce_list;
pid_t rce_key; /* hash key */
int rce_ops; /* acl operation type */
};
struct rmtacl_ctl_table {
- spinlock_t rct_lock;
- struct list_head rct_entries[RCE_HASHES];
+ cfs_spinlock_t rct_lock;
+ cfs_list_t rct_entries[RCE_HASHES];
};
#define EE_HASHES 32
struct eacl_entry {
- struct list_head ee_list;
+ cfs_list_t ee_list;
pid_t ee_key; /* hash key */
struct lu_fid ee_fid;
int ee_type; /* ACL type for ACCESS or DEFAULT */
};
struct eacl_table {
- spinlock_t et_lock;
- struct list_head et_entries[EE_HASHES];
+ cfs_spinlock_t et_lock;
+ cfs_list_t et_entries[EE_HASHES];
};
struct ll_sb_info {
- struct list_head ll_list;
+ cfs_list_t ll_list;
/* this protects pglist and ra_info. It isn't safe to
* grab from interrupt contexts */
- spinlock_t ll_lock;
- spinlock_t ll_pp_extent_lock; /* Lock for pp_extent entries */
- spinlock_t ll_process_lock; /* Lock for ll_rw_process_info */
+ cfs_spinlock_t ll_lock;
+ cfs_spinlock_t ll_pp_extent_lock; /* Lock for pp_extent entries */
+ cfs_spinlock_t ll_process_lock; /* Lock for ll_rw_process_info */
struct obd_uuid ll_sb_uuid;
struct obd_export *ll_md_exp;
struct obd_export *ll_dt_exp;
struct lu_fid ll_root_fid; /* root object fid */
int ll_flags;
- struct list_head ll_conn_chain; /* per-conn chain of SBs */
+ cfs_list_t ll_conn_chain; /* per-conn chain of SBs */
struct lustre_client_ocd ll_lco;
- struct list_head ll_orphan_dentry_list; /*please don't ask -p*/
+ cfs_list_t ll_orphan_dentry_list; /*please don't ask -p*/
struct ll_close_queue *ll_lcq;
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
struct file_operations *ll_fop;
#ifdef HAVE_EXPORT___IGET
- struct list_head ll_deathrow; /* inodes to be destroyed (b1443) */
- spinlock_t ll_deathrow_lock;
+ cfs_list_t ll_deathrow;/*inodes to be destroyed (b1443)*/
+ cfs_spinlock_t ll_deathrow_lock;
#endif
/* =0 - hold lock over whole read/write
* >0 - max. chunk to be read/written w/o lock re-acquiring */
/* metadata stat-ahead */
unsigned int ll_sa_max; /* max statahead RPCs */
- atomic_t ll_sa_total; /* statahead thread started
+ cfs_atomic_t ll_sa_total; /* statahead thread started
* count */
- atomic_t ll_sa_wrong; /* statahead thread stopped for
+ cfs_atomic_t ll_sa_wrong; /* statahead thread stopped for
* low hit ratio */
dev_t ll_sdev_orig; /* save s_dev before assign for
pgoff_t lrr_start;
pgoff_t lrr_count;
struct task_struct *lrr_reader;
- struct list_head lrr_linkage;
+ cfs_list_t lrr_linkage;
};
/*
* per file-descriptor read-ahead data.
*/
struct ll_readahead_state {
- spinlock_t ras_lock;
+ cfs_spinlock_t ras_lock;
/*
* index of the last page that read(2) needed and that wasn't in the
* cache. Used by ras_update() to detect seeks.
* progress against this file descriptor. Used by read-ahead code,
* protected by ->ras_lock.
*/
- struct list_head ras_read_beads;
+ cfs_list_t ras_read_beads;
/*
* The following 3 items are used for detecting the stride I/O
* mode.
* ras_stride_pages = stride_pages;
* Note: all these three items are counted by pages.
*/
- unsigned long ras_stride_length;
- unsigned long ras_stride_pages;
- pgoff_t ras_stride_offset;
+ unsigned long ras_stride_length;
+ unsigned long ras_stride_pages;
+ pgoff_t ras_stride_offset;
/*
* number of consecutive stride request count, and it is similar as
* ras_consecutive_requests, but used for stride I/O mode.
* Note: only more than 2 consecutive stride request are detected,
* stride read-ahead will be enable
*/
- unsigned long ras_consecutive_stride_requests;
+ unsigned long ras_consecutive_stride_requests;
};
struct ll_file_dir {
struct lov_stripe_md;
-extern spinlock_t inode_lock;
+extern cfs_spinlock_t inode_lock;
extern struct proc_dir_entry *proc_lustre_fs_root;
/* llite/llite_close.c */
struct ll_close_queue {
- spinlock_t lcq_lock;
- struct list_head lcq_head;
- wait_queue_head_t lcq_waitq;
- struct completion lcq_comp;
- atomic_t lcq_stop;
+ cfs_spinlock_t lcq_lock;
+ cfs_list_t lcq_head;
+ cfs_waitq_t lcq_waitq;
+ cfs_completion_t lcq_comp;
+ cfs_atomic_t lcq_stop;
};
struct ccc_object *cl_inode2ccc(struct inode *inode);
struct ll_lock_tree_node;
struct ll_lock_tree {
rb_root_t lt_root;
- struct list_head lt_locked_list;
- struct ll_file_data *lt_fd;
+ cfs_list_t lt_locked_list;
+ struct ll_file_data *lt_fd;
};
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
extern cfs_mem_cache_t *ll_remote_perm_cachep;
extern cfs_mem_cache_t *ll_rmtperm_hash_cachep;
-struct hlist_head *alloc_rmtperm_hash(void);
-void free_rmtperm_hash(struct hlist_head *hash);
+cfs_hlist_head_t *alloc_rmtperm_hash(void);
+void free_rmtperm_hash(cfs_hlist_head_t *hash);
int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
int lustre_check_remote_perm(struct inode *inode, int mask);
struct ll_statahead_info {
struct inode *sai_inode;
unsigned int sai_generation; /* generation for statahead */
- atomic_t sai_refcount; /* when access this struct, hold
+ cfs_atomic_t sai_refcount; /* when access this struct, hold
* refcount */
unsigned int sai_sent; /* stat requests sent count */
unsigned int sai_replied; /* stat requests which received
* hidden entries */
cfs_waitq_t sai_waitq; /* stat-ahead wait queue */
struct ptlrpc_thread sai_thread; /* stat-ahead thread */
- struct list_head sai_entries_sent; /* entries sent out */
- struct list_head sai_entries_received; /* entries returned */
- struct list_head sai_entries_stated; /* entries stated */
+ cfs_list_t sai_entries_sent; /* entries sent out */
+ cfs_list_t sai_entries_received; /* entries returned */
+ cfs_list_t sai_entries_stated; /* entries stated */
};
int do_statahead_enter(struct inode *dir, struct dentry **dentry, int lookup);
if (lli->lli_opendir_pid != cfs_curproc_pid())
return;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (likely(lli->lli_sai != NULL && ldd != NULL))
ldd->lld_sa_generation = lli->lli_sai->sai_generation;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
static inline
cfs_mem_cache_t *ll_file_data_slab;
-LIST_HEAD(ll_super_blocks);
-spinlock_t ll_sb_lock = SPIN_LOCK_UNLOCKED;
+CFS_LIST_HEAD(ll_super_blocks);
+cfs_spinlock_t ll_sb_lock = CFS_SPIN_LOCK_UNLOCKED;
extern struct address_space_operations ll_aops;
extern struct address_space_operations ll_dir_aops;
#ifndef log2
-#define log2(n) ffz(~(n))
+#define log2(n) cfs_ffz(~(n))
#endif
static struct ll_sb_info *ll_init_sbi(void)
if (!sbi)
RETURN(NULL);
- spin_lock_init(&sbi->ll_lock);
- init_mutex(&sbi->ll_lco.lco_lock);
- spin_lock_init(&sbi->ll_pp_extent_lock);
- spin_lock_init(&sbi->ll_process_lock);
+ cfs_spin_lock_init(&sbi->ll_lock);
+ cfs_init_mutex(&sbi->ll_lco.lco_lock);
+ cfs_spin_lock_init(&sbi->ll_pp_extent_lock);
+ cfs_spin_lock_init(&sbi->ll_process_lock);
sbi->ll_rw_stats_on = 0;
si_meminfo(&si);
sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
SBI_DEFAULT_READAHEAD_WHOLE_MAX;
- INIT_LIST_HEAD(&sbi->ll_conn_chain);
- INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
+ CFS_INIT_LIST_HEAD(&sbi->ll_conn_chain);
+ CFS_INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
ll_generate_random_uuid(uuid);
class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
- spin_lock(&ll_sb_lock);
- list_add_tail(&sbi->ll_list, &ll_super_blocks);
- spin_unlock(&ll_sb_lock);
+ cfs_spin_lock(&ll_sb_lock);
+ cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
+ cfs_spin_unlock(&ll_sb_lock);
#ifdef ENABLE_LLITE_CHECKSUM
sbi->ll_flags |= LL_SBI_CHECKSUM;
#endif
#ifdef HAVE_EXPORT___IGET
- INIT_LIST_HEAD(&sbi->ll_deathrow);
- spin_lock_init(&sbi->ll_deathrow_lock);
+ CFS_INIT_LIST_HEAD(&sbi->ll_deathrow);
+ cfs_spin_lock_init(&sbi->ll_deathrow_lock);
#endif
for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
- spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_r_hist.oh_lock);
- spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_w_hist.oh_lock);
+ cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
+ pp_r_hist.oh_lock);
+ cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
+ pp_w_hist.oh_lock);
}
/* metadata statahead is enabled by default */
ENTRY;
if (sbi != NULL) {
- spin_lock(&ll_sb_lock);
- list_del(&sbi->ll_list);
- spin_unlock(&ll_sb_lock);
+ cfs_spin_lock(&ll_sb_lock);
+ cfs_list_del(&sbi->ll_list);
+ cfs_spin_unlock(&ll_sb_lock);
OBD_FREE(sbi, sizeof(*sbi));
}
EXIT;
GOTO(out_md, err);
}
- err = obd_statfs(obd, &osfs, cfs_time_current_64() - HZ, 0);
+ err = obd_statfs(obd, &osfs, cfs_time_current_64() - CFS_HZ, 0);
if (err)
GOTO(out_md_fid, err);
GOTO(out_dt, err);
}
- mutex_down(&sbi->ll_lco.lco_lock);
+ cfs_mutex_down(&sbi->ll_lco.lco_lock);
sbi->ll_lco.lco_flags = data->ocd_connect_flags;
sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
- mutex_up(&sbi->ll_lco.lco_lock);
+ cfs_mutex_up(&sbi->ll_lco.lco_lock);
fid_zero(&sbi->ll_root_fid);
err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
if (recur == 0)
return;
- list_for_each(tmp, &dentry->d_subdirs) {
+ list_for_each(tmp, &dentry->d_subdirs) {
struct dentry *d = list_entry(tmp, struct dentry, d_child);
lustre_dump_dentry(d, recur - 1);
}
goto out;
CDEBUG(D_INODE, "inode %lu/%u(%d) looks a good candidate for prune\n",
- inode->i_ino,inode->i_generation, atomic_read(&inode->i_count));
+ inode->i_ino,inode->i_generation,
+ atomic_read(&inode->i_count));
/* seems nobody uses it anymore */
inode->i_nlink = 0;
break;
if (try) {
- if (!spin_trylock(&sbi->ll_deathrow_lock))
+ if (!cfs_spin_trylock(&sbi->ll_deathrow_lock))
break;
} else {
- spin_lock(&sbi->ll_deathrow_lock);
+ cfs_spin_lock(&sbi->ll_deathrow_lock);
}
empty = 1;
lli = NULL;
- if (!list_empty(&sbi->ll_deathrow)) {
- lli = list_entry(sbi->ll_deathrow.next,
- struct ll_inode_info,
- lli_dead_list);
- list_del_init(&lli->lli_dead_list);
- if (!list_empty(&sbi->ll_deathrow))
+ if (!cfs_list_empty(&sbi->ll_deathrow)) {
+ lli = cfs_list_entry(sbi->ll_deathrow.next,
+ struct ll_inode_info,
+ lli_dead_list);
+ cfs_list_del_init(&lli->lli_dead_list);
+ if (!cfs_list_empty(&sbi->ll_deathrow))
empty = 0;
}
- spin_unlock(&sbi->ll_deathrow_lock);
+ cfs_spin_unlock(&sbi->ll_deathrow_lock);
if (lli)
prune_deathrow_one(lli);
/* destroy inodes in deathrow */
prune_deathrow(sbi, 0);
- list_del(&sbi->ll_conn_chain);
+ cfs_list_del(&sbi->ll_conn_chain);
obd_fid_fini(sbi->ll_dt_exp);
obd_disconnect(sbi->ll_dt_exp);
void ll_lli_init(struct ll_inode_info *lli)
{
lli->lli_inode_magic = LLI_INODE_MAGIC;
- sema_init(&lli->lli_size_sem, 1);
- sema_init(&lli->lli_write_sem, 1);
- sema_init(&lli->lli_trunc_sem, 1);
+ cfs_sema_init(&lli->lli_size_sem, 1);
+ cfs_sema_init(&lli->lli_write_sem, 1);
+ cfs_sema_init(&lli->lli_trunc_sem, 1);
lli->lli_flags = 0;
lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
- spin_lock_init(&lli->lli_lock);
- INIT_LIST_HEAD(&lli->lli_close_list);
+ cfs_spin_lock_init(&lli->lli_lock);
+ CFS_INIT_LIST_HEAD(&lli->lli_close_list);
lli->lli_inode_magic = LLI_INODE_MAGIC;
- sema_init(&lli->lli_och_sem, 1);
+ cfs_sema_init(&lli->lli_och_sem, 1);
lli->lli_mds_read_och = lli->lli_mds_write_och = NULL;
lli->lli_mds_exec_och = NULL;
lli->lli_open_fd_read_count = lli->lli_open_fd_write_count = 0;
lli->lli_open_fd_exec_count = 0;
- INIT_LIST_HEAD(&lli->lli_dead_list);
+ CFS_INIT_LIST_HEAD(&lli->lli_dead_list);
lli->lli_remote_perms = NULL;
lli->lli_rmtperm_utime = 0;
- sema_init(&lli->lli_rmtperm_sem, 1);
- INIT_LIST_HEAD(&lli->lli_oss_capas);
+ cfs_sema_init(&lli->lli_rmtperm_sem, 1);
+ CFS_INIT_LIST_HEAD(&lli->lli_oss_capas);
}
int ll_fill_super(struct super_block *sb)
/* client additional sb info */
lsi->lsi_llsbi = sbi = ll_init_sbi();
if (!sbi) {
- cfs_module_put();
+ cfs_module_put(THIS_MODULE);
RETURN(-ENOMEM);
}
LCONSOLE_WARN("client %s umount complete\n", ll_instance);
- cfs_module_put();
+ cfs_module_put(THIS_MODULE);
EXIT;
} /* client_put_super */
}
#ifdef CONFIG_FS_POSIX_ACL
else if (lli->lli_posix_acl) {
- LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
+ LASSERT(cfs_atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
LASSERT(lli->lli_remote_perms == NULL);
posix_acl_release(lli->lli_posix_acl);
lli->lli_posix_acl = NULL;
lli->lli_inode_magic = LLI_INODE_DEAD;
#ifdef HAVE_EXPORT___IGET
- spin_lock(&sbi->ll_deathrow_lock);
- list_del_init(&lli->lli_dead_list);
- spin_unlock(&sbi->ll_deathrow_lock);
+ cfs_spin_lock(&sbi->ll_deathrow_lock);
+ cfs_list_del_init(&lli->lli_dead_list);
+ cfs_spin_unlock(&sbi->ll_deathrow_lock);
#endif
ll_clear_inode_capas(inode);
/*
/* We mark all of the fields "set" so MDS/OST does not re-set them */
if (attr->ia_valid & ATTR_CTIME) {
- attr->ia_ctime = CURRENT_TIME;
+ attr->ia_ctime = CFS_CURRENT_TIME;
attr->ia_valid |= ATTR_CTIME_SET;
}
if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
- attr->ia_atime = CURRENT_TIME;
+ attr->ia_atime = CFS_CURRENT_TIME;
attr->ia_valid |= ATTR_ATIME_SET;
}
if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
- attr->ia_mtime = CURRENT_TIME;
+ attr->ia_mtime = CFS_CURRENT_TIME;
attr->ia_valid |= ATTR_MTIME_SET;
}
if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
UNLOCK_INODE_MUTEX(inode);
if (ia_valid & ATTR_SIZE)
UP_WRITE_I_ALLOC_SEM(inode);
- down(&lli->lli_trunc_sem);
+ cfs_down(&lli->lli_trunc_sem);
LOCK_INODE_MUTEX(inode);
if (ia_valid & ATTR_SIZE)
DOWN_WRITE_I_ALLOC_SEM(inode);
rc1 = ll_setattr_done_writing(inode, op_data, mod);
ll_finish_md_op_data(op_data);
}
- up(&lli->lli_trunc_sem);
+ cfs_up(&lli->lli_trunc_sem);
return rc ? rc : rc1;
}
lli = ll_i2info(inode);
LASSERT(lli->lli_size_sem_owner != current);
- down(&lli->lli_size_sem);
+ cfs_down(&lli->lli_size_sem);
LASSERT(lli->lli_size_sem_owner == NULL);
lli->lli_size_sem_owner = current;
lsm = lli->lli_smd;
lov_stripe_unlock(lsm);
LASSERT(lli->lli_size_sem_owner == current);
lli->lli_size_sem_owner = NULL;
- up(&lli->lli_size_sem);
+ cfs_up(&lli->lli_size_sem);
}
void ll_update_inode(struct inode *inode, struct lustre_md *md)
}
#ifdef CONFIG_FS_POSIX_ACL
else if (body->valid & OBD_MD_FLACL) {
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (lli->lli_posix_acl)
posix_acl_release(lli->lli_posix_acl);
lli->lli_posix_acl = md->posix_acl;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
#endif
inode->i_ino = cl_fid_build_ino(&body->fid1);
* and then continue. For now, we just invalidate the requests,
* schedule() and sleep one second if needed, and hope.
*/
- schedule();
+ cfs_schedule();
#ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
if (atomic_read(&vfsmnt->mnt_count) > 2) {
- cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
if (atomic_read(&vfsmnt->mnt_count) > 2)
LCONSOLE_WARN("Mount still busy with %d refs! You "
"may try to umount it a bit later\n",
ENTRY;
LASSERT(vma->vm_file);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
- atomic_inc(&vob->cob_mmap_cnt);
+ LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
+ cfs_atomic_inc(&vob->cob_mmap_cnt);
EXIT;
}
ENTRY;
LASSERT(vma->vm_file);
- atomic_dec(&vob->cob_mmap_cnt);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+ cfs_atomic_dec(&vob->cob_mmap_cnt);
+ LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
EXIT;
}
static void rce_free(struct rmtacl_ctl_entry *rce)
{
- if (!list_empty(&rce->rce_list))
- list_del(&rce->rce_list);
+ if (!cfs_list_empty(&rce->rce_list))
+ cfs_list_del(&rce->rce_list);
OBD_FREE_PTR(rce);
}
pid_t key)
{
struct rmtacl_ctl_entry *rce;
- struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
+ cfs_list_t *head = &rct->rct_entries[rce_hashfunc(key)];
- list_for_each_entry(rce, head, rce_list)
+ cfs_list_for_each_entry(rce, head, rce_list)
if (rce->rce_key == key)
return rce;
{
struct rmtacl_ctl_entry *rce;
- spin_lock(&rct->rct_lock);
+ cfs_spin_lock(&rct->rct_lock);
rce = __rct_search(rct, key);
- spin_unlock(&rct->rct_lock);
+ cfs_spin_unlock(&rct->rct_lock);
return rce;
}
if (rce == NULL)
return -ENOMEM;
- spin_lock(&rct->rct_lock);
+ cfs_spin_lock(&rct->rct_lock);
e = __rct_search(rct, key);
if (unlikely(e != NULL)) {
CWARN("Unexpected stale rmtacl_entry found: "
"[key: %d] [ops: %d]\n", (int)key, ops);
rce_free(e);
}
- list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
- spin_unlock(&rct->rct_lock);
+ cfs_list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
+ cfs_spin_unlock(&rct->rct_lock);
return 0;
}
{
struct rmtacl_ctl_entry *rce;
- spin_lock(&rct->rct_lock);
+ cfs_spin_lock(&rct->rct_lock);
rce = __rct_search(rct, key);
if (rce)
rce_free(rce);
- spin_unlock(&rct->rct_lock);
+ cfs_spin_unlock(&rct->rct_lock);
return rce ? 0 : -ENOENT;
}
{
int i;
- spin_lock_init(&rct->rct_lock);
+ cfs_spin_lock_init(&rct->rct_lock);
for (i = 0; i < RCE_HASHES; i++)
CFS_INIT_LIST_HEAD(&rct->rct_entries[i]);
}
struct rmtacl_ctl_entry *rce;
int i;
- spin_lock(&rct->rct_lock);
+ cfs_spin_lock(&rct->rct_lock);
for (i = 0; i < RCE_HASHES; i++)
- while (!list_empty(&rct->rct_entries[i])) {
- rce = list_entry(rct->rct_entries[i].next,
- struct rmtacl_ctl_entry, rce_list);
+ while (!cfs_list_empty(&rct->rct_entries[i])) {
+ rce = cfs_list_entry(rct->rct_entries[i].next,
+ struct rmtacl_ctl_entry, rce_list);
rce_free(rce);
}
- spin_unlock(&rct->rct_lock);
+ cfs_spin_unlock(&rct->rct_lock);
}
void ee_free(struct eacl_entry *ee)
{
- if (!list_empty(&ee->ee_list))
- list_del(&ee->ee_list);
+ if (!cfs_list_empty(&ee->ee_list))
+ cfs_list_del(&ee->ee_list);
if (ee->ee_acl)
lustre_ext_acl_xattr_free(ee->ee_acl);
struct lu_fid *fid, int type)
{
struct eacl_entry *ee;
- struct list_head *head = &et->et_entries[ee_hashfunc(key)];
+ cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
LASSERT(fid != NULL);
- list_for_each_entry(ee, head, ee_list)
+ cfs_list_for_each_entry(ee, head, ee_list)
if (ee->ee_key == key) {
if (lu_fid_eq(&ee->ee_fid, fid) &&
ee->ee_type == type) {
- list_del_init(&ee->ee_list);
+ cfs_list_del_init(&ee->ee_list);
return ee;
}
}
{
struct eacl_entry *ee;
- spin_lock(&et->et_lock);
+ cfs_spin_lock(&et->et_lock);
ee = __et_search_del(et, key, fid, type);
- spin_unlock(&et->et_lock);
+ cfs_spin_unlock(&et->et_lock);
return ee;
}
void et_search_free(struct eacl_table *et, pid_t key)
{
struct eacl_entry *ee, *next;
- struct list_head *head = &et->et_entries[ee_hashfunc(key)];
+ cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
- spin_lock(&et->et_lock);
- list_for_each_entry_safe(ee, next, head, ee_list)
+ cfs_spin_lock(&et->et_lock);
+ cfs_list_for_each_entry_safe(ee, next, head, ee_list)
if (ee->ee_key == key)
ee_free(ee);
- spin_unlock(&et->et_lock);
+ cfs_spin_unlock(&et->et_lock);
}
int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
if (ee == NULL)
return -ENOMEM;
- spin_lock(&et->et_lock);
+ cfs_spin_lock(&et->et_lock);
e = __et_search_del(et, key, fid, type);
if (unlikely(e != NULL)) {
CWARN("Unexpected stale eacl_entry found: "
(int)key, PFID(fid), type);
ee_free(e);
}
- list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
- spin_unlock(&et->et_lock);
+ cfs_list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
+ cfs_spin_unlock(&et->et_lock);
return 0;
}
{
int i;
- spin_lock_init(&et->et_lock);
+ cfs_spin_lock_init(&et->et_lock);
for (i = 0; i < EE_HASHES; i++)
CFS_INIT_LIST_HEAD(&et->et_entries[i]);
}
struct eacl_entry *ee;
int i;
- spin_lock(&et->et_lock);
+ cfs_spin_lock(&et->et_lock);
for (i = 0; i < EE_HASHES; i++)
- while (!list_empty(&et->et_entries[i])) {
- ee = list_entry(et->et_entries[i].next,
- struct eacl_entry, ee_list);
+ while (!cfs_list_empty(&et->et_entries[i])) {
+ ee = cfs_list_entry(et->et_entries[i].next,
+ struct eacl_entry, ee_list);
ee_free(ee);
}
- spin_unlock(&et->et_lock);
+ cfs_spin_unlock(&et->et_lock);
}
#endif
int old_gfp_mask;
- spinlock_t lo_lock;
+ cfs_spinlock_t lo_lock;
struct bio *lo_bio;
struct bio *lo_biotail;
int lo_state;
- struct semaphore lo_sem;
- struct semaphore lo_ctl_mutex;
- atomic_t lo_pending;
- wait_queue_head_t lo_bh_wait;
+ cfs_semaphore_t lo_sem;
+ cfs_semaphore_t lo_ctl_mutex;
+ cfs_atomic_t lo_pending;
+ cfs_waitq_t lo_bh_wait;
struct request_queue *lo_queue;
static int max_loop = MAX_LOOP_DEFAULT;
static struct lloop_device *loop_dev;
static struct gendisk **disks;
-static struct semaphore lloop_mutex;
+static cfs_semaphore_t lloop_mutex;
static void *ll_iocontrol_magic = NULL;
static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
{
unsigned long flags;
- spin_lock_irqsave(&lo->lo_lock, flags);
+ cfs_spin_lock_irqsave(&lo->lo_lock, flags);
if (lo->lo_biotail) {
lo->lo_biotail->bi_next = bio;
lo->lo_biotail = bio;
} else
lo->lo_bio = lo->lo_biotail = bio;
- spin_unlock_irqrestore(&lo->lo_lock, flags);
+ cfs_spin_unlock_irqrestore(&lo->lo_lock, flags);
- atomic_inc(&lo->lo_pending);
- if (waitqueue_active(&lo->lo_bh_wait))
- wake_up(&lo->lo_bh_wait);
+ cfs_atomic_inc(&lo->lo_pending);
+ if (cfs_waitq_active(&lo->lo_bh_wait))
+ cfs_waitq_signal(&lo->lo_bh_wait);
}
/*
unsigned int page_count = 0;
int rw;
- spin_lock_irq(&lo->lo_lock);
+ cfs_spin_lock_irq(&lo->lo_lock);
first = lo->lo_bio;
if (unlikely(first == NULL)) {
- spin_unlock_irq(&lo->lo_lock);
+ cfs_spin_unlock_irq(&lo->lo_lock);
return 0;
}
lo->lo_bio = NULL;
}
*req = first;
- spin_unlock_irq(&lo->lo_lock);
+ cfs_spin_unlock_irq(&lo->lo_lock);
return count;
}
CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
(unsigned long long)old_bio->bi_sector, old_bio->bi_size);
- spin_lock_irq(&lo->lo_lock);
+ cfs_spin_lock_irq(&lo->lo_lock);
inactive = (lo->lo_state != LLOOP_BOUND);
- spin_unlock_irq(&lo->lo_lock);
+ cfs_spin_unlock_irq(&lo->lo_lock);
if (inactive)
goto err;
static inline int loop_active(struct lloop_device *lo)
{
- return atomic_read(&lo->lo_pending) || (lo->lo_state == LLOOP_RUNDOWN);
+ return cfs_atomic_read(&lo->lo_pending) ||
+ (lo->lo_state == LLOOP_RUNDOWN);
}
/*
/*
* up sem, we are running
*/
- up(&lo->lo_sem);
+ cfs_up(&lo->lo_sem);
for (;;) {
- wait_event(lo->lo_bh_wait, loop_active(lo));
- if (!atomic_read(&lo->lo_pending)) {
+ cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
+ if (!cfs_atomic_read(&lo->lo_pending)) {
int exiting = 0;
- spin_lock_irq(&lo->lo_lock);
+ cfs_spin_lock_irq(&lo->lo_lock);
exiting = (lo->lo_state == LLOOP_RUNDOWN);
- spin_unlock_irq(&lo->lo_lock);
+ cfs_spin_unlock_irq(&lo->lo_lock);
if (exiting)
break;
}
}
LASSERT(bio != NULL);
- LASSERT(count <= atomic_read(&lo->lo_pending));
+ LASSERT(count <= cfs_atomic_read(&lo->lo_pending));
loop_handle_bio(lo, bio);
- atomic_sub(count, &lo->lo_pending);
+ cfs_atomic_sub(count, &lo->lo_pending);
}
cl_env_put(env, &refcheck);
out:
- up(&lo->lo_sem);
+ cfs_up(&lo->lo_sem);
return ret;
}
int error;
loff_t size;
- if (!try_module_get(THIS_MODULE))
+ if (!cfs_try_module_get(THIS_MODULE))
return -ENODEV;
error = -EBUSY;
set_blocksize(bdev, lo->lo_blocksize);
- kernel_thread(loop_thread, lo, CLONE_KERNEL);
- down(&lo->lo_sem);
+ cfs_kernel_thread(loop_thread, lo, CLONE_KERNEL);
+ cfs_down(&lo->lo_sem);
return 0;
out:
/* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
+ cfs_module_put(THIS_MODULE);
return error;
}
if (filp == NULL)
return -EINVAL;
- spin_lock_irq(&lo->lo_lock);
+ cfs_spin_lock_irq(&lo->lo_lock);
lo->lo_state = LLOOP_RUNDOWN;
- spin_unlock_irq(&lo->lo_lock);
- wake_up(&lo->lo_bh_wait);
+ cfs_spin_unlock_irq(&lo->lo_lock);
+ cfs_waitq_signal(&lo->lo_bh_wait);
- down(&lo->lo_sem);
+ cfs_down(&lo->lo_sem);
lo->lo_backing_file = NULL;
lo->ioctl = NULL;
lo->lo_device = NULL;
lo->lo_state = LLOOP_UNBOUND;
fput(filp);
/* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
+ cfs_module_put(THIS_MODULE);
return 0;
}
{
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
- down(&lo->lo_ctl_mutex);
+ cfs_down(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
- up(&lo->lo_ctl_mutex);
+ cfs_up(&lo->lo_ctl_mutex);
return 0;
}
{
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
- down(&lo->lo_ctl_mutex);
+ cfs_down(&lo->lo_ctl_mutex);
--lo->lo_refcnt;
- up(&lo->lo_ctl_mutex);
+ cfs_up(&lo->lo_ctl_mutex);
return 0;
}
struct block_device *bdev = inode->i_bdev;
int err = 0;
- down(&lloop_mutex);
+ cfs_down(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_DETACH: {
err = loop_clr_fd(lo, bdev, 2);
err = -EINVAL;
break;
}
- up(&lloop_mutex);
+ cfs_up(&lloop_mutex);
return err;
}
CWARN("Enter llop_ioctl\n");
- down(&lloop_mutex);
+ cfs_down(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_ATTACH: {
struct lloop_device *lo_free = NULL;
}
out:
- up(&lloop_mutex);
+ cfs_up(&lloop_mutex);
out1:
if (rcp)
*rcp = err;
goto out_mem3;
}
- init_MUTEX(&lloop_mutex);
+ cfs_init_mutex(&lloop_mutex);
for (i = 0; i < max_loop; i++) {
struct lloop_device *lo = &loop_dev[i];
if (!lo->lo_queue)
goto out_mem4;
- init_MUTEX(&lo->lo_ctl_mutex);
- init_MUTEX_LOCKED(&lo->lo_sem);
- init_waitqueue_head(&lo->lo_bh_wait);
+ cfs_init_mutex(&lo->lo_ctl_mutex);
+ cfs_init_mutex_locked(&lo->lo_sem);
+ cfs_waitq_init(&lo->lo_bh_wait);
lo->lo_number = i;
- spin_lock_init(&lo->lo_lock);
+ cfs_spin_lock_init(&lo->lo_lock);
disk->major = lloop_major;
disk->first_minor = i;
disk->fops = &lo_fops;
int rc;
LASSERT(sb != NULL);
- rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+ rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
*eof = 1;
int rc;
LASSERT(sb != NULL);
- rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+ rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int rc;
LASSERT(sb != NULL);
- rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+ rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int rc;
LASSERT(sb != NULL);
- rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+ rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int rc;
LASSERT(sb != NULL);
- rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+ rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
*eof = 1;
int rc;
LASSERT(sb != NULL);
- rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - HZ,
+ rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
*eof = 1;
long pages_number;
int mult;
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
pages_number = sbi->ll_ra_info.ra_max_pages;
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
mult = 1 << (20 - PAGE_CACHE_SHIFT);
return lprocfs_read_frac_helper(page, count, pages_number, mult);
}
static int ll_wr_max_readahead_mb(struct file *file, const char *buffer,
- unsigned long count, void *data)
+ unsigned long count, void *data)
{
struct super_block *sb = data;
struct ll_sb_info *sbi = ll_s2sbi(sb);
if (rc)
return rc;
- if (pages_number < 0 || pages_number > num_physpages / 2) {
+ if (pages_number < 0 || pages_number > cfs_num_physpages / 2) {
CERROR("can't set file readahead more than %lu MB\n",
- num_physpages >> (20 - CFS_PAGE_SHIFT + 1)); /*1/2 of RAM*/
+ cfs_num_physpages >> (20 - CFS_PAGE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
}
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
sbi->ll_ra_info.ra_max_pages = pages_number;
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
return count;
}
static int ll_rd_max_readahead_per_file_mb(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
struct super_block *sb = data;
struct ll_sb_info *sbi = ll_s2sbi(sb);
long pages_number;
int mult;
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
mult = 1 << (20 - CFS_PAGE_SHIFT);
return lprocfs_read_frac_helper(page, count, pages_number, mult);
if (pages_number < 0 ||
pages_number > sbi->ll_ra_info.ra_max_pages) {
CERROR("can't set file readahead more than"
- "max_read_ahead_mb %lu MB\n", sbi->ll_ra_info.ra_max_pages);
+ "max_read_ahead_mb %lu MB\n",
+ sbi->ll_ra_info.ra_max_pages);
return -ERANGE;
}
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
return count;
}
static int ll_rd_max_read_ahead_whole_mb(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
struct super_block *sb = data;
struct ll_sb_info *sbi = ll_s2sbi(sb);
long pages_number;
int mult;
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
mult = 1 << (20 - CFS_PAGE_SHIFT);
return lprocfs_read_frac_helper(page, count, pages_number, mult);
}
static int ll_wr_max_read_ahead_whole_mb(struct file *file, const char *buffer,
- unsigned long count, void *data)
+ unsigned long count, void *data)
{
struct super_block *sb = data;
struct ll_sb_info *sbi = ll_s2sbi(sb);
return -ERANGE;
}
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
return count;
}
long pages_number;
int mult;
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
pages_number = sbi->ll_async_page_max;
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
mult = 1 << (20 - CFS_PAGE_SHIFT);
return lprocfs_read_frac_helper(page, count, pages_number, mult);;
}
static int ll_wr_max_cached_mb(struct file *file, const char *buffer,
- unsigned long count, void *data)
+ unsigned long count, void *data)
{
struct super_block *sb = data;
struct ll_sb_info *sbi = ll_s2sbi(sb);
if (rc)
return rc;
- if (pages_number < 0 || pages_number > num_physpages) {
+ if (pages_number < 0 || pages_number > cfs_num_physpages) {
CERROR("can't set max cache more than %lu MB\n",
- num_physpages >> (20 - CFS_PAGE_SHIFT));
+ cfs_num_physpages >> (20 - CFS_PAGE_SHIFT));
return -ERANGE;
}
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
sbi->ll_async_page_max = pages_number ;
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
if (!sbi->ll_dt_exp)
/* Not set up yet, don't call llap_shrink_cache */
struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
int k;
- do_gettimeofday(&now);
+ cfs_gettimeofday(&now);
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
"extents", "calls", "%", "cum%",
"calls", "%", "cum%");
- spin_lock(&sbi->ll_pp_extent_lock);
+ cfs_spin_lock(&sbi->ll_pp_extent_lock);
for(k = 0; k < LL_PROCESS_HIST_MAX; k++) {
if(io_extents->pp_extents[k].pid != 0) {
seq_printf(seq, "\nPID: %d\n",
ll_display_extents_info(io_extents, seq, k);
}
}
- spin_unlock(&sbi->ll_pp_extent_lock);
+ cfs_spin_unlock(&sbi->ll_pp_extent_lock);
return 0;
}
else
sbi->ll_rw_stats_on = 1;
- spin_lock(&sbi->ll_pp_extent_lock);
+ cfs_spin_lock(&sbi->ll_pp_extent_lock);
for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
io_extents->pp_extents[i].pid = 0;
lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
}
- spin_unlock(&sbi->ll_pp_extent_lock);
+ cfs_spin_unlock(&sbi->ll_pp_extent_lock);
return len;
}
struct ll_sb_info *sbi = seq->private;
struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- do_gettimeofday(&now);
+ cfs_gettimeofday(&now);
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
"extents", "calls", "%", "cum%",
"calls", "%", "cum%");
- spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&sbi->ll_lock);
ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
return 0;
}
sbi->ll_rw_stats_on = 0;
else
sbi->ll_rw_stats_on = 1;
- spin_lock(&sbi->ll_pp_extent_lock);
+ cfs_spin_lock(&sbi->ll_pp_extent_lock);
for(i = 0; i <= LL_PROCESS_HIST_MAX; i++)
{
io_extents->pp_extents[i].pid = 0;
lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
}
- spin_unlock(&sbi->ll_pp_extent_lock);
+ cfs_spin_unlock(&sbi->ll_pp_extent_lock);
return len;
}
process = sbi->ll_rw_process_info;
offset = sbi->ll_rw_offset_info;
- spin_lock(&sbi->ll_pp_extent_lock);
+ cfs_spin_lock(&sbi->ll_pp_extent_lock);
/* Extent statistics */
for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
if(io_extents->pp_extents[i].pid == pid) {
io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
}
- spin_unlock(&sbi->ll_pp_extent_lock);
+ cfs_spin_unlock(&sbi->ll_pp_extent_lock);
- spin_lock(&sbi->ll_process_lock);
+ cfs_spin_lock(&sbi->ll_process_lock);
/* Offset statistics */
for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
if (process[i].rw_pid == pid) {
process[i].rw_largest_extent = count;
process[i].rw_offset = 0;
process[i].rw_last_file = file;
- spin_unlock(&sbi->ll_process_lock);
+ cfs_spin_unlock(&sbi->ll_process_lock);
return;
}
if (process[i].rw_last_file_pos != pos) {
if(process[i].rw_largest_extent < count)
process[i].rw_largest_extent = count;
process[i].rw_last_file_pos = pos + count;
- spin_unlock(&sbi->ll_process_lock);
+ cfs_spin_unlock(&sbi->ll_process_lock);
return;
}
}
process[*process_count].rw_largest_extent = count;
process[*process_count].rw_offset = 0;
process[*process_count].rw_last_file = file;
- spin_unlock(&sbi->ll_process_lock);
+ cfs_spin_unlock(&sbi->ll_process_lock);
}
static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
struct ll_rw_process_info *process = sbi->ll_rw_process_info;
int i;
- do_gettimeofday(&now);
+ cfs_gettimeofday(&now);
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
"then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
}
- spin_lock(&sbi->ll_process_lock);
+ cfs_spin_lock(&sbi->ll_process_lock);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
now.tv_sec, now.tv_usec);
(unsigned long)process[i].rw_largest_extent,
process[i].rw_offset);
}
- spin_unlock(&sbi->ll_process_lock);
+ cfs_spin_unlock(&sbi->ll_process_lock);
return 0;
}
else
sbi->ll_rw_stats_on = 1;
- spin_lock(&sbi->ll_process_lock);
+ cfs_spin_lock(&sbi->ll_process_lock);
sbi->ll_offset_process_count = 0;
sbi->ll_rw_offset_entry_count = 0;
memset(process_info, 0, sizeof(struct ll_rw_process_info) *
LL_PROCESS_HIST_MAX);
memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
LL_OFFSET_HIST_MAX);
- spin_unlock(&sbi->ll_process_lock);
+ cfs_spin_unlock(&sbi->ll_process_lock);
return len;
}
__u32 ll_i2suppgid(struct inode *i)
{
- if (in_group_p(i->i_gid))
+ if (cfs_curproc_is_in_groups(i->i_gid))
return (__u32)i->i_gid;
else
return (__u32)(-1);
OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL);
if (lrp)
- INIT_HLIST_NODE(&lrp->lrp_list);
+ CFS_INIT_HLIST_NODE(&lrp->lrp_list);
return lrp;
}
if (!lrp)
return;
- if (!hlist_unhashed(&lrp->lrp_list))
- hlist_del(&lrp->lrp_list);
+ if (!cfs_hlist_unhashed(&lrp->lrp_list))
+ cfs_hlist_del(&lrp->lrp_list);
OBD_SLAB_FREE(lrp, ll_remote_perm_cachep, sizeof(*lrp));
}
-struct hlist_head *alloc_rmtperm_hash(void)
+cfs_hlist_head_t *alloc_rmtperm_hash(void)
{
- struct hlist_head *hash;
+ cfs_hlist_head_t *hash;
int i;
OBD_SLAB_ALLOC(hash, ll_rmtperm_hash_cachep, GFP_KERNEL,
return NULL;
for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- INIT_HLIST_HEAD(hash + i);
+ CFS_INIT_HLIST_HEAD(hash + i);
return hash;
}
-void free_rmtperm_hash(struct hlist_head *hash)
+void free_rmtperm_hash(cfs_hlist_head_t *hash)
{
int i;
struct ll_remote_perm *lrp;
- struct hlist_node *node, *next;
+ cfs_hlist_node_t *node, *next;
if(!hash)
return;
for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
+ cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
+ lrp_list)
free_ll_remote_perm(lrp);
OBD_SLAB_FREE(hash, ll_rmtperm_hash_cachep,
REMOTE_PERM_HASHSIZE * sizeof(*hash));
* MDT when client get remote permission. */
static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
{
- struct hlist_head *head;
+ cfs_hlist_head_t *head;
struct ll_remote_perm *lrp;
- struct hlist_node *node;
+ cfs_hlist_node_t *node;
int found = 0, rc;
ENTRY;
head = lli->lli_remote_perms + remote_perm_hashfunc(current->uid);
- spin_lock(&lli->lli_lock);
- hlist_for_each_entry(lrp, node, head, lrp_list) {
+ cfs_spin_lock(&lli->lli_lock);
+ cfs_hlist_for_each_entry(lrp, node, head, lrp_list) {
if (lrp->lrp_uid != current->uid)
continue;
if (lrp->lrp_gid != current->gid)
rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
out:
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
return rc;
}
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_remote_perm *lrp = NULL, *tmp = NULL;
- struct hlist_head *head, *perm_hash = NULL;
- struct hlist_node *node;
+ cfs_hlist_head_t *head, *perm_hash = NULL;
+ cfs_hlist_node_t *node;
ENTRY;
LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
}
}
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (!lli->lli_remote_perms)
lli->lli_remote_perms = perm_hash;
head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
again:
- hlist_for_each_entry(tmp, node, head, lrp_list) {
+ cfs_hlist_for_each_entry(tmp, node, head, lrp_list) {
if (tmp->lrp_uid != perm->rp_uid)
continue;
if (tmp->lrp_gid != perm->rp_gid)
}
if (!lrp) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
lrp = alloc_ll_remote_perm();
if (!lrp) {
CERROR("alloc memory for ll_remote_perm failed!\n");
RETURN(-ENOMEM);
}
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
goto again;
}
lrp->lrp_gid = perm->rp_gid;
lrp->lrp_fsuid = perm->rp_fsuid;
lrp->lrp_fsgid = perm->rp_fsgid;
- hlist_add_head(&lrp->lrp_list, head);
+ cfs_hlist_add_head(&lrp->lrp_list, head);
}
lli->lli_rmtperm_utime = jiffies;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
if (!rc || (rc != -ENOENT && i))
break;
- might_sleep();
+ cfs_might_sleep();
- down(&lli->lli_rmtperm_sem);
+ cfs_down(&lli->lli_rmtperm_sem);
/* check again */
if (utime != lli->lli_rmtperm_utime) {
rc = do_check_remote_perm(lli, mask);
if (!rc || (rc != -ENOENT && i)) {
- up(&lli->lli_rmtperm_sem);
+ cfs_up(&lli->lli_rmtperm_sem);
break;
}
}
ll_i2suppgid(inode), &req);
capa_put(oc);
if (rc) {
- up(&lli->lli_rmtperm_sem);
+ cfs_up(&lli->lli_rmtperm_sem);
break;
}
perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
lustre_swab_mdt_remote_perm);
if (unlikely(perm == NULL)) {
- up(&lli->lli_rmtperm_sem);
+ cfs_up(&lli->lli_rmtperm_sem);
rc = -EPROTO;
break;
}
rc = ll_update_remote_perm(inode, perm);
- up(&lli->lli_rmtperm_sem);
+ cfs_up(&lli->lli_rmtperm_sem);
if (rc == -ENOMEM)
break;
void ll_free_remote_perms(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct hlist_head *hash = lli->lli_remote_perms;
+ cfs_hlist_head_t *hash = lli->lli_remote_perms;
struct ll_remote_perm *lrp;
- struct hlist_node *node, *next;
+ cfs_hlist_node_t *node, *next;
int i;
LASSERT(hash);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
- hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
+ cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
+ lrp_list)
free_ll_remote_perm(lrp);
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
#endif
* otherwise it will form small read RPC(< 1M), which hurt server
* performance a lot.
*/
- ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), len);
+ ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
GOTO(out, ret = 0);
- if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
- atomic_sub(ret, &ra->ra_cur_pages);
+ if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+ cfs_atomic_sub(ret, &ra->ra_cur_pages);
ret = 0;
}
out:
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
- atomic_sub(len, &ra->ra_cur_pages);
+ cfs_atomic_sub(len, &ra->ra_cur_pages);
}
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
ras->ras_requests++;
ras->ras_request_index = 0;
ras->ras_consecutive_requests++;
rar->lrr_reader = current;
- list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- spin_unlock(&ras->ras_lock);
+ cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ cfs_spin_unlock(&ras->ras_lock);
}
void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
- list_del_init(&rar->lrr_linkage);
- spin_unlock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
+ cfs_list_del_init(&rar->lrr_linkage);
+ cfs_spin_unlock(&ras->ras_lock);
}
static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
struct ll_ra_read *scan;
- list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
if (scan->lrr_reader == current)
return scan;
}
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
bead = ll_ra_read_get_locked(ras);
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
return bead;
}
RETURN(0);
}
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
if (vio->cui_ra_window_set)
bead = &vio->cui_bead;
else
ria->ria_length = ras->ras_stride_length;
ria->ria_pages = ras->ras_stride_pages;
}
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
if (end == 0) {
ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
ra_end, end, ria->ria_end);
if (ra_end != end + 1) {
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
if (ra_end < ras->ras_next_readahead &&
index_in_window(ra_end, ras->ras_window_start, 0,
ras->ras_window_len)) {
ras->ras_next_readahead = ra_end;
RAS_CDEBUG(ras);
}
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
}
RETURN(ret);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
- spin_lock_init(&ras->ras_lock);
+ cfs_spin_lock_init(&ras->ras_lock);
ras_reset(ras, 0);
ras->ras_requests = 0;
- INIT_LIST_HEAD(&ras->ras_read_beads);
+ CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
int zero = 0, stride_detect = 0, ra_miss = 0;
ENTRY;
- spin_lock(&sbi->ll_lock);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&ras->ras_lock);
ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
out_unlock:
RAS_CDEBUG(ras);
ras->ras_request_index++;
- spin_unlock(&ras->ras_lock);
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
return;
}
#include "llite_internal.h"
struct ll_sai_entry {
- struct list_head se_list;
+ cfs_list_t se_list;
unsigned int se_index;
int se_stat;
struct ptlrpc_request *se_req;
};
static unsigned int sai_generation = 0;
-static spinlock_t sai_generation_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED;
/**
* Check whether first entry was stated already or not.
struct ll_sai_entry *entry;
int rc = 0;
- if (!list_empty(&sai->sai_entries_stated)) {
- entry = list_entry(sai->sai_entries_stated.next,
- struct ll_sai_entry, se_list);
+ if (!cfs_list_empty(&sai->sai_entries_stated)) {
+ entry = cfs_list_entry(sai->sai_entries_stated.next,
+ struct ll_sai_entry, se_list);
if (entry->se_index == sai->sai_index_next)
rc = 1;
}
static inline int sa_received_empty(struct ll_statahead_info *sai)
{
- return list_empty(&sai->sai_entries_received);
+ return cfs_list_empty(&sai->sai_entries_received);
}
static inline int sa_not_full(struct ll_statahead_info *sai)
ptlrpc_req_finished(req);
}
if (free) {
- LASSERT(list_empty(&entry->se_list));
+ LASSERT(cfs_list_empty(&entry->se_list));
OBD_FREE_PTR(entry);
}
if (!sai)
return NULL;
- spin_lock(&sai_generation_lock);
+ cfs_spin_lock(&sai_generation_lock);
sai->sai_generation = ++sai_generation;
if (unlikely(sai_generation == 0))
sai->sai_generation = ++sai_generation;
- spin_unlock(&sai_generation_lock);
- atomic_set(&sai->sai_refcount, 1);
+ cfs_spin_unlock(&sai_generation_lock);
+ cfs_atomic_set(&sai->sai_refcount, 1);
sai->sai_max = LL_SA_RPC_MIN;
cfs_waitq_init(&sai->sai_waitq);
cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
struct ll_statahead_info *ll_sai_get(struct ll_statahead_info *sai)
{
LASSERT(sai);
- atomic_inc(&sai->sai_refcount);
+ cfs_atomic_inc(&sai->sai_refcount);
return sai;
}
lli = ll_i2info(inode);
LASSERT(lli->lli_sai == sai);
- if (atomic_dec_and_test(&sai->sai_refcount)) {
+ if (cfs_atomic_dec_and_test(&sai->sai_refcount)) {
struct ll_sai_entry *entry, *next;
- spin_lock(&lli->lli_lock);
- if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
+ cfs_spin_lock(&lli->lli_lock);
+ if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
/* It is race case, the interpret callback just hold
* a reference count */
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
EXIT;
return;
}
LASSERT(lli->lli_opendir_key == NULL);
lli->lli_sai = NULL;
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
LASSERT(sa_is_stopped(sai));
PFID(&lli->lli_fid),
sai->sai_sent, sai->sai_replied);
- list_for_each_entry_safe(entry, next, &sai->sai_entries_sent,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_sent, se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
- list_for_each_entry_safe(entry, next, &sai->sai_entries_received,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_received,
+ se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
- list_for_each_entry_safe(entry, next, &sai->sai_entries_stated,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_stated,
+ se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
iput(inode);
entry->se_index = index;
entry->se_stat = SA_ENTRY_UNSTATED;
- spin_lock(&lli->lli_lock);
- list_add_tail(&entry->se_list, &sai->sai_entries_sent);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
+ cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(entry);
}
int rc = 0;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
sai->sai_index_next++;
- if (likely(!list_empty(&sai->sai_entries_stated))) {
- entry = list_entry(sai->sai_entries_stated.next,
- struct ll_sai_entry, se_list);
+ if (likely(!cfs_list_empty(&sai->sai_entries_stated))) {
+ entry = cfs_list_entry(sai->sai_entries_stated.next,
+ struct ll_sai_entry, se_list);
if (entry->se_index < sai->sai_index_next) {
- list_del(&entry->se_list);
+ cfs_list_del(&entry->se_list);
rc = entry->se_stat;
OBD_FREE_PTR(entry);
}
} else {
LASSERT(sa_is_stopped(sai));
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(rc);
}
struct ll_sai_entry *entry;
ENTRY;
- if (!list_empty(&sai->sai_entries_sent)) {
- list_for_each_entry(entry, &sai->sai_entries_sent, se_list) {
+ if (!cfs_list_empty(&sai->sai_entries_sent)) {
+ cfs_list_for_each_entry(entry, &sai->sai_entries_sent,
+ se_list) {
if (entry->se_index == index) {
entry->se_stat = stat;
entry->se_req = ptlrpc_request_addref(req);
static inline void
ll_sai_entry_to_received(struct ll_statahead_info *sai, struct ll_sai_entry *entry)
{
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
- list_add_tail(&entry->se_list, &sai->sai_entries_received);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
+ cfs_list_add_tail(&entry->se_list, &sai->sai_entries_received);
}
/**
ll_sai_entry_cleanup(entry, 0);
- spin_lock(&lli->lli_lock);
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
+ cfs_spin_lock(&lli->lli_lock);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
if (unlikely(entry->se_index < sai->sai_index_next)) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
OBD_FREE_PTR(entry);
RETURN(0);
}
- list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+ cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
if (se->se_index < entry->se_index) {
- list_add(&entry->se_list, &se->se_list);
- spin_unlock(&lli->lli_lock);
+ cfs_list_add(&entry->se_list, &se->se_list);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(1);
}
}
/*
* I am the first entry.
*/
- list_add(&entry->se_list, &sai->sai_entries_stated);
- spin_unlock(&lli->lli_lock);
+ cfs_list_add(&entry->se_list, &sai->sai_entries_stated);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(1);
}
struct mdt_body *body;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
LASSERT(!sa_received_empty(sai));
- entry = list_entry(sai->sai_entries_received.next, struct ll_sai_entry,
- se_list);
- list_del_init(&entry->se_list);
- spin_unlock(&lli->lli_lock);
+ entry = cfs_list_entry(sai->sai_entries_received.next,
+ struct ll_sai_entry, se_list);
+ cfs_list_del_init(&entry->se_list);
+ cfs_spin_unlock(&lli->lli_lock);
if (unlikely(entry->se_index < sai->sai_index_next)) {
CWARN("Found stale entry: [index %u] [next %u]\n",
CDEBUG(D_READA, "interpret statahead %.*s rc %d\n",
dentry->d_name.len, dentry->d_name.name, rc);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (unlikely(lli->lli_sai == NULL ||
lli->lli_sai->sai_generation != minfo->mi_generation)) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
ll_intent_release(it);
dput(dentry);
iput(dir);
if (likely(sa_is_running(sai))) {
ll_sai_entry_to_received(sai, entry);
sai->sai_replied++;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
} else {
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
sai->sai_replied++;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
ll_sai_entry_cleanup(entry, 1);
}
ll_sai_put(sai);
}
atomic_inc(&sbi->ll_sa_total);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
thread->t_flags = SVC_RUNNING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "start doing statahead for %s\n", parent->d_name.name);
out:
ll_dir_chain_fini(&chain);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
thread->t_flags = SVC_STOPPED;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cfs_waitq_signal(&sai->sai_waitq);
cfs_waitq_signal(&thread->t_ctl_waitq);
ll_sai_put(sai);
if (unlikely(key == NULL))
return;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
return;
}
if (!sa_is_stopped(lli->lli_sai)) {
thread->t_flags = SVC_STOPPING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "stopping statahead thread, pid %d\n",
sa_is_stopped(lli->lli_sai),
&lwi);
} else {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
/*
ll_sai_put(lli->lli_sai);
} else {
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
}
if (sai) {
if (unlikely(sa_is_stopped(sai) &&
- list_empty(&sai->sai_entries_stated)))
+ cfs_list_empty(&sai->sai_entries_stated)))
RETURN(-EBADFD);
if ((*dentryp)->d_name.name[0] == '.') {
RETURN(-EEXIST);
out:
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
lli->lli_opendir_key = NULL;
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
return rc;
}
PFID(&lli->lli_fid), sai->sai_hit,
sai->sai_miss, sai->sai_sent,
sai->sai_replied, cfs_curproc_pid());
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (!sa_is_stopped(sai))
sai->sai_thread.t_flags = SVC_STOPPING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
}
{
ll_inode_cachep = cfs_mem_cache_create("lustre_inode_cache",
sizeof(struct ll_inode_info),
- 0, SLAB_HWCACHE_ALIGN);
+ 0, CFS_SLAB_HWCACHE_ALIGN);
if (ll_inode_cachep == NULL)
return -ENOMEM;
return 0;
return -ENOMEM;
ll_file_data_slab = cfs_mem_cache_create("ll_file_data",
sizeof(struct ll_file_data), 0,
- SLAB_HWCACHE_ALIGN);
+ CFS_SLAB_HWCACHE_ALIGN);
if (ll_file_data_slab == NULL) {
ll_destroy_inodecache();
return -ENOMEM;
ll_rmtperm_hash_cachep = cfs_mem_cache_create("ll_rmtperm_hash_cache",
REMOTE_PERM_HASHSIZE *
- sizeof(struct list_head),
+ sizeof(cfs_list_t),
0, 0);
if (ll_rmtperm_hash_cachep == NULL) {
cfs_mem_cache_destroy(ll_remote_perm_cachep);
}
}
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
ll_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
init_timer(&ll_capa_timer);
struct lu_device *dev,
struct vvp_pgcache_id *id)
{
- struct hlist_head *bucket;
+ cfs_hlist_head_t *bucket;
struct lu_object_header *hdr;
struct lu_site *site;
- struct hlist_node *scan;
+ cfs_hlist_node_t *scan;
struct lu_object_header *found;
struct cl_object *clob;
unsigned depth;
clob = NULL;
/* XXX copy of lu_object.c:htable_lookup() */
- read_lock(&site->ls_guard);
- hlist_for_each_entry(hdr, scan, bucket, loh_hash) {
+ cfs_read_lock(&site->ls_guard);
+ cfs_hlist_for_each_entry(hdr, scan, bucket, loh_hash) {
if (depth-- == 0) {
if (!lu_object_is_dying(hdr)) {
- if (atomic_add_return(1, &hdr->loh_ref) == 1)
+ if (cfs_atomic_add_return(1,
+ &hdr->loh_ref) == 1)
++ site->ls_busy;
found = hdr;
}
break;
}
}
- read_unlock(&site->ls_guard);
+ cfs_read_unlock(&site->ls_guard);
if (found != NULL) {
struct lu_object *lu_obj;
/* got an object. Find next page. */
hdr = cl_object_header(clob);
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
nr = radix_tree_gang_lookup(&hdr->coh_tree,
(void **)&pg,
id.vpi_index, 1);
/* Cant support over 16T file */
nr = !(pg->cp_index > 0xffffffff);
}
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
lu_object_ref_del(&clob->co_lu, "dump", cfs_current());
cl_object_put(env, clob);
}
#define seq_page_flag(seq, page, flag, has_flags) do { \
- if (test_bit(PG_##flag, &(page)->flags)) { \
+ if (cfs_test_bit(PG_##flag, &(page)->flags)) { \
seq_printf(seq, "%s"#flag, has_flags ? "|" : ""); \
has_flags = 1; \
} \
if (clob != NULL) {
hdr = cl_object_header(clob);
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
page = cl_page_lookup(hdr, id.vpi_index);
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
seq_printf(f, "%8x@"DFID": ",
id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
struct cl_object_header *hdr;
hdr = cl_object_header(obj);
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
vio->cui_partpage = cl_page_lookup(hdr, start);
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
if (vio->cui_partpage != NULL)
/*
struct ccc_object *cob = cl2ccc(slice->cls_obj);
ENTRY;
- RETURN(atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0);
+ RETURN(cfs_atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0);
}
static const struct cl_lock_operations vvp_lock_ops = {
struct ll_inode_info *lli;
(*p)(env, cookie, "(%s %i %i) inode: %p ",
- list_empty(&obj->cob_pending_list) ? "-" : "+",
- obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt), inode);
+ cfs_list_empty(&obj->cob_pending_list) ? "-" : "+",
+ obj->cob_transient_pages, cfs_atomic_read(&obj->cob_mmap_cnt),
+ inode);
if (inode) {
lli = ll_i2info(inode);
(*p)(env, cookie, "%lu/%u %o %u %i %p "DFID,
struct ll_inode_info *lli = ll_i2info(inode);
struct posix_acl *acl;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
if (!acl)
RETURN(-ENODATA);
#define LMV_MAX_TGT_COUNT 128
-#define lmv_init_lock(lmv) down(&lmv->init_sem);
-#define lmv_init_unlock(lmv) up(&lmv->init_sem);
+#define lmv_init_lock(lmv) cfs_down(&lmv->init_sem);
+#define lmv_init_unlock(lmv) cfs_up(&lmv->init_sem);
#define LL_IT2STR(it) \
((it) ? ldlm_it2str((it)->it_op) : "0")
/**
* Link to global objects list.
*/
- struct list_head lo_list;
+ cfs_list_t lo_list;
/**
* Sema for protecting fields.
*/
- struct semaphore lo_guard;
+ cfs_semaphore_t lo_guard;
/**
* Object state like O_FREEING.
*/
/**
* Object ref counter.
*/
- atomic_t lo_count;
+ cfs_atomic_t lo_count;
/**
* Object master fid.
*/
lmv_object_lock(struct lmv_object *obj)
{
LASSERT(obj);
- down(&obj->lo_guard);
+ cfs_down(&obj->lo_guard);
}
static inline void
lmv_object_unlock(struct lmv_object *obj)
{
LASSERT(obj);
- up(&obj->lo_guard);
+ cfs_up(&obj->lo_guard);
}
void lmv_object_add(struct lmv_object *obj);
/* object cache. */
cfs_mem_cache_t *lmv_object_cache;
-atomic_t lmv_object_count = ATOMIC_INIT(0);
+cfs_atomic_t lmv_object_count = CFS_ATOMIC_INIT(0);
static void lmv_activate_target(struct lmv_obd *lmv,
struct lmv_tgt_desc *tgt,
CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
lmv, uuid->uuid, activate);
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
if (tgt->ltd_exp == NULL)
continue;
EXIT;
out_lmv_lock:
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
return rc;
}
LASSERT(data != NULL);
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
if (tgt->ltd_exp == NULL)
continue;
break;
}
}
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
RETURN(0);
}
CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
+ cfs_atomic_read(&obd->obd_refcount));
#ifdef __KERNEL__
lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
CERROR("lmv failed to setup llogging subsystems\n");
}
}
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
tgt->ltd_uuid = *tgt_uuid;
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
if (lmv->connected) {
rc = lmv_connect_mdc(obd, tgt);
if (rc) {
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
lmv->desc.ld_tgt_count--;
memset(tgt, 0, sizeof(*tgt));
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
} else {
int easize = sizeof(struct lmv_stripe_md) +
lmv->desc.ld_tgt_count *
RETURN(-EINVAL);
rc = obd_statfs(mdc_obd, &stat_buf,
- cfs_time_current_64() - HZ, 0);
+ cfs_time_current_64() - CFS_HZ, 0);
if (rc)
RETURN(rc);
- if (copy_to_user(data->ioc_pbuf1, &stat_buf, data->ioc_plen1))
+ if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+ data->ioc_plen1))
RETURN(-EFAULT);
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
- data->ioc_plen2))
+ if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
+ data->ioc_plen2))
RETURN(-EFAULT);
break;
}
* New seq alloc and FLD setup should be atomic. Otherwise we may find
* on server that seq in new allocated fid is not yet known.
*/
- down(&tgt->ltd_fid_sem);
+ cfs_down(&tgt->ltd_fid_sem);
if (!tgt->ltd_active)
GOTO(out, rc = -ENODEV);
EXIT;
out:
- up(&tgt->ltd_fid_sem);
+ cfs_up(&tgt->ltd_fid_sem);
return rc;
}
RETURN(-ENOMEM);
for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
- sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
+ cfs_sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
lmv->tgts[i].ltd_idx = i;
}
lmv->max_easize = 0;
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
- spin_lock_init(&lmv->lmv_lock);
- sema_init(&lmv->init_sem, 1);
+ cfs_spin_lock_init(&lmv->lmv_lock);
+ cfs_sema_init(&lmv->init_sem, 1);
rc = lmv_object_setup(obd);
if (rc) {
lprocfs_lmv_init_vars(&lvars);
- request_module("lquota");
+ cfs_request_module("lquota");
quota_interface = PORTAL_SYMBOL_GET(lmv_quota_interface);
init_obd_quota_ops(quota_interface, &lmv_obd_ops);
class_unregister_type(LUSTRE_LMV_NAME);
- LASSERTF(atomic_read(&lmv_object_count) == 0,
+ LASSERTF(cfs_atomic_read(&lmv_object_count) == 0,
"Can't free lmv objects cache, %d object(s) busy\n",
- atomic_read(&lmv_object_count));
+ cfs_atomic_read(&lmv_object_count));
cfs_mem_cache_destroy(lmv_object_cache);
}
#include "lmv_internal.h"
extern cfs_mem_cache_t *lmv_object_cache;
-extern atomic_t lmv_object_count;
+extern cfs_atomic_t lmv_object_count;
static CFS_LIST_HEAD(obj_list);
-static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t obj_list_lock = CFS_SPIN_LOCK_UNLOCKED;
struct lmv_object *lmv_object_alloc(struct obd_device *obd,
const struct lu_fid *fid,
if (!obj)
return NULL;
- atomic_inc(&lmv_object_count);
+ cfs_atomic_inc(&lmv_object_count);
obj->lo_fid = *fid;
obj->lo_obd = obd;
obj->lo_state = 0;
obj->lo_hashtype = mea->mea_magic;
- init_MUTEX(&obj->lo_guard);
- atomic_set(&obj->lo_count, 0);
+ cfs_init_mutex(&obj->lo_guard);
+ cfs_atomic_set(&obj->lo_count, 0);
obj->lo_objcount = mea->mea_count;
obj_size = sizeof(struct lmv_stripe) *
struct lmv_obd *lmv = &obj->lo_obd->u.lmv;
unsigned int obj_size;
- LASSERT(!atomic_read(&obj->lo_count));
+ LASSERT(!cfs_atomic_read(&obj->lo_count));
obj_size = sizeof(struct lmv_stripe) *
lmv->desc.ld_tgt_count;
OBD_FREE(obj->lo_stripes, obj_size);
OBD_SLAB_FREE(obj, lmv_object_cache, sizeof(*obj));
- atomic_dec(&lmv_object_count);
+ cfs_atomic_dec(&lmv_object_count);
}
static void __lmv_object_add(struct lmv_object *obj)
{
- atomic_inc(&obj->lo_count);
- list_add(&obj->lo_list, &obj_list);
+ cfs_atomic_inc(&obj->lo_count);
+ cfs_list_add(&obj->lo_list, &obj_list);
}
void lmv_object_add(struct lmv_object *obj)
{
- spin_lock(&obj_list_lock);
+ cfs_spin_lock(&obj_list_lock);
__lmv_object_add(obj);
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
}
static void __lmv_object_del(struct lmv_object *obj)
{
- list_del(&obj->lo_list);
+ cfs_list_del(&obj->lo_list);
lmv_object_free(obj);
}
void lmv_object_del(struct lmv_object *obj)
{
- spin_lock(&obj_list_lock);
+ cfs_spin_lock(&obj_list_lock);
__lmv_object_del(obj);
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
}
static struct lmv_object *__lmv_object_get(struct lmv_object *obj)
{
LASSERT(obj != NULL);
- atomic_inc(&obj->lo_count);
+ cfs_atomic_inc(&obj->lo_count);
return obj;
}
struct lmv_object *lmv_object_get(struct lmv_object *obj)
{
- spin_lock(&obj_list_lock);
+ cfs_spin_lock(&obj_list_lock);
__lmv_object_get(obj);
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
return obj;
}
{
LASSERT(obj);
- if (atomic_dec_and_test(&obj->lo_count)) {
+ if (cfs_atomic_dec_and_test(&obj->lo_count)) {
CDEBUG(D_INODE, "Last reference to "DFID" - "
"destroying\n", PFID(&obj->lo_fid));
__lmv_object_del(obj);
void lmv_object_put(struct lmv_object *obj)
{
- spin_lock(&obj_list_lock);
+ cfs_spin_lock(&obj_list_lock);
__lmv_object_put(obj);
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
}
void lmv_object_put_unlock(struct lmv_object *obj)
static struct lmv_object *__lmv_object_find(struct obd_device *obd, const struct lu_fid *fid)
{
struct lmv_object *obj;
- struct list_head *cur;
+ cfs_list_t *cur;
- list_for_each(cur, &obj_list) {
- obj = list_entry(cur, struct lmv_object, lo_list);
+ cfs_list_for_each(cur, &obj_list) {
+ obj = cfs_list_entry(cur, struct lmv_object, lo_list);
- /*
+ /*
* Check if object is in destroying phase. If so - skip
- * it.
+ * it.
*/
if (obj->lo_state & O_FREEING)
continue;
if (obj->lo_obd != obd)
continue;
- /*
- * Check if this is what we're looking for.
+ /*
+ * Check if this is what we're looking for.
*/
if (lu_fid_eq(&obj->lo_fid, fid))
return __lmv_object_get(obj);
struct lmv_object *obj;
ENTRY;
- spin_lock(&obj_list_lock);
+ cfs_spin_lock(&obj_list_lock);
obj = __lmv_object_find(obd, fid);
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
RETURN(obj);
}
* Check if someone created it already while we were dealing with
* allocating @obj.
*/
- spin_lock(&obj_list_lock);
+ cfs_spin_lock(&obj_list_lock);
obj = __lmv_object_find(obd, fid);
if (obj) {
/*
* Someone created it already - put @obj and getting out.
*/
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
lmv_object_free(new);
RETURN(obj);
}
__lmv_object_add(new);
__lmv_object_get(new);
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
CDEBUG(D_INODE, "New obj in lmv cache: "DFID"\n",
PFID(fid));
int rc = 0;
ENTRY;
- spin_lock(&obj_list_lock);
+ cfs_spin_lock(&obj_list_lock);
obj = __lmv_object_find(obd, fid);
if (obj) {
obj->lo_state |= O_FREEING;
__lmv_object_put(obj);
rc = 1;
}
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
RETURN(rc);
}
void lmv_object_cleanup(struct obd_device *obd)
{
- struct list_head *cur;
- struct list_head *tmp;
+ cfs_list_t *cur;
+ cfs_list_t *tmp;
struct lmv_object *obj;
ENTRY;
CDEBUG(D_INFO, "LMV object manager cleanup (%s)\n",
obd->obd_uuid.uuid);
- spin_lock(&obj_list_lock);
- list_for_each_safe(cur, tmp, &obj_list) {
- obj = list_entry(cur, struct lmv_object, lo_list);
+ cfs_spin_lock(&obj_list_lock);
+ cfs_list_for_each_safe(cur, tmp, &obj_list) {
+ obj = cfs_list_entry(cur, struct lmv_object, lo_list);
if (obj->lo_obd != obd)
continue;
obj->lo_state |= O_FREEING;
- if (atomic_read(&obj->lo_count) > 1) {
+ if (cfs_atomic_read(&obj->lo_count) > 1) {
CERROR("Object "DFID" has count (%d)\n",
- PFID(&obj->lo_fid), atomic_read(&obj->lo_count));
+ PFID(&obj->lo_fid),
+ cfs_atomic_read(&obj->lo_count));
}
__lmv_object_put(obj);
}
- spin_unlock(&obj_list_lock);
+ cfs_spin_unlock(&obj_list_lock);
EXIT;
}
placement_policy_t policy;
struct lmv_obd *lmv;
- if (copy_from_user(dummy, buffer, MAX_POLICY_STRING_SIZE))
+ if (cfs_copy_from_user(dummy, buffer, MAX_POLICY_STRING_SIZE))
return -EFAULT;
LASSERT(dev != NULL);
policy = placement_name2policy(dummy, len);
if (policy != PLACEMENT_INVAL_POLICY) {
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
lmv->lmv_placement = policy;
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
} else {
CERROR("Invalid placement policy \"%s\"!\n", dummy);
return -EINVAL;
* Serializes access to lov_device::ld_emrg in low-memory
* conditions.
*/
- struct mutex ld_mutex;
+ cfs_mutex_t ld_mutex;
};
/**
*
* \see lov_object::lo_type
*/
- struct rw_semaphore lo_type_guard;
+ cfs_rw_semaphore_t lo_type_guard;
/**
* Type of an object. Protected by lov_object::lo_type_guard.
*/
* A linkage into per sub-lock list of all corresponding top-locks,
* hanging off lovsub_lock::lss_parents.
*/
- struct list_head lll_list;
+ cfs_list_t lll_list;
};
/**
* List of top-locks that have given sub-lock as their part. Protected
* by cl_lock::cll_guard mutex.
*/
- struct list_head lss_parents;
+ cfs_list_t lss_parents;
/**
* Top-lock that initiated current operation on this sub-lock. This is
* only set during top-to-bottom lock operations like enqueue, and is
* Linkage into a list (hanging off lov_io::lis_active) of all
* sub-io's active for the current IO iteration.
*/
- struct list_head sub_linkage;
+ cfs_list_t sub_linkage;
/**
* true, iff cl_io_init() was successfully executed against
* lov_io_sub::sub_io.
/**
* List of active sub-io's.
*/
- struct list_head lis_active;
+ cfs_list_t lis_active;
};
struct lov_session {
cfs_mem_cache_t *lov_lock_link_kmem;
/** Lock class of lov_device::ld_mutex. */
-struct lock_class_key cl_lov_device_mutex_class;
+cfs_lock_class_key_t cl_lov_device_mutex_class;
struct lu_kmem_descr lov_caches[] = {
{
struct lu_context_key *key, void *data)
{
struct lov_thread_info *info = data;
- LINVRNT(list_empty(&info->lti_closure.clc_list));
+ LINVRNT(cfs_list_empty(&info->lti_closure.clc_list));
OBD_SLAB_FREE_PTR(info, lov_thread_kmem);
}
OBD_ALLOC(newd, tgt_size * sz);
if (newd != NULL) {
- mutex_lock(&dev->ld_mutex);
+ cfs_mutex_lock(&dev->ld_mutex);
if (sub_size > 0) {
memcpy(newd, dev->ld_target, sub_size * sz);
OBD_FREE(dev->ld_target, sub_size * sz);
if (dev->ld_emrg != NULL)
lov_emerg_free(dev->ld_emrg, sub_size);
dev->ld_emrg = emerg;
- mutex_unlock(&dev->ld_mutex);
+ cfs_mutex_unlock(&dev->ld_mutex);
} else {
lov_emerg_free(emerg, tgt_size);
result = -ENOMEM;
d->ld_ops = &lov_lu_ops;
ld->ld_cl.cd_ops = &lov_cl_ops;
- mutex_init(&ld->ld_mutex);
- lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
+ cfs_mutex_init(&ld->ld_mutex);
+ cfs_lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
/* setup the LOV OBD */
obd = class_name2obd(lustre_cfg_string(cfg, 0));
struct lov_lock_handles {
struct portals_handle llh_handle;
- atomic_t llh_refcount;
+ cfs_atomic_t llh_refcount;
int llh_stripe_count;
struct lustre_handle llh_handles[0];
};
struct obd_info rq_oi;
struct lov_request_set *rq_rqset;
- struct list_head rq_link;
+ cfs_list_t rq_link;
int rq_idx; /* index in lov->tgts array */
int rq_stripe; /* stripe number */
struct lov_request_set {
struct ldlm_enqueue_info*set_ei;
struct obd_info *set_oi;
- atomic_t set_refcount;
+ cfs_atomic_t set_refcount;
struct obd_export *set_exp;
/* XXX: There is @set_exp already, however obd_statfs gets obd_device
only. */
obd_count set_oabufs;
struct brw_page *set_pga;
struct lov_lock_handles *set_lockh;
- struct list_head set_list;
+ cfs_list_t set_list;
cfs_waitq_t set_waitq;
- spinlock_t set_lock;
+ cfs_spinlock_t set_lock;
};
extern cfs_mem_cache_t *lov_oinfo_slab;
static inline void lov_llh_addref(void *llhp)
{
struct lov_lock_handles *llh = llhp;
- atomic_inc(&llh->llh_refcount);
+ cfs_atomic_inc(&llh->llh_refcount);
CDEBUG(D_INFO, "GETting llh %p : new refcount %d\n", llh,
- atomic_read(&llh->llh_refcount));
+ cfs_atomic_read(&llh->llh_refcount));
}
static inline struct lov_lock_handles *lov_llh_new(struct lov_stripe_md *lsm)
sizeof(*llh->llh_handles) * lsm->lsm_stripe_count);
if (llh == NULL)
return NULL;
- atomic_set(&llh->llh_refcount, 2);
+ cfs_atomic_set(&llh->llh_refcount, 2);
llh->llh_stripe_count = lsm->lsm_stripe_count;
CFS_INIT_LIST_HEAD(&llh->llh_handle.h_link);
class_handle_hash(&llh->llh_handle, lov_llh_addref);
static inline void lov_get_reqset(struct lov_request_set *set)
{
LASSERT(set != NULL);
- LASSERT(atomic_read(&set->set_refcount) > 0);
- atomic_inc(&set->set_refcount);
+ LASSERT(cfs_atomic_read(&set->set_refcount) > 0);
+ cfs_atomic_inc(&set->set_refcount);
}
static inline void lov_put_reqset(struct lov_request_set *set)
{
- if (atomic_dec_and_test(&set->set_refcount))
+ if (cfs_atomic_dec_and_test(&set->set_refcount))
lov_finish_set(set);
}
static inline void lov_llh_put(struct lov_lock_handles *llh)
{
CDEBUG(D_INFO, "PUTting llh %p : new refcount %d\n", llh,
- atomic_read(&llh->llh_refcount) - 1);
- LASSERT(atomic_read(&llh->llh_refcount) > 0 &&
- atomic_read(&llh->llh_refcount) < 0x5a5a);
- if (atomic_dec_and_test(&llh->llh_refcount)) {
+ cfs_atomic_read(&llh->llh_refcount) - 1);
+ LASSERT(cfs_atomic_read(&llh->llh_refcount) > 0 &&
+ cfs_atomic_read(&llh->llh_refcount) < 0x5a5a);
+ if (cfs_atomic_dec_and_test(&llh->llh_refcount)) {
class_handle_unhash(&llh->llh_handle);
/* The structure may be held by other threads because RCU.
* -jxiong */
- if (atomic_read(&llh->llh_refcount))
+ if (cfs_atomic_read(&llh->llh_refcount))
return;
OBD_FREE_RCU(llh, sizeof *llh +
sub->sub_borrowed = 0;
if (lio->lis_mem_frozen) {
- LASSERT(mutex_is_locked(&ld->ld_mutex));
+ LASSERT(cfs_mutex_is_locked(&ld->ld_mutex));
sub->sub_io = &ld->ld_emrg[stripe]->emrg_subio;
sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
sub->sub_borrowed = 1;
rc = PTR_ERR(sub);
if (!rc)
- list_add_tail(&sub->sub_linkage, &lio->lis_active);
+ cfs_list_add_tail(&sub->sub_linkage, &lio->lis_active);
else
break;
}
int rc = 0;
ENTRY;
- list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
lov_sub_enter(sub);
rc = iofunc(sub->sub_env, sub->sub_io);
lov_sub_exit(sub);
ENTRY;
rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
LASSERT(rc == 0);
- while (!list_empty(&lio->lis_active))
- list_del_init(lio->lis_active.next);
+ while (!cfs_list_empty(&lio->lis_active))
+ cfs_list_del_init(lio->lis_active.next);
EXIT;
}
* In order to not make things worse, even don't try to
* allocate the memory with __GFP_NOWARN. -jay
*/
- mutex_lock(&ld->ld_mutex);
+ cfs_mutex_lock(&ld->ld_mutex);
lio->lis_mem_frozen = 1;
}
struct lov_io_sub *sub;
struct cl_page_list *sub_qin = QIN(stripe);
- if (list_empty(&sub_qin->pl_pages))
+ if (cfs_list_empty(&sub_qin->pl_pages))
continue;
cl_page_list_splice(sub_qin, &cl2q->c2_qin);
for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
struct cl_page_list *sub_qin = QIN(stripe);
- if (list_empty(&sub_qin->pl_pages))
+ if (cfs_list_empty(&sub_qin->pl_pages))
continue;
cl_page_list_splice(sub_qin, qin);
lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
}
lio->lis_mem_frozen = 0;
- mutex_unlock(&ld->ld_mutex);
+ cfs_mutex_unlock(&ld->ld_mutex);
}
RETURN(rc);
lck->lls_sub[idx].sub_lock = lsl;
lck->lls_nr_filled++;
LASSERT(lck->lls_nr_filled <= lck->lls_nr);
- list_add_tail(&link->lll_list, &lsl->lss_parents);
+ cfs_list_add_tail(&link->lll_list, &lsl->lss_parents);
link->lll_idx = idx;
link->lll_super = lck;
cl_lock_get(parent);
int result = 0;
ENTRY;
- LASSERT(list_empty(&closure->clc_list));
+ LASSERT(cfs_list_empty(&closure->clc_list));
sublock = lls->sub_lock;
child = sublock->lss_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
ENTRY;
- list_del_init(&link->lll_list);
+ cfs_list_del_init(&link->lll_list);
LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
/* yank this sub-lock from parent's array */
lck->lls_sub[link->lll_idx].sub_lock = NULL;
LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
ENTRY;
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
if (scan->lll_super == lck)
RETURN(scan);
}
struct cl_lock_closure *closure;
closure = &lov_env_info(env)->lti_closure;
- LASSERT(list_empty(&closure->clc_list));
+ LASSERT(cfs_list_empty(&closure->clc_list));
cl_lock_closure_init(env, closure, parent, 1);
return closure;
}
struct lov_obd *lov = &obd->u.lov;
/* nobody gets through here until lov_putref is done */
- mutex_down(&lov->lov_lock);
- atomic_inc(&lov->lov_refcount);
- mutex_up(&lov->lov_lock);
+ cfs_mutex_down(&lov->lov_lock);
+ cfs_atomic_inc(&lov->lov_refcount);
+ cfs_mutex_up(&lov->lov_lock);
return;
}
{
struct lov_obd *lov = &obd->u.lov;
- mutex_down(&lov->lov_lock);
+ cfs_mutex_down(&lov->lov_lock);
/* ok to dec to 0 more than once -- ltd_exp's will be null */
- if (atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
+ if (cfs_atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
CFS_LIST_HEAD(kill);
int i;
struct lov_tgt_desc *tgt, *n;
if (!tgt || !tgt->ltd_reap)
continue;
- list_add(&tgt->ltd_kill, &kill);
+ cfs_list_add(&tgt->ltd_kill, &kill);
/* XXX - right now there is a dependency on ld_tgt_count
* being the maximum tgt index for computing the
* mds_max_easize. So we can't shrink it. */
lov->lov_tgts[i] = NULL;
lov->lov_death_row--;
}
- mutex_up(&lov->lov_lock);
+ cfs_mutex_up(&lov->lov_lock);
- list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
- list_del(&tgt->ltd_kill);
+ cfs_list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
+ cfs_list_del(&tgt->ltd_kill);
/* Disconnect */
__lov_del_obd(obd, tgt);
}
} else {
- mutex_up(&lov->lov_lock);
+ cfs_mutex_up(&lov->lov_lock);
}
}
if (tgt_obd == NULL)
RETURN(-EINVAL);
- mutex_down(&lov->lov_lock);
+ cfs_mutex_down(&lov->lov_lock);
if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
tgt = lov->lov_tgts[index];
CERROR("UUID %s already assigned at LOV target index %d\n",
obd_uuid2str(&tgt->ltd_uuid), index);
- mutex_up(&lov->lov_lock);
+ cfs_mutex_up(&lov->lov_lock);
RETURN(-EEXIST);
}
newsize = newsize << 1;
OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
if (newtgts == NULL) {
- mutex_up(&lov->lov_lock);
+ cfs_mutex_up(&lov->lov_lock);
RETURN(-ENOMEM);
}
OBD_ALLOC_PTR(tgt);
if (!tgt) {
- mutex_up(&lov->lov_lock);
+ cfs_mutex_up(&lov->lov_lock);
RETURN(-ENOMEM);
}
rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
if (rc) {
- mutex_up(&lov->lov_lock);
+ cfs_mutex_up(&lov->lov_lock);
OBD_FREE_PTR(tgt);
RETURN(rc);
}
if (index >= lov->desc.ld_tgt_count)
lov->desc.ld_tgt_count = index + 1;
- mutex_up(&lov->lov_lock);
+ cfs_mutex_up(&lov->lov_lock);
CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
index, tgt->ltd_gen, lov->desc.ld_tgt_count);
lov->desc = *desc;
lov->lov_tgt_size = 0;
- sema_init(&lov->lov_lock, 1);
- atomic_set(&lov->lov_refcount, 0);
+ cfs_sema_init(&lov->lov_lock, 1);
+ cfs_atomic_set(&lov->lov_refcount, 0);
CFS_INIT_LIST_HEAD(&lov->lov_qos.lq_oss_list);
- init_rwsem(&lov->lov_qos.lq_rw_sem);
+ cfs_init_rwsem(&lov->lov_qos.lq_rw_sem);
lov->lov_sp_me = LUSTRE_SP_CLI;
lov->lov_qos.lq_dirty = 1;
lov->lov_qos.lq_rr.lqr_dirty = 1;
static int lov_cleanup(struct obd_device *obd)
{
struct lov_obd *lov = &obd->u.lov;
- struct list_head *pos, *tmp;
+ cfs_list_t *pos, *tmp;
struct pool_desc *pool;
- list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
- pool = list_entry(pos, struct pool_desc, pool_list);
+ cfs_list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
+ pool = cfs_list_entry(pos, struct pool_desc, pool_list);
/* free pool structs */
CDEBUG(D_INFO, "delete pool %p\n", pool);
lov_pool_del(obd, pool->pool_name);
/* Inactive targets may never have connected */
if (lov->lov_tgts[i]->ltd_active ||
- atomic_read(&lov->lov_refcount))
+ cfs_atomic_read(&lov->lov_refcount))
/* We should never get here - these
should have been removed in the
disconnect. */
CERROR("lov tgt %d not cleaned!"
" deathrow=%d, lovrc=%d\n",
i, lov->lov_death_row,
- atomic_read(&lov->lov_refcount));
+ cfs_atomic_read(&lov->lov_refcount));
lov_del_target(obd, i, 0, 0);
}
obd_putref(obd);
}
#ifndef log2
-#define log2(n) ffz(~(n))
+#define log2(n) cfs_ffz(~(n))
#endif
static int lov_clear_orphans(struct obd_export *export, struct obdo *src_oa,
* later in alloc_qos(), we will wait for those rpcs to complete if
* the osfs age is older than 2 * qos_maxage */
qos_statfs_update(exp->exp_obd,
- cfs_time_shift_64(-lov->desc.ld_qos_maxage) + HZ, 0);
+ cfs_time_shift_64(-lov->desc.ld_qos_maxage) + CFS_HZ,
+ 0);
rc = lov_prep_create_set(exp, &oinfo, ea, src_oa, oti, &set);
if (rc)
GOTO(out, rc);
- list_for_each_entry(req, &set->set_list, rq_link) {
+ cfs_list_for_each_entry(req, &set->set_list, rq_link) {
/* XXX: LOV STACKING: use real "obj_mdp" sub-data */
rc = obd_create_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, &req->rq_oi.oi_md, oti);
struct lov_request_set *set;
struct obd_info oinfo;
struct lov_request *req;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_obd *lov;
int rc = 0, err = 0;
ENTRY;
if (rc)
GOTO(out, rc);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
if (oa->o_valid & OBD_MD_FLCOOKIE)
oti->oti_logcookies = set->set_cookies + req->rq_stripe;
{
struct lov_request_set *set;
struct lov_request *req;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_obd *lov;
int err = 0, rc = 0;
ENTRY;
if (rc)
RETURN(rc);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
CDEBUG(D_INFO, "objid "LPX64"[%d] has subobj "LPX64" at idx "
"%u\n", oinfo->oi_oa->o_id, req->rq_stripe,
{
struct lov_request_set *lovset;
struct lov_obd *lov;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_request *req;
int rc = 0, err;
ENTRY;
oinfo->oi_md->lsm_object_id, oinfo->oi_md->lsm_stripe_count,
oinfo->oi_md->lsm_stripe_size);
- list_for_each (pos, &lovset->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &lovset->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
CDEBUG(D_INFO, "objid "LPX64"[%d] has subobj "LPX64" at idx "
"%u\n", oinfo->oi_oa->o_id, req->rq_stripe,
}
}
- if (!list_empty(&rqset->set_requests)) {
+ if (!cfs_list_empty(&rqset->set_requests)) {
LASSERT(rc == 0);
LASSERT (rqset->set_interpret == NULL);
rqset->set_interpret = lov_getattr_interpret;
{
struct lov_request_set *set;
struct lov_obd *lov;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_request *req;
int err = 0, rc = 0;
ENTRY;
if (rc)
RETURN(rc);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
rc = obd_setattr(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, NULL);
{
struct lov_request_set *set;
struct lov_request *req;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_obd *lov;
int rc = 0;
ENTRY;
oinfo->oi_md->lsm_object_id, oinfo->oi_md->lsm_stripe_count,
oinfo->oi_md->lsm_stripe_size);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
oti->oti_logcookies = set->set_cookies + req->rq_stripe;
}
/* If we are not waiting for responses on async requests, return. */
- if (rc || !rqset || list_empty(&rqset->set_requests)) {
+ if (rc || !rqset || cfs_list_empty(&rqset->set_requests)) {
int err;
if (rc)
set->set_completes = 0;
{
struct lov_request_set *set;
struct lov_obd *lov;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_request *req;
int rc = 0;
ENTRY;
if (rc)
RETURN(rc);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
rc = obd_punch(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, NULL, rqset);
}
}
- if (rc || list_empty(&rqset->set_requests)) {
+ if (rc || cfs_list_empty(&rqset->set_requests)) {
int err;
err = lov_fini_punch_set(set);
RETURN(rc ? rc : err);
struct lov_request_set *set;
struct obd_info oinfo;
struct lov_obd *lov;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_request *req;
int err = 0, rc = 0;
ENTRY;
if (rc)
RETURN(rc);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
rc = obd_sync(lov->lov_tgts[req->rq_idx]->ltd_exp,
req->rq_oi.oi_oa, NULL,
{
struct lov_request_set *set;
struct lov_request *req;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int err, rc = 0;
ENTRY;
if (rc)
RETURN(rc);
- list_for_each (pos, &set->set_list) {
+ cfs_list_for_each (pos, &set->set_list) {
struct obd_export *sub_exp;
struct brw_page *sub_pga;
- req = list_entry(pos, struct lov_request, rq_link);
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
sub_exp = lov->lov_tgts[req->rq_idx]->ltd_exp;
sub_pga = set->set_pga + req->rq_pgaidx;
ldlm_mode_t mode = einfo->ei_mode;
struct lov_request_set *set;
struct lov_request *req;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_obd *lov;
ldlm_error_t rc;
ENTRY;
if (rc)
RETURN(rc);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
rc = obd_enqueue(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, einfo, rqset);
GOTO(out, rc);
}
- if (rqset && !list_empty(&rqset->set_requests)) {
+ if (rqset && !cfs_list_empty(&rqset->set_requests)) {
LASSERT(rc == 0);
LASSERT(rqset->set_interpret == NULL);
rqset->set_interpret = lov_enqueue_interpret;
struct lov_request_set *set;
struct obd_info oinfo;
struct lov_request *req;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_obd *lov;
struct lustre_handle *lov_lockhp;
int err = 0, rc = 0;
if (rc)
RETURN(rc);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
lov_lockhp = set->set_lockh->llh_handles + req->rq_stripe;
rc = obd_cancel(lov->lov_tgts[req->rq_idx]->ltd_exp,
{
struct lov_request_set *set;
struct lov_request *req;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_obd *lov;
int rc = 0;
ENTRY;
if (rc)
RETURN(rc);
- list_for_each (pos, &set->set_list) {
+ cfs_list_for_each (pos, &set->set_list) {
struct obd_device *osc_obd;
- req = list_entry(pos, struct lov_request, rq_link);
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
osc_obd = class_exp2obd(lov->lov_tgts[req->rq_idx]->ltd_exp);
rc = obd_statfs_async(osc_obd, &req->rq_oi, max_age, rqset);
break;
}
- if (rc || list_empty(&rqset->set_requests)) {
+ if (rc || cfs_list_empty(&rqset->set_requests)) {
int err;
if (rc)
set->set_completes = 0;
/* got statfs data */
rc = obd_statfs(osc_obd, &stat_buf,
- cfs_time_current_64() - HZ, 0);
+ cfs_time_current_64() - CFS_HZ, 0);
if (rc)
RETURN(rc);
- if (copy_to_user(data->ioc_pbuf1, &stat_buf, data->ioc_plen1))
+ if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+ data->ioc_plen1))
RETURN(-EFAULT);
/* copy UUID */
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
- data->ioc_plen2))
+ if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
+ data->ioc_plen2))
RETURN(-EFAULT);
break;
}
*genp = lov->lov_tgts[i]->ltd_gen;
}
- if (copy_to_user((void *)uarg, buf, len))
+ if (cfs_copy_to_user((void *)uarg, buf, len))
rc = -EFAULT;
obd_ioctl_freedata(buf, len);
break;
void lov_stripe_lock(struct lov_stripe_md *md)
{
LASSERT(md->lsm_lock_owner != cfs_curproc_pid());
- spin_lock(&md->lsm_lock);
+ cfs_spin_lock(&md->lsm_lock);
LASSERT(md->lsm_lock_owner == 0);
md->lsm_lock_owner = cfs_curproc_pid();
}
{
LASSERT(md->lsm_lock_owner == cfs_curproc_pid());
md->lsm_lock_owner = 0;
- spin_unlock(&md->lsm_lock);
+ cfs_spin_unlock(&md->lsm_lock);
}
EXPORT_SYMBOL(lov_stripe_unlock);
lov_oinfo_slab = cfs_mem_cache_create("lov_oinfo",
sizeof(struct lov_oinfo),
- 0, SLAB_HWCACHE_ALIGN);
+ 0, CFS_SLAB_HWCACHE_ALIGN);
if (lov_oinfo_slab == NULL) {
lu_kmem_fini(lov_caches);
return -ENOMEM;
}
lprocfs_lov_init_vars(&lvars);
- request_module("lquota");
+ cfs_request_module("lquota");
quota_interface = PORTAL_SYMBOL_GET(lov_quota_interface);
init_obd_quota_ops(quota_interface, &lov_obd_ops);
waiter = &lov_env_info(env)->lti_waiter;
cfs_waitlink_init(waiter);
cfs_waitq_add(&site->ls_marche_funebre, waiter);
- set_current_state(CFS_TASK_UNINT);
+ cfs_set_current_state(CFS_TASK_UNINT);
while (r0->lo_sub[idx] == los)
/* this wait-queue is signaled at the end of
\
__lock &= __obj->lo_owner != cfs_current(); \
if (__lock) \
- down_read(&__obj->lo_type_guard); \
+ cfs_down_read(&__obj->lo_type_guard); \
__result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \
if (__lock) \
- up_read(&__obj->lo_type_guard); \
+ cfs_up_read(&__obj->lo_type_guard); \
__result; \
})
enum lov_layout_type __llt; \
\
if (__obj->lo_owner != cfs_current()) \
- down_read(&__obj->lo_type_guard); \
+ cfs_down_read(&__obj->lo_type_guard); \
__llt = __obj->lo_type; \
LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
lov_dispatch[__llt].op(__VA_ARGS__); \
if (__obj->lo_owner != cfs_current()) \
- up_read(&__obj->lo_type_guard); \
+ cfs_up_read(&__obj->lo_type_guard); \
} while (0)
static int lov_layout_change(const struct lu_env *env,
cl_env_reexit(cookie);
old_ops->llo_fini(env, obj, &obj->u);
- LASSERT(list_empty(&hdr->coh_locks));
+ LASSERT(cfs_list_empty(&hdr->coh_locks));
LASSERT(hdr->coh_tree.rnode == NULL);
LASSERT(hdr->coh_pages == 0);
int result;
ENTRY;
- init_rwsem(&lov->lo_type_guard);
+ cfs_init_rwsem(&lov->lo_type_guard);
/* no locking is necessary, as object is being created */
lov->lo_type = cconf->u.coc_md->lsm != NULL ? LLT_RAID0 : LLT_EMPTY;
* Currently only LLT_EMPTY -> LLT_RAID0 transition is supported.
*/
LASSERT(lov->lo_owner != cfs_current());
- down_write(&lov->lo_type_guard);
+ cfs_down_write(&lov->lo_type_guard);
LASSERT(lov->lo_owner == NULL);
lov->lo_owner = cfs_current();
if (lov->lo_type == LLT_EMPTY && conf->u.coc_md->lsm != NULL)
else
result = -EOPNOTSUPP;
lov->lo_owner = NULL;
- up_write(&lov->lo_type_guard);
+ cfs_up_write(&lov->lo_type_guard);
RETURN(result);
}
RETURN(-ENOMEM);
}
- spin_lock_init(&(*lsmp)->lsm_lock);
+ cfs_spin_lock_init(&(*lsmp)->lsm_lock);
(*lsmp)->lsm_magic = magic;
(*lsmp)->lsm_stripe_count = stripe_count;
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
int rc;
ENTRY;
- if (copy_from_user(&lumv3, lump, sizeof(struct lov_user_md_v1)))
+ if (cfs_copy_from_user(&lumv3, lump, sizeof(struct lov_user_md_v1)))
RETURN(-EFAULT);
lmm_magic = lumv1->lmm_magic;
lustre_swab_lov_user_md_v1(lumv1);
lmm_magic = LOV_USER_MAGIC_V1;
} else if (lmm_magic == LOV_USER_MAGIC_V3) {
- if (copy_from_user(&lumv3, lump, sizeof(lumv3)))
+ if (cfs_copy_from_user(&lumv3, lump, sizeof(lumv3)))
RETURN(-EFAULT);
} else if (lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
- if (copy_from_user(&lumv3, lump, sizeof(lumv3)))
+ if (cfs_copy_from_user(&lumv3, lump, sizeof(lumv3)))
RETURN(-EFAULT);
lustre_swab_lov_user_md_v3(&lumv3);
lmm_magic = LOV_USER_MAGIC_V3;
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(&lum, lump, lum_size))
+ if (cfs_copy_from_user(&lum, lump, lum_size))
GOTO(out_set, rc = -EFAULT);
else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
(lum.lmm_magic != LOV_USER_MAGIC_V3))
(lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
/* Return right size of stripe to user */
lum.lmm_stripe_count = lsm->lsm_stripe_count;
- rc = copy_to_user(lump, &lum, lum_size);
+ rc = cfs_copy_to_user(lump, &lum, lum_size);
GOTO(out_set, rc = -EOVERFLOW);
}
rc = lov_packmd(exp, &lmmk, lsm);
lum.lmm_stripe_count = lmmk->lmm_stripe_count;
((struct lov_user_md*)lmmk)->lmm_stripe_offset = 0;
((struct lov_user_md*)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
- if (copy_to_user(lump, lmmk, lmm_size))
+ if (cfs_copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
obd_free_diskmd(exp, &lmmk);
static void lov_pool_getref(struct pool_desc *pool)
{
CDEBUG(D_INFO, "pool %p\n", pool);
- atomic_inc(&pool->pool_refcount);
+ cfs_atomic_inc(&pool->pool_refcount);
}
void lov_pool_putref(struct pool_desc *pool)
{
CDEBUG(D_INFO, "pool %p\n", pool);
- if (atomic_dec_and_test(&pool->pool_refcount)) {
- LASSERT(hlist_unhashed(&pool->pool_hash));
- LASSERT(list_empty(&pool->pool_list));
+ if (cfs_atomic_dec_and_test(&pool->pool_refcount)) {
+ LASSERT(cfs_hlist_unhashed(&pool->pool_hash));
+ LASSERT(cfs_list_empty(&pool->pool_list));
LASSERT(pool->pool_proc_entry == NULL);
lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
lov_ost_pool_free(&(pool->pool_obds));
return (result % mask);
}
-static void *pool_key(struct hlist_node *hnode)
+static void *pool_key(cfs_hlist_node_t *hnode)
{
struct pool_desc *pool;
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
return (pool->pool_name);
}
-static int pool_hashkey_compare(void *key, struct hlist_node *compared_hnode)
+static int pool_hashkey_compare(void *key, cfs_hlist_node_t *compared_hnode)
{
char *pool_name;
struct pool_desc *pool;
int rc;
pool_name = (char *)key;
- pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
+ pool = cfs_hlist_entry(compared_hnode, struct pool_desc, pool_hash);
rc = strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
return (!rc);
}
-static void *pool_hashrefcount_get(struct hlist_node *hnode)
+static void *pool_hashrefcount_get(cfs_hlist_node_t *hnode)
{
struct pool_desc *pool;
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
lov_pool_getref(pool);
return (pool);
}
-static void *pool_hashrefcount_put(struct hlist_node *hnode)
+static void *pool_hashrefcount_put(cfs_hlist_node_t *hnode)
{
struct pool_desc *pool;
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
lov_pool_putref(pool);
return (pool);
}
/* iterate to find a non empty entry */
prev_idx = iter->idx;
- down_read(&pool_tgt_rw_sem(iter->pool));
+ cfs_down_read(&pool_tgt_rw_sem(iter->pool));
iter->idx++;
if (iter->idx == pool_tgt_count(iter->pool)) {
iter->idx = prev_idx; /* we stay on the last entry */
- up_read(&pool_tgt_rw_sem(iter->pool));
+ cfs_up_read(&pool_tgt_rw_sem(iter->pool));
return NULL;
}
- up_read(&pool_tgt_rw_sem(iter->pool));
+ cfs_up_read(&pool_tgt_rw_sem(iter->pool));
(*pos)++;
/* return != NULL to continue */
return iter;
LASSERT(iter->pool != NULL);
LASSERT(iter->idx <= pool_tgt_count(iter->pool));
- down_read(&pool_tgt_rw_sem(iter->pool));
+ cfs_down_read(&pool_tgt_rw_sem(iter->pool));
tgt = pool_tgt(iter->pool, iter->idx);
- up_read(&pool_tgt_rw_sem(iter->pool));
+ cfs_up_read(&pool_tgt_rw_sem(iter->pool));
if (tgt)
seq_printf(s, "%s\n", obd_uuid2str(&(tgt->ltd_uuid)));
CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n",
pool->pool_name, pool->pool_obds.op_count);
- down_read(&pool_tgt_rw_sem(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
for (i = 0; i < pool_tgt_count(pool) ; i++) {
if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp)
obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid)));
}
- up_read(&pool_tgt_rw_sem(pool));
+ cfs_up_read(&pool_tgt_rw_sem(pool));
lov_pool_putref(pool);
}
count = LOV_POOL_INIT_COUNT;
op->op_array = NULL;
op->op_count = 0;
- init_rwsem(&op->op_rw_sem);
+ cfs_init_rwsem(&op->op_rw_sem);
op->op_size = count;
OBD_ALLOC(op->op_array, op->op_size * sizeof(op->op_array[0]));
if (op->op_array == NULL) {
int rc = 0, i;
ENTRY;
- down_write(&op->op_rw_sem);
+ cfs_down_write(&op->op_rw_sem);
rc = lov_ost_pool_extend(op, min_count);
if (rc)
op->op_count++;
EXIT;
out:
- up_write(&op->op_rw_sem);
+ cfs_up_write(&op->op_rw_sem);
return rc;
}
int i;
ENTRY;
- down_write(&op->op_rw_sem);
+ cfs_down_write(&op->op_rw_sem);
for (i = 0; i < op->op_count; i++) {
if (op->op_array[i] == idx) {
memmove(&op->op_array[i], &op->op_array[i + 1],
(op->op_count - i - 1) * sizeof(op->op_array[0]));
op->op_count--;
- up_write(&op->op_rw_sem);
+ cfs_up_write(&op->op_rw_sem);
EXIT;
return 0;
}
}
- up_write(&op->op_rw_sem);
+ cfs_up_write(&op->op_rw_sem);
RETURN(-EINVAL);
}
if (op->op_size == 0)
RETURN(0);
- down_write(&op->op_rw_sem);
+ cfs_down_write(&op->op_rw_sem);
OBD_FREE(op->op_array, op->op_size * sizeof(op->op_array[0]));
op->op_array = NULL;
op->op_count = 0;
op->op_size = 0;
- up_write(&op->op_rw_sem);
+ cfs_up_write(&op->op_rw_sem);
RETURN(0);
}
/* ref count init to 1 because when created a pool is always used
* up to deletion
*/
- atomic_set(&new_pool->pool_refcount, 1);
+ cfs_atomic_set(&new_pool->pool_refcount, 1);
rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
if (rc)
GOTO(out_err, rc);
if (rc)
GOTO(out_free_pool_obds, rc);
- INIT_HLIST_NODE(&new_pool->pool_hash);
+ CFS_INIT_HLIST_NODE(&new_pool->pool_hash);
#ifdef LPROCFS
/* we need this assert seq_file is not implementated for liblustre */
CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool, new_pool->pool_proc_entry);
#endif
- spin_lock(&obd->obd_dev_lock);
- list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
lov->lov_pool_count++;
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
/* add to find only when it fully ready */
rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
RETURN(0);
out_err:
- spin_lock(&obd->obd_dev_lock);
- list_del_init(&new_pool->pool_list);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_list_del_init(&new_pool->pool_list);
lov->lov_pool_count--;
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
lprocfs_remove(&new_pool->pool_proc_entry);
lov_pool_putref(pool);
}
- spin_lock(&obd->obd_dev_lock);
- list_del_init(&pool->pool_list);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_list_del_init(&pool->pool_list);
lov->lov_pool_count--;
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
/* release last reference */
lov_pool_putref(pool);
*/
lov_pool_getref(pool);
- down_read(&pool_tgt_rw_sem(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
for (i = 0; i < pool_tgt_count(pool); i++) {
if (pool_tgt_array(pool)[i] == idx)
rc = -ENOENT;
EXIT;
out:
- up_read(&pool_tgt_rw_sem(pool));
+ cfs_up_read(&pool_tgt_rw_sem(pool));
lov_pool_putref(pool);
return rc;
RETURN(-ENOTCONN);
}
- down_write(&lov->lov_qos.lq_rw_sem);
- mutex_down(&lov->lov_lock);
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_mutex_down(&lov->lov_lock);
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
if (obd_uuid_equals(&oss->lqo_uuid,
&exp->exp_connection->c_remote_uuid)) {
found++;
sizeof(oss->lqo_uuid));
} else {
/* Assume we have to move this one */
- list_del(&oss->lqo_oss_list);
+ cfs_list_del(&oss->lqo_oss_list);
}
oss->lqo_ost_count++;
/* Add sorted by # of OSTs. Find the first entry that we're
bigger than... */
- list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list,
+ lqo_oss_list) {
if (oss->lqo_ost_count > temposs->lqo_ost_count)
break;
}
/* ...and add before it. If we're the first or smallest, temposs
points to the list head, and we add to the end. */
- list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
+ cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
lov->lov_qos.lq_dirty = 1;
lov->lov_qos.lq_rr.lqr_dirty = 1;
oss->lqo_ost_count);
out:
- mutex_up(&lov->lov_lock);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_mutex_up(&lov->lov_lock);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
int rc = 0;
ENTRY;
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
oss = tgt->ltd_qos.ltq_oss;
if (!oss)
if (oss->lqo_ost_count == 0) {
CDEBUG(D_QOS, "removing OSS %s\n",
obd_uuid2str(&oss->lqo_uuid));
- list_del(&oss->lqo_oss_list);
+ cfs_list_del(&oss->lqo_oss_list);
OBD_FREE_PTR(oss);
}
lov->lov_qos.lq_dirty = 1;
lov->lov_qos.lq_rr.lqr_dirty = 1;
out:
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
GOTO(out, rc = -EAGAIN);
/* find bavail on each OSS */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
oss->lqo_bavail = 0;
}
lov->lov_qos.lq_active_oss_count = 0;
}
/* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
temp = oss->lqo_bavail >> 1;
do_div(temp, oss->lqo_ost_count * num_active);
oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
lov->lov_qos.lq_active_oss_count;
/* Decrease all OSS penalties */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
oss->lqo_penalty = 0;
else
}
/* Do actual allocation. */
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
/*
* Check again. While we were sleeping on @lq_rw_sem something could
*/
if (!lqr->lqr_dirty) {
LASSERT(lqr->lqr_pool.op_size);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(0);
}
lqr->lqr_pool.op_count = real_count;
rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
if (rc) {
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
for (i = 0; i < lqr->lqr_pool.op_count; i++)
/* Place all the OSTs from 1 OSS at the same time. */
placed = 0;
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
int j = 0;
for (i = 0; i < lqr->lqr_pool.op_count; i++) {
if (lov->lov_tgts[src_pool->op_array[i]] &&
}
lqr->lqr_dirty = 0;
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
if (placed != real_count) {
/* This should never happen */
osts = &(lov->lov_packed);
lqr = &(lov->lov_qos.lq_rr);
} else {
- down_read(&pool_tgt_rw_sem(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
lqr = &(pool->pool_rr);
}
if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
++lqr->lqr_offset_idx;
}
- down_read(&lov->lov_qos.lq_rw_sem);
+ cfs_down_read(&lov->lov_qos.lq_rw_sem);
ost_start_idx_temp = lqr->lqr_start_idx;
repeat_find:
goto repeat_find;
}
- up_read(&lov->lov_qos.lq_rw_sem);
+ cfs_up_read(&lov->lov_qos.lq_rw_sem);
*stripe_cnt = idx_pos - idx_arr;
out:
if (pool != NULL) {
- up_read(&pool_tgt_rw_sem(pool));
+ cfs_up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lov_find_pool() */
lov_pool_putref(pool);
}
if (pool == NULL) {
osts = &(lov->lov_packed);
} else {
- down_read(&pool_tgt_rw_sem(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
}
rc = -EFBIG;
out:
if (pool != NULL) {
- up_read(&pool_tgt_rw_sem(pool));
+ cfs_up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lov_find_pool() */
lov_pool_putref(pool);
}
osts = &(lov->lov_packed);
lqr = &(lov->lov_qos.lq_rr);
} else {
- down_read(&pool_tgt_rw_sem(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
lqr = &(pool->pool_rr);
}
GOTO(out_nolock, rc = -EAGAIN);
/* Do actual allocation, use write lock here. */
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
/*
* Check again, while we were sleeping on @lq_rw_sem things could
LASSERT(nfound == *stripe_cnt);
out:
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
out_nolock:
if (pool != NULL) {
- up_read(&pool_tgt_rw_sem(pool));
+ cfs_up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lov_find_pool() */
lov_pool_putref(pool);
}
void qos_statfs_done(struct lov_obd *lov)
{
LASSERT(lov->lov_qos.lq_statfs_in_progress);
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
lov->lov_qos.lq_statfs_in_progress = 0;
/* wake up any threads waiting for the statfs rpcs to complete */
cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
}
static int qos_statfs_ready(struct obd_device *obd, __u64 max_age)
struct lov_obd *lov = &obd->u.lov;
int rc;
ENTRY;
- down_read(&lov->lov_qos.lq_rw_sem);
+ cfs_down_read(&lov->lov_qos.lq_rw_sem);
rc = lov->lov_qos.lq_statfs_in_progress == 0 ||
cfs_time_beforeq_64(max_age, obd->obd_osfs_age);
- up_read(&lov->lov_qos.lq_rw_sem);
+ cfs_up_read(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
/* statfs already in progress */
RETURN_EXIT;
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
if (lov->lov_qos.lq_statfs_in_progress) {
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
GOTO(out, rc = 0);
}
/* no statfs in flight, send rpcs */
lov->lov_qos.lq_statfs_in_progress = 1;
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
if (wait)
CDEBUG(D_QOS, "%s: did not manage to get fresh statfs data "
GOTO(out_failed, rc = -ENOMEM);
rc = obd_statfs_async(obd, oinfo, max_age, set);
- if (rc || list_empty(&set->set_requests)) {
+ if (rc || cfs_list_empty(&set->set_requests)) {
if (rc)
CWARN("statfs failed with %d\n", rc);
GOTO(out_failed, rc);
GOTO(out, rc);
out_failed:
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
lov->lov_qos.lq_statfs_in_progress = 0;
/* wake up any threads waiting for the statfs rpcs to complete */
cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
wait = 0;
out:
if (set)
set->set_success = 0;
set->set_cookies = 0;
CFS_INIT_LIST_HEAD(&set->set_list);
- atomic_set(&set->set_refcount, 1);
+ cfs_atomic_set(&set->set_refcount, 1);
cfs_waitq_init(&set->set_waitq);
- spin_lock_init(&set->set_lock);
+ cfs_spin_lock_init(&set->set_lock);
}
void lov_finish_set(struct lov_request_set *set)
{
- struct list_head *pos, *n;
+ cfs_list_t *pos, *n;
ENTRY;
LASSERT(set);
- list_for_each_safe(pos, n, &set->set_list) {
- struct lov_request *req = list_entry(pos, struct lov_request,
- rq_link);
- list_del_init(&req->rq_link);
+ cfs_list_for_each_safe(pos, n, &set->set_list) {
+ struct lov_request *req = cfs_list_entry(pos,
+ struct lov_request,
+ rq_link);
+ cfs_list_del_init(&req->rq_link);
if (req->rq_oi.oi_oa)
OBDO_FREE(req->rq_oi.oi_oa);
void lov_set_add_req(struct lov_request *req, struct lov_request_set *set)
{
- list_add_tail(&req->rq_link, &set->set_list);
+ cfs_list_add_tail(&req->rq_link, &set->set_list);
set->set_count++;
req->rq_rqset = set;
}
RETURN(0);
/* cancel enqueued/matched locks */
- list_for_each_entry(req, &set->set_list, rq_link) {
+ cfs_list_for_each_entry(req, &set->set_list, rq_link) {
struct lustre_handle *lov_lockhp;
if (!req->rq_complete || req->rq_rc)
/* try alloc objects on other osts if osc_create fails for
* exceptions: RPC failure, ENOSPC, etc */
if (set->set_count != set->set_success) {
- list_for_each_entry (req, &set->set_list, rq_link) {
+ cfs_list_for_each_entry (req, &set->set_list, rq_link) {
if (req->rq_rc == 0)
continue;
if (ret_oa == NULL)
GOTO(cleanup, rc = -ENOMEM);
- list_for_each_entry(req, &set->set_list, rq_link) {
+ cfs_list_for_each_entry(req, &set->set_list, rq_link) {
if (!req->rq_complete || req->rq_rc)
continue;
lov_merge_attrs(ret_oa, req->rq_oi.oi_oa,
GOTO(done, rc = 0);
cleanup:
- list_for_each_entry(req, &set->set_list, rq_link) {
+ cfs_list_for_each_entry(req, &set->set_list, rq_link) {
struct obd_export *sub_exp;
int err = 0;
}
}
- spin_lock(&set->set_lock);
+ cfs_spin_lock(&set->set_lock);
req->rq_stripe = set->set_success;
loi = lsm->lsm_oinfo[req->rq_stripe];
if (rc) {
lov_update_set(set, req, rc);
- spin_unlock(&set->set_lock);
+ cfs_spin_unlock(&set->set_lock);
RETURN(rc);
}
set->set_cookie_sent++;
lov_update_set(set, req, rc);
- spin_unlock(&set->set_lock);
+ cfs_spin_unlock(&set->set_lock);
CDEBUG(D_INODE, "objid "LPX64" has subobj "LPX64"/"LPU64" at idx %d\n",
lsm->lsm_object_id, loi->loi_id, loi->loi_id, req->rq_idx);
static int common_attr_done(struct lov_request_set *set)
{
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_request *req;
struct obdo *tmp_oa;
int rc = 0, attrset = 0;
if (tmp_oa == NULL)
GOTO(out, rc = -ENOMEM);
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
if (!req->rq_complete || req->rq_rc)
continue;
{
struct lov_stripe_md *lsm = set->set_oi->oi_md;
struct lov_oinfo *loi = NULL;
- struct list_head *pos;
+ cfs_list_t *pos;
struct lov_request *req;
ENTRY;
- list_for_each (pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ cfs_list_for_each (pos, &set->set_list) {
+ req = cfs_list_entry(pos, struct lov_request, rq_link);
if (!req->rq_complete || req->rq_rc)
continue;
if (osfs->os_ffree != LOV_U64_MAX)
do_div(osfs->os_ffree, expected_stripes);
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
obd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
RETURN(0);
}
lov_update_set(lovreq->rq_rqset, lovreq, rc);
if (rc)
GOTO(out, rc);
-
+
obd_getref(lovobd);
tgt = lov->lov_tgts[lovreq->rq_idx];
if (!tgt || !tgt->ltd_active)
GOTO(out_update, rc);
tgtobd = class_exp2obd(tgt->ltd_exp);
- spin_lock(&tgtobd->obd_osfs_lock);
+ cfs_spin_lock(&tgtobd->obd_osfs_lock);
memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
tgtobd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&tgtobd->obd_osfs_lock);
+ cfs_spin_unlock(&tgtobd->obd_osfs_lock);
out_update:
lov_update_statfs(osfs, lov_sfs, success);
ENTRY;
lsl = cl2lovsub_lock(slice);
- LASSERT(list_empty(&lsl->lss_parents));
+ LASSERT(cfs_list_empty(&lsl->lss_parents));
OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
EXIT;
}
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
ENTRY;
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
struct lov_lock *lov = scan->lll_super;
struct cl_lock *parent = lov->lls_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
- if (!list_empty(&lock->lss_parents)) {
+ if (!cfs_list_empty(&lock->lss_parents)) {
/*
* It is not clear whether all parents have to be asked and
* their estimations summed, or it is enough to ask one. For
LASSERT(cl_lock_mode_match(d->cld_mode,
s->cls_lock->cll_descr.cld_mode));
- list_for_each_entry(scan, &lock->lss_parents, lll_list) {
+ cfs_list_for_each_entry(scan, &lock->lss_parents, lll_list) {
int rc;
lov = scan->lll_super;
sub = cl2lovsub_lock(slice);
result = 0;
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
parent = scan->lll_super->lls_cl.cls_lock;
result = cl_lock_closure_build(env, parent, closure);
if (result != 0)
struct lov_lock_sub *subdata;
restart = 0;
- list_for_each_entry_safe(scan, temp,
- &sub->lss_parents, lll_list) {
+ cfs_list_for_each_entry_safe(scan, temp,
+ &sub->lss_parents, lll_list) {
lov = scan->lll_super;
subdata = &lov->lls_sub[scan->lll_idx];
lovsub_parent_lock(env, lov);
struct lov_lock *lov;
struct lov_lock_link *scan;
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
lov = scan->lll_super;
(*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
if (lov != NULL)
static struct fsfilt_operations *fsfilt_search_type(const char *type)
{
struct fsfilt_operations *found;
- struct list_head *p;
+ cfs_list_t *p;
- list_for_each(p, &fsfilt_types) {
- found = list_entry(p, struct fsfilt_operations, fs_list);
+ cfs_list_for_each(p, &fsfilt_types) {
+ found = cfs_list_entry(p, struct fsfilt_operations, fs_list);
if (!strcmp(found->fs_type, type)) {
return found;
}
}
} else {
PORTAL_MODULE_USE;
- list_add(&fs_ops->fs_list, &fsfilt_types);
+ cfs_list_add(&fs_ops->fs_list, &fsfilt_types);
}
/* unlock fsfilt_types list */
void fsfilt_unregister_ops(struct fsfilt_operations *fs_ops)
{
- struct list_head *p;
+ cfs_list_t *p;
/* lock fsfilt_types list */
- list_for_each(p, &fsfilt_types) {
+ cfs_list_for_each(p, &fsfilt_types) {
struct fsfilt_operations *found;
- found = list_entry(p, typeof(*found), fs_list);
+ found = cfs_list_entry(p, typeof(*found), fs_list);
if (found == fs_ops) {
- list_del(p);
+ cfs_list_del(p);
PORTAL_MODULE_UNUSE;
break;
}
snprintf(name, sizeof(name) - 1, "fsfilt_%s", type);
name[sizeof(name) - 1] = '\0';
- if (!(rc = request_module("%s", name))) {
+ if (!(rc = cfs_request_module("%s", name))) {
fs_ops = fsfilt_search_type(type);
CDEBUG(D_INFO, "Loaded module '%s'\n", name);
if (!fs_ops)
/* unlock fsfilt_types list */
}
}
- try_module_get(fs_ops->fs_owner);
+ cfs_try_module_get(fs_ops->fs_owner);
/* unlock fsfilt_types list */
return fs_ops;
void fsfilt_put_ops(struct fsfilt_operations *fs_ops)
{
- module_put(fs_ops->fs_owner);
+ cfs_module_put(fs_ops->fs_owner);
}
if (iattr->ia_valid & ATTR_MODE) {
inode->i_mode = iattr->ia_mode;
- if (!in_group_p(inode->i_gid) &&
+ if (!cfs_curproc_is_in_groups(inode->i_gid) &&
!cfs_capable(CFS_CAP_FSETID))
inode->i_mode &= ~S_ISGID;
}
int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int *created, int create,
- struct semaphore *optional_sem)
+ cfs_semaphore_t *optional_sem)
{
int rc;
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
}
#endif
if (optional_sem != NULL)
- down(optional_sem);
+ cfs_down(optional_sem);
rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
created, create);
if (optional_sem != NULL)
- up(optional_sem);
+ cfs_up(optional_sem);
return rc;
}
int err, blocksize, csize, boffs, osize = size;
/* prevent reading after eof */
- lock_kernel();
+ cfs_lock_kernel();
if (i_size_read(inode) < *offs + size) {
size = i_size_read(inode) - *offs;
- unlock_kernel();
+ cfs_unlock_kernel();
if (size < 0) {
CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
i_size_read(inode), *offs);
return 0;
}
} else {
- unlock_kernel();
+ cfs_unlock_kernel();
}
blocksize = 1 << inode->i_blkbits;
/* correct in-core and on-disk sizes */
if (new_size > i_size_read(inode)) {
- lock_kernel();
+ cfs_lock_kernel();
if (new_size > i_size_read(inode))
i_size_write(inode, new_size);
if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
EXT3_I(inode)->i_disksize = i_size_read(inode);
if (i_size_read(inode) > old_size)
mark_inode_dirty(inode);
- unlock_kernel();
+ cfs_unlock_kernel();
}
if (err == 0)
}
struct chk_dqblk{
- struct hlist_node dqb_hash; /** quotacheck hash */
- struct list_head dqb_list; /** in list also */
+ cfs_hlist_node_t dqb_hash; /** quotacheck hash */
+ cfs_list_t dqb_list; /** in list also */
qid_t dqb_id; /** uid/gid */
short dqb_type; /** USRQUOTA/GRPQUOTA */
qsize_t dqb_bhardlimit; /** block hard limit */
}
static inline struct chk_dqblk *
-find_chkquot(struct hlist_head *head, qid_t id, int type)
+find_chkquot(cfs_hlist_head_t *head, qid_t id, int type)
{
- struct hlist_node *node;
+ cfs_hlist_node_t *node;
struct chk_dqblk *cdqb;
- hlist_for_each(node, head) {
- cdqb = hlist_entry(node, struct chk_dqblk, dqb_hash);
+ cfs_hlist_for_each(node, head) {
+ cdqb = cfs_hlist_entry(node, struct chk_dqblk, dqb_hash);
if (cdqb->dqb_id == id && cdqb->dqb_type == type)
return cdqb;
}
OBD_ALLOC_PTR(cdqb);
if (cdqb) {
- INIT_HLIST_NODE(&cdqb->dqb_hash);
- INIT_LIST_HEAD(&cdqb->dqb_list);
+ CFS_INIT_HLIST_NODE(&cdqb->dqb_hash);
+ CFS_INIT_LIST_HEAD(&cdqb->dqb_list);
cdqb->dqb_id = id;
cdqb->dqb_type = type;
}
}
static struct chk_dqblk *
-cqget(struct super_block *sb, struct hlist_head *hash, struct list_head *list,
- qid_t id, int type, int first_check)
+cqget(struct super_block *sb, cfs_hlist_head_t *hash,
+ cfs_list_t *list, qid_t id, int type, int first_check)
{
- struct hlist_head *head = hash + chkquot_hash(id, type);
+ cfs_hlist_head_t *head = hash + chkquot_hash(id, type);
struct if_dqblk dqb;
struct chk_dqblk *cdqb;
int rc;
}
}
- hlist_add_head(&cdqb->dqb_hash, head);
- list_add_tail(&cdqb->dqb_list, list);
+ cfs_hlist_add_head(&cdqb->dqb_hash, head);
+ cfs_list_add_tail(&cdqb->dqb_list, list);
return cdqb;
}
#endif
struct qchk_ctxt {
- struct hlist_head qckt_hash[NR_DQHASH]; /* quotacheck hash */
- struct list_head qckt_list; /* quotacheck list */
+ cfs_hlist_head_t qckt_hash[NR_DQHASH]; /* quotacheck hash */
+ cfs_list_t qckt_list; /* quotacheck list */
int qckt_first_check[MAXQUOTAS]; /* 1 if no old quotafile */
- struct if_dqinfo qckt_dqinfo[MAXQUOTAS]; /* old dqinfo */
+ struct if_dqinfo qckt_dqinfo[MAXQUOTAS]; /* old dqinfo */
};
static int add_inode_quota(struct inode *inode, struct qchk_ctxt *qctxt,
struct chk_dqblk *cdqb, *tmp;
int rc;
- list_for_each_entry_safe(cdqb, tmp, &qctxt->qckt_list, dqb_list) {
+ cfs_list_for_each_entry_safe(cdqb, tmp, &qctxt->qckt_list, dqb_list) {
if (!error) {
rc = commit_chkquot(sb, qctxt, cdqb);
if (rc)
error = rc;
}
- hlist_del_init(&cdqb->dqb_hash);
- list_del(&cdqb->dqb_list);
+ cfs_hlist_del_init(&cdqb->dqb_hash);
+ cfs_list_del(&cdqb->dqb_list);
OBD_FREE_PTR(cdqb);
}
}
for (i = 0; i < NR_DQHASH; i++)
- INIT_HLIST_HEAD(&qctxt->qckt_hash[i]);
- INIT_LIST_HEAD(&qctxt->qckt_list);
+ CFS_INIT_HLIST_HEAD(&qctxt->qckt_hash[i]);
+ CFS_INIT_LIST_HEAD(&qctxt->qckt_list);
for (i = 0; i < MAXQUOTAS; i++) {
if (!Q_TYPESET(oqc, i))
* has limits but hasn't file) */
#ifdef HAVE_QUOTA_SUPPORT
for (i = 0; i < MAXQUOTAS; i++) {
- struct list_head id_list;
+ cfs_list_t id_list;
struct dquot_id *dqid, *tmp;
if (!Q_TYPESET(oqc, i))
LASSERT(sb_dqopt(sb)->files[i] != NULL);
- INIT_LIST_HEAD(&id_list);
+ CFS_INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
rc = lustre_get_qids(sb_dqopt(sb)->files[i], NULL, i, &id_list);
#else
if (rc)
CERROR("read old limits failed. (rc:%d)\n", rc);
- list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
- list_del_init(&dqid->di_link);
+ cfs_list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
+ cfs_list_del_init(&dqid->di_link);
if (!rc)
cqget(sb, qctxt->qckt_hash, &qctxt->qckt_list,
}
static int fsfilt_ext3_qids(struct file *file, struct inode *inode, int type,
- struct list_head *list)
+ cfs_list_t *list)
{
return lustre_get_qids(file, inode, type, list);
}
dquot->dq_dqb.dqb_isoftlimit ||
dquot->dq_dqb.dqb_bhardlimit ||
dquot->dq_dqb.dqb_bsoftlimit)
- clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+ cfs_clear_bit(DQ_FAKE_B, &dquot->dq_flags);
else
- set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ cfs_set_bit(DQ_FAKE_B, &dquot->dq_flags);
rc = lustre_commit_dquot(dquot);
if (rc >= 0)
struct inode *inode = dentry->d_inode;
int rc;
- lock_kernel();
+ cfs_lock_kernel();
/* A _really_ horrible hack to avoid removing the data stored
* in the block pointers; this is really the "small" stripe MD data.
rc = inode_setattr(inode, iattr);
}
- unlock_kernel();
+ cfs_unlock_kernel();
return rc;
}
{
static unsigned long next = 0;
- if (time_after(jiffies, next)) {
+ if (cfs_time_after(jiffies, next)) {
CERROR("no journal callback kernel patch, faking it...\n");
- next = jiffies + 300 * HZ;
+ next = jiffies + 300 * CFS_HZ;
}
cb_func(obd, last_rcvd, cb_data, 0);
void lustre_mark_info_dirty(struct lustre_mem_dqinfo *info)
{
- set_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
+ cfs_set_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
}
/**
"VFS: Can't read quota structure for id %u.\n",
dquot->dq_id);
dquot->dq_off = 0;
- set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ cfs_set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct lustre_mem_dqblk));
ret = offset;
} else {
struct inode *inode = dquot->dq_info->qi_files[dquot->dq_type]->f_dentry->d_inode;
/* always clear the flag so we don't loop on an IO error... */
- clear_bit(DQ_MOD_B, &dquot->dq_flags);
+ cfs_clear_bit(DQ_MOD_B, &dquot->dq_flags);
/* The block/inode usage in admin quotafile isn't the real usage
* over all cluster, so keep the fake dquot entry on disk is
* meaningless, just remove it */
- if (test_bit(DQ_FAKE_B, &dquot->dq_flags)) {
+ if (cfs_test_bit(DQ_FAKE_B, &dquot->dq_flags)) {
handle = lustre_quota_journal_start(inode, 1);
rc = lustre_delete_dquot(dquot, version);
lustre_quota_journal_stop(handle);
}
static int walk_block_dqentry(struct file *filp, struct inode *inode, int type,
- uint blk, struct list_head *list)
+ uint blk, cfs_list_t *list)
{
dqbuf_t buf = getdqbuf();
loff_t ret = 0;
(struct lustre_disk_dqdbheader *)buf;
struct dqblk *blk_item;
struct dqblk *pos;
- struct list_head *tmp;
+ cfs_list_t *tmp;
if (!buf)
return -ENOMEM;
if (!le32_to_cpu(dqhead->dqdh_entries))
goto out_buf;
- if (list_empty(list)) {
+ if (cfs_list_empty(list)) {
tmp = list;
goto done;
}
- list_for_each_entry(pos, list, link) {
+ cfs_list_for_each_entry(pos, list, link) {
if (blk == pos->blk) /* we got this blk already */
goto out_buf;
if (blk > pos->blk)
goto out_buf;
}
blk_item->blk = blk;
- INIT_LIST_HEAD(&blk_item->link);
+ CFS_INIT_LIST_HEAD(&blk_item->link);
- list_add_tail(&blk_item->link, tmp);
+ cfs_list_add_tail(&blk_item->link, tmp);
out_buf:
freedqbuf(buf);
}
int walk_tree_dqentry(struct file *filp, struct inode *inode, int type,
- uint blk, int depth, struct list_head *list)
+ uint blk, int depth, cfs_list_t *list)
{
dqbuf_t buf = getdqbuf();
loff_t ret = 0;
* Walk through the quota file (v2 format) to get all ids with quota limit
*/
int lustre_get_qids(struct file *fp, struct inode *inode, int type,
- struct list_head *list)
+ cfs_list_t *list)
{
- struct list_head blk_list;
+ cfs_list_t blk_list;
struct dqblk *blk_item, *tmp;
dqbuf_t buf = NULL;
struct lustre_disk_dqblk_v2 *ddquot;
RETURN(-EINVAL);
}
- if (!list_empty(list)) {
+ if (!cfs_list_empty(list)) {
CDEBUG(D_ERROR, "not empty list\n");
RETURN(-EINVAL);
}
- INIT_LIST_HEAD(&blk_list);
+ CFS_INIT_LIST_HEAD(&blk_list);
rc = walk_tree_dqentry(fp, inode, type, LUSTRE_DQTREEOFF, 0, &blk_list);
if (rc) {
CDEBUG(D_ERROR, "walk through quota file failed!(%d)\n", rc);
GOTO(out_free, rc);
}
- if (list_empty(&blk_list))
+ if (cfs_list_empty(&blk_list))
RETURN(0);
buf = getdqbuf();
RETURN(-ENOMEM);
ddquot = (struct lustre_disk_dqblk_v2 *)GETENTRIES(buf, version);
- list_for_each_entry(blk_item, &blk_list, link) {
+ cfs_list_for_each_entry(blk_item, &blk_list, link) {
loff_t ret = 0;
int i, dqblk_sz = lustre_disk_dqblk_sz[version];
dqid->di_flag |= le64_to_cpu(ddquot[i].dqb_bhardlimit) ?
QB_SET : 0;
- INIT_LIST_HEAD(&dqid->di_link);
- list_add(&dqid->di_link, list);
+ CFS_INIT_LIST_HEAD(&dqid->di_link);
+ cfs_list_add(&dqid->di_link, list);
}
}
out_free:
- list_for_each_entry_safe(blk_item, tmp, &blk_list, link) {
- list_del_init(&blk_item->link);
+ cfs_list_for_each_entry_safe(blk_item, tmp, &blk_list, link) {
+ cfs_list_del_init(&blk_item->link);
kfree(blk_item);
}
if (buf)
#define MAX_UL (0xffffffffUL)
-#define lustre_info_dirty(info) test_bit(DQF_INFO_DIRTY_B, &(info)->dqi_flags)
+#define lustre_info_dirty(info) \
+ cfs_test_bit(DQF_INFO_DIRTY_B, &(info)->dqi_flags)
struct dqblk {
- struct list_head link;
+ cfs_list_t link;
uint blk;
};
ssize_t quota_read(struct file *file, struct inode *inode, int type,
uint blk, dqbuf_t buf);
int walk_tree_dqentry(struct file *filp, struct inode *inode, int type,
- uint blk, int depth, struct list_head *list);
+ uint blk, int depth, cfs_list_t *list);
int check_quota_file(struct file *f, struct inode *inode, int type,
lustre_quota_version_t version);
int lustre_check_quota_file(struct lustre_quota_info *lqi, int type);
int lustre_commit_dquot(struct lustre_dquot *dquot);
int lustre_init_quota_info(struct lustre_quota_info *lqi, int type);
int lustre_get_qids(struct file *fp, struct inode *inode, int type,
- struct list_head *list);
+ cfs_list_t *list);
ssize_t lustre_read_quota(struct file *f, struct inode *inode, int type,
char *buf, int count, loff_t pos);
obd_memory_sum(),
obd_pages_sum() << CFS_PAGE_SHIFT,
obd_pages_sum(),
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
return 1;
}
return 0;
int __obd_fail_check_set(__u32 id, __u32 value, int set)
{
- static atomic_t obd_fail_count = ATOMIC_INIT(0);
+ static cfs_atomic_t obd_fail_count = CFS_ATOMIC_INIT(0);
LASSERT(!(id & OBD_FAIL_ONCE));
if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
(OBD_FAILED | OBD_FAIL_ONCE)) {
- atomic_set(&obd_fail_count, 0); /* paranoia */
+ cfs_atomic_set(&obd_fail_count, 0); /* paranoia */
return 0;
}
/* Skip the first obd_fail_val, then fail */
if (obd_fail_loc & OBD_FAIL_SKIP) {
- if (atomic_inc_return(&obd_fail_count) <= obd_fail_val)
+ if (cfs_atomic_inc_return(&obd_fail_count) <= obd_fail_val)
return 0;
}
/* Fail obd_fail_val times, overridden by FAIL_ONCE */
if (obd_fail_loc & OBD_FAIL_SOME &&
(!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
- int count = atomic_inc_return(&obd_fail_count);
+ int count = cfs_atomic_inc_return(&obd_fail_count);
if (count >= obd_fail_val) {
- set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
- atomic_set(&obd_fail_count, 0);
+ cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
+ cfs_atomic_set(&obd_fail_count, 0);
/* we are lost race to increase obd_fail_count */
if (count > obd_fail_val)
return 0;
if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
(value & OBD_FAIL_ONCE))
- set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
+ cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
/* Lost race to set OBD_FAILED_BIT. */
- if (test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
+ if (cfs_test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
/* If OBD_FAIL_ONCE is valid, only one process can fail,
* otherwise multi-process can fail at the same time. */
if (obd_fail_loc & OBD_FAIL_ONCE)
if (ret) {
CERROR("obd_fail_timeout id %x sleeping for %dms\n",
id, ms);
- cfs_schedule_timeout(CFS_TASK_UNINT,
- cfs_time_seconds(ms) / 1000);
- set_current_state(CFS_TASK_RUNNING);
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ cfs_time_seconds(ms) / 1000);
+ cfs_set_current_state(CFS_TASK_RUNNING);
CERROR("obd_fail_timeout id %x awake\n", id);
}
return ret;
smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
- atomic_inc(&percpu_cntr->lc_cntl.la_entry);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
percpu_cntr->lc_count++;
if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
if (amount > percpu_cntr->lc_max)
percpu_cntr->lc_max = amount;
}
- atomic_inc(&percpu_cntr->lc_cntl.la_exit);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_add);
smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
- atomic_inc(&percpu_cntr->lc_cntl.la_entry);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
/*
* currently lprocfs_count_add() can only be called in thread
else
percpu_cntr->lc_sum -= amount;
}
- atomic_inc(&percpu_cntr->lc_cntl.la_exit);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_sub);
__u64 obd_max_pages = 0;
__u64 obd_max_alloc = 0;
struct lprocfs_stats *obd_memory = NULL;
-spinlock_t obd_updatemax_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t obd_updatemax_lock = CFS_SPIN_LOCK_UNLOCKED;
/* refine later and change to seqlock or simlar from libcfs */
/* Debugging check only needed during development */
OBD_SET_CTXT_MAGIC(save);
save->fs = get_fs();
- LASSERT(atomic_read(&cfs_fs_pwd(current->fs)->d_count));
- LASSERT(atomic_read(&new_ctx->pwd->d_count));
+ LASSERT(cfs_atomic_read(&cfs_fs_pwd(current->fs)->d_count));
+ LASSERT(cfs_atomic_read(&new_ctx->pwd->d_count));
save->pwd = dget(cfs_fs_pwd(current->fs));
save->pwdmnt = mntget(cfs_fs_mnt(current->fs));
save->luc.luc_umask = current->fs->umask;
if (!dirent)
return -ENOMEM;
- list_add_tail(&dirent->lld_list, buf->lrc_list);
+ cfs_list_add_tail(&dirent->lld_list, buf->lrc_list);
buf->lrc_dirent = dirent;
dirent->lld_ino = ino;
return 0;
}
-long l_readdir(struct file *file, struct list_head *dentry_list)
+long l_readdir(struct file *file, cfs_list_t *dentry_list)
{
struct l_linux_dirent *lastdirent;
struct l_readdir_callback buf;
max1 = obd_pages_sum();
max2 = obd_memory_sum();
- spin_lock(&obd_updatemax_lock);
+ cfs_spin_lock(&obd_updatemax_lock);
if (max1 > obd_max_pages)
obd_max_pages = max1;
if (max2 > obd_max_alloc)
obd_max_alloc = max2;
- spin_unlock(&obd_updatemax_lock);
+ cfs_spin_unlock(&obd_updatemax_lock);
}
{
__u64 ret;
- spin_lock(&obd_updatemax_lock);
+ cfs_spin_lock(&obd_updatemax_lock);
ret = obd_max_alloc;
- spin_unlock(&obd_updatemax_lock);
+ cfs_spin_unlock(&obd_updatemax_lock);
return ret;
}
{
__u64 ret;
- spin_lock(&obd_updatemax_lock);
+ cfs_spin_lock(&obd_updatemax_lock);
ret = obd_max_pages;
- spin_unlock(&obd_updatemax_lock);
+ cfs_spin_unlock(&obd_updatemax_lock);
return ret;
}
if (!lc)
RETURN(0);
do {
- centry = atomic_read(&lc->lc_cntl.la_entry);
+ centry = cfs_atomic_read(&lc->lc_cntl.la_entry);
switch (field) {
case LPROCFS_FIELDS_FLAGS_CONFIG:
default:
break;
};
- } while (centry != atomic_read(&lc->lc_cntl.la_entry) &&
- centry != atomic_read(&lc->lc_cntl.la_exit));
+ } while (centry != cfs_atomic_read(&lc->lc_cntl.la_entry) &&
+ centry != cfs_atomic_read(&lc->lc_cntl.la_exit));
RETURN(ret);
}
#ifndef __KERNEL__
#include <liblustre.h>
-#define get_random_bytes(val, size) (*val) = 0
+#define cfs_get_random_bytes(val, size) (*val) = 0
#endif
#include <obd_class.h>
#if defined(HAVE_LINUX_RANDOM_H)
rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size);
if (rem) {
- get_random_bytes(&tmp, sizeof(tmp));
+ cfs_get_random_bytes(&tmp, sizeof(tmp));
tmp ^= ll_rand();
memcpy(buf, &tmp, rem);
p = buf + rem;
}
while (size >= sizeof(int)) {
- get_random_bytes(&tmp, sizeof(tmp));
+ cfs_get_random_bytes(&tmp, sizeof(tmp));
*p = ll_rand() ^ tmp;
size -= sizeof(int);
p++;
}
buf = p;
if (size) {
- get_random_bytes(&tmp, sizeof(tmp));
+ cfs_get_random_bytes(&tmp, sizeof(tmp));
tmp ^= ll_rand();
memcpy(buf, &tmp, size);
}
if (dquot == NULL)
return NULL;
- get_random_bytes(&rand, sizeof(rand));
+ ll_get_random_bytes(&rand, sizeof(rand));
if (!rand)
rand = 1000;
GOTO(out, rc);
}
- clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+ cfs_clear_bit(DQ_FAKE_B, &dquot->dq_flags);
/* for already exists entry, we rewrite it */
rc = lustre_commit_dquot(dquot);
if (rc) {
if (dquot == NULL)
RETURN(-ENOMEM);
repeat:
- clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+ cfs_clear_bit(DQ_FAKE_B, &dquot->dq_flags);
/* write a new dquot */
rc = lustre_commit_dquot(dquot);
if (rc) {
CERROR("read dquot failed! (rc:%d)\n", rc);
GOTO(out, rc);
}
- if (!dquot->dq_off || test_bit(DQ_FAKE_B, &dquot->dq_flags)) {
+ if (!dquot->dq_off || cfs_test_bit(DQ_FAKE_B, &dquot->dq_flags)) {
CERROR("the dquot isn't committed\n");
GOTO(out, rc = -EINVAL);
}
/* remove this dquot */
- set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ cfs_set_bit(DQ_FAKE_B, &dquot->dq_flags);
dquot->dq_dqb.dqb_curspace = 0;
dquot->dq_dqb.dqb_curinodes = 0;
rc = lustre_commit_dquot(dquot);
}
/* check if the dquot is really removed */
- clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+ cfs_clear_bit(DQ_FAKE_B, &dquot->dq_flags);
dquot->dq_off = 0;
rc = lustre_read_dquot(dquot);
if (rc) {
CERROR("read dquot failed! (rc:%d)\n", rc);
GOTO(out, rc);
}
- if (!test_bit(DQ_FAKE_B, &dquot->dq_flags) || dquot->dq_off) {
+ if (!cfs_test_bit(DQ_FAKE_B, &dquot->dq_flags) || dquot->dq_off) {
CERROR("the dquot isn't removed!\n");
GOTO(out, rc = -EINVAL);
}
int i, rc = 0;
for (i = USRQUOTA; i < MAXQUOTAS && !rc; i++) {
- struct list_head list;
+ cfs_list_t list;
struct dquot_id *dqid, *tmp;
- INIT_LIST_HEAD(&list);
+ CFS_INIT_LIST_HEAD(&list);
rc = lustre_get_qids(lqi->qi_files[i], NULL, i, &list);
if (rc) {
CERROR("%s get all %ss (rc:%d):\n",
rc ? "error" : "success",
i == USRQUOTA ? "uid" : "gid", rc);
}
- list_for_each_entry_safe(dqid, tmp, &list, di_link) {
- list_del_init(&dqid->di_link);
+ cfs_list_for_each_entry_safe(dqid, tmp, &list, di_link) {
+ cfs_list_del_init(&dqid->di_link);
if (rc == 0)
CDEBUG(D_INFO, "%d ", dqid->di_id);
kfree(dqid);
return NULL;
UC_CACHE_SET_NEW(entry);
- INIT_LIST_HEAD(&entry->ue_hash);
+ CFS_INIT_LIST_HEAD(&entry->ue_hash);
entry->ue_key = key;
- atomic_set(&entry->ue_refcount, 0);
- init_waitqueue_head(&entry->ue_waitq);
+ cfs_atomic_set(&entry->ue_refcount, 0);
+ cfs_waitq_init(&entry->ue_waitq);
if (cache->uc_ops->init_entry)
cache->uc_ops->init_entry(entry, args);
return entry;
if (cache->uc_ops->free_entry)
cache->uc_ops->free_entry(cache, entry);
- list_del(&entry->ue_hash);
+ cfs_list_del(&entry->ue_hash);
CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
entry, entry->ue_key);
OBD_FREE_PTR(entry);
static inline void get_entry(struct upcall_cache_entry *entry)
{
- atomic_inc(&entry->ue_refcount);
+ cfs_atomic_inc(&entry->ue_refcount);
}
static inline void put_entry(struct upcall_cache *cache,
struct upcall_cache_entry *entry)
{
- if (atomic_dec_and_test(&entry->ue_refcount) &&
+ if (cfs_atomic_dec_and_test(&entry->ue_refcount) &&
(UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
free_entry(cache, entry);
}
struct upcall_cache_entry *entry)
{
if (UC_CACHE_IS_VALID(entry) &&
- time_before(jiffies, entry->ue_expire))
+ cfs_time_before(jiffies, entry->ue_expire))
return 0;
if (UC_CACHE_IS_ACQUIRING(entry)) {
- if (time_before(jiffies, entry->ue_acquire_expire))
+ if (cfs_time_before(jiffies, entry->ue_acquire_expire))
return 0;
UC_CACHE_SET_EXPIRED(entry);
- wake_up_all(&entry->ue_waitq);
+ cfs_waitq_broadcast(&entry->ue_waitq);
} else if (!UC_CACHE_IS_INVALID(entry)) {
UC_CACHE_SET_EXPIRED(entry);
}
- list_del_init(&entry->ue_hash);
- if (!atomic_read(&entry->ue_refcount))
+ cfs_list_del_init(&entry->ue_hash);
+ if (!cfs_atomic_read(&entry->ue_refcount))
free_entry(cache, entry);
return 1;
}
__u64 key, void *args)
{
struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
- struct list_head *head;
- wait_queue_t wait;
+ cfs_list_t *head;
+ cfs_waitlink_t wait;
int rc, found;
ENTRY;
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
found = 0;
- spin_lock(&cache->uc_lock);
- list_for_each_entry_safe(entry, next, head, ue_hash) {
+ cfs_spin_lock(&cache->uc_lock);
+ cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
/* check invalid & expired items */
if (check_unlink_entry(cache, entry))
continue;
if (!found) { /* didn't find it */
if (!new) {
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
new = alloc_entry(cache, key, args);
if (!new) {
CERROR("fail to alloc entry\n");
}
goto find_again;
} else {
- list_add(&new->ue_hash, head);
+ cfs_list_add(&new->ue_hash, head);
entry = new;
}
} else {
free_entry(cache, new);
new = NULL;
}
- list_move(&entry->ue_hash, head);
+ cfs_list_move(&entry->ue_hash, head);
}
get_entry(entry);
UC_CACHE_SET_ACQUIRING(entry);
UC_CACHE_CLEAR_NEW(entry);
entry->ue_acquire_expire = jiffies + cache->uc_acquire_expire;
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
rc = refresh_entry(cache, entry);
- spin_lock(&cache->uc_lock);
+ cfs_spin_lock(&cache->uc_lock);
if (rc < 0) {
UC_CACHE_CLEAR_ACQUIRING(entry);
UC_CACHE_SET_INVALID(entry);
if (UC_CACHE_IS_ACQUIRING(entry)) {
unsigned long expiry = jiffies + cache->uc_acquire_expire;
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&entry->ue_waitq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock(&cache->uc_lock);
+ cfs_waitlink_init(&wait);
+ cfs_waitq_add(&entry->ue_waitq, &wait);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_spin_unlock(&cache->uc_lock);
- schedule_timeout(cache->uc_acquire_expire);
+ cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+ cache->uc_acquire_expire);
- spin_lock(&cache->uc_lock);
- remove_wait_queue(&entry->ue_waitq, &wait);
+ cfs_spin_lock(&cache->uc_lock);
+ cfs_waitq_del(&entry->ue_waitq, &wait);
if (UC_CACHE_IS_ACQUIRING(entry)) {
/* we're interrupted or upcall failed in the middle */
- rc = time_before(jiffies, expiry) ? -EINTR : -ETIMEDOUT;
+ rc = cfs_time_before(jiffies, expiry) ? \
+ -EINTR : -ETIMEDOUT;
put_entry(cache, entry);
CERROR("acquire timeout exceeded for key "LPU64
"\n", entry->ue_key);
*/
if (entry != new) {
put_entry(cache, entry);
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
new = NULL;
goto find_again;
}
/* Now we know it's good */
out:
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
return;
}
- LASSERT(atomic_read(&entry->ue_refcount) > 0);
- spin_lock(&cache->uc_lock);
+ LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+ cfs_spin_lock(&cache->uc_lock);
put_entry(cache, entry);
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);
void *args)
{
struct upcall_cache_entry *entry = NULL;
- struct list_head *head;
+ cfs_list_t *head;
int found = 0, rc = 0;
ENTRY;
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
- spin_lock(&cache->uc_lock);
- list_for_each_entry(entry, head, ue_hash) {
+ cfs_spin_lock(&cache->uc_lock);
+ cfs_list_for_each_entry(entry, head, ue_hash) {
if (downcall_compare(cache, entry, key, args) == 0) {
found = 1;
get_entry(entry);
CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
cache->uc_name, key);
/* haven't found, it's possible */
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
RETURN(-EINVAL);
}
GOTO(out, rc = -EINVAL);
}
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
if (cache->uc_ops->parse_downcall)
rc = cache->uc_ops->parse_downcall(cache, entry, args);
- spin_lock(&cache->uc_lock);
+ cfs_spin_lock(&cache->uc_lock);
if (rc)
GOTO(out, rc);
out:
if (rc) {
UC_CACHE_SET_INVALID(entry);
- list_del_init(&entry->ue_hash);
+ cfs_list_del_init(&entry->ue_hash);
}
UC_CACHE_CLEAR_ACQUIRING(entry);
- spin_unlock(&cache->uc_lock);
- wake_up_all(&entry->ue_waitq);
+ cfs_spin_unlock(&cache->uc_lock);
+ cfs_waitq_broadcast(&entry->ue_waitq);
put_entry(cache, entry);
RETURN(rc);
int i;
ENTRY;
- spin_lock(&cache->uc_lock);
+ cfs_spin_lock(&cache->uc_lock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
- list_for_each_entry_safe(entry, next,
+ cfs_list_for_each_entry_safe(entry, next,
&cache->uc_hashtable[i], ue_hash) {
- if (!force && atomic_read(&entry->ue_refcount)) {
+ if (!force && cfs_atomic_read(&entry->ue_refcount)) {
UC_CACHE_SET_EXPIRED(entry);
continue;
}
- LASSERT(!atomic_read(&entry->ue_refcount));
+ LASSERT(!cfs_atomic_read(&entry->ue_refcount));
free_entry(cache, entry);
}
}
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
EXIT;
}
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
- struct list_head *head;
+ cfs_list_t *head;
struct upcall_cache_entry *entry;
int found = 0;
ENTRY;
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
- spin_lock(&cache->uc_lock);
- list_for_each_entry(entry, head, ue_hash) {
+ cfs_spin_lock(&cache->uc_lock);
+ cfs_list_for_each_entry(entry, head, ue_hash) {
if (upcall_compare(cache, entry, key, args) == 0) {
found = 1;
break;
CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
"cur %lu, ex %ld/%ld\n",
cache->uc_name, entry, entry->ue_key,
- atomic_read(&entry->ue_refcount), entry->ue_flags,
+ cfs_atomic_read(&entry->ue_refcount), entry->ue_flags,
get_seconds(), entry->ue_acquire_expire,
entry->ue_expire);
UC_CACHE_SET_EXPIRED(entry);
- if (!atomic_read(&entry->ue_refcount))
+ if (!cfs_atomic_read(&entry->ue_refcount))
free_entry(cache, entry);
}
- spin_unlock(&cache->uc_lock);
+ cfs_spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);
if (!cache)
RETURN(ERR_PTR(-ENOMEM));
- spin_lock_init(&cache->uc_lock);
- rwlock_init(&cache->uc_upcall_rwlock);
+ cfs_spin_lock_init(&cache->uc_lock);
+ cfs_rwlock_init(&cache->uc_upcall_rwlock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
- INIT_LIST_HEAD(&cache->uc_hashtable[i]);
+ CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
/* upcall pathname proc tunable */
strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
- cache->uc_entry_expire = 10 * 60 * HZ;
- cache->uc_acquire_expire = 15 * HZ;
+ cache->uc_entry_expire = 10 * 60 * CFS_HZ;
+ cache->uc_acquire_expire = 15 * CFS_HZ;
cache->uc_ops = ops;
RETURN(cache);
if (count != sizeof(cs))
return -EINVAL;
- if (copy_from_user(&cs, buffer, sizeof(cs)))
+ if (cfs_copy_from_user(&cs, buffer, sizeof(cs)))
return -EFAULT;
CDEBUG(D_CHANGELOG, "changelog to pid=%d start "LPU64"\n",
CWARN("message to pid %d\n", pid);
len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
- /* for mockup below */ 2 * size_round(sizeof(*hai));
+ /* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
OBD_ALLOC(lh, len);
struct ptlrpc_request **req, int extra_lock_flags);
int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
- struct list_head *cancels, ldlm_mode_t mode,
+ cfs_list_t *cancels, ldlm_mode_t mode,
__u64 bits);
/* mdc/mdc_request.c */
int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
rec->sa_attr_flags = ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
if ((op_data->op_attr.ia_valid & ATTR_GID) &&
- in_group_p(op_data->op_attr.ia_gid))
+ cfs_curproc_is_in_groups(op_data->op_attr.ia_gid))
rec->sa_suppgid = op_data->op_attr.ia_gid;
else
rec->sa_suppgid = op_data->op_suppgids[0];
int rc;
ENTRY;
client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&mcw->mcw_entry);
+ rc = cfs_list_empty(&mcw->mcw_entry);
client_obd_list_unlock(&cli->cl_loi_list_lock);
RETURN(rc);
};
client_obd_list_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+ cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
cfs_waitq_init(&mcw.mcw_waitq);
client_obd_list_unlock(&cli->cl_loi_list_lock);
l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), &lwi);
void mdc_exit_request(struct client_obd *cli)
{
- struct list_head *l, *tmp;
+ cfs_list_t *l, *tmp;
struct mdc_cache_waiter *mcw;
client_obd_list_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+ cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
/* No free request slots anymore */
break;
}
- mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
- list_del_init(&mcw->mcw_entry);
+ mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+ cfs_list_del_init(&mcw->mcw_entry);
cli->cl_r_in_flight++;
cfs_waitq_signal(&mcw->mcw_waitq);
}
{
/* Don't hold error requests for replay. */
if (req->rq_replay) {
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
if (rc && req->rq_transno != 0) {
DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
return NULL;
}
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_replay = req->rq_import->imp_replayable;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
/* pack the intent */
lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
* found by @fid. Found locks are added into @cancel list. Returns the amount of
* locks added to @cancels list. */
int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
- struct list_head *cancels, ldlm_mode_t mode,
+ cfs_list_t *cancels, ldlm_mode_t mode,
__u64 bits)
{
ldlm_policy_data_t policy = {{0}};
}
static int mdc_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
- struct list_head *cancels, int count)
+ cfs_list_t *cancels, int count)
{
return ldlm_prep_elc_req(exp, req, LUSTRE_MDS_VERSION, MDS_REINT,
0, cancels, count);
* be put along with freeing \var mod.
*/
ptlrpc_request_addref(req);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_committed = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
req->rq_cb_data = NULL;
obd_mod_put(mod);
}
obd_mod_get(mod);
obd_mod_get(mod);
- spin_lock(&open_req->rq_lock);
+ cfs_spin_lock(&open_req->rq_lock);
och->och_mod = mod;
mod->mod_och = och;
mod->mod_open_req = open_req;
open_req->rq_cb_data = mod;
open_req->rq_commit_cb = mdc_commit_open;
- spin_unlock(&open_req->rq_lock);
+ cfs_spin_unlock(&open_req->rq_lock);
}
rec->cr_fid2 = body->fid1;
DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
/* We no longer want to preserve this open for replay even
* though the open was committed. b=3632, b=3633 */
- spin_lock(&mod->mod_open_req->rq_lock);
+ cfs_spin_lock(&mod->mod_open_req->rq_lock);
mod->mod_open_req->rq_replay = 0;
- spin_unlock(&mod->mod_open_req->rq_lock);
+ cfs_spin_unlock(&mod->mod_open_req->rq_lock);
} else {
CDEBUG(D_HA, "couldn't find open req; expecting close error\n");
}
DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
/* We no longer want to preserve this setattr for replay even
* though the open was committed. b=3632, b=3633 */
- spin_lock(&mod->mod_open_req->rq_lock);
+ cfs_spin_lock(&mod->mod_open_req->rq_lock);
mod->mod_open_req->rq_replay = 0;
- spin_unlock(&mod->mod_open_req->rq_lock);
+ cfs_spin_unlock(&mod->mod_open_req->rq_lock);
}
mdc_close_pack(req, op_data);
RETURN(-EOVERFLOW);
/* Key is KEY_FID2PATH + getinfo_fid2path description */
- keylen = size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
+ keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
OBD_ALLOC(key, keylen);
if (key == NULL)
RETURN(-ENOMEM);
memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
- memcpy(key + size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
+ memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
CDEBUG(D_IOCTL, "path get "DFID" from "LPU64" #%d\n",
PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);
int rc;
ENTRY;
- if (!try_module_get(THIS_MODULE)) {
+ if (!cfs_try_module_get(THIS_MODULE)) {
CERROR("Can't get module. Is it alive?");
return -EINVAL;
}
GOTO(out, rc = -ENOTTY);
}
out:
- module_put(THIS_MODULE);
+ cfs_module_put(THIS_MODULE);
return rc;
}
if (KEY_IS(KEY_INIT_RECOV)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_initial_recov = *(int *)val;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
exp->exp_obd->obd_name, imp->imp_initial_recov);
RETURN(0);
if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_initial_recov_bk = *(int *)val;
if (imp->imp_initial_recov_bk)
imp->imp_initial_recov = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "%s: set imp_initial_recov_bk = %d\n",
exp->exp_obd->obd_name, imp->imp_initial_recov_bk);
RETURN(0);
if (vallen != sizeof(int))
RETURN(-EINVAL);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (*((int *)val)) {
imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
imp->imp_connect_data.ocd_connect_flags |= OBD_CONNECT_RDONLY;
imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
imp->imp_connect_data.ocd_connect_flags &= ~OBD_CONNECT_RDONLY;
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
rc = target_set_info_rpc(imp, MDS_SET_INFO,
keylen, key, vallen, val, set);
}
if (KEY_IS(KEY_MDS_CONN)) {
/* mds-mds import */
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_server_timeout = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
CDEBUG(D_OTHER, "%s: timeout / 2\n", exp->exp_obd->obd_name);
RETURN(0);
/*Since the request might also come from lprocfs, so we need
*sync this with client_disconnect_export Bug15684*/
- down_read(&obd->u.cli.cl_sem);
+ cfs_down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import)
imp = class_import_get(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
+ cfs_up_read(&obd->u.cli.cl_sem);
if (!imp)
RETURN(-ENODEV);
client import will not have been cleaned. */
if (obd->u.cli.cl_import) {
struct obd_import *imp;
- down_write(&obd->u.cli.cl_sem);
+ cfs_down_write(&obd->u.cli.cl_sem);
imp = obd->u.cli.cl_import;
CERROR("client import never connected\n");
ptlrpc_invalidate_import(imp);
class_destroy_import(imp);
- up_write(&obd->u.cli.cl_sem);
+ cfs_up_write(&obd->u.cli.cl_sem);
obd->u.cli.cl_import = NULL;
}
rc = obd_llog_finish(obd, 0);
/* mds-mds import features */
if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_server_timeout = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
CDEBUG(D_OTHER, "%s: Set 'mds' portal and timeout\n",
obd->obd_name);
struct lprocfs_static_vars lvars = { 0 };
lprocfs_mdc_init_vars(&lvars);
- request_module("lquota");
+ cfs_request_module("lquota");
quota_interface = PORTAL_SYMBOL_GET(mdc_quota_interface);
init_obd_quota_ops(quota_interface, &mdc_obd_ops);
" in log "LPX64"\n", hdr->lrh_index, rec->cur_hdr.lrh_index,
rec->cur_id, rec->cur_endrec, llh->lgh_id.lgl_oid);
- spin_lock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
mdd->mdd_cl.mc_lastuser = rec->cur_id;
- spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
RETURN(LLOG_PROC_BREAK);
}
int rc;
mdd->mdd_cl.mc_index = 0;
- spin_lock_init(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock_init(&mdd->mdd_cl.mc_lock);
cfs_waitq_init(&mdd->mdd_cl.mc_waitq);
mdd->mdd_cl.mc_starttime = cfs_time_current_64();
mdd->mdd_cl.mc_flags = 0; /* off by default */
mdd->mdd_cl.mc_mask = CHANGELOG_DEFMASK;
- spin_lock_init(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_lock_init(&mdd->mdd_cl.mc_user_lock);
mdd->mdd_cl.mc_lastuser = 0;
rc = mdd_changelog_llog_init(mdd);
mdd2obd_dev(mdd)->obd_name);
rc = -ESRCH;
} else {
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
mdd->mdd_cl.mc_flags |= CLM_ON;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
rc = mdd_changelog_write_header(mdd, CLM_START);
}
} else if ((on == 0) && ((mdd->mdd_cl.mc_flags & CLM_ON) == CLM_ON)) {
LCONSOLE_INFO("%s: changelog off\n",mdd2obd_dev(mdd)->obd_name);
rc = mdd_changelog_write_header(mdd, CLM_FINI);
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
mdd->mdd_cl.mc_flags &= ~CLM_ON;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
}
return rc;
}
/* llog_lvfs_write_rec sets the llog tail len */
rec->cr_hdr.lrh_type = CHANGELOG_REC;
rec->cr.cr_time = cl_time();
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
/* NB: I suppose it's possible llog_add adds out of order wrt cr_index,
but as long as the MDD transactions are ordered correctly for e.g.
rename conflicts, I don't think this should matter. */
rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
if (ctxt == NULL)
return -ENXIO;
if (ctxt == NULL)
return -ENXIO;
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
cur = (long long)mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
if (endrec > cur)
endrec = cur;
* No permission check is needed.
*/
static int mdd_statfs(const struct lu_env *env, struct md_device *m,
- struct kstatfs *sfs)
+ cfs_kstatfs_t *sfs)
{
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
int rc;
struct lu_device *next = &m->mdd_child->dd_lu_dev;
ENTRY;
- LASSERT(atomic_read(&lu->ld_ref) == 0);
+ LASSERT(cfs_atomic_read(&lu->ld_ref) == 0);
md_device_fini(&m->mdd_md_dev);
OBD_FREE_PTR(m);
RETURN(next);
rec->cur_hdr.lrh_len = sizeof(*rec);
rec->cur_hdr.lrh_type = CHANGELOG_USER_REC;
- spin_lock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
if (mdd->mdd_cl.mc_lastuser == (unsigned int)(-1)) {
- spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
CERROR("Maximum number of changelog users exceeded!\n");
GOTO(out, rc = -EOVERFLOW);
}
*id = rec->cur_id = ++mdd->mdd_cl.mc_lastuser;
rec->cur_endrec = mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
rc = llog_add(ctxt, &rec->cur_hdr, NULL, NULL, 0);
data.mcud_minrec = 0;
data.mcud_usercount = 0;
data.mcud_endrec = endrec;
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
endrec = mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
if ((data.mcud_endrec == 0) ||
((data.mcud_endrec > endrec) &&
(data.mcud_endrec != MCUD_UNREGISTER)))
#define CLM_PURGE 0x40000
struct mdd_changelog {
- spinlock_t mc_lock; /* for index */
+ cfs_spinlock_t mc_lock; /* for index */
cfs_waitq_t mc_waitq;
int mc_flags;
int mc_mask;
__u64 mc_index;
__u64 mc_starttime;
- spinlock_t mc_user_lock;
+ cfs_spinlock_t mc_user_lock;
int mc_lastuser;
};
return lu_object_fid(&obj->mod_obj.mo_lu);
}
-static inline umode_t mdd_object_type(const struct mdd_object *obj)
+static inline cfs_umode_t mdd_object_type(const struct mdd_object *obj)
{
return lu_object_attr(&obj->mod_obj.mo_lu);
}
#ifdef CONFIG_LOCKDEP
-static struct lock_class_key mdd_pdirop_key;
+static cfs_lock_class_key_t mdd_pdirop_key;
#define RETIP ((unsigned long)__builtin_return_address(0))
int lmm_size, struct thandle *handle, int set_stripe)
{
struct lu_buf *buf;
- umode_t mode;
+ cfs_umode_t mode;
int rc = 0;
ENTRY;
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (copy_from_user(kernbuf, buffer, count))
+ if (cfs_copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
OBD_ALLOC(kernbuf, CFS_PAGE_SIZE);
if (kernbuf == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(kernbuf, buffer, count))
+ if (cfs_copy_from_user(kernbuf, buffer, count))
GOTO(out, rc = -EFAULT);
kernbuf[count] = 0;
return -ENXIO;
LASSERT(ctxt->loc_handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT);
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
cur = mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
cucb.count = count;
cucb.page = page;
/* Verify that our path hasn't changed since we started the lookup.
Record the current index, and verify the path resolves to the
same fid. If it does, then the path is correct as of this index. */
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
pli->pli_currec = mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
rc = mdd_path2fid(env, mdd, ptr, &pli->pli_fid);
if (rc) {
CDEBUG(D_INFO, "mdd_path2fid(%s) failed %d\n", ptr, rc);
!mdd_capable(uc, CFS_CAP_FOWNER))
RETURN(-EPERM);
- if (la->la_mode == (umode_t) -1)
+ if (la->la_mode == (cfs_umode_t) -1)
la->la_mode = tmp_la->la_mode;
else
la->la_mode = (la->la_mode & S_IALLUGO) |
}
if (rc == 0 && ma->ma_valid & MA_LOV) {
- umode_t mode;
+ cfs_umode_t mode;
mode = mdd_object_type(mdd_obj);
if (S_ISREG(mode) || S_ISDIR(mode)) {
}
if (rc == 0 && ma->ma_valid & (MA_HSM | MA_SOM)) {
- umode_t mode;
+ cfs_umode_t mode;
mode = mdd_object_type(mdd_obj);
if (S_ISREG(mode))
break;
case OBD_CLEANUP_EXPORTS:
mds_lov_early_clean(obd);
- down_write(&mds->mds_notify_lock);
+ cfs_down_write(&mds->mds_notify_lock);
mds_lov_disconnect(obd);
mds_lov_clean(obd);
ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
llog_cleanup(ctxt);
rc = obd_llog_finish(obd, 0);
mds->mds_osc_exp = NULL;
- up_write(&mds->mds_notify_lock);
+ cfs_up_write(&mds->mds_notify_lock);
break;
}
RETURN(rc);
* we need only lmi here but not get mount
* OSD did mount already, so put mount back
*/
- atomic_dec(&lsi->lsi_mounts);
+ cfs_atomic_dec(&lsi->lsi_mounts);
mntput(mnt);
- init_rwsem(&mds->mds_notify_lock);
+ cfs_init_rwsem(&mds->mds_notify_lock);
obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
mds_init_ctxt(obd, mnt);
struct lprocfs_static_vars lvars;
int rc;
- request_module("%s", "lquota");
+ cfs_request_module("%s", "lquota");
mds_quota_interface_ref = PORTAL_SYMBOL_GET(mds_quota_interface);
rc = lquota_init(mds_quota_interface_ref);
if (rc) {
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (copy_from_user(kernbuf, buffer, count))
+ if (cfs_copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
CLASSERT(((MDS_LOV_ALLOC_SIZE % sizeof(obd_id)) == 0));
- mds->mds_lov_page_dirty = ALLOCATE_BITMAP(MDS_LOV_OBJID_PAGES_COUNT);
+ mds->mds_lov_page_dirty =
+ CFS_ALLOCATE_BITMAP(MDS_LOV_OBJID_PAGES_COUNT);
if (mds->mds_lov_page_dirty == NULL)
RETURN(-ENOMEM);
err_free:
OBD_FREE(mds->mds_lov_page_array, size);
err_free_bitmap:
- FREE_BITMAP(mds->mds_lov_page_dirty);
+ CFS_FREE_BITMAP(mds->mds_lov_page_dirty);
RETURN(rc);
}
CERROR("%s file won't close, rc=%d\n", LOV_OBJID, rc);
}
- FREE_BITMAP(mds->mds_lov_page_dirty);
+ CFS_FREE_BITMAP(mds->mds_lov_page_dirty);
EXIT;
}
}
- mutex_down(&obd->obd_dev_sem);
+ cfs_mutex_down(&obd->obd_dev_sem);
for (j = 0; j < count; j++) {
__u32 i = le32_to_cpu(data[j].l_ost_idx);
if (mds_lov_update_max_ost(&obd->u.mds, i)) {
break;
}
}
- mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_up(&obd->obd_dev_sem);
RETURN(rc);
}
CDEBUG(D_CONFIG, "updated lov_desc, tgt_count: %d - idx %d / uuid %s\n",
mds->mds_lov_desc.ld_tgt_count, idx, uuid->uuid);
- mutex_down(&obd->obd_dev_sem);
+ cfs_mutex_down(&obd->obd_dev_sem);
rc = mds_lov_update_max_ost(mds, idx);
- mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_up(&obd->obd_dev_sem);
if (rc != 0)
GOTO(out, rc );
RETURN(-ENOTCONN);
}
- mutex_down(&obd->obd_dev_sem);
+ cfs_mutex_down(&obd->obd_dev_sem);
rc = mds_lov_read_objids(obd);
- mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_up(&obd->obd_dev_sem);
if (rc) {
CERROR("cannot read %s: rc = %d\n", "lov_objids", rc);
GOTO(err_exit, rc);
uuid = &watched->u.cli.cl_target_uuid;
LASSERT(uuid);
- down_read(&mds->mds_notify_lock);
+ cfs_down_read(&mds->mds_notify_lock);
if (obd->obd_stopping || obd->obd_fail)
GOTO(out, rc = -ENODEV);
#endif
EXIT;
out:
- up_read(&mds->mds_notify_lock);
+ cfs_up_read(&mds->mds_notify_lock);
if (rc) {
/* Deactivate it for safety */
CERROR("%s sync failed %d, deactivating\n", obd_uuid2str(uuid),
static inline void set_capa_key_expiry(struct mdt_device *mdt)
{
- mdt->mdt_ck_expiry = jiffies + mdt->mdt_ck_timeout * HZ;
+ mdt->mdt_ck_expiry = jiffies + mdt->mdt_ck_timeout * CFS_HZ;
}
static void make_capa_key(struct lustre_capa_key *key,
next = mdt->mdt_child;
rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
if (!rc) {
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
*bkey = *rkey;
*rkey = *tmp;
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
if (rc) {
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
*rkey = *bkey;
memset(bkey, 0, sizeof(*bkey));
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
} else {
set_capa_key_expiry(mdt);
DEBUG_CAPA_KEY(D_SEC, rkey, "new");
if (rc) {
DEBUG_CAPA_KEY(D_ERROR, rkey, "update failed for");
/* next retry is in 300 sec */
- mdt->mdt_ck_expiry = jiffies + 300 * HZ;
+ mdt->mdt_ck_expiry = jiffies + 300 * CFS_HZ;
}
cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
cfs_waitq_init(&thread->t_ctl_waitq);
rc = cfs_kernel_thread(mdt_ck_thread_main, mdt,
- (CLONE_VM | CLONE_FILES));
+ (CLONE_VM | CLONE_FILES));
if (rc < 0) {
CERROR("cannot start mdt_ck thread, rc = %d\n", rc);
return rc;
}
- cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
+ l_cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
return 0;
}
thread->t_flags = SVC_STOPPING;
cfs_waitq_signal(&thread->t_ctl_waitq);
- cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+ l_cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
}
req->rq_status = 0;
lustre_msg_set_status(req->rq_repmsg, 0);
- spin_lock(&req->rq_export->exp_lock);
+ cfs_spin_lock(&req->rq_export->exp_lock);
if (*(__u32 *)val)
req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
else
req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
- spin_unlock(&req->rq_export->exp_lock);
+ cfs_spin_unlock(&req->rq_export->exp_lock);
} else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
struct changelog_setinfo *cs =
else
rc = ptlrpc_start_bulk_transfer (desc);
if (rc == 0) {
- *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ,
+ *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
mdt_bulk_timeout, desc);
rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
desc->bd_export->exp_failed, lwi);
obd = req->rq_export->exp_obd;
/* Check for aborted recovery... */
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
recovering = obd->obd_recovering;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
if (unlikely(recovering)) {
int rc;
int should_process;
OBD_FREE_PTR(mite);
d->ld_site = NULL;
}
- LASSERT(atomic_read(&d->ld_ref) == 0);
+ LASSERT(cfs_atomic_read(&d->ld_ref) == 0);
EXIT;
}
sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
- write_lock(&m->mdt_sptlrpc_lock);
+ cfs_write_lock(&m->mdt_sptlrpc_lock);
sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
m->mdt_sptlrpc_rset = tmp_rset;
- write_unlock(&m->mdt_sptlrpc_lock);
+ cfs_write_unlock(&m->mdt_sptlrpc_lock);
return 0;
}
obd = class_name2obd(dev);
LASSERT(obd != NULL);
- spin_lock_init(&m->mdt_transno_lock);
+ cfs_spin_lock_init(&m->mdt_transno_lock);
m->mdt_max_mdsize = MAX_MD_SIZE;
m->mdt_max_cookiesize = sizeof(struct llog_cookie);
}
}
- rwlock_init(&m->mdt_sptlrpc_lock);
+ cfs_rwlock_init(&m->mdt_sptlrpc_lock);
sptlrpc_rule_set_init(&m->mdt_sptlrpc_rset);
- spin_lock_init(&m->mdt_ioepoch_lock);
+ cfs_spin_lock_init(&m->mdt_ioepoch_lock);
m->mdt_opts.mo_compat_resname = 0;
m->mdt_capa_timeout = CAPA_TIMEOUT;
m->mdt_capa_alg = CAPA_HMAC_ALG_SHA1;
CFS_INIT_LIST_HEAD(&m->mdt_nosquash_nids);
m->mdt_nosquash_str = NULL;
m->mdt_nosquash_strlen = 0;
- init_rwsem(&m->mdt_squash_sem);
+ cfs_init_rwsem(&m->mdt_squash_sem);
- spin_lock_init(&m->mdt_client_bitmap_lock);
+ cfs_spin_lock_init(&m->mdt_client_bitmap_lock);
OBD_ALLOC_PTR(mite);
if (mite == NULL)
lu_object_init(o, h, d);
lu_object_add_top(h, o);
o->lo_ops = &mdt_obj_ops;
- sema_init(&mo->mot_ioepoch_sem, 1);
+ cfs_sema_init(&mo->mot_ioepoch_sem, 1);
RETURN(o);
} else
RETURN(NULL);
if (!mdt->mdt_som_conf)
data->ocd_connect_flags &= ~OBD_CONNECT_SOM;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connect_flags = data->ocd_connect_flags;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
data->ocd_version = LUSTRE_VERSION_CODE;
exp->exp_mdt_data.med_ibits_known = data->ocd_ibits_known;
}
int rc = 0;
if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
- read_lock(&mdt->mdt_sptlrpc_lock);
+ cfs_read_lock(&mdt->mdt_sptlrpc_lock);
sptlrpc_target_choose_flavor(&mdt->mdt_sptlrpc_rset,
req->rq_sp_from,
req->rq_peer.nid,
&flvr);
- read_unlock(&mdt->mdt_sptlrpc_lock);
+ cfs_read_unlock(&mdt->mdt_sptlrpc_lock);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
rc = -EACCES;
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
} else {
if (exp->exp_sp_peer != req->rq_sp_from) {
CERROR("RPC source %s doesn't match %s\n",
int rc = 0;
ENTRY;
- spin_lock(&med->med_open_lock);
- while (!list_empty(&med->med_open_head)) {
- struct list_head *tmp = med->med_open_head.next;
- mfd = list_entry(tmp, struct mdt_file_data, mfd_list);
+ cfs_spin_lock(&med->med_open_lock);
+ while (!cfs_list_empty(&med->med_open_head)) {
+ cfs_list_t *tmp = med->med_open_head.next;
+ mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list);
/* Remove mfd handle so it can't be found again.
* We are consuming the mfd_list reference here. */
class_handle_unhash(&mfd->mfd_handle);
- list_move_tail(&mfd->mfd_list, &closing_list);
+ cfs_list_move_tail(&mfd->mfd_list, &closing_list);
}
- spin_unlock(&med->med_open_lock);
+ cfs_spin_unlock(&med->med_open_lock);
mdt = mdt_dev(obd->obd_lu_dev);
LASSERT(mdt != NULL);
info->mti_mdt = mdt;
info->mti_exp = exp;
- if (!list_empty(&closing_list)) {
+ if (!cfs_list_empty(&closing_list)) {
struct md_attr *ma = &info->mti_attr;
int lmm_size;
int cookie_size;
GOTO(out_cookie, rc = -ENOMEM);
/* Close any open files (which may also cause orphan unlinking). */
- list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
- list_del_init(&mfd->mfd_list);
+ cfs_list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
+ cfs_list_del_init(&mfd->mfd_list);
memset(&ma->ma_attr, 0, sizeof(ma->ma_attr));
ma->ma_lmm_size = lmm_size;
ma->ma_cookie_size = cookie_size;
ENTRY;
CFS_INIT_LIST_HEAD(&med->med_open_head);
- spin_lock_init(&med->med_open_lock);
- sema_init(&med->med_idmap_sem, 1);
+ cfs_spin_lock_init(&med->med_open_lock);
+ cfs_sema_init(&med->med_idmap_sem, 1);
med->med_idmap = NULL;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
rc = ldlm_init_export(exp);
if (rc)
CERROR("Error %d while initializing export\n", rc);
target_destroy_export(exp);
ldlm_destroy_export(exp);
- LASSERT(list_empty(&exp->exp_outstanding_replies));
- LASSERT(list_empty(&exp->exp_mdt_data.med_open_head));
+ LASSERT(cfs_list_empty(&exp->exp_outstanding_replies));
+ LASSERT(cfs_list_empty(&exp->exp_mdt_data.med_open_head));
if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid))
RETURN(0);
struct getinfo_fid2path *fpout, *fpin;
int rc = 0;
- fpin = key + size_round(sizeof(KEY_FID2PATH));
+ fpin = key + cfs_size_round(sizeof(KEY_FID2PATH));
fpout = val;
if (ptlrpc_req_need_swab(info->mti_pill->rc_req))
libcfs_nid2str(exp->exp_connection->c_peer.nid));
len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
- /* for mockup below */ 2 * size_round(sizeof(*hai));
+ /* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
OBD_ALLOC(lh, len);
if (lh == NULL)
RETURN(-ENOMEM);
struct md_identity *identity = &entry->u.identity;
if (identity->mi_ginfo) {
- put_group_info(identity->mi_ginfo);
+ cfs_put_group_info(identity->mi_ginfo);
identity->mi_ginfo = NULL;
}
if (unlikely(!upcall))
RETURN(-ENOMEM);
- read_lock(&cache->uc_upcall_rwlock);
+ cfs_read_lock(&cache->uc_upcall_rwlock);
memcpy(upcall, cache->uc_upcall, size - 1);
- read_unlock(&cache->uc_upcall_rwlock);
+ cfs_read_unlock(&cache->uc_upcall_rwlock);
upcall[size - 1] = 0;
if (unlikely(!strcmp(upcall, "NONE"))) {
CERROR("no upcall set\n");
{
struct md_identity *identity = &entry->u.identity;
struct identity_downcall_data *data = args;
- struct group_info *ginfo;
+ cfs_group_info_t *ginfo;
struct md_perm *perms = NULL;
int size, i;
ENTRY;
if (data->idd_ngroups > NGROUPS_MAX)
RETURN(-E2BIG);
- ginfo = groups_alloc(data->idd_ngroups);
+ ginfo = cfs_groups_alloc(data->idd_ngroups);
if (!ginfo) {
CERROR("failed to alloc %d groups\n", data->idd_ngroups);
RETURN(-ENOMEM);
if (!perms) {
CERROR("failed to alloc %d permissions\n",
data->idd_nperms);
- put_group_info(ginfo);
+ cfs_put_group_info(ginfo);
RETURN(-ENOMEM);
}
OBD_CONNECT_RMT_CLIENT_FORCE | \
OBD_CONNECT_MDS_CAPA | \
OBD_CONNECT_OSS_CAPA); \
- spin_lock(&exp->exp_lock); \
+ cfs_spin_lock(&exp->exp_lock); \
exp->exp_connect_flags = reply->ocd_connect_flags; \
- spin_unlock(&exp->exp_lock); \
+ cfs_spin_unlock(&exp->exp_lock); \
} while (0)
int mdt_init_sec_level(struct mdt_thread_info *info)
if (!mdt->mdt_opts.mo_oss_capa)
reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connect_flags = reply->ocd_connect_flags;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
}
break;
default:
ENTRY;
if (exp_connect_rmtclient(exp)) {
- down(&med->med_idmap_sem);
+ cfs_down(&med->med_idmap_sem);
if (!med->med_idmap)
med->med_idmap = lustre_idmap_init();
- up(&med->med_idmap_sem);
+ cfs_up(&med->med_idmap_sem);
if (IS_ERR(med->med_idmap)) {
long err = PTR_ERR(med->med_idmap);
void mdt_cleanup_idmap(struct mdt_export_data *med)
{
- down(&med->med_idmap_sem);
+ cfs_down(&med->med_idmap_sem);
if (med->med_idmap != NULL) {
lustre_idmap_fini(med->med_idmap);
med->med_idmap = NULL;
}
- up(&med->med_idmap_sem);
+ cfs_up(&med->med_idmap_sem);
}
static inline void mdt_revoke_export_locks(struct obd_export *exp)
/* file data for open files on MDS */
struct mdt_file_data {
struct portals_handle mfd_handle; /* must be first */
- struct list_head mfd_list; /* protected by med_open_lock */
+ cfs_list_t mfd_list; /* protected by med_open_lock */
__u64 mfd_xid; /* xid of the open request */
struct lustre_handle mfd_old_handle; /* old handle in replay case */
int mfd_mode; /* open mode provided by client */
__u32 mdt_fl_cfglog:1,
mdt_fl_synced:1;
/* lock to protect IOepoch */
- spinlock_t mdt_ioepoch_lock;
+ cfs_spinlock_t mdt_ioepoch_lock;
__u64 mdt_ioepoch;
/* transaction callbacks */
struct upcall_cache *mdt_identity_cache;
/* sptlrpc rules */
- rwlock_t mdt_sptlrpc_lock;
+ cfs_rwlock_t mdt_sptlrpc_lock;
struct sptlrpc_rule_set mdt_sptlrpc_rset;
/* capability keys */
/* root squash */
uid_t mdt_squash_uid;
gid_t mdt_squash_gid;
- struct list_head mdt_nosquash_nids;
+ cfs_list_t mdt_nosquash_nids;
char *mdt_nosquash_str;
int mdt_nosquash_strlen;
- struct rw_semaphore mdt_squash_sem;
+ cfs_rw_semaphore_t mdt_squash_sem;
cfs_proc_dir_entry_t *mdt_proc_entry;
struct lprocfs_stats *mdt_stats;
int mot_ioepoch_count;
int mot_writecount;
/* Lock to protect object's IO epoch. */
- struct semaphore mot_ioepoch_sem;
+ cfs_semaphore_t mot_ioepoch_sem;
};
enum mdt_object_flags {
struct obd_uuid uuid[2]; /* for mdt_seq_init_cli() */
char ns_name[48]; /* for mdt_init0() */
struct lustre_cfg_bufs bufs; /* for mdt_stack_fini() */
- struct kstatfs ksfs; /* for mdt_statfs() */
+ cfs_kstatfs_t ksfs; /* for mdt_statfs() */
struct {
/* for mdt_readpage() */
struct lu_rdpg mti_rdpg;
if (uc->mu_valid != UCRED_INIT) {
uc->mu_suppgids[0] = uc->mu_suppgids[1] = -1;
if (uc->mu_ginfo) {
- put_group_info(uc->mu_ginfo);
+ cfs_put_group_info(uc->mu_ginfo);
uc->mu_ginfo = NULL;
}
if (uc->mu_identity) {
}
}
-static int match_nosquash_list(struct rw_semaphore *sem,
- struct list_head *nidlist,
+static int match_nosquash_list(cfs_rw_semaphore_t *sem,
+ cfs_list_t *nidlist,
lnet_nid_t peernid)
{
int rc;
ENTRY;
- down_read(sem);
+ cfs_down_read(sem);
rc = cfs_match_nid(peernid, nidlist);
- up_read(sem);
+ cfs_up_read(sem);
RETURN(rc);
}
if (!remote && perm & CFS_SETGRP_PERM) {
if (pud->pud_ngroups) {
/* setgroups for local client */
- ucred->mu_ginfo = groups_alloc(pud->pud_ngroups);
+ ucred->mu_ginfo = cfs_groups_alloc(pud->pud_ngroups);
if (!ucred->mu_ginfo) {
CERROR("failed to alloc %d groups\n",
pud->pud_ngroups);
out:
if (rc) {
if (ucred->mu_ginfo) {
- put_group_info(ucred->mu_ginfo);
+ cfs_put_group_info(ucred->mu_ginfo);
ucred->mu_ginfo = NULL;
}
if (ucred->mu_identity) {
*eof = 1;
return snprintf(page, count, "%lu\n",
- mdt->mdt_identity_cache->uc_entry_expire / HZ);
+ mdt->mdt_identity_cache->uc_entry_expire / CFS_HZ);
}
static int lprocfs_wr_identity_expire(struct file *file, const char *buffer,
if (rc)
return rc;
- mdt->mdt_identity_cache->uc_entry_expire = val * HZ;
+ mdt->mdt_identity_cache->uc_entry_expire = val * CFS_HZ;
return count;
}
*eof = 1;
return snprintf(page, count, "%lu\n",
- mdt->mdt_identity_cache->uc_acquire_expire / HZ);
+ mdt->mdt_identity_cache->uc_acquire_expire / CFS_HZ);
}
static int lprocfs_wr_identity_acquire_expire(struct file *file,
if (rc)
return rc;
- mdt->mdt_identity_cache->uc_acquire_expire = val * HZ;
+ mdt->mdt_identity_cache->uc_acquire_expire = val * CFS_HZ;
return count;
}
int len;
*eof = 1;
- read_lock(&hash->uc_upcall_rwlock);
+ cfs_read_lock(&hash->uc_upcall_rwlock);
len = snprintf(page, count, "%s\n", hash->uc_upcall);
- read_unlock(&hash->uc_upcall_rwlock);
+ cfs_read_unlock(&hash->uc_upcall_rwlock);
return len;
}
return -EINVAL;
}
- if (copy_from_user(kernbuf, buffer, min_t(unsigned long, count,
- UC_CACHE_UPCALL_MAXPATH - 1)))
+ if (cfs_copy_from_user(kernbuf, buffer,
+ min_t(unsigned long, count,
+ UC_CACHE_UPCALL_MAXPATH - 1)))
return -EFAULT;
/* Remove any extraneous bits from the upcall (e.g. linefeeds) */
- write_lock(&hash->uc_upcall_rwlock);
+ cfs_write_lock(&hash->uc_upcall_rwlock);
sscanf(kernbuf, "%s", hash->uc_upcall);
- write_unlock(&hash->uc_upcall_rwlock);
+ cfs_write_unlock(&hash->uc_upcall_rwlock);
if (strcmp(hash->uc_name, obd->obd_name) != 0)
CWARN("%s: write to upcall name %s\n",
return count;
}
- if (copy_from_user(&sparam, buffer, sizeof(sparam))) {
+ if (cfs_copy_from_user(&sparam, buffer, sizeof(sparam))) {
CERROR("%s: bad identity data\n", obd->obd_name);
GOTO(out, rc = -EFAULT);
}
sparam.idd_uid, sparam.idd_ngroups);
param = &sparam;
param->idd_ngroups = 0;
- } else if (copy_from_user(param, buffer, size)) {
+ } else if (cfs_copy_from_user(param, buffer, size)) {
CERROR("%s: uid %u bad supplementary group data\n",
obd->obd_name, sparam.idd_uid);
OBD_FREE(param, size);
errmsg = "string too long";
GOTO(failed, rc = -EINVAL);
}
- if (copy_from_user(kernbuf, buffer, count)) {
+ if (cfs_copy_from_user(kernbuf, buffer, count)) {
errmsg = "bad address";
GOTO(failed, rc = -EFAULT);
}
struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
int rc;
char *kernbuf, *errmsg;
- struct list_head tmp;
+ cfs_list_t tmp;
ENTRY;
OBD_ALLOC(kernbuf, count + 1);
errmsg = "no memory";
GOTO(failed, rc = -ENOMEM);
}
- if (copy_from_user(kernbuf, buffer, count)) {
+ if (cfs_copy_from_user(kernbuf, buffer, count)) {
errmsg = "bad address";
GOTO(failed, rc = -EFAULT);
}
if (!strcmp(kernbuf, "NONE") || !strcmp(kernbuf, "clear")) {
/* empty string is special case */
- down_write(&mdt->mdt_squash_sem);
- if (!list_empty(&mdt->mdt_nosquash_nids)) {
+ cfs_down_write(&mdt->mdt_squash_sem);
+ if (!cfs_list_empty(&mdt->mdt_nosquash_nids)) {
cfs_free_nidlist(&mdt->mdt_nosquash_nids);
OBD_FREE(mdt->mdt_nosquash_str,
mdt->mdt_nosquash_strlen);
mdt->mdt_nosquash_str = NULL;
mdt->mdt_nosquash_strlen = 0;
}
- up_write(&mdt->mdt_squash_sem);
+ cfs_up_write(&mdt->mdt_squash_sem);
LCONSOLE_INFO("%s: nosquash_nids is cleared\n",
obd->obd_name);
OBD_FREE(kernbuf, count + 1);
GOTO(failed, rc = -EINVAL);
}
- down_write(&mdt->mdt_squash_sem);
- if (!list_empty(&mdt->mdt_nosquash_nids)) {
+ cfs_down_write(&mdt->mdt_squash_sem);
+ if (!cfs_list_empty(&mdt->mdt_nosquash_nids)) {
cfs_free_nidlist(&mdt->mdt_nosquash_nids);
OBD_FREE(mdt->mdt_nosquash_str, mdt->mdt_nosquash_strlen);
}
mdt->mdt_nosquash_str = kernbuf;
mdt->mdt_nosquash_strlen = count + 1;
- list_splice(&tmp, &mdt->mdt_nosquash_nids);
+ cfs_list_splice(&tmp, &mdt->mdt_nosquash_nids);
LCONSOLE_INFO("%s: nosquash_nids is set to %s\n",
obd->obd_name, kernbuf);
- up_write(&mdt->mdt_squash_sem);
+ cfs_up_write(&mdt->mdt_squash_sem);
RETURN(count);
failed:
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (copy_from_user(kernbuf, buffer, count))
+ if (cfs_copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
}
/* 1 stands for self export. */
- list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
if (exp == obd->obd_self_export)
continue;
if (exp->exp_connect_flags & OBD_CONNECT_MDS_MDS)
if (mfd == NULL &&
lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
struct mdt_export_data *med = &req->rq_export->exp_mdt_data;
- list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+ cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
if (mfd->mfd_old_handle.cookie == handle->cookie)
RETURN (mfd);
}
/* free mfd */
void mdt_mfd_free(struct mdt_file_data *mfd)
{
- LASSERT(list_empty(&mfd->mfd_list));
+ LASSERT(cfs_list_empty(&mfd->mfd_list));
OBD_FREE_RCU(mfd, sizeof *mfd, &mfd->mfd_handle);
}
!S_ISREG(lu_object_attr(&o->mot_obj.mo_lu)))
RETURN(0);
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
if (mdt_ioepoch_opened(o)) {
/* Epoch continues even if there is no writers yet. */
CDEBUG(D_INODE, "continue epoch "LPU64" for "DFID"\n",
o->mot_ioepoch, PFID(mdt_object_fid(o)));
} else {
/* XXX: ->mdt_ioepoch is not initialized at the mount */
- spin_lock(&mdt->mdt_ioepoch_lock);
+ cfs_spin_lock(&mdt->mdt_ioepoch_lock);
if (mdt->mdt_ioepoch < info->mti_replayepoch)
mdt->mdt_ioepoch = info->mti_replayepoch;
else
o->mot_ioepoch = mdt->mdt_ioepoch;
- spin_unlock(&mdt->mdt_ioepoch_lock);
+ cfs_spin_unlock(&mdt->mdt_ioepoch_lock);
CDEBUG(D_INODE, "starting epoch "LPU64" for "DFID"\n",
o->mot_ioepoch, PFID(mdt_object_fid(o)));
cancel = 1;
}
o->mot_ioepoch_count++;
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
/* Cancel Size-on-MDS attributes cached on clients for the open case.
* In the truncate case, see mdt_reint_setattr(). */
{
int rc = 0;
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
CDEBUG(D_INODE, "Eviction. Closing IOepoch "LPU64" on "DFID". "
"Count %d\n", o->mot_ioepoch, PFID(mdt_object_fid(o)),
o->mot_ioepoch_count);
rc = mdt_som_attr_set(info, o, o->mot_ioepoch, MDT_SOM_DISABLE);
mdt_object_som_enable(o, o->mot_ioepoch);
}
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
RETURN(rc);
}
int rc = MDT_IOEPOCH_CLOSED;
ENTRY;
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
CDEBUG(D_INODE, "Replay. Closing epoch "LPU64" on "DFID". Count %d\n",
o->mot_ioepoch, PFID(mdt_object_fid(o)), o->mot_ioepoch_count);
o->mot_ioepoch_count--;
if (!mdt_ioepoch_opened(o))
mdt_object_som_enable(o, info->mti_ioepoch->ioepoch);
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
RETURN(rc);
}
la = &info->mti_attr.ma_attr;
achange = (info->mti_ioepoch->flags & MF_SOM_CHANGE);
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
o->mot_ioepoch_count--;
tmp_ma = &info->mti_u.som.attr;
mdt_object_som_enable(o, o->mot_ioepoch);
}
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
/* If recovery is needed, tell the client to perform GETATTR under
* the lock. */
if (ret == MDT_IOEPOCH_GETATTR && recovery) {
RETURN(rc ? : ret);
error_up:
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
return rc;
}
!(info->mti_attr.ma_attr.la_valid & LA_SIZE)))
act = MDT_SOM_DISABLE;
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
/* Mark the object it is the recovery state if we failed to obtain
* SOM attributes. */
if (act == MDT_SOM_DISABLE)
rc = mdt_som_attr_set(info, o, ioepoch, act);
mdt_object_som_enable(o, ioepoch);
}
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
RETURN(rc);
}
{
int rc = 0;
ENTRY;
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
rc = o->mot_writecount;
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
RETURN(rc);
}
{
int rc = 0;
ENTRY;
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
if (o->mot_writecount < 0)
rc = -ETXTBSY;
else
o->mot_writecount++;
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
RETURN(rc);
}
void mdt_write_put(struct mdt_object *o)
{
ENTRY;
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
o->mot_writecount--;
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
EXIT;
}
{
int rc = 0;
ENTRY;
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
if (o->mot_writecount > 0)
rc = -ETXTBSY;
else
o->mot_writecount--;
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
RETURN(rc);
}
static void mdt_write_allow(struct mdt_object *o)
{
ENTRY;
- down(&o->mot_ioepoch_sem);
+ cfs_down(&o->mot_ioepoch_sem);
o->mot_writecount++;
- up(&o->mot_ioepoch_sem);
+ cfs_up(&o->mot_ioepoch_sem);
EXIT;
}
return;
}
- spin_lock(&mdt->mdt_transno_lock);
+ cfs_spin_lock(&mdt->mdt_transno_lock);
if (info->mti_transno == 0) {
info->mti_transno = ++ mdt->mdt_last_transno;
} else {
if (info->mti_transno > mdt->mdt_last_transno)
mdt->mdt_last_transno = info->mti_transno;
}
- spin_unlock(&mdt->mdt_transno_lock);
+ cfs_spin_unlock(&mdt->mdt_transno_lock);
CDEBUG(D_INODE, "transno = %llu, last_committed = %llu\n",
info->mti_transno,
mfd,
PFID(mdt_object_fid(mfd->mfd_object)),
info->mti_rr.rr_handle->cookie);
- spin_lock(&med->med_open_lock);
+ cfs_spin_lock(&med->med_open_lock);
class_handle_unhash(&old_mfd->mfd_handle);
- list_del_init(&old_mfd->mfd_list);
- spin_unlock(&med->med_open_lock);
+ cfs_list_del_init(&old_mfd->mfd_list);
+ cfs_spin_unlock(&med->med_open_lock);
mdt_mfd_close(info, old_mfd);
}
CDEBUG(D_HA, "Store old cookie "LPX64" in new mfd\n",
repbody->handle.cookie = mfd->mfd_handle.h_cookie;
if (req->rq_export->exp_disconnected) {
- spin_lock(&med->med_open_lock);
+ cfs_spin_lock(&med->med_open_lock);
class_handle_unhash(&mfd->mfd_handle);
- list_del_init(&mfd->mfd_list);
- spin_unlock(&med->med_open_lock);
+ cfs_list_del_init(&mfd->mfd_list);
+ cfs_spin_unlock(&med->med_open_lock);
mdt_mfd_close(info, mfd);
} else {
- spin_lock(&med->med_open_lock);
- list_add(&mfd->mfd_list, &med->med_open_head);
- spin_unlock(&med->med_open_lock);
+ cfs_spin_lock(&med->med_open_lock);
+ cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+ cfs_spin_unlock(&med->med_open_lock);
}
mdt_empty_transno(info);
struct mdt_body *repbody;
int rc = 0;
int isreg, isdir, islnk;
- struct list_head *t;
+ cfs_list_t *t;
ENTRY;
LASSERT(ma->ma_valid & MA_INODE);
mfd = NULL;
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
- spin_lock(&med->med_open_lock);
- list_for_each(t, &med->med_open_head) {
- mfd = list_entry(t, struct mdt_file_data, mfd_list);
+ cfs_spin_lock(&med->med_open_lock);
+ cfs_list_for_each(t, &med->med_open_head) {
+ mfd = cfs_list_entry(t, struct mdt_file_data, mfd_list);
if (mfd->mfd_xid == req->rq_xid) {
break;
}
mfd = NULL;
}
- spin_unlock(&med->med_open_lock);
+ cfs_spin_unlock(&med->med_open_lock);
if (mfd != NULL) {
repbody->handle.cookie = mfd->mfd_handle.h_cookie;
LASSERT(mdt_info_req(info));
med = &mdt_info_req(info)->rq_export->exp_mdt_data;
- spin_lock(&med->med_open_lock);
- list_add(&mfd->mfd_list, &med->med_open_head);
+ cfs_spin_lock(&med->med_open_lock);
+ cfs_list_add(&mfd->mfd_list, &med->med_open_head);
class_handle_hash_back(&mfd->mfd_handle);
- spin_unlock(&med->med_open_lock);
+ cfs_spin_unlock(&med->med_open_lock);
if (ret == MDT_IOEPOCH_OPENED) {
ret = 0;
}
med = &req->rq_export->exp_mdt_data;
- spin_lock(&med->med_open_lock);
+ cfs_spin_lock(&med->med_open_lock);
mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
if (mdt_mfd_closed(mfd)) {
- spin_unlock(&med->med_open_lock);
+ cfs_spin_unlock(&med->med_open_lock);
CDEBUG(D_INODE, "no handle for file close: fid = "DFID
": cookie = "LPX64"\n", PFID(info->mti_rr.rr_fid1),
info->mti_ioepoch->handle.cookie);
rc = -ESTALE;
} else {
class_handle_unhash(&mfd->mfd_handle);
- list_del_init(&mfd->mfd_list);
- spin_unlock(&med->med_open_lock);
+ cfs_list_del_init(&mfd->mfd_list);
+ cfs_spin_unlock(&med->med_open_lock);
/* Do not lose object before last unlink. */
o = mfd->mfd_object;
RETURN(lustre_msg_get_status(req->rq_repmsg));
med = &info->mti_exp->exp_mdt_data;
- spin_lock(&med->med_open_lock);
+ cfs_spin_lock(&med->med_open_lock);
mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
if (mfd == NULL) {
- spin_unlock(&med->med_open_lock);
+ cfs_spin_unlock(&med->med_open_lock);
CDEBUG(D_INODE, "no handle for done write: fid = "DFID
": cookie = "LPX64" ioepoch = "LPU64"\n",
PFID(info->mti_rr.rr_fid1),
LASSERT(mfd->mfd_mode == FMODE_EPOCH ||
mfd->mfd_mode == FMODE_TRUNC);
class_handle_unhash(&mfd->mfd_handle);
- list_del_init(&mfd->mfd_list);
- spin_unlock(&med->med_open_lock);
+ cfs_list_del_init(&mfd->mfd_list);
+ cfs_spin_unlock(&med->med_open_lock);
/* Set EPOCH CLOSE flag if not set by client. */
info->mti_ioepoch->flags |= MF_EPOCH_CLOSE;
/* When we do a clean MDS shutdown, we save the last_transno into
* the header. If we find clients with higher last_transno values
* then those clients may need recovery done. */
- LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
+ LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
for (cl_idx = 0, off = lsd->lsd_client_start;
off < last_size; cl_idx++) {
__u64 last_transno;
/* VBR: set export last committed version */
exp->exp_last_committed = last_transno;
lcd = NULL;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connecting = 0;
exp->exp_in_recovery = 0;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
obd->obd_max_recoverable_clients++;
class_export_put(exp);
}
CDEBUG(D_OTHER, "client at idx %d has last_transno="LPU64"\n",
cl_idx, last_transno);
/* protect __u64 value update */
- spin_lock(&mdt->mdt_transno_lock);
+ cfs_spin_lock(&mdt->mdt_transno_lock);
mdt->mdt_last_transno = max(last_transno,
mdt->mdt_last_transno);
- spin_unlock(&mdt->mdt_transno_lock);
+ cfs_spin_unlock(&mdt->mdt_transno_lock);
}
err_client:
lsd->lsd_feature_incompat |= OBD_INCOMPAT_FID;
- spin_lock(&mdt->mdt_transno_lock);
+ cfs_spin_lock(&mdt->mdt_transno_lock);
mdt->mdt_last_transno = lsd->lsd_last_transno;
- spin_unlock(&mdt->mdt_transno_lock);
+ cfs_spin_unlock(&mdt->mdt_transno_lock);
CDEBUG(D_INODE, "========BEGIN DUMPING LAST_RCVD========\n");
CDEBUG(D_INODE, "%s: server last_transno: "LPU64"\n",
if (rc)
GOTO(err_client, rc);
- spin_lock(&mdt->mdt_transno_lock);
+ cfs_spin_lock(&mdt->mdt_transno_lock);
/* obd_last_committed is used for compatibility
* with other lustre recovery code */
obd->obd_last_committed = mdt->mdt_last_transno;
- spin_unlock(&mdt->mdt_transno_lock);
+ cfs_spin_unlock(&mdt->mdt_transno_lock);
mdt->mdt_mount_count = mount_count + 1;
lsd->lsd_mount_count = mdt->mdt_mount_count;
CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
mdt->mdt_mount_count, mdt->mdt_last_transno);
- spin_lock(&mdt->mdt_transno_lock);
+ cfs_spin_lock(&mdt->mdt_transno_lock);
mdt->mdt_lsd.lsd_last_transno = mdt->mdt_last_transno;
- spin_unlock(&mdt->mdt_transno_lock);
+ cfs_spin_unlock(&mdt->mdt_transno_lock);
rc = mdt_last_rcvd_header_write(env, mdt, th);
mdt_trans_stop(env, mdt, th);
/* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
* there's no need for extra complication here
*/
- spin_lock(&mdt->mdt_client_bitmap_lock);
- cl_idx = find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
+ cfs_spin_lock(&mdt->mdt_client_bitmap_lock);
+ cl_idx = cfs_find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
if (cl_idx >= LR_MAX_CLIENTS ||
OBD_FAIL_CHECK(OBD_FAIL_MDS_CLIENT_ADD)) {
CERROR("no room for %u clients - fix LR_MAX_CLIENTS\n",
cl_idx);
- spin_unlock(&mdt->mdt_client_bitmap_lock);
+ cfs_spin_unlock(&mdt->mdt_client_bitmap_lock);
RETURN(-EOVERFLOW);
}
- set_bit(cl_idx, bitmap);
- spin_unlock(&mdt->mdt_client_bitmap_lock);
+ cfs_set_bit(cl_idx, bitmap);
+ cfs_spin_unlock(&mdt->mdt_client_bitmap_lock);
CDEBUG(D_INFO, "client at idx %d with UUID '%s' added\n",
cl_idx, med->med_lcd->lcd_uuid);
med->med_lr_idx = cl_idx;
med->med_lr_off = lsd->lsd_client_start +
(cl_idx * lsd->lsd_client_size);
- init_mutex(&med->med_lcd_lock);
+ cfs_init_mutex(&med->med_lcd_lock);
LASSERTF(med->med_lr_off > 0, "med_lr_off = %llu\n", med->med_lr_off);
* server down with lots of sync writes.
*/
mdt_trans_add_cb(th, lut_cb_client, class_export_cb_get(mti->mti_exp));
- spin_lock(&mti->mti_exp->exp_lock);
+ cfs_spin_lock(&mti->mti_exp->exp_lock);
mti->mti_exp->exp_need_sync = 1;
- spin_unlock(&mti->mti_exp->exp_lock);
+ cfs_spin_unlock(&mti->mti_exp->exp_lock);
rc = mdt_last_rcvd_write(env, mdt, lcd, &off, th);
CDEBUG(D_INFO, "wrote client lcd at idx %u off %llu (len %u)\n",
if (!strcmp(med->med_lcd->lcd_uuid, obd->obd_uuid.uuid))
RETURN(0);
- spin_lock(&mdt->mdt_client_bitmap_lock);
- if (test_and_set_bit(cl_idx, bitmap)) {
+ cfs_spin_lock(&mdt->mdt_client_bitmap_lock);
+ if (cfs_test_and_set_bit(cl_idx, bitmap)) {
CERROR("MDS client %d: bit already set in bitmap!!\n",
cl_idx);
LBUG();
}
- spin_unlock(&mdt->mdt_client_bitmap_lock);
+ cfs_spin_unlock(&mdt->mdt_client_bitmap_lock);
CDEBUG(D_INFO, "client at idx %d with UUID '%s' added\n",
cl_idx, med->med_lcd->lcd_uuid);
med->med_lr_idx = cl_idx;
med->med_lr_off = lsd->lsd_client_start +
(cl_idx * lsd->lsd_client_size);
- init_mutex(&med->med_lcd_lock);
+ cfs_init_mutex(&med->med_lcd_lock);
LASSERTF(med->med_lr_off > 0, "med_lr_off = %llu\n", med->med_lr_off);
* Clear the bit _after_ zeroing out the client so we don't race with
* mdt_client_add and zero out new clients.
*/
- if (!test_bit(med->med_lr_idx, mdt->mdt_client_bitmap)) {
+ if (!cfs_test_bit(med->med_lr_idx, mdt->mdt_client_bitmap)) {
CERROR("MDT client %u: bit already clear in bitmap!!\n",
med->med_lr_idx);
LBUG();
if (IS_ERR(th))
GOTO(free, rc = PTR_ERR(th));
- mutex_down(&med->med_lcd_lock);
+ cfs_mutex_down(&med->med_lcd_lock);
memset(lcd, 0, sizeof *lcd);
rc = mdt_last_rcvd_write(env, mdt, lcd, &off, th);
med->med_lcd = NULL;
- mutex_up(&med->med_lcd_lock);
+ cfs_mutex_up(&med->med_lcd_lock);
mdt_trans_stop(env, mdt, th);
- spin_lock(&mdt->mdt_client_bitmap_lock);
- clear_bit(med->med_lr_idx, mdt->mdt_client_bitmap);
- spin_unlock(&mdt->mdt_client_bitmap_lock);
+ cfs_spin_lock(&mdt->mdt_client_bitmap_lock);
+ cfs_clear_bit(med->med_lr_idx, mdt->mdt_client_bitmap);
+ cfs_spin_unlock(&mdt->mdt_client_bitmap_lock);
CDEBUG(rc == 0 ? D_INFO : D_ERROR, "Zeroing out client idx %u in "
"%s, rc %d\n", med->med_lr_idx, LAST_RCVD, rc);
OBD_FREE_PTR(lcd);
RETURN(0);
free:
- mutex_down(&med->med_lcd_lock);
+ cfs_mutex_down(&med->med_lcd_lock);
med->med_lcd = NULL;
- mutex_up(&med->med_lcd_lock);
+ cfs_mutex_up(&med->med_lcd_lock);
OBD_FREE_PTR(lcd);
return 0;
}
med = &req->rq_export->exp_mdt_data;
LASSERT(med);
- mutex_down(&med->med_lcd_lock);
+ cfs_mutex_down(&med->med_lcd_lock);
lcd = med->med_lcd;
/* if the export has already been disconnected, we have no last_rcvd slot,
* update server data with latest transno then */
if (lcd == NULL) {
- mutex_up(&med->med_lcd_lock);
+ cfs_mutex_up(&med->med_lcd_lock);
CWARN("commit transaction for disconnected client %s: rc %d\n",
req->rq_export->exp_client_uuid.uuid, rc);
err = mdt_last_rcvd_header_write(mti->mti_env, mdt, th);
} else {
err = mdt_last_rcvd_write(mti->mti_env, mdt, lcd, &off, th);
}
- mutex_up(&med->med_lcd_lock);
+ cfs_mutex_up(&med->med_lcd_lock);
RETURN(err);
}
}
mti->mti_has_trans = 1;
- spin_lock(&mdt->mdt_transno_lock);
+ cfs_spin_lock(&mdt->mdt_transno_lock);
if (txn->th_result != 0) {
if (mti->mti_transno != 0) {
CERROR("Replay transno "LPU64" failed: rc %i\n",
if (mti->mti_transno > mdt->mdt_last_transno)
mdt->mdt_last_transno = mti->mti_transno;
}
- spin_unlock(&mdt->mdt_transno_lock);
+ cfs_spin_unlock(&mdt->mdt_transno_lock);
/* sometimes the reply message has not been successfully packed */
LASSERT(req != NULL && req->rq_repmsg != NULL);
static void mdt_steal_ack_locks(struct ptlrpc_request *req)
{
struct obd_export *exp = req->rq_export;
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ptlrpc_reply_state *oldrep;
struct ptlrpc_service *svc;
int i;
/* CAVEAT EMPTOR: spinlock order */
- spin_lock(&exp->exp_lock);
- list_for_each (tmp, &exp->exp_outstanding_replies) {
- oldrep = list_entry(tmp, struct ptlrpc_reply_state,rs_exp_list);
+ cfs_spin_lock(&exp->exp_lock);
+ cfs_list_for_each (tmp, &exp->exp_outstanding_replies) {
+ oldrep = cfs_list_entry(tmp, struct ptlrpc_reply_state,
+ rs_exp_list);
if (oldrep->rs_xid != req->rq_xid)
continue;
oldrep->rs_opc);
svc = oldrep->rs_service;
- spin_lock (&svc->srv_lock);
+ cfs_spin_lock (&svc->srv_lock);
- list_del_init (&oldrep->rs_exp_list);
+ cfs_list_del_init (&oldrep->rs_exp_list);
CWARN("Stealing %d locks from rs %p x"LPD64".t"LPD64
" o%d NID %s\n",
oldrep->rs_nlocks = 0;
DEBUG_REQ(D_HA, req, "stole locks for");
- spin_lock(&oldrep->rs_lock);
+ cfs_spin_lock(&oldrep->rs_lock);
ptlrpc_schedule_difficult_reply (oldrep);
- spin_unlock(&oldrep->rs_lock);
+ cfs_spin_unlock(&oldrep->rs_lock);
- spin_unlock (&svc->srv_lock);
+ cfs_spin_unlock (&svc->srv_lock);
break;
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
}
/**
repbody = req_capsule_server_get(mti->mti_pill, &RMF_MDT_BODY);
repbody->ioepoch = obj->mot_ioepoch;
- spin_lock(&med->med_open_lock);
- list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+ cfs_spin_lock(&med->med_open_lock);
+ cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
if (mfd->mfd_xid == req->rq_xid)
break;
}
LASSERT(&mfd->mfd_list != &med->med_open_head);
- spin_unlock(&med->med_open_lock);
+ cfs_spin_unlock(&med->med_open_lock);
repbody->handle.cookie = mfd->mfd_handle.h_cookie;
}
/** Sanity check for malformed buffers */
if (pre_versions == NULL) {
CERROR("No versions in request buffer\n");
- spin_lock(&req->rq_export->exp_lock);
+ cfs_spin_lock(&req->rq_export->exp_lock);
req->rq_export->exp_vbr_failed = 1;
- spin_unlock(&req->rq_export->exp_lock);
+ cfs_spin_unlock(&req->rq_export->exp_lock);
RETURN(-EOVERFLOW);
} else if (pre_versions[index] != curr_version) {
CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
pre_versions[index], curr_version);
- spin_lock(&req->rq_export->exp_lock);
+ cfs_spin_lock(&req->rq_export->exp_lock);
req->rq_export->exp_vbr_failed = 1;
- spin_unlock(&req->rq_export->exp_lock);
+ cfs_spin_unlock(&req->rq_export->exp_lock);
RETURN(-EOVERFLOW);
}
}
mfd->mfd_object = mo;
mfd->mfd_xid = req->rq_xid;
- spin_lock(&med->med_open_lock);
- list_add(&mfd->mfd_list, &med->med_open_head);
- spin_unlock(&med->med_open_lock);
+ cfs_spin_lock(&med->med_open_lock);
+ cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+ cfs_spin_unlock(&med->med_open_lock);
repbody->handle.cookie = mfd->mfd_handle.h_cookie;
}
LASSERT(mdt_conn_flags(info) & OBD_CONNECT_SOM);
LASSERT(info->mti_ioepoch);
- spin_lock(&med->med_open_lock);
+ cfs_spin_lock(&med->med_open_lock);
mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
if (mfd == NULL) {
- spin_unlock(&med->med_open_lock);
+ cfs_spin_unlock(&med->med_open_lock);
CDEBUG(D_INODE, "no handle for file close: "
"fid = "DFID": cookie = "LPX64"\n",
PFID(info->mti_rr.rr_fid1),
LASSERT(!(info->mti_ioepoch->flags & MF_EPOCH_CLOSE));
class_handle_unhash(&mfd->mfd_handle);
- list_del_init(&mfd->mfd_list);
- spin_unlock(&med->med_open_lock);
+ cfs_list_del_init(&mfd->mfd_list);
+ cfs_spin_unlock(&med->med_open_lock);
/* Close the found mfd, update attributes. */
ma->ma_lmm_size = info->mti_mdt->mdt_max_mdsize;
/********************** config llog list **********************/
static CFS_LIST_HEAD(config_llog_list);
-static spinlock_t config_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t config_list_lock = CFS_SPIN_LOCK_UNLOCKED;
/* Take a reference to a config log */
static int config_log_get(struct config_llog_data *cld)
ENTRY;
if (cld->cld_stopping)
RETURN(1);
- atomic_inc(&cld->cld_refcount);
+ cfs_atomic_inc(&cld->cld_refcount);
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- atomic_read(&cld->cld_refcount));
+ cfs_atomic_read(&cld->cld_refcount));
RETURN(0);
}
ENTRY;
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- atomic_read(&cld->cld_refcount));
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
+ cfs_atomic_read(&cld->cld_refcount));
+ LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* spinlock to make sure no item with 0 refcount in the list */
- spin_lock(&config_list_lock);
- if (unlikely(atomic_dec_and_test(&cld->cld_refcount))) {
- list_del(&cld->cld_list_chain);
- spin_unlock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
+ if (unlikely(cfs_atomic_dec_and_test(&cld->cld_refcount))) {
+ cfs_list_del(&cld->cld_list_chain);
+ cfs_spin_unlock(&config_list_lock);
CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
strlen(cld->cld_cfg.cfg_instance) + 1);
OBD_FREE(cld, sizeof(*cld));
} else {
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
}
EXIT;
RETURN(ERR_PTR(-EINVAL));
}
- spin_lock(&config_list_lock);
- list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+ cfs_spin_lock(&config_list_lock);
+ cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
if (match_instance && cld->cld_cfg.cfg_instance &&
strcmp(logid, cld->cld_cfg.cfg_instance) == 0)
goto out_found;
strcmp(logid, cld->cld_logname) == 0)
goto out_found;
}
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
CDEBUG(D_CONFIG, "can't get log %s\n", logid);
RETURN(ERR_PTR(-ENOENT));
out_found:
- atomic_inc(&cld->cld_refcount);
- spin_unlock(&config_list_lock);
+ cfs_atomic_inc(&cld->cld_refcount);
+ cfs_spin_unlock(&config_list_lock);
LASSERT(cld->cld_stopping == 0 || cld->cld_is_sptlrpc == 0);
RETURN(cld);
}
cld->cld_cfg.cfg_flags = 0;
cld->cld_cfg.cfg_sb = sb;
cld->cld_is_sptlrpc = is_sptlrpc;
- atomic_set(&cld->cld_refcount, 1);
+ cfs_atomic_set(&cld->cld_refcount, 1);
/* Keep the mgc around until we are done */
cld->cld_mgcexp = class_export_get(obd->obd_self_export);
rc = mgc_logname2resid(logname, &cld->cld_resid);
- spin_lock(&config_list_lock);
- list_add(&cld->cld_list_chain, &config_llog_list);
- spin_unlock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
+ cfs_list_add(&cld->cld_list_chain, &config_llog_list);
+ cfs_spin_unlock(&config_list_lock);
if (rc) {
config_log_put(cld);
RETURN(0);
}
-DECLARE_MUTEX(llog_process_lock);
+CFS_DECLARE_MUTEX(llog_process_lock);
/* Stop watching for updates on this log. */
static int config_log_end(char *logname, struct config_llog_instance *cfg)
if (IS_ERR(cld))
RETURN(PTR_ERR(cld));
- down(&llog_process_lock);
+ cfs_down(&llog_process_lock);
/*
* if cld_stopping is set, it means we didn't start the log thus
* not owning the start ref. this can happen after previous umount:
* calling start_log.
*/
if (unlikely(cld->cld_stopping)) {
- up(&llog_process_lock);
+ cfs_up(&llog_process_lock);
/* drop the ref from the find */
config_log_put(cld);
RETURN(rc);
}
cld->cld_stopping = 1;
- up(&llog_process_lock);
+ cfs_up(&llog_process_lock);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
cld_sptlrpc = cld->cld_sptlrpc;
cld->cld_sptlrpc = NULL;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
if (cld_sptlrpc)
config_log_put(cld_sptlrpc);
static void do_requeue(struct config_llog_data *cld)
{
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
+ LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* Do not run mgc_process_log on a disconnected export or an
export which is being disconnected. Take the client
semaphore to make the check non-racy. */
- down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ cfs_down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
cld->cld_logname);
}
- up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ cfs_up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
/* Whether we enqueued again or not in mgc_process_log, we're done
* with the ref from the old enqueue */
CDEBUG(D_MGC, "Starting requeue thread\n");
- lwi_later = LWI_TIMEOUT(60 * HZ, NULL, NULL);
+ lwi_later = LWI_TIMEOUT(60 * CFS_HZ, NULL, NULL);
l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP), &lwi_later);
/* Keep trying failed locks periodically */
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
while (rq_state & (RQ_NOW | RQ_LATER)) {
/* Any new or requeued lostlocks will change the state */
rq_state &= ~(RQ_NOW | RQ_LATER);
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
/* Always wait a few seconds to allow the server who
caused the lock revocation to finish its setup, plus some
random so everyone doesn't try to reconnect at once. */
- lwi_now = LWI_TIMEOUT(3 * HZ + (ll_rand() & 0xff) * (HZ / 100),
+ lwi_now = LWI_TIMEOUT(3 * CFS_HZ + (ll_rand() & 0xff) * \
+ (CFS_HZ / 100),
NULL, NULL);
l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi_now);
*/
cld_prev = NULL;
- spin_lock(&config_list_lock);
- list_for_each_entry_safe(cld, cld_next, &config_llog_list,
- cld_list_chain) {
+ cfs_spin_lock(&config_list_lock);
+ cfs_list_for_each_entry_safe(cld, cld_next, &config_llog_list,
+ cld_list_chain) {
if (cld->cld_list_chain.next != &config_llog_list)
- atomic_inc(&cld_next->cld_refcount);
+ cfs_atomic_inc(&cld_next->cld_refcount);
if (cld->cld_lostlock) {
if (cld->cld_sptlrpc &&
cld->cld_sptlrpc->cld_lostlock) {
cld->cld_sptlrpc->cld_lostlock = 0;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
do_requeue(cld->cld_sptlrpc);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
LASSERT(cld->cld_lostlock);
}
cld->cld_lostlock = 0;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
do_requeue(cld);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
}
if (cld_prev) {
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
config_log_put(cld_prev);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
}
cld_prev = cld_next;
}
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
/* Wait a bit to see if anyone else needs a requeue */
l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
&lwi_later);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
}
/* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
rq_state &= ~RQ_RUNNING;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
CDEBUG(D_MGC, "Ending requeue thread\n");
RETURN(rc);
int rc = 0;
CDEBUG(D_INFO, "log %s: requeue (l=%d r=%d sp=%d st=%x)\n",
- cld->cld_logname, later, atomic_read(&cld->cld_refcount),
+ cld->cld_logname, later, cfs_atomic_read(&cld->cld_refcount),
cld->cld_stopping, rq_state);
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
+ LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* Hold lock for rq_state */
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
if (cld->cld_stopping || (rq_state & RQ_STOP)) {
cld->cld_lostlock = 0;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
config_log_put(cld);
RETURN(0);
}
if (!(rq_state & RQ_RUNNING)) {
LASSERT(rq_state == 0);
rq_state = RQ_RUNNING | (later ? RQ_LATER : RQ_NOW);
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
rc = cfs_kernel_thread(mgc_requeue_thread, 0,
CLONE_VM | CLONE_FILES);
if (rc < 0) {
}
} else {
rq_state |= later ? RQ_LATER : RQ_NOW;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
cfs_waitq_signal(&rq_waitq);
}
LASSERT(lsi->lsi_srv_mnt == mnt);
/* The mgc fs exclusion sem. Only one fs can be setup at a time. */
- down(&cli->cl_mgc_sem);
+ cfs_down(&cli->cl_mgc_sem);
- cleanup_group_info();
+ cfs_cleanup_group_info();
obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
if (IS_ERR(obd->obd_fsops)) {
- up(&cli->cl_mgc_sem);
+ cfs_up(&cli->cl_mgc_sem);
CERROR("No fstype %s rc=%ld\n", MT_STR(lsi->lsi_ldd),
PTR_ERR(obd->obd_fsops));
RETURN(PTR_ERR(obd->obd_fsops));
fsfilt_put_ops(obd->obd_fsops);
obd->obd_fsops = NULL;
cli->cl_mgc_vfsmnt = NULL;
- up(&cli->cl_mgc_sem);
+ cfs_up(&cli->cl_mgc_sem);
RETURN(err);
}
if (obd->obd_fsops)
fsfilt_put_ops(obd->obd_fsops);
- up(&cli->cl_mgc_sem);
+ cfs_up(&cli->cl_mgc_sem);
RETURN(rc);
}
-static atomic_t mgc_count = ATOMIC_INIT(0);
+static cfs_atomic_t mgc_count = CFS_ATOMIC_INIT(0);
static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
case OBD_CLEANUP_EARLY:
break;
case OBD_CLEANUP_EXPORTS:
- if (atomic_dec_and_test(&mgc_count)) {
+ if (cfs_atomic_dec_and_test(&mgc_count)) {
/* Kick the requeue waitq - cld's should all be
stopping */
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
rq_state |= RQ_STOP;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
cfs_waitq_signal(&rq_waitq);
}
rc = obd_llog_finish(obd, 0);
lprocfs_obd_setup(obd, lvars.obd_vars);
sptlrpc_lprocfs_cliobd_attach(obd);
- spin_lock(&config_list_lock);
- atomic_inc(&mgc_count);
- if (atomic_read(&mgc_count) == 1) {
+ cfs_spin_lock(&config_list_lock);
+ cfs_atomic_inc(&mgc_count);
+ if (cfs_atomic_read(&mgc_count) == 1) {
rq_state &= ~RQ_STOP;
cfs_waitq_init(&rq_waitq);
}
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
RETURN(rc);
int rc;
ENTRY;
- if (!try_module_get(THIS_MODULE)) {
+ if (!cfs_try_module_get(THIS_MODULE)) {
CERROR("Can't get module. Is it alive?");
return -EINVAL;
}
GOTO(out, rc = -ENOTTY);
}
out:
- module_put(THIS_MODULE);
+ cfs_module_put(THIS_MODULE);
return rc;
}
if (KEY_IS(KEY_INIT_RECOV)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_initial_recov = *(int *)val;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
exp->exp_obd->obd_name, imp->imp_initial_recov);
RETURN(0);
if (vallen != sizeof(int))
RETURN(-EINVAL);
value = *(int *)val;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_initial_recov_bk = value > 0;
/* Even after the initial connection, give up all comms if
nobody answers the first time. */
imp->imp_recon_bk = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_MGC, "InitRecov %s %d/%d:d%d:i%d:r%d:or%d:%s\n",
imp->imp_obd->obd_name, value, imp->imp_initial_recov,
imp->imp_deactive, imp->imp_invalid,
sounds like badness. It actually might be fine, as long as
we're not trying to update from the same log
simultaneously (in which case we should use a per-log sem.) */
- down(&llog_process_lock);
+ cfs_down(&llog_process_lock);
if (cld->cld_stopping) {
- up(&llog_process_lock);
+ cfs_up(&llog_process_lock);
RETURN(0);
}
ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
if (!ctxt) {
CERROR("missing llog context\n");
- up(&llog_process_lock);
+ cfs_up(&llog_process_lock);
RETURN(-EINVAL);
}
CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
- up(&llog_process_lock);
+ cfs_up(&llog_process_lock);
RETURN(rc);
}
{
struct obd_device *obd = seq->private;
struct mgs_obd *mgs = &obd->u.mgs;
- struct list_head dentry_list;
+ cfs_list_t dentry_list;
struct l_linux_dirent *dirent, *n;
int rc, len;
ENTRY;
CERROR("Can't read config dir\n");
RETURN(rc);
}
- list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
- list_del(&dirent->lld_list);
+ cfs_list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
+ cfs_list_del(&dirent->lld_list);
len = strlen(dirent->lld_name);
if ((len > 7) && (strncmp(dirent->lld_name + len - 7, "-client",
len) == 0)) {
if (rc)
return rc;
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
seq_show_srpc_rules(seq, fsdb->fsdb_name, &fsdb->fsdb_srpc_gen);
- up(&fsdb->fsdb_sem);
+ cfs_up(&fsdb->fsdb_sem);
return 0;
}
struct mgs_tgt_srpc_conf *srpc_tgt;
int i;
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
seq_printf(seq, "fsname: %s\n", fsdb->fsdb_name);
seq_printf(seq, "flags: %#x gen: %d\n",
fsdb->fsdb_flags, fsdb->fsdb_gen);
for (i = 0; i < INDEX_MAP_SIZE * 8; i++)
- if (test_bit(i, fsdb->fsdb_mdt_index_map))
+ if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
seq_printf(seq, "%s-MDT%04x\n", fsdb->fsdb_name, i);
for (i = 0; i < INDEX_MAP_SIZE * 8; i++)
- if (test_bit(i, fsdb->fsdb_ost_index_map))
+ if (cfs_test_bit(i, fsdb->fsdb_ost_index_map))
seq_printf(seq, "%s-OST%04x\n", fsdb->fsdb_name, i);
seq_printf(seq, "\nSecure RPC Config Rules:\n");
}
seq_show_srpc_rules(seq, fsdb->fsdb_name, &fsdb->fsdb_srpc_gen);
- up(&fsdb->fsdb_sem);
+ cfs_up(&fsdb->fsdb_sem);
return 0;
}
/* we didn't find the right inode.. */
CDEBUG(D_INODE, "found wrong generation: inode %lu, link: %lu, "
"count: %d, generation %u/%u\n", inode->i_ino,
- (unsigned long)inode->i_nlink, atomic_read(&inode->i_count),
+ (unsigned long)inode->i_nlink,
+ atomic_read(&inode->i_count),
inode->i_generation, gen);
l_dput(result);
RETURN(ERR_PTR(-ENOENT));
ENTRY;
/* FIXME what's this? Do I need it? */
- rc = cleanup_group_info();
+ rc = cfs_cleanup_group_info();
if (rc)
RETURN(rc);
/* Internal mgs setup */
mgs_init_fsdb_list(obd);
- sema_init(&mgs->mgs_sem, 1);
+ cfs_sema_init(&mgs->mgs_sem, 1);
/* Setup proc */
lprocfs_mgs_init_vars(&lvars);
if (rc)
return rc;
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
if (sptlrpc_rule_set_choose(&fsdb->fsdb_srpc_gen,
LUSTRE_SP_MGC, LUSTRE_SP_MGS,
req->rq_peer.nid,
/* by defualt allow any flavors */
flvr.sf_rpc = SPTLRPC_FLVR_ANY;
}
- up(&fsdb->fsdb_sem);
+ cfs_up(&fsdb->fsdb_sem);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
rc = -EACCES;
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
} else {
if (exp->exp_sp_peer != req->rq_sp_from) {
CERROR("RPC source %s doesn't match %s\n",
static inline int mgs_init_export(struct obd_export *exp)
{
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return ldlm_init_export(exp);
}
if (lcfg == NULL)
GOTO(out_pool, rc = -ENOMEM);
- if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
+ if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
GOTO(out_pool, rc = -EFAULT);
if (lcfg->lcfg_bufcount < 2) {
OBD_ALLOC(lcfg, data->ioc_plen1);
if (lcfg == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
+ if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
GOTO(out_free, rc = -EFAULT);
if (lcfg->lcfg_bufcount < 1)
/* mgs_llog.c */
int class_dentry_readdir(struct obd_device *obd, struct dentry *dir,
struct vfsmount *inmnt,
- struct list_head *dentry_list);
+ cfs_list_t *dentry_list);
#define MGSSELF_NAME "_mgs"
struct fs_db {
char fsdb_name[9];
- struct list_head fsdb_list; /* list of databases */
- struct semaphore fsdb_sem;
+ cfs_list_t fsdb_list; /* list of databases */
+ cfs_semaphore_t fsdb_sem;
void *fsdb_ost_index_map; /* bitmap of used indicies */
void *fsdb_mdt_index_map; /* bitmap of used indicies */
/* COMPAT_146 these items must be recorded out of the old client log */
- char *fsdb_clilov; /* COMPAT_146 client lov name */
+ char *fsdb_clilov; /* COMPAT_146 client lov name */
char *fsdb_clilmv;
- char *fsdb_mdtlov; /* COMPAT_146 mds lov name */
+ char *fsdb_mdtlov; /* COMPAT_146 mds lov name */
char *fsdb_mdtlmv;
- char *fsdb_mdc; /* COMPAT_146 mdc name */
+ char *fsdb_mdc; /* COMPAT_146 mdc name */
/* end COMPAT_146 */
__u32 fsdb_flags;
__u32 fsdb_gen;
/* Caller must list_del and OBD_FREE each dentry from the list */
int class_dentry_readdir(struct obd_device *obd, struct dentry *dir,
struct vfsmount *inmnt,
- struct list_head *dentry_list){
+ cfs_list_t *dentry_list){
/* see mds_cleanup_pending */
struct lvfs_run_ctxt saved;
struct file *file;
CDEBUG(D_MGS, "OST index for %s is %u (%s)\n",
lustre_cfg_string(lcfg, 1), index,
lustre_cfg_string(lcfg, 2));
- set_bit(index, fsdb->fsdb_ost_index_map);
+ cfs_set_bit(index, fsdb->fsdb_ost_index_map);
}
/* Figure out mdt indicies */
}
rc = 0;
CDEBUG(D_MGS, "MDT index is %u\n", index);
- set_bit(index, fsdb->fsdb_mdt_index_map);
+ cfs_set_bit(index, fsdb->fsdb_mdt_index_map);
}
/* COMPAT_146 */
ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
LASSERT(ctxt != NULL);
name_create(&logname, fsdb->fsdb_name, "-client");
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
rc = llog_create(ctxt, &loghandle, NULL, logname);
if (rc)
rc = rc2;
out_pop:
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- up(&fsdb->fsdb_sem);
+ cfs_up(&fsdb->fsdb_sem);
name_destroy(&logname);
llog_ctxt_put(ctxt);
{
struct mgs_obd *mgs = &obd->u.mgs;
struct fs_db *fsdb;
- struct list_head *tmp;
+ cfs_list_t *tmp;
- list_for_each(tmp, &mgs->mgs_fs_db_list) {
- fsdb = list_entry(tmp, struct fs_db, fsdb_list);
+ cfs_list_for_each(tmp, &mgs->mgs_fs_db_list) {
+ fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
if (strcmp(fsdb->fsdb_name, fsname) == 0)
return fsdb;
}
RETURN(NULL);
strcpy(fsdb->fsdb_name, fsname);
- sema_init(&fsdb->fsdb_sem, 1);
+ cfs_sema_init(&fsdb->fsdb_sem, 1);
fsdb->fsdb_fl_udesc = 1;
if (strcmp(fsname, MGSSELF_NAME) == 0) {
lproc_mgs_add_live(obd, fsdb);
}
- list_add(&fsdb->fsdb_list, &mgs->mgs_fs_db_list);
+ cfs_list_add(&fsdb->fsdb_list, &mgs->mgs_fs_db_list);
RETURN(fsdb);
err:
static void mgs_free_fsdb(struct obd_device *obd, struct fs_db *fsdb)
{
/* wait for anyone with the sem */
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
lproc_mgs_del_live(obd, fsdb);
- list_del(&fsdb->fsdb_list);
+ cfs_list_del(&fsdb->fsdb_list);
if (fsdb->fsdb_ost_index_map)
OBD_FREE(fsdb->fsdb_ost_index_map, INDEX_MAP_SIZE);
if (fsdb->fsdb_mdt_index_map)
{
struct mgs_obd *mgs = &obd->u.mgs;
struct fs_db *fsdb;
- struct list_head *tmp, *tmp2;
- down(&mgs->mgs_sem);
- list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
- fsdb = list_entry(tmp, struct fs_db, fsdb_list);
+ cfs_list_t *tmp, *tmp2;
+ cfs_down(&mgs->mgs_sem);
+ cfs_list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
+ fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
mgs_free_fsdb(obd, fsdb);
}
- up(&mgs->mgs_sem);
+ cfs_up(&mgs->mgs_sem);
return 0;
}
struct fs_db *fsdb;
int rc = 0;
- down(&mgs->mgs_sem);
+ cfs_down(&mgs->mgs_sem);
fsdb = mgs_find_fsdb(obd, name);
if (fsdb) {
- up(&mgs->mgs_sem);
+ cfs_up(&mgs->mgs_sem);
*dbh = fsdb;
return 0;
}
CDEBUG(D_MGS, "Creating new db\n");
fsdb = mgs_new_fsdb(obd, name);
- up(&mgs->mgs_sem);
+ cfs_up(&mgs->mgs_sem);
if (!fsdb)
return -ENOMEM;
else
RETURN(-EINVAL);
- if (test_bit(mti->mti_stripe_index, imap))
+ if (cfs_test_bit(mti->mti_stripe_index, imap))
RETURN(1);
RETURN(0);
}
{
int i;
for (i = 0; i < map_len * 8; i++)
- if (!test_bit(i, index_map)) {
+ if (!cfs_test_bit(i, index_map)) {
return i;
}
CERROR("max index %d exceeded.\n", i);
RETURN(-ERANGE);
}
- if (test_bit(mti->mti_stripe_index, imap)) {
+ if (cfs_test_bit(mti->mti_stripe_index, imap)) {
if ((mti->mti_flags & LDD_F_VIRGIN) &&
!(mti->mti_flags & LDD_F_WRITECONF)) {
LCONSOLE_ERROR_MSG(0x140, "Server %s requested index "
}
}
- set_bit(mti->mti_stripe_index, imap);
+ cfs_set_bit(mti->mti_stripe_index, imap);
fsdb->fsdb_flags &= ~FSDB_LOG_EMPTY;
server_make_name(mti->mti_flags, mti->mti_stripe_index,
mti->mti_fsname, mti->mti_svname);
char *devname, char *comment)
{
struct mgs_obd *mgs = &obd->u.mgs;
- struct list_head dentry_list;
+ cfs_list_t dentry_list;
struct l_linux_dirent *dirent, *n;
char *fsname = mti->mti_fsname;
char *logname;
}
/* Could use fsdb index maps instead of directory listing */
- list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
- list_del(&dirent->lld_list);
+ cfs_list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
+ cfs_list_del(&dirent->lld_list);
/* don't write to sptlrpc rule log */
if (strncmp(fsname, dirent->lld_name, len) == 0 &&
strstr(dirent->lld_name, "-sptlrpc") == NULL) {
for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
char *mdtname;
if (i != mti->mti_stripe_index &&
- test_bit(i, fsdb->fsdb_mdt_index_map)) {
+ cfs_test_bit(i, fsdb->fsdb_mdt_index_map)) {
name_create_mdt(&mdtname, mti->mti_fsname, i);
rc = mgs_write_log_mdc_to_mdt(obd, fsdb, mti, mdtname);
name_destroy(&mdtname);
/* Add ost to all MDT lov defs */
for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
- if (test_bit(i, fsdb->fsdb_mdt_index_map)) {
+ if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map)) {
char mdt_index[9];
name_create_mdt_and_lov(&logname, &lovname, fsdb, i);
int i;
for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
- if (!test_bit(i, fsdb->fsdb_mdt_index_map))
+ if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
continue;
name_create_mdt(&logname, mti->mti_fsname, i);
name_create_mdt_osc(&cliname, mti->mti_svname, fsdb, i);
/* Modify mdtlov */
/* Add to all MDT logs for CMD */
for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
- if (!test_bit(i, fsdb->fsdb_mdt_index_map))
+ if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
continue;
name_create_mdt(&logname, mti->mti_fsname, i);
rc = mgs_modify(obd, fsdb, mti, logname,
int i;
for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
- if (!test_bit(i, fsdb->fsdb_mdt_index_map))
+ if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
continue;
name_destroy(&cname);
name_create_mdt_osc(&cname, mti->mti_svname,
goto active_err;
if (rc & LDD_F_SV_ALL) {
for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
- if (!test_bit(i,
- fsdb->fsdb_mdt_index_map))
+ if (!cfs_test_bit(i,
+ fsdb->fsdb_mdt_index_map))
continue;
name_create_mdt(&logname, mti->mti_fsname, i);
rc = mgs_wlp_lcfg(obd, fsdb, mti,
the failover list. Modify mti->params for rewriting back at
server_register_target(). */
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
rc = mgs_write_log_add_failnid(obd, fsdb, mti);
- up(&fsdb->fsdb_sem);
+ cfs_up(&fsdb->fsdb_sem);
RETURN(rc);
#endif
RETURN(rc);
}
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
if (mti->mti_flags &
(LDD_F_VIRGIN | LDD_F_UPGRADE14 | LDD_F_WRITECONF)) {
OBD_FREE(buf, strlen(mti->mti_params) + 1);
out_up:
- up(&fsdb->fsdb_sem);
+ cfs_up(&fsdb->fsdb_sem);
RETURN(rc);
}
{
struct mgs_obd *mgs = &obd->u.mgs;
static struct fs_db *fsdb;
- struct list_head dentry_list;
+ cfs_list_t dentry_list;
struct l_linux_dirent *dirent, *n;
int rc, len = strlen(fsname);
ENTRY;
RETURN(rc);
}
- down(&mgs->mgs_sem);
+ cfs_down(&mgs->mgs_sem);
/* Delete the fs db */
fsdb = mgs_find_fsdb(obd, fsname);
if (fsdb)
mgs_free_fsdb(obd, fsdb);
- list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
- list_del(&dirent->lld_list);
+ cfs_list_for_each_entry_safe(dirent, n, &dentry_list, lld_list) {
+ cfs_list_del(&dirent->lld_list);
if (strncmp(fsname, dirent->lld_name, len) == 0) {
CDEBUG(D_MGS, "Removing log %s\n", dirent->lld_name);
mgs_erase_log(obd, dirent->lld_name);
OBD_FREE(dirent, sizeof(*dirent));
}
- up(&mgs->mgs_sem);
+ cfs_up(&mgs->mgs_sem);
RETURN(rc);
}
mti->mti_flags = rc | LDD_F_PARAM;
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
/* this is lctl conf_param's single param path, there is not
need to loop through parameters */
rc = mgs_write_log_param(obd, fsdb, mti, mti->mti_params);
- up(&fsdb->fsdb_sem);
+ cfs_up(&fsdb->fsdb_sem);
out:
OBD_FREE_PTR(mti);
}
}
- down(&fsdb->fsdb_sem);
+ cfs_down(&fsdb->fsdb_sem);
if (canceled_label != NULL) {
OBD_ALLOC_PTR(mti);
/* write pool def to all MDT logs */
for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
- if (test_bit(i, fsdb->fsdb_mdt_index_map)) {
+ if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map)) {
name_create_mdt_and_lov(&logname, &lovname, fsdb, i);
if (canceled_label != NULL) {
cmd, fsname, poolname, ostname, label);
name_destroy(&logname);
- up(&fsdb->fsdb_sem);
+ cfs_up(&fsdb->fsdb_sem);
EXIT;
out:
#ifdef __KERNEL__
/* lock for capa hash/capa_list/fo_capa_keys */
-spinlock_t capa_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t capa_lock = CFS_SPIN_LOCK_UNLOCKED;
-struct list_head capa_list[CAPA_SITE_MAX];
+cfs_list_t capa_list[CAPA_SITE_MAX];
static struct capa_hmac_alg capa_hmac_algs[] = {
DEF_CAPA_HMAC_ALG("sha1", SHA1, 20, 20),
EXPORT_SYMBOL(capa_lock);
EXPORT_SYMBOL(capa_count);
-struct hlist_head *init_capa_hash(void)
+cfs_hlist_head_t *init_capa_hash(void)
{
- struct hlist_head *hash;
+ cfs_hlist_head_t *hash;
int nr_hash, i;
OBD_ALLOC(hash, CFS_PAGE_SIZE);
if (!hash)
return NULL;
- nr_hash = CFS_PAGE_SIZE / sizeof(struct hlist_head);
+ nr_hash = CFS_PAGE_SIZE / sizeof(cfs_hlist_head_t);
LASSERT(nr_hash > NR_CAPAHASH);
for (i = 0; i < NR_CAPAHASH; i++)
- INIT_HLIST_HEAD(hash + i);
+ CFS_INIT_HLIST_HEAD(hash + i);
return hash;
}
static inline void capa_delete(struct obd_capa *ocapa)
{
LASSERT(capa_on_server(ocapa));
- hlist_del_init(&ocapa->u.tgt.c_hash);
- list_del_init(&ocapa->c_list);
+ cfs_hlist_del_init(&ocapa->u.tgt.c_hash);
+ cfs_list_del_init(&ocapa->c_list);
capa_count[ocapa->c_site]--;
/* release the ref when alloc */
capa_put(ocapa);
}
-void cleanup_capa_hash(struct hlist_head *hash)
+void cleanup_capa_hash(cfs_hlist_head_t *hash)
{
int i;
- struct hlist_node *pos, *next;
+ cfs_hlist_node_t *pos, *next;
struct obd_capa *oc;
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
for (i = 0; i < NR_CAPAHASH; i++) {
- hlist_for_each_entry_safe(oc, pos, next, hash + i, u.tgt.c_hash)
+ cfs_hlist_for_each_entry_safe(oc, pos, next, hash + i,
+ u.tgt.c_hash)
capa_delete(oc);
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
OBD_FREE(hash, CFS_PAGE_SIZE);
}
}
static struct obd_capa *find_capa(struct lustre_capa *capa,
- struct hlist_head *head, int alive)
+ cfs_hlist_head_t *head, int alive)
{
- struct hlist_node *pos;
+ cfs_hlist_node_t *pos;
struct obd_capa *ocapa;
int len = alive ? offsetof(struct lustre_capa, lc_keyid):sizeof(*capa);
- hlist_for_each_entry(ocapa, pos, head, u.tgt.c_hash) {
+ cfs_hlist_for_each_entry(ocapa, pos, head, u.tgt.c_hash) {
if (memcmp(&ocapa->c_capa, capa, len))
continue;
/* don't return one that will expire soon in this case */
}
#define LRU_CAPA_DELETE_COUNT 12
-static inline void capa_delete_lru(struct list_head *head)
+static inline void capa_delete_lru(cfs_list_t *head)
{
struct obd_capa *ocapa;
- struct list_head *node = head->next;
+ cfs_list_t *node = head->next;
int count = 0;
/* free LRU_CAPA_DELETE_COUNT unused capa from head */
while (count++ < LRU_CAPA_DELETE_COUNT) {
- ocapa = list_entry(node, struct obd_capa, c_list);
+ ocapa = cfs_list_entry(node, struct obd_capa, c_list);
node = node->next;
- if (atomic_read(&ocapa->c_refc))
+ if (cfs_atomic_read(&ocapa->c_refc))
continue;
DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
}
/* add or update */
-struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa)
+struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
{
- struct hlist_head *head = hash + capa_hashfn(&capa->lc_fid);
+ cfs_hlist_head_t *head = hash + capa_hashfn(&capa->lc_fid);
struct obd_capa *ocapa, *old = NULL;
- struct list_head *list = &capa_list[CAPA_SITE_SERVER];
+ cfs_list_t *list = &capa_list[CAPA_SITE_SERVER];
ocapa = alloc_capa(CAPA_SITE_SERVER);
if (IS_ERR(ocapa))
return NULL;
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
old = find_capa(capa, head, 0);
if (!old) {
ocapa->c_capa = *capa;
set_capa_expiry(ocapa);
- hlist_add_head(&ocapa->u.tgt.c_hash, head);
- list_add_tail(&ocapa->c_list, list);
+ cfs_hlist_add_head(&ocapa->u.tgt.c_hash, head);
+ cfs_list_add_tail(&ocapa->c_list, list);
capa_get(ocapa);
capa_count[CAPA_SITE_SERVER]++;
if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE)
capa_delete_lru(list);
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
return ocapa;
} else {
capa_get(old);
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
capa_put(ocapa);
return old;
}
}
-struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
+struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa,
int alive)
{
struct obd_capa *ocapa;
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
if (ocapa) {
- list_move_tail(&ocapa->c_list, &capa_list[CAPA_SITE_SERVER]);
+ cfs_list_move_tail(&ocapa->c_list,
+ &capa_list[CAPA_SITE_SERVER]);
capa_get(ocapa);
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
return ocapa;
}
void capa_cpy(void *capa, struct obd_capa *ocapa)
{
- spin_lock(&ocapa->c_lock);
+ cfs_spin_lock(&ocapa->c_lock);
*(struct lustre_capa *)capa = ocapa->c_capa;
- spin_unlock(&ocapa->c_lock);
+ cfs_spin_unlock(&ocapa->c_lock);
}
EXPORT_SYMBOL(init_capa_hash);
*/
#define cl_io_for_each(slice, io) \
- list_for_each_entry((slice), &io->ci_layers, cis_linkage)
+ cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
- list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
+ cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
static inline int cl_io_type_is_valid(enum cl_io_type type)
{
LINVRNT(cl_io_invariant(io));
ENTRY;
- while (!list_empty(&io->ci_layers)) {
+ while (!cfs_list_empty(&io->ci_layers)) {
slice = container_of(io->ci_layers.next, struct cl_io_slice,
cis_linkage);
- list_del_init(&slice->cis_linkage);
+ cfs_list_del_init(&slice->cis_linkage);
if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
/*
done = 1;
prev = NULL;
- list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
- cill_linkage) {
+ cfs_list_for_each_entry_safe(curr, temp,
+ &io->ci_lockset.cls_todo,
+ cill_linkage) {
if (prev != NULL) {
switch (cl_lock_descr_cmp(&prev->cill_descr,
&curr->cill_descr)) {
default:
LBUG();
case +1:
- list_move_tail(&curr->cill_linkage,
- &prev->cill_linkage);
+ cfs_list_move_tail(&curr->cill_linkage,
+ &prev->cill_linkage);
done = 0;
continue; /* don't change prev: it's
* still "previous" */
* \retval +ve there is a matching lock in the \a queue
* \retval 0 there are no matching locks in the \a queue
*/
-int cl_queue_match(const struct list_head *queue,
+int cl_queue_match(const cfs_list_t *queue,
const struct cl_lock_descr *need)
{
struct cl_io_lock_link *scan;
ENTRY;
- list_for_each_entry(scan, queue, cill_linkage) {
+ cfs_list_for_each_entry(scan, queue, cill_linkage) {
if (cl_lock_descr_match(&scan->cill_descr, need))
RETURN(+1);
}
lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
if (!IS_ERR(lock)) {
link->cill_lock = lock;
- list_move(&link->cill_linkage, &set->cls_curr);
+ cfs_list_move(&link->cill_linkage, &set->cls_curr);
if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
result = cl_wait(env, lock);
if (result == 0)
- list_move(&link->cill_linkage, &set->cls_done);
+ cfs_list_move(&link->cill_linkage,
+ &set->cls_done);
} else
result = 0;
} else
struct cl_lock *lock = link->cill_lock;
ENTRY;
- list_del_init(&link->cill_linkage);
+ cfs_list_del_init(&link->cill_linkage);
if (lock != NULL) {
cl_lock_release(env, lock, "io", io);
link->cill_lock = NULL;
ENTRY;
result = 0;
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
+ cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
if (!cl_lockset_match(set, &link->cill_descr, 0)) {
/* XXX some locking to guarantee that locks aren't
* expanded in between. */
cl_lock_link_fini(env, io, link);
}
if (result == 0) {
- list_for_each_entry_safe(link, temp,
- &set->cls_curr, cill_linkage) {
+ cfs_list_for_each_entry_safe(link, temp,
+ &set->cls_curr, cill_linkage) {
lock = link->cill_lock;
result = cl_wait(env, lock);
if (result == 0)
- list_move(&link->cill_linkage, &set->cls_done);
+ cfs_list_move(&link->cill_linkage,
+ &set->cls_done);
else
break;
}
ENTRY;
set = &io->ci_lockset;
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
+ cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
cl_lock_link_fini(env, io, link);
- list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
+ cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
cl_lock_link_fini(env, io, link);
- list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
+ cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
cl_unuse(env, link->cill_lock);
cl_lock_link_fini(env, io, link);
}
if (cl_lockset_match(&io->ci_lockset, &link->cill_descr, 1))
result = +1;
else {
- list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
+ cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
result = 0;
}
RETURN(result);
/*
* If ->cio_submit() failed, no pages were sent.
*/
- LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
+ LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
RETURN(result);
}
EXPORT_SYMBOL(cl_io_submit_rw);
rc = cl_sync_io_wait(env, io, &queue->c2_qout,
anchor, timeout);
} else {
- LASSERT(list_empty(&queue->c2_qout.pl_pages));
+ LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
cl_page_list_for_each(pg, &queue->c2_qin)
pg->cp_sync_io = NULL;
}
struct cl_object *obj,
const struct cl_io_operations *ops)
{
- struct list_head *linkage = &slice->cis_linkage;
+ cfs_list_t *linkage = &slice->cis_linkage;
LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
- list_empty(linkage));
+ cfs_list_empty(linkage));
ENTRY;
- list_add_tail(linkage, &io->ci_layers);
+ cfs_list_add_tail(linkage, &io->ci_layers);
slice->cis_io = io;
slice->cis_obj = obj;
slice->cis_iop = ops;
LASSERT(page->cp_owner != NULL);
LINVRNT(plist->pl_owner == cfs_current());
- lockdep_off();
- mutex_lock(&page->cp_mutex);
- lockdep_on();
- LASSERT(list_empty(&page->cp_batch));
- list_add_tail(&page->cp_batch, &plist->pl_pages);
+ cfs_lockdep_off();
+ cfs_mutex_lock(&page->cp_mutex);
+ cfs_lockdep_on();
+ LASSERT(cfs_list_empty(&page->cp_batch));
+ cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
cl_page_get(page);
LINVRNT(plist->pl_owner == cfs_current());
ENTRY;
- list_del_init(&page->cp_batch);
- lockdep_off();
- mutex_unlock(&page->cp_mutex);
- lockdep_on();
+ cfs_list_del_init(&page->cp_batch);
+ cfs_lockdep_off();
+ cfs_mutex_unlock(&page->cp_mutex);
+ cfs_lockdep_on();
--plist->pl_nr;
lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
LINVRNT(src->pl_owner == cfs_current());
ENTRY;
- list_move_tail(&page->cp_batch, &dst->pl_pages);
+ cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
--src->pl_nr;
++dst->pl_nr;
lu_ref_set_at(&page->cp_reference,
cl_page_list_for_each_safe(page, temp, plist) {
LASSERT(plist->pl_nr > 0);
- list_del_init(&page->cp_batch);
- lockdep_off();
- mutex_unlock(&page->cp_mutex);
- lockdep_on();
+ cfs_list_del_init(&page->cp_batch);
+ cfs_lockdep_off();
+ cfs_mutex_unlock(&page->cp_mutex);
+ cfs_lockdep_on();
--plist->pl_nr;
/*
* cl_page_disown0 rather than usual cl_page_disown() is used,
const struct cl_req_operations *ops)
{
ENTRY;
- list_add_tail(&slice->crs_linkage, &req->crq_layers);
+ cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
slice->crs_dev = dev;
slice->crs_ops = ops;
slice->crs_req = req;
{
unsigned i;
- LASSERT(list_empty(&req->crq_pages));
+ LASSERT(cfs_list_empty(&req->crq_pages));
LASSERT(req->crq_nrpages == 0);
- LINVRNT(list_empty(&req->crq_layers));
+ LINVRNT(cfs_list_empty(&req->crq_layers));
LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
ENTRY;
result = 0;
page = cl_page_top(page);
do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
if (dev->cd_ops->cdo_req_init != NULL) {
result = dev->cd_ops->cdo_req_init(env,
/*
* for the lack of list_for_each_entry_reverse_safe()...
*/
- while (!list_empty(&req->crq_layers)) {
- slice = list_entry(req->crq_layers.prev,
- struct cl_req_slice, crs_linkage);
- list_del_init(&slice->crs_linkage);
+ while (!cfs_list_empty(&req->crq_layers)) {
+ slice = cfs_list_entry(req->crq_layers.prev,
+ struct cl_req_slice, crs_linkage);
+ cfs_list_del_init(&slice->crs_linkage);
if (slice->crs_ops->cro_completion != NULL)
slice->crs_ops->cro_completion(env, slice, rc);
}
page = cl_page_top(page);
LINVRNT(cl_page_is_vmlocked(env, page));
- LASSERT(list_empty(&page->cp_flight));
+ LASSERT(cfs_list_empty(&page->cp_flight));
LASSERT(page->cp_req == NULL);
- list_add_tail(&page->cp_flight, &req->crq_pages);
+ cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
++req->crq_nrpages;
page->cp_req = req;
obj = cl_object_top(page->cp_obj);
page = cl_page_top(page);
LINVRNT(cl_page_is_vmlocked(env, page));
- LASSERT(!list_empty(&page->cp_flight));
+ LASSERT(!cfs_list_empty(&page->cp_flight));
LASSERT(req->crq_nrpages > 0);
- list_del_init(&page->cp_flight);
+ cfs_list_del_init(&page->cp_flight);
--req->crq_nrpages;
page->cp_req = NULL;
EXIT;
LASSERT(req->crq_o[i].ro_obj != NULL);
result = 0;
- list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+ cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
if (slice->crs_ops->cro_prep != NULL) {
result = slice->crs_ops->cro_prep(env, slice);
if (result != 0)
struct cl_page *page;
int i;
- LASSERT(!list_empty(&req->crq_pages));
+ LASSERT(!cfs_list_empty(&req->crq_pages));
ENTRY;
/* Take any page to use as a model. */
- page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
+ page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
for (i = 0; i < req->crq_nrobjs; ++i) {
- list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+ cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
const struct cl_page_slice *scan;
const struct cl_object *obj;
{
ENTRY;
cfs_waitq_init(&anchor->csi_waitq);
- atomic_set(&anchor->csi_sync_nr, nrpages);
+ cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
anchor->csi_sync_rc = 0;
EXIT;
}
LASSERT(timeout >= 0);
rc = l_wait_event(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0,
+ cfs_atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
if (rc < 0) {
CERROR("SYNC IO failed with error: %d, try to cancel "
"%d remaining pages\n",
- rc, atomic_read(&anchor->csi_sync_nr));
+ rc, cfs_atomic_read(&anchor->csi_sync_nr));
(void)cl_io_cancel(env, io, queue);
lwi = (struct l_wait_info) { 0 };
(void)l_wait_event(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0,
+ cfs_atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
} else {
rc = anchor->csi_sync_rc;
}
- LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
+ LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
cl_page_list_assume(env, io, queue);
POISON(anchor, 0x5a, sizeof *anchor);
RETURN(rc);
* ->{prepare,commit}_write(). Completion is used to signal the end of
* IO.
*/
- LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
- if (atomic_dec_and_test(&anchor->csi_sync_nr))
+ LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
+ if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
cfs_waitq_broadcast(&anchor->csi_waitq);
EXIT;
}
#include "cl_internal.h"
/** Lock class of cl_lock::cll_guard */
-static struct lock_class_key cl_lock_guard_class;
+static cfs_lock_class_key_t cl_lock_guard_class;
static cfs_mem_cache_t *cl_lock_kmem;
static struct lu_kmem_descr cl_lock_caches[] = {
return
cl_is_lock(lock) &&
ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
- atomic_read(&lock->cll_ref) >= lock->cll_holds &&
+ cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
lock->cll_holds >= lock->cll_users &&
lock->cll_holds >= 0 &&
lock->cll_users >= 0 &&
{
int result;
- result = atomic_read(&lock->cll_ref) > 0 &&
+ result = cfs_atomic_read(&lock->cll_ref) > 0 &&
cl_lock_invariant_trusted(env, lock);
if (!result && env != NULL)
CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
CDEBUG(level, "%s: %p@(%i %p %i %d %d %d %d %lx)"
"(%p/%d/%i) at %s():%d\n",
- prefix, lock,
- atomic_read(&lock->cll_ref), lock->cll_guarder, lock->cll_depth,
+ prefix, lock, cfs_atomic_read(&lock->cll_ref),
+ lock->cll_guarder, lock->cll_depth,
lock->cll_state, lock->cll_error, lock->cll_holds,
lock->cll_users, lock->cll_flags,
env, h->coh_nesting, cl_lock_nr_mutexed(env),
#define RETIP ((unsigned long)__builtin_return_address(0))
#ifdef CONFIG_LOCKDEP
-static struct lock_class_key cl_lock_key;
+static cfs_lock_class_key_t cl_lock_key;
static void cl_lock_lockdep_init(struct cl_lock *lock)
{
{
ENTRY;
slice->cls_lock = lock;
- list_add_tail(&slice->cls_linkage, &lock->cll_layers);
+ cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
slice->cls_obj = obj;
slice->cls_ops = ops;
EXIT;
ENTRY;
cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
- might_sleep();
- while (!list_empty(&lock->cll_layers)) {
+ cfs_might_sleep();
+ while (!cfs_list_empty(&lock->cll_layers)) {
struct cl_lock_slice *slice;
- slice = list_entry(lock->cll_layers.next, struct cl_lock_slice,
- cls_linkage);
- list_del_init(lock->cll_layers.next);
+ slice = cfs_list_entry(lock->cll_layers.next,
+ struct cl_lock_slice, cls_linkage);
+ cfs_list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
- atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
- atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
+ cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
+ cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
cl_object_put(env, obj);
lu_ref_fini(&lock->cll_reference);
lu_ref_fini(&lock->cll_holders);
- mutex_destroy(&lock->cll_guard);
+ cfs_mutex_destroy(&lock->cll_guard);
OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
EXIT;
}
site = cl_object_site(obj);
CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
+ cfs_atomic_read(&lock->cll_ref), lock, RETIP);
- if (atomic_dec_and_test(&lock->cll_ref)) {
+ if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
if (lock->cll_state == CLS_FREEING) {
- LASSERT(list_empty(&lock->cll_linkage));
+ LASSERT(cfs_list_empty(&lock->cll_linkage));
cl_lock_free(env, lock);
}
- atomic_dec(&site->cs_locks.cs_busy);
+ cfs_atomic_dec(&site->cs_locks.cs_busy);
}
EXIT;
}
{
LINVRNT(cl_lock_invariant(NULL, lock));
CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- atomic_inc(&lock->cll_ref);
+ cfs_atomic_read(&lock->cll_ref), lock, RETIP);
+ cfs_atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);
LASSERT(cl_is_lock(lock));
CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- if (atomic_inc_return(&lock->cll_ref) == 1)
- atomic_inc(&site->cs_locks.cs_busy);
+ cfs_atomic_read(&lock->cll_ref), lock, RETIP);
+ if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
+ cfs_atomic_inc(&site->cs_locks.cs_busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);
ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
if (lock != NULL) {
- atomic_set(&lock->cll_ref, 1);
+ cfs_atomic_set(&lock->cll_ref, 1);
lock->cll_descr = *descr;
lock->cll_state = CLS_NEW;
cl_object_get(obj);
CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
lu_ref_init(&lock->cll_reference);
lu_ref_init(&lock->cll_holders);
- mutex_init(&lock->cll_guard);
- lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
+ cfs_mutex_init(&lock->cll_guard);
+ cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
cfs_waitq_init(&lock->cll_wq);
head = obj->co_lu.lo_header;
- atomic_inc(&site->cs_locks_state[CLS_NEW]);
- atomic_inc(&site->cs_locks.cs_total);
- atomic_inc(&site->cs_locks.cs_created);
+ cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
+ cfs_atomic_inc(&site->cs_locks.cs_total);
+ cfs_atomic_inc(&site->cs_locks.cs_created);
cl_lock_lockdep_init(lock);
- list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry(obj, &head->loh_layers,
+ co_lu.lo_linkage) {
int err;
err = obj->co_ops->coo_lock_init(env, obj, lock, io);
LINVRNT(cl_lock_invariant_trusted(env, lock));
ENTRY;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_fits_into != NULL &&
!slice->cls_ops->clo_fits_into(env, slice, need, io))
RETURN(0);
head = cl_object_header(obj);
site = cl_object_site(obj);
LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
- atomic_inc(&site->cs_locks.cs_lookup);
- list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+ cfs_atomic_inc(&site->cs_locks.cs_lookup);
+ cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
int matched;
LASSERT(cl_is_lock(lock));
matched);
if (matched) {
cl_lock_get_trust(lock);
- atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
+ cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
RETURN(lock);
}
}
head = cl_object_header(obj);
site = cl_object_site(obj);
- spin_lock(&head->coh_lock_guard);
+ cfs_spin_lock(&head->coh_lock_guard);
lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_unlock(&head->coh_lock_guard);
if (lock == NULL) {
lock = cl_lock_alloc(env, obj, io, need);
if (!IS_ERR(lock)) {
struct cl_lock *ghost;
- spin_lock(&head->coh_lock_guard);
+ cfs_spin_lock(&head->coh_lock_guard);
ghost = cl_lock_lookup(env, obj, io, need);
if (ghost == NULL) {
- list_add_tail(&lock->cll_linkage, &head->coh_locks);
- spin_unlock(&head->coh_lock_guard);
- atomic_inc(&site->cs_locks.cs_busy);
+ cfs_list_add_tail(&lock->cll_linkage,
+ &head->coh_locks);
+ cfs_spin_unlock(&head->coh_lock_guard);
+ cfs_atomic_inc(&site->cs_locks.cs_busy);
} else {
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_unlock(&head->coh_lock_guard);
/*
* Other threads can acquire references to the
* top-lock through its sub-locks. Hence, it
obj = need->cld_obj;
head = cl_object_header(obj);
- spin_lock(&head->coh_lock_guard);
+ cfs_spin_lock(&head->coh_lock_guard);
lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_unlock(&head->coh_lock_guard);
if (lock == NULL)
return NULL;
LINVRNT(cl_lock_invariant_trusted(NULL, lock));
ENTRY;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
RETURN(slice);
}
info = cl_env_info(env);
for (i = 0; i < hdr->coh_nesting; ++i)
LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
+ cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
lock->cll_guarder = cfs_current();
LINVRNT(lock->cll_depth == 0);
}
if (lock->cll_guarder == cfs_current()) {
LINVRNT(lock->cll_depth > 0);
cl_lock_mutex_tail(env, lock);
- } else if (mutex_trylock(&lock->cll_guard)) {
+ } else if (cfs_mutex_trylock(&lock->cll_guard)) {
LINVRNT(lock->cll_depth == 0);
lock->cll_guarder = cfs_current();
cl_lock_mutex_tail(env, lock);
counters->ctc_nr_locks_locked--;
if (--lock->cll_depth == 0) {
lock->cll_guarder = NULL;
- mutex_unlock(&lock->cll_guard);
+ cfs_mutex_unlock(&lock->cll_guard);
}
}
EXPORT_SYMBOL(cl_lock_mutex_put);
const struct cl_lock_slice *slice;
lock->cll_flags |= CLF_CANCELLED;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
+ cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
if (slice->cls_ops->clo_cancel != NULL)
slice->cls_ops->clo_cancel(env, slice);
}
head = cl_object_header(lock->cll_descr.cld_obj);
- spin_lock(&head->coh_lock_guard);
- list_del_init(&lock->cll_linkage);
+ cfs_spin_lock(&head->coh_lock_guard);
+ cfs_list_del_init(&lock->cll_linkage);
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_unlock(&head->coh_lock_guard);
/*
* From now on, no new references to this lock can be acquired
* by cl_lock_lookup().
*/
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
+ cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
if (slice->cls_ops->clo_delete != NULL)
slice->cls_ops->clo_delete(env, slice);
}
if (result == 0) {
cfs_waitlink_init(&waiter);
cfs_waitq_add(&lock->cll_wq, &waiter);
- set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cl_lock_mutex_put(env, lock);
LASSERT(cl_lock_nr_mutexed(env) == 0);
cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
cl_lock_mutex_get(env, lock);
- set_current_state(CFS_TASK_RUNNING);
+ cfs_set_current_state(CFS_TASK_RUNNING);
cfs_waitq_del(&lock->cll_wq, &waiter);
result = cfs_signal_pending() ? -EINTR : 0;
}
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
if (slice->cls_ops->clo_state != NULL)
slice->cls_ops->clo_state(env, slice, state);
cfs_waitq_broadcast(&lock->cll_wq);
lock->cll_state == CLS_INTRANSIT);
if (lock->cll_state != state) {
- atomic_dec(&site->cs_locks_state[lock->cll_state]);
- atomic_inc(&site->cs_locks_state[state]);
+ cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
+ cfs_atomic_inc(&site->cs_locks_state[state]);
cl_lock_state_signal(env, lock, state);
lock->cll_state = state;
LASSERT(lock->cll_state == CLS_INTRANSIT);
result = -ENOSYS;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
+ cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
if (slice->cls_ops->clo_unuse != NULL) {
result = slice->cls_ops->clo_unuse(env, slice);
if (result != 0)
result = -ENOSYS;
state = cl_lock_intransit(env, lock);
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_use != NULL) {
result = slice->cls_ops->clo_use(env, slice);
if (result != 0)
ENTRY;
result = -ENOSYS;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_enqueue != NULL) {
result = slice->cls_ops->clo_enqueue(env,
slice, io, flags);
break;
result = -ENOSYS;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_wait != NULL) {
result = slice->cls_ops->clo_wait(env, slice);
if (result != 0)
LINVRNT(cl_lock_invariant(env, lock));
pound = 0;
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_weigh != NULL) {
ounce = slice->cls_ops->clo_weigh(env, slice);
pound += ounce;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_modify != NULL) {
result = slice->cls_ops->clo_modify(env, slice, desc);
if (result != 0)
* now. If locks were indexed according to their extent and/or mode,
* that index would have to be updated here.
*/
- spin_lock(&hdr->coh_lock_guard);
+ cfs_spin_lock(&hdr->coh_lock_guard);
lock->cll_descr = *desc;
- spin_unlock(&hdr->coh_lock_guard);
+ cfs_spin_unlock(&hdr->coh_lock_guard);
RETURN(0);
}
EXPORT_SYMBOL(cl_lock_modify);
result = cl_lock_enclosure(env, lock, closure);
if (result == 0) {
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_closure != NULL) {
result = slice->cls_ops->clo_closure(env, slice,
closure);
* If lock->cll_inclosure is not empty, lock is already in
* this closure.
*/
- if (list_empty(&lock->cll_inclosure)) {
+ if (cfs_list_empty(&lock->cll_inclosure)) {
cl_lock_get_trust(lock);
lu_ref_add(&lock->cll_reference, "closure", closure);
- list_add(&lock->cll_inclosure, &closure->clc_list);
+ cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
closure->clc_nr++;
} else
cl_lock_mutex_put(env, lock);
struct cl_lock *temp;
cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
- list_for_each_entry_safe(scan, temp, &closure->clc_list, cll_inclosure){
- list_del_init(&scan->cll_inclosure);
+ cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
+ cll_inclosure){
+ cfs_list_del_init(&scan->cll_inclosure);
cl_lock_mutex_put(env, scan);
lu_ref_del(&scan->cll_reference, "closure", closure);
cl_lock_put(env, scan);
void cl_lock_closure_fini(struct cl_lock_closure *closure)
{
LASSERT(closure->clc_nr == 0);
- LASSERT(list_empty(&closure->clc_list));
+ LASSERT(cfs_list_empty(&closure->clc_list));
}
EXPORT_SYMBOL(cl_lock_closure_fini);
need->cld_start = need->cld_end = page->cp_index;
need->cld_enq_flags = 0;
- spin_lock(&head->coh_lock_guard);
+ cfs_spin_lock(&head->coh_lock_guard);
/* It is fine to match any group lock since there could be only one
* with a uniq gid and it conflicts with all other lock modes too */
- list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+ cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
if (scan != except &&
(scan->cll_descr.cld_mode == CLM_GROUP ||
cl_lock_ext_match(&scan->cll_descr, need)) &&
break;
}
}
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_unlock(&head->coh_lock_guard);
RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_at_page);
continue;
descr = &found->cll_descr;
- list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
- cp_batch) {
+ cfs_list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
+ cp_batch) {
idx = page->cp_index;
if (descr->cld_start > idx || descr->cld_end < idx)
break;
LASSERT(ergo(!cancel,
head->coh_tree.rnode == NULL && head->coh_pages == 0));
- spin_lock(&head->coh_lock_guard);
- while (!list_empty(&head->coh_locks)) {
+ cfs_spin_lock(&head->coh_lock_guard);
+ while (!cfs_list_empty(&head->coh_locks)) {
lock = container_of(head->coh_locks.next,
struct cl_lock, cll_linkage);
cl_lock_get_trust(lock);
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_unlock(&head->coh_lock_guard);
lu_ref_add(&lock->cll_reference, "prune", cfs_current());
cl_lock_mutex_get(env, lock);
if (lock->cll_state < CLS_FREEING) {
cl_lock_mutex_put(env, lock);
lu_ref_del(&lock->cll_reference, "prune", cfs_current());
cl_lock_put(env, lock);
- spin_lock(&head->coh_lock_guard);
+ cfs_spin_lock(&head->coh_lock_guard);
}
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_unlock(&head->coh_lock_guard);
EXIT;
}
EXPORT_SYMBOL(cl_locks_prune);
{
const struct cl_lock_slice *slice;
(*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
- lock, atomic_read(&lock->cll_ref),
+ lock, cfs_atomic_read(&lock->cll_ref),
lock->cll_state, lock->cll_error, lock->cll_holds,
lock->cll_users, lock->cll_flags);
cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
(*printer)(env, cookie, " {\n");
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
(*printer)(env, cookie, " %s@%p: ",
slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
slice);
static cfs_mem_cache_t *cl_env_kmem;
/** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
+static cfs_lock_class_key_t cl_page_guard_class;
/** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
+static cfs_lock_class_key_t cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
-static struct lock_class_key cl_attr_guard_class;
+static cfs_lock_class_key_t cl_attr_guard_class;
/**
* Initialize cl_object_header.
ENTRY;
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
- spin_lock_init(&h->coh_page_guard);
- spin_lock_init(&h->coh_lock_guard);
- spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_attr_guard, &cl_page_guard_class);
- lockdep_set_class(&h->coh_attr_guard, &cl_lock_guard_class);
- lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
+ cfs_spin_lock_init(&h->coh_page_guard);
+ cfs_spin_lock_init(&h->coh_lock_guard);
+ cfs_spin_lock_init(&h->coh_attr_guard);
+ cfs_lockdep_set_class(&h->coh_attr_guard, &cl_page_guard_class);
+ cfs_lockdep_set_class(&h->coh_attr_guard, &cl_lock_guard_class);
+ cfs_lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
h->coh_pages = 0;
/* XXX hard coded GFP_* mask. */
INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
*/
void cl_object_header_fini(struct cl_object_header *h)
{
- LASSERT(list_empty(&h->coh_locks));
+ LASSERT(cfs_list_empty(&h->coh_locks));
lu_object_header_fini(&h->coh_lu);
}
EXPORT_SYMBOL(cl_object_header_fini);
struct cl_device *cd, const struct lu_fid *fid,
const struct cl_object_conf *c)
{
- might_sleep();
+ cfs_might_sleep();
return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
*
* \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
*/
-static spinlock_t *cl_object_attr_guard(struct cl_object *o)
+static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}
*/
void cl_object_attr_lock(struct cl_object *o)
{
- spin_lock(cl_object_attr_guard(o));
+ cfs_spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);
*/
void cl_object_attr_unlock(struct cl_object *o)
{
- spin_unlock(cl_object_attr_guard(o));
+ cfs_spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_attr_get != NULL) {
result = obj->co_ops->coo_attr_get(env, obj, attr);
if (result != 0) {
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
+ co_lu.lo_linkage) {
if (obj->co_ops->coo_attr_set != NULL) {
result = obj->co_ops->coo_attr_set(env, obj, attr, v);
if (result != 0) {
ENTRY;
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
+ co_lu.lo_linkage) {
if (obj->co_ops->coo_glimpse != NULL) {
result = obj->co_ops->coo_glimpse(env, obj, lvb);
if (result != 0)
ENTRY;
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_conf_set != NULL) {
result = obj->co_ops->coo_conf_set(env, obj, conf);
if (result != 0)
LASSERT(hdr->coh_tree.rnode == NULL);
LASSERT(hdr->coh_pages == 0);
- set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
+ cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
/*
* Destroy all locks. Object destruction (including cl_inode_fini())
* cannot cancel the locks, because in the case of a local client,
struct cl_object_header *head = cl_object_header(obj);
int has;
- spin_lock(&head->coh_lock_guard);
- has = list_empty(&head->coh_locks);
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_lock(&head->coh_lock_guard);
+ has = cfs_list_empty(&head->coh_locks);
+ cfs_spin_unlock(&head->coh_lock_guard);
return (has == 0);
}
void cache_stats_init(struct cache_stats *cs, const char *name)
{
cs->cs_name = name;
- atomic_set(&cs->cs_lookup, 0);
- atomic_set(&cs->cs_hit, 0);
- atomic_set(&cs->cs_total, 0);
- atomic_set(&cs->cs_busy, 0);
+ cfs_atomic_set(&cs->cs_lookup, 0);
+ cfs_atomic_set(&cs->cs_hit, 0);
+ cfs_atomic_set(&cs->cs_total, 0);
+ cfs_atomic_set(&cs->cs_busy, 0);
}
int cache_stats_print(const struct cache_stats *cs,
nob += snprintf(page + nob, count - nob,
"%5.5s: %6u %6u %6u %6u %6u",
cs->cs_name,
- atomic_read(&cs->cs_lookup),
- atomic_read(&cs->cs_hit),
- atomic_read(&cs->cs_total),
- atomic_read(&cs->cs_busy),
- atomic_read(&cs->cs_created));
+ cfs_atomic_read(&cs->cs_lookup),
+ cfs_atomic_read(&cs->cs_hit),
+ cfs_atomic_read(&cs->cs_total),
+ cfs_atomic_read(&cs->cs_busy),
+ cfs_atomic_read(&cs->cs_created));
return nob;
}
cache_stats_init(&s->cs_pages, "pages");
cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
- atomic_set(&s->cs_pages_state[0], 0);
+ cfs_atomic_set(&s->cs_pages_state[0], 0);
for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- atomic_set(&s->cs_locks_state[i], 0);
+ cfs_atomic_set(&s->cs_locks_state[i], 0);
}
return result;
}
static struct cache_stats cl_env_stats = {
.cs_name = "envs",
- .cs_created = ATOMIC_INIT(0),
- .cs_lookup = ATOMIC_INIT(0),
- .cs_hit = ATOMIC_INIT(0),
- .cs_total = ATOMIC_INIT(0),
- .cs_busy = ATOMIC_INIT(0)
+ .cs_created = CFS_ATOMIC_INIT(0),
+ .cs_lookup = CFS_ATOMIC_INIT(0),
+ .cs_hit = CFS_ATOMIC_INIT(0),
+ .cs_total = CFS_ATOMIC_INIT(0),
+ .cs_busy = CFS_ATOMIC_INIT(0)
};
/**
for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
nob += snprintf(page + nob, count - nob, "%s: %u ",
pstate[i],
- atomic_read(&site->cs_pages_state[i]));
+ cfs_atomic_read(&site->cs_pages_state[i]));
nob += snprintf(page + nob, count - nob, "]\n");
nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
nob += snprintf(page + nob, count - nob, " [");
for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
nob += snprintf(page + nob, count - nob, "%s: %u ",
lstate[i],
- atomic_read(&site->cs_locks_state[i]));
+ cfs_atomic_read(&site->cs_locks_state[i]));
nob += snprintf(page + nob, count - nob, "]\n");
nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
nob += snprintf(page + nob, count - nob, "\n");
static unsigned cl_envs_cached_nr = 0;
static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
* for now. */
-static spinlock_t cl_envs_guard = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t cl_envs_guard = CFS_SPIN_LOCK_UNLOCKED;
struct cl_env {
void *ce_magic;
* This allows cl_env to be entered into cl_env_hash which implements
* the current thread -> client environment lookup.
*/
- struct hlist_node ce_node;
+ cfs_hlist_node_t ce_node;
/**
* Owner for the current cl_env, the key for cfs_hash.
* Now current thread pointer is stored.
* Linkage into global list of all client environments. Used for
* garbage collection.
*/
- struct list_head ce_linkage;
+ cfs_list_t ce_linkage;
/*
*
*/
void *ce_debug;
};
-#define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.counter)
+#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.counter)
#define CL_ENV_DEC(counter) \
do { \
- LASSERT(atomic_read(&cl_env_stats.counter) > 0); \
- atomic_dec(&cl_env_stats.counter); \
+ LASSERT(cfs_atomic_read(&cl_env_stats.counter) > 0); \
+ cfs_atomic_dec(&cl_env_stats.counter); \
} while (0)
/*****************************************************************************
#endif
}
-static void *cl_env_hops_obj(struct hlist_node *hn)
+static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
{
- struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
+ struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
LASSERT(cle->ce_magic == &cl_env_init0);
return (void *)cle;
}
-static int cl_env_hops_compare(void *key, struct hlist_node *hn)
+static int cl_env_hops_compare(void *key, cfs_hlist_node_t *hn)
{
struct cl_env *cle = cl_env_hops_obj(hn);
struct lu_env *env;
ENTRY;
- spin_lock(&cl_envs_guard);
- LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+ cfs_spin_lock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
if (cl_envs_cached_nr > 0) {
int rc;
cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- list_del_init(&cle->ce_linkage);
+ cfs_list_del_init(&cle->ce_linkage);
cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ cfs_spin_unlock(&cl_envs_guard);
env = &cle->ce_lu;
rc = lu_env_refill(env);
env = ERR_PTR(rc);
}
} else {
- spin_unlock(&cl_envs_guard);
+ cfs_spin_unlock(&cl_envs_guard);
env = cl_env_new(0, debug);
}
RETURN(env);
struct cl_env *cle;
ENTRY;
- spin_lock(&cl_envs_guard);
- for (; !list_empty(&cl_envs) && nr > 0; --nr) {
+ cfs_spin_lock(&cl_envs_guard);
+ for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- list_del_init(&cle->ce_linkage);
+ cfs_list_del_init(&cle->ce_linkage);
LASSERT(cl_envs_cached_nr > 0);
cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ cfs_spin_unlock(&cl_envs_guard);
cl_env_fini(cle);
- spin_lock(&cl_envs_guard);
+ cfs_spin_lock(&cl_envs_guard);
}
- LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
- spin_unlock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+ cfs_spin_unlock(&cl_envs_guard);
RETURN(nr);
}
EXPORT_SYMBOL(cl_env_cache_purge);
if (cl_envs_cached_nr < cl_envs_cached_max &&
(env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
(env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
- spin_lock(&cl_envs_guard);
- list_add(&cle->ce_linkage, &cl_envs);
+ cfs_spin_lock(&cl_envs_guard);
+ cfs_list_add(&cle->ce_linkage, &cl_envs);
cl_envs_cached_nr++;
- spin_unlock(&cl_envs_guard);
+ cfs_spin_unlock(&cl_envs_guard);
} else
cl_env_fini(cle);
}
/*
* Checkless version for trusted users.
*/
- if (atomic_inc_return(&page->cp_ref) == 1)
- atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
+ if (cfs_atomic_inc_return(&page->cp_ref) == 1)
+ cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
}
/**
#ifdef INVARIANT_CHECK
struct cl_object_header *ch = cl_object_header(page->cp_obj);
- if (!atomic_read(&page->cp_ref))
+ if (!cfs_atomic_read(&page->cp_ref))
LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
#endif
ENTRY;
page = cl_page_top_trusted((struct cl_page *)page);
do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
RETURN(slice);
}
hdr = cl_object_header(obj);
pvec = cl_env_info(env)->clt_pvec;
dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
idx, CLT_PVEC_SIZE)) > 0) {
idx = pvec[nr - 1]->cp_index + 1;
* check that pages weren't truncated (cl_page_own() returns
* error in the latter case).
*/
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
for (i = 0; i < j; ++i) {
page = pvec[i];
if (page_own(env, io, page) == 0)
"page_list", cfs_current());
cl_page_put(env, page);
}
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
if (nr < CLT_PVEC_SIZE)
break;
}
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
EXIT;
}
EXPORT_SYMBOL(cl_page_gang_lookup);
struct cl_site *site = cl_object_site(obj);
PASSERT(env, page, cl_is_page(page));
- PASSERT(env, page, list_empty(&page->cp_batch));
+ PASSERT(env, page, cfs_list_empty(&page->cp_batch));
PASSERT(env, page, page->cp_owner == NULL);
PASSERT(env, page, page->cp_req == NULL);
PASSERT(env, page, page->cp_parent == NULL);
PASSERT(env, page, page->cp_state == CPS_FREEING);
ENTRY;
- might_sleep();
- while (!list_empty(&page->cp_layers)) {
+ cfs_might_sleep();
+ while (!cfs_list_empty(&page->cp_layers)) {
struct cl_page_slice *slice;
- slice = list_entry(page->cp_layers.next, struct cl_page_slice,
- cpl_linkage);
- list_del_init(page->cp_layers.next);
+ slice = cfs_list_entry(page->cp_layers.next,
+ struct cl_page_slice, cpl_linkage);
+ cfs_list_del_init(page->cp_layers.next);
slice->cpl_ops->cpo_fini(env, slice);
}
- atomic_dec(&site->cs_pages.cs_total);
- atomic_dec(&site->cs_pages_state[page->cp_state]);
+ cfs_atomic_dec(&site->cs_pages.cs_total);
+ cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
cl_object_put(env, obj);
lu_ref_fini(&page->cp_reference);
result = +1;
OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
if (page != NULL) {
- atomic_set(&page->cp_ref, 1);
+ cfs_atomic_set(&page->cp_ref, 1);
page->cp_obj = o;
cl_object_get(o);
page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
CFS_INIT_LIST_HEAD(&page->cp_layers);
CFS_INIT_LIST_HEAD(&page->cp_batch);
CFS_INIT_LIST_HEAD(&page->cp_flight);
- mutex_init(&page->cp_mutex);
+ cfs_mutex_init(&page->cp_mutex);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
- list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
+ cfs_list_for_each_entry(o, &head->loh_layers,
+ co_lu.lo_linkage) {
if (o->co_ops->coo_page_init != NULL) {
err = o->co_ops->coo_page_init(env, o,
page, vmpage);
}
}
if (err == NULL) {
- atomic_inc(&site->cs_pages.cs_busy);
- atomic_inc(&site->cs_pages.cs_total);
- atomic_inc(&site->cs_pages_state[CPS_CACHED]);
- atomic_inc(&site->cs_pages.cs_created);
+ cfs_atomic_inc(&site->cs_pages.cs_busy);
+ cfs_atomic_inc(&site->cs_pages.cs_total);
+ cfs_atomic_inc(&site->cs_pages_state[CPS_CACHED]);
+ cfs_atomic_inc(&site->cs_pages.cs_created);
result = 0;
}
} else
int err;
LINVRNT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
- might_sleep();
+ cfs_might_sleep();
ENTRY;
hdr = cl_object_header(o);
- atomic_inc(&site->cs_pages.cs_lookup);
+ cfs_atomic_inc(&site->cs_pages.cs_lookup);
CDEBUG(D_PAGE, "%lu@"DFID" %p %lu %i\n",
idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
(void *)radix_tree_lookup(&hdr->coh_tree,
idx) == page));
} else {
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
page = cl_page_lookup(hdr, idx);
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
}
if (page != NULL) {
- atomic_inc(&site->cs_pages.cs_hit);
+ cfs_atomic_inc(&site->cs_pages.cs_hit);
RETURN(page);
}
* XXX optimization: use radix_tree_preload() here, and change tree
* gfp mask to GFP_KERNEL in cl_object_header_init().
*/
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
err = radix_tree_insert(&hdr->coh_tree, idx, page);
if (err != 0) {
ghost = page;
* transient pages, so it is impossible to
* have conflicting transient pages.
*/
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
cl_page_put(env, page);
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
page = ERR_PTR(-EBUSY);
}
}
}
hdr->coh_pages++;
}
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
if (unlikely(ghost != NULL)) {
- atomic_dec(&site->cs_pages.cs_busy);
+ cfs_atomic_dec(&site->cs_pages.cs_busy);
cl_page_delete0(env, ghost, 0);
cl_page_free(env, ghost);
}
child = pg->cp_child;
owner = pg->cp_owner;
- return atomic_read(&pg->cp_ref) > 0 &&
+ return cfs_atomic_read(&pg->cp_ref) > 0 &&
ergo(parent != NULL, parent->cp_child == pg) &&
ergo(child != NULL, child->cp_parent == pg) &&
ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
PASSERT(env, page,
equi(state == CPS_OWNED, page->cp_owner != NULL));
- atomic_dec(&site->cs_pages_state[page->cp_state]);
- atomic_inc(&site->cs_pages_state[state]);
+ cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
+ cfs_atomic_inc(&site->cs_pages_state[state]);
cl_page_state_set_trust(page, state);
}
EXIT;
struct cl_object_header *hdr;
struct cl_site *site = cl_object_site(page->cp_obj);
- PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
+ PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
ENTRY;
- CL_PAGE_HEADER(D_TRACE, env, page, "%i\n", atomic_read(&page->cp_ref));
+ CL_PAGE_HEADER(D_TRACE, env, page, "%i\n",
+ cfs_atomic_read(&page->cp_ref));
hdr = cl_object_header(cl_object_top(page->cp_obj));
- if (atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
- atomic_dec(&site->cs_pages.cs_busy);
+ if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
+ cfs_atomic_dec(&site->cs_pages.cs_busy);
/* We're going to access the page w/o a reference, but it's
* ok because we have grabbed the lock coh_page_guard, which
* means nobody is able to free this page behind us.
* inside the coh_page_guard. So that if it gets here,
* it is the REALLY last reference to this page.
*/
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
- LASSERT(atomic_read(&page->cp_ref) == 0);
+ LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, list_empty(&page->cp_batch));
+ PASSERT(env, page, cfs_list_empty(&page->cp_batch));
/*
* Page is no longer reachable by other threads. Tear
* it down.
EXIT;
return;
}
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
}
EXIT;
*/
page = cl_page_top(page);
do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
if (slice->cpl_ops->cpo_vmpage != NULL)
RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
}
* can be rectified easily.
*/
hdr = cl_object_header(cl_object_top(obj));
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
for (page = (void *)vmpage->private;
page != NULL; page = page->cp_child) {
if (cl_object_same(page->cp_obj, obj)) {
break;
}
}
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
RETURN(page);
}
__result = 0; \
__page = cl_page_top(__page); \
do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
+ cfs_list_for_each_entry(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + \
__op); \
if (__method != NULL) { \
\
__page = cl_page_top(__page); \
do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
+ cfs_list_for_each_entry(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + \
__op); \
if (__method != NULL) \
} while (__page != NULL); \
} while (0)
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- /* get to the bottom page. */ \
- while (__page->cp_child != NULL) \
- __page = __page->cp_child; \
- do { \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method != NULL) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_parent; \
- } while (__page != NULL); \
+#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
+do { \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ ptrdiff_t __op = (_op); \
+ void (*__method)_proto; \
+ \
+ /* get to the bottom page. */ \
+ while (__page->cp_child != NULL) \
+ __page = __page->cp_child; \
+ do { \
+ cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + \
+ __op); \
+ if (__method != NULL) \
+ (*__method)(__env, __scan, \
+ ## __VA_ARGS__); \
+ } \
+ __page = __page->cp_parent; \
+ } while (__page != NULL); \
} while (0)
static int cl_page_invoke(const struct lu_env *env,
struct cl_object_header *hdr;
hdr = cl_object_header(tmp->cp_obj);
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
value = radix_tree_delete(&hdr->coh_tree, tmp->cp_index);
PASSERT(env, tmp, value == tmp);
PASSERT(env, tmp, hdr->coh_pages > 0);
hdr->coh_pages--;
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
}
CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
{
(*printer)(env, cookie,
"page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
- pg, atomic_read(&pg->cp_ref), pg->cp_obj,
+ pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
pg->cp_index, pg->cp_parent, pg->cp_child,
pg->cp_state, pg->cp_error, pg->cp_type,
pg->cp_owner, pg->cp_req, pg->cp_flags);
const struct cl_page_operations *ops)
{
ENTRY;
- list_add_tail(&slice->cpl_linkage, &page->cp_layers);
+ cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
slice->cpl_obj = obj;
slice->cpl_ops = ops;
slice->cpl_page = page;
#ifndef __KERNEL__
/* liblustre workaround */
-atomic_t libcfs_kmemory = {0};
+cfs_atomic_t libcfs_kmemory = {0};
#endif
struct obd_device *obd_devs[MAX_OBD_DEVICES];
-struct list_head obd_types;
-spinlock_t obd_dev_lock = SPIN_LOCK_UNLOCKED;
+cfs_list_t obd_types;
+cfs_spinlock_t obd_dev_lock = CFS_SPIN_LOCK_UNLOCKED;
#ifndef __KERNEL__
__u64 obd_max_pages = 0;
int at_early_margin = 5;
int at_extra = 30;
-atomic_t obd_dirty_pages;
-atomic_t obd_dirty_transit_pages;
+cfs_atomic_t obd_dirty_pages;
+cfs_atomic_t obd_dirty_transit_pages;
cfs_waitq_t obd_race_waitq;
int obd_race_state;
OBD_ALLOC(lcfg, data->ioc_plen1);
if (lcfg == NULL)
GOTO(out, err = -ENOMEM);
- err = copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1);
+ err = cfs_copy_from_user(lcfg, data->ioc_pbuf1,
+ data->ioc_plen1);
if (!err)
err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1);
if (!err)
snprintf(str, len - sizeof(*data), "%3d %s %s %s %s %d",
(int)index, status, obd->obd_type->typ_name,
obd->obd_name, obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
+ cfs_atomic_read(&obd->obd_refcount));
err = obd_ioctl_popdata((void *)arg, data, len);
GOTO(out, err = 0);
#define obd_init_checks() do {} while(0)
#endif
-extern spinlock_t obd_types_lock;
+extern cfs_spinlock_t obd_types_lock;
extern int class_procfs_init(void);
extern int class_procfs_clean(void);
LCONSOLE_INFO(" Lustre Version: "LUSTRE_VERSION_STRING"\n");
LCONSOLE_INFO(" Build Version: "BUILD_VERSION"\n");
- spin_lock_init(&obd_types_lock);
+ cfs_spin_lock_init(&obd_types_lock);
cfs_waitq_init(&obd_race_waitq);
obd_zombie_impexp_init();
#ifdef LPROCFS
if (err)
return err;
- spin_lock_init(&obd_dev_lock);
+ cfs_spin_lock_init(&obd_dev_lock);
CFS_INIT_LIST_HEAD(&obd_types);
err = cfs_psdev_register(&obd_psdev);
/* Default the dirty page cache cap to 1/2 of system memory.
* For clients with less memory, a larger fraction is needed
* for other purposes (mostly for BGL). */
- if (num_physpages <= 512 << (20 - CFS_PAGE_SHIFT))
- obd_max_dirty_pages = num_physpages / 4;
+ if (cfs_num_physpages <= 512 << (20 - CFS_PAGE_SHIFT))
+ obd_max_dirty_pages = cfs_num_physpages / 4;
else
- obd_max_dirty_pages = num_physpages / 2;
+ obd_max_dirty_pages = cfs_num_physpages / 2;
err = obd_init_caches();
if (err)
*/
void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
{
- list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
+ cfs_list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
}
EXPORT_SYMBOL(dt_txn_callback_add);
void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb)
{
- list_del_init(&cb->dtc_linkage);
+ cfs_list_del_init(&cb->dtc_linkage);
}
EXPORT_SYMBOL(dt_txn_callback_del);
struct dt_txn_callback *cb;
result = 0;
- list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+ cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
if (cb->dtc_txn_start == NULL ||
!(cb->dtc_tag & env->le_ctx.lc_tags))
continue;
int result;
result = 0;
- list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+ cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
if (cb->dtc_txn_stop == NULL ||
!(cb->dtc_tag & env->le_ctx.lc_tags))
continue;
int result;
result = 0;
- list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+ cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
if (cb->dtc_txn_commit == NULL ||
!(cb->dtc_tag & env->le_ctx.lc_tags))
continue;
#include <obd_class.h>
#include <lprocfs_status.h>
-extern struct list_head obd_types;
-spinlock_t obd_types_lock;
+extern cfs_list_t obd_types;
+cfs_spinlock_t obd_types_lock;
cfs_mem_cache_t *obd_device_cachep;
cfs_mem_cache_t *obdo_cachep;
EXPORT_SYMBOL(obdo_cachep);
cfs_mem_cache_t *import_cachep;
-struct list_head obd_zombie_imports;
-struct list_head obd_zombie_exports;
-spinlock_t obd_zombie_impexp_lock;
+cfs_list_t obd_zombie_imports;
+cfs_list_t obd_zombie_exports;
+cfs_spinlock_t obd_zombie_impexp_lock;
static void obd_zombie_impexp_notify(void);
static void obd_zombie_export_add(struct obd_export *exp);
static void obd_zombie_import_add(struct obd_import *imp);
struct obd_type *class_search_type(const char *name)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct obd_type *type;
- spin_lock(&obd_types_lock);
- list_for_each(tmp, &obd_types) {
- type = list_entry(tmp, struct obd_type, typ_chain);
+ cfs_spin_lock(&obd_types_lock);
+ cfs_list_for_each(tmp, &obd_types) {
+ type = cfs_list_entry(tmp, struct obd_type, typ_chain);
if (strcmp(type->typ_name, name) == 0) {
- spin_unlock(&obd_types_lock);
+ cfs_spin_unlock(&obd_types_lock);
return type;
}
}
- spin_unlock(&obd_types_lock);
+ cfs_spin_unlock(&obd_types_lock);
return NULL;
}
#ifdef CONFIG_KMOD
if (!type) {
const char *modname = name;
- if (!request_module("%s", modname)) {
+ if (!cfs_request_module("%s", modname)) {
CDEBUG(D_INFO, "Loaded module '%s'\n", modname);
type = class_search_type(name);
} else {
}
#endif
if (type) {
- spin_lock(&type->obd_type_lock);
+ cfs_spin_lock(&type->obd_type_lock);
type->typ_refcnt++;
- try_module_get(type->typ_dt_ops->o_owner);
- spin_unlock(&type->obd_type_lock);
+ cfs_try_module_get(type->typ_dt_ops->o_owner);
+ cfs_spin_unlock(&type->obd_type_lock);
}
return type;
}
void class_put_type(struct obd_type *type)
{
LASSERT(type);
- spin_lock(&type->obd_type_lock);
+ cfs_spin_lock(&type->obd_type_lock);
type->typ_refcnt--;
- module_put(type->typ_dt_ops->o_owner);
- spin_unlock(&type->obd_type_lock);
+ cfs_module_put(type->typ_dt_ops->o_owner);
+ cfs_spin_unlock(&type->obd_type_lock);
}
#define CLASS_MAX_NAME 1024
if (md_ops)
*(type->typ_md_ops) = *md_ops;
strcpy(type->typ_name, name);
- spin_lock_init(&type->obd_type_lock);
+ cfs_spin_lock_init(&type->obd_type_lock);
#ifdef LPROCFS
type->typ_procroot = lprocfs_register(type->typ_name, proc_lustre_root,
GOTO (failed, rc);
}
- spin_lock(&obd_types_lock);
- list_add(&type->typ_chain, &obd_types);
- spin_unlock(&obd_types_lock);
+ cfs_spin_lock(&obd_types_lock);
+ cfs_list_add(&type->typ_chain, &obd_types);
+ cfs_spin_unlock(&obd_types_lock);
RETURN (0);
if (type->typ_lu)
lu_device_type_fini(type->typ_lu);
- spin_lock(&obd_types_lock);
- list_del(&type->typ_chain);
- spin_unlock(&obd_types_lock);
+ cfs_spin_lock(&obd_types_lock);
+ cfs_list_del(&type->typ_chain);
+ cfs_spin_unlock(&obd_types_lock);
OBD_FREE(type->typ_name, strlen(name) + 1);
if (type->typ_dt_ops != NULL)
OBD_FREE_PTR(type->typ_dt_ops);
}
LASSERT(newdev->obd_magic == OBD_DEVICE_MAGIC);
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
if (obd && obd->obd_name &&
obd_devs[i] = result;
}
}
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
if (result == NULL && i >= class_devno_max()) {
CERROR("all %u OBD devices used, increase MAX_OBD_DEVICES\n",
CDEBUG(D_INFO, "Release obd device %s obd_type name =%s\n",
obd->obd_name,obd->obd_type->typ_name);
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
obd_devs[obd->obd_minor] = NULL;
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
obd_device_free(obd);
class_put_type(obd_type);
if (!name)
return -1;
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
if (obd && obd->obd_name && strcmp(name, obd->obd_name) == 0) {
out any references */
LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
if (obd->obd_attached) {
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return i;
}
break;
}
}
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return -1;
}
{
int i;
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
if (obd && obd_uuid_equals(uuid, &obd->obd_uuid)) {
LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return i;
}
}
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return -1;
}
char *status;
int i;
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
if (obd == NULL)
LCONSOLE(D_CONFIG, "%3d %s %s %s %s %d\n",
i, status, obd->obd_type->typ_name,
obd->obd_name, obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
+ cfs_atomic_read(&obd->obd_refcount));
}
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return;
}
{
int i;
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
if (obd == NULL)
&obd->u.cli.cl_target_uuid) &&
((grp_uuid)? obd_uuid_equals(grp_uuid,
&obd->obd_uuid) : 1)) {
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return obd;
}
}
}
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return NULL;
}
else
return NULL;
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
for (; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
if (obd == NULL)
if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
if (next != NULL)
*next = i+1;
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return obd;
}
}
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return NULL;
}
LASSERT(namelen > 0);
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
obd = class_num2obd(i);
continue;
class_incref(obd, __FUNCTION__, obd);
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
rc2 = obd_set_info_async(obd->obd_self_export,
sizeof(KEY_SPTLRPC_CONF),
KEY_SPTLRPC_CONF, 0, NULL, NULL);
rc = rc ? rc : rc2;
class_decref(obd, __FUNCTION__, obd);
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
}
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
return rc;
}
EXPORT_SYMBOL(class_notify_sptlrpc_conf);
struct obd_device *obd = exp->exp_obd;
ENTRY;
- LASSERT (atomic_read(&exp->exp_refcount) == 0);
+ LASSERT (cfs_atomic_read(&exp->exp_refcount) == 0);
CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
exp->exp_client_uuid.uuid, obd->obd_name);
if (exp->exp_connection)
ptlrpc_put_connection_superhack(exp->exp_connection);
- LASSERT(list_empty(&exp->exp_outstanding_replies));
- LASSERT(list_empty(&exp->exp_uncommitted_replies));
- LASSERT(list_empty(&exp->exp_req_replay_queue));
- LASSERT(list_empty(&exp->exp_queued_rpc));
+ LASSERT(cfs_list_empty(&exp->exp_outstanding_replies));
+ LASSERT(cfs_list_empty(&exp->exp_uncommitted_replies));
+ LASSERT(cfs_list_empty(&exp->exp_req_replay_queue));
+ LASSERT(cfs_list_empty(&exp->exp_queued_rpc));
obd_destroy_export(exp);
class_decref(obd, "export", exp);
struct obd_export *class_export_get(struct obd_export *exp)
{
- atomic_inc(&exp->exp_refcount);
+ cfs_atomic_inc(&exp->exp_refcount);
CDEBUG(D_INFO, "GETting export %p : new refcount %d\n", exp,
- atomic_read(&exp->exp_refcount));
+ cfs_atomic_read(&exp->exp_refcount));
return exp;
}
EXPORT_SYMBOL(class_export_get);
{
LASSERT(exp != NULL);
CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
- atomic_read(&exp->exp_refcount) - 1);
- LASSERT(atomic_read(&exp->exp_refcount) > 0);
- LASSERT(atomic_read(&exp->exp_refcount) < 0x5a5a5a);
+ cfs_atomic_read(&exp->exp_refcount) - 1);
+ LASSERT(cfs_atomic_read(&exp->exp_refcount) > 0);
+ LASSERT(cfs_atomic_read(&exp->exp_refcount) < 0x5a5a5a);
- if (atomic_dec_and_test(&exp->exp_refcount)) {
- LASSERT(!list_empty(&exp->exp_obd_chain));
+ if (cfs_atomic_dec_and_test(&exp->exp_refcount)) {
+ LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
CDEBUG(D_IOCTL, "final put %p/%s\n",
exp, exp->exp_client_uuid.uuid);
obd_zombie_export_add(exp);
export->exp_conn_cnt = 0;
export->exp_lock_hash = NULL;
- atomic_set(&export->exp_refcount, 2);
- atomic_set(&export->exp_rpc_count, 0);
- atomic_set(&export->exp_cb_count, 0);
- atomic_set(&export->exp_locks_count, 0);
+ cfs_atomic_set(&export->exp_refcount, 2);
+ cfs_atomic_set(&export->exp_rpc_count, 0);
+ cfs_atomic_set(&export->exp_cb_count, 0);
+ cfs_atomic_set(&export->exp_locks_count, 0);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
CFS_INIT_LIST_HEAD(&export->exp_locks_list);
- spin_lock_init(&export->exp_locks_list_guard);
+ cfs_spin_lock_init(&export->exp_locks_list_guard);
#endif
- atomic_set(&export->exp_replay_count, 0);
+ cfs_atomic_set(&export->exp_replay_count, 0);
export->exp_obd = obd;
CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies);
- spin_lock_init(&export->exp_uncommitted_replies_lock);
+ cfs_spin_lock_init(&export->exp_uncommitted_replies_lock);
CFS_INIT_LIST_HEAD(&export->exp_uncommitted_replies);
CFS_INIT_LIST_HEAD(&export->exp_req_replay_queue);
CFS_INIT_LIST_HEAD(&export->exp_handle.h_link);
CFS_INIT_LIST_HEAD(&export->exp_queued_rpc);
class_handle_hash(&export->exp_handle, export_handle_addref);
export->exp_last_request_time = cfs_time_current_sec();
- spin_lock_init(&export->exp_lock);
- INIT_HLIST_NODE(&export->exp_uuid_hash);
- INIT_HLIST_NODE(&export->exp_nid_hash);
+ cfs_spin_lock_init(&export->exp_lock);
+ CFS_INIT_HLIST_NODE(&export->exp_uuid_hash);
+ CFS_INIT_HLIST_NODE(&export->exp_nid_hash);
export->exp_sp_peer = LUSTRE_SP_ANY;
export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
export->exp_client_uuid = *cluuid;
obd_init_export(export);
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
/* shouldn't happen, but might race */
if (obd->obd_stopping)
GOTO(exit_err, rc = -ENODEV);
}
class_incref(obd, "export", export);
- list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
- list_add_tail(&export->exp_obd_chain_timed,
- &export->exp_obd->obd_exports_timed);
+ cfs_list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
+ cfs_list_add_tail(&export->exp_obd_chain_timed,
+ &export->exp_obd->obd_exports_timed);
export->exp_obd->obd_num_exports++;
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
RETURN(export);
exit_err:
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
class_handle_unhash(&export->exp_handle);
- LASSERT(hlist_unhashed(&export->exp_uuid_hash));
+ LASSERT(cfs_hlist_unhashed(&export->exp_uuid_hash));
obd_destroy_export(export);
OBD_FREE_PTR(export);
return ERR_PTR(rc);
{
class_handle_unhash(&exp->exp_handle);
- spin_lock(&exp->exp_obd->obd_dev_lock);
+ cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
/* delete an uuid-export hashitem from hashtables */
- if (!hlist_unhashed(&exp->exp_uuid_hash))
+ if (!cfs_hlist_unhashed(&exp->exp_uuid_hash))
cfs_hash_del(exp->exp_obd->obd_uuid_hash,
&exp->exp_client_uuid,
&exp->exp_uuid_hash);
- list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
- list_del_init(&exp->exp_obd_chain_timed);
+ cfs_list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
+ cfs_list_del_init(&exp->exp_obd_chain_timed);
exp->exp_obd->obd_num_exports--;
- spin_unlock(&exp->exp_obd->obd_dev_lock);
+ cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
class_export_put(exp);
}
EXPORT_SYMBOL(class_unlink_export);
CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
imp->imp_obd->obd_name);
- LASSERT(atomic_read(&imp->imp_refcount) == 0);
+ LASSERT(cfs_atomic_read(&imp->imp_refcount) == 0);
ptlrpc_put_connection_superhack(imp->imp_connection);
- while (!list_empty(&imp->imp_conn_list)) {
+ while (!cfs_list_empty(&imp->imp_conn_list)) {
struct obd_import_conn *imp_conn;
- imp_conn = list_entry(imp->imp_conn_list.next,
- struct obd_import_conn, oic_item);
- list_del_init(&imp_conn->oic_item);
+ imp_conn = cfs_list_entry(imp->imp_conn_list.next,
+ struct obd_import_conn, oic_item);
+ cfs_list_del_init(&imp_conn->oic_item);
ptlrpc_put_connection_superhack(imp_conn->oic_conn);
OBD_FREE(imp_conn, sizeof(*imp_conn));
}
struct obd_import *class_import_get(struct obd_import *import)
{
- LASSERT(atomic_read(&import->imp_refcount) >= 0);
- LASSERT(atomic_read(&import->imp_refcount) < 0x5a5a5a);
- atomic_inc(&import->imp_refcount);
+ LASSERT(cfs_atomic_read(&import->imp_refcount) >= 0);
+ LASSERT(cfs_atomic_read(&import->imp_refcount) < 0x5a5a5a);
+ cfs_atomic_inc(&import->imp_refcount);
CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", import,
- atomic_read(&import->imp_refcount),
+ cfs_atomic_read(&import->imp_refcount),
import->imp_obd->obd_name);
return import;
}
{
ENTRY;
- LASSERT(atomic_read(&imp->imp_refcount) > 0);
- LASSERT(atomic_read(&imp->imp_refcount) < 0x5a5a5a);
- LASSERT(list_empty(&imp->imp_zombie_chain));
+ LASSERT(cfs_atomic_read(&imp->imp_refcount) > 0);
+ LASSERT(cfs_atomic_read(&imp->imp_refcount) < 0x5a5a5a);
+ LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp,
- atomic_read(&imp->imp_refcount) - 1,
+ cfs_atomic_read(&imp->imp_refcount) - 1,
imp->imp_obd->obd_name);
- if (atomic_dec_and_test(&imp->imp_refcount)) {
+ if (cfs_atomic_dec_and_test(&imp->imp_refcount)) {
CDEBUG(D_INFO, "final put import %p\n", imp);
obd_zombie_import_add(imp);
}
CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
- spin_lock_init(&imp->imp_lock);
+ cfs_spin_lock_init(&imp->imp_lock);
imp->imp_last_success_conn = 0;
imp->imp_state = LUSTRE_IMP_NEW;
imp->imp_obd = class_incref(obd, "import", imp);
- sema_init(&imp->imp_sec_mutex, 1);
+ cfs_sema_init(&imp->imp_sec_mutex, 1);
cfs_waitq_init(&imp->imp_recovery_waitq);
- atomic_set(&imp->imp_refcount, 2);
- atomic_set(&imp->imp_unregistering, 0);
- atomic_set(&imp->imp_inflight, 0);
- atomic_set(&imp->imp_replay_inflight, 0);
- atomic_set(&imp->imp_inval_count, 0);
+ cfs_atomic_set(&imp->imp_refcount, 2);
+ cfs_atomic_set(&imp->imp_unregistering, 0);
+ cfs_atomic_set(&imp->imp_inflight, 0);
+ cfs_atomic_set(&imp->imp_replay_inflight, 0);
+ cfs_atomic_set(&imp->imp_inval_count, 0);
CFS_INIT_LIST_HEAD(&imp->imp_conn_list);
CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link);
class_handle_hash(&imp->imp_handle, import_handle_addref);
class_handle_unhash(&import->imp_handle);
- spin_lock(&import->imp_lock);
+ cfs_spin_lock(&import->imp_lock);
import->imp_generation++;
- spin_unlock(&import->imp_lock);
+ cfs_spin_unlock(&import->imp_lock);
class_import_put(import);
}
EXPORT_SYMBOL(class_destroy_import);
void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
{
- spin_lock(&exp->exp_locks_list_guard);
+ cfs_spin_lock(&exp->exp_locks_list_guard);
LASSERT(lock->l_exp_refs_nr >= 0);
exp, lock, lock->l_exp_refs_target);
}
if ((lock->l_exp_refs_nr ++) == 0) {
- list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
+ cfs_list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
lock->l_exp_refs_target = exp;
}
CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
lock, exp, lock->l_exp_refs_nr);
- spin_unlock(&exp->exp_locks_list_guard);
+ cfs_spin_unlock(&exp->exp_locks_list_guard);
}
EXPORT_SYMBOL(__class_export_add_lock_ref);
void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
{
- spin_lock(&exp->exp_locks_list_guard);
+ cfs_spin_lock(&exp->exp_locks_list_guard);
LASSERT(lock->l_exp_refs_nr > 0);
if (lock->l_exp_refs_target != exp) {
LCONSOLE_WARN("lock %p, "
lock, lock->l_exp_refs_target, exp);
}
if (-- lock->l_exp_refs_nr == 0) {
- list_del_init(&lock->l_exp_refs_link);
+ cfs_list_del_init(&lock->l_exp_refs_link);
lock->l_exp_refs_target = NULL;
}
CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
lock, exp, lock->l_exp_refs_nr);
- spin_unlock(&exp->exp_locks_list_guard);
+ cfs_spin_unlock(&exp->exp_locks_list_guard);
}
EXPORT_SYMBOL(__class_export_del_lock_ref);
#endif
{
struct obd_device *obd = exp->exp_obd;
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
if (exp->exp_delayed)
obd->obd_delayed_clients--;
if (obd->obd_recovering && exp->exp_in_recovery) {
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_in_recovery = 0;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
LASSERT(obd->obd_connected_clients);
obd->obd_connected_clients--;
}
/** Cleanup req replay fields */
if (exp->exp_req_replay_needed) {
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_req_replay_needed = 0;
- spin_unlock(&exp->exp_lock);
- LASSERT(atomic_read(&obd->obd_req_replay_clients));
- atomic_dec(&obd->obd_req_replay_clients);
+ cfs_spin_unlock(&exp->exp_lock);
+ LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients));
+ cfs_atomic_dec(&obd->obd_req_replay_clients);
}
/** Cleanup lock replay data */
if (exp->exp_lock_replay_needed) {
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_lock_replay_needed = 0;
- spin_unlock(&exp->exp_lock);
- LASSERT(atomic_read(&obd->obd_lock_replay_clients));
- atomic_dec(&obd->obd_lock_replay_clients);
+ cfs_spin_unlock(&exp->exp_lock);
+ LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients));
+ cfs_atomic_dec(&obd->obd_lock_replay_clients);
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
}
/* This function removes 1-3 references from the export:
RETURN(-EINVAL);
}
- spin_lock(&export->exp_lock);
+ cfs_spin_lock(&export->exp_lock);
already_disconnected = export->exp_disconnected;
export->exp_disconnected = 1;
- spin_unlock(&export->exp_lock);
+ cfs_spin_unlock(&export->exp_lock);
/* class_cleanup(), abort_recovery(), and class_fail_export()
* all end up in here, and if any of them race we shouldn't
* call extra class_export_puts(). */
if (already_disconnected) {
- LASSERT(hlist_unhashed(&export->exp_nid_hash));
+ LASSERT(cfs_hlist_unhashed(&export->exp_nid_hash));
GOTO(no_disconn, already_disconnected);
}
CDEBUG(D_IOCTL, "disconnect: cookie "LPX64"\n",
export->exp_handle.h_cookie);
- if (!hlist_unhashed(&export->exp_nid_hash))
+ if (!cfs_hlist_unhashed(&export->exp_nid_hash))
cfs_hash_del(export->exp_obd->obd_nid_hash,
&export->exp_connection->c_peer.nid,
&export->exp_nid_hash);
{
if (exp) {
int connected;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
connected = (exp->exp_conn_cnt > 0);
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return connected;
}
return 0;
}
EXPORT_SYMBOL(class_connected_export);
-static void class_disconnect_export_list(struct list_head *list,
+static void class_disconnect_export_list(cfs_list_t *list,
enum obd_option flags)
{
int rc;
/* It's possible that an export may disconnect itself, but
* nothing else will be added to this list. */
- while (!list_empty(list)) {
- exp = list_entry(list->next, struct obd_export, exp_obd_chain);
+ while (!cfs_list_empty(list)) {
+ exp = cfs_list_entry(list->next, struct obd_export,
+ exp_obd_chain);
/* need for safe call CDEBUG after obd_disconnect */
class_export_get(exp);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_flags = flags;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
if (obd_uuid_equals(&exp->exp_client_uuid,
&exp->exp_obd->obd_uuid)) {
exp);
/* Need to delete this now so we don't end up pointing
* to work_list later when this export is cleaned up. */
- list_del_init(&exp->exp_obd_chain);
+ cfs_list_del_init(&exp->exp_obd_chain);
class_export_put(exp);
continue;
}
void class_disconnect_exports(struct obd_device *obd)
{
- struct list_head work_list;
+ cfs_list_t work_list;
ENTRY;
/* Move all of the exports from obd_exports to a work list, en masse. */
CFS_INIT_LIST_HEAD(&work_list);
- spin_lock(&obd->obd_dev_lock);
- list_splice_init(&obd->obd_exports, &work_list);
- list_splice_init(&obd->obd_delayed_exports, &work_list);
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_list_splice_init(&obd->obd_exports, &work_list);
+ cfs_list_splice_init(&obd->obd_delayed_exports, &work_list);
+ cfs_spin_unlock(&obd->obd_dev_lock);
- if (!list_empty(&work_list)) {
+ if (!cfs_list_empty(&work_list)) {
CDEBUG(D_HA, "OBD device %d (%p) has exports, "
"disconnecting them\n", obd->obd_minor, obd);
class_disconnect_export_list(&work_list,
void class_disconnect_stale_exports(struct obd_device *obd,
int (*test_export)(struct obd_export *))
{
- struct list_head work_list;
- struct list_head *pos, *n;
+ cfs_list_t work_list;
+ cfs_list_t *pos, *n;
struct obd_export *exp;
int evicted = 0;
ENTRY;
CFS_INIT_LIST_HEAD(&work_list);
- spin_lock(&obd->obd_dev_lock);
- list_for_each_safe(pos, n, &obd->obd_exports) {
- exp = list_entry(pos, struct obd_export, exp_obd_chain);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_list_for_each_safe(pos, n, &obd->obd_exports) {
+ exp = cfs_list_entry(pos, struct obd_export, exp_obd_chain);
if (test_export(exp))
continue;
&exp->exp_obd->obd_uuid))
continue;
- list_move(&exp->exp_obd_chain, &work_list);
+ cfs_list_move(&exp->exp_obd_chain, &work_list);
evicted++;
CDEBUG(D_ERROR, "%s: disconnect stale client %s@%s\n",
obd->obd_name, exp->exp_client_uuid.uuid,
libcfs_nid2str(exp->exp_connection->c_peer.nid));
print_export_data(exp, "EVICTING", 0);
}
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
if (evicted) {
CDEBUG(D_HA, "%s: disconnecting %d stale clients\n",
{
int rc, already_failed;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
already_failed = exp->exp_failed;
exp->exp_failed = 1;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
if (already_failed) {
CDEBUG(D_HA, "disconnecting dead export %p/%s; skipping\n",
struct ptlrpc_reply_state *first_reply = NULL;
int nreplies = 0;
- spin_lock(&exp->exp_lock);
- list_for_each_entry (rs, &exp->exp_outstanding_replies, rs_exp_list) {
+ cfs_spin_lock(&exp->exp_lock);
+ cfs_list_for_each_entry(rs, &exp->exp_outstanding_replies,
+ rs_exp_list) {
if (nreplies == 0)
first_reply = rs;
nreplies++;
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
CDEBUG(D_HA, "%s: %s %p %s %s %d (%d %d %d) %d %d %d %d: %p %s "LPU64"\n",
exp->exp_obd->obd_name, status, exp, exp->exp_client_uuid.uuid,
- obd_export_nid2str(exp), atomic_read(&exp->exp_refcount),
- atomic_read(&exp->exp_rpc_count),
- atomic_read(&exp->exp_cb_count),
- atomic_read(&exp->exp_locks_count),
+ obd_export_nid2str(exp), cfs_atomic_read(&exp->exp_refcount),
+ cfs_atomic_read(&exp->exp_rpc_count),
+ cfs_atomic_read(&exp->exp_cb_count),
+ cfs_atomic_read(&exp->exp_locks_count),
exp->exp_disconnected, exp->exp_delayed, exp->exp_failed,
nreplies, first_reply, nreplies > 3 ? "..." : "",
exp->exp_last_committed);
{
struct obd_export *exp;
- spin_lock(&obd->obd_dev_lock);
- list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
print_export_data(exp, "ACTIVE", locks);
- list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
+ cfs_list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
print_export_data(exp, "UNLINKED", locks);
- list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
+ cfs_list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
print_export_data(exp, "DELAYED", locks);
- spin_unlock(&obd->obd_dev_lock);
- spin_lock(&obd_zombie_impexp_lock);
- list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
+ cfs_spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd_zombie_impexp_lock);
+ cfs_list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
print_export_data(exp, "ZOMBIE", locks);
- spin_unlock(&obd_zombie_impexp_lock);
+ cfs_spin_unlock(&obd_zombie_impexp_lock);
}
EXPORT_SYMBOL(dump_exports);
void obd_exports_barrier(struct obd_device *obd)
{
int waited = 2;
- LASSERT(list_empty(&obd->obd_exports));
- spin_lock(&obd->obd_dev_lock);
- while (!list_empty(&obd->obd_unlinked_exports)) {
- spin_unlock(&obd->obd_dev_lock);
- cfs_schedule_timeout(CFS_TASK_UNINT, cfs_time_seconds(waited));
+ LASSERT(cfs_list_empty(&obd->obd_exports));
+ cfs_spin_lock(&obd->obd_dev_lock);
+ while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
+ cfs_spin_unlock(&obd->obd_dev_lock);
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ cfs_time_seconds(waited));
if (waited > 5 && IS_PO2(waited)) {
LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
"more than %d seconds. "
"The obd refcount = %d. Is it stuck?\n",
obd->obd_name, waited,
- atomic_read(&obd->obd_refcount));
+ cfs_atomic_read(&obd->obd_refcount));
dump_exports(obd, 0);
}
waited *= 2;
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
}
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(obd_exports_barrier);
ENTRY;
do {
- spin_lock(&obd_zombie_impexp_lock);
+ cfs_spin_lock(&obd_zombie_impexp_lock);
import = NULL;
- if (!list_empty(&obd_zombie_imports)) {
- import = list_entry(obd_zombie_imports.next,
- struct obd_import,
- imp_zombie_chain);
- list_del_init(&import->imp_zombie_chain);
+ if (!cfs_list_empty(&obd_zombie_imports)) {
+ import = cfs_list_entry(obd_zombie_imports.next,
+ struct obd_import,
+ imp_zombie_chain);
+ cfs_list_del_init(&import->imp_zombie_chain);
}
export = NULL;
- if (!list_empty(&obd_zombie_exports)) {
- export = list_entry(obd_zombie_exports.next,
- struct obd_export,
- exp_obd_chain);
- list_del_init(&export->exp_obd_chain);
+ if (!cfs_list_empty(&obd_zombie_exports)) {
+ export = cfs_list_entry(obd_zombie_exports.next,
+ struct obd_export,
+ exp_obd_chain);
+ cfs_list_del_init(&export->exp_obd_chain);
}
- spin_unlock(&obd_zombie_impexp_lock);
+ cfs_spin_unlock(&obd_zombie_impexp_lock);
if (import != NULL)
class_import_destroy(import);
EXIT;
}
-static struct completion obd_zombie_start;
-static struct completion obd_zombie_stop;
+static cfs_completion_t obd_zombie_start;
+static cfs_completion_t obd_zombie_stop;
static unsigned long obd_zombie_flags;
static cfs_waitq_t obd_zombie_waitq;
static pid_t obd_zombie_pid;
{
int rc;
- spin_lock(&obd_zombie_impexp_lock);
- rc = list_empty(&obd_zombie_imports) &&
- list_empty(&obd_zombie_exports) &&
- !test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+ cfs_spin_lock(&obd_zombie_impexp_lock);
+ rc = cfs_list_empty(&obd_zombie_imports) &&
+ cfs_list_empty(&obd_zombie_exports) &&
+ !cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
- spin_unlock(&obd_zombie_impexp_lock);
+ cfs_spin_unlock(&obd_zombie_impexp_lock);
RETURN(rc);
}
* Add export to the obd_zombe thread and notify it.
*/
static void obd_zombie_export_add(struct obd_export *exp) {
- spin_lock(&exp->exp_obd->obd_dev_lock);
- LASSERT(!list_empty(&exp->exp_obd_chain));
- list_del_init(&exp->exp_obd_chain);
- spin_unlock(&exp->exp_obd->obd_dev_lock);
- spin_lock(&obd_zombie_impexp_lock);
- list_add(&exp->exp_obd_chain, &obd_zombie_exports);
- spin_unlock(&obd_zombie_impexp_lock);
+ cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
+ LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
+ cfs_list_del_init(&exp->exp_obd_chain);
+ cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
+ cfs_spin_lock(&obd_zombie_impexp_lock);
+ cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
+ cfs_spin_unlock(&obd_zombie_impexp_lock);
if (obd_zombie_impexp_notify != NULL)
obd_zombie_impexp_notify();
*/
static void obd_zombie_import_add(struct obd_import *imp) {
LASSERT(imp->imp_sec == NULL);
- spin_lock(&obd_zombie_impexp_lock);
- LASSERT(list_empty(&imp->imp_zombie_chain));
- list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
- spin_unlock(&obd_zombie_impexp_lock);
+ cfs_spin_lock(&obd_zombie_impexp_lock);
+ LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
+ cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
+ cfs_spin_unlock(&obd_zombie_impexp_lock);
if (obd_zombie_impexp_notify != NULL)
obd_zombie_impexp_notify();
{
int rc;
- LASSERT(!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
- spin_lock(&obd_zombie_impexp_lock);
- rc = list_empty(&obd_zombie_imports) &&
- list_empty(&obd_zombie_exports);
- spin_unlock(&obd_zombie_impexp_lock);
+ LASSERT(!cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
+ cfs_spin_lock(&obd_zombie_impexp_lock);
+ rc = cfs_list_empty(&obd_zombie_imports) &&
+ cfs_list_empty(&obd_zombie_exports);
+ cfs_spin_unlock(&obd_zombie_impexp_lock);
return rc;
}
int rc;
if ((rc = cfs_daemonize_ctxt("obd_zombid"))) {
- complete(&obd_zombie_start);
+ cfs_complete(&obd_zombie_start);
RETURN(rc);
}
- complete(&obd_zombie_start);
+ cfs_complete(&obd_zombie_start);
obd_zombie_pid = cfs_curproc_pid();
- while(!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
+ while(!cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
struct l_wait_info lwi = { 0 };
l_wait_event(obd_zombie_waitq,
cfs_waitq_signal(&obd_zombie_waitq);
}
- complete(&obd_zombie_stop);
+ cfs_complete(&obd_zombie_stop);
RETURN(0);
}
#else /* ! KERNEL */
-static atomic_t zombie_recur = ATOMIC_INIT(0);
+static cfs_atomic_t zombie_recur = CFS_ATOMIC_INIT(0);
static void *obd_zombie_impexp_work_cb;
static void *obd_zombie_impexp_idle_cb;
{
int rc = 0;
- if (atomic_inc_return(&zombie_recur) == 1) {
+ if (cfs_atomic_inc_return(&zombie_recur) == 1) {
obd_zombie_impexp_cull();
rc = 1;
}
- atomic_dec(&zombie_recur);
+ cfs_atomic_dec(&zombie_recur);
return rc;
}
CFS_INIT_LIST_HEAD(&obd_zombie_imports);
CFS_INIT_LIST_HEAD(&obd_zombie_exports);
- spin_lock_init(&obd_zombie_impexp_lock);
- init_completion(&obd_zombie_start);
- init_completion(&obd_zombie_stop);
+ cfs_spin_lock_init(&obd_zombie_impexp_lock);
+ cfs_init_completion(&obd_zombie_start);
+ cfs_init_completion(&obd_zombie_stop);
cfs_waitq_init(&obd_zombie_waitq);
obd_zombie_pid = 0;
if (rc < 0)
RETURN(rc);
- wait_for_completion(&obd_zombie_start);
+ cfs_wait_for_completion(&obd_zombie_start);
#else
obd_zombie_impexp_work_cb =
*/
void obd_zombie_impexp_stop(void)
{
- set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+ cfs_set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
obd_zombie_impexp_notify();
#ifdef __KERNEL__
- wait_for_completion(&obd_zombie_stop);
+ cfs_wait_for_completion(&obd_zombie_stop);
#else
liblustre_deregister_wait_callback(obd_zombie_impexp_work_cb);
liblustre_deregister_idle_callback(obd_zombie_impexp_idle_cb);
#include <obd_support.h>
#define lustre_get_group_info(group_info) do { \
- atomic_inc(&(group_info)->usage); \
+ cfs_atomic_inc(&(group_info)->usage); \
} while (0)
#define lustre_put_group_info(group_info) do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
- groups_free(group_info); \
+ if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
+ cfs_groups_free(group_info); \
} while (0)
/*
* groups_search() is copied from linux kernel!
* A simple bsearch.
*/
-static int lustre_groups_search(struct group_info *group_info, gid_t grp)
+static int lustre_groups_search(cfs_group_info_t *group_info,
+ gid_t grp)
{
int left, right;
return 0;
}
-void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist)
+void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist)
{
int i;
int count = ginfo->ngroups;
/* groups_sort() is copied from linux kernel! */
/* a simple shell-metzner sort */
-void lustre_groups_sort(struct group_info *group_info)
+void lustre_groups_sort(cfs_group_info_t *group_info)
{
int base, max, stride;
int gidsetsize = group_info->ngroups;
int rc = 1;
if (grp != mu->mu_fsgid) {
- struct group_info *group_info = NULL;
+ cfs_group_info_t *group_info = NULL;
if (mu->mu_ginfo || !mu->mu_identity ||
mu->mu_valid == UCRED_OLD)
EXPORT_SYMBOL(lustre_in_group_p);
struct lustre_idmap_entry {
- struct list_head lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
- struct list_head lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
- struct list_head lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
- struct list_head lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
+ cfs_list_t lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
+ cfs_list_t lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
+ cfs_list_t lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
+ cfs_list_t lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
uid_t lie_rmt_uid; /* remote uid */
uid_t lie_lcl_uid; /* local uid */
gid_t lie_rmt_gid; /* remote gid */
static void idmap_entry_free(struct lustre_idmap_entry *e)
{
- if (!list_empty(&e->lie_rmt_uid_hash))
- list_del(&e->lie_rmt_uid_hash);
- if (!list_empty(&e->lie_lcl_uid_hash))
- list_del(&e->lie_lcl_uid_hash);
- if (!list_empty(&e->lie_rmt_gid_hash))
- list_del(&e->lie_rmt_gid_hash);
- if (!list_empty(&e->lie_lcl_gid_hash))
- list_del(&e->lie_lcl_gid_hash);
+ if (!cfs_list_empty(&e->lie_rmt_uid_hash))
+ cfs_list_del(&e->lie_rmt_uid_hash);
+ if (!cfs_list_empty(&e->lie_lcl_uid_hash))
+ cfs_list_del(&e->lie_lcl_uid_hash);
+ if (!cfs_list_empty(&e->lie_rmt_gid_hash))
+ cfs_list_del(&e->lie_rmt_gid_hash);
+ if (!cfs_list_empty(&e->lie_lcl_gid_hash))
+ cfs_list_del(&e->lie_lcl_gid_hash);
OBD_FREE_PTR(e);
}
uid_t rmt_uid, uid_t lcl_uid,
gid_t rmt_gid, gid_t lcl_gid)
{
- struct list_head *head;
+ cfs_list_t *head;
struct lustre_idmap_entry *e;
head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)];
- list_for_each_entry(e, head, lie_rmt_uid_hash)
+ cfs_list_for_each_entry(e, head, lie_rmt_uid_hash)
if (e->lie_rmt_uid == rmt_uid) {
if (e->lie_lcl_uid == lcl_uid) {
if (e->lie_rmt_gid == rmt_gid &&
}
head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)];
- list_for_each_entry(e, head, lie_rmt_gid_hash)
+ cfs_list_for_each_entry(e, head, lie_rmt_gid_hash)
if (e->lie_rmt_gid == rmt_gid) {
if (e->lie_lcl_gid == lcl_gid) {
if (unlikely(e->lie_rmt_uid == rmt_uid &&
return NULL;
}
-static __u32 idmap_lookup_uid(struct list_head *hash, int reverse, __u32 uid)
+static __u32 idmap_lookup_uid(cfs_list_t *hash, int reverse,
+ __u32 uid)
{
- struct list_head *head = &hash[lustre_idmap_hashfunc(uid)];
+ cfs_list_t *head = &hash[lustre_idmap_hashfunc(uid)];
struct lustre_idmap_entry *e;
if (!reverse) {
- list_for_each_entry(e, head, lie_rmt_uid_hash)
+ cfs_list_for_each_entry(e, head, lie_rmt_uid_hash)
if (e->lie_rmt_uid == uid)
return e->lie_lcl_uid;
} else {
- list_for_each_entry(e, head, lie_lcl_uid_hash)
+ cfs_list_for_each_entry(e, head, lie_lcl_uid_hash)
if (e->lie_lcl_uid == uid)
return e->lie_rmt_uid;
}
return CFS_IDMAP_NOTFOUND;
}
-static __u32 idmap_lookup_gid(struct list_head *hash, int reverse, __u32 gid)
+static __u32 idmap_lookup_gid(cfs_list_t *hash, int reverse, __u32 gid)
{
- struct list_head *head = &hash[lustre_idmap_hashfunc(gid)];
+ cfs_list_t *head = &hash[lustre_idmap_hashfunc(gid)];
struct lustre_idmap_entry *e;
if (!reverse) {
- list_for_each_entry(e, head, lie_rmt_gid_hash)
+ cfs_list_for_each_entry(e, head, lie_rmt_gid_hash)
if (e->lie_rmt_gid == gid)
return e->lie_lcl_gid;
} else {
- list_for_each_entry(e, head, lie_lcl_gid_hash)
+ cfs_list_for_each_entry(e, head, lie_lcl_gid_hash)
if (e->lie_lcl_gid == gid)
return e->lie_rmt_gid;
}
LASSERT(t);
- spin_lock(&t->lit_lock);
+ cfs_spin_lock(&t->lit_lock);
e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
- spin_unlock(&t->lit_lock);
+ cfs_spin_unlock(&t->lit_lock);
if (!e0) {
e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
if (!e0)
return -ENOMEM;
- spin_lock(&t->lit_lock);
+ cfs_spin_lock(&t->lit_lock);
e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
if (e1 == NULL) {
- list_add_tail(&e0->lie_rmt_uid_hash,
- &t->lit_idmaps[RMT_UIDMAP_IDX]
- [lustre_idmap_hashfunc(ruid)]);
- list_add_tail(&e0->lie_lcl_uid_hash,
- &t->lit_idmaps[LCL_UIDMAP_IDX]
- [lustre_idmap_hashfunc(luid)]);
- list_add_tail(&e0->lie_rmt_gid_hash,
- &t->lit_idmaps[RMT_GIDMAP_IDX]
- [lustre_idmap_hashfunc(rgid)]);
- list_add_tail(&e0->lie_lcl_gid_hash,
- &t->lit_idmaps[LCL_GIDMAP_IDX]
- [lustre_idmap_hashfunc(lgid)]);
- }
- spin_unlock(&t->lit_lock);
+ cfs_list_add_tail(&e0->lie_rmt_uid_hash,
+ &t->lit_idmaps[RMT_UIDMAP_IDX]
+ [lustre_idmap_hashfunc(ruid)]);
+ cfs_list_add_tail(&e0->lie_lcl_uid_hash,
+ &t->lit_idmaps[LCL_UIDMAP_IDX]
+ [lustre_idmap_hashfunc(luid)]);
+ cfs_list_add_tail(&e0->lie_rmt_gid_hash,
+ &t->lit_idmaps[RMT_GIDMAP_IDX]
+ [lustre_idmap_hashfunc(rgid)]);
+ cfs_list_add_tail(&e0->lie_lcl_gid_hash,
+ &t->lit_idmaps[LCL_GIDMAP_IDX]
+ [lustre_idmap_hashfunc(lgid)]);
+ }
+ cfs_spin_unlock(&t->lit_lock);
if (e1 != NULL) {
idmap_entry_free(e0);
if (IS_ERR(e1))
LASSERT(t);
- spin_lock(&t->lit_lock);
+ cfs_spin_lock(&t->lit_lock);
e = idmap_search_entry(t, ruid, luid, rgid, lgid);
if (IS_ERR(e))
rc = PTR_ERR(e);
else if (e)
idmap_entry_free(e);
- spin_unlock(&t->lit_lock);
+ cfs_spin_unlock(&t->lit_lock);
return rc;
}
struct lustre_idmap_table *t,
int reverse, uid_t uid)
{
- struct list_head *hash;
+ cfs_list_t *hash;
if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
if (!reverse) {
hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];
- spin_lock(&t->lit_lock);
+ cfs_spin_lock(&t->lit_lock);
uid = idmap_lookup_uid(hash, reverse, uid);
- spin_unlock(&t->lit_lock);
+ cfs_spin_unlock(&t->lit_lock);
return uid;
}
int lustre_idmap_lookup_gid(struct md_ucred *mu, struct lustre_idmap_table *t,
int reverse, gid_t gid)
{
- struct list_head *hash;
+ cfs_list_t *hash;
if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
if (!reverse) {
hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];
- spin_lock(&t->lit_lock);
+ cfs_spin_lock(&t->lit_lock);
gid = idmap_lookup_gid(hash, reverse, gid);
- spin_unlock(&t->lit_lock);
+ cfs_spin_unlock(&t->lit_lock);
return gid;
}
if(unlikely(t == NULL))
return (ERR_PTR(-ENOMEM));
- spin_lock_init(&t->lit_lock);
+ cfs_spin_lock_init(&t->lit_lock);
for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
- INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
+ CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
return t;
}
void lustre_idmap_fini(struct lustre_idmap_table *t)
{
- struct list_head *list;
+ cfs_list_t *list;
struct lustre_idmap_entry *e;
int i;
LASSERT(t);
list = t->lit_idmaps[RMT_UIDMAP_IDX];
- spin_lock(&t->lit_lock);
+ cfs_spin_lock(&t->lit_lock);
for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
- while (!list_empty(&list[i])) {
- e = list_entry(list[i].next, struct lustre_idmap_entry,
- lie_rmt_uid_hash);
+ while (!cfs_list_empty(&list[i])) {
+ e = cfs_list_entry(list[i].next,
+ struct lustre_idmap_entry,
+ lie_rmt_uid_hash);
idmap_entry_free(e);
}
- spin_unlock(&t->lit_lock);
+ cfs_spin_unlock(&t->lit_lock);
OBD_FREE_PTR(t);
}
int offset = 0;
ENTRY;
- err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
+ err = cfs_copy_from_user(&hdr, (void *)arg, sizeof(hdr));
if ( err )
RETURN(err);
*len = hdr.ioc_len;
data = (struct obd_ioctl_data *)*buf;
- err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
+ err = cfs_copy_from_user(*buf, (void *)arg, hdr.ioc_len);
if ( err ) {
OBD_VFREE(*buf, hdr.ioc_len);
RETURN(err);
if (data->ioc_inllen1) {
data->ioc_inlbuf1 = &data->ioc_bulk[0];
- offset += size_round(data->ioc_inllen1);
+ offset += cfs_size_round(data->ioc_inllen1);
}
if (data->ioc_inllen2) {
data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
- offset += size_round(data->ioc_inllen2);
+ offset += cfs_size_round(data->ioc_inllen2);
}
if (data->ioc_inllen3) {
data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
- offset += size_round(data->ioc_inllen3);
+ offset += cfs_size_round(data->ioc_inllen3);
}
if (data->ioc_inllen4) {
{
int err;
- err = copy_to_user(arg, data, len);
+ err = cfs_copy_to_user(arg, data, len);
if (err)
err = -EFAULT;
return err;
if (libcfs_catastrophe)
rc += snprintf(page + rc, count - rc, "LBUG\n");
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd;
continue;
class_incref(obd, __FUNCTION__, cfs_current());
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
if (obd_health_check(obd)) {
rc += snprintf(page + rc, count - rc,
obd->obd_name);
}
class_decref(obd, __FUNCTION__, cfs_current());
- spin_lock(&obd_dev_lock);
+ cfs_spin_lock(&obd_dev_lock);
}
- spin_unlock(&obd_dev_lock);
+ cfs_spin_unlock(&obd_dev_lock);
if (rc == 0)
return snprintf(page, count, "healthy\n");
return seq_printf(p, "%3d %s %s %s %s %d\n",
(int)index, status, obd->obd_type->typ_name,
obd->obd_name, obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
+ cfs_atomic_read(&obd->obd_refcount));
}
struct seq_operations obd_device_list_sops = {
i_size_write(dst, src->o_size);
/* optimum IO size */
if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) {
- dst->i_blkbits = ffs(src->o_blksize) - 1;
+ dst->i_blkbits = cfs_ffs(src->o_blksize) - 1;
#ifdef HAVE_INODE_BLKSIZE
dst->i_blksize = src->o_blksize;
#endif
}
if (valid & OBD_MD_FLBLKSZ) {
- dst->i_blkbits = ffs(src->o_blksize)-1;
+ dst->i_blkbits = cfs_ffs(src->o_blksize)-1;
#ifdef HAVE_INODE_BLKSIZE
dst->i_blksize = src->o_blksize;
#endif
rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
if (old_fail_loc != obd_fail_loc)
- wake_up(&obd_race_waitq);
+ cfs_waitq_signal(&obd_race_waitq);
return rc;
}
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
+ if (cfs_copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += *lenp;
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
+ if (cfs_copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += *lenp;
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
+ if (cfs_copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += *lenp;
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
+ if (cfs_copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += *lenp;
rc = lprocfs_write_frac_helper(buffer, *lenp,
(unsigned int*)table->data,
1 << (20 - CFS_PAGE_SHIFT));
- /* Don't allow them to let dirty pages exceed 90% of system memory,
- * and set a hard minimum of 4MB. */
- if (obd_max_dirty_pages > ((num_physpages / 10) * 9)) {
+ /* Don't allow them to let dirty pages exceed 90% of system
+ * memory and set a hard minimum of 4MB. */
+ if (obd_max_dirty_pages > ((cfs_num_physpages / 10) * 9)) {
CERROR("Refusing to set max dirty pages to %u, which "
- "is more than 90%% of available RAM; setting to %lu\n",
- obd_max_dirty_pages, ((num_physpages / 10) * 9));
- obd_max_dirty_pages = ((num_physpages / 10) * 9);
+ "is more than 90%% of available RAM; setting "
+ "to %lu\n", obd_max_dirty_pages,
+ ((cfs_num_physpages / 10) * 9));
+ obd_max_dirty_pages = ((cfs_num_physpages / 10) * 9);
} else if (obd_max_dirty_pages < 4 << (20 - CFS_PAGE_SHIFT)) {
obd_max_dirty_pages = 4 << (20 - CFS_PAGE_SHIFT);
}
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
+ if (cfs_copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
}
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
+ if (cfs_copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
}
if (loghandle == NULL)
RETURN(ERR_PTR(-ENOMEM));
- init_rwsem(&loghandle->lgh_lock);
+ cfs_init_rwsem(&loghandle->lgh_lock);
RETURN(loghandle);
}
if (!loghandle->lgh_hdr)
goto out;
if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)
- list_del_init(&loghandle->u.phd.phd_entry);
+ cfs_list_del_init(&loghandle->u.phd.phd_entry);
if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
- LASSERT(list_empty(&loghandle->u.chd.chd_head));
+ LASSERT(cfs_list_empty(&loghandle->u.chd.chd_head));
OBD_FREE(loghandle->lgh_hdr, LLOG_CHUNK_SIZE);
out:
if (!buf) {
lpi->lpi_rc = -ENOMEM;
#ifdef __KERNEL__
- complete(&lpi->lpi_completion);
+ cfs_complete(&lpi->lpi_completion);
#endif
return 0;
}
OBD_FREE(buf, LLOG_CHUNK_SIZE);
lpi->lpi_rc = rc;
#ifdef __KERNEL__
- complete(&lpi->lpi_completion);
+ cfs_complete(&lpi->lpi_completion);
#endif
return 0;
}
lpi->lpi_catdata = catdata;
#ifdef __KERNEL__
- init_completion(&lpi->lpi_completion);
+ cfs_init_completion(&lpi->lpi_completion);
rc = cfs_kernel_thread(llog_process_thread, lpi, CLONE_VM | CLONE_FILES);
if (rc < 0) {
CERROR("cannot start thread: %d\n", rc);
OBD_FREE_PTR(lpi);
RETURN(rc);
}
- wait_for_completion(&lpi->lpi_completion);
+ cfs_wait_for_completion(&lpi->lpi_completion);
#else
llog_process_thread(lpi);
#endif
loghandle->lgh_hdr->llh_cat_idx = index;
cathandle->u.chd.chd_current_log = loghandle;
- LASSERT(list_empty(&loghandle->u.phd.phd_entry));
- list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
+ LASSERT(cfs_list_empty(&loghandle->u.phd.phd_entry));
+ cfs_list_add_tail(&loghandle->u.phd.phd_entry,
+ &cathandle->u.chd.chd_head);
out_destroy:
if (rc < 0)
if (cathandle == NULL)
RETURN(-EBADF);
- list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
+ cfs_list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
+ u.phd.phd_entry) {
struct llog_logid *cgl = &loghandle->lgh_id;
if (cgl->lgl_oid == logid->lgl_oid) {
if (cgl->lgl_ogen != logid->lgl_ogen) {
} else {
rc = llog_init_handle(loghandle, LLOG_F_IS_PLAIN, NULL);
if (!rc) {
- list_add(&loghandle->u.phd.phd_entry,
- &cathandle->u.chd.chd_head);
+ cfs_list_add(&loghandle->u.phd.phd_entry,
+ &cathandle->u.chd.chd_head);
}
}
if (!rc) {
int rc;
ENTRY;
- list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
+ cfs_list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
+ u.phd.phd_entry) {
int err = llog_close(loghandle);
if (err)
CERROR("error closing loghandle\n");
struct llog_handle *loghandle = NULL;
ENTRY;
- down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
+ cfs_down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
loghandle = cathandle->u.chd.chd_current_log;
if (loghandle) {
struct llog_log_hdr *llh = loghandle->lgh_hdr;
- down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
if (loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
- up_read(&cathandle->lgh_lock);
+ cfs_up_read(&cathandle->lgh_lock);
RETURN(loghandle);
} else {
- up_write(&loghandle->lgh_lock);
+ cfs_up_write(&loghandle->lgh_lock);
}
}
if (!create) {
if (loghandle)
- down_write(&loghandle->lgh_lock);
- up_read(&cathandle->lgh_lock);
+ cfs_down_write(&loghandle->lgh_lock);
+ cfs_up_read(&cathandle->lgh_lock);
RETURN(loghandle);
}
- up_read(&cathandle->lgh_lock);
+ cfs_up_read(&cathandle->lgh_lock);
/* time to create new log */
/* first, we have to make sure the state hasn't changed */
- down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
+ cfs_down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
loghandle = cathandle->u.chd.chd_current_log;
if (loghandle) {
struct llog_log_hdr *llh = loghandle->lgh_hdr;
- down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
if (loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
- up_write(&cathandle->lgh_lock);
+ cfs_up_write(&cathandle->lgh_lock);
RETURN(loghandle);
} else {
- up_write(&loghandle->lgh_lock);
+ cfs_up_write(&loghandle->lgh_lock);
}
}
CDEBUG(D_INODE, "creating new log\n");
loghandle = llog_cat_new_log(cathandle);
if (!IS_ERR(loghandle))
- down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
- up_write(&cathandle->lgh_lock);
+ cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ cfs_up_write(&cathandle->lgh_lock);
RETURN(loghandle);
}
rc = llog_write_rec(loghandle, rec, reccookie, 1, buf, -1);
if (rc < 0)
CERROR("llog_write_rec %d: lh=%p\n", rc, loghandle);
- up_write(&loghandle->lgh_lock);
+ cfs_up_write(&loghandle->lgh_lock);
if (rc == -ENOSPC) {
/* to create a new plain log */
loghandle = llog_cat_current_log(cathandle, 1);
if (IS_ERR(loghandle))
RETURN(PTR_ERR(loghandle));
rc = llog_write_rec(loghandle, rec, reccookie, 1, buf, -1);
- up_write(&loghandle->lgh_lock);
+ cfs_up_write(&loghandle->lgh_lock);
}
RETURN(rc);
int i, index, rc = 0;
ENTRY;
- down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
+ cfs_down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
for (i = 0; i < count; i++, cookies++) {
struct llog_handle *loghandle;
struct llog_logid *lgl = &cookies->lgc_lgl;
break;
}
- down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
rc = llog_cancel_rec(loghandle, cookies->lgc_index);
- up_write(&loghandle->lgh_lock);
+ cfs_up_write(&loghandle->lgh_lock);
if (rc == 1) { /* log has been destroyed */
index = loghandle->u.phd.phd_cookie.lgc_index;
index, cathandle->lgh_id.lgl_oid);
}
}
- up_write(&cathandle->lgh_lock);
+ cfs_up_write(&cathandle->lgh_lock);
RETURN(rc);
}
void *lpi_cbdata;
void *lpi_catdata;
int lpi_rc;
- struct completion lpi_completion;
+ cfs_completion_t lpi_completion;
};
int llog_cat_id2handle(struct llog_handle *cathandle, struct llog_handle **res,
if (ioc_data && (ioc_data->ioc_inllen1)) {
l = 0;
remains = ioc_data->ioc_inllen4 +
- size_round(ioc_data->ioc_inllen1) +
- size_round(ioc_data->ioc_inllen2) +
- size_round(ioc_data->ioc_inllen3);
+ cfs_size_round(ioc_data->ioc_inllen1) +
+ cfs_size_round(ioc_data->ioc_inllen2) +
+ cfs_size_round(ioc_data->ioc_inllen3);
from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
if (*endp != '\0')
RETURN(-EINVAL);
if (ioc_data->ioc_inllen1) {
l = 0;
remains = ioc_data->ioc_inllen4 +
- size_round(ioc_data->ioc_inllen1) +
- size_round(ioc_data->ioc_inllen2) +
- size_round(ioc_data->ioc_inllen3);
+ cfs_size_round(ioc_data->ioc_inllen1) +
+ cfs_size_round(ioc_data->ioc_inllen2) +
+ cfs_size_round(ioc_data->ioc_inllen3);
from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
if (*endp != '\0')
RETURN(-EINVAL);
int rc, index = 0;
ENTRY;
- down_write(&cat->lgh_lock);
+ cfs_down_write(&cat->lgh_lock);
rc = llog_cat_id2handle(cat, &log, logid);
if (rc) {
CDEBUG(D_IOCTL, "cannot find log #"LPX64"#"LPX64"#%08x\n",
rc = llog_cancel_rec(cat, index);
out:
llog_free_handle(log);
- up_write(&cat->lgh_lock);
+ cfs_up_write(&cat->lgh_lock);
RETURN(rc);
}
case OBD_IOC_LLOG_INFO: {
int l;
int remains = data->ioc_inllen2 +
- size_round(data->ioc_inllen1);
+ cfs_size_round(data->ioc_inllen1);
char *out = data->ioc_bulk;
l = snprintf(out, remains,
GOTO(out_close, err = -EINVAL);
if (handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) {
- down_write(&handle->lgh_lock);
+ cfs_down_write(&handle->lgh_lock);
err = llog_cancel_rec(handle, cookie.lgc_index);
- up_write(&handle->lgh_lock);
+ cfs_up_write(&handle->lgh_lock);
GOTO(out_close, err);
}
if (!idarray)
RETURN(-ENOMEM);
- mutex_down(&obd->obd_olg.olg_cat_processing);
+ cfs_mutex_down(&obd->obd_olg.olg_cat_processing);
rc = llog_get_cat_list(obd, name, 0, count, idarray);
if (rc)
GOTO(out, rc);
}
out:
/* release semaphore */
- mutex_up(&obd->obd_olg.olg_cat_processing);
+ cfs_mutex_up(&obd->obd_olg.olg_cat_processing);
OBD_VFREE(idarray, size);
RETURN(rc);
return NULL;
ctxt->loc_obd = obd;
- atomic_set(&ctxt->loc_refcount, 1);
+ cfs_atomic_set(&ctxt->loc_refcount, 1);
return ctxt;
}
struct obd_device *obd;
int rc = 0;
- spin_lock(&olg->olg_lock);
- if (!atomic_dec_and_test(&ctxt->loc_refcount)) {
- spin_unlock(&olg->olg_lock);
+ cfs_spin_lock(&olg->olg_lock);
+ if (!cfs_atomic_dec_and_test(&ctxt->loc_refcount)) {
+ cfs_spin_unlock(&olg->olg_lock);
return rc;
}
olg->olg_ctxts[ctxt->loc_idx] = NULL;
- spin_unlock(&olg->olg_lock);
+ cfs_spin_unlock(&olg->olg_lock);
if (ctxt->loc_lcm)
lcm_put(ctxt->loc_lcm);
obd = ctxt->loc_obd;
- spin_lock(&obd->obd_dev_lock);
- spin_unlock(&obd->obd_dev_lock); /* sync with llog ctxt user thread */
+ cfs_spin_lock(&obd->obd_dev_lock);
+ /* sync with llog ctxt user thread */
+ cfs_spin_unlock(&obd->obd_dev_lock);
/* obd->obd_starting is needed for the case of cleanup
* in error case while obd is starting up. */
rc = CTXTP(ctxt, cleanup)(ctxt);
llog_ctxt_destroy(ctxt);
- wake_up(&olg->olg_waitq);
+ cfs_waitq_signal(&olg->olg_waitq);
return rc;
}
EXPORT_SYMBOL(__llog_ctxt_put);
/*
* Banlance the ctxt get when calling llog_cleanup()
*/
- LASSERT(atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
- LASSERT(atomic_read(&ctxt->loc_refcount) > 1);
+ LASSERT(cfs_atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
+ LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 1);
llog_ctxt_put(ctxt);
/*
ctxt->loc_olg = olg;
ctxt->loc_idx = index;
ctxt->loc_logops = op;
- sema_init(&ctxt->loc_sem, 1);
+ cfs_sema_init(&ctxt->loc_sem, 1);
ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
cathandle = ctxt->loc_handle;
if (cathandle) {
- list_for_each_entry_safe(loghandle, n,
- &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
+ cfs_list_for_each_entry_safe(loghandle, n,
+ &cathandle->u.chd.chd_head,
+ u.phd.phd_entry) {
llh = loghandle->lgh_hdr;
if ((llh->llh_flags &
LLOG_F_ZAP_WHEN_EMPTY) &&
#define MAX_STRING_SIZE 128
/* for bug 10866, global variable */
-DECLARE_RWSEM(_lprocfs_lock);
+CFS_DECLARE_RWSEM(_lprocfs_lock);
EXPORT_SYMBOL(_lprocfs_lock);
int lprocfs_seq_release(struct inode *inode, struct file *file)
}
count = (rc < size) ? rc : size;
- if (copy_to_user(buf, start, count)) {
+ if (cfs_copy_to_user(buf, start, count)) {
rc = -EFAULT;
goto out;
}
struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
struct obd_device *obd = dp->data;
- atomic_inc(&obd->obd_evict_inprogress);
+ cfs_atomic_inc(&obd->obd_evict_inprogress);
return 0;
}
struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
struct obd_device *obd = dp->data;
- atomic_dec(&obd->obd_evict_inprogress);
- wake_up(&obd->obd_evict_inprogress_waitq);
+ cfs_atomic_dec(&obd->obd_evict_inprogress);
+ cfs_waitq_signal(&obd->obd_evict_inprogress_waitq);
return 0;
}
unsigned long tmp;
dummy[MAX_STRING_SIZE] = '\0';
- if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+ if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE))
return -EFAULT;
tmp = simple_strtoul(dummy, &end, 0);
int lprocfs_rd_atomic(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- atomic_t *atom = data;
+ cfs_atomic_t *atom = data;
LASSERT(atom != NULL);
*eof = 1;
- return snprintf(page, count, "%d\n", atomic_read(atom));
+ return snprintf(page, count, "%d\n", cfs_atomic_read(atom));
}
int lprocfs_wr_atomic(struct file *file, const char *buffer,
unsigned long count, void *data)
{
- atomic_t *atm = data;
+ cfs_atomic_t *atm = data;
int val = 0;
int rc;
if (val <= 0)
return -ERANGE;
- atomic_set(atm, val);
+ cfs_atomic_set(atm, val);
return count;
}
int *eof, void *data)
{
struct obd_statfs osfs;
- int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+ int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
*eof = 1;
int *eof, void *data)
{
struct obd_statfs osfs;
- int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+ int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int *eof, void *data)
{
struct obd_statfs osfs;
- int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+ int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int *eof, void *data)
{
struct obd_statfs osfs;
- int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+ int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int *eof, void *data)
{
struct obd_statfs osfs;
- int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+ int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
*eof = 1;
int *eof, void *data)
{
struct obd_statfs osfs;
- int rc = obd_statfs(data, &osfs, cfs_time_current_64() - HZ,
+ int rc = obd_statfs(data, &osfs, cfs_time_current_64() - CFS_HZ,
OBD_STATFS_NODELAY);
if (!rc) {
*eof = 1;
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
num_cpu = 1;
else
- num_cpu = num_possible_cpus();
+ num_cpu = cfs_num_possible_cpus();
for (i = 0; i < num_cpu; i++) {
percpu_cntr = &(stats->ls_percpu[i])->lp_cntr[idx];
do {
- centry = atomic_read(&percpu_cntr->lc_cntl.la_entry);
+ centry = cfs_atomic_read(&percpu_cntr-> \
+ lc_cntl.la_entry);
t.lc_count = percpu_cntr->lc_count;
t.lc_sum = percpu_cntr->lc_sum;
t.lc_min = percpu_cntr->lc_min;
t.lc_max = percpu_cntr->lc_max;
t.lc_sumsquare = percpu_cntr->lc_sumsquare;
- } while (centry != atomic_read(&percpu_cntr->lc_cntl.la_entry) &&
- centry != atomic_read(&percpu_cntr->lc_cntl.la_exit));
+ } while (centry != cfs_atomic_read(&percpu_cntr->lc_cntl. \
+ la_entry) &&
+ centry != cfs_atomic_read(&percpu_cntr->lc_cntl. \
+ la_exit));
cnt->lc_count += t.lc_count;
cnt->lc_sum += t.lc_sum;
if (t.lc_min < cnt->lc_min)
" in-progress_invalidations: %u\n",
imp->imp_conn_cnt,
imp->imp_generation,
- atomic_read(&imp->imp_inval_count));
+ cfs_atomic_read(&imp->imp_inval_count));
lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret);
do_div(ret.lc_sum, ret.lc_count);
" unregistering: %u\n"
" timeouts: %u\n"
" avg_waittime: "LPU64" %s\n",
- atomic_read(&imp->imp_inflight),
- atomic_read(&imp->imp_unregistering),
- atomic_read(&imp->imp_timeouts),
+ cfs_atomic_read(&imp->imp_inflight),
+ cfs_atomic_read(&imp->imp_unregistering),
+ cfs_atomic_read(&imp->imp_timeouts),
ret.lc_sum, ret.lc_units);
k = 0;
struct obd_device *obd = data;
return snprintf(page, count, "%d\n",
- atomic_read(&obd->u.cli.cl_quota_resends));
+ cfs_atomic_read(&obd->u.cli.cl_quota_resends));
}
int lprocfs_wr_quota_resend_count(struct file *file, const char *buffer,
if (rc)
return rc;
- atomic_set(&obd->u.cli.cl_quota_resends, val);
+ cfs_atomic_set(&obd->u.cli.cl_quota_resends, val);
return count;
}
client_stat->nid_proc, client_stat->nid_stats,
client_stat->nid_brw_stats);
- LASSERTF(atomic_read(&client_stat->nid_exp_ref_count) == 0,
- "count %d\n", atomic_read(&client_stat->nid_exp_ref_count));
+ LASSERTF(cfs_atomic_read(&client_stat->nid_exp_ref_count) == 0,
+ "count %d\n",
+ cfs_atomic_read(&client_stat->nid_exp_ref_count));
- hlist_del_init(&client_stat->nid_hash);
+ cfs_hlist_del_init(&client_stat->nid_hash);
if (client_stat->nid_proc)
lprocfs_remove(&client_stat->nid_proc);
/* we need extra list - because hash_exit called to early */
/* not need locking because all clients is died */
- while(!list_empty(&obd->obd_nid_stats)) {
- stat = list_entry(obd->obd_nid_stats.next,
- struct nid_stat, nid_list);
- list_del_init(&stat->nid_list);
+ while(!cfs_list_empty(&obd->obd_nid_stats)) {
+ stat = cfs_list_entry(obd->obd_nid_stats.next,
+ struct nid_stat, nid_list);
+ cfs_list_del_init(&stat->nid_list);
lprocfs_free_client_stats(stat);
}
if (flags & LPROCFS_STATS_FLAG_NOPERCPU)
num_cpu = 1;
else
- num_cpu = num_possible_cpus();
+ num_cpu = cfs_num_possible_cpus();
OBD_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_cpu]));
if (stats == NULL)
if (flags & LPROCFS_STATS_FLAG_NOPERCPU) {
stats->ls_flags = flags;
- spin_lock_init(&stats->ls_lock);
+ cfs_spin_lock_init(&stats->ls_lock);
/* Use this lock only if there are no percpu areas */
} else {
stats->ls_flags = 0;
percpusize = offsetof(struct lprocfs_percpu, lp_cntr[num]);
if (num_cpu > 1)
- percpusize = L1_CACHE_ALIGN(percpusize);
+ percpusize = CFS_L1_CACHE_ALIGN(percpusize);
for (i = 0; i < num_cpu; i++) {
OBD_ALLOC(stats->ls_percpu[i], percpusize);
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
num_cpu = 1;
else
- num_cpu = num_possible_cpus();
+ num_cpu = cfs_num_possible_cpus();
percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);
if (num_cpu > 1)
- percpusize = L1_CACHE_ALIGN(percpusize);
+ percpusize = CFS_L1_CACHE_ALIGN(percpusize);
for (i = 0; i < num_cpu; i++)
OBD_FREE(stats->ls_percpu[i], percpusize);
OBD_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_cpu]));
for (i = 0; i < num_cpu; i++) {
for (j = 0; j < stats->ls_num; j++) {
percpu_cntr = &(stats->ls_percpu[i])->lp_cntr[j];
- atomic_inc(&percpu_cntr->lc_cntl.la_entry);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
percpu_cntr->lc_count = 0;
percpu_cntr->lc_sum = 0;
percpu_cntr->lc_min = LC_MIN_INIT;
percpu_cntr->lc_max = 0;
percpu_cntr->lc_sumsquare = 0;
- atomic_inc(&percpu_cntr->lc_cntl.la_exit);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
}
}
if (cntr == &(stats->ls_percpu[0])->lp_cntr[0]) {
struct timeval now;
- do_gettimeofday(&now);
+ cfs_gettimeofday(&now);
rc = seq_printf(p, "%-25s %lu.%lu secs.usecs\n",
"snapshot_time", now.tv_sec, now.tv_usec);
if (rc < 0)
ENTRY;
/* object has only hash + iterate_all references.
* add/delete blocked by hash bucket lock */
- CDEBUG(D_INFO,"refcnt %d\n", atomic_read(&stat->nid_exp_ref_count));
- if (atomic_read(&stat->nid_exp_ref_count) == 2) {
- hlist_del_init(&stat->nid_hash);
+ CDEBUG(D_INFO,"refcnt %d\n", cfs_atomic_read(&stat->nid_exp_ref_count));
+ if (cfs_atomic_read(&stat->nid_exp_ref_count) == 2) {
+ cfs_hlist_del_init(&stat->nid_hash);
nidstat_putref(stat);
- spin_lock(&stat->nid_obd->obd_nid_lock);
- list_move(&stat->nid_list, data);
- spin_unlock(&stat->nid_obd->obd_nid_lock);
+ cfs_spin_lock(&stat->nid_obd->obd_nid_lock);
+ cfs_list_move(&stat->nid_list, data);
+ cfs_spin_unlock(&stat->nid_obd->obd_nid_lock);
EXIT;
return;
}
cfs_hash_for_each(obd->obd_nid_stats_hash,
lprocfs_nid_stats_clear_write_cb, &free_list);
- while (!list_empty(&free_list)) {
- client_stat = list_entry(free_list.next, struct nid_stat,
- nid_list);
- list_del_init(&client_stat->nid_list);
+ while (!cfs_list_empty(&free_list)) {
+ client_stat = cfs_list_entry(free_list.next, struct nid_stat,
+ nid_list);
+ cfs_list_del_init(&client_stat->nid_list);
lprocfs_free_client_stats(client_stat);
}
new_stat->nid = *nid;
new_stat->nid_obd = exp->exp_obd;
- atomic_set(&new_stat->nid_exp_ref_count, 0);
+ cfs_atomic_set(&new_stat->nid_exp_ref_count, 0);
old_stat = cfs_hash_findadd_unique(obd->obd_nid_stats_hash,
nid, &new_stat->nid_hash);
CDEBUG(D_INFO, "Found stats %p for nid %s - ref %d\n",
old_stat, libcfs_nid2str(*nid),
- atomic_read(&new_stat->nid_exp_ref_count));
+ cfs_atomic_read(&new_stat->nid_exp_ref_count));
/* Return -EALREADY here so that we know that the /proc
* entry already has been created */
if (old_stat != new_stat) {
- spin_lock(&obd->obd_nid_lock);
+ cfs_spin_lock(&obd->obd_nid_lock);
if (exp->exp_nid_stats != old_stat) {
if (exp->exp_nid_stats)
nidstat_putref(exp->exp_nid_stats);
nidstat_putref(old_stat);
}
- spin_unlock(&obd->obd_nid_lock);
+ cfs_spin_unlock(&obd->obd_nid_lock);
GOTO(destroy_new, rc = -EALREADY);
}
exp->exp_nid_stats = new_stat;
*newnid = 1;
/* protect competitive add to list, not need locking on destroy */
- spin_lock(&obd->obd_nid_lock);
- list_add(&new_stat->nid_list, &obd->obd_nid_stats);
- spin_unlock(&obd->obd_nid_lock);
+ cfs_spin_lock(&obd->obd_nid_lock);
+ cfs_list_add(&new_stat->nid_list, &obd->obd_nid_stats);
+ cfs_spin_unlock(&obd->obd_nid_lock);
RETURN(rc);
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (copy_from_user(kernbuf, buffer, count))
+ if (cfs_copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (copy_from_user(kernbuf, buffer, count))
+ if (cfs_copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
if (value >= OBD_HIST_MAX)
value = OBD_HIST_MAX - 1;
- spin_lock(&oh->oh_lock);
+ cfs_spin_lock(&oh->oh_lock);
oh->oh_buckets[value]++;
- spin_unlock(&oh->oh_lock);
+ cfs_spin_unlock(&oh->oh_lock);
}
EXPORT_SYMBOL(lprocfs_oh_tally);
void lprocfs_oh_clear(struct obd_histogram *oh)
{
- spin_lock(&oh->oh_lock);
+ cfs_spin_lock(&oh->oh_lock);
memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
- spin_unlock(&oh->oh_lock);
+ cfs_spin_unlock(&oh->oh_lock);
}
EXPORT_SYMBOL(lprocfs_oh_clear);
goto out;
/* Number of clients that have completed recovery */
if (lprocfs_obd_snprintf(&page, size, &len,"req_replay_clients: %d\n",
- atomic_read(&obd->obd_req_replay_clients))<= 0)
+ cfs_atomic_read(&obd->obd_req_replay_clients))
+ <= 0)
goto out;
if (lprocfs_obd_snprintf(&page, size, &len,"lock_repay_clients: %d\n",
- atomic_read(&obd->obd_lock_replay_clients))<=0)
+ cfs_atomic_read(&obd->obd_lock_replay_clients))
+ <=0)
goto out;
if (lprocfs_obd_snprintf(&page, size, &len,"completed_clients: %d\n",
obd->obd_connected_clients -
- atomic_read(&obd->obd_lock_replay_clients))<=0)
+ cfs_atomic_read(&obd->obd_lock_replay_clients))
+ <=0)
goto out;
if (lprocfs_obd_snprintf(&page, size, &len,"evicted_clients: %d\n",
obd->obd_stale_clients) <= 0)
site = o->lo_dev->ld_site;
orig = o;
kill_it = 0;
- write_lock(&site->ls_guard);
- if (atomic_dec_and_test(&top->loh_ref)) {
+ cfs_write_lock(&site->ls_guard);
+ if (cfs_atomic_dec_and_test(&top->loh_ref)) {
/*
* When last reference is released, iterate over object
* layers, and notify them that object is no longer busy.
*/
- list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+ cfs_list_for_each_entry_reverse(o, &top->loh_layers,
+ lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
o->lo_ops->loo_object_release(env, o);
}
* object lookup is possible and we can safely destroy
* object below.
*/
- hlist_del_init(&top->loh_hash);
- list_del_init(&top->loh_lru);
+ cfs_hlist_del_init(&top->loh_hash);
+ cfs_list_del_init(&top->loh_lru);
-- site->ls_total;
kill_it = 1;
}
}
- write_unlock(&site->ls_guard);
+ cfs_write_unlock(&site->ls_guard);
if (kill_it)
/*
* Object was already removed from hash and lru above, can
{
struct lu_object *scan;
struct lu_object *top;
- struct list_head *layers;
+ cfs_list_t *layers;
int clean;
int result;
ENTRY;
* object slices are created.
*/
clean = 1;
- list_for_each_entry(scan, layers, lo_linkage) {
+ cfs_list_for_each_entry(scan, layers, lo_linkage) {
if (scan->lo_flags & LU_OBJECT_ALLOCATED)
continue;
clean = 0;
}
} while (!clean);
- list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_start != NULL) {
result = scan->lo_ops->loo_object_start(env, scan);
if (result != 0) {
*/
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
- struct list_head splice;
- struct lu_object *scan;
- struct lu_site *site;
- struct list_head *layers;
+ cfs_list_t splice;
+ struct lu_object *scan;
+ struct lu_site *site;
+ cfs_list_t *layers;
site = o->lo_dev->ld_site;
layers = &o->lo_header->loh_layers;
/*
* First call ->loo_object_delete() method to release all resources.
*/
- list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_delete != NULL)
scan->lo_ops->loo_object_delete(env, scan);
}
* top-level slice.
*/
CFS_INIT_LIST_HEAD(&splice);
- list_splice_init(layers, &splice);
- while (!list_empty(&splice)) {
+ cfs_list_splice_init(layers, &splice);
+ while (!cfs_list_empty(&splice)) {
/*
* Free layers in bottom-to-top order, so that object header
* lives as long as possible and ->loo_object_free() methods
* can look at its contents.
*/
o = container_of0(splice.prev, struct lu_object, lo_linkage);
- list_del_init(&o->lo_linkage);
+ cfs_list_del_init(&o->lo_linkage);
LASSERT(o->lo_ops->loo_object_free != NULL);
o->lo_ops->loo_object_free(env, o);
}
*/
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
- struct list_head dispose;
+ cfs_list_t dispose;
struct lu_object_header *h;
struct lu_object_header *temp;
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
*/
- write_lock(&s->ls_guard);
- list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
+ cfs_write_lock(&s->ls_guard);
+ cfs_list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
/*
* Objects are sorted in lru order, and "busy" objects (ones
* with h->loh_ref > 0) naturally tend to live near hot end
*/
if (nr-- == 0)
break;
- if (atomic_read(&h->loh_ref) > 0)
+ if (cfs_atomic_read(&h->loh_ref) > 0)
continue;
- hlist_del_init(&h->loh_hash);
- list_move(&h->loh_lru, &dispose);
+ cfs_hlist_del_init(&h->loh_hash);
+ cfs_list_move(&h->loh_lru, &dispose);
s->ls_total --;
}
- write_unlock(&s->ls_guard);
+ cfs_write_unlock(&s->ls_guard);
/*
* Free everything on the dispose list. This is safe against races due
* to the reasons described in lu_object_put().
*/
- while (!list_empty(&dispose)) {
+ while (!cfs_list_empty(&dispose)) {
h = container_of0(dispose.next,
struct lu_object_header, loh_lru);
- list_del_init(&h->loh_lru);
+ cfs_list_del_init(&h->loh_lru);
lu_object_free(env, lu_object_top(h));
s->ls_stats.s_lru_purged ++;
}
vsnprintf(key->lck_area + used,
ARRAY_SIZE(key->lck_area) - used, format, args);
if (complete) {
- if (cdebug_show(info->lpi_mask, info->lpi_subsys))
+ if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys))
libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
(char *)info->lpi_file, info->lpi_fn,
info->lpi_line, "%s", key->lck_area);
const struct lu_object_header *hdr)
{
(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
- hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
+ hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
PFID(&hdr->loh_fid),
- hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
- list_empty((struct list_head *)&hdr->loh_lru) ? "" : " lru",
+ cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
+ cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \
+ "" : " lru",
hdr->loh_attr & LOHA_EXISTS ? " exist":"");
}
EXPORT_SYMBOL(lu_object_header_print);
top = o->lo_header;
lu_object_header_print(env, cookie, printer, top);
(*printer)(env, cookie, "{ \n");
- list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
depth = o->lo_depth + 4;
/*
struct lu_object_header *top;
top = o->lo_header;
- list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_invariant != NULL &&
!o->lo_ops->loo_object_invariant(o))
return 0;
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
- const struct hlist_head *bucket,
+ const cfs_hlist_head_t *bucket,
const struct lu_fid *f,
cfs_waitlink_t *waiter)
{
struct lu_object_header *h;
- struct hlist_node *scan;
+ cfs_hlist_node_t *scan;
- hlist_for_each_entry(h, scan, bucket, loh_hash) {
+ cfs_hlist_for_each_entry(h, scan, bucket, loh_hash) {
s->ls_stats.s_cache_check ++;
if (likely(lu_fid_eq(&h->loh_fid, f))) {
if (unlikely(lu_object_is_dying(h))) {
*/
cfs_waitlink_init(waiter);
cfs_waitq_add(&s->ls_marche_funebre, waiter);
- set_current_state(CFS_TASK_UNINT);
+ cfs_set_current_state(CFS_TASK_UNINT);
s->ls_stats.s_cache_death_race ++;
return ERR_PTR(-EAGAIN);
}
/* bump reference count... */
- if (atomic_add_return(1, &h->loh_ref) == 1)
+ if (cfs_atomic_add_return(1, &h->loh_ref) == 1)
++ s->ls_busy;
/* and move to the head of the LRU */
/*
{
/* all objects with same id and different versions will belong to same
* collisions list. */
- return hash_long(fid_flatten(f), bits);
+ return cfs_hash_long(fid_flatten(f), bits);
}
/**
const struct lu_object_conf *conf,
cfs_waitlink_t *waiter)
{
- struct lu_site *s;
- struct lu_object *o;
- struct lu_object *shadow;
- struct hlist_head *bucket;
+ struct lu_site *s;
+ struct lu_object *o;
+ struct lu_object *shadow;
+ cfs_hlist_head_t *bucket;
/*
* This uses standard index maintenance protocol:
s = dev->ld_site;
bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);
- read_lock(&s->ls_guard);
+ cfs_read_lock(&s->ls_guard);
o = htable_lookup(s, bucket, f, waiter);
- read_unlock(&s->ls_guard);
+ cfs_read_unlock(&s->ls_guard);
if (o != NULL)
return o;
LASSERT(lu_fid_eq(lu_object_fid(o), f));
- write_lock(&s->ls_guard);
+ cfs_write_lock(&s->ls_guard);
shadow = htable_lookup(s, bucket, f, waiter);
if (likely(shadow == NULL)) {
- hlist_add_head(&o->lo_header->loh_hash, bucket);
- list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
+ cfs_hlist_add_head(&o->lo_header->loh_hash, bucket);
+ cfs_list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
++ s->ls_busy;
++ s->ls_total;
shadow = o;
o = NULL;
} else
s->ls_stats.s_cache_race ++;
- write_unlock(&s->ls_guard);
+ cfs_write_unlock(&s->ls_guard);
if (o != NULL)
lu_object_free(env, o);
return shadow;
CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
result = ldt->ldt_ops->ldto_init(ldt);
if (result == 0)
- list_add(&ldt->ldt_linkage, &lu_device_types);
+ cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
return result;
}
EXPORT_SYMBOL(lu_device_type_init);
void lu_device_type_fini(struct lu_device_type *ldt)
{
- list_del_init(&ldt->ldt_linkage);
+ cfs_list_del_init(&ldt->ldt_linkage);
ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
{
struct lu_device_type *ldt;
- list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
+ cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
if (ldt->ldt_device_nr == 0)
ldt->ldt_ops->ldto_stop(ldt);
}
* Global list of all sites on this node
*/
static CFS_LIST_HEAD(lu_sites);
-static DECLARE_MUTEX(lu_sites_guard);
+static CFS_DECLARE_MUTEX(lu_sites_guard);
/**
* Global environment used by site shrinker.
for (i = 0; i < s->ls_hash_size; ++i) {
struct lu_object_header *h;
- struct hlist_node *scan;
+ cfs_hlist_node_t *scan;
- read_lock(&s->ls_guard);
- hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
+ cfs_read_lock(&s->ls_guard);
+ cfs_hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
- if (!list_empty(&h->loh_layers)) {
+ if (!cfs_list_empty(&h->loh_layers)) {
const struct lu_object *obj;
obj = lu_object_top(h);
} else
lu_object_header_print(env, cookie, printer, h);
}
- read_unlock(&s->ls_guard);
+ cfs_read_unlock(&s->ls_guard);
}
}
EXPORT_SYMBOL(lu_site_print);
*
* Size of lu_object is (arbitrary) taken as 1K (together with inode).
*/
- cache_size = num_physpages;
+ cache_size = cfs_num_physpages;
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
return bits;
}
-static struct lock_class_key lu_site_guard_class;
+static cfs_lock_class_key_t lu_site_guard_class;
/**
* Initialize site \a s, with \a d as the top level device.
ENTRY;
memset(s, 0, sizeof *s);
- rwlock_init(&s->ls_guard);
- lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
+ cfs_rwlock_init(&s->ls_guard);
+ cfs_lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
CFS_INIT_LIST_HEAD(&s->ls_lru);
CFS_INIT_LIST_HEAD(&s->ls_linkage);
cfs_waitq_init(&s->ls_marche_funebre);
s->ls_hash_mask = size - 1;
for (i = 0; i < size; i++)
- INIT_HLIST_HEAD(&s->ls_hash[i]);
+ CFS_INIT_HLIST_HEAD(&s->ls_hash[i]);
RETURN(0);
}
*/
void lu_site_fini(struct lu_site *s)
{
- LASSERT(list_empty(&s->ls_lru));
+ LASSERT(cfs_list_empty(&s->ls_lru));
LASSERT(s->ls_total == 0);
- down(&lu_sites_guard);
- list_del_init(&s->ls_linkage);
- up(&lu_sites_guard);
+ cfs_down(&lu_sites_guard);
+ cfs_list_del_init(&s->ls_linkage);
+ cfs_up(&lu_sites_guard);
if (s->ls_hash != NULL) {
int i;
for (i = 0; i < s->ls_hash_size; i++)
- LASSERT(hlist_empty(&s->ls_hash[i]));
+ LASSERT(cfs_hlist_empty(&s->ls_hash[i]));
cfs_free_large(s->ls_hash);
s->ls_hash = NULL;
}
int lu_site_init_finish(struct lu_site *s)
{
int result;
- down(&lu_sites_guard);
+ cfs_down(&lu_sites_guard);
result = lu_context_refill(&lu_shrink_env.le_ctx);
if (result == 0)
- list_add(&s->ls_linkage, &lu_sites);
- up(&lu_sites_guard);
+ cfs_list_add(&s->ls_linkage, &lu_sites);
+ cfs_up(&lu_sites_guard);
return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
*/
void lu_device_get(struct lu_device *d)
{
- atomic_inc(&d->ld_ref);
+ cfs_atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);
*/
void lu_device_put(struct lu_device *d)
{
- LASSERT(atomic_read(&d->ld_ref) > 0);
- atomic_dec(&d->ld_ref);
+ LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
+ cfs_atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
t->ldt_ops->ldto_start(t);
memset(d, 0, sizeof *d);
- atomic_set(&d->ld_ref, 0);
+ cfs_atomic_set(&d->ld_ref, 0);
d->ld_type = t;
lu_ref_init(&d->ld_reference);
return 0;
}
lu_ref_fini(&d->ld_reference);
- LASSERTF(atomic_read(&d->ld_ref) == 0,
- "Refcount is %u\n", atomic_read(&d->ld_ref));
+ LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
+ "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
LASSERT(t->ldt_device_nr > 0);
if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
t->ldt_ops->ldto_stop(t);
{
struct lu_device *dev = o->lo_dev;
- LASSERT(list_empty(&o->lo_linkage));
+ LASSERT(cfs_list_empty(&o->lo_linkage));
if (dev != NULL) {
lu_ref_del_at(&dev->ld_reference,
*/
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
- list_move(&o->lo_linkage, &h->loh_layers);
+ cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);
*/
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
- list_move(&o->lo_linkage, &before->lo_linkage);
+ cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
int lu_object_header_init(struct lu_object_header *h)
{
memset(h, 0, sizeof *h);
- atomic_set(&h->loh_ref, 1);
- INIT_HLIST_NODE(&h->loh_hash);
+ cfs_atomic_set(&h->loh_ref, 1);
+ CFS_INIT_HLIST_NODE(&h->loh_hash);
CFS_INIT_LIST_HEAD(&h->loh_lru);
CFS_INIT_LIST_HEAD(&h->loh_layers);
lu_ref_init(&h->loh_reference);
*/
void lu_object_header_fini(struct lu_object_header *h)
{
- LASSERT(list_empty(&h->loh_layers));
- LASSERT(list_empty(&h->loh_lru));
- LASSERT(hlist_unhashed(&h->loh_hash));
+ LASSERT(cfs_list_empty(&h->loh_layers));
+ LASSERT(cfs_list_empty(&h->loh_lru));
+ LASSERT(cfs_hlist_unhashed(&h->loh_hash));
lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
{
struct lu_object *o;
- list_for_each_entry(o, &h->loh_layers, lo_linkage) {
+ cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
if (o->lo_dev->ld_type == dtype)
return o;
}
/* purge again. */
lu_site_purge(env, site, ~0);
- if (!list_empty(&site->ls_lru) || site->ls_total != 0) {
+ if (!cfs_list_empty(&site->ls_lru) || site->ls_total != 0) {
/*
* Uh-oh, objects still exist.
*/
static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
-static spinlock_t lu_keys_guard = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;
/**
* Global counter incremented whenever key is registered, unregistered,
LASSERT(key->lct_owner != NULL);
result = -ENFILE;
- spin_lock(&lu_keys_guard);
+ cfs_spin_lock(&lu_keys_guard);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
if (lu_keys[i] == NULL) {
key->lct_index = i;
- atomic_set(&key->lct_used, 1);
+ cfs_atomic_set(&key->lct_used, 1);
lu_keys[i] = key;
lu_ref_init(&key->lct_reference);
result = 0;
break;
}
}
- spin_unlock(&lu_keys_guard);
+ cfs_spin_unlock(&lu_keys_guard);
return result;
}
EXPORT_SYMBOL(lu_context_key_register);
key = lu_keys[index];
LASSERT(key != NULL);
LASSERT(key->lct_fini != NULL);
- LASSERT(atomic_read(&key->lct_used) > 1);
+ LASSERT(cfs_atomic_read(&key->lct_used) > 1);
key->lct_fini(ctx, key, ctx->lc_value[index]);
lu_ref_del(&key->lct_reference, "ctx", ctx);
- atomic_dec(&key->lct_used);
+ cfs_atomic_dec(&key->lct_used);
LASSERT(key->lct_owner != NULL);
if (!(ctx->lc_tags & LCT_NOREF)) {
- LASSERT(module_refcount(key->lct_owner) > 0);
- module_put(key->lct_owner);
+ LASSERT(cfs_module_refcount(key->lct_owner) > 0);
+ cfs_module_put(key->lct_owner);
}
ctx->lc_value[index] = NULL;
}
*/
void lu_context_key_degister(struct lu_context_key *key)
{
- LASSERT(atomic_read(&key->lct_used) >= 1);
+ LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
lu_context_key_quiesce(key);
++key_set_version;
- spin_lock(&lu_keys_guard);
+ cfs_spin_lock(&lu_keys_guard);
key_fini(&lu_shrink_env.le_ctx, key->lct_index);
if (lu_keys[key->lct_index]) {
lu_keys[key->lct_index] = NULL;
lu_ref_fini(&key->lct_reference);
}
- spin_unlock(&lu_keys_guard);
+ cfs_spin_unlock(&lu_keys_guard);
- LASSERTF(atomic_read(&key->lct_used) == 1, "key has instances: %d\n",
- atomic_read(&key->lct_used));
+ LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
+ "key has instances: %d\n",
+ cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/*
* XXX memory barrier has to go here.
*/
- spin_lock(&lu_keys_guard);
- list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
+ cfs_spin_lock(&lu_keys_guard);
+ cfs_list_for_each_entry(ctx, &lu_context_remembered,
+ lc_remember)
key_fini(ctx, key->lct_index);
- spin_unlock(&lu_keys_guard);
+ cfs_spin_unlock(&lu_keys_guard);
++key_set_version;
}
}
{
int i;
- spin_lock(&lu_keys_guard);
+ cfs_spin_lock(&lu_keys_guard);
if (ctx->lc_value != NULL) {
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
key_fini(ctx, i);
ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
ctx->lc_value = NULL;
}
- spin_unlock(&lu_keys_guard);
+ cfs_spin_unlock(&lu_keys_guard);
}
static int keys_fill(struct lu_context *ctx)
LASSERT(key->lct_owner != NULL);
if (!(ctx->lc_tags & LCT_NOREF))
- try_module_get(key->lct_owner);
+ cfs_try_module_get(key->lct_owner);
lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
- atomic_inc(&key->lct_used);
+ cfs_atomic_inc(&key->lct_used);
/*
* This is the only place in the code, where an
* element of ctx->lc_value[] array is set to non-NULL
ctx->lc_state = LCS_INITIALIZED;
ctx->lc_tags = tags;
if (tags & LCT_REMEMBER) {
- spin_lock(&lu_keys_guard);
- list_add(&ctx->lc_remember, &lu_context_remembered);
- spin_unlock(&lu_keys_guard);
+ cfs_spin_lock(&lu_keys_guard);
+ cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+ cfs_spin_unlock(&lu_keys_guard);
} else
CFS_INIT_LIST_HEAD(&ctx->lc_remember);
return keys_init(ctx);
LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
ctx->lc_state = LCS_FINALIZED;
keys_fini(ctx);
- spin_lock(&lu_keys_guard);
- list_del_init(&ctx->lc_remember);
- spin_unlock(&lu_keys_guard);
+ cfs_spin_lock(&lu_keys_guard);
+ cfs_list_del_init(&ctx->lc_remember);
+ cfs_spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_fini);
}
EXPORT_SYMBOL(lu_env_refill);
-static struct shrinker *lu_site_shrinker = NULL;
+static struct cfs_shrinker *lu_site_shrinker = NULL;
#ifdef __KERNEL__
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
CDEBUG(D_INODE, "Shrink %d objects\n", nr);
}
- down(&lu_sites_guard);
- list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+ cfs_down(&lu_sites_guard);
+ cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
if (nr != 0) {
remain = lu_site_purge(&lu_shrink_env, s, remain);
/*
* Move just shrunk site to the tail of site list to
* assure shrinking fairness.
*/
- list_move_tail(&s->ls_linkage, &splice);
+ cfs_list_move_tail(&s->ls_linkage, &splice);
}
- read_lock(&s->ls_guard);
+ cfs_read_lock(&s->ls_guard);
cached += s->ls_total - s->ls_busy;
- read_unlock(&s->ls_guard);
+ cfs_read_unlock(&s->ls_guard);
if (nr && remain <= 0)
break;
}
- list_splice(&splice, lu_sites.prev);
- up(&lu_sites_guard);
+ cfs_list_splice(&splice, lu_sites.prev);
+ cfs_up(&lu_sites_guard);
cached = (cached / 100) * sysctl_vfs_cache_pressure;
if (nr == 0)
CERROR("[%i]: %p %x (%p,%p,%p) %i %i \"%s\"@%p\n",
i, key, key->lct_tags,
key->lct_init, key->lct_fini, key->lct_exit,
- key->lct_index, atomic_read(&key->lct_used),
+ key->lct_index, cfs_atomic_read(&key->lct_used),
key->lct_owner ? key->lct_owner->name : "",
key->lct_owner);
lu_ref_print(&key->lct_reference);
* conservatively. This should not be too bad, because this
* environment is global.
*/
- down(&lu_sites_guard);
+ cfs_down(&lu_sites_guard);
result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
- up(&lu_sites_guard);
+ cfs_up(&lu_sites_guard);
if (result != 0)
return result;
* inode, one for ea. Unfortunately setting this high value results in
* lu_object/inode cache consuming all the memory.
*/
- lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
+ lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
if (lu_site_shrinker == NULL)
return -ENOMEM;
#endif
lu_time_global_fini();
if (lu_site_shrinker != NULL) {
- remove_shrinker(lu_site_shrinker);
+ cfs_remove_shrinker(lu_site_shrinker);
lu_site_shrinker = NULL;
}
* Tear shrinker environment down _after_ de-registering
* lu_global_key, because the latter has a value in the former.
*/
- down(&lu_sites_guard);
+ cfs_down(&lu_sites_guard);
lu_env_fini(&lu_shrink_env);
- up(&lu_sites_guard);
+ cfs_up(&lu_sites_guard);
lu_ref_global_fini();
}
* an estimation anyway.
*/
for (i = 0, populated = 0; i < s->ls_hash_size; i++)
- populated += !hlist_empty(&s->ls_hash[i]);
+ populated += !cfs_hlist_empty(&s->ls_hash[i]);
return snprintf(page, count, "%d %d %d/%d %d %d %d %d %d %d %d\n",
s->ls_total,
\
if (unlikely(!(expr))) { \
lu_ref_print(__ref); \
- spin_unlock(&__ref->lf_guard); \
+ cfs_spin_unlock(&__ref->lf_guard); \
lu_ref_print_all(); \
- spin_lock(&__ref->lf_guard); \
+ cfs_spin_lock(&__ref->lf_guard); \
LASSERT(0); \
} \
} while (0)
struct lu_ref_link {
struct lu_ref *ll_ref;
- struct list_head ll_linkage;
+ cfs_list_t ll_linkage;
const char *ll_scope;
const void *ll_source;
};
* Protected by lu_ref_refs_guard.
*/
static CFS_LIST_HEAD(lu_ref_refs);
-static spinlock_t lu_ref_refs_guard;
+static cfs_spinlock_t lu_ref_refs_guard;
static struct lu_ref lu_ref_marker = {
- .lf_guard = SPIN_LOCK_UNLOCKED,
+ .lf_guard = CFS_SPIN_LOCK_UNLOCKED,
.lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
.lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
};
CERROR("lu_ref: %p %d %d %s:%d\n",
ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
- list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+ cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
}
}
{
struct lu_ref *ref;
- spin_lock(&lu_ref_refs_guard);
- list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
+ cfs_spin_lock(&lu_ref_refs_guard);
+ cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
if (lu_ref_is_marker(ref))
continue;
- spin_lock(&ref->lf_guard);
+ cfs_spin_lock(&ref->lf_guard);
lu_ref_print(ref);
- spin_unlock(&ref->lf_guard);
+ cfs_spin_unlock(&ref->lf_guard);
}
- spin_unlock(&lu_ref_refs_guard);
+ cfs_spin_unlock(&lu_ref_refs_guard);
}
EXPORT_SYMBOL(lu_ref_print_all);
ref->lf_refs = 0;
ref->lf_func = func;
ref->lf_line = line;
- spin_lock_init(&ref->lf_guard);
+ cfs_spin_lock_init(&ref->lf_guard);
CFS_INIT_LIST_HEAD(&ref->lf_list);
- spin_lock(&lu_ref_refs_guard);
- list_add(&ref->lf_linkage, &lu_ref_refs);
- spin_unlock(&lu_ref_refs_guard);
+ cfs_spin_lock(&lu_ref_refs_guard);
+ cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
+ cfs_spin_unlock(&lu_ref_refs_guard);
}
EXPORT_SYMBOL(lu_ref_init_loc);
void lu_ref_fini(struct lu_ref *ref)
{
- REFASSERT(ref, list_empty(&ref->lf_list));
+ REFASSERT(ref, cfs_list_empty(&ref->lf_list));
REFASSERT(ref, ref->lf_refs == 0);
- spin_lock(&lu_ref_refs_guard);
- list_del_init(&ref->lf_linkage);
- spin_unlock(&lu_ref_refs_guard);
+ cfs_spin_lock(&lu_ref_refs_guard);
+ cfs_list_del_init(&ref->lf_linkage);
+ cfs_spin_unlock(&lu_ref_refs_guard);
}
EXPORT_SYMBOL(lu_ref_fini);
link->ll_ref = ref;
link->ll_scope = scope;
link->ll_source = source;
- spin_lock(&ref->lf_guard);
- list_add_tail(&link->ll_linkage, &ref->lf_list);
+ cfs_spin_lock(&ref->lf_guard);
+ cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
ref->lf_refs++;
- spin_unlock(&ref->lf_guard);
+ cfs_spin_unlock(&ref->lf_guard);
}
}
if (link == NULL) {
- spin_lock(&ref->lf_guard);
+ cfs_spin_lock(&ref->lf_guard);
ref->lf_failed++;
- spin_unlock(&ref->lf_guard);
+ cfs_spin_unlock(&ref->lf_guard);
link = ERR_PTR(-ENOMEM);
}
struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
const void *source)
{
- might_sleep();
+ cfs_might_sleep();
return lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source);
}
EXPORT_SYMBOL(lu_ref_add);
unsigned iterations;
iterations = 0;
- list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+ cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
++iterations;
if (lu_ref_link_eq(link, scope, source)) {
if (iterations > lu_ref_chain_max_length) {
{
struct lu_ref_link *link;
- spin_lock(&ref->lf_guard);
+ cfs_spin_lock(&ref->lf_guard);
link = lu_ref_find(ref, scope, source);
if (link != NULL) {
- list_del(&link->ll_linkage);
+ cfs_list_del(&link->ll_linkage);
ref->lf_refs--;
- spin_unlock(&ref->lf_guard);
+ cfs_spin_unlock(&ref->lf_guard);
OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
} else {
REFASSERT(ref, ref->lf_failed > 0);
ref->lf_failed--;
- spin_unlock(&ref->lf_guard);
+ cfs_spin_unlock(&ref->lf_guard);
}
}
EXPORT_SYMBOL(lu_ref_del);
const char *scope,
const void *source0, const void *source1)
{
- spin_lock(&ref->lf_guard);
+ cfs_spin_lock(&ref->lf_guard);
if (link != ERR_PTR(-ENOMEM)) {
REFASSERT(ref, link->ll_ref == ref);
REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
} else {
REFASSERT(ref, ref->lf_failed > 0);
}
- spin_unlock(&ref->lf_guard);
+ cfs_spin_unlock(&ref->lf_guard);
}
EXPORT_SYMBOL(lu_ref_set_at);
const char *scope, const void *source)
{
if (link != ERR_PTR(-ENOMEM)) {
- spin_lock(&ref->lf_guard);
+ cfs_spin_lock(&ref->lf_guard);
REFASSERT(ref, link->ll_ref == ref);
REFASSERT(ref, lu_ref_link_eq(link, scope, source));
- list_del(&link->ll_linkage);
+ cfs_list_del(&link->ll_linkage);
ref->lf_refs--;
- spin_unlock(&ref->lf_guard);
+ cfs_spin_unlock(&ref->lf_guard);
OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
} else {
- spin_lock(&ref->lf_guard);
+ cfs_spin_lock(&ref->lf_guard);
REFASSERT(ref, ref->lf_failed > 0);
ref->lf_failed--;
- spin_unlock(&ref->lf_guard);
+ cfs_spin_unlock(&ref->lf_guard);
}
}
EXPORT_SYMBOL(lu_ref_del_at);
{
struct lu_ref *ref = seq->private;
- spin_lock(&lu_ref_refs_guard);
- if (list_empty(&ref->lf_linkage))
+ cfs_spin_lock(&lu_ref_refs_guard);
+ if (cfs_list_empty(&ref->lf_linkage))
ref = NULL;
- spin_unlock(&lu_ref_refs_guard);
+ cfs_spin_unlock(&lu_ref_refs_guard);
return ref;
}
struct lu_ref *next;
LASSERT(seq->private == p);
- LASSERT(!list_empty(&ref->lf_linkage));
+ LASSERT(!cfs_list_empty(&ref->lf_linkage));
- spin_lock(&lu_ref_refs_guard);
- next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+ cfs_spin_lock(&lu_ref_refs_guard);
+ next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
if (&next->lf_linkage == &lu_ref_refs) {
p = NULL;
} else {
(*pos)++;
- list_move(&ref->lf_linkage, &next->lf_linkage);
+ cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
}
- spin_unlock(&lu_ref_refs_guard);
+ cfs_spin_unlock(&lu_ref_refs_guard);
return p;
}
struct lu_ref *ref = p;
struct lu_ref *next;
- spin_lock(&lu_ref_refs_guard);
- next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+ cfs_spin_lock(&lu_ref_refs_guard);
+ next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
- spin_unlock(&lu_ref_refs_guard);
+ cfs_spin_unlock(&lu_ref_refs_guard);
return 0;
}
/* print the entry */
- spin_lock(&next->lf_guard);
+ cfs_spin_lock(&next->lf_guard);
seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
next, next->lf_refs, next->lf_failed,
next->lf_func, next->lf_line);
struct lu_ref_link *link;
int i = 0;
- list_for_each_entry(link, &next->lf_list, ll_linkage)
+ cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
seq_printf(seq, " #%d link: %s %p\n",
i++, link->ll_scope, link->ll_source);
}
- spin_unlock(&next->lf_guard);
- spin_unlock(&lu_ref_refs_guard);
+ cfs_spin_unlock(&next->lf_guard);
+ cfs_spin_unlock(&lu_ref_refs_guard);
return 0;
}
result = seq_open(file, &lu_ref_seq_ops);
if (result == 0) {
- spin_lock(&lu_ref_refs_guard);
- if (!list_empty(&marker->lf_linkage))
+ cfs_spin_lock(&lu_ref_refs_guard);
+ if (!cfs_list_empty(&marker->lf_linkage))
result = -EAGAIN;
else
- list_add(&marker->lf_linkage, &lu_ref_refs);
- spin_unlock(&lu_ref_refs_guard);
+ cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
+ cfs_spin_unlock(&lu_ref_refs_guard);
if (result == 0) {
struct seq_file *f = file->private_data;
{
struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
- spin_lock(&lu_ref_refs_guard);
- list_del_init(&ref->lf_linkage);
- spin_unlock(&lu_ref_refs_guard);
+ cfs_spin_lock(&lu_ref_refs_guard);
+ cfs_list_del_init(&ref->lf_linkage);
+ cfs_spin_unlock(&lu_ref_refs_guard);
return seq_release(inode, file);
}
"lu_ref tracking is enabled. Performance isn't.\n");
- spin_lock_init(&lu_ref_refs_guard);
+ cfs_spin_lock_init(&lu_ref_refs_guard);
result = lu_kmem_init(lu_ref_caches);
#if defined(__KERNEL__) && defined(LPROCFS)
struct timeval now;
unsigned long long ret;
- do_gettimeofday(&now);
+ cfs_gettimeofday(&now);
ret = now.tv_sec;
ret *= 1000000;
ret += now.tv_usec;
#include <lustre_lib.h>
#if !defined(HAVE_RCU) || !defined(__KERNEL__)
-# define list_add_rcu list_add
-# define list_del_rcu list_del
-# define list_for_each_rcu list_for_each
-# define list_for_each_safe_rcu list_for_each_safe
-# define rcu_read_lock() spin_lock(&bucket->lock)
-# define rcu_read_unlock() spin_unlock(&bucket->lock)
+# define list_add_rcu cfs_list_add
+# define list_del_rcu cfs_list_del
+# define list_for_each_rcu cfs_list_for_each
+# define list_for_each_safe_rcu cfs_list_for_each_safe
+# define list_for_each_entry_rcu cfs_list_for_each_entry
+# define rcu_read_lock() cfs_spin_lock(&bucket->lock)
+# define rcu_read_unlock() cfs_spin_unlock(&bucket->lock)
#endif /* ifndef HAVE_RCU */
static __u64 handle_base;
#define HANDLE_INCR 7
-static spinlock_t handle_base_lock;
+static cfs_spinlock_t handle_base_lock;
static struct handle_bucket {
- spinlock_t lock;
- struct list_head head;
+ cfs_spinlock_t lock;
+ cfs_list_t head;
} *handle_hash;
-static atomic_t handle_count = ATOMIC_INIT(0);
+static cfs_atomic_t handle_count = CFS_ATOMIC_INIT(0);
#ifdef __arch_um__
/* For unknown reason, UML uses kmalloc rather than vmalloc to allocate
ENTRY;
LASSERT(h != NULL);
- LASSERT(list_empty(&h->h_link));
+ LASSERT(cfs_list_empty(&h->h_link));
/*
* This is fast, but simplistic cookie generation algorithm, it will
* need a re-do at some point in the future for security.
*/
- spin_lock(&handle_base_lock);
+ cfs_spin_lock(&handle_base_lock);
handle_base += HANDLE_INCR;
h->h_cookie = handle_base;
CWARN("The universe has been exhausted: cookie wrap-around.\n");
handle_base += HANDLE_INCR;
}
- spin_unlock(&handle_base_lock);
+ cfs_spin_unlock(&handle_base_lock);
- atomic_inc(&handle_count);
+ cfs_atomic_inc(&handle_count);
h->h_addref = cb;
- spin_lock_init(&h->h_lock);
+ cfs_spin_lock_init(&h->h_lock);
bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
- spin_lock(&bucket->lock);
+ cfs_spin_lock(&bucket->lock);
list_add_rcu(&h->h_link, &bucket->head);
h->h_in = 1;
- spin_unlock(&bucket->lock);
+ cfs_spin_unlock(&bucket->lock);
CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
h, h->h_cookie);
static void class_handle_unhash_nolock(struct portals_handle *h)
{
- if (list_empty(&h->h_link)) {
+ if (cfs_list_empty(&h->h_link)) {
CERROR("removing an already-removed handle ("LPX64")\n",
h->h_cookie);
return;
CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
h, h->h_cookie);
- spin_lock(&h->h_lock);
+ cfs_spin_lock(&h->h_lock);
if (h->h_in == 0) {
- spin_unlock(&h->h_lock);
+ cfs_spin_unlock(&h->h_lock);
return;
}
h->h_in = 0;
- spin_unlock(&h->h_lock);
+ cfs_spin_unlock(&h->h_lock);
list_del_rcu(&h->h_link);
}
struct handle_bucket *bucket;
bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
- spin_lock(&bucket->lock);
+ cfs_spin_lock(&bucket->lock);
class_handle_unhash_nolock(h);
- spin_unlock(&bucket->lock);
+ cfs_spin_unlock(&bucket->lock);
- atomic_dec(&handle_count);
+ cfs_atomic_dec(&handle_count);
}
void class_handle_hash_back(struct portals_handle *h)
bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
- atomic_inc(&handle_count);
- spin_lock(&bucket->lock);
+ cfs_atomic_inc(&handle_count);
+ cfs_spin_lock(&bucket->lock);
list_add_rcu(&h->h_link, &bucket->head);
h->h_in = 1;
- spin_unlock(&bucket->lock);
+ cfs_spin_unlock(&bucket->lock);
EXIT;
}
if (h->h_cookie != cookie)
continue;
- spin_lock(&h->h_lock);
+ cfs_spin_lock(&h->h_lock);
if (likely(h->h_in != 0)) {
h->h_addref(h);
retval = h;
}
- spin_unlock(&h->h_lock);
+ cfs_spin_unlock(&h->h_lock);
break;
}
rcu_read_unlock();
RETURN(retval);
}
-void class_handle_free_cb(struct rcu_head *rcu)
+void class_handle_free_cb(cfs_rcu_head_t *rcu)
{
struct portals_handle *h = RCU2HANDLE(rcu);
if (h->h_free_cb) {
if (handle_hash == NULL)
return -ENOMEM;
- spin_lock_init(&handle_base_lock);
+ cfs_spin_lock_init(&handle_base_lock);
for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
bucket--) {
CFS_INIT_LIST_HEAD(&bucket->head);
- spin_lock_init(&bucket->lock);
+ cfs_spin_lock_init(&bucket->lock);
}
/** bug 21430: add randomness to the initial base */
ll_get_random_bytes(seed, sizeof(seed));
- do_gettimeofday(&tv);
+ cfs_gettimeofday(&tv);
ll_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
ll_get_random_bytes(&handle_base, sizeof(handle_base));
for (i = 0; i < HANDLE_HASH_SIZE; i++) {
struct portals_handle *h;
- spin_lock(&handle_hash[i].lock);
+ cfs_spin_lock(&handle_hash[i].lock);
list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
CERROR("force clean handle "LPX64" addr %p addref %p\n",
h->h_cookie, h, h->h_addref);
class_handle_unhash_nolock(h);
}
- spin_unlock(&handle_hash[i].lock);
+ cfs_spin_unlock(&handle_hash[i].lock);
}
}
int count;
LASSERT(handle_hash != NULL);
- count = atomic_read(&handle_count);
+ count = cfs_atomic_read(&handle_count);
if (count != 0) {
CERROR("handle_count at cleanup: %d\n", count);
cleanup_all_handles();
OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
handle_hash = NULL;
- if (atomic_read(&handle_count))
- CERROR("leaked %d handles\n", atomic_read(&handle_count));
+ if (cfs_atomic_read(&handle_count))
+ CERROR("leaked %d handles\n", cfs_atomic_read(&handle_count));
}
#include <lprocfs_status.h>
struct uuid_nid_data {
- struct list_head un_list;
+ cfs_list_t un_list;
lnet_nid_t un_nid;
char *un_uuid;
int un_count; /* nid/uuid pair refcount */
};
/* FIXME: This should probably become more elegant than a global linked list */
-static struct list_head g_uuid_list;
-static spinlock_t g_uuid_lock;
+static cfs_list_t g_uuid_list;
+static cfs_spinlock_t g_uuid_lock;
void class_init_uuidlist(void)
{
CFS_INIT_LIST_HEAD(&g_uuid_list);
- spin_lock_init(&g_uuid_lock);
+ cfs_spin_lock_init(&g_uuid_lock);
}
void class_exit_uuidlist(void)
int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
- spin_lock (&g_uuid_lock);
+ cfs_spin_lock (&g_uuid_lock);
- list_for_each(tmp, &g_uuid_list) {
+ cfs_list_for_each(tmp, &g_uuid_list) {
struct uuid_nid_data *data =
- list_entry(tmp, struct uuid_nid_data, un_list);
+ cfs_list_entry(tmp, struct uuid_nid_data, un_list);
if (!strcmp(data->un_uuid, uuid) &&
index-- == 0) {
*peer_nid = data->un_nid;
- spin_unlock (&g_uuid_lock);
+ cfs_spin_unlock (&g_uuid_lock);
return 0;
}
}
- spin_unlock (&g_uuid_lock);
+ cfs_spin_unlock (&g_uuid_lock);
return -ENOENT;
}
data->un_nid = nid;
data->un_count = 1;
- spin_lock (&g_uuid_lock);
+ cfs_spin_lock (&g_uuid_lock);
- list_for_each_entry(entry, &g_uuid_list, un_list) {
+ cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
if (entry->un_nid == nid &&
(strcmp(entry->un_uuid, uuid) == 0)) {
found++;
}
}
if (!found)
- list_add(&data->un_list, &g_uuid_list);
- spin_unlock (&g_uuid_lock);
+ cfs_list_add(&data->un_list, &g_uuid_list);
+ cfs_spin_unlock (&g_uuid_lock);
if (found) {
CDEBUG(D_INFO, "found uuid %s %s cnt=%d\n", uuid,
struct uuid_nid_data *data;
int found = 0;
- spin_lock (&g_uuid_lock);
+ cfs_spin_lock (&g_uuid_lock);
if (uuid == NULL) {
- list_splice_init(&g_uuid_list, &deathrow);
+ cfs_list_splice_init(&g_uuid_list, &deathrow);
found = 1;
} else {
- list_for_each_entry(data, &g_uuid_list, un_list) {
+ cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
if (strcmp(data->un_uuid, uuid))
continue;
--data->un_count;
LASSERT(data->un_count >= 0);
if (data->un_count == 0)
- list_move(&data->un_list, &deathrow);
+ cfs_list_move(&data->un_list, &deathrow);
found = 1;
break;
}
}
- spin_unlock (&g_uuid_lock);
+ cfs_spin_unlock (&g_uuid_lock);
if (!found) {
if (uuid)
return -EINVAL;
}
- while (!list_empty(&deathrow)) {
- data = list_entry(deathrow.next, struct uuid_nid_data, un_list);
- list_del(&data->un_list);
+ while (!cfs_list_empty(&deathrow)) {
+ data = cfs_list_entry(deathrow.next, struct uuid_nid_data,
+ un_list);
+ cfs_list_del(&data->un_list);
CDEBUG(D_INFO, "del uuid %s %s\n", data->un_uuid,
libcfs_nid2str(data->un_nid));
/** List head to hold list of objects to be created. */
-static struct list_head llo_lobj_list;
+static cfs_list_t llo_lobj_list;
/** Lock to protect list manipulations */
-static struct mutex llo_lock;
+static cfs_mutex_t llo_lock;
/**
* Structure used to maintain state of path parsing.
void llo_local_obj_register(struct lu_local_obj_desc *llod)
{
- mutex_lock(&llo_lock);
- list_add_tail(&llod->llod_linkage, &llo_lobj_list);
- mutex_unlock(&llo_lock);
+ cfs_mutex_lock(&llo_lock);
+ cfs_list_add_tail(&llod->llod_linkage, &llo_lobj_list);
+ cfs_mutex_unlock(&llo_lock);
}
EXPORT_SYMBOL(llo_local_obj_register);
void llo_local_obj_unregister(struct lu_local_obj_desc *llod)
{
- mutex_lock(&llo_lock);
- list_del(&llod->llod_linkage);
- mutex_unlock(&llo_lock);
+ cfs_mutex_lock(&llo_lock);
+ cfs_list_del(&llod->llod_linkage);
+ cfs_mutex_unlock(&llo_lock);
}
EXPORT_SYMBOL(llo_local_obj_unregister);
int rc = 0;
fid = &info->lti_cfid;
- mutex_lock(&llo_lock);
+ cfs_mutex_lock(&llo_lock);
- list_for_each_entry(scan, &llo_lobj_list, llod_linkage) {
+ cfs_list_for_each_entry(scan, &llo_lobj_list, llod_linkage) {
lu_local_obj_fid(fid, scan->llod_oid);
dir = "";
if (scan->llod_dir)
}
out:
- mutex_unlock(&llo_lock);
+ cfs_mutex_unlock(&llo_lock);
return rc;
}
int result;
CFS_INIT_LIST_HEAD(&llo_lobj_list);
- mutex_init(&llo_lock);
+ cfs_mutex_init(&llo_lock);
LU_CONTEXT_KEY_INIT(&llod_key);
result = lu_context_key_register(&llod_key);
void llo_global_fini(void)
{
lu_context_key_degister(&llod_key);
- LASSERT(list_empty(&llo_lobj_list));
+ LASSERT(cfs_list_empty(&llo_lobj_list));
}
LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0,
"%p obd_name %s != %s\n", obd, obd->obd_name, name);
- rwlock_init(&obd->obd_pool_lock);
+ cfs_rwlock_init(&obd->obd_pool_lock);
obd->obd_pool_limit = 0;
obd->obd_pool_slv = 0;
CFS_INIT_LIST_HEAD(&obd->obd_delayed_exports);
CFS_INIT_LIST_HEAD(&obd->obd_exports_timed);
CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
- spin_lock_init(&obd->obd_nid_lock);
- spin_lock_init(&obd->obd_dev_lock);
- sema_init(&obd->obd_dev_sem, 1);
- spin_lock_init(&obd->obd_osfs_lock);
+ cfs_spin_lock_init(&obd->obd_nid_lock);
+ cfs_spin_lock_init(&obd->obd_dev_lock);
+ cfs_sema_init(&obd->obd_dev_sem, 1);
+ cfs_spin_lock_init(&obd->obd_osfs_lock);
/* obd->obd_osfs_age must be set to a value in the distant
* past to guarantee a fresh statfs is fetched on mount. */
obd->obd_osfs_age = cfs_time_shift_64(-1000);
/* XXX belongs in setup not attach */
- init_rwsem(&obd->obd_observer_link_sem);
+ cfs_init_rwsem(&obd->obd_observer_link_sem);
/* recovery data */
cfs_init_timer(&obd->obd_recovery_timer);
- spin_lock_init(&obd->obd_processing_task_lock);
+ cfs_spin_lock_init(&obd->obd_processing_task_lock);
cfs_waitq_init(&obd->obd_next_transno_waitq);
cfs_waitq_init(&obd->obd_evict_inprogress_waitq);
CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
}
/* Detach drops this */
- spin_lock(&obd->obd_dev_lock);
- atomic_set(&obd->obd_refcount, 1);
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_atomic_set(&obd->obd_refcount, 1);
+ cfs_spin_unlock(&obd->obd_dev_lock);
lu_ref_init(&obd->obd_reference);
lu_ref_add(&obd->obd_reference, "attach", obd);
obd->obd_attached = 1;
CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n",
- obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
+ obd->obd_minor, typename, cfs_atomic_read(&obd->obd_refcount));
RETURN(0);
out:
if (obd != NULL) {
}
/* is someone else setting us up right now? (attach inits spinlock) */
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
if (obd->obd_starting) {
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
CERROR("Device %d setup in progress (type %s)\n",
obd->obd_minor, obd->obd_type->typ_name);
RETURN(-EEXIST);
obd->obd_uuid_hash = NULL;
obd->obd_nid_hash = NULL;
obd->obd_nid_stats_hash = NULL;
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
/* create an uuid-export lustre hash */
obd->obd_uuid_hash = cfs_hash_create("UUID_HASH",
GOTO(err_hash, err = PTR_ERR(exp));
obd->obd_self_export = exp;
- list_del_init(&exp->exp_obd_chain_timed);
+ cfs_list_del_init(&exp->exp_obd_chain_timed);
class_export_put(exp);
err = obd_setup(obd, lcfg);
obd->obd_set_up = 1;
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
/* cleanup drops this */
class_incref(obd, "setup", obd);
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n",
obd->obd_name, obd->obd_uuid.uuid);
RETURN(-EBUSY);
}
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
if (!obd->obd_attached) {
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
CERROR("OBD device %d not attached\n", obd->obd_minor);
RETURN(-ENODEV);
}
obd->obd_attached = 0;
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n",
obd->obd_name, obd->obd_uuid.uuid);
RETURN(-ENODEV);
}
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
if (obd->obd_stopping) {
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
CERROR("OBD %d already stopping\n", obd->obd_minor);
RETURN(-ENODEV);
}
/* Leave this on forever */
obd->obd_stopping = 1;
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) {
for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++)
/* The three references that should be remaining are the
* obd_self_export and the attach and setup references. */
- if (atomic_read(&obd->obd_refcount) > 3) {
+ if (cfs_atomic_read(&obd->obd_refcount) > 3) {
/* refcounf - 3 might be the number of real exports
(excluding self export). But class_incref is called
by other things as well, so don't count on it. */
CDEBUG(D_IOCTL, "%s: forcing exports to disconnect: %d\n",
- obd->obd_name, atomic_read(&obd->obd_refcount) - 3);
+ obd->obd_name, cfs_atomic_read(&obd->obd_refcount) - 3);
dump_exports(obd, 0);
class_disconnect_exports(obd);
}
const char *scope, const void *source)
{
lu_ref_add_atomic(&obd->obd_reference, scope, source);
- atomic_inc(&obd->obd_refcount);
+ cfs_atomic_inc(&obd->obd_refcount);
CDEBUG(D_INFO, "incref %s (%p) now %d\n", obd->obd_name, obd,
- atomic_read(&obd->obd_refcount));
+ cfs_atomic_read(&obd->obd_refcount));
return obd;
}
int err;
int refs;
- spin_lock(&obd->obd_dev_lock);
- atomic_dec(&obd->obd_refcount);
- refs = atomic_read(&obd->obd_refcount);
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_atomic_dec(&obd->obd_refcount);
+ refs = cfs_atomic_read(&obd->obd_refcount);
+ cfs_spin_unlock(&obd->obd_dev_lock);
lu_ref_del(&obd->obd_reference, scope, source);
CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
/* All exports have been destroyed; there should
be no more in-progress ops by this point.*/
- spin_lock(&obd->obd_self_export->exp_lock);
+ cfs_spin_lock(&obd->obd_self_export->exp_lock);
obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
- spin_unlock(&obd->obd_self_export->exp_lock);
+ cfs_spin_unlock(&obd->obd_self_export->exp_lock);
/* note that we'll recurse into class_decref again */
class_unlink_export(obd->obd_self_export);
struct lustre_profile *lprof;
ENTRY;
- list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
+ cfs_list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
if (!strcmp(lprof->lp_profile, prof)) {
RETURN(lprof);
}
memcpy(lprof->lp_md, mdc, mdclen);
}
- list_add(&lprof->lp_list, &lustre_profile_list);
+ cfs_list_add(&lprof->lp_list, &lustre_profile_list);
RETURN(err);
out:
lprof = class_get_profile(prof);
if (lprof) {
- list_del(&lprof->lp_list);
+ cfs_list_del(&lprof->lp_list);
OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
if (lprof->lp_md)
struct lustre_profile *lprof, *n;
ENTRY;
- list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
- list_del(&lprof->lp_list);
+ cfs_list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
+ cfs_list_del(&lprof->lp_list);
OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
if (lprof->lp_md)
}
static void *
-uuid_key(struct hlist_node *hnode)
+uuid_key(cfs_hlist_node_t *hnode)
{
struct obd_export *exp;
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
RETURN(&exp->exp_client_uuid);
}
* state with this function
*/
static int
-uuid_compare(void *key, struct hlist_node *hnode)
+uuid_compare(void *key, cfs_hlist_node_t *hnode)
{
struct obd_export *exp;
LASSERT(key);
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
RETURN(obd_uuid_equals((struct obd_uuid *)key,&exp->exp_client_uuid) &&
!exp->exp_failed);
}
static void *
-uuid_export_get(struct hlist_node *hnode)
+uuid_export_get(cfs_hlist_node_t *hnode)
{
struct obd_export *exp;
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
class_export_get(exp);
RETURN(exp);
}
static void *
-uuid_export_put(struct hlist_node *hnode)
+uuid_export_put(cfs_hlist_node_t *hnode)
{
struct obd_export *exp;
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
class_export_put(exp);
RETURN(exp);
}
static void *
-nid_key(struct hlist_node *hnode)
+nid_key(cfs_hlist_node_t *hnode)
{
struct obd_export *exp;
- exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
RETURN(&exp->exp_connection->c_peer.nid);
}
* state with this function
*/
static int
-nid_compare(void *key, struct hlist_node *hnode)
+nid_compare(void *key, cfs_hlist_node_t *hnode)
{
struct obd_export *exp;
LASSERT(key);
- exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
RETURN(exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key &&
!exp->exp_failed);
}
static void *
-nid_export_get(struct hlist_node *hnode)
+nid_export_get(cfs_hlist_node_t *hnode)
{
struct obd_export *exp;
- exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
class_export_get(exp);
RETURN(exp);
}
static void *
-nid_export_put(struct hlist_node *hnode)
+nid_export_put(cfs_hlist_node_t *hnode)
{
struct obd_export *exp;
- exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
class_export_put(exp);
RETURN(exp);
*/
static void *
-nidstats_key(struct hlist_node *hnode)
+nidstats_key(cfs_hlist_node_t *hnode)
{
struct nid_stat *ns;
- ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+ ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
RETURN(&ns->nid);
}
static int
-nidstats_compare(void *key, struct hlist_node *hnode)
+nidstats_compare(void *key, cfs_hlist_node_t *hnode)
{
RETURN(*(lnet_nid_t *)nidstats_key(hnode) == *(lnet_nid_t *)key);
}
static void *
-nidstats_get(struct hlist_node *hnode)
+nidstats_get(cfs_hlist_node_t *hnode)
{
struct nid_stat *ns;
- ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+ ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
nidstat_getref(ns);
RETURN(ns);
}
static void *
-nidstats_put(struct hlist_node *hnode)
+nidstats_put(cfs_hlist_node_t *hnode)
{
struct nid_stat *ns;
- ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+ ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
nidstat_putref(ns);
RETURN(ns);
/*********** mount lookup *********/
-DECLARE_MUTEX(lustre_mount_info_lock);
+CFS_DECLARE_MUTEX(lustre_mount_info_lock);
static CFS_LIST_HEAD(server_mount_info_list);
static struct lustre_mount_info *server_find_mount(const char *name)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct lustre_mount_info *lmi;
ENTRY;
- list_for_each(tmp, &server_mount_info_list) {
- lmi = list_entry(tmp, struct lustre_mount_info, lmi_list_chain);
+ cfs_list_for_each(tmp, &server_mount_info_list) {
+ lmi = cfs_list_entry(tmp, struct lustre_mount_info,
+ lmi_list_chain);
if (strcmp(name, lmi->lmi_name) == 0)
RETURN(lmi);
}
}
strcpy(name_cp, name);
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
if (server_find_mount(name)) {
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
OBD_FREE(lmi, sizeof(*lmi));
OBD_FREE(name_cp, strlen(name) + 1);
CERROR("Already registered %s\n", name);
lmi->lmi_name = name_cp;
lmi->lmi_sb = sb;
lmi->lmi_mnt = mnt;
- list_add(&lmi->lmi_list_chain, &server_mount_info_list);
+ cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
CDEBUG(D_MOUNT, "reg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
RETURN(0);
}
struct lustre_mount_info *lmi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
lmi = server_find_mount(name);
if (!lmi) {
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
CERROR("%s not registered\n", name);
RETURN(-ENOENT);
}
CDEBUG(D_MOUNT, "dereg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
- list_del(&lmi->lmi_list_chain);
+ cfs_list_del(&lmi->lmi_list_chain);
OBD_FREE(lmi, sizeof(*lmi));
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
RETURN(0);
}
struct lustre_sb_info *lsi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(NULL);
}
lsi = s2lsi(lmi->lmi_sb);
mntget(lmi->lmi_mnt);
- atomic_inc(&lsi->lsi_mounts);
+ cfs_atomic_inc(&lsi->lsi_mounts);
CDEBUG(D_MOUNT, "get_mnt %p from %s, refs=%d, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lsi->lsi_mounts),
- atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts),
+ cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
RETURN(lmi);
}
struct lustre_mount_info *lmi;
ENTRY;
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
if (!lmi)
CERROR("Can't find mount for %s\n", name);
static void unlock_mntput(struct vfsmount *mnt)
{
if (kernel_locked()) {
- unlock_kernel();
+ cfs_unlock_kernel();
mntput(mnt);
- lock_kernel();
+ cfs_lock_kernel();
} else {
mntput(mnt);
}
/* This might be the last one, can't deref after this */
unlock_mntput(mnt);
- down(&lustre_mount_info_lock);
+ cfs_down(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- up(&lustre_mount_info_lock);
+ cfs_up(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(-ENOENT);
LASSERT(lmi->lmi_mnt == mnt);
CDEBUG(D_MOUNT, "put_mnt %p from %s, refs=%d, vfscount=%d\n",
- lmi->lmi_mnt, name, atomic_read(&lsi->lsi_mounts), count);
+ lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts), count);
if (lustre_put_lsi(lmi->lmi_sb)) {
CDEBUG(D_MOUNT, "Last put of mnt %p from %s, vfscount=%d\n",
RETURN(rc);
}
-DECLARE_MUTEX(mgc_start_lock);
+CFS_DECLARE_MUTEX(mgc_start_lock);
/** Set up a mgc obd to process startup logs
*
mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
- mutex_down(&mgc_start_lock);
+ cfs_mutex_down(&mgc_start_lock);
obd = class_name2obd(mgcname);
if (obd && !obd->obd_stopping) {
GOTO(out_free, rc);
/* Re-using an existing MGC */
- atomic_inc(&obd->u.cli.cl_mgc_refcount);
+ cfs_atomic_inc(&obd->u.cli.cl_mgc_refcount);
recov_bk = 0;
/* If we are restarting the MGS, don't try to keep the MGC's
/* Keep a refcount of servers/clients who started with "mount",
so we know when we can get rid of the mgc. */
- atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
+ cfs_atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
/* Try all connections, but only once. */
recov_bk = 1;
to the same mgc.*/
lsi->lsi_mgc = obd;
out_free:
- mutex_up(&mgc_start_lock);
+ cfs_mutex_up(&mgc_start_lock);
if (mgcname)
OBD_FREE(mgcname, len);
RETURN(-ENOENT);
lsi->lsi_mgc = NULL;
- mutex_down(&mgc_start_lock);
- LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
- if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
+ cfs_mutex_down(&mgc_start_lock);
+ LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
+ if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
/* This is not fatal, every client that stops
will call in here. */
CDEBUG(D_MOUNT, "mgc still has %d references.\n",
- atomic_read(&obd->u.cli.cl_mgc_refcount));
+ cfs_atomic_read(&obd->u.cli.cl_mgc_refcount));
GOTO(out, rc = -EBUSY);
}
OBD_FREE(niduuid, len);
/* class_import_put will get rid of the additional connections */
- mutex_up(&mgc_start_lock);
+ cfs_mutex_up(&mgc_start_lock);
RETURN(rc);
}
RETURN(rc);
}
-DECLARE_MUTEX(server_start_lock);
+CFS_DECLARE_MUTEX(server_start_lock);
/* Stop MDS/OSS if nobody is using them */
static int server_stop_servers(int lddflags, int lsiflags)
int rc = 0;
ENTRY;
- mutex_down(&server_start_lock);
+ cfs_mutex_down(&server_start_lock);
/* Either an MDT or an OST or neither */
/* if this was an MDT, and there are no more MDT's, clean up the MDS */
rc = err;
}
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
RETURN(rc);
}
/* If we're an MDT, make sure the global MDS is running */
if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_MDT) {
/* make sure the MDS is started */
- mutex_down(&server_start_lock);
+ cfs_mutex_down(&server_start_lock);
obd = class_name2obd(LUSTRE_MDS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_MDS_OBDNAME,
LUSTRE_MDS_OBDNAME"_uuid",
0, 0);
if (rc) {
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
CERROR("failed to start MDS: %d\n", rc);
RETURN(rc);
}
}
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
}
#endif
/* If we're an OST, make sure the global OSS is running */
if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_OST) {
/* make sure OSS is started */
- mutex_down(&server_start_lock);
+ cfs_mutex_down(&server_start_lock);
obd = class_name2obd(LUSTRE_OSS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_OSS_OBDNAME,
LUSTRE_OSS_OBDNAME"_uuid",
0, 0);
if (rc) {
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
CERROR("failed to start OSS: %d\n", rc);
RETURN(rc);
}
}
- mutex_up(&server_start_lock);
+ cfs_mutex_up(&server_start_lock);
}
/* Set the mgc fs to our server disk. This allows the MGC
lsi->lsi_lmd->lmd_exclude_count = 0;
s2lsi_nocast(sb) = lsi;
/* we take 1 extra ref for our setup */
- atomic_set(&lsi->lsi_mounts, 1);
+ cfs_atomic_set(&lsi->lsi_mounts, 1);
/* Default umount style */
lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
/* someone didn't call server_put_mount. */
- LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
+ LASSERT(cfs_atomic_read(&lsi->lsi_mounts) == 0);
if (lsi->lsi_ldd != NULL)
OBD_FREE(lsi->lsi_ldd, sizeof(*lsi->lsi_ldd));
LASSERT(lsi != NULL);
- CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
- if (atomic_dec_and_test(&lsi->lsi_mounts)) {
+ CDEBUG(D_MOUNT, "put %p %d\n", sb, cfs_atomic_read(&lsi->lsi_mounts));
+ if (cfs_atomic_dec_and_test(&lsi->lsi_mounts)) {
lustre_free_lsi(sb);
RETURN(1);
}
cfs_waitq_init(&waitq);
- while (cfs_atomic_read(&mnt->mnt_count) > 1) {
+ while (atomic_read(&mnt->mnt_count) > 1) {
if (waited && (waited % 30 == 0))
LCONSOLE_WARN("Mount still busy with %d refs after "
"%d secs.\n",
blocked = l_w_e_set_sigs(sigmask(SIGKILL));
cfs_waitq_wait_event_interruptible_timeout(
waitq,
- (cfs_atomic_read(&mnt->mnt_count) == 1),
+ (atomic_read(&mnt->mnt_count) == 1),
cfs_time_seconds(3),
rc);
cfs_block_sigs(blocked);
}
#ifndef HAVE_STATFS_DENTRY_PARAM
-static int server_statfs (struct super_block *sb, struct kstatfs *buf)
+static int server_statfs (struct super_block *sb, cfs_kstatfs_t *buf)
{
#else
-static int server_statfs (struct dentry *dentry, struct kstatfs *buf)
+static int server_statfs (struct dentry *dentry, cfs_kstatfs_t *buf)
{
struct super_block *sb = dentry->d_sb;
#endif
.statfs = server_statfs,
};
-#define log2(n) ffz(~(n))
+#define log2(n) cfs_ffz(~(n))
#define LUSTRE_SUPER_MAGIC 0x0BD00BD1
static int server_fill_super_common(struct super_block *sb)
* Disable lockdep during mount, because mount locking patterns are
* `special'.
*/
- lockdep_off();
+ cfs_lockdep_off();
/* Figure out the lmd from the mount options */
if (lmd_parse((char *)data, lmd)) {
CDEBUG(D_SUPER, "Mount %s complete\n",
lmd->lmd_dev);
}
- lockdep_on();
+ cfs_lockdep_on();
return rc;
}
if (ia_valid & ATTR_MODE) {
oa->o_mode = attr->ia_mode;
oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
- if (!in_group_p(oa->o_gid) && !cfs_capable(CFS_CAP_FSETID))
+ if (!cfs_curproc_is_in_groups(oa->o_gid) &&
+ !cfs_capable(CFS_CAP_FSETID))
oa->o_mode &= ~S_ISGID;
}
if (ia_valid & ATTR_UID) {
if (valid & OBD_MD_FLMODE) {
attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
attr->ia_valid |= ATTR_MODE;
- if (!in_group_p(oa->o_gid) && !cfs_capable(CFS_CAP_FSETID))
+ if (!cfs_curproc_is_in_groups(oa->o_gid) &&
+ !cfs_capable(CFS_CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
if (valid & OBD_MD_FLUID) {
#include <obd_support.h>
#include <obd_class.h>
-void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs)
+void statfs_pack(struct obd_statfs *osfs, cfs_kstatfs_t *sfs)
{
memset(osfs, 0, sizeof(*osfs));
osfs->os_type = sfs->f_type;
osfs->os_namelen = sfs->f_namelen;
}
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs)
+void statfs_unpack(cfs_kstatfs_t *sfs, struct obd_statfs *osfs)
{
memset(sfs, 0, sizeof(*sfs));
sfs->f_type = osfs->os_type;
{
obd_id id;
- spin_lock(&obddev->u.echo.eo_lock);
+ cfs_spin_lock(&obddev->u.echo.eo_lock);
id = ++obddev->u.echo.eo_lastino;
- spin_unlock(&obddev->u.echo.eo_lock);
+ cfs_spin_unlock(&obddev->u.echo.eo_lock);
return id;
}
}
}
- atomic_add(*pages, &obd->u.echo.eo_prep);
+ cfs_atomic_add(*pages, &obd->u.echo.eo_prep);
if (cmd & OBD_BRW_READ)
lprocfs_counter_add(obd->obd_stats, LPROC_ECHO_READ_BYTES,
tot_bytes);
CDEBUG(D_PAGE, "%d pages allocated after prep\n",
- atomic_read(&obd->u.echo.eo_prep));
+ cfs_atomic_read(&obd->u.echo.eo_prep));
RETURN(0);
* lose the extra ref gained above */
OBD_PAGE_FREE(res[i].page);
res[i].page = NULL;
- atomic_dec(&obd->u.echo.eo_prep);
+ cfs_atomic_dec(&obd->u.echo.eo_prep);
}
return rc;
}
- atomic_sub(pgs, &obd->u.echo.eo_prep);
+ cfs_atomic_sub(pgs, &obd->u.echo.eo_prep);
CDEBUG(D_PAGE, "%d pages remain after commit\n",
- atomic_read(&obd->u.echo.eo_prep));
+ cfs_atomic_read(&obd->u.echo.eo_prep));
RETURN(rc);
commitrw_cleanup:
- atomic_sub(pgs, &obd->u.echo.eo_prep);
+ cfs_atomic_sub(pgs, &obd->u.echo.eo_prep);
CERROR("cleaning up %d pages (%d obdos)\n",
niocount - pgs - 1, objcount);
/* NB see comment above regarding persistent pages */
OBD_PAGE_FREE(page);
- atomic_dec(&obd->u.echo.eo_prep);
+ cfs_atomic_dec(&obd->u.echo.eo_prep);
}
return rc;
}
char ns_name[48];
ENTRY;
- spin_lock_init(&obd->u.echo.eo_lock);
+ cfs_spin_lock_init(&obd->u.echo.eo_lock);
obd->u.echo.eo_lastino = ECHO_INIT_OBJID;
sprintf(ns_name, "echotgt-%s", obd->obd_uuid.uuid);
lprocfs_obd_cleanup(obd);
lprocfs_free_obd_stats(obd);
- ldlm_lock_decref (&obd->u.echo.eo_nl_lock, LCK_NL);
+ ldlm_lock_decref(&obd->u.echo.eo_nl_lock, LCK_NL);
/* XXX Bug 3413; wait for a bit to ensure the BL callback has
* happened before calling ldlm_namespace_free() */
- cfs_schedule_timeout (CFS_TASK_UNINT, cfs_time_seconds(1));
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT, cfs_time_seconds(1));
ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
obd->obd_namespace = NULL;
- leaked = atomic_read(&obd->u.echo.eo_prep);
+ leaked = cfs_atomic_read(&obd->u.echo.eo_prep);
if (leaked != 0)
CERROR("%d prep/commitrw pages leaked\n", leaked);
struct cl_object_header eo_hdr;
struct echo_device *eo_dev;
- struct list_head eo_obj_chain;
+ cfs_list_t eo_obj_chain;
struct lov_stripe_md *eo_lsm;
- atomic_t eo_npages;
+ cfs_atomic_t eo_npages;
int eo_deleted;
};
struct echo_lock {
struct cl_lock_slice el_cl;
- struct list_head el_chain;
+ cfs_list_t el_chain;
struct echo_object *el_object;
__u64 el_cookie;
- atomic_t el_refcount;
+ cfs_atomic_t el_refcount;
};
struct echo_io {
cfs_page_t *vmpage = ep->ep_vmpage;
ENTRY;
- atomic_dec(&eco->eo_npages);
+ cfs_atomic_dec(&eco->eo_npages);
page_cache_release(vmpage);
OBD_SLAB_FREE_PTR(ep, echo_page_kmem);
EXIT;
{
struct echo_lock *ecl = cl2echo_lock(slice);
- LASSERT(list_empty(&ecl->el_chain));
+ LASSERT(cfs_list_empty(&ecl->el_chain));
OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
}
{
struct echo_lock *ecl = cl2echo_lock(slice);
- LASSERT(list_empty(&ecl->el_chain));
+ LASSERT(cfs_list_empty(&ecl->el_chain));
}
static int echo_lock_fits_into(const struct lu_env *env,
ep->ep_vmpage = vmpage;
page_cache_get(vmpage);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
- atomic_inc(&eco->eo_npages);
+ cfs_atomic_inc(&eco->eo_npages);
}
RETURN(ERR_PTR(ep ? 0 : -ENOMEM));
}
cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
el->el_object = cl2echo_obj(obj);
CFS_INIT_LIST_HEAD(&el->el_chain);
- atomic_set(&el->el_refcount, 0);
+ cfs_atomic_set(&el->el_refcount, 0);
}
RETURN(el == NULL ? -ENOMEM : 0);
}
LASSERT(econf->eoc_md);
eco->eo_lsm = *econf->eoc_md;
eco->eo_dev = ed;
- atomic_set(&eco->eo_npages, 0);
+ cfs_atomic_set(&eco->eo_npages, 0);
/* clear the lsm pointer so that it won't get freed. */
*econf->eoc_md = NULL;
- spin_lock(&ec->ec_lock);
- list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
- spin_unlock(&ec->ec_lock);
+ cfs_spin_lock(&ec->ec_lock);
+ cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+ cfs_spin_unlock(&ec->ec_lock);
RETURN(0);
}
struct lov_stripe_md *lsm = eco->eo_lsm;
ENTRY;
- LASSERT(atomic_read(&eco->eo_npages) == 0);
+ LASSERT(cfs_atomic_read(&eco->eo_npages) == 0);
- spin_lock(&ec->ec_lock);
- list_del_init(&eco->eo_obj_chain);
- spin_unlock(&ec->ec_lock);
+ cfs_spin_lock(&ec->ec_lock);
+ cfs_list_del_init(&eco->eo_obj_chain);
+ cfs_spin_unlock(&ec->ec_lock);
lu_object_fini(obj);
lu_object_header_fini(obj->lo_header);
CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n", ed, next);
/* destroy locks */
- spin_lock(&ec->ec_lock);
- while (!list_empty(&ec->ec_locks)) {
- struct echo_lock *ecl = list_entry(ec->ec_locks.next,
- struct echo_lock, el_chain);
+ cfs_spin_lock(&ec->ec_lock);
+ while (!cfs_list_empty(&ec->ec_locks)) {
+ struct echo_lock *ecl = cfs_list_entry(ec->ec_locks.next,
+ struct echo_lock,
+ el_chain);
int still_used = 0;
- if (atomic_dec_and_test(&ecl->el_refcount))
- list_del_init(&ecl->el_chain);
+ if (cfs_atomic_dec_and_test(&ecl->el_refcount))
+ cfs_list_del_init(&ecl->el_chain);
else
still_used = 1;
- spin_unlock(&ec->ec_lock);
+ cfs_spin_unlock(&ec->ec_lock);
CERROR("echo client: pending lock %p refs %d\n",
- ecl, atomic_read(&ecl->el_refcount));
+ ecl, cfs_atomic_read(&ecl->el_refcount));
echo_lock_release(env, ecl, still_used);
- spin_lock(&ec->ec_lock);
+ cfs_spin_lock(&ec->ec_lock);
}
- spin_unlock(&ec->ec_lock);
+ cfs_spin_unlock(&ec->ec_lock);
LASSERT(ed->ed_site);
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
* all of cached objects. Anyway, probably the echo device is being
* parallelly accessed.
*/
- spin_lock(&ec->ec_lock);
- list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+ cfs_spin_lock(&ec->ec_lock);
+ cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
eco->eo_deleted = 1;
- spin_unlock(&ec->ec_lock);
+ cfs_spin_unlock(&ec->ec_lock);
/* purge again */
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
- CDEBUG(D_INFO, "Waiting for the reference of echo object to be dropped\n");
+ CDEBUG(D_INFO,
+ "Waiting for the reference of echo object to be dropped\n");
/* Wait for the last reference to be dropped. */
- spin_lock(&ec->ec_lock);
- while (!list_empty(&ec->ec_objects)) {
- spin_unlock(&ec->ec_lock);
+ cfs_spin_lock(&ec->ec_lock);
+ while (!cfs_list_empty(&ec->ec_objects)) {
+ cfs_spin_unlock(&ec->ec_lock);
CERROR("echo_client still has objects at cleanup time, "
"wait for 1 second\n");
- cfs_schedule_timeout(CFS_TASK_UNINT, cfs_time_seconds(1));
- spin_lock(&ec->ec_lock);
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ cfs_time_seconds(1));
+ cfs_spin_lock(&ec->ec_lock);
}
- spin_unlock(&ec->ec_lock);
+ cfs_spin_unlock(&ec->ec_lock);
CDEBUG(D_INFO, "No object exists, exiting...\n");
if (eco->eo_deleted) {
struct lu_object_header *loh = obj->co_lu.lo_header;
LASSERT(&eco->eo_hdr == luh2coh(loh));
- set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
+ cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
cl_object_prune(env, obj);
}
rc = cl_wait(env, lck);
if (rc == 0) {
el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- spin_lock(&ec->ec_lock);
- if (list_empty(&el->el_chain)) {
- list_add(&el->el_chain, &ec->ec_locks);
+ cfs_spin_lock(&ec->ec_lock);
+ if (cfs_list_empty(&el->el_chain)) {
+ cfs_list_add(&el->el_chain, &ec->ec_locks);
el->el_cookie = ++ec->ec_unique;
}
- atomic_inc(&el->el_refcount);
+ cfs_atomic_inc(&el->el_refcount);
*cookie = el->el_cookie;
- spin_unlock(&ec->ec_lock);
+ cfs_spin_unlock(&ec->ec_lock);
} else
cl_lock_release(env, lck, "ec enqueue", cfs_current());
}
{
struct echo_client_obd *ec = ed->ed_ec;
struct echo_lock *ecl = NULL;
- struct list_head *el;
+ cfs_list_t *el;
int found = 0, still_used = 0;
ENTRY;
LASSERT(ec != NULL);
- spin_lock (&ec->ec_lock);
- list_for_each (el, &ec->ec_locks) {
- ecl = list_entry (el, struct echo_lock, el_chain);
+ cfs_spin_lock (&ec->ec_lock);
+ cfs_list_for_each (el, &ec->ec_locks) {
+ ecl = cfs_list_entry (el, struct echo_lock, el_chain);
CDEBUG(D_INFO, "ecl: %p, cookie: %llx\n", ecl, ecl->el_cookie);
found = (ecl->el_cookie == cookie);
if (found) {
- if (atomic_dec_and_test(&ecl->el_refcount))
- list_del_init(&ecl->el_chain);
+ if (cfs_atomic_dec_and_test(&ecl->el_refcount))
+ cfs_list_del_init(&ecl->el_chain);
else
still_used = 1;
break;
}
}
- spin_unlock (&ec->ec_lock);
+ cfs_spin_unlock (&ec->ec_lock);
if (!found)
RETURN(-ENOENT);
if (nob > ulsm_nob)
return (-EINVAL);
- if (copy_to_user (ulsm, lsm, sizeof(ulsm)))
+ if (cfs_copy_to_user (ulsm, lsm, sizeof(ulsm)))
return (-EFAULT);
for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
- sizeof(lsm->lsm_oinfo[0])))
+ if (cfs_copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
+ sizeof(lsm->lsm_oinfo[0])))
return (-EFAULT);
}
return 0;
if (ulsm_nob < sizeof (*lsm))
return (-EINVAL);
- if (copy_from_user (lsm, ulsm, sizeof (*lsm)))
+ if (cfs_copy_from_user (lsm, ulsm, sizeof (*lsm)))
return (-EFAULT);
if (lsm->lsm_stripe_count > ec->ec_nstripes ||
for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (copy_from_user(lsm->lsm_oinfo[i],
- ((struct lov_stripe_md *)ulsm)->lsm_oinfo[i],
- sizeof(lsm->lsm_oinfo[0])))
+ if (cfs_copy_from_user(lsm->lsm_oinfo[i],
+ ((struct lov_stripe_md *)ulsm)-> \
+ lsm_oinfo[i],
+ sizeof(lsm->lsm_oinfo[0])))
return (-EFAULT);
}
return (0);
int i;
ENTRY;
- unlock_kernel();
+ cfs_unlock_kernel();
memset(&dummy_oti, 0, sizeof(dummy_oti));
ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
}
- lock_kernel();
+ cfs_lock_kernel();
return rc;
}
RETURN(-EINVAL);
}
- spin_lock_init (&ec->ec_lock);
+ cfs_spin_lock_init (&ec->ec_lock);
CFS_INIT_LIST_HEAD (&ec->ec_objects);
CFS_INIT_LIST_HEAD (&ec->ec_locks);
ec->ec_unique = 0;
rc = obd_connect(NULL, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
if (rc == 0) {
/* Turn off pinger because it connects to tgt obd directly. */
- spin_lock(&tgt->obd_dev_lock);
- list_del_init(&ec->ec_exp->exp_obd_chain_timed);
- spin_unlock(&tgt->obd_dev_lock);
+ cfs_spin_lock(&tgt->obd_dev_lock);
+ cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+ cfs_spin_unlock(&tgt->obd_dev_lock);
}
OBD_FREE(ocd, sizeof(*ocd));
int rc;
ENTRY;
- if (!list_empty(&obddev->obd_exports)) {
+ if (!cfs_list_empty(&obddev->obd_exports)) {
CERROR("still has clients!\n");
RETURN(-EBUSY);
}
- LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ec->ec_exp->exp_refcount) > 0);
rc = obd_disconnect(ec->ec_exp);
if (rc != 0)
CERROR("fail to disconnect device: %d\n", rc);
ec = &obd->u.echo_client;
/* no more contention on export's lock list */
- while (!list_empty (&exp->exp_ec_data.eced_locks)) {
- ecl = list_entry (exp->exp_ec_data.eced_locks.next,
- struct ec_lock, ecl_exp_chain);
- list_del (&ecl->ecl_exp_chain);
+ while (!cfs_list_empty (&exp->exp_ec_data.eced_locks)) {
+ ecl = cfs_list_entry (exp->exp_ec_data.eced_locks.next,
+ struct ec_lock, ecl_exp_chain);
+ cfs_list_del (&ecl->ecl_exp_chain);
rc = obd_cancel(ec->ec_exp, ecl->ecl_object->eco_lsm,
ecl->ecl_mode, &ecl->ecl_lock_handle);
oti->oti_pre_version != curr_version) {
CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
oti->oti_pre_version, curr_version);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_vbr_failed = 1;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
RETURN (-EOVERFLOW);
}
oti->oti_pre_version = curr_version;
if (!exp->exp_obd->obd_replayable || oti == NULL)
RETURN(rc);
- mutex_down(&fed->fed_lcd_lock);
+ cfs_mutex_down(&fed->fed_lcd_lock);
lcd = fed->fed_lcd;
/* if the export has already been disconnected, we have no last_rcvd slot,
* update server data with latest transno then */
if (lcd == NULL) {
- mutex_up(&fed->fed_lcd_lock);
+ cfs_mutex_up(&fed->fed_lcd_lock);
CWARN("commit transaction for disconnected client %s: rc %d\n",
exp->exp_client_uuid.uuid, rc);
err = filter_update_server_data(exp->exp_obd,
}
/* we don't allocate new transnos for replayed requests */
- spin_lock(&filter->fo_translock);
+ cfs_spin_lock(&filter->fo_translock);
if (oti->oti_transno == 0) {
last_rcvd = le64_to_cpu(filter->fo_fsd->lsd_last_transno) + 1;
filter->fo_fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
lcd->lcd_last_transno = cpu_to_le64(last_rcvd);
lcd->lcd_pre_versions[0] = cpu_to_le64(oti->oti_pre_version);
lcd->lcd_last_xid = cpu_to_le64(oti->oti_xid);
- spin_unlock(&filter->fo_translock);
+ cfs_spin_unlock(&filter->fo_translock);
if (inode)
fsfilt_set_version(exp->exp_obd, inode, last_rcvd);
CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
last_rcvd, lcd->lcd_uuid, fed->fed_lr_idx, err);
- mutex_up(&fed->fed_lcd_lock);
+ cfs_mutex_up(&fed->fed_lcd_lock);
RETURN(rc);
}
{
int i;
for (i = 0; i < BRW_LAST; i++)
- spin_lock_init(&brw_stats->hist[i].oh_lock);
+ cfs_spin_lock_init(&brw_stats->hist[i].oh_lock);
}
static int lprocfs_init_rw_stats(struct obd_device *obd,
* there's no need for extra complication here
*/
if (new_client) {
- cl_idx = find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
+ cl_idx = cfs_find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
repeat:
if (cl_idx >= LR_MAX_CLIENTS) {
CERROR("no room for %u client - fix LR_MAX_CLIENTS\n",
cl_idx);
RETURN(-EOVERFLOW);
}
- if (test_and_set_bit(cl_idx, bitmap)) {
- cl_idx = find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
- cl_idx);
+ if (cfs_test_and_set_bit(cl_idx, bitmap)) {
+ cl_idx = cfs_find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
+ cl_idx);
goto repeat;
}
} else {
- if (test_and_set_bit(cl_idx, bitmap)) {
+ if (cfs_test_and_set_bit(cl_idx, bitmap)) {
CERROR("FILTER client %d: bit already set in bitmap!\n",
cl_idx);
LBUG();
fed->fed_lr_idx = cl_idx;
fed->fed_lr_off = le32_to_cpu(filter->fo_fsd->lsd_client_start) +
cl_idx * le16_to_cpu(filter->fo_fsd->lsd_client_size);
- init_mutex(&fed->fed_lcd_lock);
+ cfs_init_mutex(&fed->fed_lcd_lock);
LASSERTF(fed->fed_lr_off > 0, "fed_lr_off = %llu\n", fed->fed_lr_off);
CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
target_client_add_cb,
class_export_cb_get(exp));
if (rc == 0) {
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_need_sync = 1;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
}
rc = fsfilt_write_record(obd, filter->fo_rcvd_filp,
fed->fed_lcd,
/* Clear the bit _after_ zeroing out the client so we don't
race with filter_client_add and zero out new clients.*/
- if (!test_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
+ if (!cfs_test_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
CERROR("FILTER client %u: bit already clear in bitmap!!\n",
fed->fed_lr_idx);
LBUG();
* be in server data or in client data in case of failure */
filter_update_server_data(obd, filter->fo_rcvd_filp, filter->fo_fsd);
- mutex_down(&fed->fed_lcd_lock);
+ cfs_mutex_down(&fed->fed_lcd_lock);
rc = fsfilt_write_record(obd, filter->fo_rcvd_filp, &zero_lcd,
sizeof(zero_lcd), &off, 0);
fed->fed_lcd = NULL;
- mutex_up(&fed->fed_lcd_lock);
+ cfs_mutex_up(&fed->fed_lcd_lock);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
CDEBUG(rc == 0 ? D_INFO : D_ERROR,
lcd->lcd_uuid, fed->fed_lr_idx, fed->fed_lr_off,
LAST_RCVD, rc);
- if (!test_and_clear_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
+ if (!cfs_test_and_clear_bit(fed->fed_lr_idx,
+ filter->fo_last_rcvd_slots)) {
CERROR("FILTER client %u: bit already clear in bitmap!!\n",
fed->fed_lr_idx);
LBUG();
OBD_FREE_PTR(lcd);
RETURN(0);
free:
- mutex_down(&fed->fed_lcd_lock);
+ cfs_mutex_down(&fed->fed_lcd_lock);
fed->fed_lcd = NULL;
- mutex_up(&fed->fed_lcd_lock);
+ cfs_mutex_up(&fed->fed_lcd_lock);
OBD_FREE_PTR(lcd);
return 0;
/* XXX when we have persistent reservations and the handle
* is stored herein we need to drop it here. */
fed->fed_mod_count--;
- list_del(&fmd->fmd_list);
+ cfs_list_del(&fmd->fmd_list);
OBD_SLAB_FREE(fmd, ll_fmd_cachep, sizeof(*fmd));
}
}
return;
fed = &exp->exp_filter_data;
- spin_lock(&fed->fed_lock);
+ cfs_spin_lock(&fed->fed_lock);
filter_fmd_put_nolock(fed, fmd); /* caller reference */
- spin_unlock(&fed->fed_lock);
+ cfs_spin_unlock(&fed->fed_lock);
}
/* expire entries from the end of the list if there are too many
{
struct filter_mod_data *fmd, *tmp;
- list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
+ cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
if (fmd == keep)
break;
- if (time_before(jiffies, fmd->fmd_expire) &&
+ if (cfs_time_before(jiffies, fmd->fmd_expire) &&
fed->fed_mod_count < filter->fo_fmd_max_num)
break;
- list_del_init(&fmd->fmd_list);
+ cfs_list_del_init(&fmd->fmd_list);
filter_fmd_put_nolock(fed, fmd); /* list reference */
}
}
void filter_fmd_expire(struct obd_export *exp)
{
- spin_lock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_lock(&exp->exp_filter_data.fed_lock);
filter_fmd_expire_nolock(&exp->exp_obd->u.filter,
&exp->exp_filter_data, NULL);
- spin_unlock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
}
/* find specified objid, group in export fmd list.
LASSERT_SPIN_LOCKED(&fed->fed_lock);
- list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
+ cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
if (fmd->fmd_id == objid && fmd->fmd_gr == group) {
found = fmd;
- list_del(&fmd->fmd_list);
- list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
+ cfs_list_del(&fmd->fmd_list);
+ cfs_list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
fmd->fmd_expire = jiffies + filter->fo_fmd_max_age;
break;
}
{
struct filter_mod_data *fmd;
- spin_lock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_lock(&exp->exp_filter_data.fed_lock);
fmd = filter_fmd_find_nolock(&exp->exp_obd->u.filter,
&exp->exp_filter_data, objid, group);
if (fmd)
fmd->fmd_refcount++; /* caller reference */
- spin_unlock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
return fmd;
}
OBD_SLAB_ALLOC_PTR_GFP(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO);
- spin_lock(&fed->fed_lock);
+ cfs_spin_lock(&fed->fed_lock);
found = filter_fmd_find_nolock(&exp->exp_obd->u.filter,fed,objid,group);
if (fmd_new) {
if (found == NULL) {
- list_add_tail(&fmd_new->fmd_list, &fed->fed_mod_list);
+ cfs_list_add_tail(&fmd_new->fmd_list,
+ &fed->fed_mod_list);
fmd_new->fmd_id = objid;
fmd_new->fmd_gr = group;
fmd_new->fmd_refcount++; /* list reference */
exp->exp_obd->u.filter.fo_fmd_max_age;
}
- spin_unlock(&fed->fed_lock);
+ cfs_spin_unlock(&fed->fed_lock);
return found;
}
{
struct filter_mod_data *found = NULL;
- spin_lock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_lock(&exp->exp_filter_data.fed_lock);
found = filter_fmd_find_nolock(&exp->exp_filter_data, objid, group);
if (found) {
- list_del_init(&found->fmd_list);
+ cfs_list_del_init(&found->fmd_list);
filter_fmd_put_nolock(&exp->exp_filter_data, found);
}
- spin_unlock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
}
#else
#define filter_fmd_drop(exp, objid, group)
struct filter_export_data *fed = &exp->exp_filter_data;
struct filter_mod_data *fmd = NULL, *tmp;
- spin_lock(&fed->fed_lock);
- list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
- list_del_init(&fmd->fmd_list);
+ cfs_spin_lock(&fed->fed_lock);
+ cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
+ cfs_list_del_init(&fmd->fmd_list);
filter_fmd_put_nolock(fed, fmd);
}
- spin_unlock(&fed->fed_lock);
+ cfs_spin_unlock(&fed->fed_lock);
}
static int filter_init_export(struct obd_export *exp)
{
- spin_lock_init(&exp->exp_filter_data.fed_lock);
+ cfs_spin_lock_init(&exp->exp_filter_data.fed_lock);
CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return ldlm_init_export(exp);
}
/* VBR: set export last committed */
exp->exp_last_committed = last_rcvd;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connecting = 0;
exp->exp_in_recovery = 0;
- spin_unlock(&exp->exp_lock);
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock(&exp->exp_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
obd->obd_max_recoverable_clients++;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
lcd = NULL;
class_export_put(exp);
}
struct filter_obd *filter = &obd->u.filter;
int old_count, group, rc = 0;
- down(&filter->fo_init_lock);
+ cfs_down(&filter->fo_init_lock);
old_count = filter->fo_group_count;
for (group = old_count; group <= last_group; group++) {
rc = filter_read_group_internal(obd, group, create);
if (rc != 0)
break;
}
- up(&filter->fo_init_lock);
+ cfs_up(&filter->fo_init_lock);
return rc;
}
LASSERT(filter->fo_fsd != NULL);
LASSERT(group <= filter->fo_group_count);
- spin_lock(&filter->fo_objidlock);
+ cfs_spin_lock(&filter->fo_objidlock);
filter->fo_last_objids[group] = id;
- spin_unlock(&filter->fo_objidlock);
+ cfs_spin_unlock(&filter->fo_objidlock);
}
obd_id filter_last_id(struct filter_obd *filter, obd_gr group)
LASSERT(group <= filter->fo_group_count);
/* FIXME: object groups */
- spin_lock(&filter->fo_objidlock);
+ cfs_spin_lock(&filter->fo_objidlock);
id = filter->fo_last_objids[group];
- spin_unlock(&filter->fo_objidlock);
+ cfs_spin_unlock(&filter->fo_objidlock);
return id;
}
if (interval_high(n) <= size)
return INTERVAL_ITER_STOP;
- list_for_each_entry(lck, &node->li_group, l_sl_policy) {
+ cfs_list_for_each_entry(lck, &node->li_group, l_sl_policy) {
/* Don't send glimpse ASTs to liblustre clients.
* They aren't listening for them, and they do
* entirely synchronous I/O anyways. */
/* FIXME: we should change the policy function slightly, to not make
* this list at all, since we just turn around and free it */
- while (!list_empty(&rpc_list)) {
+ while (!cfs_list_empty(&rpc_list)) {
struct ldlm_lock *wlock =
- list_entry(rpc_list.next, struct ldlm_lock, l_cp_ast);
+ cfs_list_entry(rpc_list.next, struct ldlm_lock,
+ l_cp_ast);
LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
lock->l_flags &= ~LDLM_FL_CP_REQD;
- list_del_init(&wlock->l_cp_ast);
+ cfs_list_del_init(&wlock->l_cp_ast);
LDLM_LOCK_RELEASE(wlock);
}
sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
- write_lock(&filter->fo_sptlrpc_lock);
+ cfs_write_lock(&filter->fo_sptlrpc_lock);
sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
filter->fo_sptlrpc_rset = tmp_rset;
- write_unlock(&filter->fo_sptlrpc_lock);
+ cfs_write_unlock(&filter->fo_sptlrpc_lock);
return 0;
}
obd->obd_lvfs_ctxt.fs = get_ds();
obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
- init_mutex(&filter->fo_init_lock);
+ cfs_init_mutex(&filter->fo_init_lock);
filter->fo_committed_group = 0;
filter->fo_destroys_in_progress = 0;
for (i = 0; i < 32; i++)
- sema_init(&filter->fo_create_locks[i], 1);
+ cfs_sema_init(&filter->fo_create_locks[i], 1);
- spin_lock_init(&filter->fo_translock);
- spin_lock_init(&filter->fo_objidlock);
+ cfs_spin_lock_init(&filter->fo_translock);
+ cfs_spin_lock_init(&filter->fo_objidlock);
CFS_INIT_LIST_HEAD(&filter->fo_export_list);
- sema_init(&filter->fo_alloc_lock, 1);
+ cfs_sema_init(&filter->fo_alloc_lock, 1);
init_brw_stats(&filter->fo_filter_stats);
filter->fo_read_cache = 1; /* enable read-only cache by default */
filter->fo_writethrough_cache = 1; /* enable writethrough cache */
GOTO(err_ops, rc);
CFS_INIT_LIST_HEAD(&filter->fo_llog_list);
- spin_lock_init(&filter->fo_llog_list_lock);
+ cfs_spin_lock_init(&filter->fo_llog_list_lock);
filter->fo_fl_oss_capa = 1;
GOTO(err_post, rc);
}
- rwlock_init(&filter->fo_sptlrpc_lock);
+ cfs_rwlock_init(&filter->fo_sptlrpc_lock);
sptlrpc_rule_set_init(&filter->fo_sptlrpc_rset);
/* do this after llog being initialized */
filter_adapt_sptlrpc_conf(obd, 1);
* This is safe to do, as llog is already synchronized
* and its import may go.
*/
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
if (ctxt->loc_imp) {
class_import_put(ctxt->loc_imp);
ctxt->loc_imp = NULL;
}
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
llog_ctxt_put(ctxt);
}
if (filter->fo_lcm) {
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
filter->fo_lcm = NULL;
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
}
RETURN(filter_olg_fini(&obd->obd_olg));
}
struct obd_llog_group *olg;
LASSERT_SPIN_LOCKED(&filter->fo_llog_list_lock);
- list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
+ cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
if (olg->olg_group == group)
RETURN(olg);
}
if (group == FILTER_GROUP_LLOG)
RETURN(&obd->obd_olg);
- spin_lock(&filter->fo_llog_list_lock);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
olg = filter_find_olg_internal(filter, group);
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
RETURN(olg);
}
if (group == FILTER_GROUP_LLOG)
RETURN(&obd->obd_olg);
- spin_lock(&filter->fo_llog_list_lock);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
olg = filter_find_olg_internal(filter, group);
if (olg) {
if (olg->olg_initializing) {
GOTO(out_unlock, olg = ERR_PTR(-ENOMEM));
llog_group_init(olg, group);
- list_add(&olg->olg_list, &filter->fo_llog_list);
+ cfs_list_add(&olg->olg_list, &filter->fo_llog_list);
olg->olg_initializing = 1;
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
rc = obd_llog_init(obd, olg, obd, NULL);
if (rc) {
- spin_lock(&filter->fo_llog_list_lock);
- list_del(&olg->olg_list);
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
+ cfs_list_del(&olg->olg_list);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
OBD_FREE_PTR(olg);
GOTO(out, olg = ERR_PTR(-ENOMEM));
}
- spin_lock(&filter->fo_llog_list_lock);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
olg->olg_initializing = 0;
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
CDEBUG(D_OTHER, "%s: new llog group %u (0x%p)\n",
obd->obd_name, group, olg);
out:
RETURN(olg);
out_unlock:
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
GOTO(out, olg);
}
obd->obd_name, body->lgdc_logid.lgl_oid,
body->lgdc_logid.lgl_ogr, body->lgdc_logid.lgl_ogen);
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
obd->u.filter.fo_mds_ost_sync = 1;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
rc = llog_connect(ctxt, &body->lgdc_logid,
&body->lgdc_gen, NULL);
llog_ctxt_put(ctxt);
{
struct obd_llog_group *olg, *tmp;
struct filter_obd *filter;
- struct list_head remove_list;
+ cfs_list_t remove_list;
int rc = 0;
ENTRY;
filter = &obd->u.filter;
CFS_INIT_LIST_HEAD(&remove_list);
- spin_lock(&filter->fo_llog_list_lock);
- while (!list_empty(&filter->fo_llog_list)) {
- olg = list_entry(filter->fo_llog_list.next,
- struct obd_llog_group, olg_list);
- list_del(&olg->olg_list);
- list_add(&olg->olg_list, &remove_list);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
+ while (!cfs_list_empty(&filter->fo_llog_list)) {
+ olg = cfs_list_entry(filter->fo_llog_list.next,
+ struct obd_llog_group, olg_list);
+ cfs_list_del(&olg->olg_list);
+ cfs_list_add(&olg->olg_list, &remove_list);
}
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
- list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
- list_del_init(&olg->olg_list);
+ cfs_list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
+ cfs_list_del_init(&olg->olg_list);
rc = filter_olg_fini(olg);
if (rc)
CERROR("failed to cleanup llogging subsystem for %u\n",
struct filter_obd *filter = &exp->exp_obd->u.filter;
obd_size left, want;
- spin_lock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
left = filter_grant_space_left(exp);
want = data->ocd_grant;
filter_grant(exp, fed->fed_grant, want, left, (reconnect == 0));
data->ocd_grant = fed->fed_grant;
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %d want: "
LPU64" left: "LPU64"\n", exp->exp_obd->obd_name,
obd_size tot_dirty = 0, tot_pending = 0, tot_granted = 0;
obd_size fo_tot_dirty, fo_tot_pending, fo_tot_granted;
- if (list_empty(&obd->obd_exports))
+ if (cfs_list_empty(&obd->obd_exports))
return;
/* We don't want to do this for large machines that do lots of
if (obd->obd_num_exports > 100)
return;
- spin_lock(&obd->obd_osfs_lock);
- spin_lock(&obd->obd_dev_lock);
- list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ cfs_spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
int error = 0;
fed = &exp->exp_filter_data;
if (fed->fed_grant < 0 || fed->fed_pending < 0 ||
fo_tot_granted = obd->u.filter.fo_tot_granted;
fo_tot_pending = obd->u.filter.fo_tot_pending;
fo_tot_dirty = obd->u.filter.fo_tot_dirty;
- spin_unlock(&obd->obd_dev_lock);
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
/* Do these assertions outside the spinlocks so we don't kill system */
if (tot_granted != fo_tot_granted)
struct filter_obd *filter = &obd->u.filter;
struct filter_export_data *fed = &exp->exp_filter_data;
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
LASSERTF(filter->fo_tot_granted >= fed->fed_grant,
"%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
obd->obd_name, filter->fo_tot_granted,
fed->fed_dirty = 0;
fed->fed_grant = 0;
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
}
static int filter_destroy_export(struct obd_export *exp)
/* look for group with min. number, but > worked */
olg_min = NULL;
group = 1 << 30;
- spin_lock(&filter->fo_llog_list_lock);
- list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
+ cfs_spin_lock(&filter->fo_llog_list_lock);
+ cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
if (olg->olg_group <= worked) {
/* this group is already synced */
continue;
olg_min = olg;
group = olg->olg_group;
}
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
if (olg_min == NULL)
break;
doa.o_gr = oa->o_gr;
doa.o_mode = S_IFREG;
- if (!test_bit(doa.o_gr, &filter->fo_destroys_in_progress)) {
+ if (!cfs_test_bit(doa.o_gr, &filter->fo_destroys_in_progress)) {
CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
exp->exp_obd->obd_name, doa.o_gr);
RETURN(0);
oa->o_id = last;
rc = 0;
}
- clear_bit(doa.o_gr, &filter->fo_destroys_in_progress);
+ cfs_clear_bit(doa.o_gr, &filter->fo_destroys_in_progress);
RETURN(rc);
}
RETURN(0);
}
/* This causes inflight precreates to abort and drop lock */
- set_bit(group, &filter->fo_destroys_in_progress);
- down(&filter->fo_create_locks[group]);
- if (!test_bit(group, &filter->fo_destroys_in_progress)) {
+ cfs_set_bit(group, &filter->fo_destroys_in_progress);
+ cfs_down(&filter->fo_create_locks[group]);
+ if (!cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
exp->exp_obd->obd_name, group);
- up(&filter->fo_create_locks[group]);
+ cfs_up(&filter->fo_create_locks[group]);
RETURN(0);
}
diff = oa->o_id - last;
GOTO(out, rc);
} else {
/* XXX: Used by MDS for the first time! */
- clear_bit(group, &filter->fo_destroys_in_progress);
+ cfs_clear_bit(group, &filter->fo_destroys_in_progress);
}
} else {
- down(&filter->fo_create_locks[group]);
+ cfs_down(&filter->fo_create_locks[group]);
if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
CERROR("%s: dropping old precreate request\n",
obd->obd_name);
/* else diff == 0 */
GOTO(out, rc = 0);
out:
- up(&filter->fo_create_locks[group]);
+ cfs_up(&filter->fo_create_locks[group]);
return rc;
}
/* at least try to account for cached pages. its still racey and
* might be under-reporting if clients haven't announced their
* caches with brw recently */
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
CDEBUG(D_SUPER | D_CACHE, "blocks cached "LPU64" granted "LPU64
" pending "LPU64" free "LPU64" avail "LPU64"\n",
int rc;
__u64 os_ffree = -1;
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, cfs_time_shift_64(1));
if (rc == 0)
os_ffree = obd->obd_osfs.os_ffree;
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
return os_ffree;
}
OBD_ALLOC(osfs, sizeof(*osfs));
if (osfs == NULL)
RETURN(-ENOMEM);
- rc = filter_statfs(obd, osfs, cfs_time_current_64() - HZ, 0);
+ rc = filter_statfs(obd, osfs, cfs_time_current_64() - CFS_HZ,
+ 0);
if (rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
CDEBUG(D_RPCTRACE,"%s: not enough space for create "
LPU64"\n", obd->obd_name, osfs->os_bavail <<
for (i = 0; i < *num && err == 0; i++) {
int cleanup_phase = 0;
- if (test_bit(group, &filter->fo_destroys_in_progress)) {
+ if (cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
CWARN("%s: create aborted by destroy\n",
obd->obd_name);
rc = -EAGAIN;
if (rc)
break;
- if (time_after(jiffies, enough_time)) {
+ if (cfs_time_after(jiffies, enough_time)) {
CDEBUG(D_RPCTRACE,
"%s: precreate slow - want %d got %d \n",
obd->obd_name, *num, i);
rc = -EINVAL;
} else {
diff = 1;
- down(&filter->fo_create_locks[oa->o_gr]);
+ cfs_down(&filter->fo_create_locks[oa->o_gr]);
rc = filter_precreate(obd, oa, oa->o_gr, &diff);
- up(&filter->fo_create_locks[oa->o_gr]);
+ cfs_up(&filter->fo_create_locks[oa->o_gr]);
}
} else {
rc = filter_handle_precreate(exp, oa, oa->o_gr, oti);
struct ost_body *body)
{
/* handle shrink grant */
- spin_lock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
filter_grant_incoming(exp, &body->oa);
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
RETURN(0);
lprocfs_filter_init_vars(&lvars);
- request_module("%s", "lquota");
+ cfs_request_module("%s", "lquota");
OBD_ALLOC(obdfilter_created_scratchpad,
OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
sizeof(*obdfilter_created_scratchpad));
struct filter_capa_key *k, *keys[2] = { NULL, NULL };
int i;
- spin_lock(&capa_lock);
- list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
+ cfs_spin_lock(&capa_lock);
+ cfs_list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
if (k->k_key.lk_mdsid != new->lk_mdsid)
continue;
keys[0] = k;
}
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
for (i = 0; i < 2; i++) {
if (!keys[i])
/* maybe because of recovery or other reasons, MDS sent the
* the old capability key again.
*/
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
keys[i]->k_key = *new;
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
RETURN(0);
}
CFS_INIT_LIST_HEAD(&k->k_list);
}
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
k->k_key = *new;
- if (list_empty(&k->k_list))
- list_add(&k->k_list, &filter->fo_capa_keys);
- spin_unlock(&capa_lock);
+ if (cfs_list_empty(&k->k_list))
+ cfs_list_add(&k->k_list, &filter->fo_capa_keys);
+ cfs_spin_unlock(&capa_lock);
DEBUG_CAPA_KEY(D_SEC, new, "new");
RETURN(0);
oc = capa_lookup(filter->fo_capa_hash, capa, 0);
if (oc) {
- spin_lock(&oc->c_lock);
+ cfs_spin_lock(&oc->c_lock);
if (capa_is_expired(oc)) {
DEBUG_CAPA(D_ERROR, capa, "expired");
rc = -ESTALE;
}
- spin_unlock(&oc->c_lock);
+ cfs_spin_unlock(&oc->c_lock);
capa_put(oc);
RETURN(rc);
RETURN(-ESTALE);
}
- spin_lock(&capa_lock);
- list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
+ cfs_spin_lock(&capa_lock);
+ cfs_list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
if (k->k_key.lk_mdsid == mdsid) {
keys_ready = 1;
if (k->k_key.lk_keyid == capa_keyid(capa)) {
}
}
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
if (!keys_ready) {
CDEBUG(D_SEC, "MDS hasn't propagated capability keys yet, "
struct filter_capa_key *k;
int found = 0;
- spin_lock(&capa_lock);
- list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
+ cfs_spin_lock(&capa_lock);
+ cfs_list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
if (k->k_key.lk_mdsid == mdsid &&
k->k_key.lk_keyid == capa_keyid(capa)) {
found = 1;
break;
}
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
if (found) {
union {
{
struct filter_capa_key *key, *n;
- spin_lock(&capa_lock);
- list_for_each_entry_safe(key, n, &filter->fo_capa_keys, k_list) {
- list_del_init(&key->k_list);
+ cfs_spin_lock(&capa_lock);
+ cfs_list_for_each_entry_safe(key, n, &filter->fo_capa_keys, k_list) {
+ cfs_list_del_init(&key->k_list);
OBD_FREE(key, sizeof(*key));
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
}
/* per-client-per-object persistent state (LRU) */
struct filter_mod_data {
- struct list_head fmd_list; /* linked to fed_mod_list */
- __u64 fmd_id; /* object being written to */
- __u64 fmd_gr; /* group being written to */
- __u64 fmd_mactime_xid;/* xid highest {m,a,c}time setattr */
- unsigned long fmd_expire; /* jiffies when it should expire */
- int fmd_refcount; /* reference counter - list holds 1 */
+ cfs_list_t fmd_list; /* linked to fed_mod_list */
+ __u64 fmd_id; /* object being written to */
+ __u64 fmd_gr; /* group being written to */
+ __u64 fmd_mactime_xid;/* xid highest {m,a,c}time
+ * setattr */
+ unsigned long fmd_expire; /* jiffies when it should expire */
+ int fmd_refcount; /* reference counter, list holds 1 */
};
#ifdef HAVE_BGL_SUPPORT
#define FILTER_FMD_MAX_NUM_DEFAULT 32
#endif
/* Client cache seconds */
-#define FILTER_FMD_MAX_AGE_DEFAULT ((obd_timeout + 10) * HZ)
+#define FILTER_FMD_MAX_AGE_DEFAULT ((obd_timeout + 10) * CFS_HZ)
#ifndef HAVE_PAGE_CONSTANT
#define mapping_cap_page_constant_write(mapping) 0
CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
obd->obd_name, exp->exp_client_uuid.uuid, exp,
fed->fed_dirty, fed->fed_pending, fed->fed_grant);
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
LBUG();
}
EXIT;
LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
- if (cfs_time_before_64(obd->obd_osfs_age, cfs_time_current_64() - HZ)) {
+ if (cfs_time_before_64(obd->obd_osfs_age,
+ cfs_time_current_64() - CFS_HZ)) {
restat:
rc = fsfilt_statfs(obd, obd->u.obt.obt_sb,
- cfs_time_current_64() + HZ);
+ cfs_time_current_64() + CFS_HZ);
if (rc) /* N.B. statfs can't really fail */
RETURN(0);
statfs_done = 1;
"current"LPU64"\n",
obd->obd_name, exp->exp_client_uuid.uuid,
exp, fed->fed_grant, want,current_grant);
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
LBUG();
}
}
RETURN(rc);
if (oa && oa->o_valid & OBD_MD_FLGRANT) {
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
filter_grant_incoming(exp, oa);
if (!(oa->o_flags & OBD_FL_SHRINK_GRANT))
oa->o_grant = 0;
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
}
iobuf = filter_iobuf_get(&obd->u.filter, oti);
fsfilt_check_slow(obd, now, "preprw_read setup");
/* find pages for all segments, fill array with them */
- do_gettimeofday(&start);
+ cfs_gettimeofday(&start);
for (i = 0, lnb = res; i < *npages; i++, lnb++) {
lnb->dentry = dentry;
lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_MISS, 1);
filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
}
- do_gettimeofday(&end);
+ cfs_gettimeofday(&end);
timediff = cfs_timeval_sub(&end, &start, NULL);
lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
fed->fed_dirty, fed->fed_pending, fed->fed_grant);
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
LBUG();
}
return rc;
fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_gr);
LASSERT(oa != NULL);
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
filter_grant_incoming(exp, oa);
if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
left, 1);
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
filter_fmd_put(exp, fmd);
if (rc)
* multiple writes or single truncate. */
down_read(&dentry->d_inode->i_alloc_sem);
- do_gettimeofday(&start);
+ cfs_gettimeofday(&start);
for (i = 0, lnb = res; i < *npages; i++, lnb++) {
/* We still set up for ungranted pages so that granted pages
if (lnb->rc == 0)
tot_bytes += lnb->len;
}
- do_gettimeofday(&end);
+ cfs_gettimeofday(&end);
timediff = cfs_timeval_sub(&end, &start, NULL);
lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
case 1:
filter_iobuf_put(&obd->u.filter, iobuf, oti);
case 0:
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
if (oa)
filter_grant_incoming(exp, oa);
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
break;
default:;
unsigned long pending = 0;
int i;
- spin_lock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
for (i = 0, lnb = res; i < niocount; i++, lnb++)
pending += lnb->lnb_grant_used;
filter->fo_tot_pending, pending);
filter->fo_tot_pending -= pending;
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
}
int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
/* 512byte block min */
#define MAX_BLOCKS_PER_PAGE (CFS_PAGE_SIZE / 512)
struct filter_iobuf {
- atomic_t dr_numreqs; /* number of reqs being processed */
- wait_queue_head_t dr_wait;
- int dr_max_pages;
- int dr_npages;
- int dr_error;
- struct page **dr_pages;
- unsigned long *dr_blocks;
- unsigned int dr_ignore_quota:1;
+ cfs_atomic_t dr_numreqs; /* number of reqs being processed */
+ cfs_waitq_t dr_wait;
+ int dr_max_pages;
+ int dr_npages;
+ int dr_error;
+ struct page **dr_pages;
+ unsigned long *dr_blocks;
+ unsigned int dr_ignore_quota:1;
struct filter_obd *dr_filter;
};
{
struct filter_obd *filter = iobuf->dr_filter;
- atomic_inc(&iobuf->dr_numreqs);
+ cfs_atomic_inc(&iobuf->dr_numreqs);
if (rw == OBD_BRW_READ) {
- atomic_inc(&filter->fo_r_in_flight);
+ cfs_atomic_inc(&filter->fo_r_in_flight);
lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_R_RPC_HIST],
- atomic_read(&filter->fo_r_in_flight));
+ cfs_atomic_read(&filter->fo_r_in_flight));
lprocfs_oh_tally_log2(&filter->
fo_filter_stats.hist[BRW_R_DISK_IOSIZE],
size);
if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
- hist[BRW_R_RPC_HIST],
- atomic_read(&filter->fo_r_in_flight));
+ hist[BRW_R_RPC_HIST],
+ cfs_atomic_read(&filter-> \
+ fo_r_in_flight));
lprocfs_oh_tally_log2(&exp->exp_nid_stats->
nid_brw_stats->hist[BRW_R_DISK_IOSIZE],
size);
}
} else {
- atomic_inc(&filter->fo_w_in_flight);
+ cfs_atomic_inc(&filter->fo_w_in_flight);
lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_W_RPC_HIST],
- atomic_read(&filter->fo_w_in_flight));
+ cfs_atomic_read(&filter->fo_w_in_flight));
lprocfs_oh_tally_log2(&filter->
fo_filter_stats.hist[BRW_W_DISK_IOSIZE],
size);
if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
hist[BRW_W_RPC_HIST],
- atomic_read(&filter->fo_r_in_flight));
+ cfs_atomic_read(&filter-> \
+ fo_r_in_flight));
lprocfs_oh_tally_log2(&exp->exp_nid_stats->
nid_brw_stats->hist[BRW_W_DISK_IOSIZE],
size);
* DO NOT record procfs stats here!!! */
if (rw == OBD_BRW_READ)
- atomic_dec(&filter->fo_r_in_flight);
+ cfs_atomic_dec(&filter->fo_r_in_flight);
else
- atomic_dec(&filter->fo_w_in_flight);
+ cfs_atomic_dec(&filter->fo_w_in_flight);
- if (atomic_dec_and_test(&iobuf->dr_numreqs))
- wake_up(&iobuf->dr_wait);
+ if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
+ cfs_waitq_signal(&iobuf->dr_wait);
}
static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
"bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
"bi_private: %p\n", bio->bi_next, bio->bi_flags,
bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- bio->bi_end_io, atomic_read(&bio->bi_cnt),
+ bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
bio->bi_private);
return 0;
}
/* the check is outside of the cycle for performance reason -bzzz */
- if (!test_bit(BIO_RW, &bio->bi_rw)) {
+ if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
bio_for_each_segment(bvl, bio, i) {
if (likely(error == 0))
SetPageUptodate(bvl->bv_page);
goto failed_2;
iobuf->dr_filter = filter;
- init_waitqueue_head(&iobuf->dr_wait);
- atomic_set(&iobuf->dr_numreqs, 0);
+ cfs_waitq_init(&iobuf->dr_wait);
+ cfs_atomic_set(&iobuf->dr_numreqs, 0);
iobuf->dr_max_pages = num_pages;
iobuf->dr_npages = 0;
iobuf->dr_error = 0;
{
iobuf->dr_npages = 0;
iobuf->dr_error = 0;
- atomic_set(&iobuf->dr_numreqs, 0);
+ cfs_atomic_set(&iobuf->dr_numreqs, 0);
}
void filter_free_iobuf(struct filter_iobuf *iobuf)
}
out:
- wait_event(iobuf->dr_wait, atomic_read(&iobuf->dr_numreqs) == 0);
+ cfs_wait_event(iobuf->dr_wait,
+ cfs_atomic_read(&iobuf->dr_numreqs) == 0);
if (rw == OBD_BRW_READ) {
lprocfs_oh_tally(&obd->u.filter.fo_filter_stats.
struct inode *inode = dchild->d_inode;
int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
int rc, rc2, create;
- struct semaphore *sem;
+ cfs_semaphore_t *sem;
ENTRY;
LASSERTF(iobuf->dr_npages <= iobuf->dr_max_pages, "%d,%d\n",
RETURN(LLOG_PROC_BREAK);
if (rec == NULL) {
- spin_lock_bh(&ctxt->loc_obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&ctxt->loc_obd->obd_processing_task_lock);
ctxt->loc_obd->u.filter.fo_mds_ost_sync = 0;
- spin_unlock_bh(&ctxt->loc_obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&ctxt->loc_obd->obd_processing_task_lock);
RETURN(0);
}
LASSERT(res);
- down(&res->lr_lvb_sem);
+ cfs_down(&res->lr_lvb_sem);
lvb = res->lr_lvb_data;
if (lvb == NULL) {
CERROR("No lvb when running lvbo_update!\n");
f_dput(dentry);
out:
- up(&res->lr_lvb_sem);
+ cfs_up(&res->lr_lvb_sem);
return rc;
}
struct obd_device *obd = data;
int rc;
- rc = snprintf(page, count, "%u\n", obd->u.filter.fo_fmd_max_age / HZ);
+ rc = snprintf(page, count, "%u\n",
+ obd->u.filter.fo_fmd_max_age / CFS_HZ);
return rc;
}
if (val > 65536 || val < 1)
return -EINVAL;
- obd->u.filter.fo_fmd_max_age = val * HZ;
+ obd->u.filter.fo_fmd_max_age = val * CFS_HZ;
return count;
}
if (rc)
return rc;
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
obd->u.filter.fo_read_cache = val;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
return count;
}
if (rc)
return rc;
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
obd->u.filter.fo_writethrough_cache = val;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
return count;
}
if (rc)
return rc;
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
obd->u.filter.fo_raid_degraded = !!val;
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
return count;
}
struct timeval now;
/* this sampling races with updates */
- do_gettimeofday(&now);
+ cfs_gettimeofday(&now);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
now.tv_sec, now.tv_usec);
{
char title[24];
- sprintf(title, "I/O time (1/%ds)", HZ);
+ sprintf(title, "I/O time (1/%ds)", CFS_HZ);
display_brw_stats(seq, title, "ios",
&brw_stats->hist[BRW_R_IO_TIME],
&brw_stats->hist[BRW_W_IO_TIME], 1);
if (pages_number < 0 ||
pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - CFS_PAGE_SHIFT) ||
- pages_number > num_physpages / 4) /* 1/4 of RAM */
+ pages_number > cfs_num_physpages / 4) /* 1/4 of RAM */
return -ERANGE;
client_obd_list_lock(&cli->cl_loi_list_lock);
if (count > sizeof(kernbuf) - 1)
return -EINVAL;
- if (copy_from_user(kernbuf, buffer, count))
+ if (cfs_copy_from_user(kernbuf, buffer, count))
return -EFAULT;
if (count > 0 && kernbuf[count - 1] == '\n')
kernbuf[count - 1] = '\0';
{
struct obd_device *obd = data;
- return snprintf(page, count, "%u\n", atomic_read(&obd->u.cli.cl_resends));
+ return snprintf(page, count, "%u\n",
+ cfs_atomic_read(&obd->u.cli.cl_resends));
}
static int osc_wr_resend_count(struct file *file, const char *buffer,
if (val < 0)
return -EINVAL;
- atomic_set(&obd->u.cli.cl_resends, val);
+ cfs_atomic_set(&obd->u.cli.cl_resends, val);
return count;
}
{
struct obd_device *obd = data;
return snprintf(page, count, "%u\n",
- atomic_read(&obd->u.cli.cl_destroy_in_flight));
+ cfs_atomic_read(&obd->u.cli.cl_destroy_in_flight));
}
static struct lprocfs_vars lprocfs_osc_obd_vars[] = {
unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
int i;
- do_gettimeofday(&now);
+ cfs_gettimeofday(&now);
client_obd_list_lock(&cli->cl_loi_list_lock);
struct obd_device *dev = seq->private;
struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
- do_gettimeofday(&now);
+ cfs_gettimeofday(&now);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
now.tv_sec, now.tv_usec);
struct obdo oi_oa;
struct osc_punch_cbargs {
int opc_rc;
- struct completion opc_sync;
+ cfs_completion_t opc_sync;
} oi_punch_cbarg;
};
*/
struct cl_io oo_debug_io;
/** Serialization object for osc_object::oo_debug_io. */
- struct mutex oo_debug_mutex;
+ cfs_mutex_t oo_debug_mutex;
#endif
/**
* List of pages in transfer.
*/
- struct list_head oo_inflight[CRT_NR];
+ cfs_list_t oo_inflight[CRT_NR];
/**
* Lock, protecting ccc_object::cob_inflight, because a seat-belt is
* locked during take-off and landing.
*/
- spinlock_t oo_seatbelt;
+ cfs_spinlock_t oo_seatbelt;
};
/*
* Linkage into a per-osc_object list of pages in flight. For
* debugging.
*/
- struct list_head ops_inflight;
+ cfs_list_t ops_inflight;
/**
* Thread that submitted this page for transfer. For debugging.
*/
oscc = req->rq_async_args.pointer_arg[0];
LASSERT(oscc && (oscc->oscc_obd != LP_POISON));
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
oscc->oscc_flags &= ~OSCC_FLAG_CREATING;
switch (rc) {
case 0: {
}
oscc->oscc_last_id = body->oa.o_id;
}
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
break;
}
case -EROFS:
oscc->oscc_grow_count = OST_MIN_PRECREATE;
}
}
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
DEBUG_REQ(D_INODE, req, "OST out of space, flagging");
break;
case -EIO: {
* of filter (see filter_handle_precreate for detail)*/
if (body && body->oa.o_id > oscc->oscc_last_id)
oscc->oscc_last_id = body->oa.o_id;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
break;
}
case -EINTR:
* IMP_DISCONN event */
oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
/* oscc->oscc_grow_count = OST_MIN_PRECREATE; */
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
break;
}
default: {
oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
oscc->oscc_grow_count = OST_MIN_PRECREATE;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
DEBUG_REQ(D_ERROR, req,
"Unknown rc %d from async create: failing oscc", rc);
ptlrpc_fail_import(req->rq_import,
CDEBUG(D_HA, "preallocated through id "LPU64" (next to use "LPU64")\n",
oscc->oscc_last_id, oscc->oscc_next_id);
- spin_lock(&oscc->oscc_lock);
- list_for_each_entry_safe(fake_req, pos,
- &oscc->oscc_wait_create_list, rq_list) {
+ cfs_spin_lock(&oscc->oscc_lock);
+ cfs_list_for_each_entry_safe(fake_req, pos,
+ &oscc->oscc_wait_create_list, rq_list) {
if (handle_async_create(fake_req, rc) == -EAGAIN) {
oscc_internal_create(oscc);
/* sending request should be never fail because
GOTO(exit_wakeup, rc);
}
}
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
exit_wakeup:
cfs_waitq_signal(&oscc->oscc_waitq);
if ((oscc->oscc_flags & OSCC_FLAG_RECOVERING) ||
(oscc->oscc_flags & OSCC_FLAG_DEGRADED)) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(0);
}
}
if (oscc->oscc_flags & OSCC_FLAG_CREATING) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(0);
}
oscc->oscc_grow_count = oscc->oscc_max_grow_count / 2;
oscc->oscc_flags |= OSCC_FLAG_CREATING;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
request = ptlrpc_request_alloc_pack(oscc->oscc_obd->u.cli.cl_import,
&RQF_OST_CREATE,
LUSTRE_OST_VERSION, OST_CREATE);
if (request == NULL) {
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
oscc->oscc_flags &= ~OSCC_FLAG_CREATING;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(-ENOMEM);
}
ptlrpc_at_set_req_timeout(request);
body = req_capsule_client_get(&request->rq_pill, &RMF_OST_BODY);
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
body->oa.o_id = oscc->oscc_last_id + oscc->oscc_grow_count;
body->oa.o_gr = oscc->oscc_oa.o_gr;
LASSERT_MDS_GROUP(body->oa.o_gr);
body->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
request->rq_async_args.space[0] = oscc->oscc_grow_count;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
CDEBUG(D_RPCTRACE, "prealloc through id "LPU64" (last seen "LPU64")\n",
body->oa.o_id, oscc->oscc_last_id);
{
int have_objs;
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
have_objs = oscc_has_objects_nolock(oscc, count);
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
return have_objs;
}
ost_unusable = oscc->oscc_obd->u.cli.cl_import->imp_invalid;
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
ost_unusable |= (OSCC_FLAG_NOSPC | OSCC_FLAG_RDONLY |
OSCC_FLAG_EXITING) & oscc->oscc_flags;
have_objs = oscc_has_objects_nolock(oscc, count);
/* they release lock himself */
have_objs = oscc_internal_create(oscc);
else
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
return have_objs || ost_unusable;
}
{
int sync;
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
sync = oscc->oscc_flags & OSCC_FLAG_SYNC_IN_PROGRESS;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
return sync;
}
RETURN(1000);
/* Handle critical states first */
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
if (oscc->oscc_flags & OSCC_FLAG_NOSPC ||
oscc->oscc_flags & OSCC_FLAG_RDONLY ||
oscc->oscc_flags & OSCC_FLAG_EXITING) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(1000);
}
if (oscc->oscc_flags & OSCC_FLAG_RECOVERING ||
oscc->oscc_flags & OSCC_FLAG_DEGRADED) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(2);
}
if (oscc_has_objects_nolock(oscc, oscc->oscc_grow_count / 2)) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(0);
}
if ((oscc->oscc_flags & OSCC_FLAG_SYNC_IN_PROGRESS) ||
(oscc->oscc_flags & OSCC_FLAG_CREATING)) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(1);
}
}
static int async_create_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
+ struct ptlrpc_request *req, void *data,
+ int rc)
{
struct osc_create_async_args *args = ptlrpc_req_async_args(req);
struct osc_creator *oscc = args->rq_oscc;
int ret;
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
ret = handle_async_create(req, rc);
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
return ret;
}
args->rq_lsm = *ea;
args->rq_oinfo = oinfo;
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
/* try fast path */
rc = handle_async_create(fake_req, 0);
if (rc == -EAGAIN) {
/* we not have objects - try wait */
is_add = ptlrpcd_add_req(fake_req, PSCOPE_OTHER);
if (!is_add)
- list_add(&fake_req->rq_list,
- &oscc->oscc_wait_create_list);
+ cfs_list_add(&fake_req->rq_list,
+ &oscc->oscc_wait_create_list);
else
rc = is_add;
}
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
if (rc != -EAGAIN)
/* need free request if was error hit or
/* this is the special case where create removes orphans */
if (oa->o_valid & OBD_MD_FLFLAGS &&
oa->o_flags == OBD_FL_DELORPHAN) {
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
if (oscc->oscc_flags & OSCC_FLAG_SYNC_IN_PROGRESS) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(-EBUSY);
}
if (!(oscc->oscc_flags & OSCC_FLAG_RECOVERING)) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
RETURN(0);
}
/* seting flag LOW we prevent extra grow precreate size
* and enforce use last assigned size */
oscc->oscc_flags |= OSCC_FLAG_LOW;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
CDEBUG(D_HA, "%s: oscc recovery started - delete to "LPU64"\n",
oscc->oscc_obd->obd_name, oscc->oscc_next_id - 1);
rc = osc_real_create(exp, oa, ea, NULL);
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
oscc->oscc_flags &= ~OSCC_FLAG_SYNC_IN_PROGRESS;
if (rc == 0 || rc == -ENOSPC) {
struct obd_connect_data *ocd;
}
cfs_waitq_signal(&oscc->oscc_waitq);
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
if (rc < 0)
RETURN(rc);
CDEBUG(D_HA,"%s: error create %d\n",
oscc->oscc_obd->obd_name, rc);
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
/* wakeup but recovery did not finished */
if ((oscc->oscc_obd->u.cli.cl_import->imp_invalid) ||
(oscc->oscc_flags & OSCC_FLAG_RECOVERING)) {
rc = -EIO;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
break;
}
if (oscc->oscc_flags & OSCC_FLAG_NOSPC) {
rc = -ENOSPC;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
break;
}
if (oscc->oscc_flags & OSCC_FLAG_RDONLY) {
rc = -EROFS;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
break;
}
// Should we report -EIO error ?
if (oscc->oscc_flags & OSCC_FLAG_EXITING) {
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
break;
}
lsm->lsm_object_id = oscc->oscc_next_id;
*ea = lsm;
oscc->oscc_next_id++;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
CDEBUG(D_RPCTRACE, "%s: set oscc_next_id = "LPU64"\n",
exp->exp_obd->obd_name, oscc->oscc_next_id);
break;
}
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
}
if (rc == 0) {
memset(oscc, 0, sizeof(*oscc));
cfs_waitq_init(&oscc->oscc_waitq);
- spin_lock_init(&oscc->oscc_lock);
+ cfs_spin_lock_init(&oscc->oscc_lock);
oscc->oscc_obd = obd;
oscc->oscc_grow_count = OST_MIN_PRECREATE;
oscc->oscc_max_grow_count = OST_MAX_PRECREATE;
ENTRY;
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
oscc->oscc_flags |= OSCC_FLAG_EXITING;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
}
}
};
-struct lock_class_key osc_ast_guard_class;
+cfs_lock_class_key_t osc_ast_guard_class;
/*****************************************************************************
*
unsigned short oap_cmd;
unsigned short oap_interrupted:1;
- struct list_head oap_pending_item;
- struct list_head oap_urgent_item;
- struct list_head oap_rpc_item;
+ cfs_list_t oap_pending_item;
+ cfs_list_t oap_urgent_item;
+ cfs_list_t oap_rpc_item;
obd_off oap_obj_off;
unsigned oap_page_off;
const struct obd_async_page_ops *oap_caller_ops;
void *oap_caller_data;
- struct list_head oap_page_list;
+ cfs_list_t oap_page_list;
struct ldlm_lock *oap_ldlm_lock;
- spinlock_t oap_lock;
+ cfs_spinlock_t oap_lock;
};
#define oap_page oap_brw_page.pg
#define oap_brw_flags oap_brw_page.flag
struct osc_cache_waiter {
- struct list_head ocw_entry;
+ cfs_list_t ocw_entry;
cfs_waitq_t ocw_waitq;
- struct osc_async_page *ocw_oap;
+ struct osc_async_page *ocw_oap;
int ocw_rc;
};
struct osc_async_page *oap, int transient);
struct cl_page *osc_oap2cl_page(struct osc_async_page *oap);
-extern spinlock_t osc_ast_guard;
+extern cfs_spinlock_t osc_ast_guard;
int osc_cleanup(struct obd_device *obd);
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
/* return 1 if osc should be resend request */
static inline int osc_should_resend(int resend, struct client_obd *cli)
{
- return atomic_read(&cli->cl_resends) ?
- atomic_read(&cli->cl_resends) > resend : 1;
+ return cfs_atomic_read(&cli->cl_resends) ?
+ cfs_atomic_read(&cli->cl_resends) > resend : 1;
}
#ifndef min_t
exp = osc_export(osc);
if (priority > CRP_NORMAL) {
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_HP;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
}
/*
* This can be checked without cli->cl_loi_list_lock, because
* ->oap_*_item are always manipulated when the page is owned.
*/
- if (!list_empty(&oap->oap_urgent_item) ||
- !list_empty(&oap->oap_rpc_item)) {
+ if (!cfs_list_empty(&oap->oap_urgent_item) ||
+ !cfs_list_empty(&oap->oap_rpc_item)) {
result = -EBUSY;
break;
}
result = cl_page_prep(env, io, page, crt);
if (result == 0) {
cl_page_list_move(qout, qin, page);
- if (list_empty(&oap->oap_pending_item)) {
+ if (cfs_list_empty(&oap->oap_pending_item)) {
osc_io_submit_page(env, cl2osc_io(env, ios),
opg, crt);
} else {
struct osc_punch_cbargs *args = a;
args->opc_rc = rc;
- complete(&args->opc_sync);
+ cfs_complete(&args->opc_sync);
return 0;
}
cl_page_list_disown(env, io, list);
cl_page_list_fini(env, list);
- spin_lock(&obj->oo_seatbelt);
- list_for_each_entry(cp, &obj->oo_inflight[CRT_WRITE], ops_inflight) {
+ cfs_spin_lock(&obj->oo_seatbelt);
+ cfs_list_for_each_entry(cp, &obj->oo_inflight[CRT_WRITE],
+ ops_inflight) {
page = cp->ops_cl.cpl_page;
if (page->cp_index >= start + partial) {
cfs_task_t *submitter;
libcfs_debug_dumpstack(submitter);
}
}
- spin_unlock(&obj->oo_seatbelt);
+ cfs_spin_unlock(&obj->oo_seatbelt);
}
#else /* __KERNEL__ */
# define osc_trunc_check(env, io, oio, size) do {;} while (0)
oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
capa = io->u.ci_truncate.tr_capa;
- init_completion(&cbargs->opc_sync);
+ cfs_init_completion(&cbargs->opc_sync);
result = osc_punch_base(osc_export(cl2osc(obj)), oa, capa,
osc_punch_upcall, cbargs, PTLRPCD_SET);
}
struct obdo *oa = &oio->oi_oa;
int result;
- wait_for_completion(&cbargs->opc_sync);
+ cfs_wait_for_completion(&cbargs->opc_sync);
result = io->ci_result = cbargs->opc_rc;
if (result == 0) {
}
if (flags & OBD_MD_FLHANDLE) {
clerq = slice->crs_req;
- LASSERT(!list_empty(&clerq->crq_pages));
+ LASSERT(!cfs_list_empty(&clerq->crq_pages));
apage = container_of(clerq->crq_pages.next,
struct cl_page, cp_flight);
opg = osc_cl_page_osc(apage);
struct cl_lock *scan;
head = cl_object_header(apage->cp_obj);
- list_for_each_entry(scan, &head->coh_locks, cll_linkage)
+ cfs_list_for_each_entry(scan, &head->coh_locks,
+ cll_linkage)
CL_LOCK_DEBUG(D_ERROR, env, scan,
"no cover page!\n");
CL_PAGE_DEBUG(D_ERROR, env, apage,
{
struct ldlm_lock *dlmlock;
- spin_lock(&osc_ast_guard);
+ cfs_spin_lock(&osc_ast_guard);
dlmlock = olck->ols_lock;
if (dlmlock == NULL) {
- spin_unlock(&osc_ast_guard);
+ cfs_spin_unlock(&osc_ast_guard);
return;
}
* call to osc_lock_detach() */
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
+ cfs_spin_unlock(&osc_ast_guard);
lock_res_and_lock(dlmlock);
if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
* Global spin-lock protecting consistency of ldlm_lock::l_ast_data
* pointers. Initialized in osc_init().
*/
-spinlock_t osc_ast_guard;
+cfs_spinlock_t osc_ast_guard;
static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
{
struct osc_lock *olck;
lock_res_and_lock(dlm_lock);
- spin_lock(&osc_ast_guard);
+ cfs_spin_lock(&osc_ast_guard);
olck = dlm_lock->l_ast_data;
if (olck != NULL) {
struct cl_lock *lock = olck->ols_cl.cls_lock;
} else
olck = NULL;
}
- spin_unlock(&osc_ast_guard);
+ cfs_spin_unlock(&osc_ast_guard);
unlock_res_and_lock(dlm_lock);
return olck;
}
LASSERT(dlmlock != NULL);
lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
+ cfs_spin_lock(&osc_ast_guard);
LASSERT(dlmlock->l_ast_data == olck);
LASSERT(olck->ols_lock == NULL);
olck->ols_lock = dlmlock;
- spin_unlock(&osc_ast_guard);
+ cfs_spin_unlock(&osc_ast_guard);
/*
* Lock might be not yet granted. In this case, completion ast
dlmlock = ldlm_handle2lock(&olck->ols_handle);
if (dlmlock != NULL) {
lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
+ cfs_spin_lock(&osc_ast_guard);
LASSERT(olck->ols_lock == NULL);
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
+ cfs_spin_unlock(&osc_ast_guard);
unlock_res_and_lock(dlmlock);
LDLM_LOCK_PUT(dlmlock);
}
unsigned long weight;
ENTRY;
- might_sleep();
+ cfs_might_sleep();
/*
* osc_ldlm_weigh_ast has a complex context since it might be called
* because of lock canceling, or from user's input. We have to make
return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
}
-#ifndef list_for_each_entry_continue
-#define list_for_each_entry_continue(pos, head, member) \
- for (pos = list_entry(pos->member.next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
-#endif
-
/**
* Cancel all conflicting locks and wait for them to be destroyed.
*
if (olck->ols_glimpse)
return 0;
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
+ cfs_spin_lock(&hdr->coh_lock_guard);
+ cfs_list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
struct cl_lock_descr *cld = &scan->cll_descr;
const struct osc_lock *scan_ols;
conflict = scan;
break;
}
- spin_unlock(&hdr->coh_lock_guard);
+ cfs_spin_unlock(&hdr->coh_lock_guard);
if (conflict) {
CDEBUG(D_DLMTRACE, "lock %p is confliced with %p, will wait\n",
head = cl_object_header(obj);
result = 0;
- spin_lock(&head->coh_lock_guard);
- list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+ cfs_spin_lock(&head->coh_lock_guard);
+ cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
if (scan != lock) {
struct osc_lock *oscan;
}
}
}
- spin_unlock(&head->coh_lock_guard);
+ cfs_spin_unlock(&head->coh_lock_guard);
RETURN(result);
}
plist = &osc_env_info(env)->oti_plist;
cl_page_list_init(plist);
- mutex_lock(&oob->oo_debug_mutex);
+ cfs_mutex_lock(&oob->oo_debug_mutex);
io->ci_obj = cl_object_top(obj);
cl_io_init(env, io, CIT_MISC, io->ci_obj);
cl_page_list_disown(env, io, plist);
cl_page_list_fini(env, plist);
cl_io_fini(env, io);
- mutex_unlock(&oob->oo_debug_mutex);
+ cfs_mutex_unlock(&oob->oo_debug_mutex);
cl_env_nested_put(&nest, env);
} else
result = 0;
osc->oo_oinfo = cconf->u.coc_oinfo;
#ifdef INVARIANT_CHECK
- mutex_init(&osc->oo_debug_mutex);
+ cfs_mutex_init(&osc->oo_debug_mutex);
#endif
- spin_lock_init(&osc->oo_seatbelt);
+ cfs_spin_lock_init(&osc->oo_seatbelt);
for (i = 0; i < CRT_NR; ++i)
CFS_INIT_LIST_HEAD(&osc->oo_inflight[i]);
return 0;
int i;
for (i = 0; i < CRT_NR; ++i)
- LASSERT(list_empty(&osc->oo_inflight[i]));
+ LASSERT(cfs_list_empty(&osc->oo_inflight[i]));
lu_object_fini(obj);
OBD_SLAB_FREE_PTR(osc, osc_object_kmem);
ldlm_mode_t dlmmode;
int flags;
- might_sleep();
+ cfs_might_sleep();
info = osc_env_info(env);
resname = &info->oti_resname;
descr->cld_mode = mode;
descr->cld_start = page->cp_index;
descr->cld_end = page->cp_index;
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
+ cfs_spin_lock(&hdr->coh_lock_guard);
+ cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
/*
* Lock-less sub-lock has to be either in HELD state
* (when io is actively going on), or in CACHED state,
break;
}
}
- spin_unlock(&hdr->coh_lock_guard);
+ cfs_spin_unlock(&hdr->coh_lock_guard);
}
return result;
}
LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));
obj = cl2osc(opg->ops_cl.cpl_obj);
- spin_lock(&obj->oo_seatbelt);
- list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+ cfs_spin_lock(&obj->oo_seatbelt);
+ cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
opg->ops_submitter = cfs_current();
- spin_unlock(&obj->oo_seatbelt);
+ cfs_spin_unlock(&obj->oo_seatbelt);
}
static int osc_page_cache_add(const struct lu_env *env,
}
-static const char *osc_list(struct list_head *head)
+static const char *osc_list(cfs_list_t *head)
{
- return list_empty(head) ? "-" : "+";
+ return cfs_list_empty(head) ? "-" : "+";
}
static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
"Trying to teardown failed: %d\n", rc);
LASSERT(0);
}
- spin_lock(&obj->oo_seatbelt);
- list_del_init(&opg->ops_inflight);
- spin_unlock(&obj->oo_seatbelt);
+ cfs_spin_lock(&obj->oo_seatbelt);
+ cfs_list_del_init(&opg->ops_inflight);
+ cfs_spin_unlock(&obj->oo_seatbelt);
EXIT;
}
opg->ops_from = from;
opg->ops_to = to;
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
}
static int osc_page_cancel(const struct lu_env *env,
LASSERT(page->cp_req == NULL);
/* As the transfer for this page is being done, clear the flags */
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags = 0;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
- spin_lock(&obj->oo_seatbelt);
+ cfs_spin_lock(&obj->oo_seatbelt);
LASSERT(opg->ops_submitter != NULL);
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- spin_unlock(&obj->oo_seatbelt);
+ LASSERT(!cfs_list_empty(&opg->ops_inflight));
+ cfs_list_del_init(&opg->ops_inflight);
+ cfs_spin_unlock(&obj->oo_seatbelt);
opg->ops_submit_time = 0;
else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags |= OSC_FLAGS | flags;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
osc_oap_to_pending(oap);
osc_page_transfer_get(opg, "transfer\0imm");
* @objid. Found locks are added into @cancel list. Returns the amount of
* locks added to @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
- struct list_head *cancels, ldlm_mode_t mode,
- int lock_flags)
+ cfs_list_t *cancels,
+ ldlm_mode_t mode, int lock_flags)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
struct ldlm_res_id res_id;
{
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- atomic_dec(&cli->cl_destroy_in_flight);
+ cfs_atomic_dec(&cli->cl_destroy_in_flight);
cfs_waitq_signal(&cli->cl_destroy_waitq);
return 0;
}
static int osc_can_send_destroy(struct client_obd *cli)
{
- if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
+ if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
cli->cl_max_rpcs_in_flight) {
/* The destroy request can be sent */
return 1;
}
- if (atomic_dec_return(&cli->cl_destroy_in_flight) <
+ if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
cli->cl_max_rpcs_in_flight) {
/*
* The counter has been modified between the two atomic
CERROR("dirty %lu - %lu > dirty_max %lu\n",
cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
oa->o_undirty = 0;
- } else if (atomic_read(&obd_dirty_pages) -
- atomic_read(&obd_dirty_transit_pages) > obd_max_dirty_pages + 1){
- /* The atomic_read() allowing the atomic_inc() are not covered
- * by a lock thus they may safely race and trip this CERROR()
- * unless we add in a small fudge factor (+1). */
+ } else if (cfs_atomic_read(&obd_dirty_pages) -
+ cfs_atomic_read(&obd_dirty_transit_pages) >
+ obd_max_dirty_pages + 1){
+ /* The cfs_atomic_read() allowing the cfs_atomic_inc() are
+ * not covered by a lock thus they may safely race and trip
+ * this CERROR() unless we add in a small fudge factor (+1). */
CERROR("dirty %d - %d > system dirty_max %d\n",
- atomic_read(&obd_dirty_pages),
- atomic_read(&obd_dirty_transit_pages),
+ cfs_atomic_read(&obd_dirty_pages),
+ cfs_atomic_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
} else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
{
LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- atomic_inc(&obd_dirty_pages);
+ cfs_atomic_inc(&obd_dirty_pages);
cli->cl_dirty += CFS_PAGE_SIZE;
cli->cl_avail_grant -= CFS_PAGE_SIZE;
pga->flag |= OBD_BRW_FROM_GRANT;
}
pga->flag &= ~OBD_BRW_FROM_GRANT;
- atomic_dec(&obd_dirty_pages);
+ cfs_atomic_dec(&obd_dirty_pages);
cli->cl_dirty -= CFS_PAGE_SIZE;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
- atomic_dec(&obd_dirty_transit_pages);
+ cfs_atomic_dec(&obd_dirty_transit_pages);
cli->cl_dirty_transit -= CFS_PAGE_SIZE;
}
if (!sent) {
/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
- struct list_head *l, *tmp;
+ cfs_list_t *l, *tmp;
struct osc_cache_waiter *ocw;
ENTRY;
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+ cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
/* if we can't dirty more, we must wait until some is written */
if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
- (atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) {
+ (cfs_atomic_read(&obd_dirty_pages) + 1 >
+ obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
"osc max %ld, sys max %d\n", cli->cl_dirty,
cli->cl_dirty_max, obd_max_dirty_pages);
return;
}
- ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
- list_del_init(&ocw->ocw_entry);
+ ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry);
+ cfs_list_del_init(&ocw->ocw_entry);
if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
/* no more RPCs in flight to return grant, do sync IO */
ocw->ocw_rc = -EDQUOT;
{
struct client_obd *client;
- list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
+ cfs_list_for_each_entry(client, &item->ti_obd_list,
+ cl_grant_shrink_list) {
if (osc_should_shrink_grant(client))
osc_shrink_grant(client);
}
LASSERT(cli->cl_avail_grant >= 0);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
- list_empty(&cli->cl_grant_shrink_list))
+ cfs_list_empty(&cli->cl_grant_shrink_list))
osc_add_shrink_grant(cli);
}
client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
- list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
+ cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
if (oap->oap_request != NULL) {
LASSERTF(request == oap->oap_request,
"request %p != oap_request %p\n",
new_aa = ptlrpc_req_async_args(new_req);
CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
- list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
+ cfs_list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
CFS_INIT_LIST_HEAD(&aa->aa_oaps);
- list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
+ cfs_list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
if (oap->oap_request) {
ptlrpc_req_finished(oap->oap_request);
oap->oap_request = ptlrpc_request_addref(new_req);
* queued. this is our cheap solution for good batching in the case
* where writepage marks some random page in the middle of the file
* as urgent because of, say, memory pressure */
- if (!list_empty(&lop->lop_urgent)) {
+ if (!cfs_list_empty(&lop->lop_urgent)) {
CDEBUG(D_CACHE, "urgent request forcing RPC\n");
RETURN(1);
}
/* trigger a write rpc stream as long as there are dirtiers
* waiting for space. as they're waiting, they're not going to
* create more pages to coallesce with what's waiting.. */
- if (!list_empty(&cli->cl_cache_waiters)) {
+ if (!cfs_list_empty(&cli->cl_cache_waiters)) {
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
RETURN(1);
}
struct osc_async_page *oap;
ENTRY;
- if (list_empty(&lop->lop_urgent))
+ if (cfs_list_empty(&lop->lop_urgent))
RETURN(0);
- oap = list_entry(lop->lop_urgent.next,
+ oap = cfs_list_entry(lop->lop_urgent.next,
struct osc_async_page, oap_urgent_item);
if (oap->oap_async_flags & ASYNC_HP) {
RETURN(0);
}
-static void on_list(struct list_head *item, struct list_head *list,
+static void on_list(cfs_list_t *item, cfs_list_t *list,
int should_be_on)
{
- if (list_empty(item) && should_be_on)
- list_add_tail(item, list);
- else if (!list_empty(item) && !should_be_on)
- list_del_init(item);
+ if (cfs_list_empty(item) && should_be_on)
+ cfs_list_add_tail(item, list);
+ else if (!cfs_list_empty(item) && !should_be_on)
+ cfs_list_del_init(item);
}
/* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
* page completion may be called only if ->cpo_prep() method was
* executed by osc_io_submit(), that also adds page the to pending list
*/
- if (!list_empty(&oap->oap_pending_item)) {
- list_del_init(&oap->oap_pending_item);
- list_del_init(&oap->oap_urgent_item);
+ if (!cfs_list_empty(&oap->oap_pending_item)) {
+ cfs_list_del_init(&oap->oap_pending_item);
+ cfs_list_del_init(&oap->oap_urgent_item);
loi = oap->oap_loi;
lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
lop = &oap->oap_loi->loi_read_lop;
if (oap->oap_async_flags & ASYNC_HP)
- list_add(&oap->oap_urgent_item, &lop->lop_urgent);
+ cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
else if (oap->oap_async_flags & ASYNC_URGENT)
- list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
- list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
+ cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
+ cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
}
oap->oap_request = NULL;
}
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags = 0;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE) {
else
cli->cl_r_in_flight--;
- async = list_empty(&aa->aa_oaps);
+ async = cfs_list_empty(&aa->aa_oaps);
if (!async) { /* from osc_send_oap_rpc() */
struct osc_async_page *oap, *tmp;
/* the caller may re-use the oap after the completion call so
* we need to clean it up a little */
- list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
- list_del_init(&oap->oap_rpc_item);
+ cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps,
+ oap_rpc_item) {
+ cfs_list_del_init(&oap->oap_rpc_item);
osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
}
OBDO_FREE(aa->aa_oa);
static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
struct client_obd *cli,
- struct list_head *rpc_list,
+ cfs_list_t *rpc_list,
int page_count, int cmd)
{
struct ptlrpc_request *req;
int i, rc;
ENTRY;
- LASSERT(!list_empty(rpc_list));
+ LASSERT(!cfs_list_empty(rpc_list));
memset(&crattr, 0, sizeof crattr);
OBD_ALLOC(pga, sizeof(*pga) * page_count);
GOTO(out, req = ERR_PTR(-ENOMEM));
i = 0;
- list_for_each_entry(oap, rpc_list, oap_rpc_item) {
+ cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) {
struct cl_page *page = osc_oap2cl_page(oap);
if (ops == NULL) {
ops = oap->oap_caller_ops;
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
CFS_INIT_LIST_HEAD(&aa->aa_oaps);
- list_splice(rpc_list, &aa->aa_oaps);
+ cfs_list_splice(rpc_list, &aa->aa_oaps);
CFS_INIT_LIST_HEAD(rpc_list);
aa->aa_clerq = clerq;
out:
/* this should happen rarely and is pretty bad, it makes the
* pending list not follow the dirty order */
client_obd_list_lock(&cli->cl_loi_list_lock);
- list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
- list_del_init(&oap->oap_rpc_item);
+ cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
+ cfs_list_del_init(&oap->oap_rpc_item);
/* queued sync pages can be torn down while the pages
* were between the pending list and the rpc */
/* ASYNC_HP pages first. At present, when the lock the pages is
* to be canceled, the pages covered by the lock will be sent out
* with ASYNC_HP. We have to send out them as soon as possible. */
- list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
+ cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
if (oap->oap_async_flags & ASYNC_HP)
- list_move(&oap->oap_pending_item, &tmp_list);
+ cfs_list_move(&oap->oap_pending_item, &tmp_list);
else
- list_move_tail(&oap->oap_pending_item, &tmp_list);
+ cfs_list_move_tail(&oap->oap_pending_item, &tmp_list);
if (++page_count >= cli->cl_max_pages_per_rpc)
break;
}
- list_splice(&tmp_list, &lop->lop_pending);
+ cfs_list_splice(&tmp_list, &lop->lop_pending);
page_count = 0;
/* first we find the pages we're allowed to work with */
- list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
- oap_pending_item) {
+ cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
+ oap_pending_item) {
ops = oap->oap_caller_ops;
LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
case -EINTR:
/* the io isn't needed.. tell the checks
* below to complete the rpc with EINTR */
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
oap->oap_count = -EINTR;
break;
case 0:
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_READY;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
break;
default:
LASSERTF(0, "oap %p page %p returned %d "
#endif
/* take the page out of our book-keeping */
- list_del_init(&oap->oap_pending_item);
+ cfs_list_del_init(&oap->oap_pending_item);
lop_update_pending(cli, lop, cmd, -1);
- list_del_init(&oap->oap_urgent_item);
+ cfs_list_del_init(&oap->oap_urgent_item);
if (page_count == 0)
starting_offset = (oap->oap_obj_off+oap->oap_page_off) &
}
/* now put the page back in our accounting */
- list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
if (page_count == 0)
srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
if (++page_count >= cli->cl_max_pages_per_rpc)
req = osc_build_req(env, cli, &rpc_list, page_count, cmd);
if (IS_ERR(req)) {
- LASSERT(list_empty(&rpc_list));
+ LASSERT(cfs_list_empty(&rpc_list));
loi_list_maint(cli, loi);
RETURN(PTR_ERR(req));
}
/* queued sync pages can be torn down while the pages
* were between the pending list and the rpc */
tmp = NULL;
- list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
+ cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
/* only one oap gets a request reference */
if (tmp == NULL)
tmp = oap;
#define LOI_DEBUG(LOI, STR, args...) \
CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
- !list_empty(&(LOI)->loi_ready_item) || \
- !list_empty(&(LOI)->loi_hp_ready_item), \
+ !cfs_list_empty(&(LOI)->loi_ready_item) || \
+ !cfs_list_empty(&(LOI)->loi_hp_ready_item), \
(LOI)->loi_write_lop.lop_num_pending, \
- !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
+ !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent), \
(LOI)->loi_read_lop.lop_num_pending, \
- !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
+ !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent), \
args) \
/* This is called by osc_check_rpcs() to find which objects have pages that
/* First return objects that have blocked locks so that they
* will be flushed quickly and other clients can get the lock,
* then objects which have pages ready to be stuffed into RPCs */
- if (!list_empty(&cli->cl_loi_hp_ready_list))
- RETURN(list_entry(cli->cl_loi_hp_ready_list.next,
- struct lov_oinfo, loi_hp_ready_item));
- if (!list_empty(&cli->cl_loi_ready_list))
- RETURN(list_entry(cli->cl_loi_ready_list.next,
- struct lov_oinfo, loi_ready_item));
+ if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
+ RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next,
+ struct lov_oinfo, loi_hp_ready_item));
+ if (!cfs_list_empty(&cli->cl_loi_ready_list))
+ RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
+ struct lov_oinfo, loi_ready_item));
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
* have filled up the cache and not been fired into rpcs because
* they don't pass the nr_pending/object threshhold */
- if (!list_empty(&cli->cl_cache_waiters) &&
- !list_empty(&cli->cl_loi_write_list))
- RETURN(list_entry(cli->cl_loi_write_list.next,
- struct lov_oinfo, loi_write_item));
+ if (!cfs_list_empty(&cli->cl_cache_waiters) &&
+ !cfs_list_empty(&cli->cl_loi_write_list))
+ RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
+ struct lov_oinfo, loi_write_item));
/* then return all queued objects when we have an invalid import
* so that they get flushed */
if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
- if (!list_empty(&cli->cl_loi_write_list))
- RETURN(list_entry(cli->cl_loi_write_list.next,
- struct lov_oinfo, loi_write_item));
- if (!list_empty(&cli->cl_loi_read_list))
- RETURN(list_entry(cli->cl_loi_read_list.next,
- struct lov_oinfo, loi_read_item));
+ if (!cfs_list_empty(&cli->cl_loi_write_list))
+ RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
+ struct lov_oinfo,
+ loi_write_item));
+ if (!cfs_list_empty(&cli->cl_loi_read_list))
+ RETURN(cfs_list_entry(cli->cl_loi_read_list.next,
+ struct lov_oinfo, loi_read_item));
}
RETURN(NULL);
}
struct osc_async_page *oap;
int hprpc = 0;
- if (!list_empty(&loi->loi_write_lop.lop_urgent)) {
- oap = list_entry(loi->loi_write_lop.lop_urgent.next,
- struct osc_async_page, oap_urgent_item);
+ if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) {
+ oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next,
+ struct osc_async_page, oap_urgent_item);
hprpc = !!(oap->oap_async_flags & ASYNC_HP);
}
- if (!hprpc && !list_empty(&loi->loi_read_lop.lop_urgent)) {
- oap = list_entry(loi->loi_read_lop.lop_urgent.next,
- struct osc_async_page, oap_urgent_item);
+ if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) {
+ oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next,
+ struct osc_async_page, oap_urgent_item);
hprpc = !!(oap->oap_async_flags & ASYNC_HP);
}
/* attempt some inter-object balancing by issueing rpcs
* for each object in turn */
- if (!list_empty(&loi->loi_hp_ready_item))
- list_del_init(&loi->loi_hp_ready_item);
- if (!list_empty(&loi->loi_ready_item))
- list_del_init(&loi->loi_ready_item);
- if (!list_empty(&loi->loi_write_item))
- list_del_init(&loi->loi_write_item);
- if (!list_empty(&loi->loi_read_item))
- list_del_init(&loi->loi_read_item);
+ if (!cfs_list_empty(&loi->loi_hp_ready_item))
+ cfs_list_del_init(&loi->loi_hp_ready_item);
+ if (!cfs_list_empty(&loi->loi_ready_item))
+ cfs_list_del_init(&loi->loi_ready_item);
+ if (!cfs_list_empty(&loi->loi_write_item))
+ cfs_list_del_init(&loi->loi_write_item);
+ if (!cfs_list_empty(&loi->loi_read_item))
+ cfs_list_del_init(&loi->loi_read_item);
loi_list_maint(cli, loi);
int rc;
ENTRY;
client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
+ rc = cfs_list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
client_obd_list_unlock(&cli->cl_loi_list_lock);
RETURN(rc);
};
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
cli->cl_dirty_transit += CFS_PAGE_SIZE;
- atomic_inc(&obd_dirty_transit_pages);
+ cfs_atomic_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
}
ENTRY;
CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
- "grant: %lu\n", cli->cl_dirty, atomic_read(&obd_dirty_pages),
+ "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages),
cli->cl_dirty_max, obd_max_dirty_pages,
cli->cl_lost_grant, cli->cl_avail_grant);
/* Hopefully normal case - cache space and write credits available */
if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
- atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
+ cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
osc_enter_cache_try(env, cli, loi, oap, 0))
RETURN(0);
* is a little silly as this object may not have any pending but
* other objects sure might. */
if (cli->cl_w_in_flight) {
- list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
+ cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
cfs_waitq_init(&ocw.ocw_waitq);
ocw.ocw_oap = oap;
ocw.ocw_rc = 0;
l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
client_obd_list_lock(&cli->cl_loi_list_lock);
- if (!list_empty(&ocw.ocw_entry)) {
- list_del(&ocw.ocw_entry);
+ if (!cfs_list_empty(&ocw.ocw_entry)) {
+ cfs_list_del(&ocw.ocw_entry);
RETURN(-EINTR);
}
RETURN(ocw.ocw_rc);
ENTRY;
if (!page)
- return size_round(sizeof(*oap));
+ return cfs_size_round(sizeof(*oap));
oap = *res;
oap->oap_magic = OAP_MAGIC;
CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
CFS_INIT_LIST_HEAD(&oap->oap_page_list);
- spin_lock_init(&oap->oap_lock);
+ cfs_spin_lock_init(&oap->oap_lock);
CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
RETURN(0);
}
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
RETURN(-EIO);
- if (!list_empty(&oap->oap_pending_item) ||
- !list_empty(&oap->oap_urgent_item) ||
- !list_empty(&oap->oap_rpc_item))
+ if (!cfs_list_empty(&oap->oap_pending_item) ||
+ !cfs_list_empty(&oap->oap_urgent_item) ||
+ !cfs_list_empty(&oap->oap_rpc_item))
RETURN(-EBUSY);
/* check if the file's owner/group is over quota */
/* Give a hint to OST that requests are coming from kswapd - bug19529 */
if (libcfs_memory_pressure_get())
oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags = async_flags;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
if (cmd & OBD_BRW_WRITE) {
rc = osc_enter_cache(env, cli, loi, oap);
int flags = 0;
ENTRY;
- LASSERT(!list_empty(&oap->oap_pending_item));
+ LASSERT(!cfs_list_empty(&oap->oap_pending_item));
if (oap->oap_cmd & OBD_BRW_WRITE) {
lop = &loi->loi_write_lop;
flags |= ASYNC_READY;
if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
- list_empty(&oap->oap_rpc_item)) {
+ cfs_list_empty(&oap->oap_rpc_item)) {
if (oap->oap_async_flags & ASYNC_HP)
- list_add(&oap->oap_urgent_item, &lop->lop_urgent);
+ cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
else
- list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
+ cfs_list_add_tail(&oap->oap_urgent_item,
+ &lop->lop_urgent);
flags |= ASYNC_URGENT;
loi_list_maint(cli, loi);
}
- spin_lock(&oap->oap_lock);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags |= flags;
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
oap->oap_async_flags);
client_obd_list_lock(&cli->cl_loi_list_lock);
- if (!list_empty(&oap->oap_rpc_item))
+ if (!cfs_list_empty(&oap->oap_rpc_item))
GOTO(out, rc = -EBUSY);
osc_exit_cache(cli, oap, 0);
osc_wake_cache_waiters(cli);
- if (!list_empty(&oap->oap_urgent_item)) {
- list_del_init(&oap->oap_urgent_item);
- spin_lock(&oap->oap_lock);
+ if (!cfs_list_empty(&oap->oap_urgent_item)) {
+ cfs_list_del_init(&oap->oap_urgent_item);
+ cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
- spin_unlock(&oap->oap_lock);
+ cfs_spin_unlock(&oap->oap_lock);
}
- if (!list_empty(&oap->oap_pending_item)) {
- list_del_init(&oap->oap_pending_item);
+ if (!cfs_list_empty(&oap->oap_pending_item)) {
+ cfs_list_del_init(&oap->oap_pending_item);
lop_update_pending(cli, lop, oap->oap_cmd, -1);
}
loi_list_maint(cli, loi);
LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
lock_res_and_lock(lock);
- spin_lock(&osc_ast_guard);
+ cfs_spin_lock(&osc_ast_guard);
LASSERT(lock->l_ast_data == NULL || lock->l_ast_data == data);
lock->l_ast_data = data;
- spin_unlock(&osc_ast_guard);
+ cfs_spin_unlock(&osc_ast_guard);
unlock_res_and_lock(lock);
}
/* Reinitialize the RDONLY and DEGRADED flags at the client
* on each statfs, so they don't stay set permanently. */
- spin_lock(&cli->cl_oscc.oscc_lock);
+ cfs_spin_lock(&cli->cl_oscc.oscc_lock);
if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
(msfs->os_ffree > 64) && (msfs->os_bavail > (used << 1))))
cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_NOSPC;
- spin_unlock(&cli->cl_oscc.oscc_lock);
+ cfs_spin_unlock(&cli->cl_oscc.oscc_lock);
*aa->aa_oi->oi_osfs = *msfs;
out:
/*Since the request might also come from lprocfs, so we need
*sync this with client_disconnect_export Bug15684*/
- down_read(&obd->u.cli.cl_sem);
+ cfs_down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import)
imp = class_import_get(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
+ cfs_up_read(&obd->u.cli.cl_sem);
if (!imp)
RETURN(-ENODEV);
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(&lum, lump, lum_size))
+ if (cfs_copy_from_user(&lum, lump, lum_size))
RETURN(-EFAULT);
if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
lumk->lmm_object_gr = lsm->lsm_object_gr;
lumk->lmm_stripe_count = 1;
- if (copy_to_user(lump, lumk, lum_size))
+ if (cfs_copy_to_user(lump, lumk, lum_size))
rc = -EFAULT;
if (lumk != &lum)
int err = 0;
ENTRY;
- if (!try_module_get(THIS_MODULE)) {
+ if (!cfs_try_module_get(THIS_MODULE)) {
CERROR("Can't get module. Is it alive?");
return -EINVAL;
}
memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
- err = copy_to_user((void *)uarg, buf, len);
+ err = cfs_copy_to_user((void *)uarg, buf, len);
if (err)
err = -EFAULT;
obd_ioctl_freedata(buf, len);
GOTO(out, err = -ENOTTY);
}
out:
- module_put(THIS_MODULE);
+ cfs_module_put(THIS_MODULE);
return err;
}
/* XXX return an error? skip setting below flags? */
}
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_server_timeout = 1;
imp->imp_pingable = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
RETURN(rc);
/* avoid race between allocate new object and set next id
* from ll_sync thread */
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
new_val = *((obd_id*)val) + 1;
if (new_val > oscc->oscc_next_id)
oscc->oscc_next_id = new_val;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
exp->exp_obd->obd_name,
obd->u.cli.cl_oscc.oscc_next_id);
if (KEY_IS(KEY_INIT_RECOV)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_initial_recov = *(int *)val;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
exp->exp_obd->obd_name,
imp->imp_initial_recov);
LASSERT(olg == &obd->obd_olg);
- mutex_down(&olg->olg_cat_processing);
+ cfs_mutex_down(&olg->olg_cat_processing);
rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
if (rc) {
CERROR("rc: %d\n", rc);
}
out:
- mutex_up(&olg->olg_cat_processing);
+ cfs_mutex_up(&olg->olg_cat_processing);
return rc;
}
if (imp->imp_server_timeout) {
struct osc_creator *oscc = &obd->u.cli.cl_oscc;
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
}
cli = &obd->u.cli;
client_obd_list_lock(&cli->cl_loi_list_lock);
if (imp->imp_server_timeout) {
struct osc_creator *oscc = &obd->u.cli.cl_oscc;
- spin_lock(&oscc->oscc_lock);
+ cfs_spin_lock(&oscc->oscc_lock);
oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
- spin_unlock(&oscc->oscc_lock);
+ cfs_spin_unlock(&oscc->oscc_lock);
}
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
break;
ptlrpc_add_rqs_to_pool);
CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
- sema_init(&cli->cl_grant_sem, 1);
+ cfs_sema_init(&cli->cl_grant_sem, 1);
}
RETURN(rc);
CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
/* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
ptlrpc_deactivate_import(imp);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_pingable = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
break;
}
case OBD_CLEANUP_EXPORTS: {
client import will not have been cleaned. */
if (obd->u.cli.cl_import) {
struct obd_import *imp;
- down_write(&obd->u.cli.cl_sem);
+ cfs_down_write(&obd->u.cli.cl_sem);
imp = obd->u.cli.cl_import;
CDEBUG(D_CONFIG, "%s: client import never connected\n",
obd->obd_name);
imp->imp_rq_pool = NULL;
}
class_destroy_import(imp);
- up_write(&obd->u.cli.cl_sem);
+ cfs_up_write(&obd->u.cli.cl_sem);
obd->u.cli.cl_import = NULL;
}
rc = obd_llog_finish(obd, 0);
.o_process_config = osc_process_config,
};
-extern struct lu_kmem_descr osc_caches[];
-extern spinlock_t osc_ast_guard;
-extern struct lock_class_key osc_ast_guard_class;
+extern struct lu_kmem_descr osc_caches[];
+extern cfs_spinlock_t osc_ast_guard;
+extern cfs_lock_class_key_t osc_ast_guard_class;
int __init osc_init(void)
{
lprocfs_osc_init_vars(&lvars);
- request_module("lquota");
+ cfs_request_module("lquota");
quota_interface = PORTAL_SYMBOL_GET(osc_quota_interface);
lquota_init(quota_interface);
init_obd_quota_ops(quota_interface, &osc_obd_ops);
RETURN(rc);
}
- spin_lock_init(&osc_ast_guard);
- lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+ cfs_spin_lock_init(&osc_ast_guard);
+ cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
osc_mds_ost_orig_logops = llog_lvfs_ops;
osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
/**
* to protect index ops.
*/
- struct rw_semaphore oo_ext_idx_sem;
- struct rw_semaphore oo_sem;
+ cfs_rw_semaphore_t oo_ext_idx_sem;
+ cfs_rw_semaphore_t oo_sem;
struct osd_directory *oo_dir;
/** protects inode attributes. */
- spinlock_t oo_guard;
+ cfs_spinlock_t oo_guard;
/**
* Following two members are used to indicate the presence of dot and
* dotdot in the given directory. This is required for interop mode
* (b11826).
*/
- int oo_compat_dot_created;
- int oo_compat_dotdot_created;
+ int oo_compat_dot_created;
+ int oo_compat_dotdot_created;
const struct lu_env *oo_owner;
#ifdef CONFIG_LOCKDEP
mo->oo_dt.do_ops = &osd_obj_ops;
l->lo_ops = &osd_lu_obj_ops;
- init_rwsem(&mo->oo_sem);
- init_rwsem(&mo->oo_ext_idx_sem);
- spin_lock_init(&mo->oo_guard);
+ cfs_init_rwsem(&mo->oo_sem);
+ cfs_init_rwsem(&mo->oo_ext_idx_sem);
+ cfs_spin_lock_init(&mo->oo_guard);
return l;
} else
return NULL;
LASSERT(!lu_object_is_dying(l->lo_header));
if (o->oo_inode != NULL && osd_inode_unlinked(o->oo_inode))
- set_bit(LU_OBJECT_HEARD_BANSHEE, &l->lo_header->loh_flags);
+ cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &l->lo_header->loh_flags);
}
/*
* Concurrency: shouldn't matter.
*/
int osd_statfs(const struct lu_env *env, struct dt_device *d,
- struct kstatfs *sfs)
+ cfs_kstatfs_t *sfs)
{
struct osd_device *osd = osd_dt_dev(d);
struct super_block *sb = osd_sb(osd);
int result = 0;
- spin_lock(&osd->od_osfs_lock);
+ cfs_spin_lock(&osd->od_osfs_lock);
/* cache 1 second */
if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
result = ll_do_statfs(sb, &osd->od_kstatfs);
if (likely(result == 0))
*sfs = osd->od_kstatfs;
- spin_unlock(&osd->od_osfs_lock);
+ cfs_spin_unlock(&osd->od_osfs_lock);
return result;
}
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_owner != env);
- down_read_nested(&obj->oo_sem, role);
+ cfs_down_read_nested(&obj->oo_sem, role);
LASSERT(obj->oo_owner == NULL);
oti->oti_r_locks++;
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_owner != env);
- down_write_nested(&obj->oo_sem, role);
+ cfs_down_write_nested(&obj->oo_sem, role);
LASSERT(obj->oo_owner == NULL);
obj->oo_owner = env;
LASSERT(oti->oti_r_locks > 0);
oti->oti_r_locks--;
- up_read(&obj->oo_sem);
+ cfs_up_read(&obj->oo_sem);
}
static void osd_object_write_unlock(const struct lu_env *env,
LASSERT(oti->oti_w_locks > 0);
oti->oti_w_locks--;
obj->oo_owner = NULL;
- up_write(&obj->oo_sem);
+ cfs_up_write(&obj->oo_sem);
}
static int osd_object_write_locked(const struct lu_env *env,
RETURN(-ESTALE);
}
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
for (i = 0; i < 2; i++) {
if (keys[i].lk_keyid == capa->lc_keyid) {
oti->oti_capa_key = keys[i];
break;
}
}
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
if (i == 2) {
DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
osd_inode_getattr(env, obj->oo_inode, attr);
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
return 0;
}
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
rc = osd_inode_setattr(env, obj->oo_inode, attr);
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
if (!rc)
mark_inode_dirty(obj->oo_inode);
static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
- umode_t mode,
+ cfs_umode_t mode,
struct dt_allocation_hint *hint,
struct thandle *th)
{
struct dt_object_format *dof,
struct thandle *th)
{
- umode_t mode = attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX);
+ cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX);
int result;
LINVRNT(osd_invariant(obj));
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
- struct dt_object *parent, umode_t child_mode)
+ struct dt_object *parent, cfs_umode_t child_mode)
{
LASSERT(ah);
rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
buf->lb_len, fs_flags);
/* ctime should not be updated with server-side time. */
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
inode->i_ctime = *t;
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
mark_inode_dirty(inode);
return rc;
}
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
LASSERT(inode->i_nlink < LDISKFS_LINK_MAX);
inode->i_nlink++;
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
mark_inode_dirty(inode);
LINVRNT(osd_invariant(obj));
}
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
LASSERT(inode->i_nlink > 0);
inode->i_nlink--;
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
mark_inode_dirty(inode);
LINVRNT(osd_invariant(obj));
}
*t = inode->i_ctime;
rc = inode->i_op->removexattr(dentry, name);
/* ctime should not be updated with server-side time. */
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
inode->i_ctime = *t;
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
mark_inode_dirty(inode);
return rc;
}
__u32 d[4], s[4];
s[0] = obj->oo_inode->i_uid;
- get_random_bytes(&(s[1]), sizeof(__u32));
+ ll_get_random_bytes(&(s[1]), sizeof(__u32));
s[2] = obj->oo_inode->i_gid;
- get_random_bytes(&(s[3]), sizeof(__u32));
+ ll_get_random_bytes(&(s[3]), sizeof(__u32));
rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
if (unlikely(rc))
RETURN(ERR_PTR(rc));
RETURN(oc);
}
- spin_lock(&capa_lock);
+ cfs_spin_lock(&capa_lock);
*key = dev->od_capa_keys[1];
- spin_unlock(&capa_lock);
+ cfs_spin_unlock(&capa_lock);
capa->lc_keyid = key->lk_keyid;
capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
OBD_ALLOC_PTR(dir);
if (dir != NULL) {
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
if (obj->oo_dir == NULL)
obj->oo_dir = dir;
else
* Concurrent thread allocated container data.
*/
OBD_FREE_PTR(dir);
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
/*
* Now, that we have container data, serialize its
* initialization.
*/
- down_write(&obj->oo_ext_idx_sem);
+ cfs_down_write(&obj->oo_ext_idx_sem);
/*
* recheck under lock.
*/
result = osd_iam_container_init(env, obj, dir);
else
result = 0;
- up_write(&obj->oo_ext_idx_sem);
+ cfs_up_write(&obj->oo_ext_idx_sem);
} else
result = -ENOMEM;
} else
dentry = osd_child_dentry_get(env, obj,
(char *)key, strlen((char *)key));
- down_write(&obj->oo_ext_idx_sem);
+ cfs_down_write(&obj->oo_ext_idx_sem);
bh = ll_ldiskfs_find_entry(dir, dentry, &de);
if (bh) {
struct osd_thread_info *oti = osd_oti_get(env);
rc = ldiskfs_delete_entry(oh->ot_handle,
dir, de, bh);
/* xtime should not be updated with server-side time. */
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
dir->i_ctime = *ctime;
dir->i_mtime = *mtime;
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
mark_inode_dirty(dir);
brelse(bh);
} else
rc = -ENOENT;
- up_write(&obj->oo_ext_idx_sem);
+ cfs_up_write(&obj->oo_ext_idx_sem);
LASSERT(osd_invariant(obj));
RETURN(rc);
}
dentry = osd_child_dentry_get(env, obj,
(char *)key, strlen((char *)key));
- down_read(&obj->oo_ext_idx_sem);
+ cfs_down_read(&obj->oo_ext_idx_sem);
bh = ll_ldiskfs_find_entry(dir, dentry, &de);
if (bh) {
ino = le32_to_cpu(de->inode);
} else
rc = -ENOENT;
- up_read(&obj->oo_ext_idx_sem);
+ cfs_up_read(&obj->oo_ext_idx_sem);
RETURN (rc);
}
else
current->cap_effective &= ~CFS_CAP_SYS_RESOURCE_MASK;
#endif
- down_write(&obj->oo_ext_idx_sem);
+ cfs_down_write(&obj->oo_ext_idx_sem);
rc = osd_ea_add_rec(env, obj, child, name, th);
- up_write(&obj->oo_ext_idx_sem);
+ cfs_up_write(&obj->oo_ext_idx_sem);
#ifdef HAVE_QUOTA_SUPPORT
current->cap_effective = save;
#endif
osd_object_put(env, child);
/* xtime should not be updated with server-side time. */
- spin_lock(&obj->oo_guard);
+ cfs_spin_lock(&obj->oo_guard);
inode->i_ctime = *ctime;
inode->i_mtime = *mtime;
- spin_unlock(&obj->oo_guard);
+ cfs_spin_unlock(&obj->oo_guard);
mark_inode_dirty(inode);
} else {
rc = PTR_ERR(child);
memcpy(ent->oied_name, name, namelen);
it->oie_rd_dirent++;
- it->oie_dirent = (void *) ent + size_round(sizeof(*ent) + namelen);
+ it->oie_dirent = (void *) ent + cfs_size_round(sizeof(*ent) + namelen);
RETURN(0);
}
it->oie_dirent = it->oie_buf;
it->oie_rd_dirent = 0;
- down_read(&obj->oo_ext_idx_sem);
+ cfs_down_read(&obj->oo_ext_idx_sem);
result = inode->i_fop->readdir(&it->oie_file, it,
(filldir_t) osd_ldiskfs_filldir);
- up_read(&obj->oo_ext_idx_sem);
+ cfs_up_read(&obj->oo_ext_idx_sem);
if (it->oie_rd_dirent == 0) {
result = -EIO;
ENTRY;
if (it->oie_it_dirent < it->oie_rd_dirent) {
- it->oie_dirent = (void *) it->oie_dirent +
- size_round(sizeof(struct osd_it_ea_dirent) +
- it->oie_dirent->oied_namelen);
+ it->oie_dirent =
+ (void *) it->oie_dirent +
+ cfs_size_round(sizeof(struct osd_it_ea_dirent) +
+ it->oie_dirent->oied_namelen);
it->oie_it_dirent++;
RETURN(0);
} else {
l = osd2lu_dev(o);
l->ld_ops = &osd_lu_ops;
o->od_dt_dev.dd_ops = &osd_dt_ops;
- spin_lock_init(&o->od_osfs_lock);
+ cfs_spin_lock_init(&o->od_osfs_lock);
o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
if (o->od_capa_hash == NULL) {
*
* No locking. Callers synchronize.
*/
-static LIST_HEAD(iam_formats);
+static CFS_LIST_HEAD(iam_formats);
void iam_format_register(struct iam_format *fmt)
{
- list_add(&fmt->if_linkage, &iam_formats);
+ cfs_list_add(&fmt->if_linkage, &iam_formats);
}
EXPORT_SYMBOL(iam_format_register);
}
result = -ENOENT;
- list_for_each_entry(fmt, &iam_formats, if_linkage) {
+ cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
result = fmt->if_guess(c);
if (result == 0)
break;
memset(c, 0, sizeof *c);
c->ic_descr = descr;
c->ic_object = inode;
- init_rwsem(&c->ic_sem);
+ cfs_init_rwsem(&c->ic_sem);
return 0;
}
EXPORT_SYMBOL(iam_container_init);
block = path->ip_frame->leaf;
if (block == 0) {
/* XXX bug 11027 */
- printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
+ printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
(long unsigned)path->ip_frame->leaf,
dx_get_count(dx_node_get_entries(path, path->ip_frame)),
path->ip_frames[0].bh, path->ip_frames[1].bh,
void iam_container_write_lock(struct iam_container *ic)
{
- down_write(&ic->ic_sem);
+ cfs_down_write(&ic->ic_sem);
}
void iam_container_write_unlock(struct iam_container *ic)
{
- up_write(&ic->ic_sem);
+ cfs_up_write(&ic->ic_sem);
}
void iam_container_read_lock(struct iam_container *ic)
{
- down_read(&ic->ic_sem);
+ cfs_down_read(&ic->ic_sem);
}
void iam_container_read_unlock(struct iam_container *ic)
{
- up_read(&ic->ic_sem);
+ cfs_up_read(&ic->ic_sem);
}
/*
* Underlying flat file. IO against this object is issued to
* read/write nodes.
*/
- struct inode *ic_object;
+ struct inode *ic_object;
/*
* container flavor.
*/
- struct iam_descr *ic_descr;
+ struct iam_descr *ic_descr;
/*
* read-write lock protecting index consistency.
*/
- struct rw_semaphore ic_sem;
+ cfs_rw_semaphore_t ic_sem;
};
/*
{
DX_DEVAL(iam_lock_stats.dls_bh_lock++);
#ifdef CONFIG_SMP
- while (test_and_set_bit(BH_DXLock, &bh->b_state)) {
+ while (cfs_test_and_set_bit(BH_DXLock, &bh->b_state)) {
DX_DEVAL(iam_lock_stats.dls_bh_busy++);
- while (test_bit(BH_DXLock, &bh->b_state))
+ while (cfs_test_bit(BH_DXLock, &bh->b_state))
cpu_relax();
}
#endif
/*
* Linkage into global list of container formats.
*/
- struct list_head if_linkage;
+ cfs_list_t if_linkage;
};
void iam_format_register(struct iam_format *fmt);
char h[3];
area = (char *)entry;
- printk(KERN_EMERG "[");
+ printk(CFS_KERN_EMERG "[");
for (i = iam_lfix_key_size(leaf); i > 0; --i, ++area)
printk("%s", hex(*area, h));
printk("]-(");
entry = leaf->il_entries;
count = lentry_count_get(leaf);
- printk(KERN_EMERG "lfix: %p %p %d\n", leaf, leaf->il_at, count);
+ printk(CFS_KERN_EMERG "lfix: %p %p %d\n", leaf, leaf->il_at, count);
for (i = 0; i < count; ++i, entry = iam_lfix_shift(leaf, entry, 1))
l_print(leaf, entry);
}
{
struct lvar_leaf_entry *scan;
- printk(KERN_EMERG "used: %d\n", h_used(n_head(l)));
+ printk(CFS_KERN_EMERG "used: %d\n", h_used(n_head(l)));
for (scan = n_start(l); scan < n_end(l); scan = e_next(l, scan))
e_print(scan);
}
unsigned long od_capa_timeout;
__u32 od_capa_alg;
struct lustre_capa_key *od_capa_keys;
- struct hlist_head *od_capa_hash;
+ cfs_hlist_head_t *od_capa_hash;
cfs_proc_dir_entry_t *od_proc_entry;
struct lprocfs_stats *od_stats;
* statfs optimization: we cache a bit.
*/
cfs_time_t od_osfs_age;
- struct kstatfs od_kstatfs;
- spinlock_t od_osfs_lock;
+ cfs_kstatfs_t od_kstatfs;
+ cfs_spinlock_t od_osfs_lock;
/**
* The following flag indicates, if it is interop mode or not.
struct osd_device *osd, int op);
#endif
int osd_statfs(const struct lu_env *env, struct dt_device *dev,
- struct kstatfs *sfs);
+ cfs_kstatfs_t *sfs);
/*
* Invariants, assertions.
};
/** to serialize concurrent OI index initialization */
-static struct mutex oi_init_lock;
+static cfs_mutex_t oi_init_lock;
static struct dt_index_features oi_feat = {
.dif_flags = DT_IND_UPDATE,
int i;
env = info->oti_env;
- mutex_lock(&oi_init_lock);
+ cfs_mutex_lock(&oi_init_lock);
memset(oi, 0, sizeof *oi);
retry:
for (i = rc = 0; i < OSD_OI_FID_NR && rc == 0; ++i) {
if (rc != 0)
osd_oi_fini(info, oi);
- mutex_unlock(&oi_init_lock);
+ cfs_mutex_unlock(&oi_init_lock);
return rc;
}
int osd_oi_mod_init()
{
- mutex_init(&oi_init_lock);
+ cfs_mutex_init(&oi_init_lock);
return 0;
}
osfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
req->rq_status = obd_statfs(req->rq_export->exp_obd, osfs,
- cfs_time_current_64() - HZ, 0);
+ cfs_time_current_64() - CFS_HZ, 0);
if (req->rq_status != 0)
CERROR("ost: statfs failed: rc %d\n", req->rq_status);
/* Check if there is eviction in progress, and if so, wait for it to
* finish */
- if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
+ if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
lwi = LWI_INTR(NULL, NULL); // We do not care how long it takes
rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
- !atomic_read(&exp->exp_obd->obd_evict_inprogress),
+ !cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress),
&lwi);
}
if (exp->exp_failed)
if (rc == 0) {
/* Check if there is eviction in progress, and if so, wait for
* it to finish */
- if (unlikely(atomic_read(&exp->exp_obd->
- obd_evict_inprogress))) {
+ if (unlikely(cfs_atomic_read(&exp->exp_obd->
+ obd_evict_inprogress))) {
lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(exp->exp_obd->
- obd_evict_inprogress_waitq,
- !atomic_read(&exp->exp_obd->
- obd_evict_inprogress),
+ obd_evict_inprogress_waitq,
+ !cfs_atomic_read(&exp->exp_obd->
+ obd_evict_inprogress),
&lwi);
}
/* Check if client was evicted or tried to reconnect already */
/* Check if there is eviction in progress, and if so, wait for it to
* finish */
- if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
+ if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
lwi = LWI_INTR(NULL, NULL); // We do not care how long it takes
rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
- !atomic_read(&exp->exp_obd->obd_evict_inprogress),
+ !cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress),
&lwi);
}
if (exp->exp_failed)
reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT | \
OBD_CONNECT_RMT_CLIENT_FORCE | \
OBD_CONNECT_OSS_CAPA); \
- spin_lock(&exp->exp_lock); \
+ cfs_spin_lock(&exp->exp_lock); \
exp->exp_connect_flags = reply->ocd_connect_flags; \
- spin_unlock(&exp->exp_lock); \
+ cfs_spin_unlock(&exp->exp_lock); \
} while (0)
static int ost_init_sec_level(struct ptlrpc_request *req)
if (!filter->fo_fl_oss_capa)
reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connect_flags = reply->ocd_connect_flags;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
}
break;
default:
}
if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
- read_lock(&filter->fo_sptlrpc_lock);
+ cfs_read_lock(&filter->fo_sptlrpc_lock);
sptlrpc_target_choose_flavor(&filter->fo_sptlrpc_rset,
req->rq_sp_from,
req->rq_peer.nid,
&flvr);
- read_unlock(&filter->fo_sptlrpc_lock);
+ cfs_read_unlock(&filter->fo_sptlrpc_lock);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
rc = -EACCES;
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
} else {
if (exp->exp_sp_peer != req->rq_sp_from) {
CERROR("RPC source %s doesn't match %s\n",
obd = req->rq_export->exp_obd;
/* Check for aborted recovery. */
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
recovering = obd->obd_recovering;
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
if (recovering) {
rc = ost_filter_recovery_request(req, obd,
&should_process);
int rc;
ENTRY;
- rc = cleanup_group_info();
+ rc = cfs_cleanup_group_info();
if (rc)
RETURN(rc);
lprocfs_ost_init_vars(&lvars);
lprocfs_obd_setup(obd, lvars.obd_vars);
- sema_init(&ost->ost_health_sem, 1);
+ cfs_sema_init(&ost->ost_health_sem, 1);
if (oss_num_threads) {
/* If oss_num_threads is set, it is the min and the max. */
oss_max_threads = oss_min_threads = oss_num_threads;
} else {
/* Base min threads on memory and cpus */
- oss_min_threads = num_possible_cpus() * CFS_NUM_CACHEPAGES >>
+ oss_min_threads =
+ cfs_num_possible_cpus() * CFS_NUM_CACHEPAGES >>
(27 - CFS_PAGE_SHIFT);
if (oss_min_threads < OSS_THREADS_MIN)
oss_min_threads = OSS_THREADS_MIN;
ping_evictor_stop();
- spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
if (obd->obd_recovering) {
target_cancel_recovery_timer(obd);
obd->obd_recovering = 0;
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
- down(&ost->ost_health_sem);
+ cfs_down(&ost->ost_health_sem);
ptlrpc_unregister_service(ost->ost_service);
ptlrpc_unregister_service(ost->ost_create_service);
ptlrpc_unregister_service(ost->ost_io_service);
ost->ost_service = NULL;
ost->ost_create_service = NULL;
- up(&ost->ost_health_sem);
+ cfs_up(&ost->ost_health_sem);
lprocfs_obd_cleanup(obd);
struct ost_obd *ost = &obd->u.ost;
int rc = 0;
- down(&ost->ost_health_sem);
+ cfs_down(&ost->ost_health_sem);
rc |= ptlrpc_service_health_check(ost->ost_service);
rc |= ptlrpc_service_health_check(ost->ost_create_service);
rc |= ptlrpc_service_health_check(ost->ost_io_service);
- up(&ost->ost_health_sem);
+ cfs_up(&ost->ost_health_sem);
/*
* health_check to return 0 on healthy
if (!desc)
return NULL;
- spin_lock_init(&desc->bd_lock);
+ cfs_spin_lock_init(&desc->bd_lock);
cfs_waitq_init(&desc->bd_waitq);
desc->bd_max_iov = npages;
desc->bd_iov_count = 0;
ENTRY;
req->rq_early = 0;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
if (rc) {
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
RETURN(rc);
}
sptlrpc_cli_finish_early_reply(early_req);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (rc == 0) {
/* Adjust the local timeout for this req */
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
- struct list_head *l, *tmp;
+ cfs_list_t *l, *tmp;
struct ptlrpc_request *req;
LASSERT(pool != NULL);
- spin_lock(&pool->prp_lock);
- list_for_each_safe(l, tmp, &pool->prp_req_list) {
- req = list_entry(l, struct ptlrpc_request, rq_list);
- list_del(&req->rq_list);
+ cfs_spin_lock(&pool->prp_lock);
+ cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
+ req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
+ cfs_list_del(&req->rq_list);
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
OBD_FREE(req->rq_reqbuf, pool->prp_rq_size);
OBD_FREE(req, sizeof(*req));
}
- spin_unlock(&pool->prp_lock);
+ cfs_spin_unlock(&pool->prp_lock);
OBD_FREE(pool, sizeof(*pool));
}
while (size < pool->prp_rq_size + SPTLRPC_MAX_PAYLOAD)
size <<= 1;
- LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size,
+ LASSERTF(cfs_list_empty(&pool->prp_req_list) ||
+ size == pool->prp_rq_size,
"Trying to change pool size with nonempty pool "
"from %d to %d bytes\n", pool->prp_rq_size, size);
- spin_lock(&pool->prp_lock);
+ cfs_spin_lock(&pool->prp_lock);
pool->prp_rq_size = size;
for (i = 0; i < num_rq; i++) {
struct ptlrpc_request *req;
struct lustre_msg *msg;
- spin_unlock(&pool->prp_lock);
+ cfs_spin_unlock(&pool->prp_lock);
OBD_ALLOC(req, sizeof(struct ptlrpc_request));
if (!req)
return;
req->rq_reqbuf = msg;
req->rq_reqbuf_len = size;
req->rq_pool = pool;
- spin_lock(&pool->prp_lock);
- list_add_tail(&req->rq_list, &pool->prp_req_list);
+ cfs_spin_lock(&pool->prp_lock);
+ cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
}
- spin_unlock(&pool->prp_lock);
+ cfs_spin_unlock(&pool->prp_lock);
return;
}
/* Request next power of two for the allocation, because internally
kernel would do exactly this */
- spin_lock_init(&pool->prp_lock);
+ cfs_spin_lock_init(&pool->prp_lock);
CFS_INIT_LIST_HEAD(&pool->prp_req_list);
pool->prp_rq_size = msgsize;
pool->prp_populate = populate_pool;
populate_pool(pool, num_rq);
- if (list_empty(&pool->prp_req_list)) {
+ if (cfs_list_empty(&pool->prp_req_list)) {
/* have not allocated a single request for the pool */
OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
pool = NULL;
if (!pool)
return NULL;
- spin_lock(&pool->prp_lock);
+ cfs_spin_lock(&pool->prp_lock);
/* See if we have anything in a pool, and bail out if nothing,
* in writeout path, where this matters, this is safe to do, because
* nothing is lost in this case, and when some in-flight requests
* complete, this code will be called again. */
- if (unlikely(list_empty(&pool->prp_req_list))) {
- spin_unlock(&pool->prp_lock);
+ if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+ cfs_spin_unlock(&pool->prp_lock);
return NULL;
}
- request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- list_del_init(&request->rq_list);
- spin_unlock(&pool->prp_lock);
+ request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+ rq_list);
+ cfs_list_del_init(&request->rq_list);
+ cfs_spin_unlock(&pool->prp_lock);
LASSERT(request->rq_reqbuf);
LASSERT(request->rq_pool);
{
struct ptlrpc_request_pool *pool = request->rq_pool;
- spin_lock(&pool->prp_lock);
- LASSERT(list_empty(&request->rq_list));
+ cfs_spin_lock(&pool->prp_lock);
+ LASSERT(cfs_list_empty(&request->rq_list));
LASSERT(!request->rq_receiving_reply);
- list_add_tail(&request->rq_list, &pool->prp_req_list);
- spin_unlock(&pool->prp_lock);
+ cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+ cfs_spin_unlock(&pool->prp_lock);
}
static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- spin_lock_init(&request->rq_lock);
+ cfs_spin_lock_init(&request->rq_lock);
CFS_INIT_LIST_HEAD(&request->rq_list);
CFS_INIT_LIST_HEAD(&request->rq_timed_list);
CFS_INIT_LIST_HEAD(&request->rq_replay_list);
CFS_INIT_LIST_HEAD(&request->rq_exp_list);
cfs_waitq_init(&request->rq_reply_waitq);
request->rq_xid = ptlrpc_next_xid();
- atomic_set(&request->rq_refcount, 1);
+ cfs_atomic_set(&request->rq_refcount, 1);
lustre_msg_set_opc(request->rq_reqmsg, opcode);
request->rq_no_delay = request->rq_no_resend = 1;
request->rq_fake = 1;
- spin_lock_init(&request->rq_lock);
+ cfs_spin_lock_init(&request->rq_lock);
CFS_INIT_LIST_HEAD(&request->rq_list);
CFS_INIT_LIST_HEAD(&request->rq_replay_list);
CFS_INIT_LIST_HEAD(&request->rq_set_chain);
cfs_waitq_init(&request->rq_reply_waitq);
request->rq_xid = ptlrpc_next_xid();
- atomic_set(&request->rq_refcount, 1);
+ cfs_atomic_set(&request->rq_refcount, 1);
RETURN(request);
}
}
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- list_del_init(&req->rq_list);
+ cfs_list_del_init(&req->rq_list);
}
CFS_INIT_LIST_HEAD(&set->set_requests);
cfs_waitq_init(&set->set_waitq);
set->set_remaining = 0;
- spin_lock_init(&set->set_new_req_lock);
+ cfs_spin_lock_init(&set->set_new_req_lock);
CFS_INIT_LIST_HEAD(&set->set_new_requests);
CFS_INIT_LIST_HEAD(&set->set_cblist);
/* Finish with this set; opposite of prep_set. */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
- struct list_head *next;
+ cfs_list_t *tmp;
+ cfs_list_t *next;
int expected_phase;
int n = 0;
ENTRY;
/* Requests on the set should either all be completed, or all be new */
expected_phase = (set->set_remaining == 0) ?
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- list_for_each (tmp, &set->set_requests) {
+ cfs_list_for_each (tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
n++;
LASSERTF(set->set_remaining == 0 || set->set_remaining == n, "%d / %d\n",
set->set_remaining, n);
- list_for_each_safe(tmp, next, &set->set_requests) {
+ cfs_list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
- list_del_init(&req->rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ cfs_list_del_init(&req->rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
cbdata->psc_interpret = fn;
cbdata->psc_data = data;
- list_add_tail(&cbdata->psc_item, &set->set_cblist);
+ cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist);
RETURN(0);
}
struct ptlrpc_request *req)
{
/* The set takes over the caller's request reference */
- list_add_tail(&req->rq_set_chain, &set->set_requests);
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
set->set_remaining++;
}
* Let caller know that we stopped and will not handle this request.
* It needs to take care itself of request.
*/
- if (test_bit(LIOD_STOP, &pc->pc_flags))
+ if (cfs_test_bit(LIOD_STOP, &pc->pc_flags))
return -EALREADY;
- spin_lock(&set->set_new_req_lock);
+ cfs_spin_lock(&set->set_new_req_lock);
/*
* The set takes over the caller's request reference.
*/
- list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
req->rq_set = set;
- spin_unlock(&set->set_new_req_lock);
+ cfs_spin_unlock(&set->set_new_req_lock);
cfs_waitq_signal(&set->set_waitq);
return 0;
} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
imp->imp_state == LUSTRE_IMP_CONNECTING) {
/* allow CONNECT even if import is invalid */ ;
- if (atomic_read(&imp->imp_inval_count) != 0) {
+ if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
}
*status = -EIO;
} else if (req->rq_send_state != imp->imp_state) {
/* invalidate in progress - any requests should be drop */
- if (atomic_read(&imp->imp_inval_count) != 0) {
+ if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
} else if (imp->imp_dlm_fake || req->rq_no_delay) {
RETURN(delay);
}
-
/* Conditionally suppress specific console messages */
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
if (rc)
RETURN(rc);
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
if (obd->obd_svc_stats != NULL) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
}
if (imp->imp_replayable) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/*
* No point in adding already-committed requests to the replay
* list, we will just remove them immediately. b=9829
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
} else if (req->rq_commit_cb != NULL) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
req->rq_commit_cb(req);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
}
/*
lustre_msg_get_last_committed(req->rq_repmsg);
}
ptlrpc_free_committed(imp);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
RETURN(rc);
ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
imp = req->rq_import;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
req->rq_import_generation = imp->imp_generation;
if (ptlrpc_import_delay_req(imp, req, &rc)) {
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_waiting = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
"(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- LASSERT(list_empty(&req->rq_list));
- list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- atomic_inc(&req->rq_import->imp_inflight);
- spin_unlock(&imp->imp_lock);
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(0);
}
if (rc != 0) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
req->rq_status = rc;
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
RETURN(rc);
}
- LASSERT(list_empty(&req->rq_list));
- list_add_tail(&req->rq_list, &imp->imp_sending_list);
- atomic_inc(&req->rq_import->imp_inflight);
- spin_unlock(&imp->imp_lock);
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ cfs_spin_unlock(&imp->imp_lock);
lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
/* this sends any unsent RPCs in @set and returns TRUE if all are sent */
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
int force_timer_recalc = 0;
ENTRY;
if (set->set_remaining == 0)
RETURN(1);
- list_for_each(tmp, &set->set_requests) {
+ cfs_list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
struct obd_import *imp = req->rq_import;
int rc = 0;
if (!ptlrpc_unregister_reply(req, 1))
continue;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (ptlrpc_import_delay_req(imp, req, &status)){
/* put on delay list - only if we wait
* recovery finished - before send */
- list_del_init(&req->rq_list);
- list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- spin_unlock(&imp->imp_lock);
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
+ &imp-> \
+ imp_delayed_list);
+ cfs_spin_unlock(&imp->imp_lock);
continue;
}
req->rq_status = status;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
GOTO(interpret, req->rq_status);
}
if (req->rq_no_resend && !req->rq_wait_ctx) {
req->rq_status = -ENOTCONN;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
GOTO(interpret, req->rq_status);
}
- list_del_init(&req->rq_list);
- list_add_tail(&req->rq_list,
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
&imp->imp_sending_list);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
req->rq_waiting = 0;
force_timer_recalc = 1;
}
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (ptlrpc_client_early(req)) {
ptlrpc_at_recv_early_reply(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
continue;
}
/* Still waiting for a reply? */
if (ptlrpc_client_recv(req)) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
continue;
}
/* Did we actually receive a reply? */
if (!ptlrpc_client_replied(req)) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
continue;
}
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
req->rq_status = after_reply(req);
if (req->rq_resend)
libcfs_nid2str(imp->imp_connection->c_peer.nid),
req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : -1);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/* Request already may be not on sending or delaying list. This
* may happen in the case of marking it erroneous for the case
* ptlrpc_import_delay_req(req, status) find it impossible to
* allow sending this rpc and returns *status != 0. */
- if (!list_empty(&req->rq_list)) {
- list_del_init(&req->rq_list);
- atomic_dec(&imp->imp_inflight);
+ if (!cfs_list_empty(&req->rq_list)) {
+ cfs_list_del_init(&req->rq_list);
+ cfs_atomic_dec(&imp->imp_inflight);
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
set->set_remaining--;
cfs_waitq_broadcast(&imp->imp_recovery_waitq);
int rc = 0;
ENTRY;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_timedout = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req,
"Request x"LPU64" sent from %s to NID %s "CFS_DURATION_T"s "
if (req->rq_fake)
RETURN(1);
- atomic_inc(&imp->imp_timeouts);
+ cfs_atomic_inc(&imp->imp_timeouts);
/* The DLM server doesn't want recovery run on its imports. */
if (imp->imp_dlm_fake)
DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_status = -ETIMEDOUT;
req->rq_err = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
RETURN(1);
}
int ptlrpc_expired_set(void *data)
{
struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
+ cfs_list_t *tmp;
time_t now = cfs_time_current_sec();
ENTRY;
/*
* A timeout expired. See which reqs it applies to...
*/
- list_for_each (tmp, &set->set_requests) {
+ cfs_list_for_each (tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
/* don't expire request waiting for context */
if (req->rq_wait_ctx)
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
void ptlrpc_interrupted_set(void *data)
{
struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
+ cfs_list_t *tmp;
LASSERT(set != NULL);
CERROR("INTERRUPTED SET %p\n", set);
- list_for_each(tmp, &set->set_requests) {
+ cfs_list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
if (req->rq_phase != RQ_PHASE_RPC &&
req->rq_phase != RQ_PHASE_UNREGISTERING)
*/
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
time_t now = cfs_time_current_sec();
int timeout = 0;
struct ptlrpc_request *req;
SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
/*
* Request in-flight?
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ptlrpc_request *req;
struct l_wait_info lwi;
int rc, timeout;
ENTRY;
- if (list_empty(&set->set_requests))
+ if (cfs_list_empty(&set->set_requests))
RETURN(0);
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase == RQ_PHASE_NEW)
(void)ptlrpc_send_new_req(req);
}
LASSERT(set->set_remaining == 0);
rc = 0;
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
if (req->rq_status != 0)
struct ptlrpc_set_cbdata *cbdata, *n;
int err;
- list_for_each_entry_safe(cbdata, n,
+ cfs_list_for_each_entry_safe(cbdata, n,
&set->set_cblist, psc_item) {
- list_del_init(&cbdata->psc_item);
+ cfs_list_del_init(&cbdata->psc_item);
err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
if (err && !rc)
rc = err;
LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
- LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
+ LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request);
+ LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request);
+ LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request);
LASSERTF(!request->rq_replay, "req %p\n", request);
LASSERT(request->rq_cli_ctx || request->rq_fake);
* request->rq_reqmsg to NULL while osc_close is dereferencing it. */
if (request->rq_import != NULL) {
if (!locked)
- spin_lock(&request->rq_import->imp_lock);
- list_del_init(&request->rq_replay_list);
+ cfs_spin_lock(&request->rq_import->imp_lock);
+ cfs_list_del_init(&request->rq_replay_list);
if (!locked)
- spin_unlock(&request->rq_import->imp_lock);
+ cfs_spin_unlock(&request->rq_import->imp_lock);
}
- LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
+ LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
- if (atomic_read(&request->rq_refcount) != 0) {
+ if (cfs_atomic_read(&request->rq_refcount) != 0) {
DEBUG_REQ(D_ERROR, request,
"freeing request with nonzero refcount");
LBUG();
}
DEBUG_REQ(D_INFO, request, "refcount now %u",
- atomic_read(&request->rq_refcount) - 1);
+ cfs_atomic_read(&request->rq_refcount) - 1);
- if (atomic_dec_and_test(&request->rq_refcount)) {
+ if (cfs_atomic_dec_and_test(&request->rq_refcount)) {
__ptlrpc_free_req(request, locked);
RETURN(1);
}
/*
* Might sleep.
*/
- LASSERT(!in_interrupt());
+ LASSERT(!cfs_in_interrupt());
/*
* Let's setup deadline for reply unlink.
/* caller must hold imp->imp_lock */
void ptlrpc_free_committed(struct obd_import *imp)
{
- struct list_head *tmp, *saved;
+ cfs_list_t *tmp, *saved;
struct ptlrpc_request *req;
struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
ENTRY;
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
imp->imp_last_generation_checked = imp->imp_generation;
- list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ cfs_list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
LASSERT(req != last_req);
DEBUG_REQ(D_RPCTRACE, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
if (req->rq_commit_cb != NULL)
req->rq_commit_cb(req);
- list_del_init(&req->rq_replay_list);
+ cfs_list_del_init(&req->rq_replay_list);
__ptlrpc_req_finished(req, 1);
}
lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
req->rq_status = -EAGAIN;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
old_xid, req->rq_xid);
}
ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
/* XXX: this function and rq_status are currently unused */
DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
req->rq_status = -ERESTARTSYS;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_restart = 1;
req->rq_timedout = 0;
ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
ENTRY;
- atomic_inc(&req->rq_refcount);
+ cfs_atomic_inc(&req->rq_refcount);
RETURN(req);
}
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
LASSERT_SPIN_LOCKED(&imp->imp_lock);
lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
/* don't re-add requests that have been replayed */
- if (!list_empty(&req->rq_replay_list))
+ if (!cfs_list_empty(&req->rq_replay_list))
return;
lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
LASSERT(imp->imp_replayable);
/* Balanced in ptlrpc_free_committed, usually. */
ptlrpc_request_addref(req);
- list_for_each_prev(tmp, &imp->imp_replay_list) {
+ cfs_list_for_each_prev(tmp, &imp->imp_replay_list) {
struct ptlrpc_request *iter =
- list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
/* We may have duplicate transnos if we create and then
* open a file, or for closes retained if to match creating
continue;
}
- list_add(&req->rq_replay_list, &iter->rq_replay_list);
+ cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list);
return;
}
- list_add(&req->rq_replay_list, &imp->imp_replay_list);
+ cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
int ptlrpc_queue_wait(struct ptlrpc_request *req)
struct obd_import *imp = req->rq_import;
ENTRY;
- atomic_dec(&imp->imp_replay_inflight);
+ cfs_atomic_dec(&imp->imp_replay_inflight);
if (!ptlrpc_client_replied(req)) {
CERROR("request replay timed out, restarting recovery\n");
if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
/** replay was failed due to version mismatch */
DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_vbr_failed = 1;
imp->imp_no_lock_replay = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
} else {
/** The transno had better not change over replay. */
LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
lustre_msg_get_transno(req->rq_repmsg));
}
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/** if replays by version then gap was occur on server, no trust to locks */
if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
imp->imp_no_lock_replay = 1;
imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_last_replay_transno);
DEBUG_REQ(D_HA, req, "got rep");
* imp_last_replay_transno shouldn't be set to 0 anyway
*/
if (req->rq_transno > 0) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
LASSERT(req->rq_transno <= imp->imp_last_replay_transno);
imp->imp_last_replay_transno = req->rq_transno;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
} else
CERROR("Transno is 0 during replay!\n");
/* continue with recovery */
DEBUG_REQ(D_HA, req, "REPLAY");
- atomic_inc(&req->rq_import->imp_replay_inflight);
+ cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
ptlrpcd_add_req(req, PSCOPE_OTHER);
void ptlrpc_abort_inflight(struct obd_import *imp)
{
- struct list_head *tmp, *n;
+ cfs_list_t *tmp, *n;
ENTRY;
/* Make sure that no new requests get processed for this import.
* ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
* this flag and then putting requests on sending_list or delayed_list.
*/
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/* XXX locking? Maybe we should remove each request with the list
* locked? Also, how do we know if the requests on the list are
* being freed at this time?
*/
- list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
DEBUG_REQ(D_RPCTRACE, req, "inflight");
- spin_lock (&req->rq_lock);
+ cfs_spin_lock (&req->rq_lock);
if (req->rq_import_generation < imp->imp_generation) {
req->rq_err = 1;
req->rq_status = -EINTR;
ptlrpc_client_wake_req(req);
}
- spin_unlock (&req->rq_lock);
+ cfs_spin_unlock (&req->rq_lock);
}
- list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+ cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
- spin_lock (&req->rq_lock);
+ cfs_spin_lock (&req->rq_lock);
if (req->rq_import_generation < imp->imp_generation) {
req->rq_err = 1;
req->rq_status = -EINTR;
ptlrpc_client_wake_req(req);
}
- spin_unlock (&req->rq_lock);
+ cfs_spin_unlock (&req->rq_lock);
}
/* Last chance to free reqs left on the replay list, but we
if (imp->imp_replayable)
ptlrpc_free_committed(imp);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
EXIT;
}
void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
LASSERT(set != NULL);
- list_for_each_safe(pos, tmp, &set->set_requests) {
+ cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(pos, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (req->rq_phase != RQ_PHASE_RPC) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
continue;
}
req->rq_err = 1;
req->rq_status = -EINTR;
ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
}
static __u64 ptlrpc_last_xid;
-static spinlock_t ptlrpc_last_xid_lock;
+static cfs_spinlock_t ptlrpc_last_xid_lock;
/* Initialize the XID for the node. This is common among all requests on
* this node, and only requires the property that it is monotonically
{
time_t now = cfs_time_current_sec();
- spin_lock_init(&ptlrpc_last_xid_lock);
+ cfs_spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
ll_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
ptlrpc_last_xid >>= 2;
__u64 ptlrpc_next_xid(void)
{
__u64 tmp;
- spin_lock(&ptlrpc_last_xid_lock);
+ cfs_spin_lock(&ptlrpc_last_xid_lock);
tmp = ++ptlrpc_last_xid;
- spin_unlock(&ptlrpc_last_xid_lock);
+ cfs_spin_unlock(&ptlrpc_last_xid_lock);
return tmp;
}
#if BITS_PER_LONG == 32
/* need to avoid possible word tearing on 32-bit systems */
__u64 tmp;
- spin_lock(&ptlrpc_last_xid_lock);
+ cfs_spin_lock(&ptlrpc_last_xid_lock);
tmp = ptlrpc_last_xid + 1;
- spin_unlock(&ptlrpc_last_xid_lock);
+ cfs_spin_unlock(&ptlrpc_last_xid_lock);
return tmp;
#else
/* No need to lock, since returned value is racy anyways */
conn->c_peer = peer;
conn->c_self = self;
- INIT_HLIST_NODE(&conn->c_hash);
- atomic_set(&conn->c_refcount, 1);
+ CFS_INIT_HLIST_NODE(&conn->c_hash);
+ cfs_atomic_set(&conn->c_refcount, 1);
if (uuid)
obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);
EXIT;
out:
CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
+ conn, cfs_atomic_read(&conn->c_refcount),
libcfs_nid2str(conn->c_peer.nid));
return conn;
}
if (!conn)
RETURN(rc);
- LASSERT(!hlist_unhashed(&conn->c_hash));
+ LASSERT(!cfs_hlist_unhashed(&conn->c_hash));
/*
* We do not remove connection from hashtable and
* when ptlrpc_connection_fini()->lh_exit->conn_exit()
* path is called.
*/
- if (atomic_dec_return(&conn->c_refcount) == 1)
+ if (cfs_atomic_dec_return(&conn->c_refcount) == 1)
rc = 1;
CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
+ conn, cfs_atomic_read(&conn->c_refcount),
libcfs_nid2str(conn->c_peer.nid));
RETURN(rc);
{
ENTRY;
- atomic_inc(&conn->c_refcount);
+ cfs_atomic_inc(&conn->c_refcount);
CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
+ conn, cfs_atomic_read(&conn->c_refcount),
libcfs_nid2str(conn->c_peer.nid));
RETURN(conn);
}
static int
-conn_compare(void *key, struct hlist_node *hnode)
+conn_compare(void *key, cfs_hlist_node_t *hnode)
{
struct ptlrpc_connection *conn;
lnet_process_id_t *conn_key;
LASSERT(key != NULL);
conn_key = (lnet_process_id_t*)key;
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
return conn_key->nid == conn->c_peer.nid &&
conn_key->pid == conn->c_peer.pid;
}
static void *
-conn_key(struct hlist_node *hnode)
+conn_key(cfs_hlist_node_t *hnode)
{
struct ptlrpc_connection *conn;
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
return &conn->c_peer;
}
static void *
-conn_get(struct hlist_node *hnode)
+conn_get(cfs_hlist_node_t *hnode)
{
struct ptlrpc_connection *conn;
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- atomic_inc(&conn->c_refcount);
+ conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ cfs_atomic_inc(&conn->c_refcount);
return conn;
}
static void *
-conn_put(struct hlist_node *hnode)
+conn_put(cfs_hlist_node_t *hnode)
{
struct ptlrpc_connection *conn;
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- atomic_dec(&conn->c_refcount);
+ conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ cfs_atomic_dec(&conn->c_refcount);
return conn;
}
static void
-conn_exit(struct hlist_node *hnode)
+conn_exit(cfs_hlist_node_t *hnode)
{
struct ptlrpc_connection *conn;
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
/*
* Nothing should be left. Connection user put it and
* connection also was deleted from table by this time
* so we should have 0 refs.
*/
- LASSERTF(atomic_read(&conn->c_refcount) == 0,
+ LASSERTF(cfs_atomic_read(&conn->c_refcount) == 0,
"Busy connection with %d refs\n",
- atomic_read(&conn->c_refcount));
+ cfs_atomic_read(&conn->c_refcount));
OBD_FREE_PTR(conn);
}
/* Failed send: make it seem like the reply timed out, just
* like failing sends in client.c does currently... */
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_net_err = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
ptlrpc_client_wake_req(req);
}
for adaptive timeouts' early reply. */
LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_receiving_reply = 0;
req->rq_early = 0;
/* NB don't unlock till after wakeup; req can disappear under us
* since we don't have our own ref */
ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
EXIT;
}
"event type %d, status %d, desc %p\n",
ev->type, ev->status, desc);
- spin_lock(&desc->bd_lock);
+ cfs_spin_lock(&desc->bd_lock);
LASSERT(desc->bd_network_rw);
desc->bd_network_rw = 0;
* otherwise */
ptlrpc_client_wake_req(desc->bd_req);
- spin_unlock(&desc->bd_lock);
+ cfs_spin_unlock(&desc->bd_lock);
EXIT;
}
req->rq_reqbuf = ev->md.start + ev->offset;
if (ev->type == LNET_EVENT_PUT && ev->status == 0)
req->rq_reqdata_len = ev->mlength;
- do_gettimeofday(&req->rq_arrival_time);
+ cfs_gettimeofday(&req->rq_arrival_time);
req->rq_peer = ev->initiator;
req->rq_self = ev->target.nid;
req->rq_rqbd = rqbd;
#ifdef CRAY_XT3
req->rq_uid = ev->uid;
#endif
- spin_lock_init(&req->rq_lock);
+ cfs_spin_lock_init(&req->rq_lock);
CFS_INIT_LIST_HEAD(&req->rq_timed_list);
- atomic_set(&req->rq_refcount, 1);
+ cfs_atomic_set(&req->rq_refcount, 1);
if (ev->type == LNET_EVENT_PUT)
CDEBUG(D_RPCTRACE, "incoming req@%p x"LPU64" msgsize %u\n",
req, req->rq_xid, ev->mlength);
CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
- spin_lock(&service->srv_lock);
+ cfs_spin_lock(&service->srv_lock);
req->rq_history_seq = service->srv_request_seq++;
- list_add_tail(&req->rq_history_list, &service->srv_request_history);
+ cfs_list_add_tail(&req->rq_history_list, &service->srv_request_history);
if (ev->unlinked) {
service->srv_nrqbd_receiving--;
rqbd->rqbd_refcount++;
}
- list_add_tail(&req->rq_list, &service->srv_req_in_queue);
+ cfs_list_add_tail(&req->rq_list, &service->srv_req_in_queue);
service->srv_n_queued_reqs++;
/* NB everything can disappear under us once the request
* has been queued and we unlock, so do the wake now... */
cfs_waitq_signal(&service->srv_waitq);
- spin_unlock(&service->srv_lock);
+ cfs_spin_unlock(&service->srv_lock);
EXIT;
}
* net's ref on 'rs' */
LASSERT (ev->unlinked);
ptlrpc_rs_decref(rs);
- atomic_dec (&svc->srv_outstanding_replies);
+ cfs_atomic_dec (&svc->srv_outstanding_replies);
EXIT;
return;
}
if (ev->unlinked) {
/* Last network callback. The net's ref on 'rs' stays put
* until ptlrpc_handle_rs() is done with it */
- spin_lock(&svc->srv_lock);
- spin_lock(&rs->rs_lock);
+ cfs_spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&rs->rs_lock);
rs->rs_on_net = 0;
if (!rs->rs_no_ack ||
rs->rs_transno <= rs->rs_export->exp_obd->obd_last_committed)
ptlrpc_schedule_difficult_reply (rs);
- spin_unlock(&rs->rs_lock);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&rs->rs_lock);
+ cfs_spin_unlock(&svc->srv_lock);
}
EXIT;
"event type %d, status %d, desc %p\n",
ev->type, ev->status, desc);
- spin_lock(&desc->bd_lock);
+ cfs_spin_lock(&desc->bd_lock);
if ((ev->type == LNET_EVENT_ACK ||
ev->type == LNET_EVENT_REPLY) &&
cfs_waitq_signal(&desc->bd_waitq);
}
- spin_unlock(&desc->bd_lock);
+ cfs_spin_unlock(&desc->bd_lock);
EXIT;
}
void *liblustre_services_callback;
void *
-liblustre_register_waitidle_callback (struct list_head *callback_list,
+liblustre_register_waitidle_callback (cfs_list_t *callback_list,
const char *name,
int (*fn)(void *arg), void *arg)
{
llwc->llwc_name = name;
llwc->llwc_fn = fn;
llwc->llwc_arg = arg;
- list_add_tail(&llwc->llwc_list, callback_list);
+ cfs_list_add_tail(&llwc->llwc_list, callback_list);
return (llwc);
}
{
struct liblustre_wait_callback *llwc = opaque;
- list_del(&llwc->llwc_list);
+ cfs_list_del(&llwc->llwc_list);
OBD_FREE(llwc, sizeof(*llwc));
}
int
liblustre_wait_event (int timeout)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct liblustre_wait_callback *llwc;
int found_something = 0;
found_something = 1;
/* Give all registered callbacks a bite at the cherry */
- list_for_each(tmp, &liblustre_wait_callbacks) {
- llwc = list_entry(tmp, struct liblustre_wait_callback,
- llwc_list);
+ cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
+ llwc = cfs_list_entry(tmp,
+ struct liblustre_wait_callback,
+ llwc_list);
if (llwc->llwc_fn(llwc->llwc_arg))
found_something = 1;
{
static int recursed = 0;
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct liblustre_wait_callback *llwc;
int idle = 0;
idle = 1;
- list_for_each(tmp, &liblustre_idle_callbacks) {
- llwc = list_entry(tmp, struct liblustre_wait_callback,
- llwc_list);
+ cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
+ llwc = cfs_list_entry(tmp,
+ struct liblustre_wait_callback,
+ llwc_list);
if (!llwc->llwc_fn(llwc->llwc_arg)) {
idle = 0;
liblustre_register_wait_callback("liblustre_check_services",
&liblustre_check_services,
NULL);
- init_completion_module(liblustre_wait_event);
+ cfs_init_completion_module(liblustre_wait_event);
#endif
rc = ptlrpcd_addref();
if (rc == 0)
/* Each mechanism is described by the following struct: */
struct gss_api_mech {
- struct list_head gm_list;
- struct module *gm_owner;
+ cfs_list_t gm_list;
+ cfs_module_t *gm_owner;
char *gm_name;
rawobj_t gm_oid;
- atomic_t gm_count;
+ cfs_atomic_t gm_count;
struct gss_api_ops *gm_ops;
int gm_sf_num;
struct subflavor_desc *gm_sfs;
/* 4. now the token */
LASSERT(size >= (sizeof(__u32) + token_size));
*p++ = cpu_to_le32(((__u32) token_size));
- if (copy_from_user(p, token, token_size)) {
+ if (cfs_copy_from_user(p, token, token_size)) {
CERROR("can't copy token\n");
return -EFAULT;
}
- size -= sizeof(__u32) + size_round4(token_size);
+ size -= sizeof(__u32) + cfs_size_round4(token_size);
req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
msg->lm_buflens[offset] - size, 0);
return -EPROTO;
}
- if (outlen < (4 + 2) * 4 + size_round4(ghdr->gh_handle.len) +
- size_round4(msg->lm_buflens[2])) {
+ if (outlen < (4 + 2) * 4 + cfs_size_round4(ghdr->gh_handle.len) +
+ cfs_size_round4(msg->lm_buflens[2])) {
CERROR("output buffer size %ld too small\n", outlen);
return -EFAULT;
}
status = 0;
effective = 0;
- if (copy_to_user(outbuf, &status, 4))
+ if (cfs_copy_to_user(outbuf, &status, 4))
return -EFAULT;
outbuf += 4;
- if (copy_to_user(outbuf, &ghdr->gh_major, 4))
+ if (cfs_copy_to_user(outbuf, &ghdr->gh_major, 4))
return -EFAULT;
outbuf += 4;
- if (copy_to_user(outbuf, &ghdr->gh_minor, 4))
+ if (cfs_copy_to_user(outbuf, &ghdr->gh_minor, 4))
return -EFAULT;
outbuf += 4;
- if (copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
+ if (cfs_copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
return -EFAULT;
outbuf += 4;
effective += 4 * 4;
/* handle */
obj_len = ghdr->gh_handle.len;
round_len = (obj_len + 3) & ~ 3;
- if (copy_to_user(outbuf, &obj_len, 4))
+ if (cfs_copy_to_user(outbuf, &obj_len, 4))
return -EFAULT;
outbuf += 4;
- if (copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
+ if (cfs_copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
return -EFAULT;
outbuf += round_len;
effective += 4 + round_len;
/* out token */
obj_len = msg->lm_buflens[2];
round_len = (obj_len + 3) & ~ 3;
- if (copy_to_user(outbuf, &obj_len, 4))
+ if (cfs_copy_to_user(outbuf, &obj_len, 4))
return -EFAULT;
outbuf += 4;
- if (copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
+ if (cfs_copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
return -EFAULT;
outbuf += round_len;
effective += 4 + round_len;
"version\n", count, (unsigned long) sizeof(param));
RETURN(-EINVAL);
}
- if (copy_from_user(¶m, buffer, sizeof(param))) {
+ if (cfs_copy_from_user(¶m, buffer, sizeof(param))) {
CERROR("failed copy data from lgssd\n");
RETURN(-EFAULT);
}
RETURN(-EINVAL);
}
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
if (obd->obd_stopping) {
CERROR("obd %s has stopped\n", obdname);
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
RETURN(-EINVAL);
}
strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
CERROR("obd %s is not a client device\n", obdname);
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
RETURN(-EINVAL);
}
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
- down_read(&obd->u.cli.cl_sem);
+ cfs_down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import == NULL) {
CERROR("obd %s: import has gone\n", obd->obd_name);
RETURN(-EINVAL);
}
imp = class_import_get(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
+ cfs_up_read(&obd->u.cli.cl_sem);
if (imp->imp_deactive) {
CERROR("import has been deactivated\n");
param.reply_length = lsize;
out_copy:
- if (copy_to_user(buffer, ¶m, sizeof(param)))
+ if (cfs_copy_to_user(buffer, ¶m, sizeof(param)))
rc = -EFAULT;
else
rc = 0;
int rc;
ENTRY;
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
RETURN(0);
}
- might_sleep();
+ cfs_might_sleep();
CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
sec_is_reverse(ctx->cc_sec) ?
GSS_SEQ_WIN_MAIN / 4)
struct gss_svc_seq_data {
- spinlock_t ssd_lock;
+ cfs_spinlock_t ssd_lock;
/*
* highest sequence number seen so far, for main and back window
*/
__u32 gc_flavor;
__u32 gc_proc;
__u32 gc_win;
- atomic_t gc_seq;
+ cfs_atomic_t gc_seq;
rawobj_t gc_handle;
struct gss_ctx *gc_mechctx;
/* handle for the buddy svc ctx */
struct gss_sec {
struct ptlrpc_sec gs_base;
struct gss_api_mech *gs_mech;
- spinlock_t gs_lock;
+ cfs_spinlock_t gs_lock;
__u64 gs_rvs_hdl;
};
struct gss_sec_pipefs {
struct gss_sec gsp_base;
int gsp_chash_size; /* must be 2^n */
- struct hlist_head gsp_chash[0];
+ cfs_hlist_head_t gsp_chash[0];
};
/*
/*
* all contexts listed here. access is protected by sec spinlock.
*/
- struct hlist_head gsk_clist;
+ cfs_hlist_head_t gsk_clist;
/*
* specially point to root ctx (only one at a time). access is
* protected by sec spinlock.
/*
* specially serialize upcalls for root context.
*/
- struct mutex gsk_root_uc_lock;
+ cfs_mutex_t gsk_root_uc_lock;
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- struct mutex gsk_uc_lock; /* serialize upcalls */
+ cfs_mutex_t gsk_uc_lock; /* serialize upcalls */
#endif
};
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- mutex_lock(&gsec_kr->gsk_uc_lock);
+ cfs_mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}
static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- mutex_unlock(&gsec_kr->gsk_uc_lock);
+ cfs_mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}
LASSERT(timer);
CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
- timeout = timeout * HZ + cfs_time_current();
+ timeout = timeout * CFS_HZ + cfs_time_current();
init_timer(timer);
timer->expires = timeout;
}
ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
- clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
- atomic_inc(&ctx->cc_refcount); /* for the caller */
+ cfs_clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
+ cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */
return ctx;
}
/* at this time the association with key has been broken. */
LASSERT(sec);
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
LASSERT(gctx_kr->gck_key == NULL);
ctx_clear_timer_kr(ctx);
OBD_FREE_PTR(gctx_kr);
- atomic_dec(&sec->ps_nctx);
+ cfs_atomic_dec(&sec->ps_nctx);
sptlrpc_sec_put(sec);
}
if (sync) {
ctx_destroy_kr(ctx);
} else {
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
sptlrpc_gc_add_ctx(ctx);
}
}
static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- if (atomic_dec_and_test(&ctx->cc_refcount))
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
ctx_release_kr(ctx, sync);
}
* - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
*/
-static inline void spin_lock_if(spinlock_t *lock, int condition)
+static inline void spin_lock_if(cfs_spinlock_t *lock, int condition)
{
if (condition)
- spin_lock(lock);
+ cfs_spin_lock(lock);
}
-static inline void spin_unlock_if(spinlock_t *lock, int condition)
+static inline void spin_unlock_if(cfs_spinlock_t *lock, int condition)
{
if (condition)
- spin_unlock(lock);
+ cfs_spin_unlock(lock);
}
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
struct ptlrpc_sec *sec = ctx->cc_sec;
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(!cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
spin_lock_if(&sec->ps_lock, !locked);
- atomic_inc(&ctx->cc_refcount);
- set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+ cfs_atomic_inc(&ctx->cc_refcount);
+ cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
if (is_root)
gsec_kr->gsk_root_ctx = ctx;
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
/* if hashed bit has gone, leave the job to somebody who is doing it */
- if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
+ if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
return 0;
/* drop ref inside spin lock to prevent race with other operations */
if (gsec_kr->gsk_root_ctx == ctx)
gsec_kr->gsk_root_ctx = NULL;
- hlist_del_init(&ctx->cc_cache);
- atomic_dec(&ctx->cc_refcount);
+ cfs_hlist_del_init(&ctx->cc_cache);
+ cfs_atomic_dec(&ctx->cc_refcount);
spin_unlock_if(&sec->ps_lock, !locked);
*/
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(atomic_read(&key->usage) > 0);
LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
LASSERT(key->payload.data == NULL);
/* at this time context may or may not in list. */
key_get(key);
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
ctx2gctx_keyring(ctx)->gck_key = key;
key->payload.data = ctx;
}
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
LASSERT(key->payload.data == ctx);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
/* must revoke the key, or others may treat it as newly created */
key_revoke_locked(key);
/*
* caller should hold one ref on contexts in freelist.
*/
-static void dispose_ctx_list_kr(struct hlist_head *freelist)
+static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
{
- struct hlist_node *pos, *next;
+ cfs_hlist_node_t *pos, *next;
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx;
- hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
- hlist_del_init(&ctx->cc_cache);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
+ cfs_hlist_del_init(&ctx->cc_cache);
/* reverse ctx: update current seq to buddy svcctx if exist.
* ideally this should be done at gss_cli_ctx_finalize(), but
if (!rawobj_empty(&gctx->gc_svc_handle) &&
sec_is_reverse(gctx->gc_base.cc_sec)) {
gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
- (__u32) atomic_read(&gctx->gc_seq));
+ (__u32) cfs_atomic_read(&gctx->gc_seq));
}
/* we need to wakeup waiting reqs here. the context might
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
struct ptlrpc_cli_ctx *ctx = NULL;
- spin_lock(&sec->ps_lock);
+ cfs_spin_lock(&sec->ps_lock);
ctx = gsec_kr->gsk_root_ctx;
if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
- struct hlist_node *node;
+ cfs_hlist_node_t *node;
struct ptlrpc_cli_ctx *tmp;
/* reverse ctx, search root ctx in list, choose the one
* with shortest expire time, which is most possibly have
* an established peer ctx at client side. */
- hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, cc_cache) {
+ cfs_hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist,
+ cc_cache) {
if (ctx == NULL || ctx->cc_expire == 0 ||
ctx->cc_expire > tmp->cc_expire) {
ctx = tmp;
}
if (ctx) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
- atomic_inc(&ctx->cc_refcount);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
+ cfs_atomic_inc(&ctx->cc_refcount);
}
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
return ctx;
}
struct key *key)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
struct ptlrpc_cli_ctx *ctx;
cfs_time_t now;
ENTRY;
LASSERT(sec_is_reverse(sec));
- spin_lock(&sec->ps_lock);
+ cfs_spin_lock(&sec->ps_lock);
now = cfs_time_current_sec();
/* set all existing ctxs short expiry */
- hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
+ cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
ctx->cc_early_expire = 1;
ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
if (key)
bind_key_ctx(key, new_ctx);
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
}
static void construct_key_desc(void *buf, int bufsize,
CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
- mutex_init(&gsec_kr->gsk_root_uc_lock);
+ cfs_mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- mutex_init(&gsec_kr->gsk_uc_lock);
+ cfs_mutex_init(&gsec_kr->gsk_uc_lock);
#endif
if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
- LASSERT(hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist));
LASSERT(gsec_kr->gsk_root_ctx == NULL);
gss_sec_destroy_common(gsec);
* the root upcall lock, make sure nobody else populated new root
* context after last check. */
if (is_root) {
- mutex_lock(&gsec_kr->gsk_root_uc_lock);
+ cfs_mutex_lock(&gsec_kr->gsk_root_uc_lock);
ctx = sec_lookup_root_ctx_kr(sec);
if (ctx)
if (likely(key->payload.data != NULL)) {
ctx = key->payload.data;
- LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 1);
LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
LASSERT(atomic_read(&key->usage) >= 2);
/* simply take a ref and return. it's upper layer's
* responsibility to detect & replace dead ctx. */
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
} else {
/* pre initialization with a cli_ctx. this can't be done in
* key_instantiate() because we'v no enough information
key_put(key);
out:
if (is_root)
- mutex_unlock(&gsec_kr->gsk_root_uc_lock);
+ cfs_mutex_unlock(&gsec_kr->gsk_root_uc_lock);
RETURN(ctx);
}
struct ptlrpc_cli_ctx *ctx,
int sync)
{
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
ctx_release_kr(ctx, sync);
}
int grace, int force)
{
struct gss_sec_keyring *gsec_kr;
- struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
- struct hlist_node *pos, *next;
+ cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT;
+ cfs_hlist_node_t *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
gsec_kr = sec2gsec_keyring(sec);
- spin_lock(&sec->ps_lock);
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_cache) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ cfs_spin_lock(&sec->ps_lock);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
continue;
/* at this moment there's at least 2 base reference:
* key association and in-list. */
- if (atomic_read(&ctx->cc_refcount) > 2) {
+ if (cfs_atomic_read(&ctx->cc_refcount) > 2) {
if (!force)
continue;
CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
ctx, ctx->cc_vcred.vc_uid,
sec2target_str(ctx->cc_sec),
- atomic_read(&ctx->cc_refcount) - 2);
+ cfs_atomic_read(&ctx->cc_refcount) - 2);
}
- set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
if (!grace)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
if (ctx_unlist_kr(ctx, 1)) {
- hlist_add_head(&ctx->cc_cache, &freelist);
+ cfs_hlist_add_head(&ctx->cc_cache, &freelist);
} else {
- LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
- atomic_dec(&ctx->cc_refcount);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
+ cfs_atomic_dec(&ctx->cc_refcount);
}
}
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
dispose_ctx_list_kr(&freelist);
EXIT;
ENTRY;
CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
- sec, atomic_read(&sec->ps_refcount), atomic_read(&sec->ps_nctx),
+ sec, cfs_atomic_read(&sec->ps_refcount),
+ cfs_atomic_read(&sec->ps_nctx),
uid, grace, force);
if (uid != -1 && uid != 0)
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
- struct hlist_node *pos, *next;
+ cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT;
+ cfs_hlist_node_t *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
CWARN("running gc\n");
- spin_lock(&sec->ps_lock);
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_cache) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ cfs_spin_lock(&sec->ps_lock);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
- hlist_add_head(&ctx->cc_cache, &freelist);
+ cfs_hlist_add_head(&ctx->cc_cache, &freelist);
CWARN("unhashed ctx %p\n", ctx);
} else {
- LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
- atomic_dec(&ctx->cc_refcount);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
+ cfs_atomic_dec(&ctx->cc_refcount);
}
}
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
dispose_ctx_list_kr(&freelist);
EXIT;
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_node *pos, *next;
+ cfs_hlist_node_t *pos, *next;
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx;
time_t now = cfs_time_current_sec();
ENTRY;
- spin_lock(&sec->ps_lock);
- hlist_for_each_entry_safe(ctx, pos, next,
+ cfs_spin_lock(&sec->ps_lock);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
struct key *key;
char flags_str[40];
"seq %d, win %u, key %08x(ref %d), "
"hdl "LPX64":"LPX64", mech: %s\n",
ctx, ctx->cc_vcred.vc_uid,
- atomic_read(&ctx->cc_refcount),
+ cfs_atomic_read(&ctx->cc_refcount),
ctx->cc_expire,
ctx->cc_expire ? ctx->cc_expire - now : 0,
flags_str,
- atomic_read(&gctx->gc_seq),
+ cfs_atomic_read(&gctx->gc_seq),
gctx->gc_win,
key ? key->serial : 0,
key ? atomic_read(&key->usage) : 0,
gss_handle_to_u64(&gctx->gc_svc_handle),
mech);
}
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
RETURN(0);
}
static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(ctx->cc_sec);
if (cli_ctx_check_death(ctx)) {
static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(ctx->cc_sec);
cli_ctx_expire(ctx);
*/
LASSERT(cfs_current()->signal->session_keyring);
- lockdep_off();
+ cfs_lockdep_off();
rc = key_link(cfs_current()->signal->session_keyring, key);
- lockdep_on();
+ cfs_lockdep_on();
if (unlikely(rc)) {
CERROR("failed to link key %08x to keyring %08x: %d\n",
key->serial,
RETURN(rc);
}
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(ctx->cc_sec);
ctx_clear_timer_kr(ctx);
cli_ctx_expire(ctx);
if (rc != -ERESTART)
- set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
}
/* let user space think it's a success */
#include "gss_asn1.h"
#include "gss_krb5.h"
-static spinlock_t krb5_seq_lock;
+static cfs_spinlock_t krb5_seq_lock;
struct krb5_enctype {
char *ke_dispname;
}
khdr->kh_filler = 0xff;
- spin_lock(&krb5_seq_lock);
+ cfs_spin_lock(&krb5_seq_lock);
khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- spin_unlock(&krb5_seq_lock);
+ cfs_spin_unlock(&krb5_seq_lock);
}
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
fill_krb5_header(kctx, khdr, 1);
/* generate confounder */
- get_random_bytes(conf, ke->ke_conf_size);
+ ll_get_random_bytes(conf, ke->ke_conf_size);
/* get encryption blocksize. note kc_keye might not associated with
* a tfm, currently only for arcfour-hmac */
fill_krb5_header(kctx, khdr, 1);
/* generate confounder */
- get_random_bytes(conf, ke->ke_conf_size);
+ ll_get_random_bytes(conf, ke->ke_conf_size);
/* get encryption blocksize. note kc_keye might not associated with
* a tfm, currently only for arcfour-hmac */
{
int status;
- spin_lock_init(&krb5_seq_lock);
+ cfs_spin_lock_init(&krb5_seq_lock);
status = lgss_mech_register(&gss_kerberos_mech);
if (status)
#include "gss_api.h"
static CFS_LIST_HEAD(registered_mechs);
-static spinlock_t registered_mechs_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t registered_mechs_lock = CFS_SPIN_LOCK_UNLOCKED;
int lgss_mech_register(struct gss_api_mech *gm)
{
- spin_lock(®istered_mechs_lock);
- list_add(&gm->gm_list, ®istered_mechs);
- spin_unlock(®istered_mechs_lock);
+ cfs_spin_lock(®istered_mechs_lock);
+ cfs_list_add(&gm->gm_list, ®istered_mechs);
+ cfs_spin_unlock(®istered_mechs_lock);
CWARN("Register %s mechanism\n", gm->gm_name);
return 0;
}
void lgss_mech_unregister(struct gss_api_mech *gm)
{
- spin_lock(®istered_mechs_lock);
- list_del(&gm->gm_list);
- spin_unlock(®istered_mechs_lock);
+ cfs_spin_lock(®istered_mechs_lock);
+ cfs_list_del(&gm->gm_list);
+ cfs_spin_unlock(®istered_mechs_lock);
CWARN("Unregister %s mechanism\n", gm->gm_name);
}
struct gss_api_mech *lgss_mech_get(struct gss_api_mech *gm)
{
- __module_get(gm->gm_owner);
+ __cfs_module_get(gm->gm_owner);
return gm;
}
{
struct gss_api_mech *pos, *gm = NULL;
- spin_lock(®istered_mechs_lock);
- list_for_each_entry(pos, ®istered_mechs, gm_list) {
+ cfs_spin_lock(®istered_mechs_lock);
+ cfs_list_for_each_entry(pos, ®istered_mechs, gm_list) {
if (0 == strcmp(name, pos->gm_name)) {
- if (!try_module_get(pos->gm_owner))
+ if (!cfs_try_module_get(pos->gm_owner))
continue;
gm = pos;
break;
}
}
- spin_unlock(®istered_mechs_lock);
+ cfs_spin_unlock(®istered_mechs_lock);
return gm;
}
{
struct gss_api_mech *pos, *gm = NULL;
- spin_lock(®istered_mechs_lock);
- list_for_each_entry(pos, ®istered_mechs, gm_list) {
- if (!try_module_get(pos->gm_owner))
+ cfs_spin_lock(®istered_mechs_lock);
+ cfs_list_for_each_entry(pos, ®istered_mechs, gm_list) {
+ if (!cfs_try_module_get(pos->gm_owner))
continue;
if (!mech_supports_subflavor(pos, subflavor)) {
- module_put(pos->gm_owner);
+ cfs_module_put(pos->gm_owner);
continue;
}
gm = pos;
break;
}
- spin_unlock(®istered_mechs_lock);
+ cfs_spin_unlock(®istered_mechs_lock);
return gm;
}
void lgss_mech_put(struct gss_api_mech *gm)
{
- module_put(gm->gm_owner);
+ cfs_module_put(gm->gm_owner);
}
/* The mech could probably be determined from the token instead, but it's just
OBD_FREE_PTR(gctx);
- atomic_dec(&sec->ps_nctx);
+ cfs_atomic_dec(&sec->ps_nctx);
sptlrpc_sec_put(sec);
}
static
-void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
+void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
{
- set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- atomic_inc(&ctx->cc_refcount);
- hlist_add_head(&ctx->cc_cache, hash);
+ cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ cfs_atomic_inc(&ctx->cc_refcount);
+ cfs_hlist_add_head(&ctx->cc_cache, hash);
}
/*
* caller must hold spinlock
*/
static
-void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
+void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
{
LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!hlist_unhashed(&ctx->cc_cache));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
- clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- if (atomic_dec_and_test(&ctx->cc_refcount)) {
- __hlist_del(&ctx->cc_cache);
- hlist_add_head(&ctx->cc_cache, freelist);
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
+ __cfs_hlist_del(&ctx->cc_cache);
+ cfs_hlist_add_head(&ctx->cc_cache, freelist);
} else {
- hlist_del_init(&ctx->cc_cache);
+ cfs_hlist_del_init(&ctx->cc_cache);
}
}
* return 1 if the context is dead.
*/
static
-int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
+int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
+ cfs_hlist_head_t *freelist)
{
if (cli_ctx_check_death(ctx)) {
if (freelist)
static inline
int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
- struct hlist_head *freelist)
+ cfs_hlist_head_t *freelist)
{
LASSERT(ctx->cc_sec);
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
return ctx_check_death_pf(ctx, freelist);
}
}
static
-void ctx_list_destroy_pf(struct hlist_head *head)
+void ctx_list_destroy_pf(cfs_hlist_head_t *head)
{
struct ptlrpc_cli_ctx *ctx;
- while (!hlist_empty(head)) {
- ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_cache);
+ while (!cfs_hlist_empty(head)) {
+ ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
+ cc_cache);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT,
+ &ctx->cc_flags) == 0);
- hlist_del_init(&ctx->cc_cache);
+ cfs_hlist_del_init(&ctx->cc_cache);
ctx_destroy_pf(ctx->cc_sec, ctx);
}
}
void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
LASSERT(ctx->cc_sec);
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
cli_ctx_expire(ctx);
- spin_lock(&ctx->cc_sec->ps_lock);
+ cfs_spin_lock(&ctx->cc_sec->ps_lock);
- if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!hlist_unhashed(&ctx->cc_cache));
- LASSERT(atomic_read(&ctx->cc_refcount) > 1);
+ if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
+ LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
- hlist_del_init(&ctx->cc_cache);
- if (atomic_dec_and_test(&ctx->cc_refcount))
+ cfs_hlist_del_init(&ctx->cc_cache);
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
LBUG();
}
- spin_unlock(&ctx->cc_sec->ps_lock);
+ cfs_spin_unlock(&ctx->cc_sec->ps_lock);
}
/****************************************
{
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx;
- struct hlist_node *pos, *next;
+ cfs_hlist_node_t *pos, *next;
CFS_HLIST_HEAD(freelist);
unsigned int hash;
ENTRY;
(__u64) new->cc_vcred.vc_uid);
LASSERT(hash < gsec_pf->gsp_chash_size);
- spin_lock(&gsec->gs_base.ps_lock);
+ cfs_spin_lock(&gsec->gs_base.ps_lock);
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[hash], cc_cache) {
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[hash], cc_cache) {
if (!ctx_match_pf(ctx, &new->cc_vcred))
continue;
ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
- spin_unlock(&gsec->gs_base.ps_lock);
+ cfs_spin_unlock(&gsec->gs_base.ps_lock);
ctx_list_destroy_pf(&freelist);
EXIT;
static
void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
- struct hlist_head *freelist)
+ cfs_hlist_head_t *freelist)
{
struct ptlrpc_sec *sec;
struct ptlrpc_cli_ctx *ctx;
- struct hlist_node *pos, *next;
+ cfs_hlist_node_t *pos, *next;
int i;
ENTRY;
CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_cache)
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[i], cc_cache)
ctx_check_death_locked_pf(ctx, freelist);
}
hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
alloc_size = sizeof(*gsec_pf) +
- sizeof(struct hlist_head) * hash_size;
+ sizeof(cfs_hlist_head_t) * hash_size;
OBD_ALLOC(gsec_pf, alloc_size);
if (!gsec_pf)
gss_sec_destroy_common(gsec);
OBD_FREE(gsec, sizeof(*gsec_pf) +
- sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
+ sizeof(cfs_hlist_head_t) * gsec_pf->gsp_chash_size);
}
static
struct gss_sec *gsec;
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
- struct hlist_head *hash_head;
- struct hlist_node *pos, *next;
+ cfs_hlist_head_t *hash_head;
+ cfs_hlist_node_t *pos, *next;
CFS_HLIST_HEAD(freelist);
unsigned int hash, gc = 0, found = 0;
ENTRY;
- might_sleep();
+ cfs_might_sleep();
gsec = container_of(sec, struct gss_sec, gs_base);
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
LASSERT(hash < gsec_pf->gsp_chash_size);
retry:
- spin_lock(&sec->ps_lock);
+ cfs_spin_lock(&sec->ps_lock);
/* gc_next == 0 means never do gc */
if (remove_dead && sec->ps_gc_next &&
gc = 1;
}
- hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
+ cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
if (gc == 0 &&
ctx_check_death_locked_pf(ctx,
remove_dead ? &freelist : NULL))
if (found) {
if (new && new != ctx) {
/* lost the race, just free it */
- hlist_add_head(&new->cc_cache, &freelist);
+ cfs_hlist_add_head(&new->cc_cache, &freelist);
new = NULL;
}
/* hot node, move to head */
if (hash_head->first != &ctx->cc_cache) {
- __hlist_del(&ctx->cc_cache);
- hlist_add_head(&ctx->cc_cache, hash_head);
+ __cfs_hlist_del(&ctx->cc_cache);
+ cfs_hlist_add_head(&ctx->cc_cache, hash_head);
}
} else {
/* don't allocate for reverse sec */
if (sec_is_reverse(sec)) {
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
RETURN(NULL);
}
ctx_enhash_pf(new, hash_head);
ctx = new;
} else if (create) {
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
new = ctx_create_pf(sec, vcred);
if (new) {
- clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_NEW_BIT,
+ &new->cc_flags);
goto retry;
}
} else
/* hold a ref */
if (ctx)
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
/* the allocator of the context must give the first push to refresh */
if (new) {
struct ptlrpc_cli_ctx *ctx,
int sync)
{
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- LASSERT(hlist_unhashed(&ctx->cc_cache));
+ LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
/* if required async, we must clear the UPTODATE bit to prevent extra
* rpcs during destroy procedure. */
if (!sync)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
/* destroy this context */
ctx_destroy_pf(sec, ctx);
struct gss_sec *gsec;
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx;
- struct hlist_node *pos, *next;
+ cfs_hlist_node_t *pos, *next;
CFS_HLIST_HEAD(freelist);
int i, busy = 0;
ENTRY;
gsec = container_of(sec, struct gss_sec, gs_base);
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
- spin_lock(&sec->ps_lock);
+ cfs_spin_lock(&sec->ps_lock);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_cache) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[i],
+ cc_cache) {
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
continue;
- if (atomic_read(&ctx->cc_refcount) > 1) {
+ if (cfs_atomic_read(&ctx->cc_refcount) > 1) {
busy++;
if (!force)
continue;
CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
"grace %d\n",
- atomic_read(&ctx->cc_refcount),
+ cfs_atomic_read(&ctx->cc_refcount),
ctx, ctx->cc_vcred.vc_uid,
sec2target_str(ctx->cc_sec), grace);
}
ctx_unhash_pf(ctx, &freelist);
- set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
if (!grace)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT,
- &ctx->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT,
+ &ctx->cc_flags);
}
}
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
ctx_list_destroy_pf(&freelist);
RETURN(busy);
struct gss_upcall_msg {
struct rpc_pipe_msg gum_base;
- atomic_t gum_refcount;
- struct list_head gum_list;
+ cfs_atomic_t gum_refcount;
+ cfs_list_t gum_list;
__u32 gum_mechidx;
struct gss_sec *gum_gsec;
struct gss_cli_ctx *gum_gctx;
struct gss_upcall_msg_data gum_data;
};
-static atomic_t upcall_seq = ATOMIC_INIT(0);
+static cfs_atomic_t upcall_seq = CFS_ATOMIC_INIT(0);
static inline
__u32 upcall_get_sequence(void)
{
- return (__u32) atomic_inc_return(&upcall_seq);
+ return (__u32) cfs_atomic_inc_return(&upcall_seq);
}
enum mech_idx_t {
/* pipefs dentries for each mechanisms */
static struct dentry *de_pipes[MECH_MAX] = { NULL, };
/* all upcall messgaes linked here */
-static struct list_head upcall_lists[MECH_MAX];
+static cfs_list_t upcall_lists[MECH_MAX];
/* and protected by this */
-static spinlock_t upcall_locks[MECH_MAX];
+static cfs_spinlock_t upcall_locks[MECH_MAX];
static inline
void upcall_list_lock(int idx)
{
- spin_lock(&upcall_locks[idx]);
+ cfs_spin_lock(&upcall_locks[idx]);
}
static inline
void upcall_list_unlock(int idx)
{
- spin_unlock(&upcall_locks[idx]);
+ cfs_spin_unlock(&upcall_locks[idx]);
}
static
__u32 idx = msg->gum_mechidx;
upcall_list_lock(idx);
- list_add(&msg->gum_list, &upcall_lists[idx]);
+ cfs_list_add(&msg->gum_list, &upcall_lists[idx]);
upcall_list_unlock(idx);
}
__u32 idx = msg->gum_mechidx;
upcall_list_lock(idx);
- list_del_init(&msg->gum_list);
+ cfs_list_del_init(&msg->gum_list);
upcall_list_unlock(idx);
}
void gss_release_msg(struct gss_upcall_msg *gmsg)
{
ENTRY;
- LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+ LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
- if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
+ if (!cfs_atomic_dec_and_test(&gmsg->gum_refcount)) {
EXIT;
return;
}
gmsg->gum_gctx = NULL;
}
- LASSERT(list_empty(&gmsg->gum_list));
- LASSERT(list_empty(&gmsg->gum_base.list));
+ LASSERT(cfs_list_empty(&gmsg->gum_list));
+ LASSERT(cfs_list_empty(&gmsg->gum_base.list));
OBD_FREE_PTR(gmsg);
EXIT;
}
LASSERT(idx < MECH_MAX);
LASSERT_SPIN_LOCKED(&upcall_locks[idx]);
- if (list_empty(&gmsg->gum_list))
+ if (cfs_list_empty(&gmsg->gum_list))
return;
- list_del_init(&gmsg->gum_list);
- LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
- atomic_dec(&gmsg->gum_refcount);
+ cfs_list_del_init(&gmsg->gum_list);
+ LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
+ cfs_atomic_dec(&gmsg->gum_refcount);
}
static
if (gmsg->gum_gctx) {
struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
sptlrpc_cli_ctx_expire(ctx);
- set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
}
}
struct gss_upcall_msg *gmsg;
upcall_list_lock(mechidx);
- list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
+ cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
if (gmsg->gum_data.gum_seq != seq)
continue;
- LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+ LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
LASSERT(gmsg->gum_mechidx == mechidx);
- atomic_inc(&gmsg->gum_refcount);
+ cfs_atomic_inc(&gmsg->gum_refcount);
upcall_list_unlock(mechidx);
return gmsg;
}
if (mlen > buflen)
mlen = buflen;
- left = copy_to_user(dst, data, mlen);
+ left = cfs_copy_to_user(dst, data, mlen);
if (left < 0) {
msg->errno = left;
RETURN(left);
if (!buf)
RETURN(-ENOMEM);
- if (copy_from_user(buf, src, mlen)) {
+ if (cfs_copy_from_user(buf, src, mlen)) {
CERROR("failed copy user space data\n");
GOTO(out_free, rc = -EFAULT);
}
gss_unhash_msg(gss_msg);
gctx = gss_msg->gum_gctx;
LASSERT(gctx);
- LASSERT(atomic_read(&gctx->gc_base.cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&gctx->gc_base.cc_refcount) > 0);
/* timeout is not in use for now */
if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
ctx = &gctx->gc_base;
sptlrpc_cli_ctx_expire(ctx);
if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
- set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
- test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
+ cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
"fatal error" : "non-fatal");
}
static cfs_time_t ratelimit = 0;
ENTRY;
- LASSERT(list_empty(&msg->list));
+ LASSERT(cfs_list_empty(&msg->list));
/* normally errno is >= 0 */
if (msg->errno >= 0) {
gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
gumd = &gmsg->gum_data;
- LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+ LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
"errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd, msg->errno);
- atomic_inc(&gmsg->gum_refcount);
+ cfs_atomic_inc(&gmsg->gum_refcount);
gss_unhash_msg(gmsg);
if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
cfs_time_t now = cfs_time_current_sec();
LASSERT(idx < MECH_MAX);
upcall_list_lock(idx);
- while (!list_empty(&upcall_lists[idx])) {
+ while (!cfs_list_empty(&upcall_lists[idx])) {
struct gss_upcall_msg *gmsg;
struct gss_upcall_msg_data *gumd;
- gmsg = list_entry(upcall_lists[idx].next,
- struct gss_upcall_msg, gum_list);
+ gmsg = cfs_list_entry(upcall_lists[idx].next,
+ struct gss_upcall_msg, gum_list);
gumd = &gmsg->gum_data;
- LASSERT(list_empty(&gmsg->gum_base.list));
+ LASSERT(cfs_list_empty(&gmsg->gum_base.list));
CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
"nid "LPX64", obd %.*s\n", gmsg,
gumd->gum_obd);
gmsg->gum_base.errno = -EPIPE;
- atomic_inc(&gmsg->gum_refcount);
+ cfs_atomic_inc(&gmsg->gum_refcount);
gss_unhash_msg_nolock(gmsg);
gss_msg_fail_ctx(gmsg);
int rc = 0;
ENTRY;
- might_sleep();
+ cfs_might_sleep();
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_import);
gmsg->gum_base.errno = 0;
/* init upcall msg */
- atomic_set(&gmsg->gum_refcount, 1);
+ cfs_atomic_set(&gmsg->gum_refcount, 1);
gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
gmsg->gum_gsec = gsec;
gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
de_pipes[MECH_KRB5] = de;
CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
- spin_lock_init(&upcall_locks[MECH_KRB5]);
+ cfs_spin_lock_init(&upcall_locks[MECH_KRB5]);
return 0;
}
__u32 i;
for (i = 0; i < MECH_MAX; i++) {
- LASSERT(list_empty(&upcall_lists[i]));
+ LASSERT(cfs_list_empty(&upcall_lists[i]));
/* dput pipe dentry here might cause lgssd oops. */
de_pipes[i] = NULL;
LASSERT(buf);
LASSERT(buflen);
- len = size_round4(obj->len);
+ len = cfs_size_round4(obj->len);
if (*buflen < 4 + len) {
CERROR("buflen %u < %u\n", *buflen, 4 + len);
return 0;
}
- len = local ? obj->len : size_round4(obj->len);
+ len = local ? obj->len : cfs_size_round4(obj->len);
if (*buflen < len) {
CERROR("buflen %u < %u\n", *buflen, len);
obj->len = 0;
#define GSS_SVC_UPCALL_TIMEOUT (20)
-static spinlock_t __ctx_index_lock;
+static cfs_spinlock_t __ctx_index_lock;
static __u64 __ctx_index;
__u64 gss_get_next_ctx_index(void)
{
__u64 idx;
- spin_lock(&__ctx_index_lock);
+ cfs_spin_lock(&__ctx_index_lock);
idx = __ctx_index++;
- spin_unlock(&__ctx_index_lock);
+ cfs_spin_unlock(&__ctx_index_lock);
return idx;
}
len++;
if ((len & (BITS_PER_LONG/8-1)) == 0)
- hash = hash_long(hash^l, BITS_PER_LONG);
+ hash = cfs_hash_long(hash^l, BITS_PER_LONG);
} while (len);
return hash >> (BITS_PER_LONG - bits);
struct cache_head h;
__u32 lustre_svc;
__u64 nid;
- wait_queue_head_t waitq;
+ cfs_waitq_t waitq;
rawobj_t in_handle, in_token;
rawobj_t out_handle, out_token;
int major_status, minor_status;
new->lustre_svc = item->lustre_svc;
new->nid = item->nid;
- init_waitqueue_head(&new->waitq);
+ cfs_waitq_init(&new->waitq);
}
static inline void __rsi_update(struct rsi *new, struct rsi *item)
out:
rsi_free(&rsii);
if (rsip) {
- wake_up_all(&rsip->waitq);
+ cfs_waitq_broadcast(&rsip->waitq);
cache_put(&rsip->h, &rsi_cache);
} else {
status = -ENOMEM;
{
struct rsi *rsi = container_of(item, struct rsi, h);
- LASSERT(atomic_read(&item->refcnt) > 0);
+ LASSERT(cfs_atomic_read(&item->refcnt) > 0);
if (cache_put(item, cd)) {
LASSERT(item->next == NULL);
out:
rsi_free(&rsii);
if (rsip) {
- wake_up_all(&rsip->waitq);
+ cfs_waitq_broadcast(&rsip->waitq);
rsi_put(&rsip->h, &rsi_cache);
}
tmp->ctx.gsc_mechctx = NULL;
memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
- spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+ cfs_spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
#ifdef HAVE_SUNRPC_CACHE_V2
goto out;
if (rv == -ENOENT) {
CERROR("NOENT? set rsc entry negative\n");
- set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rsci.h.flags);
} else {
rawobj_t tmp_buf;
unsigned long ctx_expiry;
{
struct rsc *rsci = container_of(item, struct rsc, h);
- LASSERT(atomic_read(&item->refcnt) > 0);
+ LASSERT(cfs_atomic_read(&item->refcnt) > 0);
if (cache_put(item, cd)) {
LASSERT(item->next == NULL);
goto out;
if (rv == -ENOENT) {
CERROR("NOENT? set rsc entry negative\n");
- set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rsci.h.flags);
} else {
struct gss_api_mech *gm;
rawobj_t tmp_buf;
int n;
ENTRY;
- write_lock(&rsc_cache.hash_lock);
+ cfs_write_lock(&rsc_cache.hash_lock);
for (n = 0; n < RSC_HASHMAX; n++) {
for (ch = &rsc_cache.hash_table[n]; *ch;) {
rscp = container_of(*ch, struct rsc, h);
*ch = (*ch)->next;
rscp->h.next = NULL;
cache_get(&rscp->h);
- set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rscp->h.flags);
COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
rsc_cache.entries--;
}
}
- write_unlock(&rsc_cache.hash_lock);
+ cfs_write_unlock(&rsc_cache.hash_lock);
EXIT;
}
struct ptlrpc_reply_state *rs;
struct rsc *rsci = NULL;
struct rsi *rsip = NULL, rsikey;
- wait_queue_t wait;
+ cfs_waitlink_t wait;
int replen = sizeof(struct ptlrpc_body);
struct gss_rep_header *rephdr;
int first_check = 1;
}
cache_get(&rsip->h); /* take an extra ref */
- init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&rsip->waitq, &wait);
+ cfs_waitq_init(&rsip->waitq);
+ cfs_waitlink_init(&wait);
+ cfs_waitq_add(&rsip->waitq, &wait);
cache_check:
/* Note each time cache_check() will drop a reference if return
first_check = 0;
read_lock(&rsi_cache.hash_lock);
- valid = test_bit(CACHE_VALID, &rsip->h.flags);
+ valid = cfs_test_bit(CACHE_VALID, &rsip->h.flags);
if (valid == 0)
- set_current_state(TASK_INTERRUPTIBLE);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
read_unlock(&rsi_cache.hash_lock);
if (valid == 0)
- schedule_timeout(GSS_SVC_UPCALL_TIMEOUT * HZ);
+ cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
+ CFS_HZ);
cache_get(&rsip->h);
goto cache_check;
break;
}
- remove_wait_queue(&rsip->waitq, &wait);
+ cfs_waitq_del(&rsip->waitq, &wait);
cache_put(&rsip->h, &rsi_cache);
if (rc)
}
grctx->src_init = 1;
- grctx->src_reserve_len = size_round4(rsip->out_token.len);
+ grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);
rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
if (rc) {
if (rsci) {
/* if anything went wrong, we don't keep the context too */
if (rc != SECSVC_OK)
- set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rsci->h.flags);
else
CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
gss_handle_to_u64(&rsci->handle));
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
/* can't be found */
- set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rsc->h.flags);
/* to be removed at next scan */
rsc->h.expiry_time = 1;
}
{
int i;
- spin_lock_init(&__ctx_index_lock);
+ cfs_spin_lock_init(&__ctx_index_lock);
/*
* this helps reducing context index confliction. after server reboot,
* conflicting request from clients might be filtered out by initial
* sequence number checking, thus no chance to sent error notification
* back to clients.
*/
- get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+ ll_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
cache_register(&rsi_cache);
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(HZ >= 4);
- schedule_timeout(HZ / 4);
+ cfs_set_current_state(TASK_UNINTERRUPTIBLE);
+ LASSERT(CFS_HZ >= 4);
+ cfs_schedule_timeout(CFS_HZ / 4);
}
if (atomic_read(&rsi_cache.readers) == 0)
* statistic of "out-of-sequence-window"
*/
static struct {
- spinlock_t oos_lock;
- atomic_t oos_cli_count; /* client occurrence */
+ cfs_spinlock_t oos_lock;
+ cfs_atomic_t oos_cli_count; /* client occurrence */
int oos_cli_behind; /* client max seqs behind */
- atomic_t oos_svc_replay[3]; /* server replay detected */
- atomic_t oos_svc_pass[3]; /* server verified ok */
+ cfs_atomic_t oos_svc_replay[3]; /* server replay detected */
+ cfs_atomic_t oos_svc_pass[3]; /* server verified ok */
} gss_stat_oos = {
- .oos_cli_count = ATOMIC_INIT(0),
+ .oos_cli_count = CFS_ATOMIC_INIT(0),
.oos_cli_behind = 0,
- .oos_svc_replay = { ATOMIC_INIT(0), },
- .oos_svc_pass = { ATOMIC_INIT(0), },
+ .oos_svc_replay = { CFS_ATOMIC_INIT(0), },
+ .oos_svc_pass = { CFS_ATOMIC_INIT(0), },
};
void gss_stat_oos_record_cli(int behind)
{
- atomic_inc(&gss_stat_oos.oos_cli_count);
+ cfs_atomic_inc(&gss_stat_oos.oos_cli_count);
- spin_lock(&gss_stat_oos.oos_lock);
+ cfs_spin_lock(&gss_stat_oos.oos_lock);
if (behind > gss_stat_oos.oos_cli_behind)
gss_stat_oos.oos_cli_behind = behind;
- spin_unlock(&gss_stat_oos.oos_lock);
+ cfs_spin_unlock(&gss_stat_oos.oos_lock);
}
void gss_stat_oos_record_svc(int phase, int replay)
LASSERT(phase >= 0 && phase <= 2);
if (replay)
- atomic_inc(&gss_stat_oos.oos_svc_replay[phase]);
+ cfs_atomic_inc(&gss_stat_oos.oos_svc_replay[phase]);
else
- atomic_inc(&gss_stat_oos.oos_svc_pass[phase]);
+ cfs_atomic_inc(&gss_stat_oos.oos_svc_pass[phase]);
}
static int gss_proc_read_oos(char *page, char **start, off_t off, int count,
" phase 2: %d\n",
GSS_SEQ_WIN_MAIN,
GSS_SEQ_WIN_BACK,
- atomic_read(&gss_stat_oos.oos_cli_count),
+ cfs_atomic_read(&gss_stat_oos.oos_cli_count),
gss_stat_oos.oos_cli_behind,
- atomic_read(&gss_stat_oos.oos_svc_replay[0]),
- atomic_read(&gss_stat_oos.oos_svc_replay[1]),
- atomic_read(&gss_stat_oos.oos_svc_replay[2]),
- atomic_read(&gss_stat_oos.oos_svc_pass[2]));
+ cfs_atomic_read(&gss_stat_oos.oos_svc_replay[0]),
+ cfs_atomic_read(&gss_stat_oos.oos_svc_replay[1]),
+ cfs_atomic_read(&gss_stat_oos.oos_svc_replay[2]),
+ cfs_atomic_read(&gss_stat_oos.oos_svc_pass[2]));
return written;
}
{
int rc;
- spin_lock_init(&gss_stat_oos.oos_lock);
+ cfs_spin_lock_init(&gss_stat_oos.oos_lock);
gss_proc_root = lprocfs_register("gss", sptlrpc_proc_root,
gss_lprocfs_vars, NULL);
int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
- if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+ if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
if (!ctx->cc_early_expire)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
* someone else, in which case nobody will make further use
* of it. we don't care, and mark it UPTODATE will help
* destroying server side context when it be destroied. */
- set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
if (sec_is_reverse(ctx->cc_sec)) {
CWARN("server installed reverse ctx %p idx "LPX64", "
*/
switch (phase) {
case 0:
- if (test_bit(seq_num % win_size, window))
+ if (cfs_test_bit(seq_num % win_size, window))
goto replay;
break;
case 1:
{
int rc = 0;
- spin_lock(&ssd->ssd_lock);
+ cfs_spin_lock(&ssd->ssd_lock);
if (set == 0) {
/*
gss_stat_oos_record_svc(2, 0);
}
exit:
- spin_unlock(&ssd->ssd_lock);
+ cfs_spin_unlock(&ssd->ssd_lock);
return rc;
}
flags |= LUSTRE_GSS_PACK_USER;
redo:
- seq = atomic_inc_return(&gctx->gc_seq);
+ seq = cfs_atomic_inc_return(&gctx->gc_seq);
rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
ctx->cc_sec->ps_part,
*
* Note: null mode dosen't check sequence number. */
if (svc != SPTLRPC_SVC_NULL &&
- atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
- int behind = atomic_read(&gctx->gc_seq) - seq;
+ cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
gss_stat_oos_record_cli(behind);
CWARN("req %p: %u behind, retry signing\n", req, behind);
ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
redo:
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
/* buffer objects */
hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
LASSERT(token.len <= buflens[1]);
/* see explain in gss_cli_ctx_sign() */
- if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+ if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
GSS_SEQ_REPACK_THRESHOLD)) {
- int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+ int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
gss_stat_oos_record_cli(behind);
CWARN("req %p: %u behind, retry sealing\n", req, behind);
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
goto redo;
}
return -EOPNOTSUPP;
}
- spin_lock_init(&gsec->gs_lock);
+ cfs_spin_lock_init(&gsec->gs_lock);
gsec->gs_rvs_hdl = 0ULL;
/* initialize upper ptlrpc_sec */
sec = &gsec->gs_base;
sec->ps_policy = policy;
- atomic_set(&sec->ps_refcount, 0);
- atomic_set(&sec->ps_nctx, 0);
+ cfs_atomic_set(&sec->ps_refcount, 0);
+ cfs_atomic_set(&sec->ps_nctx, 0);
sec->ps_id = sptlrpc_get_next_secid();
sec->ps_flvr = *sf;
sec->ps_import = class_import_get(imp);
- spin_lock_init(&sec->ps_lock);
+ cfs_spin_lock_init(&sec->ps_lock);
CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
ENTRY;
LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
if (gsec->gs_mech) {
lgss_mech_put(gsec->gs_mech);
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
gctx->gc_win = 0;
- atomic_set(&gctx->gc_seq, 0);
+ cfs_atomic_set(&gctx->gc_seq, 0);
CFS_INIT_HLIST_NODE(&ctx->cc_cache);
- atomic_set(&ctx->cc_refcount, 0);
+ cfs_atomic_set(&ctx->cc_refcount, 0);
ctx->cc_sec = sec;
ctx->cc_ops = ctxops;
ctx->cc_expire = 0;
ctx->cc_flags = PTLRPC_CTX_NEW;
ctx->cc_vcred = *vcred;
- spin_lock_init(&ctx->cc_lock);
+ cfs_spin_lock_init(&ctx->cc_lock);
CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
/* take a ref on belonging sec, balanced in ctx destroying */
- atomic_inc(&sec->ps_refcount);
+ cfs_atomic_inc(&sec->ps_refcount);
/* statistic only */
- atomic_inc(&sec->ps_nctx);
+ cfs_atomic_inc(&sec->ps_nctx);
CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
sec->ps_policy->sp_name, ctx->cc_sec,
{
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
LASSERT(ctx->cc_sec == sec);
/*
* asynchronous which finished by request_out_callback(). so
* we add refcount, whoever drop finally drop the refcount to
* 0 should responsible for the rest of destroy. */
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
gss_do_ctx_fini_rpc(gctx);
gss_cli_ctx_finalize(gctx);
- if (!atomic_dec_and_test(&ctx->cc_refcount))
+ if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
return 1;
}
static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
- atomic_inc(&grctx->src_base.sc_refcount);
+ LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+ cfs_atomic_inc(&grctx->src_base.sc_refcount);
}
static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
- if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+ if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
gss_svc_reqctx_free(grctx);
}
RETURN(SECSVC_DROP);
grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
- atomic_set(&grctx->src_base.sc_refcount, 1);
+ cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
req->rq_svc_ctx = &grctx->src_base;
gw = &grctx->src_wirectx;
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
* each reverse root ctx will record its latest sequence number on its
* buddy svcctx before be destroied, so here we continue use it.
*/
- atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+ cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
CERROR("failed to dup svc handle\n");
#define IMPORT_SET_STATE(imp, state) \
do { \
- spin_lock(&imp->imp_lock); \
+ cfs_spin_lock(&imp->imp_lock); \
IMPORT_SET_STATE_NOLOCK(imp, state); \
- spin_unlock(&imp->imp_lock); \
+ cfs_spin_unlock(&imp->imp_lock); \
} while(0)
* though. */
int ptlrpc_init_import(struct obd_import *imp)
{
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_generation++;
imp->imp_state = LUSTRE_IMP_NEW;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
return 0;
}
{
int rc = 0;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_FULL &&
(conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
}
ptlrpc_deactivate_timeouts(imp);
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (obd_dump_on_timeout)
libcfs_debug_dumplog();
obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
rc = 1;
} else {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
imp->imp_client->cli_name, imp,
(imp->imp_state == LUSTRE_IMP_FULL &&
CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
imp->imp_invalid = 1;
imp->imp_generation++;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
ptlrpc_abort_inflight(imp);
obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
*/
void ptlrpc_deactivate_import(struct obd_import *imp)
{
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
ptlrpc_deactivate_and_unlock_import(imp);
}
static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
{
time_t now = cfs_time_current_sec();
- struct list_head *tmp, *n;
+ cfs_list_t *tmp, *n;
struct ptlrpc_request *req;
unsigned int timeout = 0;
- spin_lock(&imp->imp_lock);
- list_for_each_safe(tmp, n, &imp->imp_sending_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_spin_lock(&imp->imp_lock);
+ cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
return timeout;
}
*/
void ptlrpc_invalidate_import(struct obd_import *imp)
{
- struct list_head *tmp, *n;
+ cfs_list_t *tmp, *n;
struct ptlrpc_request *req;
struct l_wait_info lwi;
unsigned int timeout;
int rc;
- atomic_inc(&imp->imp_inval_count);
+ cfs_atomic_inc(&imp->imp_inval_count);
/*
* If this is an invalid MGC connection, then don't bother
(timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
NULL, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
- (atomic_read(&imp->imp_inflight) == 0), &lwi);
+ (cfs_atomic_read(&imp->imp_inflight) == 0),
+ &lwi);
if (rc) {
const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
- cli_tgt, rc, atomic_read(&imp->imp_inflight));
+ cli_tgt, rc,
+ cfs_atomic_read(&imp->imp_inflight));
- spin_lock(&imp->imp_lock);
- if (atomic_read(&imp->imp_inflight) == 0) {
- int count = atomic_read(&imp->imp_unregistering);
+ cfs_spin_lock(&imp->imp_lock);
+ if (cfs_atomic_read(&imp->imp_inflight) == 0) {
+ int count = cfs_atomic_read(&imp->imp_unregistering);
/* We know that "unregistering" rpcs only can
* survive in sending or delaying lists (they
* this point. */
rc = 0;
} else {
- list_for_each_safe(tmp, n,
- &imp->imp_sending_list) {
- req = list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
+ cfs_list_for_each_safe(tmp, n,
+ &imp->imp_sending_list) {
+ req = cfs_list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
DEBUG_REQ(D_ERROR, req,
"still on sending list");
}
- list_for_each_safe(tmp, n,
- &imp->imp_delayed_list) {
- req = list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
+ cfs_list_for_each_safe(tmp, n,
+ &imp->imp_delayed_list) {
+ req = cfs_list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
DEBUG_REQ(D_ERROR, req,
"still on delayed list");
}
"Network is sluggish? Waiting them "
"to error out.\n", cli_tgt,
ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
- atomic_read(&imp->imp_unregistering));
+ cfs_atomic_read(&imp->
+ imp_unregistering));
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
} while (rc != 0);
* Let's additionally check that no new rpcs added to import in
* "invalidate" state.
*/
- LASSERT(atomic_read(&imp->imp_inflight) == 0);
+ LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
out:
obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
sptlrpc_import_flush_all_ctx(imp);
- atomic_dec(&imp->imp_inval_count);
+ cfs_atomic_dec(&imp->imp_inval_count);
cfs_waitq_broadcast(&imp->imp_recovery_waitq);
}
{
struct obd_device *obd = imp->imp_obd;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_invalid = 0;
ptlrpc_activate_timeouts(imp);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
}
CDEBUG(D_HA, "%s: waking up pinger\n",
obd2cli_tgt(imp->imp_obd));
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_force_verify = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
ptlrpc_pinger_wake_up();
}
/* Do a fresh connect next time by zeroing the handle */
ptlrpc_disconnect_import(imp, 1);
/* Wait for all invalidate calls to finish */
- if (atomic_read(&imp->imp_inval_count) > 0) {
+ if (cfs_atomic_read(&imp->imp_inval_count) > 0) {
int rc;
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
- (atomic_read(&imp->imp_inval_count) == 0),
+ (cfs_atomic_read(&imp->imp_inval_count) == 0),
&lwi);
if (rc)
CERROR("Interrupted, inval=%d\n",
- atomic_read(&imp->imp_inval_count));
+ cfs_atomic_read(&imp->imp_inval_count));
}
/* Allow reconnect attempts */
int tried_all = 1;
ENTRY;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
- if (list_empty(&imp->imp_conn_list)) {
+ if (cfs_list_empty(&imp->imp_conn_list)) {
CERROR("%s: no connections available\n",
imp->imp_obd->obd_name);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(-EINVAL);
}
- list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+ cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
imp->imp_obd->obd_name,
libcfs_nid2str(conn->oic_conn->c_peer.nid),
imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(0);
}
static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
{
struct ptlrpc_request *req;
- struct list_head *tmp;
+ cfs_list_t *tmp;
- if (list_empty(&imp->imp_replay_list))
+ if (cfs_list_empty(&imp->imp_replay_list))
return 0;
tmp = imp->imp_replay_list.next;
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
*transno = req->rq_transno;
if (req->rq_transno == 0) {
DEBUG_REQ(D_ERROR, req, "zero transno in replay");
int rc;
ENTRY;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CERROR("can't connect to a closed import\n");
RETURN(-EINVAL);
} else if (imp->imp_state == LUSTRE_IMP_FULL) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CERROR("already connected\n");
RETURN(0);
} else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CERROR("already connecting\n");
RETURN(-EALREADY);
}
set_transno = ptlrpc_first_transno(imp,
&imp->imp_connect_data.ocd_transno);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (new_uuid) {
struct obd_uuid uuid;
if (imp->imp_recon_bk) {
CDEBUG(D_HA, "Last reconnection attempt (%d) for %s\n",
imp->imp_conn_cnt, obd2cli_tgt(imp->imp_obd));
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_last_recon = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
}
aa->pcaa_initial_connect = initial_connect;
if (aa->pcaa_initial_connect) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_replayable = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
lustre_msg_add_op_flags(request->rq_reqmsg,
MSG_CONNECT_INITIAL);
}
ENTRY;
- spin_lock(&imp->imp_lock);
- if (list_empty(&imp->imp_conn_list))
+ cfs_spin_lock(&imp->imp_lock);
+ if (cfs_list_empty(&imp->imp_conn_list))
GOTO(unlock, 0);
#ifdef __KERNEL__
- imp_conn = list_entry(imp->imp_conn_list.prev,
- struct obd_import_conn,
- oic_item);
+ imp_conn = cfs_list_entry(imp->imp_conn_list.prev,
+ struct obd_import_conn,
+ oic_item);
/* XXX: When the failover node is the primary node, it is possible
* to have two identical connections in imp_conn_list. We must
#endif
unlock:
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (wake_pinger)
ptlrpc_pinger_wake_up();
int msg_flags;
ENTRY;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(0);
}
/* if this reconnect to busy export - not need select new target
* for connecting*/
imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
GOTO(out, rc);
}
if (aa->pcaa_initial_connect) {
if (msg_flags & MSG_CONNECT_REPLAYABLE) {
imp->imp_replayable = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "connected to replayable target: %s\n",
obd2cli_tgt(imp->imp_obd));
} else {
imp->imp_replayable = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
/* if applies, adjust the imp->imp_msg_magic here
GOTO(finish, rc = 0);
} else {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
/* Determine what recovery state to move the import to. */
imp->imp_obd->obd_name,
obd2cli_tgt(imp->imp_obd));
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_resend_replay = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
} else {
ocd = req_capsule_server_sized_get(&request->rq_pill,
&RMF_CONNECT_DATA, ret);
- spin_lock(&imp->imp_lock);
- list_del(&imp->imp_conn_current->oic_item);
- list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
+ cfs_spin_lock(&imp->imp_lock);
+ cfs_list_del(&imp->imp_conn_current->oic_item);
+ cfs_list_add(&imp->imp_conn_current->oic_item,
+ &imp->imp_conn_list);
imp->imp_last_success_conn =
imp->imp_conn_current->oic_last_attempt;
if (ocd == NULL) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CERROR("Wrong connect data from server\n");
rc = -EPROTO;
GOTO(out, rc);
imp->imp_connect_data = *ocd;
exp = class_conn2export(&imp->imp_dlm_handle);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
/* check that server granted subset of flags we asked for. */
LASSERTF((ocd->ocd_connect_flags &
out:
if (rc != 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (aa->pcaa_initial_connect && !imp->imp_initial_recov &&
(request->rq_import_generation == imp->imp_generation))
ptlrpc_deactivate_and_unlock_import(imp);
else
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if ((imp->imp_recon_bk && imp->imp_last_recon) ||
(rc == -EACCES)) {
(char *)imp->imp_connection->c_remote_uuid.uuid, rc);
}
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_last_recon = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
cfs_waitq_broadcast(&imp->imp_recovery_waitq);
RETURN(rc);
void * data, int rc)
{
ENTRY;
- atomic_dec(&req->rq_import->imp_replay_inflight);
+ cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
if (req->rq_status == 0 &&
!req->rq_import->imp_vbr_failed) {
ptlrpc_import_recovery_state_machine(req->rq_import);
CDEBUG(D_WARNING,
"%s: version recovery fails, reconnecting\n",
req->rq_import->imp_obd->obd_name);
- spin_lock(&req->rq_import->imp_lock);
+ cfs_spin_lock(&req->rq_import->imp_lock);
req->rq_import->imp_vbr_failed = 0;
- spin_unlock(&req->rq_import->imp_lock);
+ cfs_spin_unlock(&req->rq_import->imp_lock);
} else {
CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
"reconnecting\n",
struct ptlrpc_request *req;
ENTRY;
- LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
- atomic_inc(&imp->imp_replay_inflight);
+ LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
+ cfs_atomic_inc(&imp->imp_replay_inflight);
req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
OBD_PING);
if (req == NULL) {
- atomic_dec(&imp->imp_replay_inflight);
+ cfs_atomic_dec(&imp->imp_replay_inflight);
RETURN(-ENOMEM);
}
obd2cli_tgt(imp->imp_obd));
rc = ptlrpc_replay_next(imp, &inflight);
if (inflight == 0 &&
- atomic_read(&imp->imp_replay_inflight) == 0) {
+ cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
rc = ldlm_replay_locks(imp);
if (rc)
}
if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
- if (atomic_read(&imp->imp_replay_inflight) == 0) {
+ if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
rc = signal_completed_replay(imp);
if (rc)
}
if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
- if (atomic_read(&imp->imp_replay_inflight) == 0) {
+ if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
}
}
}
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state != LUSTRE_IMP_FULL)
GOTO(out, 0);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
LUSTRE_OBD_VERSION, rq_opc);
}
set_state:
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
out:
if (noclose)
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
/* Try all connections in the future - bz 12758 */
imp->imp_last_recon = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(rc);
}
{
ENTRY;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
imp->imp_generation++;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
ptlrpc_abort_inflight(imp);
EXIT;
drop to 0, and because 0 could mean an error */
return 0;
- spin_lock(&at->at_lock);
+ cfs_spin_lock(&at->at_lock);
if (unlikely(at->at_binstart == 0)) {
/* Special case to remove default from history */
/* if we changed, report the old value */
old = (at->at_current != old) ? old : 0;
- spin_unlock(&at->at_lock);
+ cfs_spin_unlock(&at->at_lock);
return old;
}
}
/* Not found in list, add it under a lock */
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/* Check unused under lock */
for (; i < IMP_AT_MAX_PORTALS; i++) {
at->iat_portal[i] = portal;
out:
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
return i;
}
for (; i < fmt->rf_fields[loc].nr; ++i)
if (fmt->rf_fields[loc].d[i]->rmf_size != -1)
- size += size_round(fmt->rf_fields[loc].d[i]->rmf_size);
+ size += cfs_size_round(fmt->rf_fields[loc].d[i]->
+ rmf_size);
return size;
}
#include <libcfs/list.h>
#define LLOG_CLIENT_ENTRY(ctxt, imp) do { \
- mutex_down(&ctxt->loc_sem); \
+ cfs_mutex_down(&ctxt->loc_sem); \
if (ctxt->loc_imp) { \
imp = class_import_get(ctxt->loc_imp); \
} else { \
"but I'll try again next time. Not fatal.\n", \
ctxt->loc_idx); \
imp = NULL; \
- mutex_up(&ctxt->loc_sem); \
+ cfs_mutex_up(&ctxt->loc_sem); \
return (-EINVAL); \
} \
- mutex_up(&ctxt->loc_sem); \
+ cfs_mutex_up(&ctxt->loc_sem); \
} while(0)
#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
- mutex_down(&ctxt->loc_sem); \
+ cfs_mutex_down(&ctxt->loc_sem); \
if (ctxt->loc_imp != imp) \
CWARN("loc_imp has changed from %p to %p\n", \
ctxt->loc_imp, imp); \
class_import_put(imp); \
- mutex_up(&ctxt->loc_sem); \
+ cfs_mutex_up(&ctxt->loc_sem); \
} while(0)
/* This is a callback from the llog_* functions.
ENTRY;
- if (list_empty(&ctxt->loc_handle->u.chd.chd_head)) {
+ if (cfs_list_empty(&ctxt->loc_handle->u.chd.chd_head)) {
CDEBUG(D_HA, "there is no record related to ctxt %p\n", ctxt);
RETURN(0);
}
ENTRY;
LASSERT(ctxt);
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
if (ctxt->loc_imp != imp) {
if (ctxt->loc_imp) {
CWARN("changing the import %p - %p\n",
}
ctxt->loc_imp = class_import_get(imp);
}
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
RETURN(0);
}
EXPORT_SYMBOL(llog_receptor_accept);
new_imp = ctxt->loc_obd->u.cli.cl_import;
LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
"%p - %p\n", ctxt->loc_imp, new_imp);
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
if (ctxt->loc_imp != new_imp) {
if (ctxt->loc_imp)
class_import_put(ctxt->loc_imp);
ctxt->loc_imp = class_import_get(new_imp);
}
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
RETURN(0);
}
EXPORT_SYMBOL(llog_initiator_connect);
if (!idarray)
GOTO(release_ctxt, rc = -ENOMEM);
- mutex_down(&obd->obd_olg.olg_cat_processing);
+ cfs_mutex_down(&obd->obd_olg.olg_cat_processing);
rc = llog_get_cat_list(obd, name, 0, count, idarray);
if (rc)
GOTO(out_free, rc);
out_pop:
pop_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
out_free:
- mutex_up(&obd->obd_olg.olg_cat_processing);
+ cfs_mutex_up(&obd->obd_olg.olg_cat_processing);
OBD_VFREE(idarray, size);
release_ctxt:
llog_ctxt_put(ctxt);
* hose a kernel by allowing the request history to grow too
* far. */
bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- if (val > num_physpages/(2 * bufpages))
+ if (val > cfs_num_physpages/(2 * bufpages))
return -ERANGE;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_max_history_rqbds = val;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return count;
}
if (val > svc->srv_threads_max)
return -ERANGE;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_threads_min = val;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return count;
}
if (val < svc->srv_threads_min)
return -ERANGE;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_threads_max = val;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return count;
}
struct ptlrpc_srh_iterator *srhi,
__u64 seq)
{
- struct list_head *e;
+ cfs_list_t *e;
struct ptlrpc_request *req;
if (srhi->srhi_req != NULL &&
* be near the head), we shouldn't have to do long
* re-scans */
LASSERT (srhi->srhi_seq == srhi->srhi_req->rq_history_seq);
- LASSERT (!list_empty(&svc->srv_request_history));
+ LASSERT (!cfs_list_empty(&svc->srv_request_history));
e = &srhi->srhi_req->rq_history_list;
} else {
/* search from start */
}
while (e != &svc->srv_request_history) {
- req = list_entry(e, struct ptlrpc_request, rq_history_list);
+ req = cfs_list_entry(e, struct ptlrpc_request, rq_history_list);
if (req->rq_history_seq >= seq) {
srhi->srhi_seq = req->rq_history_seq;
srhi->srhi_seq = 0;
srhi->srhi_req = NULL;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
if (rc == 0) {
*pos = srhi->srhi_seq;
struct ptlrpc_srh_iterator *srhi = iter;
int rc;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos + 1);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
if (rc != 0) {
OBD_FREE(srhi, sizeof(*srhi));
struct ptlrpc_request *req;
int rc;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, srhi->srhi_seq);
svc->srv_request_history_print_fn(s, srhi->srhi_req);
}
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return rc;
}
if (val < 0)
return -ERANGE;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_hpreq_ratio = val;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return count;
}
struct l_wait_info lwi;
int rc;
- LASSERT(!in_interrupt()); /* might sleep */
+ LASSERT(!cfs_in_interrupt()); /* might sleep */
if (!ptlrpc_server_bulk_active(desc)) /* completed or */
return; /* never started */
int rc;
ENTRY;
- LASSERT(!in_interrupt()); /* might sleep */
+ LASSERT(!cfs_in_interrupt()); /* might sleep */
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
CERROR("not replying on NULL connection\n"); /* bug 9635 */
return -ENOTCONN;
}
- atomic_inc (&svc->srv_outstanding_replies);
+ cfs_atomic_inc (&svc->srv_outstanding_replies);
ptlrpc_rs_addref(rs); /* +1 ref for the network */
rc = sptlrpc_svc_wrap_reply(req);
req->rq_xid, req->rq_reply_off);
out:
if (unlikely(rc != 0)) {
- atomic_dec (&svc->srv_outstanding_replies);
+ cfs_atomic_dec (&svc->srv_outstanding_replies);
ptlrpc_req_drop_rs(req);
}
ptlrpc_connection_put(conn);
}
}
- spin_lock(&request->rq_lock);
+ cfs_spin_lock(&request->rq_lock);
/* If the MD attach succeeds, there _will_ be a reply_in callback */
request->rq_receiving_reply = !noreply;
/* We are responsible for unlinking the reply buffer */
request->rq_resend = 0;
request->rq_restart = 0;
request->rq_reply_truncate = 0;
- spin_unlock(&request->rq_lock);
+ cfs_spin_unlock(&request->rq_lock);
if (!noreply) {
reply_md.start = request->rq_repbuf;
if (rc != 0) {
CERROR("LNetMDAttach failed: %d\n", rc);
LASSERT (rc == -ENOMEM);
- spin_lock(&request->rq_lock);
+ cfs_spin_lock(&request->rq_lock);
/* ...but the MD attach didn't succeed... */
request->rq_receiving_reply = 0;
- spin_unlock(&request->rq_lock);
+ cfs_spin_unlock(&request->rq_lock);
GOTO(cleanup_me, rc = -ENOMEM);
}
ptlrpc_request_addref(request);
if (obd->obd_svc_stats != NULL)
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
- atomic_read(&request->rq_import->imp_inflight));
+ cfs_atomic_read(&request->rq_import->imp_inflight));
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- do_gettimeofday(&request->rq_arrival_time);
+ cfs_gettimeofday(&request->rq_arrival_time);
request->rq_sent = cfs_time_current_sec();
/* We give the server rq_timeout secs to process the req, and
add the network latency for our local timeout. */
static inline int lustre_msg_hdr_size_v2(int count)
{
- return size_round(offsetof(struct lustre_msg_v2, lm_buflens[count]));
+ return cfs_size_round(offsetof(struct lustre_msg_v2,
+ lm_buflens[count]));
}
int lustre_msg_hdr_size(__u32 magic, int count)
size = lustre_msg_hdr_size_v2(count);
for (i = 0; i < count; i++)
- size += size_round(lengths[i]);
+ size += cfs_size_round(lengths[i]);
return size;
}
#if RS_DEBUG
CFS_LIST_HEAD(ptlrpc_rs_debug_lru);
-spinlock_t ptlrpc_rs_debug_lock;
+cfs_spinlock_t ptlrpc_rs_debug_lock;
#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
do { \
- spin_lock(&ptlrpc_rs_debug_lock); \
- list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
- spin_unlock(&ptlrpc_rs_debug_lock); \
+ cfs_spin_lock(&ptlrpc_rs_debug_lock); \
+ cfs_list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
+ cfs_spin_unlock(&ptlrpc_rs_debug_lock); \
} while (0)
#define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
do { \
- spin_lock(&ptlrpc_rs_debug_lock); \
- list_del(&(rs)->rs_debug_list); \
- spin_unlock(&ptlrpc_rs_debug_lock); \
+ cfs_spin_lock(&ptlrpc_rs_debug_lock); \
+ cfs_list_del(&(rs)->rs_debug_list); \
+ cfs_spin_unlock(&ptlrpc_rs_debug_lock); \
} while (0)
#else
# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
{
struct ptlrpc_reply_state *rs = NULL;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
/* See if we have anything in a pool, and wait if nothing */
- while (list_empty(&svc->srv_free_rs_list)) {
+ while (cfs_list_empty(&svc->srv_free_rs_list)) {
struct l_wait_info lwi;
int rc;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
/* If we cannot get anything for some long time, we better
bail out instead of waiting infinitely */
lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
rc = l_wait_event(svc->srv_free_rs_waitq,
- !list_empty(&svc->srv_free_rs_list), &lwi);
+ !cfs_list_empty(&svc->srv_free_rs_list),
+ &lwi);
if (rc)
goto out;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
}
- rs = list_entry(svc->srv_free_rs_list.next, struct ptlrpc_reply_state,
- rs_list);
- list_del(&rs->rs_list);
- spin_unlock(&svc->srv_lock);
+ rs = cfs_list_entry(svc->srv_free_rs_list.next,
+ struct ptlrpc_reply_state, rs_list);
+ cfs_list_del(&rs->rs_list);
+ cfs_spin_unlock(&svc->srv_lock);
LASSERT(rs);
memset(rs, 0, svc->srv_max_reply_size);
rs->rs_service = svc;
LASSERT(svc);
- spin_lock(&svc->srv_lock);
- list_add(&rs->rs_list, &svc->srv_free_rs_list);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
+ cfs_list_add(&rs->rs_list, &svc->srv_free_rs_list);
+ cfs_spin_unlock(&svc->srv_lock);
cfs_waitq_signal(&svc->srv_free_rs_waitq);
}
RETURN(rc);
rs = req->rq_reply_state;
- atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
+ cfs_atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
rs->rs_cb_id.cbid_fn = reply_out_callback;
rs->rs_cb_id.cbid_arg = rs;
rs->rs_service = req->rq_rqbd->rqbd_service;
CFS_INIT_LIST_HEAD(&rs->rs_exp_list);
CFS_INIT_LIST_HEAD(&rs->rs_obd_list);
CFS_INIT_LIST_HEAD(&rs->rs_list);
- spin_lock_init(&rs->rs_lock);
+ cfs_spin_lock_init(&rs->rs_lock);
req->rq_replen = msg_len;
req->rq_reply_state = rs;
offset = lustre_msg_hdr_size_v2(bufcount);
for (i = 0; i < n; i++)
- offset += size_round(m->lm_buflens[i]);
+ offset += cfs_size_round(m->lm_buflens[i]);
return (char *)m + offset;
}
if (move_data && msg->lm_bufcount > segment + 1) {
tail = lustre_msg_buf_v2(msg, segment + 1, 0);
for (n = segment + 1; n < msg->lm_bufcount; n++)
- tail_len += size_round(msg->lm_buflens[n]);
+ tail_len += cfs_size_round(msg->lm_buflens[n]);
}
msg->lm_buflens[segment] = newlen;
{
PTLRPC_RS_DEBUG_LRU_DEL(rs);
- LASSERT (atomic_read(&rs->rs_refcount) == 0);
+ LASSERT (cfs_atomic_read(&rs->rs_refcount) == 0);
LASSERT (!rs->rs_difficult || rs->rs_handled);
LASSERT (!rs->rs_on_net);
LASSERT (!rs->rs_scheduled);
LASSERT (rs->rs_export == NULL);
LASSERT (rs->rs_nlocks == 0);
- LASSERT (list_empty(&rs->rs_exp_list));
- LASSERT (list_empty(&rs->rs_obd_list));
+ LASSERT (cfs_list_empty(&rs->rs_exp_list));
+ LASSERT (cfs_list_empty(&rs->rs_obd_list));
sptlrpc_svc_free_rs(rs);
}
for (i = 0; i < m->lm_bufcount; i++) {
if (swabbed)
__swab32s(&m->lm_buflens[i]);
- required_len += size_round(m->lm_buflens[i]);
+ required_len += cfs_size_round(m->lm_buflens[i]);
}
if (len < required_len) {
(char *)req->rq_export->exp_connection->c_remote_uuid.uuid : "<?>",
req->rq_request_portal, req->rq_reply_portal,
req->rq_reqlen, req->rq_replen,
- req->rq_early_count, req->rq_timedout, req->rq_deadline,
- atomic_read(&req->rq_refcount), DEBUG_REQ_FLAGS(req),
+ req->rq_early_count, req->rq_timedout,
+ req->rq_deadline,
+ cfs_atomic_read(&req->rq_refcount),
+ DEBUG_REQ_FLAGS(req),
req->rq_reqmsg && req_ptlrpc_body_swabbed(req) ?
lustre_msg_get_flags(req->rq_reqmsg) : -1,
req->rq_repmsg && rep_ptlrpc_body_swabbed(req) ?
#include <obd_class.h>
#include "ptlrpc_internal.h"
-struct semaphore pinger_sem;
+cfs_semaphore_t pinger_sem;
static CFS_LIST_HEAD(pinger_imports);
-static struct list_head timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
+static cfs_list_t timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
struct ptlrpc_request *
ptlrpc_prep_ping(struct obd_import *imp)
{
return cfs_time_shift(obd_timeout);
}
-static atomic_t suspend_timeouts = ATOMIC_INIT(0);
+static cfs_atomic_t suspend_timeouts = CFS_ATOMIC_INIT(0);
static cfs_time_t suspend_wakeup_time = 0;
cfs_duration_t pinger_check_timeout(cfs_time_t time)
cfs_time_t timeout = PING_INTERVAL;
/* The timeout list is a increase order sorted list */
- mutex_down(&pinger_sem);
- list_for_each_entry(item, &timeout_list, ti_chain) {
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
int ti_timeout = item->ti_timeout;
if (timeout > ti_timeout)
timeout = ti_timeout;
break;
}
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
cfs_time_current());
}
#ifdef __KERNEL__
-static wait_queue_head_t suspend_timeouts_waitq;
+static cfs_waitq_t suspend_timeouts_waitq;
#endif
cfs_time_t ptlrpc_suspend_wakeup_time(void)
if (imp->imp_no_timeout)
return;
imp->imp_no_timeout = 1;
- atomic_inc(&suspend_timeouts);
- CDEBUG(D_HA|D_WARNING, "deactivate timeouts %u\n", atomic_read(&suspend_timeouts));
+ cfs_atomic_inc(&suspend_timeouts);
+ CDEBUG(D_HA|D_WARNING, "deactivate timeouts %u\n",
+ cfs_atomic_read(&suspend_timeouts));
#endif
}
if (!imp->imp_no_timeout)
return;
imp->imp_no_timeout = 0;
- LASSERT(atomic_read(&suspend_timeouts) > 0);
- if (atomic_dec_and_test(&suspend_timeouts)) {
+ LASSERT(cfs_atomic_read(&suspend_timeouts) > 0);
+ if (cfs_atomic_dec_and_test(&suspend_timeouts)) {
suspend_wakeup_time = cfs_time_current();
- wake_up(&suspend_timeouts_waitq);
+ cfs_waitq_signal(&suspend_timeouts_waitq);
}
- CDEBUG(D_HA|D_WARNING, "activate timeouts %u\n", atomic_read(&suspend_timeouts));
+ CDEBUG(D_HA|D_WARNING, "activate timeouts %u\n",
+ cfs_atomic_read(&suspend_timeouts));
#endif
}
int ptlrpc_check_suspend(void)
{
- if (atomic_read(&suspend_timeouts))
+ if (cfs_atomic_read(&suspend_timeouts))
return 1;
return 0;
}
{
struct l_wait_info lwi;
- if (atomic_read(&suspend_timeouts)) {
+ if (cfs_atomic_read(&suspend_timeouts)) {
DEBUG_REQ(D_NET, req, "-- suspend %d regular timeout",
- atomic_read(&suspend_timeouts));
+ cfs_atomic_read(&suspend_timeouts));
lwi = LWI_INTR(NULL, NULL);
l_wait_event(suspend_timeouts_waitq,
- atomic_read(&suspend_timeouts) == 0, &lwi);
+ cfs_atomic_read(&suspend_timeouts) == 0, &lwi);
DEBUG_REQ(D_NET, req, "-- recharge regular timeout");
return 1;
}
{
int force, level;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
level = imp->imp_state;
force = imp->imp_force_verify;
if (force)
imp->imp_force_verify = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA,
"level %s/%u force %u deactive %u pingable %u\n",
struct l_wait_info lwi;
cfs_duration_t time_to_next_wake;
struct timeout_item *item;
- struct list_head *iter;
+ cfs_list_t *iter;
- mutex_down(&pinger_sem);
- list_for_each_entry(item, &timeout_list, ti_chain) {
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
item->ti_cb(item, item->ti_cb_data);
}
- list_for_each(iter, &pinger_imports) {
+ cfs_list_for_each(iter, &pinger_imports) {
struct obd_import *imp =
- list_entry(iter, struct obd_import,
- imp_pinger_chain);
+ cfs_list_entry(iter, struct obd_import,
+ imp_pinger_chain);
ptlrpc_pinger_process_import(imp, this_ping);
/* obd_timeout might have changed */
cfs_time_seconds(PING_INTERVAL))))
ptlrpc_update_next_ping(imp);
}
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
/* update memory usage info */
obd_update_maxusage();
RETURN(-EALREADY);
ptlrpc_pinger_remove_timeouts();
- mutex_down(&pinger_sem);
+ cfs_mutex_down(&pinger_sem);
pinger_thread->t_flags = SVC_STOPPING;
cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
l_wait_event(pinger_thread->t_ctl_waitq,
(pinger_thread->t_flags & SVC_STOPPED), &lwi);
int ptlrpc_pinger_add_import(struct obd_import *imp)
{
ENTRY;
- if (!list_empty(&imp->imp_pinger_chain))
+ if (!cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-EALREADY);
- mutex_down(&pinger_sem);
+ cfs_mutex_down(&pinger_sem);
CDEBUG(D_HA, "adding pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
/* if we add to pinger we want recovery on this import */
imp->imp_obd->obd_no_recov = 0;
ptlrpc_update_next_ping(imp);
/* XXX sort, blah blah */
- list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
+ cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
class_import_get(imp);
ptlrpc_pinger_wake_up();
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
RETURN(0);
}
int ptlrpc_pinger_del_import(struct obd_import *imp)
{
ENTRY;
- if (list_empty(&imp->imp_pinger_chain))
+ if (cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
- mutex_down(&pinger_sem);
- list_del_init(&imp->imp_pinger_chain);
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_del_init(&imp->imp_pinger_chain);
CDEBUG(D_HA, "removing pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
/* if we remove from pinger we don't want recovery on this import */
imp->imp_obd->obd_no_recov = 1;
class_import_put(imp);
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
RETURN(0);
}
LASSERT_SEM_LOCKED(&pinger_sem);
- list_for_each_entry(item, &timeout_list, ti_chain)
+ cfs_list_for_each_entry(item, &timeout_list, ti_chain)
if (item->ti_event == event)
goto out;
item = ptlrpc_new_timeout(time, event, cb, data);
if (item) {
- list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
+ cfs_list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
if (tmp->ti_timeout < time) {
- list_add(&item->ti_chain, &tmp->ti_chain);
+ cfs_list_add(&item->ti_chain, &tmp->ti_chain);
goto out;
}
}
- list_add(&item->ti_chain, &timeout_list);
+ cfs_list_add(&item->ti_chain, &timeout_list);
}
out:
return item;
*/
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
timeout_cb_t cb, void *data,
- struct list_head *obd_list)
+ cfs_list_t *obd_list)
{
struct timeout_item *ti;
- mutex_down(&pinger_sem);
+ cfs_mutex_down(&pinger_sem);
ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
if (!ti) {
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
return (-EINVAL);
}
- list_add(obd_list, &ti->ti_obd_list);
- mutex_up(&pinger_sem);
+ cfs_list_add(obd_list, &ti->ti_obd_list);
+ cfs_mutex_up(&pinger_sem);
return 0;
}
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
+int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
enum timeout_event event)
{
struct timeout_item *ti = NULL, *item;
- if (list_empty(obd_list))
+ if (cfs_list_empty(obd_list))
return 0;
- mutex_down(&pinger_sem);
- list_del_init(obd_list);
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_del_init(obd_list);
/**
* If there are no obd attached to the timeout event
* list, remove this timeout event from the pinger
*/
- list_for_each_entry(item, &timeout_list, ti_chain) {
+ cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
if (item->ti_event == event) {
ti = item;
break;
}
}
LASSERTF(ti != NULL, "ti is NULL ! \n");
- if (list_empty(&ti->ti_obd_list)) {
- list_del(&ti->ti_chain);
+ if (cfs_list_empty(&ti->ti_obd_list)) {
+ cfs_list_del(&ti->ti_chain);
OBD_FREE_PTR(ti);
}
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
return 0;
}
{
struct timeout_item *item, *tmp;
- mutex_down(&pinger_sem);
- list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
- LASSERT(list_empty(&item->ti_obd_list));
- list_del(&item->ti_chain);
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
+ LASSERT(cfs_list_empty(&item->ti_obd_list));
+ cfs_list_del(&item->ti_chain);
OBD_FREE_PTR(item);
}
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
return 0;
}
static int pet_refcount = 0;
static int pet_state;
-static wait_queue_head_t pet_waitq;
+static cfs_waitq_t pet_waitq;
CFS_LIST_HEAD(pet_list);
-static spinlock_t pet_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t pet_lock = CFS_SPIN_LOCK_UNLOCKED;
int ping_evictor_wake(struct obd_export *exp)
{
struct obd_device *obd;
- spin_lock(&pet_lock);
+ cfs_spin_lock(&pet_lock);
if (pet_state != PET_READY) {
/* eventually the new obd will call here again. */
- spin_unlock(&pet_lock);
+ cfs_spin_unlock(&pet_lock);
return 1;
}
obd = class_exp2obd(exp);
- if (list_empty(&obd->obd_evict_list)) {
+ if (cfs_list_empty(&obd->obd_evict_list)) {
class_incref(obd, __FUNCTION__, cfs_current());
- list_add(&obd->obd_evict_list, &pet_list);
+ cfs_list_add(&obd->obd_evict_list, &pet_list);
}
- spin_unlock(&pet_lock);
+ cfs_spin_unlock(&pet_lock);
- wake_up(&pet_waitq);
+ cfs_waitq_signal(&pet_waitq);
return 0;
}
CDEBUG(D_HA, "Starting Ping Evictor\n");
pet_state = PET_READY;
while (1) {
- l_wait_event(pet_waitq, (!list_empty(&pet_list)) ||
+ l_wait_event(pet_waitq, (!cfs_list_empty(&pet_list)) ||
(pet_state == PET_TERMINATE), &lwi);
/* loop until all obd's will be removed */
- if ((pet_state == PET_TERMINATE) && list_empty(&pet_list))
+ if ((pet_state == PET_TERMINATE) && cfs_list_empty(&pet_list))
break;
/* we only get here if pet_exp != NULL, and the end of this
* loop is the only place which sets it NULL again, so lock
* is not strictly necessary. */
- spin_lock(&pet_lock);
- obd = list_entry(pet_list.next, struct obd_device,
- obd_evict_list);
- spin_unlock(&pet_lock);
+ cfs_spin_lock(&pet_lock);
+ obd = cfs_list_entry(pet_list.next, struct obd_device,
+ obd_evict_list);
+ cfs_spin_unlock(&pet_lock);
expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
* the obd lock (class_unlink_export), which means we can't
* lose the last ref on the export. If they've already been
* removed from the list, we won't find them here. */
- spin_lock(&obd->obd_dev_lock);
- while (!list_empty(&obd->obd_exports_timed)) {
- exp = list_entry(obd->obd_exports_timed.next,
- struct obd_export,exp_obd_chain_timed);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ while (!cfs_list_empty(&obd->obd_exports_timed)) {
+ exp = cfs_list_entry(obd->obd_exports_timed.next,
+ struct obd_export,
+ exp_obd_chain_timed);
if (expire_time > exp->exp_last_request_time) {
class_export_get(exp);
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
LCONSOLE_WARN("%s: haven't heard from client %s"
" (at %s) in %ld seconds. I think"
" it's dead, and I am evicting"
exp->exp_last_request_time);
class_fail_export(exp);
class_export_put(exp);
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
} else {
/* List is sorted, so everyone below is ok */
break;
}
}
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
- spin_lock(&pet_lock);
- list_del_init(&obd->obd_evict_list);
- spin_unlock(&pet_lock);
+ cfs_spin_lock(&pet_lock);
+ cfs_list_del_init(&obd->obd_evict_list);
+ cfs_spin_unlock(&pet_lock);
class_decref(obd, __FUNCTION__, cfs_current());
}
if (++pet_refcount > 1)
return;
- init_waitqueue_head(&pet_waitq);
+ cfs_waitq_init(&pet_waitq);
rc = cfs_kernel_thread(ping_evictor_main, NULL, CLONE_VM | CLONE_FILES);
if (rc < 0) {
return;
pet_state = PET_TERMINATE;
- wake_up(&pet_waitq);
+ cfs_waitq_signal(&pet_waitq);
}
EXPORT_SYMBOL(ping_evictor_stop);
#else /* !__KERNEL__ */
cfs_time_t curtime = cfs_time_current();
struct ptlrpc_request *req;
struct ptlrpc_request_set *set;
- struct list_head *iter;
+ cfs_list_t *iter;
struct obd_import *imp;
struct pinger_data *pd = &pinger_args;
int rc;
}
/* have we reached ping point? */
- if (!pd->pd_set && time_before(curtime, pd->pd_next_ping)) {
+ if (!pd->pd_set && cfs_time_before(curtime, pd->pd_next_ping)) {
pd->pd_recursion--;
return 0;
}
set = pd->pd_set;
/* add rpcs into set */
- mutex_down(&pinger_sem);
- list_for_each(iter, &pinger_imports) {
- struct obd_import *imp =
- list_entry(iter, struct obd_import, imp_pinger_chain);
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_for_each(iter, &pinger_imports) {
+ struct obd_import *imp = cfs_list_entry(iter, struct obd_import,
+ imp_pinger_chain);
int generation, level;
if (cfs_time_aftereq(pd->pd_this_ping,
imp->imp_next_ping - 5 * CFS_TICK)) {
/* Add a ping. */
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
generation = imp->imp_generation;
level = imp->imp_state;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (level != LUSTRE_IMP_FULL) {
CDEBUG(D_HA,
}
}
pd->pd_this_ping = curtime;
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
/* Might be empty, that's OK. */
if (set->set_remaining == 0)
CDEBUG(D_RPCTRACE, "nothing to ping\n");
- list_for_each(iter, &set->set_requests) {
+ cfs_list_for_each(iter, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(iter, struct ptlrpc_request,
- rq_set_chain);
+ cfs_list_entry(iter, struct ptlrpc_request,
+ rq_set_chain);
DEBUG_REQ(D_RPCTRACE, req, "pinging %s->%s",
req->rq_import->imp_obd->obd_uuid.uuid,
obd2cli_tgt(req->rq_import->imp_obd));
}
/* Expire all the requests that didn't come back. */
- mutex_down(&pinger_sem);
- list_for_each(iter, &set->set_requests) {
- req = list_entry(iter, struct ptlrpc_request,
- rq_set_chain);
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_for_each(iter, &set->set_requests) {
+ req = cfs_list_entry(iter, struct ptlrpc_request,
+ rq_set_chain);
if (req->rq_phase == RQ_PHASE_COMPLETE)
continue;
* phase and take care of inflights. */
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
imp = req->rq_import;
- spin_lock(&imp->imp_lock);
- if (!list_empty(&req->rq_list)) {
- list_del_init(&req->rq_list);
- atomic_dec(&imp->imp_inflight);
+ cfs_spin_lock(&imp->imp_lock);
+ if (!cfs_list_empty(&req->rq_list)) {
+ cfs_list_del_init(&req->rq_list);
+ cfs_atomic_dec(&imp->imp_inflight);
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
set->set_remaining--;
}
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
ptlrpc_set_destroy(set);
pd->pd_set = NULL;
void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
{
#ifdef ENABLE_PINGER
- mutex_down(&pinger_sem);
+ cfs_mutex_down(&pinger_sem);
ptlrpc_update_next_ping(imp);
if (pinger_args.pd_set == NULL &&
- time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
+ cfs_time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
CDEBUG(D_HA, "set next ping to "CFS_TIME_T"(cur "CFS_TIME_T")\n",
imp->imp_next_ping, cfs_time_current());
pinger_args.pd_next_ping = imp->imp_next_ping;
}
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
#endif
}
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
timeout_cb_t cb, void *data,
- struct list_head *obd_list)
+ cfs_list_t *obd_list)
{
return 0;
}
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
+int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
enum timeout_event event)
{
return 0;
int ptlrpc_pinger_add_import(struct obd_import *imp)
{
ENTRY;
- if (!list_empty(&imp->imp_pinger_chain))
+ if (!cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-EALREADY);
CDEBUG(D_HA, "adding pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
ptlrpc_pinger_sending_on_import(imp);
- mutex_down(&pinger_sem);
- list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
class_import_get(imp);
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
RETURN(0);
}
int ptlrpc_pinger_del_import(struct obd_import *imp)
{
ENTRY;
- if (list_empty(&imp->imp_pinger_chain))
+ if (cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
- mutex_down(&pinger_sem);
- list_del_init(&imp->imp_pinger_chain);
+ cfs_mutex_down(&pinger_sem);
+ cfs_list_del_init(&imp->imp_pinger_chain);
CDEBUG(D_HA, "removing pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
class_import_put(imp);
- mutex_up(&pinger_sem);
+ cfs_mutex_up(&pinger_sem);
RETURN(0);
}
/* XXX force pinger to run, if needed */
struct obd_import *imp;
ENTRY;
- list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) {
+ cfs_list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) {
CDEBUG(D_RPCTRACE, "checking import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
#ifdef ENABLE_LIBLUSTRE_RECOVERY
#include "ptlrpc_internal.h"
-extern spinlock_t ptlrpc_last_xid_lock;
-extern spinlock_t ptlrpc_rs_debug_lock;
-extern spinlock_t ptlrpc_all_services_lock;
-extern struct semaphore pinger_sem;
-extern struct semaphore ptlrpcd_sem;
+extern cfs_spinlock_t ptlrpc_last_xid_lock;
+extern cfs_spinlock_t ptlrpc_rs_debug_lock;
+extern cfs_spinlock_t ptlrpc_all_services_lock;
+extern cfs_semaphore_t pinger_sem;
+extern cfs_semaphore_t ptlrpcd_sem;
__init int ptlrpc_init(void)
{
ENTRY;
lustre_assert_wire_constants();
- spin_lock_init(&ptlrpc_rs_debug_lock);
- spin_lock_init(&ptlrpc_all_services_lock);
- init_mutex(&pinger_sem);
- init_mutex(&ptlrpcd_sem);
+ cfs_spin_lock_init(&ptlrpc_rs_debug_lock);
+ cfs_spin_lock_init(&ptlrpc_all_services_lock);
+ cfs_init_mutex(&pinger_sem);
+ cfs_init_mutex(&ptlrpcd_sem);
ptlrpc_init_xid();
rc = req_layout_init();
}
};
-struct semaphore ptlrpcd_sem;
+cfs_semaphore_t ptlrpcd_sem;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
*/
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
- list_for_each_safe(pos, tmp, &set->set_requests) {
+ cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(pos, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
LASSERT(req->rq_phase == RQ_PHASE_NEW);
- list_del_init(&req->rq_set_chain);
+ cfs_list_del_init(&req->rq_set_chain);
req->rq_set = NULL;
ptlrpcd_add_req(req, PSCOPE_OTHER);
set->set_remaining--;
static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
struct ptlrpc_request *req;
int rc = 0;
ENTRY;
- spin_lock(&pc->pc_set->set_new_req_lock);
- list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
- req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
- list_del_init(&req->rq_set_chain);
+ cfs_spin_lock(&pc->pc_set->set_new_req_lock);
+ cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
+ req = cfs_list_entry(pos, struct ptlrpc_request, rq_set_chain);
+ cfs_list_del_init(&req->rq_set_chain);
ptlrpc_set_add_req(pc->pc_set, req);
/*
* Need to calculate its timeout.
*/
rc = 1;
}
- spin_unlock(&pc->pc_set->set_new_req_lock);
+ cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
if (pc->pc_set->set_remaining) {
rc = rc | ptlrpc_check_set(env, pc->pc_set);
* XXX: our set never completes, so we prune the completed
* reqs after each iteration. boy could this be smarter.
*/
- list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
- req = list_entry(pos, struct ptlrpc_request,
+ cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
+ req = cfs_list_entry(pos, struct ptlrpc_request,
rq_set_chain);
if (req->rq_phase != RQ_PHASE_COMPLETE)
continue;
- list_del_init(&req->rq_set_chain);
+ cfs_list_del_init(&req->rq_set_chain);
req->rq_set = NULL;
ptlrpc_req_finished (req);
}
/*
* If new requests have been added, make sure to wake up.
*/
- spin_lock(&pc->pc_set->set_new_req_lock);
- rc = !list_empty(&pc->pc_set->set_new_requests);
- spin_unlock(&pc->pc_set->set_new_req_lock);
+ cfs_spin_lock(&pc->pc_set->set_new_req_lock);
+ rc = !cfs_list_empty(&pc->pc_set->set_new_requests);
+ cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
}
RETURN(rc);
LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
}
- complete(&pc->pc_starting);
+ cfs_complete(&pc->pc_starting);
if (rc != 0)
RETURN(rc);
/*
* Abort inflight rpcs for forced stop case.
*/
- if (test_bit(LIOD_STOP, &pc->pc_flags)) {
- if (test_bit(LIOD_FORCE, &pc->pc_flags))
+ if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
+ if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
ptlrpc_abort_set(pc->pc_set);
exit++;
}
/*
* Wait for inflight requests to drain.
*/
- if (!list_empty(&pc->pc_set->set_requests))
+ if (!cfs_list_empty(&pc->pc_set->set_requests))
ptlrpc_set_wait(pc->pc_set);
lu_context_fini(&env.le_ctx);
- complete(&pc->pc_finishing);
+ cfs_complete(&pc->pc_finishing);
- clear_bit(LIOD_START, &pc->pc_flags);
- clear_bit(LIOD_STOP, &pc->pc_flags);
- clear_bit(LIOD_FORCE, &pc->pc_flags);
+ cfs_clear_bit(LIOD_START, &pc->pc_flags);
+ cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
+ cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
return 0;
}
/*
* XXX: send replay requests.
*/
- if (test_bit(LIOD_RECOVERY, &pc->pc_flags))
+ if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
rc = ptlrpcd_check(&pc->pc_env, pc);
}
}
{
struct ptlrpcd_ctl *pc = arg;
- return (list_empty(&pc->pc_set->set_new_requests) &&
+ return (cfs_list_empty(&pc->pc_set->set_new_requests) &&
pc->pc_set->set_remaining == 0);
}
/*
* Do not allow start second thread for one pc.
*/
- if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+ if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
CERROR("Starting second thread (%s) for same pc %p\n",
name, pc);
RETURN(-EALREADY);
}
- init_completion(&pc->pc_starting);
- init_completion(&pc->pc_finishing);
- spin_lock_init(&pc->pc_lock);
+ cfs_init_completion(&pc->pc_starting);
+ cfs_init_completion(&pc->pc_finishing);
+ cfs_spin_lock_init(&pc->pc_lock);
strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
pc->pc_set = ptlrpc_prep_set();
if (pc->pc_set == NULL)
GOTO(out, rc);
}
rc = 0;
- wait_for_completion(&pc->pc_starting);
+ cfs_wait_for_completion(&pc->pc_starting);
#else
pc->pc_wait_callback =
liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
#endif
out:
if (rc)
- clear_bit(LIOD_START, &pc->pc_flags);
+ cfs_clear_bit(LIOD_START, &pc->pc_flags);
RETURN(rc);
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
- if (!test_bit(LIOD_START, &pc->pc_flags)) {
+ if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
CERROR("Thread for pc %p was not started\n", pc);
return;
}
- set_bit(LIOD_STOP, &pc->pc_flags);
+ cfs_set_bit(LIOD_STOP, &pc->pc_flags);
if (force)
- set_bit(LIOD_FORCE, &pc->pc_flags);
+ cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
- wait_for_completion(&pc->pc_finishing);
+ cfs_wait_for_completion(&pc->pc_finishing);
#else
liblustre_deregister_wait_callback(pc->pc_wait_callback);
liblustre_deregister_idle_callback(pc->pc_idle_callback);
pc = &ptlrpcd_scopes[i].pscope_thread[j].pt_ctl;
- if (test_bit(LIOD_START, &pc->pc_flags))
+ if (cfs_test_bit(LIOD_START, &pc->pc_flags))
ptlrpcd_stop(pc, 0);
}
}
int j;
ENTRY;
- mutex_down(&ptlrpcd_sem);
+ cfs_mutex_down(&ptlrpcd_sem);
if (++ptlrpcd_users == 1) {
for (i = 0; rc == 0 && i < PSCOPE_NR; ++i) {
for (j = 0; rc == 0 && j < PT_NR; ++j) {
pt = &ptlrpcd_scopes[i].pscope_thread[j];
pc = &pt->pt_ctl;
if (j == PT_RECOVERY)
- set_bit(LIOD_RECOVERY, &pc->pc_flags);
+ cfs_set_bit(LIOD_RECOVERY, &pc->pc_flags);
rc = ptlrpcd_start(pt->pt_name, pc);
}
}
ptlrpcd_fini();
}
}
- mutex_up(&ptlrpcd_sem);
+ cfs_mutex_up(&ptlrpcd_sem);
RETURN(rc);
}
void ptlrpcd_decref(void)
{
- mutex_down(&ptlrpcd_sem);
+ cfs_mutex_down(&ptlrpcd_sem);
if (--ptlrpcd_users == 0)
ptlrpcd_fini();
- mutex_up(&ptlrpcd_sem);
+ cfs_mutex_up(&ptlrpcd_sem);
}
#include <lustre_log.h>
#include "ptlrpc_internal.h"
-static atomic_t llcd_count = ATOMIC_INIT(0);
+static cfs_atomic_t llcd_count = CFS_ATOMIC_INIT(0);
static cfs_mem_cache_t *llcd_cache = NULL;
#ifdef __KERNEL__
llcd->llcd_cookiebytes = 0;
llcd->llcd_size = size;
- spin_lock(&lcm->lcm_lock);
+ cfs_spin_lock(&lcm->lcm_lock);
llcd->llcd_lcm = lcm;
- atomic_inc(&lcm->lcm_count);
- list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
- spin_unlock(&lcm->lcm_lock);
- atomic_inc(&llcd_count);
+ cfs_atomic_inc(&lcm->lcm_count);
+ cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
+ cfs_spin_unlock(&lcm->lcm_lock);
+ cfs_atomic_inc(&llcd_count);
CDEBUG(D_RPCTRACE, "Alloc llcd %p on lcm %p (%d)\n",
- llcd, lcm, atomic_read(&lcm->lcm_count));
+ llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
return llcd;
}
int size;
if (lcm) {
- if (atomic_read(&lcm->lcm_count) == 0) {
+ if (cfs_atomic_read(&lcm->lcm_count) == 0) {
CERROR("Invalid llcd free %p\n", llcd);
llcd_print(llcd, __FUNCTION__, __LINE__);
LBUG();
}
- spin_lock(&lcm->lcm_lock);
- LASSERT(!list_empty(&llcd->llcd_list));
- list_del_init(&llcd->llcd_list);
- atomic_dec(&lcm->lcm_count);
- spin_unlock(&lcm->lcm_lock);
+ cfs_spin_lock(&lcm->lcm_lock);
+ LASSERT(!cfs_list_empty(&llcd->llcd_list));
+ cfs_list_del_init(&llcd->llcd_list);
+ cfs_atomic_dec(&lcm->lcm_count);
+ cfs_spin_unlock(&lcm->lcm_lock);
CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
- llcd, lcm, atomic_read(&lcm->lcm_count));
+ llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
}
- LASSERT(atomic_read(&llcd_count) > 0);
- atomic_dec(&llcd_count);
+ LASSERT(cfs_atomic_read(&llcd_count) > 0);
+ cfs_atomic_dec(&llcd_count);
size = offsetof(struct llog_canceld_ctxt, llcd_cookies) +
llcd->llcd_size;
* Check if we're in exit stage. Do not send llcd in
* this case.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(exit, rc = -ENODEV);
CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd);
* Let all know that we're stopping. This will also make
* llcd_send() refuse any new llcds.
*/
- set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
+ cfs_set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
/*
* Stop processing thread. No new rpcs will be accepted for
* those forgotten in sync may still be attached to ctxt. Let's
* print them.
*/
- if (atomic_read(&lcm->lcm_count) != 0) {
+ if (cfs_atomic_read(&lcm->lcm_count) != 0) {
struct llog_canceld_ctxt *llcd;
- struct list_head *tmp;
+ cfs_list_t *tmp;
CERROR("Busy llcds found (%d) on lcm %p\n",
- atomic_read(&lcm->lcm_count), lcm);
+ cfs_atomic_read(&lcm->lcm_count), lcm);
- spin_lock(&lcm->lcm_lock);
- list_for_each(tmp, &lcm->lcm_llcds) {
- llcd = list_entry(tmp, struct llog_canceld_ctxt,
- llcd_list);
+ cfs_spin_lock(&lcm->lcm_lock);
+ cfs_list_for_each(tmp, &lcm->lcm_llcds) {
+ llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt,
+ llcd_list);
llcd_print(llcd, __FUNCTION__, __LINE__);
}
- spin_unlock(&lcm->lcm_lock);
+ cfs_spin_unlock(&lcm->lcm_lock);
/*
* No point to go further with busy llcds at this point
snprintf(lcm->lcm_name, sizeof(lcm->lcm_name),
"lcm_%s", name);
- atomic_set(&lcm->lcm_count, 0);
- atomic_set(&lcm->lcm_refcount, 1);
- spin_lock_init(&lcm->lcm_lock);
+ cfs_atomic_set(&lcm->lcm_count, 0);
+ cfs_atomic_set(&lcm->lcm_refcount, 1);
+ cfs_spin_lock_init(&lcm->lcm_lock);
CFS_INIT_LIST_HEAD(&lcm->lcm_llcds);
rc = llog_recov_thread_start(lcm);
if (rc) {
/*
* Start recovery in separate thread.
*/
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
ctxt->loc_gen = *gen;
rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
RETURN(rc);
}
LASSERT(ctxt != NULL);
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
if (!ctxt->loc_lcm) {
CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
GOTO(out, rc = -ENODEV);
GOTO(out, rc = -ENODEV);
}
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n",
ctxt);
GOTO(out, rc = -ENODEV);
* Allocation is successful, let's check for stop
* flag again to fall back as soon as possible.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(out, rc = -ENODEV);
}
* Allocation is successful, let's check for stop
* flag again to fall back as soon as possible.
*/
- if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(out, rc = -ENODEV);
}
out:
if (rc)
llcd_put(ctxt);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
/*
* Flush any remaining llcd.
*/
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
/*
* This is ost->mds connection, we can't be sure that mds
*/
CDEBUG(D_RPCTRACE, "Kill cached llcd\n");
llcd_put(ctxt);
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
} else {
/*
* This is either llog_sync() from generic llog code or sync
* llcds to the target with waiting for completion.
*/
CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
}
RETURN(rc);
* In 2.6.22 cfs_mem_cache_destroy() will not return error
* for busy resources. Let's check it another way.
*/
- LASSERTF(atomic_read(&llcd_count) == 0,
+ LASSERTF(cfs_atomic_read(&llcd_count) == 0,
"Can't destroy llcd cache! Number of "
- "busy llcds: %d\n", atomic_read(&llcd_count));
+ "busy llcds: %d\n", cfs_atomic_read(&llcd_count));
cfs_mem_cache_destroy(llcd_cache);
llcd_cache = NULL;
}
int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
{
int rc = 0;
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
struct ptlrpc_request *req = NULL;
__u64 last_transno;
ENTRY;
/* It might have committed some after we last spoke, so make sure we
* get rid of them now.
*/
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_last_transno_checked = 0;
ptlrpc_free_committed(imp);
last_transno = imp->imp_last_replay_transno;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
imp, obd2cli_tgt(imp->imp_obd),
* imp_lock is being held by ptlrpc_replay, but it's not. it's
* just a little race...
*/
- list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ cfs_list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
/* If need to resend the last sent transno (because a
reconnect has occurred), then stop on the matching
req = NULL;
}
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_resend_replay = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (req != NULL) {
rc = ptlrpc_replay_req(req);
*/
/* Well... what if lctl recover is called twice at the same time?
*/
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state != LUSTRE_IMP_RECOVER) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(-1);
}
- list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
+ cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
+ rq_list) {
LASSERTF((long)req > CFS_PAGE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
if (!req->rq_no_resend)
ptlrpc_resend_req(req);
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(0);
}
void ptlrpc_wake_delayed(struct obd_import *imp)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
struct ptlrpc_request *req;
- spin_lock(&imp->imp_lock);
- list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_spin_lock(&imp->imp_lock);
+ cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
ptlrpc_client_wake_req(req);
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
/* Wait for recovery to complete and resend. If evicted, then
this request will be errored out later.*/
- spin_lock(&failed_req->rq_lock);
+ cfs_spin_lock(&failed_req->rq_lock);
if (!failed_req->rq_no_resend)
failed_req->rq_resend = 1;
- spin_unlock(&failed_req->rq_lock);
+ cfs_spin_unlock(&failed_req->rq_lock);
EXIT;
}
/* set before invalidate to avoid messages about imp_inval
* set without imp_deactive in ptlrpc_import_delay_req */
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
ptlrpc_invalidate_import(imp);
}
int rc;
ENTRY;
- spin_lock(&imp->imp_lock);
- if (atomic_read(&imp->imp_inval_count)) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
+ if (cfs_atomic_read(&imp->imp_inval_count)) {
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(-EINVAL);
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
/* force import to be disconnected. */
ptlrpc_set_import_discon(imp, 0);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_deactive = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
rc = ptlrpc_recover_import_no_retry(imp, new_uuid);
int ptlrpc_import_in_recovery(struct obd_import *imp)
{
int in_recovery = 1;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_FULL ||
imp->imp_state == LUSTRE_IMP_CLOSED ||
imp->imp_state == LUSTRE_IMP_DISCON)
in_recovery = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
return in_recovery;
}
ENTRY;
/* Check if reconnect is already in progress */
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state != LUSTRE_IMP_DISCON) {
in_recovery = 1;
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (in_recovery == 1)
RETURN(-EALREADY);
* policy registers *
***********************************************/
-static rwlock_t policy_lock;
+static cfs_rwlock_t policy_lock;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
NULL,
};
if (number >= SPTLRPC_POLICY_MAX)
return -EINVAL;
- write_lock(&policy_lock);
+ cfs_write_lock(&policy_lock);
if (unlikely(policies[number])) {
- write_unlock(&policy_lock);
+ cfs_write_unlock(&policy_lock);
return -EALREADY;
}
policies[number] = policy;
- write_unlock(&policy_lock);
+ cfs_write_unlock(&policy_lock);
CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
return 0;
LASSERT(number < SPTLRPC_POLICY_MAX);
- write_lock(&policy_lock);
+ cfs_write_lock(&policy_lock);
if (unlikely(policies[number] == NULL)) {
- write_unlock(&policy_lock);
+ cfs_write_unlock(&policy_lock);
CERROR("%s: already unregistered\n", policy->sp_name);
return -EINVAL;
}
LASSERT(policies[number] == policy);
policies[number] = NULL;
- write_unlock(&policy_lock);
+ cfs_write_unlock(&policy_lock);
CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
return 0;
static
struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
{
- static DECLARE_MUTEX(load_mutex);
- static atomic_t loaded = ATOMIC_INIT(0);
+ static CFS_DECLARE_MUTEX(load_mutex);
+ static cfs_atomic_t loaded = CFS_ATOMIC_INIT(0);
struct ptlrpc_sec_policy *policy;
__u16 number = SPTLRPC_FLVR_POLICY(flavor);
__u16 flag = 0;
return NULL;
while (1) {
- read_lock(&policy_lock);
+ cfs_read_lock(&policy_lock);
policy = policies[number];
- if (policy && !try_module_get(policy->sp_owner))
+ if (policy && !cfs_try_module_get(policy->sp_owner))
policy = NULL;
if (policy == NULL)
- flag = atomic_read(&loaded);
- read_unlock(&policy_lock);
+ flag = cfs_atomic_read(&loaded);
+ cfs_read_unlock(&policy_lock);
if (policy != NULL || flag != 0 ||
number != SPTLRPC_POLICY_GSS)
break;
/* try to load gss module, once */
- mutex_down(&load_mutex);
- if (atomic_read(&loaded) == 0) {
- if (request_module("ptlrpc_gss") == 0)
+ cfs_mutex_down(&load_mutex);
+ if (cfs_atomic_read(&loaded) == 0) {
+ if (cfs_request_module("ptlrpc_gss") == 0)
CWARN("module ptlrpc_gss loaded on demand\n");
else
CERROR("Unable to load module ptlrpc_gss\n");
- atomic_set(&loaded, 1);
+ cfs_atomic_set(&loaded, 1);
}
- mutex_up(&load_mutex);
+ cfs_mutex_up(&load_mutex);
}
return policy;
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- atomic_inc(&ctx->cc_refcount);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ cfs_atomic_inc(&ctx->cc_refcount);
return ctx;
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
struct ptlrpc_sec *sec = ctx->cc_sec;
LASSERT(sec);
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
- if (!atomic_dec_and_test(&ctx->cc_refcount))
+ if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
return;
sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
{
struct ptlrpc_request *req, *next;
- spin_lock(&ctx->cc_lock);
- list_for_each_entry_safe(req, next, &ctx->cc_req_list, rq_ctx_chain) {
- list_del_init(&req->rq_ctx_chain);
+ cfs_spin_lock(&ctx->cc_lock);
+ cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
+ rq_ctx_chain) {
+ cfs_list_del_init(&req->rq_ctx_chain);
ptlrpc_client_wake_req(req);
}
- spin_unlock(&ctx->cc_lock);
+ cfs_spin_unlock(&ctx->cc_lock);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
{
int adapt = 0;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_sec_expire &&
imp->imp_sec_expire < cfs_time_current_sec()) {
adapt = 1;
imp->imp_sec_expire = 0;
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (!adapt)
return 0;
/* request might be asked to release earlier while still
* in the context waiting list.
*/
- if (!list_empty(&req->rq_ctx_chain)) {
- spin_lock(&req->rq_cli_ctx->cc_lock);
- list_del_init(&req->rq_ctx_chain);
- spin_unlock(&req->rq_cli_ctx->cc_lock);
+ if (!cfs_list_empty(&req->rq_ctx_chain)) {
+ cfs_spin_lock(&req->rq_cli_ctx->cc_lock);
+ cfs_list_del_init(&req->rq_ctx_chain);
+ cfs_spin_unlock(&req->rq_cli_ctx->cc_lock);
}
sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
LASSERT(newctx);
if (unlikely(newctx == oldctx &&
- test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
+ cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
/*
* still get the old dead ctx, usually means system too busy
*/
CWARN("ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
newctx, newctx->cc_flags);
- cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, HZ);
+ cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ CFS_HZ);
} else {
/*
* it's possible newctx == oldctx if we're switching
{
struct ptlrpc_request *req = data;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
static
void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
{
- spin_lock(&ctx->cc_lock);
- if (!list_empty(&req->rq_ctx_chain))
- list_del_init(&req->rq_ctx_chain);
- spin_unlock(&ctx->cc_lock);
+ cfs_spin_lock(&ctx->cc_lock);
+ if (!cfs_list_empty(&req->rq_ctx_chain))
+ cfs_list_del_init(&req->rq_ctx_chain);
+ cfs_spin_unlock(&ctx->cc_lock);
}
/*
if (cli_ctx_is_eternal(ctx))
RETURN(0);
- if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
+ if (unlikely(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
LASSERT(ctx->cc_ops->refresh);
ctx->cc_ops->refresh(ctx);
}
- LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
LASSERT(ctx->cc_ops->validate);
if (ctx->cc_ops->validate(ctx) == 0) {
RETURN(0);
}
- if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
+ if (unlikely(cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
req->rq_err = 1;
req_off_ctx_list(req, ctx);
RETURN(-EPERM);
* 2. Current context never be refreshed, then we are fine: we
* never really send request with old context before.
*/
- if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
+ if (cfs_test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
unlikely(req->rq_reqmsg) &&
lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
req_off_ctx_list(req, ctx);
RETURN(0);
}
- if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
+ if (unlikely(cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
req_off_ctx_list(req, ctx);
/*
* don't switch ctx if import was deactivated
/* Now we're sure this context is during upcall, add myself into
* waiting list
*/
- spin_lock(&ctx->cc_lock);
- if (list_empty(&req->rq_ctx_chain))
- list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
- spin_unlock(&ctx->cc_lock);
+ cfs_spin_lock(&ctx->cc_lock);
+ if (cfs_list_empty(&req->rq_ctx_chain))
+ cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
+ cfs_spin_unlock(&ctx->cc_lock);
if (timeout < 0)
RETURN(-EWOULDBLOCK);
/* Clear any flags that may be present from previous sends */
LASSERT(req->rq_receiving_reply == 0);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_err = 0;
req->rq_timedout = 0;
req->rq_resend = 0;
req->rq_restart = 0;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
- lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
+ lwi = LWI_TIMEOUT_INTR(timeout * CFS_HZ, ctx_refresh_timeout,
ctx_refresh_interrupt, req);
rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
sec = req->rq_cli_ctx->cc_sec;
- spin_lock(&sec->ps_lock);
+ cfs_spin_lock(&sec->ps_lock);
req->rq_flvr = sec->ps_flvr;
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
/* force SVC_NULL for context initiation rpc, SVC_INTG for context
* destruction rpc */
int rc;
ENTRY;
- might_sleep();
+ cfs_might_sleep();
sec = sptlrpc_import_sec_ref(imp);
ctx = get_my_ctx(sec);
if (!req)
RETURN(-ENOMEM);
- spin_lock_init(&req->rq_lock);
- atomic_set(&req->rq_refcount, 10000);
+ cfs_spin_lock_init(&req->rq_lock);
+ cfs_atomic_set(&req->rq_refcount, 10000);
CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
cfs_waitq_init(&req->rq_reply_waitq);
req->rq_import = imp;
req->rq_cli_ctx = ctx;
rc = sptlrpc_req_refresh_ctx(req, 0);
- LASSERT(list_empty(&req->rq_ctx_chain));
+ LASSERT(cfs_list_empty(&req->rq_ctx_chain));
sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
OBD_FREE_PTR(req);
GOTO(err_req, rc = -ENOMEM);
/* sanity checkings and copy data out, do it inside spinlock */
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (req->rq_replied) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
GOTO(err_buf, rc = -EALREADY);
}
if (req->rq_reply_off != 0) {
CERROR("early reply with offset %u\n", req->rq_reply_off);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
GOTO(err_buf, rc = -EPROTO);
}
/* even another early arrived the size should be the same */
CERROR("data size has changed from %u to %u\n",
early_size, req->rq_nob_received);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
GOTO(err_buf, rc = -EINVAL);
}
if (req->rq_nob_received < sizeof(struct lustre_msg)) {
CERROR("early reply length %d too small\n",
req->rq_nob_received);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
GOTO(err_buf, rc = -EALREADY);
}
memcpy(early_buf, req->rq_repbuf, early_size);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
- spin_lock_init(&early_req->rq_lock);
+ cfs_spin_lock_init(&early_req->rq_lock);
early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
early_req->rq_flvr = req->rq_flvr;
early_req->rq_repbuf = early_buf;
/*
* "fixed" sec (e.g. null) use sec_id < 0
*/
-static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
+static cfs_atomic_t sptlrpc_sec_id = CFS_ATOMIC_INIT(1);
int sptlrpc_get_next_secid(void)
{
- return atomic_inc_return(&sptlrpc_sec_id);
+ return cfs_atomic_inc_return(&sptlrpc_sec_id);
}
EXPORT_SYMBOL(sptlrpc_get_next_secid);
{
struct ptlrpc_sec_policy *policy = sec->ps_policy;
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
LASSERT(policy->sp_cops->destroy_sec);
CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
if (sec->ps_policy->sp_cops->kill_sec) {
sec->ps_policy->sp_cops->kill_sec(sec);
struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
{
if (sec) {
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- atomic_inc(&sec->ps_refcount);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ cfs_atomic_inc(&sec->ps_refcount);
}
return sec;
void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
if (sec) {
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
- if (atomic_dec_and_test(&sec->ps_refcount)) {
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
sptlrpc_gc_del_sec(sec);
sec_cop_destroy_sec(sec);
sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
if (sec) {
- atomic_inc(&sec->ps_refcount);
+ cfs_atomic_inc(&sec->ps_refcount);
sec->ps_part = sp;
{
struct ptlrpc_sec *sec;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
sec = sptlrpc_sec_get(imp->imp_sec);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
return sec;
}
{
struct ptlrpc_sec *old_sec;
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
old_sec = imp->imp_sec;
imp->imp_sec = sec;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (old_sec) {
sptlrpc_sec_kill(old_sec);
sptlrpc_secflags2str(sf->sf_flags,
str2, sizeof(str2)));
- spin_lock(&sec->ps_lock);
+ cfs_spin_lock(&sec->ps_lock);
flavor_copy(&sec->ps_flvr, sf);
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
}
/*
int rc = 0;
ENTRY;
- might_sleep();
+ cfs_might_sleep();
if (imp == NULL)
RETURN(0);
sptlrpc_flavor2name(&sf, str, sizeof(str)));
}
- mutex_down(&imp->imp_sec_mutex);
+ cfs_mutex_down(&imp->imp_sec_mutex);
newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
if (newsec) {
rc = -EPERM;
}
- mutex_up(&imp->imp_sec_mutex);
+ cfs_mutex_up(&imp->imp_sec_mutex);
out:
sptlrpc_sec_put(sec);
RETURN(rc);
int rc;
LASSERT(ctx);
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
LASSERT(req->rq_reqmsg == NULL);
struct ptlrpc_sec_policy *policy;
LASSERT(ctx);
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
ENTRY;
LASSERT(ctx);
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
ENTRY;
LASSERT(ctx);
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
if (req->rq_ctx_fini)
return 0;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
/* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
* the first req with the new flavor, then treat it as current flavor,
if (req->rq_auth_gss &&
!(req->rq_ctx_init && (req->rq_auth_usr_root ||
req->rq_auth_usr_mdt))) {
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d)\n",
req->rq_auth_gss, req->rq_ctx_init,
req->rq_auth_usr_root, req->rq_auth_usr_mdt);
}
exp->exp_flvr_adapt = 0;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
req->rq_svc_ctx, &flavor);
* gss root ctx init */
if (!req->rq_auth_gss || !req->rq_ctx_init ||
(!req->rq_auth_usr_root && !req->rq_auth_usr_mdt)) {
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return 0;
}
* shortly, and let _this_ rpc pass through */
if (exp->exp_flvr_changed) {
LASSERT(exp->exp_flvr_adapt);
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return 0;
}
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
flavor = exp->exp_flvr;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
req->rq_svc_ctx,
"install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
req->rq_svc_ctx);
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[0] -
cfs_time_current_sec());
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return 0;
}
} else {
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[1] -
cfs_time_current_sec());
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return 0;
}
} else {
exp->exp_flvr_old[1].sf_rpc);
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u) with "
"unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
LASSERT(obd);
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
- list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
if (exp->exp_connection == NULL)
continue;
/* note if this export had just been updated flavor
* (exp_flvr_changed == 1), this will override the
* previous one. */
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
exp->exp_connection->c_peer.nid,
&new_flvr);
exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
}
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
if (ctx == NULL)
return;
- LASSERT(atomic_read(&ctx->sc_refcount) > 0);
- atomic_inc(&ctx->sc_refcount);
+ LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
+ cfs_atomic_inc(&ctx->sc_refcount);
}
void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
if (ctx == NULL)
return;
- LASSERT(atomic_read(&ctx->sc_refcount) > 0);
- if (atomic_dec_and_test(&ctx->sc_refcount)) {
+ LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
if (ctx->sc_policy->sp_sops->free_ctx)
ctx->sc_policy->sp_sops->free_ctx(ctx);
}
if (ctx == NULL)
return;
- LASSERT(atomic_read(&ctx->sc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
if (ctx->sc_policy->sp_sops->invalidate_ctx)
ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
}
{
int rc;
- rwlock_init(&policy_lock);
+ cfs_rwlock_init(&policy_lock);
rc = sptlrpc_gc_init();
if (rc)
/*
* in-pool pages bookkeeping
*/
- spinlock_t epp_lock; /* protect following fields */
+ cfs_spinlock_t epp_lock; /* protect following fields */
unsigned long epp_total_pages; /* total pages in pools */
unsigned long epp_free_pages; /* current pages available */
/*
* memory shrinker
*/
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
-static struct shrinker *pools_shrinker = NULL;
+const int pools_shrinker_seeks = CFS_DEFAULT_SEEKS;
+static struct cfs_shrinker *pools_shrinker = NULL;
/*
{
int rc;
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
rc = snprintf(page, count,
"physical pages: %lu\n"
"max waitqueue depth: %u\n"
"max wait time: "CFS_TIME_T"/%u\n"
,
- num_physpages,
+ cfs_num_physpages,
PAGES_PER_POOL,
page_pools.epp_max_pages,
page_pools.epp_max_pools,
page_pools.epp_st_missings,
page_pools.epp_st_lowfree,
page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait, HZ
+ page_pools.epp_st_max_wait, CFS_HZ
);
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
return rc;
}
static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
{
if (unlikely(nr_to_scan != 0)) {
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
nr_to_scan = min(nr_to_scan, (int) page_pools.epp_free_pages -
PTLRPC_MAX_BRW_PAGES);
if (nr_to_scan > 0) {
page_pools.epp_st_shrinks++;
page_pools.epp_last_shrink = cfs_time_current_sec();
}
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
}
/*
*/
if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
CACHE_QUIESCENT_PERIOD)) {
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
page_pools.epp_idle_idx = IDLE_IDX_MAX;
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
}
LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
LASSERT(npages_to_npools(npages) == npools);
LASSERT(page_pools.epp_growing);
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
/*
* (1) fill all the free slots of current pools.
CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
page_pools.epp_total_pages);
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
}
static int enc_pools_add_pages(int npages)
{
- static DECLARE_MUTEX(sem_add_pages);
+ static CFS_DECLARE_MUTEX(sem_add_pages);
cfs_page_t ***pools;
int npools, alloced = 0;
int i, j, rc = -ENOMEM;
if (npages < PTLRPC_MAX_BRW_PAGES)
npages = PTLRPC_MAX_BRW_PAGES;
- down(&sem_add_pages);
+ cfs_down(&sem_add_pages);
if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
CERROR("Failed to allocate %d enc pages\n", npages);
}
- up(&sem_add_pages);
+ cfs_up(&sem_add_pages);
return rc;
}
if (desc->bd_enc_iov == NULL)
return -ENOMEM;
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
page_pools.epp_st_access++;
again:
if (enc_pools_should_grow(desc->bd_iov_count, now)) {
page_pools.epp_growing = 1;
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
enc_pools_add_pages(page_pools.epp_pages_short / 2);
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
page_pools.epp_growing = 0;
page_pools.epp_st_max_wqlen =
page_pools.epp_waitqlen;
- set_current_state(CFS_TASK_UNINT);
+ cfs_set_current_state(CFS_TASK_UNINT);
cfs_waitlink_init(&waitlink);
cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
LASSERT(page_pools.epp_waitqlen > 0);
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
page_pools.epp_waitqlen--;
}
page_pools.epp_last_access = cfs_time_current_sec();
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
LASSERT(desc->bd_iov_count > 0);
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
enc_pools_wakeup();
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
OBD_FREE(desc->bd_enc_iov,
desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
{
int need_grow = 0;
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
page_pools.epp_growing = 1;
need_grow = 1;
}
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
if (need_grow) {
enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
PTLRPC_MAX_BRW_PAGES);
- spin_lock(&page_pools.epp_lock);
+ cfs_spin_lock(&page_pools.epp_lock);
page_pools.epp_growing = 0;
enc_pools_wakeup();
- spin_unlock(&page_pools.epp_lock);
+ cfs_spin_unlock(&page_pools.epp_lock);
}
return 0;
}
* maximum capacity is 1/8 of total physical memory.
* is the 1/8 a good number?
*/
- page_pools.epp_max_pages = num_physpages / 8;
+ page_pools.epp_max_pages = cfs_num_physpages / 8;
page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
cfs_waitq_init(&page_pools.epp_waitq);
page_pools.epp_last_shrink = cfs_time_current_sec();
page_pools.epp_last_access = cfs_time_current_sec();
- spin_lock_init(&page_pools.epp_lock);
+ cfs_spin_lock_init(&page_pools.epp_lock);
page_pools.epp_total_pages = 0;
page_pools.epp_free_pages = 0;
if (page_pools.epp_pools == NULL)
return -ENOMEM;
- pools_shrinker = set_shrinker(pools_shrinker_seeks, enc_pools_shrink);
+ pools_shrinker = cfs_set_shrinker(pools_shrinker_seeks,
+ enc_pools_shrink);
if (pools_shrinker == NULL) {
enc_pools_free();
return -ENOMEM;
LASSERT(page_pools.epp_pools);
LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
- remove_shrinker(pools_shrinker);
+ cfs_remove_shrinker(pools_shrinker);
npools = npages_to_npools(page_pools.epp_total_pages);
cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
page_pools.epp_st_grow_fails,
page_pools.epp_st_shrinks, page_pools.epp_st_access,
page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait, HZ);
+ page_pools.epp_st_max_wait, CFS_HZ);
}
}
struct sptlrpc_rule *rules;
int nslot;
- might_sleep();
+ cfs_might_sleep();
if (rset->srs_nrule < rset->srs_nslot)
return 0;
int spec_dir, spec_net;
int rc, n, match = 0;
- might_sleep();
+ cfs_might_sleep();
spec_net = rule_spec_net(rule);
spec_dir = rule_spec_dir(rule);
struct sptlrpc_rule *rule;
int i, n, rc;
- might_sleep();
+ cfs_might_sleep();
/* merge general rules firstly, then target-specific rules */
for (i = 0; i < 2; i++) {
**********************************/
struct sptlrpc_conf_tgt {
- struct list_head sct_list;
+ cfs_list_t sct_list;
char sct_name[MAX_OBD_NAME];
struct sptlrpc_rule_set sct_rset;
};
struct sptlrpc_conf {
- struct list_head sc_list;
+ cfs_list_t sc_list;
char sc_fsname[MTI_NAME_MAXLEN];
unsigned int sc_modified; /* modified during updating */
unsigned int sc_updated:1, /* updated copy from MGS */
sc_local:1; /* local copy from target */
struct sptlrpc_rule_set sc_rset; /* fs general rules */
- struct list_head sc_tgts; /* target-specific rules */
+ cfs_list_t sc_tgts; /* target-specific rules */
};
-static struct mutex sptlrpc_conf_lock;
+static cfs_mutex_t sptlrpc_conf_lock;
static CFS_LIST_HEAD(sptlrpc_confs);
static inline int is_hex(char c)
sptlrpc_rule_set_free(&conf->sc_rset);
- list_for_each_entry_safe(conf_tgt, conf_tgt_next,
- &conf->sc_tgts, sct_list) {
+ cfs_list_for_each_entry_safe(conf_tgt, conf_tgt_next,
+ &conf->sc_tgts, sct_list) {
sptlrpc_rule_set_free(&conf_tgt->sct_rset);
- list_del(&conf_tgt->sct_list);
+ cfs_list_del(&conf_tgt->sct_list);
OBD_FREE_PTR(conf_tgt);
}
- LASSERT(list_empty(&conf->sc_tgts));
+ LASSERT(cfs_list_empty(&conf->sc_tgts));
conf->sc_updated = 0;
conf->sc_local = 0;
CDEBUG(D_SEC, "free sptlrpc conf %s\n", conf->sc_fsname);
sptlrpc_conf_free_rsets(conf);
- list_del(&conf->sc_list);
+ cfs_list_del(&conf->sc_list);
OBD_FREE_PTR(conf);
}
{
struct sptlrpc_conf_tgt *conf_tgt;
- list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
+ cfs_list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
if (strcmp(conf_tgt->sct_name, name) == 0)
return conf_tgt;
}
if (conf_tgt) {
strncpy(conf_tgt->sct_name, name, sizeof(conf_tgt->sct_name));
sptlrpc_rule_set_init(&conf_tgt->sct_rset);
- list_add(&conf_tgt->sct_list, &conf->sc_tgts);
+ cfs_list_add(&conf_tgt->sct_list, &conf->sc_tgts);
}
return conf_tgt;
{
struct sptlrpc_conf *conf;
- list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
+ cfs_list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
if (strcmp(conf->sc_fsname, fsname) == 0)
return conf;
}
strcpy(conf->sc_fsname, fsname);
sptlrpc_rule_set_init(&conf->sc_rset);
CFS_INIT_LIST_HEAD(&conf->sc_tgts);
- list_add(&conf->sc_list, &sptlrpc_confs);
+ cfs_list_add(&conf->sc_list, &sptlrpc_confs);
CDEBUG(D_SEC, "create sptlrpc conf %s\n", conf->sc_fsname);
return conf;
if (conf == NULL) {
target2fsname(target, fsname, sizeof(fsname));
- mutex_lock(&sptlrpc_conf_lock);
+ cfs_mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf == NULL) {
CERROR("can't find conf\n");
} else {
rc = sptlrpc_conf_merge_rule(conf, target, &rule);
}
- mutex_unlock(&sptlrpc_conf_lock);
+ cfs_mutex_unlock(&sptlrpc_conf_lock);
} else {
- LASSERT(mutex_is_locked(&sptlrpc_conf_lock));
+ LASSERT(cfs_mutex_is_locked(&sptlrpc_conf_lock));
rc = sptlrpc_conf_merge_rule(conf, target, &rule);
}
if (logname2fsname(logname, fsname, sizeof(fsname)))
return;
- mutex_lock(&sptlrpc_conf_lock);
+ cfs_mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf && conf->sc_local) {
}
conf->sc_modified = 0;
- mutex_unlock(&sptlrpc_conf_lock);
+ cfs_mutex_unlock(&sptlrpc_conf_lock);
}
EXPORT_SYMBOL(sptlrpc_conf_log_update_begin);
if (logname2fsname(logname, fsname, sizeof(fsname)))
return;
- mutex_lock(&sptlrpc_conf_lock);
+ cfs_mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf) {
conf->sc_updated = 1;
}
- mutex_unlock(&sptlrpc_conf_lock);
+ cfs_mutex_unlock(&sptlrpc_conf_lock);
}
EXPORT_SYMBOL(sptlrpc_conf_log_update_end);
if (logname2fsname(logname, fsname, sizeof(fsname)))
return;
- mutex_lock(&sptlrpc_conf_lock);
+ cfs_mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 1);
- mutex_unlock(&sptlrpc_conf_lock);
+ cfs_mutex_unlock(&sptlrpc_conf_lock);
}
EXPORT_SYMBOL(sptlrpc_conf_log_start);
if (logname2fsname(logname, fsname, sizeof(fsname)))
return;
- mutex_lock(&sptlrpc_conf_lock);
+ cfs_mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf)
sptlrpc_conf_free(conf);
- mutex_unlock(&sptlrpc_conf_lock);
+ cfs_mutex_unlock(&sptlrpc_conf_lock);
}
EXPORT_SYMBOL(sptlrpc_conf_log_stop);
target2fsname(target->uuid, name, sizeof(name));
- mutex_lock(&sptlrpc_conf_lock);
+ cfs_mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(name, 0);
if (conf == NULL)
rc = sptlrpc_rule_set_choose(&conf->sc_rset, from, to, nid, sf);
out:
- mutex_unlock(&sptlrpc_conf_lock);
+ cfs_mutex_unlock(&sptlrpc_conf_lock);
if (rc == 0)
get_default_flavor(sf);
CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
/* serialize with connect/disconnect import */
- down_read(&obd->u.cli.cl_sem);
+ cfs_down_read(&obd->u.cli.cl_sem);
imp = obd->u.cli.cl_import;
if (imp) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_sec)
imp->imp_sec_expire = cfs_time_current_sec() +
SEC_ADAPT_DELAY;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
- up_read(&obd->u.cli.cl_sem);
+ cfs_up_read(&obd->u.cli.cl_sem);
EXIT;
}
EXPORT_SYMBOL(sptlrpc_conf_client_adapt);
sptlrpc_record_rule_set(llh, conf->sc_fsname, &conf->sc_rset);
- list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
+ cfs_list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
sptlrpc_record_rule_set(llh, conf_tgt->sct_name,
&conf_tgt->sct_rset);
}
target2fsname(obd->obd_uuid.uuid, fsname, sizeof(fsname));
- mutex_lock(&sptlrpc_conf_lock);
+ cfs_mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf == NULL) {
conf_tgt ? &conf_tgt->sct_rset: NULL,
LUSTRE_SP_ANY, sp_dst, rset);
out:
- mutex_unlock(&sptlrpc_conf_lock);
+ cfs_mutex_unlock(&sptlrpc_conf_lock);
RETURN(rc);
}
EXPORT_SYMBOL(sptlrpc_conf_target_get_rules);
int sptlrpc_conf_init(void)
{
- mutex_init(&sptlrpc_conf_lock);
+ cfs_mutex_init(&sptlrpc_conf_lock);
return 0;
}
{
struct sptlrpc_conf *conf, *conf_next;
- mutex_lock(&sptlrpc_conf_lock);
- list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
+ cfs_mutex_lock(&sptlrpc_conf_lock);
+ cfs_list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
sptlrpc_conf_free(conf);
}
- LASSERT(list_empty(&sptlrpc_confs));
- mutex_unlock(&sptlrpc_conf_lock);
+ LASSERT(cfs_list_empty(&sptlrpc_confs));
+ cfs_mutex_unlock(&sptlrpc_conf_lock);
}
#ifdef __KERNEL__
-static struct mutex sec_gc_mutex;
+static cfs_mutex_t sec_gc_mutex;
static CFS_LIST_HEAD(sec_gc_list);
-static spinlock_t sec_gc_list_lock;
+static cfs_spinlock_t sec_gc_list_lock;
static CFS_LIST_HEAD(sec_gc_ctx_list);
-static spinlock_t sec_gc_ctx_list_lock;
+static cfs_spinlock_t sec_gc_ctx_list_lock;
static struct ptlrpc_thread sec_gc_thread;
-static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
+static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
LASSERT(sec->ps_policy->sp_cops->gc_ctx);
LASSERT(sec->ps_gc_interval > 0);
- LASSERT(list_empty(&sec->ps_gc_list));
+ LASSERT(cfs_list_empty(&sec->ps_gc_list));
sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
- spin_lock(&sec_gc_list_lock);
- list_add_tail(&sec_gc_list, &sec->ps_gc_list);
- spin_unlock(&sec_gc_list_lock);
+ cfs_spin_lock(&sec_gc_list_lock);
+ cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
+ cfs_spin_unlock(&sec_gc_list_lock);
CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
- if (list_empty(&sec->ps_gc_list))
+ if (cfs_list_empty(&sec->ps_gc_list))
return;
- might_sleep();
+ cfs_might_sleep();
/* signal before list_del to make iteration in gc thread safe */
- atomic_inc(&sec_gc_wait_del);
+ cfs_atomic_inc(&sec_gc_wait_del);
- spin_lock(&sec_gc_list_lock);
- list_del_init(&sec->ps_gc_list);
- spin_unlock(&sec_gc_list_lock);
+ cfs_spin_lock(&sec_gc_list_lock);
+ cfs_list_del_init(&sec->ps_gc_list);
+ cfs_spin_unlock(&sec_gc_list_lock);
/* barrier */
- mutex_lock(&sec_gc_mutex);
- mutex_unlock(&sec_gc_mutex);
+ cfs_mutex_lock(&sec_gc_mutex);
+ cfs_mutex_unlock(&sec_gc_mutex);
- atomic_dec(&sec_gc_wait_del);
+ cfs_atomic_dec(&sec_gc_wait_del);
CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(list_empty(&ctx->cc_gc_chain));
+ LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- spin_lock(&sec_gc_ctx_list_lock);
- list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
- spin_unlock(&sec_gc_ctx_list_lock);
+ cfs_spin_lock(&sec_gc_ctx_list_lock);
+ cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+ cfs_spin_unlock(&sec_gc_ctx_list_lock);
sec_gc_thread.t_flags |= SVC_SIGNAL;
cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
{
struct ptlrpc_cli_ctx *ctx;
- spin_lock(&sec_gc_ctx_list_lock);
+ cfs_spin_lock(&sec_gc_ctx_list_lock);
- while (!list_empty(&sec_gc_ctx_list)) {
- ctx = list_entry(sec_gc_ctx_list.next,
- struct ptlrpc_cli_ctx, cc_gc_chain);
- list_del_init(&ctx->cc_gc_chain);
- spin_unlock(&sec_gc_ctx_list_lock);
+ while (!cfs_list_empty(&sec_gc_ctx_list)) {
+ ctx = cfs_list_entry(sec_gc_ctx_list.next,
+ struct ptlrpc_cli_ctx, cc_gc_chain);
+ cfs_list_del_init(&ctx->cc_gc_chain);
+ cfs_spin_unlock(&sec_gc_ctx_list_lock);
LASSERT(ctx->cc_sec);
- LASSERT(atomic_read(&ctx->cc_refcount) == 1);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
sptlrpc_cli_ctx_put(ctx, 1);
- spin_lock(&sec_gc_ctx_list_lock);
+ cfs_spin_lock(&sec_gc_ctx_list_lock);
}
- spin_unlock(&sec_gc_ctx_list_lock);
+ cfs_spin_unlock(&sec_gc_ctx_list_lock);
}
static void sec_do_gc(struct ptlrpc_sec *sec)
* to trace each sec as order of expiry time.
* another issue here is we wakeup as fixed interval instead of
* according to each sec's expiry time */
- mutex_lock(&sec_gc_mutex);
- list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+ cfs_mutex_lock(&sec_gc_mutex);
+ cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
/* if someone is waiting to be deleted, let it
* proceed as soon as possible. */
- if (atomic_read(&sec_gc_wait_del)) {
+ if (cfs_atomic_read(&sec_gc_wait_del)) {
CWARN("deletion pending, start over\n");
- mutex_unlock(&sec_gc_mutex);
+ cfs_mutex_unlock(&sec_gc_mutex);
goto again;
}
sec_do_gc(sec);
}
- mutex_unlock(&sec_gc_mutex);
+ cfs_mutex_unlock(&sec_gc_mutex);
/* check ctx list again before sleep */
sec_process_ctx_list();
- lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
+ lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
l_wait_event(thread->t_ctl_waitq,
thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
&lwi);
struct l_wait_info lwi = { 0 };
int rc;
- mutex_init(&sec_gc_mutex);
- spin_lock_init(&sec_gc_list_lock);
- spin_lock_init(&sec_gc_ctx_list_lock);
+ cfs_mutex_init(&sec_gc_mutex);
+ cfs_spin_lock_init(&sec_gc_list_lock);
+ cfs_spin_lock_init(&sec_gc_ctx_list_lock);
/* initialize thread control */
memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
seq_printf(seq, "flags: %s\n",
sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
seq_printf(seq, "id: %d\n", sec->ps_id);
- seq_printf(seq, "refcount: %d\n", atomic_read(&sec->ps_refcount));
- seq_printf(seq, "nctx: %d\n", atomic_read(&sec->ps_nctx));
+ seq_printf(seq, "refcount: %d\n",
+ cfs_atomic_read(&sec->ps_refcount));
+ seq_printf(seq, "nctx: %d\n", cfs_atomic_read(&sec->ps_nctx));
seq_printf(seq, "gc internal %ld\n", sec->ps_gc_interval);
seq_printf(seq, "gc next %ld\n",
sec->ps_gc_interval ?
struct vfs_cred *vcred,
int create, int remove_dead)
{
- atomic_inc(&null_cli_ctx.cc_refcount);
+ cfs_atomic_inc(&null_cli_ctx.cc_refcount);
return &null_cli_ctx;
}
}
static struct ptlrpc_svc_ctx null_svc_ctx = {
- .sc_refcount = ATOMIC_INIT(1),
+ .sc_refcount = CFS_ATOMIC_INIT(1),
.sc_policy = &null_policy,
};
req->rq_reqlen = req->rq_reqdata_len;
req->rq_svc_ctx = &null_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
return SECSVC_OK;
}
}
rs->rs_svc_ctx = req->rq_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
rs->rs_repbuf_len = rs_size - sizeof(*rs);
static
void null_free_rs(struct ptlrpc_reply_state *rs)
{
- LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
- atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+ LASSERT(cfs_atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
+ cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);
if (!rs->rs_prealloc)
OBD_FREE(rs, rs->rs_size);
static void null_init_internal(void)
{
- static HLIST_HEAD(__list);
+ static CFS_HLIST_HEAD(__list);
null_sec.ps_policy = &null_policy;
- atomic_set(&null_sec.ps_refcount, 1); /* always busy */
+ cfs_atomic_set(&null_sec.ps_refcount, 1); /* always busy */
null_sec.ps_id = -1;
null_sec.ps_import = NULL;
null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
null_sec.ps_flvr.sf_flags = 0;
null_sec.ps_part = LUSTRE_SP_ANY;
null_sec.ps_dying = 0;
- spin_lock_init(&null_sec.ps_lock);
- atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
+ cfs_spin_lock_init(&null_sec.ps_lock);
+ cfs_atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
null_sec.ps_gc_interval = 0;
null_sec.ps_gc_next = 0;
- hlist_add_head(&null_cli_ctx.cc_cache, &__list);
- atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
+ cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
+ cfs_atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
null_cli_ctx.cc_sec = &null_sec;
null_cli_ctx.cc_ops = &null_ctx_ops;
null_cli_ctx.cc_expire = 0;
null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
PTLRPC_CTX_UPTODATE;
null_cli_ctx.cc_vcred.vc_uid = 0;
- spin_lock_init(&null_cli_ctx.cc_lock);
+ cfs_spin_lock_init(&null_cli_ctx.cc_lock);
CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
struct plain_sec {
struct ptlrpc_sec pls_base;
- rwlock_t pls_lock;
+ cfs_rwlock_t pls_lock;
struct ptlrpc_cli_ctx *pls_ctx;
};
OBD_ALLOC_PTR(ctx_new);
- write_lock(&plsec->pls_lock);
+ cfs_write_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
if (ctx) {
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
if (ctx_new)
OBD_FREE_PTR(ctx_new);
} else if (ctx_new) {
ctx = ctx_new;
- atomic_set(&ctx->cc_refcount, 1); /* for cache */
+ cfs_atomic_set(&ctx->cc_refcount, 1); /* for cache */
ctx->cc_sec = &plsec->pls_base;
ctx->cc_ops = &plain_ctx_ops;
ctx->cc_expire = 0;
ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
ctx->cc_vcred.vc_uid = 0;
- spin_lock_init(&ctx->cc_lock);
+ cfs_spin_lock_init(&ctx->cc_lock);
CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
plsec->pls_ctx = ctx;
- atomic_inc(&plsec->pls_base.ps_nctx);
- atomic_inc(&plsec->pls_base.ps_refcount);
+ cfs_atomic_inc(&plsec->pls_base.ps_nctx);
+ cfs_atomic_inc(&plsec->pls_base.ps_refcount);
- atomic_inc(&ctx->cc_refcount); /* for caller */
+ cfs_atomic_inc(&ctx->cc_refcount); /* for caller */
}
- write_unlock(&plsec->pls_lock);
+ cfs_write_unlock(&plsec->pls_lock);
return ctx;
}
LASSERT(sec->ps_policy == &plain_policy);
LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
LASSERT(plsec->pls_ctx == NULL);
class_import_put(sec->ps_import);
/*
* initialize plain_sec
*/
- rwlock_init(&plsec->pls_lock);
+ cfs_rwlock_init(&plsec->pls_lock);
plsec->pls_ctx = NULL;
sec = &plsec->pls_base;
sec->ps_policy = &plain_policy;
- atomic_set(&sec->ps_refcount, 0);
- atomic_set(&sec->ps_nctx, 0);
+ cfs_atomic_set(&sec->ps_refcount, 0);
+ cfs_atomic_set(&sec->ps_nctx, 0);
sec->ps_id = sptlrpc_get_next_secid();
sec->ps_import = class_import_get(imp);
sec->ps_flvr = *sf;
- spin_lock_init(&sec->ps_lock);
+ cfs_spin_lock_init(&sec->ps_lock);
CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
sec->ps_gc_interval = 0;
sec->ps_gc_next = 0;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
- read_lock(&plsec->pls_lock);
+ cfs_read_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
if (ctx)
- atomic_inc(&ctx->cc_refcount);
- read_unlock(&plsec->pls_lock);
+ cfs_atomic_inc(&ctx->cc_refcount);
+ cfs_read_unlock(&plsec->pls_lock);
if (unlikely(ctx == NULL))
ctx = plain_sec_install_ctx(plsec);
void plain_release_ctx(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx, int sync)
{
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
LASSERT(ctx->cc_sec == sec);
OBD_FREE_PTR(ctx);
- atomic_dec(&sec->ps_nctx);
+ cfs_atomic_dec(&sec->ps_nctx);
sptlrpc_sec_put(sec);
}
if (uid != -1)
RETURN(0);
- write_lock(&plsec->pls_lock);
+ cfs_write_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
plsec->pls_ctx = NULL;
- write_unlock(&plsec->pls_lock);
+ cfs_write_unlock(&plsec->pls_lock);
if (ctx)
sptlrpc_cli_ctx_put(ctx, 1);
****************************************/
static struct ptlrpc_svc_ctx plain_svc_ctx = {
- .sc_refcount = ATOMIC_INIT(1),
+ .sc_refcount = CFS_ATOMIC_INIT(1),
.sc_policy = &plain_policy,
};
req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
req->rq_svc_ctx = &plain_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
RETURN(SECSVC_OK);
}
}
rs->rs_svc_ctx = req->rq_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
rs->rs_repbuf_len = rs_size - sizeof(*rs);
{
ENTRY;
- LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
- atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+ LASSERT(cfs_atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
+ cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);
if (!rs->rs_prealloc)
OBD_FREE(rs, rs->rs_size);
static int ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc);
static CFS_LIST_HEAD(ptlrpc_all_services);
-spinlock_t ptlrpc_all_services_lock;
+cfs_spinlock_t ptlrpc_all_services_lock;
static char *
ptlrpc_alloc_request_buffer (int size)
return (NULL);
}
- spin_lock(&svc->srv_lock);
- list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
+ cfs_spin_lock(&svc->srv_lock);
+ cfs_list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
svc->srv_nbufs++;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return (rqbd);
}
struct ptlrpc_service *svc = rqbd->rqbd_service;
LASSERT (rqbd->rqbd_refcount == 0);
- LASSERT (list_empty(&rqbd->rqbd_reqs));
+ LASSERT (cfs_list_empty(&rqbd->rqbd_reqs));
- spin_lock(&svc->srv_lock);
- list_del(&rqbd->rqbd_list);
+ cfs_spin_lock(&svc->srv_lock);
+ cfs_list_del(&rqbd->rqbd_list);
svc->srv_nbufs--;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size);
OBD_FREE_PTR(rqbd);
#define HRT_STOPPING 1
struct ptlrpc_hr_thread {
- spinlock_t hrt_lock;
- unsigned long hrt_flags;
- cfs_waitq_t hrt_wait;
- struct list_head hrt_queue;
- struct completion hrt_completion;
+ cfs_spinlock_t hrt_lock;
+ unsigned long hrt_flags;
+ cfs_waitq_t hrt_wait;
+ cfs_list_t hrt_queue;
+ cfs_completion_t hrt_completion;
};
struct ptlrpc_hr_service {
};
struct rs_batch {
- struct list_head rsb_replies;
+ cfs_list_t rsb_replies;
struct ptlrpc_service *rsb_svc;
unsigned int rsb_n_replies;
};
if (hr->hr_index >= hr->hr_n_threads)
hr->hr_index = 0;
- spin_lock(&hr->hr_threads[idx].hrt_lock);
- list_splice_init(&b->rsb_replies,
- &hr->hr_threads[idx].hrt_queue);
- spin_unlock(&hr->hr_threads[idx].hrt_lock);
+ cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
+ cfs_list_splice_init(&b->rsb_replies,
+ &hr->hr_threads[idx].hrt_queue);
+ cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
b->rsb_n_replies = 0;
}
if (svc != b->rsb_svc || b->rsb_n_replies >= MAX_SCHEDULED) {
if (b->rsb_svc != NULL) {
rs_batch_dispatch(b);
- spin_unlock(&b->rsb_svc->srv_lock);
+ cfs_spin_unlock(&b->rsb_svc->srv_lock);
}
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
b->rsb_svc = svc;
}
- spin_lock(&rs->rs_lock);
+ cfs_spin_lock(&rs->rs_lock);
rs->rs_scheduled_ever = 1;
if (rs->rs_scheduled == 0) {
- list_move(&rs->rs_list, &b->rsb_replies);
+ cfs_list_move(&rs->rs_list, &b->rsb_replies);
rs->rs_scheduled = 1;
b->rsb_n_replies++;
}
rs->rs_committed = 1;
- spin_unlock(&rs->rs_lock);
+ cfs_spin_unlock(&rs->rs_lock);
}
/**
{
if (b->rsb_svc != 0) {
rs_batch_dispatch(b);
- spin_unlock(&b->rsb_svc->srv_lock);
+ cfs_spin_unlock(&b->rsb_svc->srv_lock);
}
}
int idx;
ENTRY;
- LASSERT(list_empty(&rs->rs_list));
+ LASSERT(cfs_list_empty(&rs->rs_list));
idx = hr->hr_index++;
if (hr->hr_index >= hr->hr_n_threads)
hr->hr_index = 0;
- spin_lock(&hr->hr_threads[idx].hrt_lock);
- list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue);
- spin_unlock(&hr->hr_threads[idx].hrt_lock);
+ cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
+ cfs_list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue);
+ cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
EXIT;
#else
- list_add_tail(&rs->rs_list, &rs->rs_service->srv_reply_queue);
+ cfs_list_add_tail(&rs->rs_list, &rs->rs_service->srv_reply_queue);
#endif
}
}
rs->rs_scheduled = 1;
- list_del_init(&rs->rs_list);
+ cfs_list_del_init(&rs->rs_list);
ptlrpc_dispatch_difficult_reply(rs);
EXIT;
}
* to attend to complete them. */
/* CAVEAT EMPTOR: spinlock ordering!!! */
- spin_lock(&exp->exp_uncommitted_replies_lock);
- list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
- rs_obd_list) {
+ cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
+ cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
+ rs_obd_list) {
LASSERT (rs->rs_difficult);
/* VBR: per-export last_committed */
LASSERT(rs->rs_export);
if (rs->rs_transno <= exp->exp_last_committed) {
- list_del_init(&rs->rs_obd_list);
+ cfs_list_del_init(&rs->rs_obd_list);
rs_batch_add(&batch, rs);
}
}
- spin_unlock(&exp->exp_uncommitted_replies_lock);
+ cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
rs_batch_fini(&batch);
EXIT;
}
int posted = 0;
for (;;) {
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
- if (list_empty (&svc->srv_idle_rqbds)) {
- spin_unlock(&svc->srv_lock);
+ if (cfs_list_empty (&svc->srv_idle_rqbds)) {
+ cfs_spin_unlock(&svc->srv_lock);
return (posted);
}
- rqbd = list_entry(svc->srv_idle_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
- list_del (&rqbd->rqbd_list);
+ rqbd = cfs_list_entry(svc->srv_idle_rqbds.next,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
+ cfs_list_del (&rqbd->rqbd_list);
/* assume we will post successfully */
svc->srv_nrqbd_receiving++;
- list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds);
+ cfs_list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
rc = ptlrpc_register_rqbd(rqbd);
if (rc != 0)
posted = 1;
}
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_nrqbd_receiving--;
- list_del(&rqbd->rqbd_list);
- list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
+ cfs_list_del(&rqbd->rqbd_list);
+ cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
/* Don't complain if no request buffers are posted right now; LNET
* won't drop requests because we set the portal lazy! */
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return (-1);
}
/* First initialise enough for early teardown */
service->srv_name = name;
- spin_lock_init(&service->srv_lock);
+ cfs_spin_lock_init(&service->srv_lock);
CFS_INIT_LIST_HEAD(&service->srv_threads);
cfs_waitq_init(&service->srv_waitq);
#endif
CFS_INIT_LIST_HEAD(&service->srv_free_rs_list);
cfs_waitq_init(&service->srv_free_rs_waitq);
- atomic_set(&service->srv_n_difficult_replies, 0);
+ cfs_atomic_set(&service->srv_n_difficult_replies, 0);
- spin_lock_init(&service->srv_at_lock);
+ cfs_spin_lock_init(&service->srv_at_lock);
CFS_INIT_LIST_HEAD(&service->srv_req_in_queue);
array = &service->srv_at_array;
array->paa_deadline = -1;
/* allocate memory for srv_at_array (ptlrpc_at_array) */
- OBD_ALLOC(array->paa_reqs_array, sizeof(struct list_head) * size);
+ OBD_ALLOC(array->paa_reqs_array, sizeof(cfs_list_t) * size);
if (array->paa_reqs_array == NULL)
GOTO(failed, NULL);
timeout is less than this, we'll be sending an early reply. */
at_init(&service->srv_at_estimate, 10, 0);
- spin_lock (&ptlrpc_all_services_lock);
- list_add (&service->srv_list, &ptlrpc_all_services);
- spin_unlock (&ptlrpc_all_services_lock);
+ cfs_spin_lock (&ptlrpc_all_services_lock);
+ cfs_list_add (&service->srv_list, &ptlrpc_all_services);
+ cfs_spin_unlock (&ptlrpc_all_services_lock);
/* Now allocate the request buffers */
rc = ptlrpc_grow_req_bufs(service);
*/
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
- LASSERT(atomic_read(&req->rq_refcount) == 0);
- LASSERT(list_empty(&req->rq_timed_list));
+ LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
+ LASSERT(cfs_list_empty(&req->rq_timed_list));
/* DEBUG_REQ() assumes the reply state of a request with a valid
* ref will not be destroyed until that reference is dropped. */
struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
struct ptlrpc_service *svc = rqbd->rqbd_service;
int refcount;
- struct list_head *tmp;
- struct list_head *nxt;
+ cfs_list_t *tmp;
+ cfs_list_t *nxt;
- if (!atomic_dec_and_test(&req->rq_refcount))
+ if (!cfs_atomic_dec_and_test(&req->rq_refcount))
return;
- spin_lock(&svc->srv_at_lock);
+ cfs_spin_lock(&svc->srv_at_lock);
if (req->rq_at_linked) {
struct ptlrpc_at_array *array = &svc->srv_at_array;
__u32 index = req->rq_at_index;
- LASSERT(!list_empty(&req->rq_timed_list));
- list_del_init(&req->rq_timed_list);
+ LASSERT(!cfs_list_empty(&req->rq_timed_list));
+ cfs_list_del_init(&req->rq_timed_list);
req->rq_at_linked = 0;
array->paa_reqs_count[index]--;
array->paa_count--;
} else
- LASSERT(list_empty(&req->rq_timed_list));
- spin_unlock(&svc->srv_at_lock);
+ LASSERT(cfs_list_empty(&req->rq_timed_list));
+ cfs_spin_unlock(&svc->srv_at_lock);
/* finalize request */
if (req->rq_export) {
req->rq_export = NULL;
}
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_n_active_reqs--;
- list_add(&req->rq_list, &rqbd->rqbd_reqs);
+ cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
refcount = --(rqbd->rqbd_refcount);
if (refcount == 0) {
/* request buffer is now idle: add to history */
- list_del(&rqbd->rqbd_list);
- list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds);
+ cfs_list_del(&rqbd->rqbd_list);
+ cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds);
svc->srv_n_history_rqbds++;
/* cull some history?
* I expect only about 1 or 2 rqbds need to be recycled here */
while (svc->srv_n_history_rqbds > svc->srv_max_history_rqbds) {
- rqbd = list_entry(svc->srv_history_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ rqbd = cfs_list_entry(svc->srv_history_rqbds.next,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
- list_del(&rqbd->rqbd_list);
+ cfs_list_del(&rqbd->rqbd_list);
svc->srv_n_history_rqbds--;
/* remove rqbd's reqs from svc's req history while
* I've got the service lock */
- list_for_each(tmp, &rqbd->rqbd_reqs) {
- req = list_entry(tmp, struct ptlrpc_request,
- rq_list);
+ cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_list);
/* Track the highest culled req seq */
if (req->rq_history_seq >
svc->srv_request_max_cull_seq)
svc->srv_request_max_cull_seq =
req->rq_history_seq;
- list_del(&req->rq_history_list);
+ cfs_list_del(&req->rq_history_list);
}
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
- list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
- req = list_entry(rqbd->rqbd_reqs.next,
- struct ptlrpc_request,
- rq_list);
- list_del(&req->rq_list);
+ cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
+ req = cfs_list_entry(rqbd->rqbd_reqs.next,
+ struct ptlrpc_request,
+ rq_list);
+ cfs_list_del(&req->rq_list);
ptlrpc_server_free_request(req);
}
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
/*
* now all reqs including the embedded req has been
* disposed, schedule request buffer for re-use.
*/
- LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
- list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
+ LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
+ 0);
+ cfs_list_add_tail(&rqbd->rqbd_list,
+ &svc->srv_idle_rqbds);
}
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
} else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
/* If we are low on memory, we are not interested in history */
- list_del(&req->rq_list);
- list_del_init(&req->rq_history_list);
- spin_unlock(&svc->srv_lock);
+ cfs_list_del(&req->rq_list);
+ cfs_list_del_init(&req->rq_history_list);
+ cfs_spin_unlock(&svc->srv_lock);
ptlrpc_server_free_request(req);
} else {
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
}
}
/* exports may get disconnected from the chain even though the
export has references, so we must keep the spin lock while
manipulating the lists */
- spin_lock(&exp->exp_obd->obd_dev_lock);
+ cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
- if (list_empty(&exp->exp_obd_chain_timed)) {
+ if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
/* this one is not timed */
- spin_unlock(&exp->exp_obd->obd_dev_lock);
+ cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
RETURN_EXIT;
}
- list_move_tail(&exp->exp_obd_chain_timed,
- &exp->exp_obd->obd_exports_timed);
+ cfs_list_move_tail(&exp->exp_obd_chain_timed,
+ &exp->exp_obd->obd_exports_timed);
- oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next,
- struct obd_export, exp_obd_chain_timed);
+ oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
+ struct obd_export, exp_obd_chain_timed);
oldest_time = oldest_exp->exp_last_request_time;
- spin_unlock(&exp->exp_obd->obd_dev_lock);
+ cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
if (exp->exp_obd->obd_recovering) {
/* be nice to everyone during recovery */
struct ptlrpc_at_array *array = &svc->srv_at_array;
__s32 next;
- spin_lock(&svc->srv_at_lock);
+ cfs_spin_lock(&svc->srv_at_lock);
if (array->paa_count == 0) {
cfs_timer_disarm(&svc->srv_at_timer);
- spin_unlock(&svc->srv_at_lock);
+ cfs_spin_unlock(&svc->srv_at_lock);
return;
}
ptlrpc_at_timer((unsigned long)svc);
else
cfs_timer_arm(&svc->srv_at_timer, cfs_time_shift(next));
- spin_unlock(&svc->srv_at_lock);
+ cfs_spin_unlock(&svc->srv_at_lock);
CDEBUG(D_INFO, "armed %s at %+ds\n", svc->srv_name, next);
}
if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
return(-ENOSYS);
- spin_lock(&svc->srv_at_lock);
- LASSERT(list_empty(&req->rq_timed_list));
+ cfs_spin_lock(&svc->srv_at_lock);
+ LASSERT(cfs_list_empty(&req->rq_timed_list));
index = (unsigned long)req->rq_deadline % array->paa_size;
if (array->paa_reqs_count[index] > 0) {
/* latest rpcs will have the latest deadlines in the list,
* so search backward. */
- list_for_each_entry_reverse(rq, &array->paa_reqs_array[index],
- rq_timed_list) {
+ cfs_list_for_each_entry_reverse(rq,
+ &array->paa_reqs_array[index],
+ rq_timed_list) {
if (req->rq_deadline >= rq->rq_deadline) {
- list_add(&req->rq_timed_list,
- &rq->rq_timed_list);
+ cfs_list_add(&req->rq_timed_list,
+ &rq->rq_timed_list);
break;
}
}
}
/* Add the request at the head of the list */
- if (list_empty(&req->rq_timed_list))
- list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);
+ if (cfs_list_empty(&req->rq_timed_list))
+ cfs_list_add(&req->rq_timed_list,
+ &array->paa_reqs_array[index]);
req->rq_at_linked = 1;
req->rq_at_index = index;
array->paa_deadline = req->rq_deadline;
found = 1;
}
- spin_unlock(&svc->srv_at_lock);
+ cfs_spin_unlock(&svc->srv_at_lock);
if (found)
ptlrpc_at_set_timer(svc);
reqcopy->rq_reqmsg = reqmsg;
memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
- LASSERT(atomic_read(&req->rq_refcount));
+ LASSERT(cfs_atomic_read(&req->rq_refcount));
/** if it is last refcount then early reply isn't needed */
- if (atomic_read(&req->rq_refcount) == 1) {
+ if (cfs_atomic_read(&req->rq_refcount) == 1) {
DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
"abort sending early reply\n");
GOTO(out, rc = -EINVAL);
static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
{
struct ptlrpc_request *rq, *n;
- struct list_head work_list;
+ cfs_list_t work_list;
struct ptlrpc_at_array *array = &svc->srv_at_array;
__u32 index, count;
time_t deadline;
int first, counter = 0;
ENTRY;
- spin_lock(&svc->srv_at_lock);
+ cfs_spin_lock(&svc->srv_at_lock);
if (svc->srv_at_check == 0) {
- spin_unlock(&svc->srv_at_lock);
+ cfs_spin_unlock(&svc->srv_at_lock);
RETURN(0);
}
delay = cfs_time_sub(cfs_time_current(), svc->srv_at_checktime);
svc->srv_at_check = 0;
if (array->paa_count == 0) {
- spin_unlock(&svc->srv_at_lock);
+ cfs_spin_unlock(&svc->srv_at_lock);
RETURN(0);
}
first = array->paa_deadline - now;
if (first > at_early_margin) {
/* We've still got plenty of time. Reset the timer. */
- spin_unlock(&svc->srv_at_lock);
+ cfs_spin_unlock(&svc->srv_at_lock);
ptlrpc_at_set_timer(svc);
RETURN(0);
}
count = array->paa_count;
while (count > 0) {
count -= array->paa_reqs_count[index];
- list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
- rq_timed_list) {
+ cfs_list_for_each_entry_safe(rq, n,
+ &array->paa_reqs_array[index],
+ rq_timed_list) {
if (rq->rq_deadline <= now + at_early_margin) {
- list_del_init(&rq->rq_timed_list);
+ cfs_list_del_init(&rq->rq_timed_list);
/**
* ptlrpc_server_drop_request() may drop
* refcount to 0 already. Let's check this and
* don't add entry to work_list
*/
- if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
- list_add(&rq->rq_timed_list, &work_list);
+ if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
+ cfs_list_add(&rq->rq_timed_list, &work_list);
counter++;
array->paa_reqs_count[index]--;
array->paa_count--;
index = 0;
}
array->paa_deadline = deadline;
- spin_unlock(&svc->srv_at_lock);
+ cfs_spin_unlock(&svc->srv_at_lock);
/* we have a new earliest deadline, restart the timer */
ptlrpc_at_set_timer(svc);
/* we took additional refcount so entries can't be deleted from list, no
* locking is needed */
- while (!list_empty(&work_list)) {
- rq = list_entry(work_list.next, struct ptlrpc_request,
- rq_timed_list);
- list_del_init(&rq->rq_timed_list);
+ while (!cfs_list_empty(&work_list)) {
+ rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
+ rq_timed_list);
+ cfs_list_del_init(&rq->rq_timed_list);
if (ptlrpc_at_send_early_reply(rq) == 0)
ptlrpc_at_add_timed(rq);
RETURN(rc);
}
if (req->rq_export && req->rq_ops) {
- spin_lock(&req->rq_export->exp_lock);
- list_add(&req->rq_exp_list, &req->rq_export->exp_queued_rpc);
- spin_unlock(&req->rq_export->exp_lock);
+ cfs_spin_lock(&req->rq_export->exp_lock);
+ cfs_list_add(&req->rq_exp_list,
+ &req->rq_export->exp_queued_rpc);
+ cfs_spin_unlock(&req->rq_export->exp_lock);
}
RETURN(0);
{
ENTRY;
if (req->rq_export && req->rq_ops) {
- spin_lock(&req->rq_export->exp_lock);
- list_del_init(&req->rq_exp_list);
- spin_unlock(&req->rq_export->exp_lock);
+ cfs_spin_lock(&req->rq_export->exp_lock);
+ cfs_list_del_init(&req->rq_exp_list);
+ cfs_spin_unlock(&req->rq_export->exp_lock);
}
EXIT;
}
{
ENTRY;
LASSERT(svc != NULL);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (req->rq_hp == 0) {
int opc = lustre_msg_get_opc(req->rq_reqmsg);
/* Add to the high priority queue. */
- list_move_tail(&req->rq_list, &svc->srv_request_hpq);
+ cfs_list_move_tail(&req->rq_list, &svc->srv_request_hpq);
req->rq_hp = 1;
if (opc != OBD_PING)
DEBUG_REQ(D_NET, req, "high priority req");
}
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
EXIT;
}
struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
ENTRY;
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
/* It may happen that the request is already taken for the processing
* but still in the export list, do not re-add it into the HP list. */
if (req->rq_phase == RQ_PHASE_NEW)
ptlrpc_hpreq_reorder_nolock(svc, req);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
EXIT;
}
if (rc < 0)
RETURN(rc);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
/* Before inserting the request into the queue, check if it is not
* inserted yet, or even already handled -- it may happen due to
* a racing ldlm_server_blocking_ast(). */
- if (req->rq_phase == RQ_PHASE_NEW && list_empty(&req->rq_list)) {
+ if (req->rq_phase == RQ_PHASE_NEW && cfs_list_empty(&req->rq_list)) {
if (rc)
ptlrpc_hpreq_reorder_nolock(svc, req);
else
- list_add_tail(&req->rq_list, &svc->srv_request_queue);
+ cfs_list_add_tail(&req->rq_list,
+ &svc->srv_request_queue);
}
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
RETURN(0);
}
ENTRY;
if (ptlrpc_server_allow_normal(svc, force) &&
- !list_empty(&svc->srv_request_queue) &&
- (list_empty(&svc->srv_request_hpq) ||
+ !cfs_list_empty(&svc->srv_request_queue) &&
+ (cfs_list_empty(&svc->srv_request_hpq) ||
svc->srv_hpreq_count >= svc->srv_hpreq_ratio)) {
- req = list_entry(svc->srv_request_queue.next,
- struct ptlrpc_request, rq_list);
+ req = cfs_list_entry(svc->srv_request_queue.next,
+ struct ptlrpc_request, rq_list);
svc->srv_hpreq_count = 0;
- } else if (!list_empty(&svc->srv_request_hpq)) {
- req = list_entry(svc->srv_request_hpq.next,
- struct ptlrpc_request, rq_list);
+ } else if (!cfs_list_empty(&svc->srv_request_hpq)) {
+ req = cfs_list_entry(svc->srv_request_hpq.next,
+ struct ptlrpc_request, rq_list);
svc->srv_hpreq_count++;
}
RETURN(req);
static int ptlrpc_server_request_pending(struct ptlrpc_service *svc, int force)
{
return ((ptlrpc_server_allow_normal(svc, force) &&
- !list_empty(&svc->srv_request_queue)) ||
- !list_empty(&svc->srv_request_hpq));
+ !cfs_list_empty(&svc->srv_request_queue)) ||
+ !cfs_list_empty(&svc->srv_request_hpq));
}
/* Handle freshly incoming reqs, add to timed early reply list,
LASSERT(svc);
- spin_lock(&svc->srv_lock);
- if (list_empty(&svc->srv_req_in_queue)) {
- spin_unlock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
+ if (cfs_list_empty(&svc->srv_req_in_queue)) {
+ cfs_spin_unlock(&svc->srv_lock);
RETURN(0);
}
- req = list_entry(svc->srv_req_in_queue.next,
- struct ptlrpc_request, rq_list);
- list_del_init (&req->rq_list);
+ req = cfs_list_entry(svc->srv_req_in_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init (&req->rq_list);
/* Consider this still a "queued" request as far as stats are
concerned */
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
/* go through security check/transform */
rc = sptlrpc_svc_unwrap_request(req);
RETURN(1);
err_req:
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_n_queued_reqs--;
svc->srv_n_active_reqs++;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
ptlrpc_server_finish_request(req);
RETURN(1);
LASSERT(svc);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
if (unlikely(!ptlrpc_server_request_pending(svc, 0) ||
(
#ifndef __KERNEL__
/* !@%$# liblustre only has 1 thread */
- atomic_read(&svc->srv_n_difficult_replies) != 0 &&
+ cfs_atomic_read(&svc->srv_n_difficult_replies) != 0 &&
#endif
svc->srv_n_active_reqs >= (svc->srv_threads_running - 1)))) {
/* Don't handle regular requests in the last thread, in order * re
* to handle difficult replies (which might block other threads)
* as well as handle any incoming reqs, early replies, etc.
* That means we always need at least 2 service threads. */
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
RETURN(0);
}
request = ptlrpc_server_request_get(svc, 0);
if (request == NULL) {
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
RETURN(0);
}
if (unlikely(fail_opc)) {
if (request->rq_export && request->rq_ops) {
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
OBD_FAIL_TIMEOUT(fail_opc, 4);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
request = ptlrpc_server_request_get(svc, 0);
if (request == NULL) {
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
RETURN(0);
}
LASSERT(ptlrpc_server_request_pending(svc, 0));
}
}
- list_del_init(&request->rq_list);
+ cfs_list_del_init(&request->rq_list);
svc->srv_n_queued_reqs--;
svc->srv_n_active_reqs++;
if (request->rq_hp)
/* The phase is changed under the lock here because we need to know
* the request is under processing (see ptlrpc_hpreq_reorder()). */
ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
ptlrpc_hpreq_fini(request);
if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
libcfs_debug_dumplog();
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
if (likely(svc->srv_stats != NULL)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
+ cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
libcfs_id2str(request->rq_peer),
lustre_msg_get_opc(request->rq_reqmsg));
request->rq_deadline));
}
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
"%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
+ cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
lustre_msg_get_status(request->rq_reqmsg),
request->rq_xid,
libcfs_id2str(request->rq_peer),
}
out_req:
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
if (request->rq_hp)
svc->srv_n_hpreq--;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
ptlrpc_server_finish_request(request);
RETURN(1);
LASSERT (rs->rs_difficult);
LASSERT (rs->rs_scheduled);
- LASSERT (list_empty(&rs->rs_list));
+ LASSERT (cfs_list_empty(&rs->rs_list));
- spin_lock (&exp->exp_lock);
+ cfs_spin_lock (&exp->exp_lock);
/* Noop if removed already */
- list_del_init (&rs->rs_exp_list);
- spin_unlock (&exp->exp_lock);
+ cfs_list_del_init (&rs->rs_exp_list);
+ cfs_spin_unlock (&exp->exp_lock);
/* The disk commit callback holds exp_uncommitted_replies_lock while it
* iterates over newly committed replies, removing them from
* HRT threads and further commit callbacks by checking rs_committed
* which is set in the commit callback while it holds both
* rs_lock and exp_uncommitted_reples.
- *
+ *
* If we see rs_committed clear, the commit callback _may_ not have
* handled this reply yet and we race with it to grab
* exp_uncommitted_replies_lock before removing the reply from
* rs_lock, which we do right next.
*/
if (!rs->rs_committed) {
- spin_lock(&exp->exp_uncommitted_replies_lock);
- list_del_init(&rs->rs_obd_list);
- spin_unlock(&exp->exp_uncommitted_replies_lock);
+ cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
+ cfs_list_del_init(&rs->rs_obd_list);
+ cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
}
- spin_lock(&rs->rs_lock);
+ cfs_spin_lock(&rs->rs_lock);
been_handled = rs->rs_handled;
rs->rs_handled = 1;
}
if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
- spin_unlock(&rs->rs_lock);
+ cfs_spin_unlock(&rs->rs_lock);
if (!been_handled && rs->rs_on_net) {
LNetMDUnlink(rs->rs_md_h);
ldlm_lock_decref(&rs->rs_locks[nlocks],
rs->rs_modes[nlocks]);
- spin_lock(&rs->rs_lock);
+ cfs_spin_lock(&rs->rs_lock);
}
rs->rs_scheduled = 0;
if (!rs->rs_on_net) {
/* Off the net */
- spin_unlock(&rs->rs_lock);
+ cfs_spin_unlock(&rs->rs_lock);
class_export_put (exp);
rs->rs_export = NULL;
ptlrpc_rs_decref (rs);
- atomic_dec (&svc->srv_outstanding_replies);
- if (atomic_dec_and_test(&svc->srv_n_difficult_replies) &&
+ cfs_atomic_dec (&svc->srv_outstanding_replies);
+ if (cfs_atomic_dec_and_test(&svc->srv_n_difficult_replies) &&
svc->srv_is_stopping)
cfs_waitq_broadcast(&svc->srv_waitq);
RETURN(1);
}
/* still on the net; callback will schedule */
- spin_unlock(&rs->rs_lock);
+ cfs_spin_unlock(&rs->rs_lock);
RETURN(1);
}
struct ptlrpc_reply_state *rs = NULL;
ENTRY;
- spin_lock(&svc->srv_lock);
- if (!list_empty(&svc->srv_reply_queue)) {
- rs = list_entry(svc->srv_reply_queue.prev,
- struct ptlrpc_reply_state,
- rs_list);
- list_del_init(&rs->rs_list);
+ cfs_spin_lock(&svc->srv_lock);
+ if (!cfs_list_empty(&svc->srv_reply_queue)) {
+ rs = cfs_list_entry(svc->srv_reply_queue.prev,
+ struct ptlrpc_reply_state,
+ rs_list);
+ cfs_list_del_init(&rs->rs_list);
}
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
if (rs != NULL)
ptlrpc_handle_rs(rs);
RETURN(rs != NULL);
{
int did_something = 0;
int rc;
- struct list_head *tmp, *nxt;
+ cfs_list_t *tmp, *nxt;
ENTRY;
/* I'm relying on being single threaded, not to have to lock
* ptlrpc_all_services etc */
- list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
+ cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
struct ptlrpc_service *svc =
- list_entry (tmp, struct ptlrpc_service, srv_list);
+ cfs_list_entry (tmp, struct ptlrpc_service, srv_list);
if (svc->srv_threads_running != 0) /* I've recursed */
continue;
struct obd_device *dev = data->dev;
struct ptlrpc_reply_state *rs;
#ifdef WITH_GROUP_INFO
- struct group_info *ginfo = NULL;
+ cfs_group_info_t *ginfo = NULL;
#endif
struct lu_env env;
int counter = 0, rc = 0;
if (svc->srv_cpu_affinity) {
int cpu, num_cpu;
- for (cpu = 0, num_cpu = 0; cpu < num_possible_cpus(); cpu++) {
- if (!cpu_online(cpu))
+ for (cpu = 0, num_cpu = 0; cpu < cfs_num_possible_cpus();
+ cpu++) {
+ if (!cfs_cpu_online(cpu))
continue;
- if (num_cpu == thread->t_id % num_online_cpus())
+ if (num_cpu == thread->t_id % cfs_num_online_cpus())
break;
num_cpu++;
}
- set_cpus_allowed(cfs_current(), node_to_cpumask(cpu_to_node(cpu)));
+ cfs_set_cpus_allowed(cfs_current(),
+ node_to_cpumask(cpu_to_node(cpu)));
}
#endif
#ifdef WITH_GROUP_INFO
- ginfo = groups_alloc(0);
+ ginfo = cfs_groups_alloc(0);
if (!ginfo) {
rc = -ENOMEM;
goto out;
}
- set_current_groups(ginfo);
- put_group_info(ginfo);
+ cfs_set_current_groups(ginfo);
+ cfs_put_group_info(ginfo);
#endif
if (svc->srv_init != NULL) {
goto out_srv_fini;
}
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
/* SVC_STOPPING may already be set here if someone else is trying
* to stop the service while this new thread has been dynamically
* forked. We still set SVC_RUNNING to let our creator know that
* we are now running, however we will exit as soon as possible */
thread->t_flags |= SVC_RUNNING;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
/*
* wake up our creator. Note: @data is invalid after this point,
*/
cfs_waitq_signal(&thread->t_ctl_waitq);
- thread->t_watchdog = lc_watchdog_add(GET_TIMEOUT(svc), NULL, NULL);
+ thread->t_watchdog = lc_watchdog_add(CFS_GET_TIMEOUT(svc), NULL, NULL);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_threads_running++;
- list_add(&rs->rs_list, &svc->srv_free_rs_list);
- spin_unlock(&svc->srv_lock);
+ cfs_list_add(&rs->rs_list, &svc->srv_free_rs_list);
+ cfs_spin_unlock(&svc->srv_lock);
cfs_waitq_signal(&svc->srv_free_rs_waitq);
CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
lc_watchdog_disable(thread->t_watchdog);
- cond_resched();
+ cfs_cond_resched();
l_wait_event_exclusive (svc->srv_waitq,
thread->t_flags & SVC_STOPPING ||
svc->srv_is_stopping ||
- (!list_empty(&svc->srv_idle_rqbds) &&
+ (!cfs_list_empty(&svc->srv_idle_rqbds) &&
svc->srv_rqbd_timeout == 0) ||
- !list_empty(&svc->srv_req_in_queue) ||
+ !cfs_list_empty(&svc->srv_req_in_queue) ||
(ptlrpc_server_request_pending(svc, 0) &&
(svc->srv_n_active_reqs <
(svc->srv_threads_running - 1))) ||
if (thread->t_flags & SVC_STOPPING || svc->srv_is_stopping)
break;
- lc_watchdog_touch(thread->t_watchdog, GET_TIMEOUT(svc));
+ lc_watchdog_touch(thread->t_watchdog, CFS_GET_TIMEOUT(svc));
ptlrpc_check_rqbd_pool(svc);
/* Ignore return code - we tried... */
ptlrpc_start_thread(dev, svc);
- if (!list_empty(&svc->srv_req_in_queue)) {
+ if (!cfs_list_empty(&svc->srv_req_in_queue)) {
/* Process all incoming reqs before handling any */
ptlrpc_server_handle_req_in(svc);
/* but limit ourselves in case of flood */
lu_context_exit(&env.le_ctx);
}
- if (!list_empty(&svc->srv_idle_rqbds) &&
+ if (!cfs_list_empty(&svc->srv_idle_rqbds) &&
ptlrpc_server_post_idle_rqbds(svc) < 0) {
/* I just failed to repost request buffers. Wait
* for a timeout (unless something else happens)
CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
thread, thread->t_pid, thread->t_id, rc);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
svc->srv_threads_running--; /* must know immediately */
thread->t_id = rc;
thread->t_flags = SVC_STOPPED;
cfs_waitq_signal(&thread->t_ctl_waitq);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return rc;
}
};
static int hrt_dont_sleep(struct ptlrpc_hr_thread *t,
- struct list_head *replies)
+ cfs_list_t *replies)
{
int result;
- spin_lock(&t->hrt_lock);
- list_splice_init(&t->hrt_queue, replies);
- result = test_bit(HRT_STOPPING, &t->hrt_flags) ||
- !list_empty(replies);
- spin_unlock(&t->hrt_lock);
+ cfs_spin_lock(&t->hrt_lock);
+ cfs_list_splice_init(&t->hrt_queue, replies);
+ result = cfs_test_bit(HRT_STOPPING, &t->hrt_flags) ||
+ !cfs_list_empty(replies);
+ cfs_spin_unlock(&t->hrt_lock);
return result;
}
cfs_daemonize_ctxt(threadname);
#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
- set_cpus_allowed(cfs_current(),
- node_to_cpumask(cpu_to_node(hr_args->cpu_index)));
+ cfs_set_cpus_allowed(cfs_current(),
+ node_to_cpumask(cpu_to_node(hr_args->cpu_index)));
#endif
- set_bit(HRT_RUNNING, &t->hrt_flags);
+ cfs_set_bit(HRT_RUNNING, &t->hrt_flags);
cfs_waitq_signal(&t->hrt_wait);
- while (!test_bit(HRT_STOPPING, &t->hrt_flags)) {
+ while (!cfs_test_bit(HRT_STOPPING, &t->hrt_flags)) {
- cfs_wait_event(t->hrt_wait, hrt_dont_sleep(t, &replies));
- while (!list_empty(&replies)) {
+ l_cfs_wait_event(t->hrt_wait, hrt_dont_sleep(t, &replies));
+ while (!cfs_list_empty(&replies)) {
struct ptlrpc_reply_state *rs;
- rs = list_entry(replies.prev,
- struct ptlrpc_reply_state,
- rs_list);
- list_del_init(&rs->rs_list);
+ rs = cfs_list_entry(replies.prev,
+ struct ptlrpc_reply_state,
+ rs_list);
+ cfs_list_del_init(&rs->rs_list);
ptlrpc_handle_rs(rs);
}
}
- clear_bit(HRT_RUNNING, &t->hrt_flags);
- complete(&t->hrt_completion);
+ cfs_clear_bit(HRT_RUNNING, &t->hrt_flags);
+ cfs_complete(&t->hrt_completion);
return 0;
}
rc = cfs_kernel_thread(ptlrpc_hr_main, (void*)&args,
CLONE_VM|CLONE_FILES);
if (rc < 0) {
- complete(&t->hrt_completion);
+ cfs_complete(&t->hrt_completion);
GOTO(out, rc);
}
- cfs_wait_event(t->hrt_wait, test_bit(HRT_RUNNING, &t->hrt_flags));
+ l_cfs_wait_event(t->hrt_wait, cfs_test_bit(HRT_RUNNING, &t->hrt_flags));
RETURN(0);
out:
return rc;
{
ENTRY;
- set_bit(HRT_STOPPING, &t->hrt_flags);
+ cfs_set_bit(HRT_STOPPING, &t->hrt_flags);
cfs_waitq_signal(&t->hrt_wait);
- wait_for_completion(&t->hrt_completion);
+ cfs_wait_for_completion(&t->hrt_completion);
EXIT;
}
for (n = 0, cpu = 0; n < hr->hr_n_threads; n++) {
#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
- while(!cpu_online(cpu)) {
+ while(!cfs_cpu_online(cpu)) {
cpu++;
- if (cpu >= num_possible_cpus())
+ if (cpu >= cfs_num_possible_cpus())
cpu = 0;
}
#endif
CDEBUG(D_RPCTRACE, "Stopping thread [ %p : %u ]\n",
thread, thread->t_pid);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
/* let the thread know that we would like it to stop asap */
thread->t_flags |= SVC_STOPPING;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
cfs_waitq_broadcast(&svc->srv_waitq);
l_wait_event(thread->t_ctl_waitq,
(thread->t_flags & SVC_STOPPED), &lwi);
- spin_lock(&svc->srv_lock);
- list_del(&thread->t_link);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
+ cfs_list_del(&thread->t_link);
+ cfs_spin_unlock(&svc->srv_lock);
OBD_FREE_PTR(thread);
EXIT;
struct ptlrpc_thread *thread;
ENTRY;
- spin_lock(&svc->srv_lock);
- while (!list_empty(&svc->srv_threads)) {
- thread = list_entry(svc->srv_threads.next,
- struct ptlrpc_thread, t_link);
+ cfs_spin_lock(&svc->srv_lock);
+ while (!cfs_list_empty(&svc->srv_threads)) {
+ thread = cfs_list_entry(svc->srv_threads.next,
+ struct ptlrpc_thread, t_link);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
ptlrpc_stop_thread(svc, thread);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
}
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
EXIT;
}
RETURN(-ENOMEM);
cfs_waitq_init(&thread->t_ctl_waitq);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
if (svc->srv_threads_started >= svc->srv_threads_max) {
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
OBD_FREE_PTR(thread);
RETURN(-EMFILE);
}
- list_add(&thread->t_link, &svc->srv_threads);
+ cfs_list_add(&thread->t_link, &svc->srv_threads);
id = svc->srv_threads_started++;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
thread->t_svc = svc;
thread->t_id = id;
if (rc < 0) {
CERROR("cannot start thread '%s': rc %d\n", name, rc);
- spin_lock(&svc->srv_lock);
- list_del(&thread->t_link);
+ cfs_spin_lock(&svc->srv_lock);
+ cfs_list_del(&thread->t_link);
--svc->srv_threads_started;
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
OBD_FREE(thread, sizeof(*thread));
RETURN(rc);
int ptlrpc_hr_init(void)
{
int i;
- int n_cpus = num_online_cpus();
+ int n_cpus = cfs_num_online_cpus();
struct ptlrpc_hr_service *hr;
int size;
int rc;
for (i = 0; i < n_cpus; i++) {
struct ptlrpc_hr_thread *t = &hr->hr_threads[i];
- spin_lock_init(&t->hrt_lock);
+ cfs_spin_lock_init(&t->hrt_lock);
cfs_waitq_init(&t->hrt_wait);
CFS_INIT_LIST_HEAD(&t->hrt_queue);
- init_completion(&t->hrt_completion);
+ cfs_init_completion(&t->hrt_completion);
}
hr->hr_n_threads = n_cpus;
hr->hr_size = size;
int rc;
struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
NULL, NULL);
- rc = l_wait_event(svc->srv_waitq,
- atomic_read(&svc->srv_n_difficult_replies) == 0,
+ rc = l_wait_event(svc->srv_waitq, cfs_atomic_read(&svc-> \
+ srv_n_difficult_replies) == 0,
&lwi);
if (rc == 0)
break;
{
int rc;
struct l_wait_info lwi;
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ptlrpc_reply_state *rs, *t;
struct ptlrpc_at_array *array = &service->srv_at_array;
ENTRY;
cfs_timer_disarm(&service->srv_at_timer);
ptlrpc_stop_all_threads(service);
- LASSERT(list_empty(&service->srv_threads));
+ LASSERT(cfs_list_empty(&service->srv_threads));
- spin_lock (&ptlrpc_all_services_lock);
- list_del_init (&service->srv_list);
- spin_unlock (&ptlrpc_all_services_lock);
+ cfs_spin_lock (&ptlrpc_all_services_lock);
+ cfs_list_del_init (&service->srv_list);
+ cfs_spin_unlock (&ptlrpc_all_services_lock);
ptlrpc_lprocfs_unregister_service(service);
/* Unlink all the request buffers. This forces a 'final' event with
* its 'unlink' flag set for each posted rqbd */
- list_for_each(tmp, &service->srv_active_rqbds) {
+ cfs_list_for_each(tmp, &service->srv_active_rqbds) {
struct ptlrpc_request_buffer_desc *rqbd =
- list_entry(tmp, struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ cfs_list_entry(tmp, struct ptlrpc_request_buffer_desc,
+ rqbd_list);
rc = LNetMDUnlink(rqbd->rqbd_md_h);
LASSERT (rc == 0 || rc == -ENOENT);
/* Wait for the network to release any buffers it's currently
* filling */
for (;;) {
- spin_lock(&service->srv_lock);
+ cfs_spin_lock(&service->srv_lock);
rc = service->srv_nrqbd_receiving;
- spin_unlock(&service->srv_lock);
+ cfs_spin_unlock(&service->srv_lock);
if (rc == 0)
break;
}
/* schedule all outstanding replies to terminate them */
- spin_lock(&service->srv_lock);
- while (!list_empty(&service->srv_active_replies)) {
+ cfs_spin_lock(&service->srv_lock);
+ while (!cfs_list_empty(&service->srv_active_replies)) {
struct ptlrpc_reply_state *rs =
- list_entry(service->srv_active_replies.next,
- struct ptlrpc_reply_state, rs_list);
- spin_lock(&rs->rs_lock);
+ cfs_list_entry(service->srv_active_replies.next,
+ struct ptlrpc_reply_state, rs_list);
+ cfs_spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
- spin_unlock(&rs->rs_lock);
+ cfs_spin_unlock(&rs->rs_lock);
}
- spin_unlock(&service->srv_lock);
+ cfs_spin_unlock(&service->srv_lock);
/* purge the request queue. NB No new replies (rqbds all unlinked)
* and no service threads, so I'm the only thread noodling the
* request queue now */
- while (!list_empty(&service->srv_req_in_queue)) {
+ while (!cfs_list_empty(&service->srv_req_in_queue)) {
struct ptlrpc_request *req =
- list_entry(service->srv_req_in_queue.next,
- struct ptlrpc_request,
- rq_list);
+ cfs_list_entry(service->srv_req_in_queue.next,
+ struct ptlrpc_request,
+ rq_list);
- list_del(&req->rq_list);
+ cfs_list_del(&req->rq_list);
service->srv_n_queued_reqs--;
service->srv_n_active_reqs++;
ptlrpc_server_finish_request(req);
struct ptlrpc_request *req;
req = ptlrpc_server_request_get(service, 1);
- list_del(&req->rq_list);
+ cfs_list_del(&req->rq_list);
service->srv_n_queued_reqs--;
service->srv_n_active_reqs++;
ptlrpc_hpreq_fini(req);
LASSERT(service->srv_n_queued_reqs == 0);
LASSERT(service->srv_n_active_reqs == 0);
LASSERT(service->srv_n_history_rqbds == 0);
- LASSERT(list_empty(&service->srv_active_rqbds));
+ LASSERT(cfs_list_empty(&service->srv_active_rqbds));
/* Now free all the request buffers since nothing references them
* any more... */
- while (!list_empty(&service->srv_idle_rqbds)) {
+ while (!cfs_list_empty(&service->srv_idle_rqbds)) {
struct ptlrpc_request_buffer_desc *rqbd =
- list_entry(service->srv_idle_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ cfs_list_entry(service->srv_idle_rqbds.next,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
ptlrpc_free_rqbd(rqbd);
}
ptlrpc_wait_replies(service);
- list_for_each_entry_safe(rs, t, &service->srv_free_rs_list, rs_list) {
- list_del(&rs->rs_list);
+ cfs_list_for_each_entry_safe(rs, t, &service->srv_free_rs_list,
+ rs_list) {
+ cfs_list_del(&rs->rs_list);
OBD_FREE(rs, service->srv_max_reply_size);
}
if (array->paa_reqs_array != NULL) {
OBD_FREE(array->paa_reqs_array,
- sizeof(struct list_head) * array->paa_size);
+ sizeof(cfs_list_t) * array->paa_size);
array->paa_reqs_array = NULL;
}
if (svc == NULL)
return 0;
- do_gettimeofday(&right_now);
+ cfs_gettimeofday(&right_now);
- spin_lock(&svc->srv_lock);
+ cfs_spin_lock(&svc->srv_lock);
if (!ptlrpc_server_request_pending(svc, 1)) {
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
return 0;
}
/* How long has the next entry been waiting? */
- if (list_empty(&svc->srv_request_queue))
- request = list_entry(svc->srv_request_hpq.next,
- struct ptlrpc_request, rq_list);
+ if (cfs_list_empty(&svc->srv_request_queue))
+ request = cfs_list_entry(svc->srv_request_hpq.next,
+ struct ptlrpc_request, rq_list);
else
- request = list_entry(svc->srv_request_queue.next,
- struct ptlrpc_request, rq_list);
+ request = cfs_list_entry(svc->srv_request_queue.next,
+ struct ptlrpc_request, rq_list);
timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
- spin_unlock(&svc->srv_lock);
+ cfs_spin_unlock(&svc->srv_lock);
if ((timediff / ONE_MILLION) > (AT_OFF ? obd_timeout * 3/2 :
at_max)) {
__u32 start_epoch;
struct obd_device_target *obt = &obd->u.obt;
struct ptlrpc_request *req;
- struct list_head client_list;
+ cfs_list_t client_list;
- spin_lock(&obt->obt_translock);
+ cfs_spin_lock(&obt->obt_translock);
start_epoch = lr_epoch(le64_to_cpu(obt->obt_last_transno)) + 1;
obt->obt_last_transno = cpu_to_le64((__u64)start_epoch <<
LR_EPOCH_BITS);
obt->obt_lsd->lsd_start_epoch = cpu_to_le32(start_epoch);
- spin_unlock(&obt->obt_translock);
+ cfs_spin_unlock(&obt->obt_translock);
CFS_INIT_LIST_HEAD(&client_list);
- spin_lock_bh(&obd->obd_processing_task_lock);
- list_splice_init(&obd->obd_final_req_queue, &client_list);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_list_splice_init(&obd->obd_final_req_queue, &client_list);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
/**
* go through list of exports participated in recovery and
* set new epoch for them
*/
- list_for_each_entry(req, &client_list, rq_list) {
+ cfs_list_for_each_entry(req, &client_list, rq_list) {
LASSERT(!req->rq_export->exp_delayed);
obt_client_epoch_update(req->rq_export);
}
/** return list back at once */
- spin_lock_bh(&obd->obd_processing_task_lock);
- list_splice_init(&client_list, &obd->obd_final_req_queue);
- spin_unlock_bh(&obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ cfs_list_splice_init(&client_list, &obd->obd_final_req_queue);
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
obt_server_data_update(obd, 1);
}
lut->lut_lsd.lsd_uuid, lut->lut_mount_count,
lut->lut_last_transno);
- spin_lock(&lut->lut_translock);
+ cfs_spin_lock(&lut->lut_translock);
lut->lut_lsd.lsd_last_transno = lut->lut_last_transno;
- spin_unlock(&lut->lut_translock);
+ cfs_spin_unlock(&lut->lut_translock);
lsd_cpu_to_le(&lut->lut_lsd, &tmp_lsd);
if (lut->lut_last_rcvd != NULL)
struct lu_env env;
struct ptlrpc_request *req;
__u32 start_epoch;
- struct list_head client_list;
+ cfs_list_t client_list;
int rc;
if (lut->lut_obd->obd_stopping)
return;
}
- spin_lock(&lut->lut_translock);
+ cfs_spin_lock(&lut->lut_translock);
start_epoch = lr_epoch(lut->lut_last_transno) + 1;
lut->lut_last_transno = (__u64)start_epoch << LR_EPOCH_BITS;
lut->lut_lsd.lsd_start_epoch = start_epoch;
- spin_unlock(&lut->lut_translock);
+ cfs_spin_unlock(&lut->lut_translock);
CFS_INIT_LIST_HEAD(&client_list);
/**
* The recovery is not yet finished and final queue can still be updated
* with resend requests. Move final list to separate one for processing
*/
- spin_lock_bh(&lut->lut_obd->obd_processing_task_lock);
- list_splice_init(&lut->lut_obd->obd_final_req_queue, &client_list);
- spin_unlock_bh(&lut->lut_obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&lut->lut_obd->obd_processing_task_lock);
+ cfs_list_splice_init(&lut->lut_obd->obd_final_req_queue, &client_list);
+ cfs_spin_unlock_bh(&lut->lut_obd->obd_processing_task_lock);
/**
* go through list of exports participated in recovery and
* set new epoch for them
*/
- list_for_each_entry(req, &client_list, rq_list) {
+ cfs_list_for_each_entry(req, &client_list, rq_list) {
LASSERT(!req->rq_export->exp_delayed);
if (!req->rq_export->exp_vbr_failed)
lut_client_epoch_update(&env, lut, req->rq_export);
}
/** return list back at once */
- spin_lock_bh(&lut->lut_obd->obd_processing_task_lock);
- list_splice_init(&client_list, &lut->lut_obd->obd_final_req_queue);
- spin_unlock_bh(&lut->lut_obd->obd_processing_task_lock);
+ cfs_spin_lock_bh(&lut->lut_obd->obd_processing_task_lock);
+ cfs_list_splice_init(&client_list, &lut->lut_obd->obd_final_req_queue);
+ cfs_spin_unlock_bh(&lut->lut_obd->obd_processing_task_lock);
/** update server epoch */
lut_server_data_update(&env, lut, 1);
lu_env_fini(&env);
{
struct obd_export *exp = data;
LASSERT(exp->exp_obd == lut->lut_obd);
- spin_lock(&lut->lut_translock);
+ cfs_spin_lock(&lut->lut_translock);
if (transno > lut->lut_obd->obd_last_committed)
lut->lut_obd->obd_last_committed = transno;
LASSERT(exp);
if (transno > exp->exp_last_committed) {
exp->exp_last_committed = transno;
- spin_unlock(&lut->lut_translock);
+ cfs_spin_unlock(&lut->lut_translock);
ptlrpc_commit_replies(exp);
} else {
- spin_unlock(&lut->lut_translock);
+ cfs_spin_unlock(&lut->lut_translock);
}
class_export_cb_put(exp);
if (transno)
lut->lut_bottom = dt;
lut->lut_last_rcvd = NULL;
- spin_lock_init(&lut->lut_translock);
- spin_lock_init(&lut->lut_client_bitmap_lock);
- spin_lock_init(&lut->lut_trans_table_lock);
+ cfs_spin_lock_init(&lut->lut_translock);
+ cfs_spin_lock_init(&lut->lut_client_bitmap_lock);
+ cfs_spin_lock_init(&lut->lut_trans_table_lock);
/** obdfilter has no lu_device stack yet */
if (dt == NULL)
if (!oqctl)
RETURN(-ENOMEM);
- down(&obt->obt_quotachecking);
+ cfs_down(&obt->obt_quotachecking);
id = UGQUOTA2LQC(type);
/* quota already turned on */
if ((obt->obt_qctxt.lqc_flags & id) == id)
if (is_master) {
mds = &obd->u.mds;
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
/* turn on cluster wide quota */
rc1 = mds_admin_quota_on(obd, oqctl);
if (rc1 && rc1 != -EALREADY) {
out_ctxt:
if (mds != NULL)
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
out:
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
OBD_FREE_PTR(oqctl);
return rc;
}
if (count > MAX_STYPE_SIZE)
return -EINVAL;
- if (copy_from_user(stype, buffer, count))
+ if (cfs_copy_from_user(stype, buffer, count))
return -EFAULT;
for (i = 0 ; i < count ; i++) {
lqs->lqs_key = lqs_key;
- spin_lock_init(&lqs->lqs_lock);
+ cfs_spin_lock_init(&lqs->lqs_lock);
lqs->lqs_bwrite_pending = 0;
lqs->lqs_iwrite_pending = 0;
lqs->lqs_ino_rec = 0;
}
lqs_initref(lqs);
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_valid)
rc = -EBUSY;
else
rc = cfs_hash_add_unique(qctxt->lqc_lqs_hash,
&lqs->lqs_key, &lqs->lqs_hash);
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
if (!rc)
lqs_getref(lqs);
CDEBUG(D_QUOTA, "before: bunit: %lu, iunit: %lu.\n",
lqs->lqs_bunit_sz, lqs->lqs_iunit_sz);
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
for (i = 0; i < 2; i++) {
if (i == 0 && !QAQ_IS_ADJBLK(oqaq))
continue;
if (tmp < 0)
rc |= i ? LQS_INO_INCREASE : LQS_BLK_INCREASE;
}
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
CDEBUG(D_QUOTA, "after: bunit: %lu, iunit: %lu.\n",
lqs->lqs_bunit_sz, lqs->lqs_iunit_sz);
rc = target_quotacheck_callback(exp, oqctl);
class_export_put(exp);
- up(qta->qta_sem);
+ cfs_up(qta->qta_sem);
OBD_FREE_PTR(qta);
return rc;
}
if (!qta)
RETURN(ENOMEM);
- down(&obt->obt_quotachecking);
+ cfs_down(&obt->obt_quotachecking);
qta->qta_exp = exp;
qta->qta_obd = obd;
/* we get ref for exp because target_quotacheck_callback() will use this
* export later b=18126 */
class_export_get(exp);
- rc = kernel_thread(target_quotacheck_thread, qta, CLONE_VM|CLONE_FILES);
+ rc = cfs_kernel_thread(target_quotacheck_thread, qta,
+ CLONE_VM|CLONE_FILES);
if (rc >= 0) {
CDEBUG(D_INFO, "%s: target_quotacheck_thread: %d\n",
obd->obd_name, rc);
}
out:
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
OBD_FREE_PTR(qta);
return rc;
}
unsigned long default_itune_ratio = 50; /* 50 percentage */
cfs_mem_cache_t *qunit_cachep = NULL;
-struct list_head qunit_hash[NR_DQHASH];
-spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;
+cfs_list_t qunit_hash[NR_DQHASH];
+cfs_spinlock_t qunit_hash_lock = CFS_SPIN_LOCK_UNLOCKED;
/* please sync qunit_state with qunit_state_names */
enum qunit_state {
};
struct lustre_qunit {
- struct list_head lq_hash; /** Hash list in memory */
- atomic_t lq_refcnt; /** Use count */
+ cfs_list_t lq_hash; /** Hash list in memory */
+ cfs_atomic_t lq_refcnt; /** Use count */
struct lustre_quota_ctxt *lq_ctxt; /** Quota context this applies to */
struct qunit_data lq_data; /** See qunit_data */
unsigned int lq_opc; /** QUOTA_DQACQ, QUOTA_DQREL */
cfs_waitq_t lq_waitq; /** Threads waiting for this qunit */
- spinlock_t lq_lock; /** Protect the whole structure */
+ cfs_spinlock_t lq_lock; /** Protect the whole structure */
enum qunit_state lq_state; /** Present the status of qunit */
int lq_rc; /** The rc of lq_data */
pid_t lq_owner;
#define QUNIT_SET_STATE(qunit, state) \
do { \
- spin_lock(&qunit->lq_lock); \
+ cfs_spin_lock(&qunit->lq_lock); \
QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), " \
"lq_rc(%d), lq_owner(%d)\n", \
qunit, qunit_state_names[qunit->lq_state], \
qunit_state_names[state], qunit->lq_rc, \
qunit->lq_owner); \
qunit->lq_state = state; \
- spin_unlock(&qunit->lq_lock); \
+ cfs_spin_unlock(&qunit->lq_lock); \
} while(0)
#define QUNIT_SET_STATE_AND_RC(qunit, state, rc) \
do { \
- spin_lock(&qunit->lq_lock); \
+ cfs_spin_lock(&qunit->lq_lock); \
qunit->lq_rc = rc; \
QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), " \
"lq_rc(%d), lq_owner(%d)\n", \
qunit_state_names[state], qunit->lq_rc, \
qunit->lq_owner); \
qunit->lq_state = state; \
- spin_unlock(&qunit->lq_lock); \
+ cfs_spin_unlock(&qunit->lq_lock); \
} while(0)
int should_translate_quota (struct obd_import *imp)
int i;
ENTRY;
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
for (i = 0; i < NR_DQHASH; i++)
- LASSERT(list_empty(qunit_hash + i));
- spin_unlock(&qunit_hash_lock);
+ LASSERT(cfs_list_empty(qunit_hash + i));
+ cfs_spin_unlock(&qunit_hash_lock);
if (qunit_cachep) {
int rc;
if (!qunit_cachep)
RETURN(-ENOMEM);
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
for (i = 0; i < NR_DQHASH; i++)
CFS_INIT_LIST_HEAD(qunit_hash + i);
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
RETURN(0);
}
struct qunit_data *tmp;
LASSERT_SPIN_LOCKED(&qunit_hash_lock);
- list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
+ cfs_list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
tmp = &qunit->lq_data;
if (qunit->lq_ctxt == qctxt &&
qdata->qd_id == tmp->qd_id &&
if (!ll_sb_any_quota_active(sb))
RETURN(0);
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_valid){
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
RETURN(0);
}
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
OBD_ALLOC_PTR(qctl);
if (qctl == NULL)
QDATA_IS_GRP(qdata) ? "g" : "u", qdata->qd_id);
GOTO (out, ret = 0);
}
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (QDATA_IS_BLK(qdata)) {
qunit_sz = lqs->lqs_bunit_sz;
record, qunit_sz, tune_sz, ret);
LASSERT(ret == 0 || qdata->qd_count);
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
lqs_putref(lqs);
EXIT;
RETURN(NULL);
CFS_INIT_LIST_HEAD(&qunit->lq_hash);
- init_waitqueue_head(&qunit->lq_waitq);
- atomic_set(&qunit->lq_refcnt, 1);
+ cfs_waitq_init(&qunit->lq_waitq);
+ cfs_atomic_set(&qunit->lq_refcnt, 1);
qunit->lq_ctxt = qctxt;
memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
qunit->lq_opc = opc;
- qunit->lq_lock = SPIN_LOCK_UNLOCKED;
+ qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
qunit->lq_owner = cfs_curproc_pid();
RETURN(qunit);
static inline void qunit_get(struct lustre_qunit *qunit)
{
- atomic_inc(&qunit->lq_refcnt);
+ cfs_atomic_inc(&qunit->lq_refcnt);
}
static void qunit_put(struct lustre_qunit *qunit)
{
- LASSERT(atomic_read(&qunit->lq_refcnt));
- if (atomic_dec_and_test(&qunit->lq_refcnt))
+ LASSERT(cfs_atomic_read(&qunit->lq_refcnt));
+ if (cfs_atomic_dec_and_test(&qunit->lq_refcnt))
free_qunit(qunit);
}
static void
insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
{
- struct list_head *head;
+ cfs_list_t *head;
- LASSERT(list_empty(&qunit->lq_hash));
+ LASSERT(cfs_list_empty(&qunit->lq_hash));
qunit_get(qunit);
head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
- list_add(&qunit->lq_hash, head);
+ cfs_list_add(&qunit->lq_hash, head);
QUNIT_SET_STATE(qunit, QUNIT_IN_HASH);
}
qunit->lq_data.qd_id),
qunit->lq_ctxt, 0);
if (lqs && !IS_ERR(lqs)) {
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (qunit->lq_opc == QUOTA_DQACQ)
quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
if (qunit->lq_opc == QUOTA_DQREL)
quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
/* this is for quota_search_lqs */
lqs_putref(lqs);
/* this is for schedule_dqacq */
static void remove_qunit_nolock(struct lustre_qunit *qunit)
{
- LASSERT(!list_empty(&qunit->lq_hash));
+ LASSERT(!cfs_list_empty(&qunit->lq_hash));
LASSERT_SPIN_LOCKED(&qunit_hash_lock);
- list_del_init(&qunit->lq_hash);
+ cfs_list_del_init(&qunit->lq_hash);
QUNIT_SET_STATE(qunit, QUNIT_RM_FROM_HASH);
qunit_put(qunit);
}
return NULL;
}
- INIT_LIST_HEAD(&qunit->lq_hash);
- qunit->lq_lock = SPIN_LOCK_UNLOCKED;
- init_waitqueue_head(&qunit->lq_waitq);
- atomic_set(&qunit->lq_refcnt, 1);
+ CFS_INIT_LIST_HEAD(&qunit->lq_hash);
+ qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
+ cfs_waitq_init(&qunit->lq_waitq);
+ cfs_atomic_set(&qunit->lq_refcnt, 1);
qunit->lq_ctxt = qctxt;
qunit->lq_data.qd_id = oqctl->qc_id;
qunit->lq_data.qd_flags = oqctl->qc_type;
qunit->lq_opc = QUOTA_LAST_OPC;
while (1) {
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
find_qunit = dqacq_in_flight(qctxt, &qunit->lq_data);
if (find_qunit) {
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
qunit_put(find_qunit);
qctxt_wait_pending_dqacq(qctxt, oqctl->qc_id,
oqctl->qc_type, isblk);
break;
}
insert_qunit_nolock(qctxt, qunit);
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
return qunit;
}
}
LASSERT(qunit->lq_opc == QUOTA_LAST_OPC);
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
remove_qunit_nolock(qunit);
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, QUOTA_REQ_RETURNED);
- wake_up(&qunit->lq_waitq);
+ cfs_waitq_signal(&qunit->lq_waitq);
qunit_put(qunit);
}
}
out:
/* remove the qunit from hash */
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
qunit = dqacq_in_flight(qctxt, qdata);
/* this qunit has been removed by qctxt_cleanup() */
if (!qunit) {
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
QDATA_DEBUG(qdata, "%s is discarded because qunit isn't found\n",
opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
RETURN(err);
/* remove this qunit from lq_hash so that new processes cannot be added
* to qunit->lq_waiters */
remove_qunit_nolock(qunit);
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
compute_lqs_after_removing_qunit(qunit);
rc = QUOTA_REQ_RETURNED;
QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, rc);
/* wake up all waiters */
- wake_up_all(&qunit->lq_waitq);
+ cfs_waitq_broadcast(&qunit->lq_waitq);
/* this is for dqacq_in_flight() */
qunit_put(qunit);
LASSERT(req);
LASSERT(req->rq_import);
- down_read(&obt->obt_rwsem);
+ cfs_down_read(&obt->obt_rwsem);
/* if a quota req timeouts or is dropped, we should update quota
* statistics which will be handled in dqacq_completion. And in
* this situation we should get qdata from request instead of
rc = dqacq_completion(obd, qctxt, qdata, rc,
lustre_msg_get_opc(req->rq_reqmsg));
- up_read(&obt->obt_rwsem);
+ cfs_up_read(&obt->obt_rwsem);
RETURN(rc);
}
int rc;
ENTRY;
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
/* quit waiting when mds is back or qctxt is cleaned up */
rc = qctxt->lqc_import || !qctxt->lqc_valid;
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
RETURN(rc);
}
int i;
ENTRY;
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
for (i = 0; i < NR_DQHASH; i++) {
- list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
+ cfs_list_for_each_entry_safe(qunit, tmp, &qunit_hash[i],
+ lq_hash) {
if (qunit->lq_ctxt != qctxt)
continue;
* if no others change it, then the waiters will return
* -EAGAIN to caller who can perform related quota
* acq/rel if necessary. */
- wake_up_all(&qunit->lq_waitq);
+ cfs_waitq_broadcast(&qunit->lq_waitq);
}
}
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
EXIT;
}
int rc = 0;
ENTRY;
- spin_lock(&qunit->lq_lock);
+ cfs_spin_lock(&qunit->lq_lock);
switch (qunit->lq_state) {
case QUNIT_IN_HASH:
case QUNIT_RM_FROM_HASH:
default:
CERROR("invalid qunit state %d\n", qunit->lq_state);
}
- spin_unlock(&qunit->lq_lock);
+ cfs_spin_unlock(&qunit->lq_lock);
if (!rc) {
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
rc = !qctxt->lqc_valid;
if (!is_master)
rc |= !qctxt->lqc_import;
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
RETURN(rc);
{
/* revoke lqs_xxx_rec which is computed in check_cur_qunit
* b=18630 */
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
quota_compute_lqs(qdata, lqs, 0, (opc == QUOTA_DQACQ) ? 1 : 0);
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
}
static int
ENTRY;
LASSERT(opc == QUOTA_DQACQ || opc == QUOTA_DQREL);
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(qdata), qdata->qd_id),
qctxt, 0);
OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_SD, 5);
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
qunit = dqacq_in_flight(qctxt, qdata);
if (qunit) {
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
qunit_put(empty);
revoke_lqs_rec(lqs, qdata, opc);
qunit = empty;
qunit_get(qunit);
insert_qunit_nolock(qctxt, qunit);
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
/* From here, the quota request will be sent anyway.
* When this qdata request returned or is cancelled,
/* this is for qunit_get() */
qunit_put(qunit);
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
if (opc == QUOTA_DQACQ)
lprocfs_counter_add(qctxt->lqc_stats,
RETURN(rc ? rc : rc2);
}
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_import) {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
QDATA_DEBUG(qdata, "lqc_import is invalid.\n");
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
remove_qunit_nolock(qunit);
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
compute_lqs_after_removing_qunit(qunit);
QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, -EAGAIN);
- wake_up_all(&qunit->lq_waitq);
+ cfs_waitq_broadcast(&qunit->lq_waitq);
/* this is for qunit_get() */
qunit_put(qunit);
/* this for alloc_qunit() */
qunit_put(qunit);
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (wait && !qctxt->lqc_import) {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
LASSERT(oti && oti->oti_thread &&
oti->oti_thread->t_watchdog);
check_qm(qctxt), &lwi);
CDEBUG(D_QUOTA, "wake up when quota master is back\n");
lc_watchdog_touch(oti->oti_thread->t_watchdog,
- GET_TIMEOUT(oti->oti_thread->t_svc));
+ CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
} else {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
RETURN(-EAGAIN);
}
imp = class_import_get(qctxt->lqc_import);
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
/* build dqacq/dqrel request */
LASSERT(imp);
* rc = -EBUSY, it means recovery is happening
* other rc < 0, it means real errors, functions who call
* schedule_dqacq should take care of this */
- spin_lock(&qunit->lq_lock);
+ cfs_spin_lock(&qunit->lq_lock);
rc = qunit->lq_rc;
- spin_unlock(&qunit->lq_lock);
+ cfs_spin_unlock(&qunit->lq_lock);
CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: id(%u) flag(%u) "
"rc(%d) owner(%d)\n", qunit, qunit->lq_data.qd_id,
qunit->lq_data.qd_flags, rc, qunit->lq_owner);
}
qunit_put(qunit);
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
if (opc == QUOTA_DQACQ)
lprocfs_counter_add(qctxt->lqc_stats,
int rc = 0;
ENTRY;
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
qdata.qd_id = id;
qdata.qd_flags = type;
if (isblk)
QDATA_SET_BLK(&qdata);
qdata.qd_count = 0;
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
qunit = dqacq_in_flight(qctxt, &qdata);
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
if (qunit) {
struct qunit_data *p = &qunit->lq_data;
CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: rc(%d) "
"owner(%d)\n", qunit, qunit->lq_rc, qunit->lq_owner);
/* keep same as schedule_dqacq() b=17030 */
- spin_lock(&qunit->lq_lock);
+ cfs_spin_lock(&qunit->lq_lock);
rc = qunit->lq_rc;
- spin_unlock(&qunit->lq_lock);
+ cfs_spin_unlock(&qunit->lq_lock);
/* this is for dqacq_in_flight() */
qunit_put(qunit);
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats,
isblk ? LQUOTA_WAIT_PENDING_BLK_QUOTA :
LQUOTA_WAIT_PENDING_INO_QUOTA,
timediff);
} else {
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats,
isblk ? LQUOTA_NOWAIT_PENDING_BLK_QUOTA :
cfs_waitq_init(&qctxt->lqc_wait_for_qmaster);
cfs_waitq_init(&qctxt->lqc_lqs_waitq);
- atomic_set(&qctxt->lqc_lqs, 0);
- spin_lock_init(&qctxt->lqc_lock);
- spin_lock(&qctxt->lqc_lock);
+ cfs_atomic_set(&qctxt->lqc_lqs, 0);
+ cfs_spin_lock_init(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
qctxt->lqc_handler = handler;
qctxt->lqc_sb = sb;
qctxt->lqc_obt = obt;
qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
* after the last shrinking */
qctxt->lqc_sync_blk = 0;
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
qctxt->lqc_lqs_hash = cfs_hash_create("LQS_HASH",
HASH_LQS_CUR_BITS,
int rc;
ENTRY;
- rc = !atomic_read(&qctxt->lqc_lqs);
+ rc = !cfs_atomic_read(&qctxt->lqc_lqs);
RETURN(rc);
}
void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
{
struct lustre_qunit *qunit, *tmp;
- struct list_head tmp_list;
+ cfs_list_t tmp_list;
struct l_wait_info lwi = { 0 };
struct obd_device_target *obt = qctxt->lqc_obt;
int i;
CFS_INIT_LIST_HEAD(&tmp_list);
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
qctxt->lqc_valid = 0;
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
- spin_lock(&qunit_hash_lock);
+ cfs_spin_lock(&qunit_hash_lock);
for (i = 0; i < NR_DQHASH; i++) {
- list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
+ cfs_list_for_each_entry_safe(qunit, tmp, &qunit_hash[i],
+ lq_hash) {
if (qunit->lq_ctxt != qctxt)
continue;
remove_qunit_nolock(qunit);
- list_add(&qunit->lq_hash, &tmp_list);
+ cfs_list_add(&qunit->lq_hash, &tmp_list);
}
}
- spin_unlock(&qunit_hash_lock);
+ cfs_spin_unlock(&qunit_hash_lock);
- list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
- list_del_init(&qunit->lq_hash);
+ cfs_list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
+ cfs_list_del_init(&qunit->lq_hash);
compute_lqs_after_removing_qunit(qunit);
/* wake up all waiters */
QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, 0);
- wake_up_all(&qunit->lq_waitq);
+ cfs_waitq_broadcast(&qunit->lq_waitq);
qunit_put(qunit);
}
* unpredicted. So we must wait until lqc_wait_for_qmaster is empty */
while (cfs_waitq_active(&qctxt->lqc_wait_for_qmaster)) {
cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
- cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
}
cfs_hash_for_each_safe(qctxt->lqc_lqs_hash, hash_put_lqs, NULL);
l_wait_event(qctxt->lqc_lqs_waitq, check_lqs(qctxt), &lwi);
- down_write(&obt->obt_rwsem);
+ cfs_down_write(&obt->obt_rwsem);
cfs_hash_destroy(qctxt->lqc_lqs_hash);
qctxt->lqc_lqs_hash = NULL;
- up_write(&obt->obt_rwsem);
+ cfs_up_write(&obt->obt_rwsem);
ptlrpcd_decref();
struct qslave_recov_thread_data {
struct obd_device *obd;
struct lustre_quota_ctxt *qctxt;
- struct completion comp;
+ cfs_completion_t comp;
};
/* FIXME only recovery block quota by now */
/* for obdfilter */
class_incref(obd, "qslave_recovd_filter", obd);
- complete(&data->comp);
+ cfs_complete(&data->comp);
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (qctxt->lqc_recovery) {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
class_decref(obd, "qslave_recovd_filter", obd);
RETURN(0);
} else {
qctxt->lqc_recovery = 1;
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
for (type = USRQUOTA; type < MAXQUOTAS; type++) {
struct qunit_data qdata;
struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
- struct list_head id_list;
+ cfs_list_t id_list;
struct dquot_id *dqid, *tmp;
int ret;
if (rc)
CERROR("Get ids from quota file failed. (rc:%d)\n", rc);
- list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
- list_del_init(&dqid->di_link);
+ cfs_list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
+ cfs_list_del_init(&dqid->di_link);
/* skip slave recovery on itself */
if (is_master(qctxt))
goto free;
}
}
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
qctxt->lqc_recovery = 0;
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
class_decref(obd, "qslave_recovd_filter", obd);
RETURN(rc);
}
data.obd = obd;
data.qctxt = qctxt;
- init_completion(&data.comp);
+ cfs_init_completion(&data.comp);
- rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
+ rc = cfs_kernel_thread(qslave_recovery_main, &data,
+ CLONE_VM|CLONE_FILES);
if (rc < 0) {
CERROR("Cannot start quota recovery thread: rc %d\n", rc);
goto exit;
}
- wait_for_completion(&data.comp);
+ cfs_wait_for_completion(&data.comp);
exit:
EXIT;
}
{
struct obd_device_target *obt = &obd->u.obt;
struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
- struct list_head id_list;
+ cfs_list_t id_list;
int i, rc;
LASSERT_SEM_LOCKED(&obt->obt_quotachecking);
- INIT_LIST_HEAD(&id_list);
+ CFS_INIT_LIST_HEAD(&id_list);
for (i = 0; i < MAXQUOTAS; i++) {
struct dquot_id *dqid, *tmp;
continue;
}
- list_for_each_entry_safe(dqid, tmp, &id_list,
- di_link) {
+ cfs_list_for_each_entry_safe(dqid, tmp, &id_list,
+ di_link) {
struct lustre_qunit_size *lqs;
- list_del_init(&dqid->di_link);
+ cfs_list_del_init(&dqid->di_link);
lqs = quota_search_lqs(LQS_KEY(i, dqid->di_id),
qctxt, 1);
if (lqs && !IS_ERR(lqs)) {
}
static int
-lqs_compare(void *key, struct hlist_node *hnode)
+lqs_compare(void *key, cfs_hlist_node_t *hnode)
{
struct lustre_qunit_size *q;
int rc;
ENTRY;
LASSERT(key);
- q = hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ q = cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
- spin_lock(&q->lqs_lock);
+ cfs_spin_lock(&q->lqs_lock);
rc = (q->lqs_key == *((unsigned long long *)key));
- spin_unlock(&q->lqs_lock);
+ cfs_spin_unlock(&q->lqs_lock);
RETURN(rc);
}
static void *
-lqs_get(struct hlist_node *hnode)
+lqs_get(cfs_hlist_node_t *hnode)
{
struct lustre_qunit_size *q =
- hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
ENTRY;
__lqs_getref(q);
}
static void *
-lqs_put(struct hlist_node *hnode)
+lqs_put(cfs_hlist_node_t *hnode)
{
struct lustre_qunit_size *q =
- hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
ENTRY;
__lqs_putref(q);
}
static void
-lqs_exit(struct hlist_node *hnode)
+lqs_exit(cfs_hlist_node_t *hnode)
{
struct lustre_qunit_size *q =
- hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
ENTRY;
/*
* lqs also was deleted from table by this time
* so we should have 0 refs.
*/
- LASSERTF(atomic_read(&q->lqs_refcount) == 0,
+ LASSERTF(cfs_atomic_read(&q->lqs_refcount) == 0,
"Busy lqs %p with %d refs\n", q,
- atomic_read(&q->lqs_refcount));
+ cfs_atomic_read(&q->lqs_refcount));
OBD_FREE_PTR(q);
EXIT;
}
int rc = 0;
ENTRY;
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
switch (oqctl->qc_cmd) {
case Q_QUOTAON:
oqctl->qc_id = obt->obt_qfmt; /* override qfmt version */
CDEBUG(D_INFO, "mds_quotactl admin quota command %d, id %u, "
"type %d, failed: rc = %d\n",
oqctl->qc_cmd, oqctl->qc_id, oqctl->qc_type, rc);
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_QUOTA_CTL, timediff);
int rc = 0;
ENTRY;
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
switch (oqctl->qc_cmd) {
case Q_FINVALIDATE:
case Q_QUOTAON:
case Q_QUOTAOFF:
- down(&obt->obt_quotachecking);
+ cfs_down(&obt->obt_quotachecking);
if (oqctl->qc_cmd == Q_FINVALIDATE &&
(obt->obt_qctxt.lqc_flags & UGQUOTA2LQC(oqctl->qc_type))) {
CWARN("quota[%u] is on yet\n", oqctl->qc_type);
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
rc = -EBUSY;
break;
}
else if (quota_is_off(qctxt, oqctl))
rc = -EALREADY;
}
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
}
break;
obd->obd_name, oqctl->qc_cmd);
RETURN(-EFAULT);
}
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_QUOTA_CTL, timediff);
#ifdef HAVE_QUOTA_SUPPORT
static cfs_time_t last_print = 0;
-static spinlock_t last_print_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t last_print_lock = CFS_SPIN_LOCK_UNLOCKED;
static int filter_quota_setup(struct obd_device *obd)
{
struct obd_device_target *obt = &obd->u.obt;
ENTRY;
- init_rwsem(&obt->obt_rwsem);
+ cfs_init_rwsem(&obt->obt_rwsem);
obt->obt_qfmt = LUSTRE_QUOTA_V2;
- sema_init(&obt->obt_quotachecking, 1);
+ cfs_sema_init(&obt->obt_quotachecking, 1);
rc = qctxt_init(obd, NULL);
if (rc)
CERROR("initialize quota context failed! (rc:%d)\n", rc);
LASSERT(imp != NULL);
/* setup the quota context import */
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (qctxt->lqc_import != NULL) {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
if (qctxt->lqc_import == imp)
CDEBUG(D_WARNING, "%s: lqc_import(%p) of obd(%p) was "
"activated already.\n", obd->obd_name, imp, obd);
imp->imp_connect_data.ocd_connect_flags |=
(exp->exp_connect_flags &
(OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated "
"now.\n", obd->obd_name, imp, obd);
/* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
* should be invalid b=12374 */
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (qctxt->lqc_import == imp) {
qctxt->lqc_import = NULL;
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is invalid now.\n",
obd->obd_name, imp, obd);
ptlrpc_cleanup_imp(imp);
dqacq_interrupt(qctxt);
} else {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
RETURN(0);
}
rc);
break;
} else {
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
oa->o_flags |= (cnt == USRQUOTA) ?
OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
"sync_blk(%d)\n", lqs->lqs_bunit_sz,
qctxt->lqc_sync_blk);
lqs_putref(lqs);
continue;
}
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
/* this is for quota_search_lqs */
lqs_putref(lqs);
}
int rc = 0, rc2[2] = { 0, 0 };
ENTRY;
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_valid){
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
RETURN(rc);
}
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
for (i = 0; i < MAXQUOTAS; i++) {
struct lustre_qunit_size *lqs = NULL;
}
rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (!cycle) {
if (isblk) {
pending[i] = count * CFS_PAGE_SIZE;
if (inode) {
mb = pending[i];
rc = fsfilt_get_mblk(obd, qctxt->lqc_sb,
- &mb, inode,frags);
+ &mb, inode,
+ frags);
if (rc)
CERROR("%s: can't get extra "
"meta blocks\n",
rc2[i] = QUOTA_RET_ACQUOTA;
}
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
if (lqs->lqs_blk_rec < 0 &&
qdata[i].qd_count <
* pre-dqacq in time and quota hash on ost is used up, we
* have to wait for the completion of in flight dqacq/dqrel,
* in order to get enough quota for write b=12588 */
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
inode, frags)) &
QUOTA_RET_ACQUOTA) {
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_import && oti) {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
LASSERT(oti && oti->oti_thread &&
oti->oti_thread->t_watchdog);
&lwi);
CDEBUG(D_QUOTA, "wake up when quota master is back\n");
lc_watchdog_touch(oti->oti_thread->t_watchdog,
- GET_TIMEOUT(oti->oti_thread->t_svc));
+ CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
} else {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
cycle++;
if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
lc_watchdog_touch(oti->oti_thread->t_watchdog,
- GET_TIMEOUT(oti->oti_thread->t_svc));
+ CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
count_err++);
- init_waitqueue_head(&waitq);
+ cfs_waitq_init(&waitq);
lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
NULL);
l_wait_event(waitq, 0, &lwi);
}
if (rc < 0 || cycle % 10 == 0) {
- spin_lock(&last_print_lock);
+ cfs_spin_lock(&last_print_lock);
if (last_print == 0 ||
cfs_time_before((last_print + cfs_time_seconds(30)),
cfs_time_current())) {
last_print = cfs_time_current();
- spin_unlock(&last_print_lock);
+ cfs_spin_unlock(&last_print_lock);
CWARN("still haven't managed to acquire quota "
"space from the quota master after %d "
"retries (err=%d, rc=%d)\n",
cycle, count_err - 1, rc);
} else {
- spin_unlock(&last_print_lock);
+ cfs_spin_unlock(&last_print_lock);
}
}
CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
cycle);
}
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats,
isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
if (!ll_sb_any_quota_active(qctxt->lqc_sb))
RETURN(0);
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
for (i = 0; i < MAXQUOTAS; i++) {
struct lustre_qunit_size *lqs = NULL;
continue;
}
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (isblk) {
LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
"there are too many blocks! [id %u] [%c] "
obd->obd_name,
isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
i, pending[i], isblk);
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
/* for quota_search_lqs in pending_commit */
lqs_putref(lqs);
/* for quota_search_lqs in quota_check */
lqs_putref(lqs);
}
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats,
isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
RETURN(0);
}
- init_rwsem(&obt->obt_rwsem);
+ cfs_init_rwsem(&obt->obt_rwsem);
obt->obt_qfmt = LUSTRE_QUOTA_V2;
mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
- sema_init(&obt->obt_quotachecking, 1);
+ cfs_sema_init(&obt->obt_quotachecking, 1);
/* initialize quota master and quota context */
- sema_init(&mds->mds_qonoff_sem, 1);
+ cfs_sema_init(&mds->mds_qonoff_sem, 1);
rc = qctxt_init(obd, dqacq_handler);
if (rc) {
CERROR("%s: initialize quota context failed! (rc:%d)\n",
memset(&oqctl, 0, sizeof(oqctl));
oqctl.qc_type = UGQUOTA;
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
mds_admin_quota_off(obd, &oqctl);
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
RETURN(0);
}
#endif /* __KERNEL__ */
struct osc_quota_info {
- struct list_head oqi_hash; /* hash list */
+ cfs_list_t oqi_hash; /* hash list */
struct client_obd *oqi_cli; /* osc obd */
unsigned int oqi_id; /* uid/gid of a file */
short oqi_type; /* quota type */
};
-spinlock_t qinfo_list_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t qinfo_list_lock = CFS_SPIN_LOCK_UNLOCKED;
-static struct list_head qinfo_hash[NR_DQHASH];
+static cfs_list_t qinfo_hash[NR_DQHASH];
/* SLAB cache for client quota context */
cfs_mem_cache_t *qinfo_cachep = NULL;
/* caller must hold qinfo_list_lock */
static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
{
- struct list_head *head = qinfo_hash +
+ cfs_list_t *head = qinfo_hash +
hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_add(&oqi->oqi_hash, head);
+ cfs_list_add(&oqi->oqi_hash, head);
}
/* caller must hold qinfo_list_lock */
static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
{
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_del_init(&oqi->oqi_hash);
+ cfs_list_del_init(&oqi->oqi_hash);
}
/* caller must hold qinfo_list_lock */
ENTRY;
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
+ cfs_list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
if (oqi->oqi_cli == cli &&
oqi->oqi_id == id && oqi->oqi_type == type)
return oqi;
int cnt, rc = QUOTA_OK;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
struct osc_quota_info *oqi = NULL;
break;
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
if (rc == NO_QUOTA)
CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
break;
}
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
old = find_qinfo(cli, id, cnt);
if (old && !noquota)
remove_qinfo_hash(old);
else if (!old && noquota)
insert_qinfo_hash(oqi);
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
if (old || !noquota)
free_qinfo(oqi);
int i;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (i = 0; i < NR_DQHASH; i++) {
- list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
+ cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
if (oqi->oqi_cli != cli)
continue;
remove_qinfo_hash(oqi);
free_qinfo(oqi);
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
RETURN(0);
}
int i, rc;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (i = 0; i < NR_DQHASH; i++) {
- list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
+ cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
remove_qinfo_hash(oqi);
free_qinfo(oqi);
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
rc = cfs_mem_cache_destroy(qinfo_cachep);
LASSERTF(rc == 0, "couldn't destory qinfo_cachep slab\n");
lqs->lqs_bunit_sz, lqs->lqs_btune_sz, lqs->lqs_iunit_sz, \
lqs->lqs_itune_sz, lqs->lqs_bwrite_pending, \
lqs->lqs_iwrite_pending, lqs->lqs_ino_rec, \
- lqs->lqs_blk_rec, atomic_read(&lqs->lqs_refcount), ## arg);
+ lqs->lqs_blk_rec, cfs_atomic_read(&lqs->lqs_refcount), ## arg);
/* quota_context.c */
static inline int client_quota_should_resend(int resend, struct client_obd *cli)
{
- return (atomic_read(&cli->cl_quota_resends) >= 0) ?
- atomic_read(&cli->cl_quota_resends) > resend : 1;
+ return (cfs_atomic_read(&cli->cl_quota_resends) >= 0) ?
+ cfs_atomic_read(&cli->cl_quota_resends) > resend : 1;
}
#endif
#ifdef HAVE_QUOTA_SUPPORT
/* lock ordering: mds->mds_qonoff_sem > dquot->dq_sem */
-static struct list_head lustre_dquot_hash[NR_DQHASH];
-static spinlock_t dquot_hash_lock = SPIN_LOCK_UNLOCKED;
+static cfs_list_t lustre_dquot_hash[NR_DQHASH];
+static cfs_spinlock_t dquot_hash_lock = CFS_SPIN_LOCK_UNLOCKED;
cfs_mem_cache_t *lustre_dquot_cachep;
/* FIXME cleanup work ?? */
for (i = 0; i < NR_DQHASH; i++) {
- LASSERT(list_empty(lustre_dquot_hash + i));
+ LASSERT(cfs_list_empty(lustre_dquot_hash + i));
}
if (lustre_dquot_cachep) {
int rc;
ENTRY;
LASSERT_SPIN_LOCKED(&dquot_hash_lock);
- list_for_each_entry(dquot, &lustre_dquot_hash[hashent], dq_hash) {
+ cfs_list_for_each_entry(dquot, &lustre_dquot_hash[hashent], dq_hash) {
if (dquot->dq_info == lqi &&
dquot->dq_id == id && dquot->dq_type == type)
RETURN(dquot);
RETURN(NULL);
CFS_INIT_LIST_HEAD(&dquot->dq_hash);
- init_mutex_locked(&dquot->dq_sem);
+ cfs_init_mutex_locked(&dquot->dq_sem);
dquot->dq_refcnt = 1;
dquot->dq_info = lqi;
dquot->dq_id = id;
static void insert_dquot_nolock(struct lustre_dquot *dquot)
{
- struct list_head *head = lustre_dquot_hash +
+ cfs_list_t *head = lustre_dquot_hash +
dquot_hashfn(dquot->dq_info, dquot->dq_id, dquot->dq_type);
- LASSERT(list_empty(&dquot->dq_hash));
- list_add(&dquot->dq_hash, head);
+ LASSERT(cfs_list_empty(&dquot->dq_hash));
+ cfs_list_add(&dquot->dq_hash, head);
}
static void remove_dquot_nolock(struct lustre_dquot *dquot)
{
- LASSERT(!list_empty(&dquot->dq_hash));
- list_del_init(&dquot->dq_hash);
+ LASSERT(!cfs_list_empty(&dquot->dq_hash));
+ cfs_list_del_init(&dquot->dq_hash);
}
static void lustre_dqput(struct lustre_dquot *dquot)
{
ENTRY;
- spin_lock(&dquot_hash_lock);
+ cfs_spin_lock(&dquot_hash_lock);
LASSERT(dquot->dq_refcnt);
dquot->dq_refcnt--;
if (!dquot->dq_refcnt) {
remove_dquot_nolock(dquot);
free_dquot(dquot);
}
- spin_unlock(&dquot_hash_lock);
+ cfs_spin_unlock(&dquot_hash_lock);
EXIT;
}
if ((empty = alloc_dquot(lqi, id, type)) == NULL)
RETURN(ERR_PTR(-ENOMEM));
- spin_lock(&dquot_hash_lock);
+ cfs_spin_lock(&dquot_hash_lock);
if ((dquot = find_dquot(hashent, lqi, id, type)) != NULL) {
dquot->dq_refcnt++;
- spin_unlock(&dquot_hash_lock);
+ cfs_spin_unlock(&dquot_hash_lock);
free_dquot(empty);
} else {
int rc;
dquot = empty;
insert_dquot_nolock(dquot);
- spin_unlock(&dquot_hash_lock);
+ cfs_spin_unlock(&dquot_hash_lock);
rc = fsfilt_dquot(obd, dquot, QFILE_RD_DQUOT);
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
if (rc) {
CERROR("can't read dquot from admin quotafile! "
"(rc:%d)\n", rc);
oqaq->qaq_flags = type;
lqs = quota_search_lqs(LQS_KEY(type, id), qctxt, 0);
if (lqs && !IS_ERR(lqs)) {
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
oqaq->qaq_bunit_sz = lqs->lqs_bunit_sz;
oqaq->qaq_iunit_sz = lqs->lqs_iunit_sz;
oqaq->qaq_flags = lqs->lqs_flags;
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
lqs_putref(lqs);
} else {
CDEBUG(D_QUOTA, "Can't find the lustre qunit size!\n");
if (!oqaq)
GOTO(out, rc = -ENOMEM);
- down(&dquot->dq_sem);
+ cfs_down(&dquot->dq_sem);
init_oqaq(oqaq, qctxt, id, type);
rc = dquot_create_oqaq(qctxt, dquot, ost_num, mdt_num,
else
qid[USRQUOTA] = dquot->dq_id;
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
rc = qctxt_adjust_qunit(obd, qctxt, qid, is_blk, 0, NULL);
if (rc == -EDQUOT || rc == -EBUSY) {
RETURN(rc);
out_sem:
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
goto out;
}
DQUOT_DEBUG(dquot, "get dquot in dqacq_handler\n");
QINFO_DEBUG(dquot->dq_info, "get dquot in dqadq_handler\n");
- down(&mds->mds_qonoff_sem);
- down(&dquot->dq_sem);
+ cfs_down(&mds->mds_qonoff_sem);
+ cfs_down(&dquot->dq_sem);
if (dquot->dq_status & DQ_STATUS_RECOVERY) {
DQUOT_DEBUG(dquot, "this dquot is under recovering.\n");
rc = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
EXIT;
out:
- up(&dquot->dq_sem);
- up(&mds->mds_qonoff_sem);
+ cfs_up(&dquot->dq_sem);
+ cfs_up(&mds->mds_qonoff_sem);
lustre_dqput(dquot);
if (rc != -EDQUOT)
dqacq_adjust_qunit_sz(obd, qdata->qd_id, QDATA_IS_GRP(qdata),
qdata->qd_qunit = QDATA_IS_BLK(qdata) ? qctxt->lqc_bunit_sz :
qctxt->lqc_iunit_sz;
} else {
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
qdata->qd_qunit = QDATA_IS_BLK(qdata) ? lqs->lqs_bunit_sz :
lqs->lqs_iunit_sz;
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
}
if (QDATA_IS_BLK(qdata))
oqctl->qc_type != UGQUOTA)
RETURN(-EINVAL);
- down(&obt->obt_quotachecking);
+ cfs_down(&obt->obt_quotachecking);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
for (i = 0; i < MAXQUOTAS; i++) {
struct file *fp;
filp_close(fp, 0);
}
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
RETURN(rc ? : rc1);
}
oqctl->qc_type != UGQUOTA)
RETURN(-EINVAL);
- down(&obt->obt_quotachecking);
+ cfs_down(&obt->obt_quotachecking);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
oqctl->qc_cmd = Q_FINVALIDATE;
rc = fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);
if (!rc)
rc = obd_quotactl(mds->mds_osc_exp, oqctl);
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
RETURN(rc);
}
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
for (i = 0; i < MAXQUOTAS && !rc; i++) {
struct file *fp;
filp_close(fp, 0);
qinfo->qi_files[i] = NULL;
}
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
RETURN(rc);
oqctl->qc_type != UGQUOTA)
RETURN(-EINVAL);
- down(&obt->obt_quotachecking);
+ cfs_down(&obt->obt_quotachecking);
if (obt->obt_qctxt.lqc_immutable) {
LCONSOLE_ERROR("Failed to turn Quota on, immutable mode "
"(is SOM enabled?)\n");
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
RETURN(-ECANCELED);
}
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
rc2 = mds_admin_quota_on(obd, oqctl);
if (rc2 && rc2 != -EALREADY) {
CWARN("mds quota[%d] is failed to be on for %d\n", oqctl->qc_type, rc2);
EXIT;
out:
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
return rc ? : (rc1 ? : rc2);
}
RETURN(-EINVAL);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
/* close admin quota files */
rc2 = mds_admin_quota_off(obd, oqctl);
if (rc2 && rc2 != -EALREADY) {
EXIT;
out:
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
return rc ? : (rc1 ? : rc2);
}
int rc;
ENTRY;
- down(&obt->obt_quotachecking);
+ cfs_down(&obt->obt_quotachecking);
rc = do_mds_quota_off(obd, oqctl);
- up(&obt->obt_quotachecking);
+ cfs_up(&obt->obt_quotachecking);
RETURN(rc);
}
oqctl->qc_type != GRPQUOTA)
RETURN(-EINVAL);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
if (qinfo->qi_files[oqctl->qc_type] == NULL) {
CWARN("quota[%u] is off\n", oqctl->qc_type);
GOTO(out, rc = -ESRCH);
EXIT;
out:
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
return rc;
}
oqctl->qc_type != GRPQUOTA)
RETURN(-EINVAL);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
if (qinfo->qi_files[oqctl->qc_type] == NULL) {
CWARN("quota[%u] is off\n", oqctl->qc_type);
GOTO(out, rc = -ESRCH);
EXIT;
out:
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
return rc;
}
OBD_ALLOC_PTR(oqaq);
if (!oqaq)
RETURN(-ENOMEM);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
init_oqaq(oqaq, qctxt, oqctl->qc_id, oqctl->qc_type);
if (qinfo->qi_files[oqctl->qc_type] == NULL) {
DQUOT_DEBUG(dquot, "get dquot in mds_set_blk\n");
QINFO_DEBUG(dquot->dq_info, "get dquot in mds_set_blk\n");
- down(&dquot->dq_sem);
+ cfs_down(&dquot->dq_sem);
if (dquot->dq_status) {
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
lustre_dqput(dquot);
GOTO(out_sem, rc = -EBUSY);
}
rc = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
if (rc) {
CERROR("set limit failed! (rc:%d)\n", rc);
goto out;
}
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
adjust_lqs(obd, oqaq);
orig_set = ihardlimit || isoftlimit;
now_set = dqblk->dqb_ihardlimit || dqblk->dqb_isoftlimit;
if (dqblk->dqb_valid & QIF_ILIMITS && orig_set != now_set) {
- down(&dquot->dq_sem);
+ cfs_down(&dquot->dq_sem);
dquot->dq_dqb.dqb_curinodes = 0;
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
rc = mds_init_slave_ilimits(obd, oqctl, orig_set);
if (rc) {
CERROR("init slave ilimits failed! (rc:%d)\n", rc);
orig_set = bhardlimit || bsoftlimit;
now_set = dqblk->dqb_bhardlimit || dqblk->dqb_bsoftlimit;
if (dqblk->dqb_valid & QIF_BLIMITS && orig_set != now_set) {
- down(&dquot->dq_sem);
+ cfs_down(&dquot->dq_sem);
dquot->dq_dqb.dqb_curspace = 0;
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
rc = mds_init_slave_blimits(obd, oqctl, orig_set);
if (rc) {
CERROR("init slave blimits failed! (rc:%d)\n", rc);
}
revoke_out:
- down(&mds->mds_qonoff_sem);
- down(&dquot->dq_sem);
+ cfs_down(&mds->mds_qonoff_sem);
+ cfs_down(&dquot->dq_sem);
if (rc) {
/* cancel previous setting */
dquot->dq_dqb.dqb_ihardlimit = ihardlimit;
dquot->dq_dqb.dqb_itime = itime;
}
rc2 = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
out:
- down(&dquot->dq_sem);
+ cfs_down(&dquot->dq_sem);
dquot->dq_status &= ~DQ_STATUS_SET;
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
lustre_dqput(dquot);
EXIT;
out_sem:
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
if (oqaq)
OBD_FREE_PTR(oqaq);
oqctl->qc_type != GRPQUOTA)
RETURN(-EINVAL);
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
dqblk->dqb_valid = 0;
if (qinfo->qi_files[oqctl->qc_type] == NULL) {
CWARN("quota[%u] is off\n", oqctl->qc_type);
if (IS_ERR(dquot))
GOTO(out, rc = PTR_ERR(dquot));
- down(&dquot->dq_sem);
+ cfs_down(&dquot->dq_sem);
dqblk->dqb_ihardlimit = dquot->dq_dqb.dqb_ihardlimit;
dqblk->dqb_isoftlimit = dquot->dq_dqb.dqb_isoftlimit;
dqblk->dqb_bhardlimit = dquot->dq_dqb.dqb_bhardlimit;
dqblk->dqb_btime = dquot->dq_dqb.dqb_btime;
dqblk->dqb_itime = dquot->dq_dqb.dqb_itime;
dqblk->dqb_valid |= QIF_LIMITS | QIF_TIMES;
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
lustre_dqput(dquot);
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
/* the usages in admin quota file is inaccurate */
dqblk->dqb_curinodes = 0;
EXIT;
return rc;
out:
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
return rc;
}
RETURN(PTR_ERR(dquot));
}
- down(&dquot->dq_sem);
+ cfs_down(&dquot->dq_sem);
/* don't recovery the dquot without limits or under setting */
if (!(dquot->dq_dqb.dqb_bhardlimit || dquot->dq_dqb.dqb_bsoftlimit) ||
GOTO(skip, rc = 0);
dquot->dq_status |= DQ_STATUS_RECOVERY;
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
/* get real bhardlimit from all slaves. */
qctl->qc_cmd = Q_GETOQUOTA;
total_limits += qctl->qc_dqblk.dqb_bhardlimit;
/* amend the usage of the administrative quotafile */
- down(&mds->mds_qonoff_sem);
- down(&dquot->dq_sem);
+ cfs_down(&mds->mds_qonoff_sem);
+ cfs_down(&dquot->dq_sem);
dquot->dq_dqb.dqb_curspace = total_limits << QUOTABLOCK_BITS;
if (rc)
CERROR("write dquot failed! (rc:%d)\n", rc);
- up(&dquot->dq_sem);
- up(&mds->mds_qonoff_sem);
+ cfs_up(&dquot->dq_sem);
+ cfs_up(&mds->mds_qonoff_sem);
EXIT;
out:
- down(&dquot->dq_sem);
+ cfs_down(&dquot->dq_sem);
dquot->dq_status &= ~DQ_STATUS_RECOVERY;
skip:
- up(&dquot->dq_sem);
+ cfs_up(&dquot->dq_sem);
lustre_dqput(dquot);
OBD_FREE_PTR(qctl);
struct qmaster_recov_thread_data {
struct obd_device *obd;
- struct completion comp;
+ cfs_completion_t comp;
};
static int qmaster_recovery_main(void *arg)
/* for lov */
class_incref(mds->mds_osc_obd, "qmaster_recovd_lov", mds->mds_osc_obd);
- complete(&data->comp);
+ cfs_complete(&data->comp);
for (type = USRQUOTA; type < MAXQUOTAS; type++) {
- struct list_head id_list;
+ cfs_list_t id_list;
struct dquot_id *dqid, *tmp;
- down(&mds->mds_qonoff_sem);
+ cfs_down(&mds->mds_qonoff_sem);
if (qinfo->qi_files[type] == NULL) {
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
continue;
}
CFS_INIT_LIST_HEAD(&id_list);
rc = fsfilt_qids(obd, qinfo->qi_files[type], NULL, type,
&id_list);
- up(&mds->mds_qonoff_sem);
+ cfs_up(&mds->mds_qonoff_sem);
if (rc)
CERROR("error get ids from admin quotafile.(%d)\n", rc);
- list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
- list_del_init(&dqid->di_link);
+ cfs_list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
+ cfs_list_del_init(&dqid->di_link);
if (rc)
goto free;
if (unlikely(!mds->mds_quota || obd->obd_stopping))
RETURN(rc);
- mutex_down(&obd->obd_dev_sem);
+ cfs_mutex_down(&obd->obd_dev_sem);
if (mds->mds_lov_desc.ld_active_tgt_count != mds->mds_lov_objid_count) {
CWARN("Only %u/%u OSTs are active, abort quota recovery\n",
mds->mds_lov_desc.ld_active_tgt_count,
mds->mds_lov_objid_count);
- mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_up(&obd->obd_dev_sem);
RETURN(rc);
}
- mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_up(&obd->obd_dev_sem);
data.obd = obd;
- init_completion(&data.comp);
+ cfs_init_completion(&data.comp);
- rc = kernel_thread(qmaster_recovery_main, &data, CLONE_VM|CLONE_FILES);
+ rc = cfs_kernel_thread(qmaster_recovery_main, &data,
+ CLONE_VM|CLONE_FILES);
if (rc < 0)
CERROR("%s: cannot start quota recovery thread: rc %d\n",
obd->obd_name, rc);
- wait_for_completion(&data.comp);
+ cfs_wait_for_completion(&data.comp);
RETURN(rc);
}
static struct it_node {
struct interval_node node;
- struct list_head list;
+ cfs_list_t list;
int hit, valid;
} *it_array;
static int it_count;
/* list */
contended_count = 0;
gettimeofday(&start, NULL);
- list_for_each_entry(n, &header, list) {
+ cfs_list_for_each_entry(n, &header, list) {
if (extent_overlapped(&ext, &n->node.in_extent)) {
count = LOOP_COUNT;
while (count--);
__F(&n->node.in_extent));
interval_erase(&n->node, &root);
n->valid = 0;
- list_del_init(&n->list);
+ cfs_list_del_init(&n->list);
} else {
__u64 low, high;
low = (random() % max_count) & ALIGN_MASK;
dprintf("Adding a node "__S"\n",
__F(&n->node.in_extent));
n->valid = 1;
- list_add(&n->list, &header);
+ cfs_list_add(&n->list, &header);
}
}
n->hit = 0;
n->valid = 1;
if (i == 0)
- list_add_tail(&n->list, &header);
+ cfs_list_add_tail(&n->list, &header);
else
- list_add_tail(&n->list, &it_array[rand()%i].list);
+ cfs_list_add_tail(&n->list, &it_array[rand()%i].list);
}
return root;
memset(buf, 0, sizeof(rawbuf));
data->ioc_version = OBD_IOCTL_VERSION;
- data->ioc_inllen1 = sizeof(rawbuf) - size_round(sizeof(*data));
- data->ioc_inlbuf1 = buf + size_round(sizeof(*data));
+ data->ioc_inllen1 = sizeof(rawbuf) - cfs_size_round(sizeof(*data));
+ data->ioc_inlbuf1 = buf + cfs_size_round(sizeof(*data));
data->ioc_len = obd_ioctl_packlen(data);
rc = l2_ioctl(OBD_DEV_ID, OBD_GET_VERSION, buf);
for (index = 0;; index++) {
memset(buf, 0, sizeof(rawbuf));
data->ioc_version = OBD_IOCTL_VERSION;
- data->ioc_inllen1 = sizeof(rawbuf) - size_round(sizeof(*data));
- data->ioc_inlbuf1 = buf + size_round(sizeof(*data));
+ data->ioc_inllen1 =
+ sizeof(rawbuf) - cfs_size_round(sizeof(*data));
+ data->ioc_inlbuf1 = buf + cfs_size_round(sizeof(*data));
data->ioc_len = obd_ioctl_packlen(data);
data->ioc_count = index;
l_cond_broadcast(&shared_data->cond);
else
l_cond_wait(&shared_data->cond,
- &shared_data->mutex);
+ &shared_data->mutex);
shmem_unlock ();
}
memset(&data, 0x00, sizeof(data));
data.ioc_dev = cur_device;
- data.ioc_inllen1 = sizeof(rawbuf) - size_round(sizeof(data));
+ data.ioc_inllen1 = sizeof(rawbuf) - cfs_size_round(sizeof(data));
memset(buf, 0, sizeof(rawbuf));
rc = obd_ioctl_pack(&data, &buf, sizeof(rawbuf));
if (rc) {
data.ioc_dev = cur_device;
data.ioc_inllen1 = strlen(argv[1]) + 1;
data.ioc_inlbuf1 = argv[1];
- data.ioc_inllen2 = sizeof(rawbuf) - size_round(sizeof(data)) -
- size_round(data.ioc_inllen1);
+ data.ioc_inllen2 = sizeof(rawbuf) - cfs_size_round(sizeof(data)) -
+ cfs_size_round(data.ioc_inllen1);
memset(buf, 0, sizeof(rawbuf));
rc = obd_ioctl_pack(&data, &buf, sizeof(rawbuf));
if (rc) {
data.ioc_inllen3 = strlen(to) + 1;
data.ioc_inlbuf3 = to;
}
- data.ioc_inllen4 = sizeof(rawbuf) - size_round(sizeof(data)) -
- size_round(data.ioc_inllen1) -
- size_round(data.ioc_inllen2) -
- size_round(data.ioc_inllen3);
+ data.ioc_inllen4 = sizeof(rawbuf) - cfs_size_round(sizeof(data)) -
+ cfs_size_round(data.ioc_inllen1) -
+ cfs_size_round(data.ioc_inllen2) -
+ cfs_size_round(data.ioc_inllen3);
memset(buf, 0, sizeof(rawbuf));
rc = obd_ioctl_pack(&data, &buf, sizeof(rawbuf));
if (rc) {
data.ioc_inllen3 = strlen(to) + 1;
data.ioc_inlbuf3 = to;
}
- data.ioc_inllen4 = sizeof(rawbuf) - size_round(sizeof(data)) -
- size_round(data.ioc_inllen1) -
- size_round(data.ioc_inllen2) -
- size_round(data.ioc_inllen3);
+ data.ioc_inllen4 = sizeof(rawbuf) - cfs_size_round(sizeof(data)) -
+ cfs_size_round(data.ioc_inllen1) -
+ cfs_size_round(data.ioc_inllen2) -
+ cfs_size_round(data.ioc_inllen3);
memset(buf, 0, sizeof(rawbuf));
rc = obd_ioctl_pack(&data, &buf, sizeof(rawbuf));
if (rc) {