/#[ \t]*define[ \t]*\bTHREAD_SIZE\b[ \t]*\bTHREAD_SIZE\b/d
s/\bcfs_kernel_cap_t\b/kernel_cap_t/g
/typedef[ \t]*\bkernel_cap_t\b[ \t]*\bkernel_cap_t\b/d
+
+################################################################################
+# macros in libcfs/include/libcfs/linux/portals_compat25.h
+s/\bRECALC_SIGPENDING\b/recalc_sigpending()/g
+/#[ \t]*define[ \t]*\brecalc_sigpending\b *( *)[ \t]*\brecalc_sigpending\b *( *)/d
+s/\bCLEAR_SIGPENDING\b/clear_tsk_thread_flag(current, TIF_SIGPENDING)/g
+/#[ \t]*define[ \t]*\bclear_tsk_thread_flag\b *( *\w* *, *\w* *)[ \t]*\bclear_tsk_thread_flag\b *( *\w* *, *\w* *)/d
+s/\bCURRENT_SECONDS\b/get_seconds()/g
+/#[ \t]*define[ \t]*\bget_seconds\b *( *)[ \t]*\bget_seconds\b *( *)/d
+s/\bCFS_NR_CPUS\b/NR_CPUS/g
+/#[ \t]*define[ \t]*\bNR_CPUS\b[ \t]*\bNR_CPUS\b/d
#define CFS_PUSH_JOURNAL do {;} while(0)
#define CFS_POP_JOURNAL do {;} while(0)
-#define THREAD_NAME(comm, fmt, a...)
/*
* Kernel thread:
*
*/
typedef sigset_t cfs_sigset_t;
-#define SIGNAL_MASK_ASSERT()
/*
* Timer
*/
/* XXX smp_call_function is not supported in xnu */
#define smp_call_function(f, a, n, w) do {} while(0)
int cfs_online_cpus(void);
-#define smp_num_cpus cfs_online_cpus()
/*
* Misc
#define unlikely(exp) (exp)
#endif
-#define lock_kernel() do {} while(0)
-#define unlock_kernel() do {} while(0)
+#define lock_kernel() do {} while(0)
+#define unlock_kernel() do {} while(0)
-#define USERMODEHELPER(path, argv, envp) (0)
+#define call_usermodehelper(path, argv, envp, wait) (0)
#define cfs_module(name, version, init, fini) \
extern kern_return_t _start(kmod_info_t *ki, void *data); \
int rc;
cfs_race_state = 0;
CERROR("cfs_race id %x sleeping\n", id);
- cfs_wait_event_interruptible(cfs_race_waitq,
- cfs_race_state != 0, rc);
+ rc = wait_event_interruptible(cfs_race_waitq,
+ cfs_race_state != 0);
CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
} else {
CERROR("cfs_fail_race id %x waking\n", id);
#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
#endif
-#ifdef NR_CPUS
-#define CFS_NR_CPUS NR_CPUS
-#else
-#define CFS_NR_CPUS 1
+#ifndef NR_CPUS
+#define NR_CPUS 1
#endif
/*
static inline time_t cfs_time_current_sec(void)
{
- return CURRENT_SECONDS;
+ return get_seconds();
}
static inline void cfs_fs_time_current(cfs_fs_time_t *t)
#ifndef __LIBCFS_LINUX_PORTALS_COMPAT_H__
#define __LIBCFS_LINUX_PORTALS_COMPAT_H__
-#include <net/sock.h>
-
-// XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
-#if defined(SPINLOCK_DEBUG) && SPINLOCK_DEBUG
-# define SIGNAL_MASK_ASSERT() \
- LASSERT(current->sighand->siglock.magic == SPINLOCK_MAGIC)
-#else
-# define SIGNAL_MASK_ASSERT()
-#endif
-// XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
-
-#define SIGNAL_MASK_LOCK(task, flags) \
- spin_lock_irqsave(&task->sighand->siglock, flags)
-#define SIGNAL_MASK_UNLOCK(task, flags) \
- spin_unlock_irqrestore(&task->sighand->siglock, flags)
-#define USERMODEHELPER(path, argv, envp) \
- call_usermodehelper(path, argv, envp, 1)
-#define RECALC_SIGPENDING recalc_sigpending()
-#define CLEAR_SIGPENDING clear_tsk_thread_flag(current, \
- TIF_SIGPENDING)
-# define CURRENT_SECONDS get_seconds()
-# define smp_num_cpus num_online_cpus()
-
-#define cfs_wait_event_interruptible(wq, condition, ret) \
- ret = wait_event_interruptible(wq, condition)
-#define cfs_wait_event_interruptible_exclusive(wq, condition, ret) \
- ret = wait_event_interruptible_exclusive(wq, condition)
-
-#define THREAD_NAME(comm, len, fmt, a...) \
- snprintf(comm, len, fmt, ## a)
-
-/* 2.6 alloc_page users can use page->lru */
-#define PAGE_LIST_ENTRY lru
-#define PAGE_LIST(page) ((page)->lru)
-
#ifndef __user
#define __user
#endif
-#ifndef __fls
-#define __cfs_fls fls
-#else
-#define __cfs_fls __fls
-#endif
-
#ifdef HAVE_5ARGS_SYSCTL_PROC_HANDLER
#define ll_proc_dointvec(table, write, filp, buffer, lenp, ppos) \
proc_dointvec(table, write, buffer, lenp, ppos);
}
/* using binary seach */
-static __inline__ unsigned long __cfs_fls(long data)
+static __inline__ unsigned long fls(long data)
{
int pos = 32;
return pos;
}
-#define __cfs_ffz(x) __cfs_ffs(~(x))
-#define __cfs_flz(x) __cfs_fls(~(x))
+#define ffz(x) ffs(~(x))
+#define flz(x) fls(~(x))
unsigned long find_next_bit(unsigned long *addr,
unsigned long size, unsigned long offset);
* Signal
*/
-#define SIGNAL_MASK_ASSERT() do {} while(0)
#define SIGNAL_MASK_LOCK(task, flags) do {} while(0)
#define SIGNAL_MASK_UNLOCK(task, flags) do {} while(0)
-#define USERMODEHELPER(path, argv, envp) do {} while(0)
-#define RECALC_SIGPENDING do {} while(0)
-#define CLEAR_SIGPENDING do {} while(0)
-#define CURRENT_SECONDS get_seconds()
+#define call_usermodehelper(path, argv, envp, wait) do {} while(0)
+#define recalc_sigpending() do {} while(0)
+#define clear_tsk_thread_flag(t, f) do {} while(0)
#endif
#define ll_proc_dointvec(table, write, filp, buffer, lenp, ppos) \
cfs_waitq_del(&wq, &__wait); \
} while(0)
-#define cfs_wait_event_interruptible(wq, condition, __ret) \
-do { \
- cfs_waitlink_t __wait; \
- \
- __ret = 0; \
- cfs_waitlink_init(&__wait); \
- while (TRUE) { \
- cfs_waitq_add(&wq, &__wait); \
- if (condition) { \
- break; \
- } \
- cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE); \
- cfs_waitq_del(&wq, &__wait); \
- } \
- cfs_waitq_del(&wq, &__wait); \
-} while(0)
+#define wait_event_interruptible(wq, condition)                 \
+({                                                              \
+        cfs_waitlink_t  __wait;                                 \
+        int             __ret = 0;                              \
+                                                                \
+        cfs_waitlink_init(&__wait);                             \
+        while (TRUE) {                                          \
+                cfs_waitq_add(&wq, &__wait);                    \
+                if (condition) {                                \
+                        break;                                  \
+                }                                               \
+                cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE);\
+                cfs_waitq_del(&wq, &__wait);                    \
+        }                                                       \
+        cfs_waitq_del(&wq, &__wait);                            \
+        __ret;                                                  \
+})
-# define cfs_wait_event_interruptible_exclusive(wq, condition, rc) \
- cfs_wait_event_interruptible(wq, condition, rc)
+# define wait_event_interruptible_exclusive(wq, condition) \
+ wait_event_interruptible(wq, condition)
/*
retval == 0; condition met; we're good.
#define SMP_CACHE_BYTES 128
-#define CFS_NR_CPUS (32)
+#define NR_CPUS (32)
#define smp_num_cpus ((CCHAR)KeNumberProcessors)
#define num_possible_cpus() smp_num_cpus
#define num_online_cpus() smp_num_cpus
#define local_irq_save(x)
#define local_irq_restore(x)
-#define THREAD_NAME
-
#define va_copy(_d, _s) (_d = _s)
char *strnchr(const char *s, size_t count, int c);
#endif
}
total += bd.bd_bucket->hsb_count;
- dist[min(__cfs_fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
+ dist[min(fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
cfs_hash_bd_unlock(hs, &bd, 0);
}
argv[2] = NULL;
- rc = USERMODEHELPER(argv[0], argv, envp);
+ rc = call_usermodehelper(argv[0], argv, envp, 1);
if (rc < 0 && rc != -ENOENT) {
CERROR("Error %d invoking LNET debug log upcall %s %s; "
"check /proc/sys/lnet/debug_log_upcall\n",
LASSERT(argc >= 2);
- rc = USERMODEHELPER(argv[0], argv, envp);
+ rc = call_usermodehelper(argv[0], argv, envp, 1);
if (rc < 0 && rc != -ENOENT) {
CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; "
"check /proc/sys/lnet/upcall\n",
sigset_t
cfs_block_allsigs(void)
{
- unsigned long flags;
- sigset_t old;
-
- SIGNAL_MASK_LOCK(current, flags);
- old = current->blocked;
-        sigfillset(&current->blocked);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
+ unsigned long flags;
+ sigset_t old;
- return old;
+        spin_lock_irqsave(&current->sighand->siglock, flags);
+        old = current->blocked;
+        sigfillset(&current->blocked);
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ return old;
}
sigset_t cfs_block_sigs(unsigned long sigs)
unsigned long flags;
sigset_t old;
- SIGNAL_MASK_LOCK(current, flags);
+        spin_lock_irqsave(&current->sighand->siglock, flags);
         old = current->blocked;
         sigaddsetmask(&current->blocked, sigs);
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
return old;
}
unsigned long flags;
sigset_t old;
- SIGNAL_MASK_LOCK(current, flags);
+        spin_lock_irqsave(&current->sighand->siglock, flags);
         old = current->blocked;
         sigaddsetmask(&current->blocked, ~sigs);
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
-
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
return old;
}
void
cfs_restore_sigs (cfs_sigset_t old)
{
- unsigned long flags;
+ unsigned long flags;
- SIGNAL_MASK_LOCK(current, flags);
- current->blocked = old;
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
+        spin_lock_irqsave(&current->sighand->siglock, flags);
+        current->blocked = old;
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
int
cfs_signal_pending(void)
{
- return signal_pending(current);
+ return signal_pending(current);
}
void
cfs_clear_sigpending(void)
{
- unsigned long flags;
+ unsigned long flags;
- SIGNAL_MASK_LOCK(current, flags);
- CLEAR_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
+        spin_lock_irqsave(&current->sighand->siglock, flags);
+        clear_tsk_thread_flag(current, TIF_SIGPENDING);
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
int
#if !KLWT_SUPPORT
int lwt_enabled;
-lwt_cpu_t lwt_cpus[CFS_NR_CPUS];
+lwt_cpu_t lwt_cpus[NR_CPUS];
#endif
int lwt_pages_per_cpu;
#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
-union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cacheline_aligned;
+union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
};
#define TCD_MAX_TYPES 8
-extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
+extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS];
#define cfs_tcd_for_each(tcd, i, j) \
for (i = 0; cfs_trace_data[i] != NULL; i++) \
* (see, for example, linux-tracefile.h).
*/
-extern char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
+extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
static inline char *cfs_trace_get_console_buffer(void)
return size;
if (first_bit != 0) {
int tmp = (*word++) & (~0UL << first_bit);
- bit = __cfs_ffz(tmp);
+ bit = ffz(tmp);
if (bit < BITS_PER_LONG)
goto found;
word++;
}
while (word <= last) {
if (*word != ~0UL) {
- bit = __cfs_ffz(*word);
+ bit = ffz(*word);
goto found;
}
word++;
while (1) {
int dumplog = 1;
- cfs_wait_event_interruptible(lcw_event_waitq,
- is_watchdog_fired(), rc);
+ rc = wait_event_interruptible(lcw_event_waitq,
+ is_watchdog_fired());
CDEBUG(D_INFO, "Watchdog got woken up...\n");
if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
10 /* 10% pages for CFS_TCD_TYPE_DISPATCH */
};
-char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
+char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
struct rw_semaphore cfs_tracefile_sem;
for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
cfs_trace_data[i] =
kmalloc(sizeof(union cfs_trace_data_union) * \
- CFS_NR_CPUS, GFP_KERNEL);
+ NR_CPUS, GFP_KERNEL);
if (cfs_trace_data[i] == NULL)
goto out;
}
continue;
}
- cfs_wi_sched_unlock(sched);
- cfs_wait_event_interruptible_exclusive(sched->ws_waitq,
- !cfs_wi_sched_cansleep(sched), rc);
- cfs_wi_sched_lock(sched);
+ cfs_wi_sched_unlock(sched);
+ rc = wait_event_interruptible_exclusive(sched->ws_waitq,
+ !cfs_wi_sched_cansleep(sched));
+ cfs_wi_sched_lock(sched);
}
cfs_wi_sched_unlock(sched);
* when there's nothing left to do */
break;
}
- cfs_wait_event_interruptible_exclusive (
- kqswnal_data.kqn_sched_waitq,
- kqswnal_data.kqn_shuttingdown == 2 ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_readyrxds) ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_donetxds) ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_delayedtxds, rc));
+ rc = wait_event_interruptible_exclusive (
+ kqswnal_data.kqn_sched_waitq,
+ kqswnal_data.kqn_shuttingdown == 2 ||
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_readyrxds) ||
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_donetxds) ||
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_delayedtxds));
LASSERT (rc == 0);
} else if (need_resched())
cfs_schedule ();
#define RANAL_CONN_CLOSING 1
#define RANAL_CONN_CLOSED 2
-typedef struct kra_peer
-{
- cfs_list_t rap_list; /* stash on global peer list */
- cfs_list_t rap_connd_list; /* schedule on kra_connd_peers */
- cfs_list_t rap_conns; /* all active connections */
- cfs_list_t rap_tx_queue; /* msgs waiting for a conn */
- lnet_nid_t rap_nid; /* who's on the other end(s) */
- __u32 rap_ip; /* IP address of peer */
- int rap_port; /* port on which peer listens */
- cfs_atomic_t rap_refcount; /* # users */
- int rap_persistence; /* "known" peer refs */
- int rap_connecting; /* connection forming */
- unsigned long rap_reconnect_time; /* CURRENT_SECONDS when reconnect OK */
- unsigned long rap_reconnect_interval; /* exponential backoff */
+typedef struct kra_peer {
+ cfs_list_t rap_list; /* stash on global peer list */
+ cfs_list_t rap_connd_list; /* schedule on kra_connd_peers */
+ cfs_list_t rap_conns; /* all active connections */
+ cfs_list_t rap_tx_queue; /* msgs waiting for a conn */
+ lnet_nid_t rap_nid; /* who's on the other end(s) */
+ __u32 rap_ip; /* IP address of peer */
+ int rap_port; /* port on which peer listens */
+ cfs_atomic_t rap_refcount; /* # users */
+ int rap_persistence; /* "known" peer refs */
+ int rap_connecting; /* connection forming */
+ unsigned long rap_reconnect_time; /* get_seconds() when reconnect OK */
+ unsigned long rap_reconnect_interval; /* exponential backoff */
} kra_peer_t;
extern kra_data_t kranal_data;
nloops = 0;
if (!did_something) { /* wait for something to do */
- cfs_wait_event_interruptible_exclusive(
- sched->kss_waitq,
- !ksocknal_sched_cansleep(sched), rc);
- LASSERT (rc == 0);
+ rc = wait_event_interruptible_exclusive(
+ sched->kss_waitq,
+ !ksocknal_sched_cansleep(sched));
+ LASSERT (rc == 0);
} else {
cfs_cond_resched();
}
#define cfs_set_current_state(foo) do { current->state = foo; } while (0)
-#define cfs_wait_event_interruptible(wq, condition, ret) \
+#define wait_event_interruptible(wq, condition)         \
-{                                                       \
-        struct l_wait_info lwi;                         \
-        int timeout = 100000000;/* for ever */          \
-        int ret;                                        \
-                                                        \
-        lwi = LWI_TIMEOUT(timeout, NULL, NULL);         \
-        ret = l_wait_event(NULL, condition, &lwi);      \
-                                                        \
-        ret;                                            \
-}
+({                                                      \
+        struct l_wait_info lwi;                         \
+        int timeout = 100000000;/* forever */           \
+        int ret;                                        \
+                                                        \
+        lwi = LWI_TIMEOUT(timeout, NULL, NULL);         \
+        ret = l_wait_event(NULL, condition, &lwi);      \
+                                                        \
+        ret;                                            \
+})
#define daemonize(l) do {} while (0)
#define sigfillset(l) do {} while (0)
#define recalc_sigpending(l) do {} while (0)
-#define USERMODEHELPER(path, argv, envp) (0)
-#define SIGNAL_MASK_ASSERT()
+#define call_usermodehelper(path, argv, envp, wait) (0)
#define CFS_KERN_INFO
#if HZ != 1
ret = -EINTR; \
break; \
} \
- /* We have to do this here because some signals */ \
- /* are not blockable - ie from strace(1). */ \
- /* In these cases we want to schedule_timeout() */ \
- /* again, because we don't want that to return */ \
- /* -EINTR when the RPC actually succeeded. */ \
- /* the RECALC_SIGPENDING below will deliver the */ \
- /* signal properly. */ \
- cfs_clear_sigpending(); \
+ /* We have to do this here because some signals */ \
+ /* are not blockable - ie from strace(1). */ \
+ /* In these cases we want to schedule_timeout() */ \
+ /* again, because we don't want that to return */ \
+ /* -EINTR when the RPC actually succeeded. */ \
+ /* the recalc_sigpending() below will deliver the */ \
+ /* signal properly. */ \
+ cfs_clear_sigpending(); \
} \
} \
\
snprintf(keystr, sizeof(keystr), LPU64, entry->ue_key);
do_gettimeofday(&start);
- rc = USERMODEHELPER(argv[0], argv, envp);
+ rc = call_usermodehelper(argv[0], argv, envp, 1);
do_gettimeofday(&end);
if (rc < 0) {
CERROR("%s: error invoking upcall %s %s %s: rc %d; "
}
do_gettimeofday(&start);
- rc = USERMODEHELPER(argv[0], argv, NULL);
+        rc = call_usermodehelper(argv[0], argv, NULL, 1);
do_gettimeofday(&end);
if (rc < 0) {
int deadline;
ENTRY;
- SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
-
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);