* Thread schedule APIs.
*/
#define MAX_SCHEDULE_TIMEOUT ((long)(~0UL>>12))
-extern void thread_set_timer_deadline(uint64_t deadline);
+extern void thread_set_timer_deadline(__u64 deadline);
extern void thread_cancel_timer(void);
static inline int cfs_schedule_timeout(int state, int64_t timeout)
result = assert_wait((event_t)current_uthread(), state);
#endif
if (timeout > 0) {
- uint64_t expire;
+ __u64 expire;
nanoseconds_to_absolutetime(timeout, &expire);
clock_absolutetime_interval_to_deadline(expire, &expire);
thread_set_timer_deadline(expire);
void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link);
void ksleep_wait(struct ksleep_chan *chan, int state);
-int64_t ksleep_timedwait(struct ksleep_chan *chan, int state, uint64_t timeout);
+int64_t ksleep_timedwait(struct ksleep_chan *chan, int state, __u64 timeout);
void ksleep_wake(struct ksleep_chan *chan);
void ksleep_wake_all(struct ksleep_chan *chan);
#define atomic_inc(a) OSIncrementAtomic((SInt32 *)&((a)->counter))
#define atomic_dec(a) OSDecrementAtomic((SInt32 *)&((a)->counter))
#else /* !__DARWIN8__ */
-#define atomic_add(v, a) hw_atomic_add((uint32_t *)&((a)->counter), v)
-#define atomic_sub(v, a) hw_atomic_sub((uint32_t *)&((a)->counter), v)
+#define atomic_add(v, a) hw_atomic_add((__u32 *)&((a)->counter), v)
+#define atomic_sub(v, a) hw_atomic_sub((__u32 *)&((a)->counter), v)
#define atomic_inc(a) atomic_add(1, a)
#define atomic_dec(a) atomic_sub(1, a)
#endif /* !__DARWIN8__ */
#define sigmask(sig) ((__u32)1 << ((sig) - 1))
-#endif // LIBCFS_SIGNAL_H
\ No newline at end of file
+#endif // LIBCFS_SIGNAL_H
extern u64 mxlnd_nid2nic_id(lnet_nid_t nid);
/* in mxlnd_cb.c */
-void mxlnd_eager_recv(void *context, uint64_t match_value, uint32_t length);
+void mxlnd_eager_recv(void *context, __u64 match_value, __u32 length);
extern mx_unexp_handler_action_t mxlnd_unexpected_recv(void *context,
- mx_endpoint_addr_t source, uint64_t match_value, uint32_t length,
+ mx_endpoint_addr_t source, __u64 match_value, __u32 length,
void *data_if_available);
extern void mxlnd_peer_free(struct kmx_peer *peer);
extern void mxlnd_conn_free(struct kmx_conn *conn);
{
int ret = 0;
mx_return_t mxret = MX_SUCCESS;
- uint64_t mask = 0xF00FFFFFFFFFFFFFLL;
+ __u64 mask = 0xF00FFFFFFFFFFFFFLL;
rx->mxc_msg_type = msg_type;
rx->mxc_lntmsg[0] = lntmsg; /* may be NULL if EAGER */
*/
mx_unexp_handler_action_t
mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source,
- uint64_t match_value, uint32_t length, void *data_if_available)
+ __u64 match_value, __u32 length, void *data_if_available)
{
int ret = 0;
struct kmx_ctx *rx = NULL;
struct kmx_ctx *rx_data = NULL;
struct kmx_conn *conn = NULL;
int nob = 0;
- uint32_t length = 0;
+ __u32 length = 0;
struct kmx_peer *peer = NULL;
CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
struct kqswnal_tx *ktx_alloclist; /* stack in kqn_txds */
unsigned int ktx_state:7; /* What I'm doing */
unsigned int ktx_firsttmpfrag:1; /* ktx_frags[0] is in my ebuffer ? 0 : 1 */
- uint32_t ktx_basepage; /* page offset in reserved elan tx vaddrs for mapping pages */
+ __u32 ktx_basepage; /* page offset in reserved elan tx vaddrs for mapping pages */
int ktx_npages; /* pages reserved for mapping messages */
int ktx_nmappedpages; /* # pages mapped for current message */
int ktx_port; /* destination ep port */
int nfrags = ktx->ktx_nfrag;
int nmapped = ktx->ktx_nmappedpages;
int maxmapped = ktx->ktx_npages;
- uint32_t basepage = ktx->ktx_basepage + nmapped;
+ __u32 basepage = ktx->ktx_basepage + nmapped;
char *ptr;
EP_RAILMASK railmask;
int nfrags = ktx->ktx_nfrag;
int nmapped = ktx->ktx_nmappedpages;
int maxmapped = ktx->ktx_npages;
- uint32_t basepage = ktx->ktx_basepage + nmapped;
+ __u32 basepage = ktx->ktx_basepage + nmapped;
EP_RAILMASK railmask;
int rail;
/*****************************************************/
{
- uint32_t nentries;
+ __u32 nentries;
vvrc = vv_cq_create(kibnal_data.kib_hca, IBNAL_CQ_ENTRIES(),
kibnal_cq_callback,
*/
int64_t ksleep_timedwait(struct ksleep_chan *chan,
cfs_task_state_t state,
- uint64_t timeout)
+ __u64 timeout)
{
event_t event;
kspin_lock(&chan->guard);
if (!has_hits(chan, event)) {
int result;
- uint64_t expire;
+ __u64 expire;
result = assert_wait(event, state);
if (timeout > 0) {
/*
if (result == THREAD_TIMED_OUT)
timeout = 0;
else {
- uint64_t now;
+ __u64 now;
clock_get_uptime(&now);
if (expire > now)
absolutetime_to_nanoseconds(expire - now, &timeout);
}
extern boolean_t thread_call_func_cancel(thread_call_func_t, thread_call_param_t, boolean_t);
-extern void thread_call_func_delayed(thread_call_func_t, thread_call_param_t, uint64_t);
+extern void thread_call_func_delayed(thread_call_func_t, thread_call_param_t, __u64);
static void ktimer_disarm_locked(struct ktimer *t)
{
*
*********************************************************************/
-char *get_file_list(const char *dirname, int file_type, uint32_t *count)
+char *get_file_list(const char *dirname, int file_type, __u32 *count)
{
DIR *pdir = NULL;
struct oid_table *ptable)
{
char *dir_list;
- uint32_t num;
+ __u32 num;
int deviceindex;
unsigned char *ret_val = NULL;
int i=0;
} lustre_sysstatus;
/* File operation related functions */
-char *get_file_list(const char *dirname, int file_type, uint32_t *count);
+char *get_file_list(const char *dirname, int file_type, __u32 *count);
extern int is_directory(const char *filename);
extern int read_string(const char *filepath, char *lustre_var,size_t var_size);
int read_counter64(const char *file_path, counter64 *c64,int factor);
static u_long ulong_ret;
static unsigned char string[SPRINT_MAX_LEN];
char file_path[MAX_PATH_SIZE];
- uint32_t num;
+ __u32 num;
char *dir_list;
if (header_generic(vp,name,length,exact,var_len,write_method)