Clean up some macros in linux-tcpip.h, linux-time.h, linux-type.h.
Move several kernel-compatibility macros to portals_compat25.h.
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: Ia03046ff53a5f51e67d2fe7ccee7a6441ebe1dc7
Reviewed-on: http://review.whamcloud.com/4777
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
/#[ \t]*define[ \t]*\bget_cpu\b *( *)[ \t]*\bget_cpu\b *( *)/d
s/\bcfs_put_cpu\b/put_cpu/g
/#[ \t]*define[ \t]*\bput_cpu\b *( *)[ \t]*\bput_cpu\b *( *)/d
+
+################################################################################
+# macros in linux-time.h
+s/\bCFS_HZ\b/HZ/g
+/#[ \t]*define[ \t]*\bHZ\b[ \t]*\bHZ\b/d
+s/\bCURRENT_KERN_TIME\b/CURRENT_TIME/g
+/#[ \t]*define[ \t]*\bCURRENT_TIME\b[ \t]*\bCURRENT_TIME\b/d
+s/\bcfs_gettimeofday\b/do_gettimeofday/g
+/#[ \t]*define[ \t]*\bdo_gettimeofday\b *( *\w* *)[ \t]*\bdo_gettimeofday\b *( *\w* *)/d
+
+################################################################################
+# macros in linux-type.h
+s/\bcfs_umode_t\b/umode_t/g
+/typedef[ \t]*\bumode_t\b[ \t]*\bumode_t\b/d
+
+################################################################################
+# macros in libcfs/include/libcfs/linux/libcfs.h
+s/\bCFS_THREAD_SIZE\b/THREAD_SIZE/g
+/#[ \t]*define[ \t]*\bTHREAD_SIZE\b[ \t]*\bTHREAD_SIZE\b/d
+s/\bcfs_kernel_cap_t\b/kernel_cap_t/g
+/typedef[ \t]*\bkernel_cap_t\b[ \t]*\bkernel_cap_t\b/d
*
* and opaque scalar type
*
- * cfs_kernel_cap_t
+ * kernel_cap_t
*/
#endif
uid_t cfs_curproc_uid(void);
#else
# define THREAD_SIZE 8192
#endif
-#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
#define CHECK_STACK(msgdata, mask, cdls) do {} while(0)
#define CDEBUG_STACK() (0L)
/*
* XNU has no capabilities
*/
-typedef __u32 cfs_kernel_cap_t;
+typedef __u32 kernel_cap_t;
#ifdef __KERNEL__
enum {
EXTRA_DIST = kp30.h libcfs.h linux-fs.h linux-lock.h linux-mem.h \
linux-prim.h linux-time.h linux-tcpip.h linux-cpu.h \
- portals_compat25.h linux-bitops.h linux-types.h linux-crypto.h
+ portals_compat25.h linux-bitops.h linux-crypto.h
#include <libcfs/linux/linux-fs.h>
#include <libcfs/linux/linux-tcpip.h>
#include <libcfs/linux/linux-bitops.h>
-#include <libcfs/linux/linux-types.h>
#include <libcfs/linux/kp30.h>
#ifdef HAVE_ASM_TYPES_H
#include <linux/sched.h> /* THREAD_SIZE */
#include <linux/rbtree.h>
-#define CFS_THREAD_SIZE THREAD_SIZE
-#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
#if !defined(__x86_64__)
# ifdef __ia64__
#define CFS_CURPROC_COMM_MAX (sizeof ((struct task_struct *)0)->comm)
#include <linux/capability.h>
-typedef kernel_cap_t cfs_kernel_cap_t;
/*
* No stack-back-tracing in Linux for now.
#endif
#include <net/sock.h>
+#include <libcfs/linux/portals_compat25.h>
#ifndef HIPQUAD
// XXX Should just kill all users
typedef struct socket cfs_socket_t;
-#define SOCK_SNDBUF(so) ((so)->sk->sk_sndbuf)
#define SOCK_TEST_NOSPACE(so) test_bit(SOCK_NOSPACE, &(so)->flags)
static inline int
{
return sock->sk->sk_wmem_queued;
}
-
-#ifndef HAVE_SK_SLEEP
-static inline wait_queue_head_t *cfs_sk_sleep(struct sock *sk)
-{
- return sk->sk_sleep;
-}
-#else
-#define cfs_sk_sleep(sk) sk_sleep(sk)
-#endif
#endif
#define ONE_BILLION ((u_int64_t)1000000000)
#define ONE_MILLION 1000000
-#define CFS_HZ HZ
#ifndef __KERNEL__
#error This include is only for kernel use.
return (unsigned long long)t->tv_sec * ONE_BILLION + t->tv_nsec;
}
-#define CURRENT_KERN_TIME CURRENT_TIME
/*
* Generic kernel stuff
static inline void cfs_fs_time_current(cfs_fs_time_t *t)
{
- *t = CURRENT_KERN_TIME;
+ *t = CURRENT_TIME;
}
static inline time_t cfs_fs_time_sec(cfs_fs_time_t *t)
#define CFS_TIME_T "%lu"
#define CFS_DURATION_T "%ld"
-#define cfs_gettimeofday(tv) do_gettimeofday(tv)
#endif /* __LIBCFS_LINUX_LINUX_TIME_H__ */
/*
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/user-bitops.h
- */
-#include <linux/types.h>
-
-typedef umode_t cfs_umode_t;
#ifndef __LIBCFS_LINUX_PORTALS_COMPAT_H__
#define __LIBCFS_LINUX_PORTALS_COMPAT_H__
+#include <net/sock.h>
+
// XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
#if defined(SPINLOCK_DEBUG) && SPINLOCK_DEBUG
# define SIGNAL_MASK_ASSERT() \
int (*handler)(void *data, int write,
loff_t pos, void *buffer, int len));
+#ifdef HAVE_INIT_NET
+# define DEFAULT_NET (&init_net)
+#else
+/* some broken backports */
+# define DEFAULT_NET (NULL)
+#endif
+
#endif /* _PORTALS_COMPAT_H */
#include <libcfs/posix/posix-wordsize.h>
#include <libcfs/user-bitops.h>
-# define cfs_gettimeofday(tv) gettimeofday(tv, NULL);
+# define do_gettimeofday(tv) gettimeofday(tv, NULL);
typedef unsigned long long cfs_cycles_t;
#define IS_ERR(a) ((unsigned long)(a) > (unsigned long)-1000L)
#endif
# ifndef THREAD_SIZE /* x86_64 linux has THREAD_SIZE in userspace */
-# define CFS_THREAD_SIZE 8192
+# define THREAD_SIZE 8192
# else
-# define CFS_THREAD_SIZE THREAD_SIZE
# endif
-#define LUSTRE_TRACE_SIZE (CFS_THREAD_SIZE >> 5)
-
#define CFS_CHECK_STACK(msgdata, mask, cdls) do {} while(0)
#define CDEBUG_STACK() (0L)
*/
#define CFS_CURPROC_COMM_MAX (sizeof ((struct task_struct *)0)->comm)
-typedef __u32 cfs_kernel_cap_t;
+typedef __u32 kernel_cap_t;
/**
* Module support (probably shouldn't be used in generic code?)
#include <asm/types.h>
#include <stdbool.h> /* for bool */
#ifndef HAVE_UMODE_T
-typedef unsigned short cfs_umode_t;
+typedef unsigned short umode_t;
#else
-typedef umode_t cfs_umode_t;
#endif
/*
#endif /* __KERNEL__*/
-#ifndef CFS_THREAD_SIZE
-# define CFS_THREAD_SIZE query_stack_size()
+#ifndef THREAD_SIZE
+# define THREAD_SIZE query_stack_size()
#endif
-#define LUSTRE_TRACE_SIZE (CFS_THREAD_SIZE >> 5)
-
#ifdef __KERNEL__
-#define CDEBUG_STACK() (CFS_THREAD_SIZE - (__u32)IoGetRemainingStackSize())
+#define CDEBUG_STACK() (THREAD_SIZE - (__u32)IoGetRemainingStackSize())
#define CFS_CHECK_STACK(msgdata, mask, cdls) do {} while(0)
#else /* !__KERNEL__ */
#define CFS_CHECK_STACK(msgdata, mask, cdls) do {} while(0)
NTSTATUS Status;
LARGE_INTEGER TimeOut;
- TimeOut.QuadPart = -1 * (10000000/CFS_HZ) * timeout;
+ TimeOut.QuadPart = -1 * (10000000/HZ) * timeout;
Status = KeWaitForSingleObject(
event,
* capabilities support
*/
-typedef __u32 cfs_kernel_cap_t;
+typedef __u32 kernel_cap_t;
#define cap_raise(c, flag) do {} while(0)
#define cap_lower(c, flag) do {} while(0)
int cgroups;
gid_t groups[NGROUPS];
cfs_group_info_t *group_info;
- cfs_kernel_cap_t cap_effective,
+ kernel_cap_t cap_effective,
cap_inheritable,
cap_permitted;
#include <libcfs/winnt/portals_compat25.h>
-#define CFS_HZ (100)
+#define HZ (100)
struct timespec {
__u32 tv_sec;
#define jiffies (ULONG_PTR)JIFFIES()
#define cfs_jiffies (ULONG_PTR)JIFFIES()
-static inline void cfs_gettimeofday(struct timeval *tv)
+static inline void do_gettimeofday(struct timeval *tv)
{
LARGE_INTEGER Time;
KeQueryTickCount(&Tick);
Elapse.QuadPart = Tick.QuadPart * KeQueryTimeIncrement();
- Elapse.QuadPart /= (10000000 / CFS_HZ);
+ Elapse.QuadPart /= (10000000 / HZ);
return Elapse.QuadPart;
}
static inline time_t cfs_time_current_sec(void)
{
- return (time_t)(JIFFIES() / CFS_HZ);
+ return (time_t)(JIFFIES() / HZ);
}
#define cfs_time_before(t1, t2) (((signed)(t1) - (signed)(t2)) < 0)
static inline cfs_duration_t cfs_time_seconds(cfs_duration_t seconds)
{
- return (cfs_duration_t)(seconds * CFS_HZ);
+ return (cfs_duration_t)(seconds * HZ);
}
static inline time_t cfs_duration_sec(cfs_duration_t d)
{
- return (time_t)(d / CFS_HZ);
+ return (time_t)(d / HZ);
}
static inline void cfs_duration_usec(cfs_duration_t d, struct timeval *s)
{
- s->tv_sec = (__u32)(d / CFS_HZ);
- s->tv_usec = (__u32)((d - (cfs_duration_t)s->tv_sec * CFS_HZ) *
- ONE_MILLION / CFS_HZ);
+ s->tv_sec = (__u32)(d / HZ);
+ s->tv_usec = (__u32)((d - (cfs_duration_t)s->tv_sec * HZ) *
+ ONE_MILLION / HZ);
}
static inline void cfs_duration_nsec(cfs_duration_t d, struct timespec *s)
{
- s->tv_sec = (__u32) (d / CFS_HZ);
- s->tv_nsec = (__u32)((d - (cfs_duration_t)s->tv_sec * CFS_HZ) *
- ONE_BILLION / CFS_HZ);
+ s->tv_sec = (__u32) (d / HZ);
+ s->tv_nsec = (__u32)((d - (cfs_duration_t)s->tv_sec * HZ) *
+ ONE_BILLION / HZ);
}
static inline void cfs_fs_time_usec(cfs_fs_time_t *t, struct timeval *v)
/* liblustre. time(2) based implementation. */
int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
void sleep(int time);
-void cfs_gettimeofday(struct timeval *tv);
+void do_gettimeofday(struct timeval *tv);
int gettimeofday(struct timeval *tv, void * tz);
#endif /* !__KERNEL__ */
typedef __u16 uid_t, gid_t;
typedef __u16 mode_t;
-typedef __u16 cfs_umode_t;
+typedef __u16 umode_t;
typedef __u32 sigset_t;
return cap_raised(current_cap(), cfs_cap_unpack(cap));
}
-void cfs_kernel_cap_pack(cfs_kernel_cap_t kcap, cfs_cap_t *cap)
+void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
{
#if defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x19980330
*cap = cfs_cap_pack(kcap);
#endif
}
-void cfs_kernel_cap_unpack(cfs_kernel_cap_t *kcap, cfs_cap_t cap)
+void cfs_kernel_cap_unpack(kernel_cap_t *kcap, cfs_cap_t cap)
{
#if defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x19980330
*kcap = cfs_cap_unpack(cap);
/* For sys_open & sys_close */
#include <linux/syscalls.h>
+#ifndef HAVE_SK_SLEEP
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+ return sk->sk_sleep;
+}
+#endif
+
int
libcfs_sock_ioctl(int cmd, unsigned long arg)
{
newsock->ops = sock->ops;
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(cfs_sk_sleep(sock->sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(sk_sleep(sock->sk), &wait);
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
- if (rc == -EAGAIN) {
- /* Nothing ready, so wait for activity */
- schedule();
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
- }
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ if (rc == -EAGAIN) {
+ /* Nothing ready, so wait for activity */
+ schedule();
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ }
- remove_wait_queue(cfs_sk_sleep(sock->sk), &wait);
- set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sock->sk), &wait);
+ set_current_state(TASK_RUNNING);
if (rc != 0)
goto failed;
void
libcfs_sock_abort_accept (struct socket *sock)
{
- wake_up_all(cfs_sk_sleep(sock->sk));
+ wake_up_all(sk_sleep(sock->sk));
}
EXPORT_SYMBOL(libcfs_sock_abort_accept);
}
-void cfs_gettimeofday(struct timeval *tv)
+void do_gettimeofday(struct timeval *tv)
{
LARGE_INTEGER Time;
int gettimeofday(struct timeval *tv, void * tz)
{
- cfs_gettimeofday(tv);
+ do_gettimeofday(tv);
return 0;
}
KeAcquireSpinLock(&(timer->Lock), &Irql);
if (!cfs_is_flag_set(timer->Flags, CFS_TIMER_FLAG_TIMERED)){
- timeout.QuadPart = (LONGLONG)-1*1000*1000*10/CFS_HZ*deadline;
+ timeout.QuadPart = (LONGLONG)-1*1000*1000*10/HZ*deadline;
if (KeSetTimer(&timer->Timer, timeout, &timer->Dpc)) {
cfs_set_flag(timer->Flags, CFS_TIMER_FLAG_TIMERED);
{
struct timeval tv;
- cfs_gettimeofday(&tv);
+ do_gettimeofday(&tv);
header->ph_subsys = subsys;
header->ph_mask = mask;
goto failed_with_init;
}
- mx_get_endpoint_addr(kmxlnd_data.kmx_endpt, &kmxlnd_data.kmx_epa);
- mx_decompose_endpoint_addr(kmxlnd_data.kmx_epa, &nic_id, &ep_id);
- mxret = mx_connect(kmxlnd_data.kmx_endpt, nic_id, ep_id,
- MXLND_MSG_MAGIC, MXLND_CONNECT_TIMEOUT/CFS_HZ*1000,
- &kmxlnd_data.kmx_epa);
- if (mxret != MX_SUCCESS) {
- CNETERR("unable to connect to myself (%s)\n", mx_strerror(mxret));
- goto failed_with_endpoint;
- }
+ mx_get_endpoint_addr(kmxlnd_data.kmx_endpt, &kmxlnd_data.kmx_epa);
+ mx_decompose_endpoint_addr(kmxlnd_data.kmx_epa, &nic_id, &ep_id);
+ mxret = mx_connect(kmxlnd_data.kmx_endpt, nic_id, ep_id,
+ MXLND_MSG_MAGIC, MXLND_CONNECT_TIMEOUT/HZ*1000,
+ &kmxlnd_data.kmx_epa);
+ if (mxret != MX_SUCCESS) {
+ CNETERR("unable to connect to myself (%s)\n", mx_strerror(mxret));
+ goto failed_with_endpoint;
+ }
ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ip);
CDEBUG(D_NET, "My NID is 0x%llx\n", ni->ni_nid);
mx_strerror(mxret));
goto failed_with_endpoint;
}
- mxret = mx_set_request_timeout(kmxlnd_data.kmx_endpt, NULL,
- MXLND_COMM_TIMEOUT/CFS_HZ*1000);
- if (mxret != MX_SUCCESS) {
- CERROR("mx_set_request_timeout() failed with %s\n",
- mx_strerror(mxret));
- goto failed_with_endpoint;
- }
+ mxret = mx_set_request_timeout(kmxlnd_data.kmx_endpt, NULL,
+ MXLND_COMM_TIMEOUT/HZ*1000);
+ if (mxret != MX_SUCCESS) {
+ CERROR("mx_set_request_timeout() failed with %s\n",
+ mx_strerror(mxret));
+ goto failed_with_endpoint;
+ }
return 0;
failed_with_endpoint:
/* calls write_[un]lock(kmx_global_lock) */
mxlnd_del_peer(LNET_NID_ANY);
- /* wakeup request_waitds */
- mx_wakeup(kmxlnd_data.kmx_endpt);
+ /* wakeup request_waitds */
+ mx_wakeup(kmxlnd_data.kmx_endpt);
up(&kmxlnd_data.kmx_tx_queue_sem);
up(&kmxlnd_data.kmx_conn_sem);
- mxlnd_sleep(2 * CFS_HZ);
+ mxlnd_sleep(2 * HZ);
/* fall through */
kmxlnd_data.kmx_ni = ni;
ni->ni_data = &kmxlnd_data;
- cfs_gettimeofday(&tv);
- kmxlnd_data.kmx_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
- CDEBUG(D_NET, "my incarnation is %llu\n", kmxlnd_data.kmx_incarnation);
+ do_gettimeofday(&tv);
+ kmxlnd_data.kmx_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ CDEBUG(D_NET, "my incarnation is %llu\n", kmxlnd_data.kmx_incarnation);
rwlock_init (&kmxlnd_data.kmx_global_lock);
spin_lock_init (&kmxlnd_data.kmx_mem_lock);
#define MXLND_NDAEMONS 3 /* connd, timeoutd, tx_queued */
#define MXLND_MX_BOARD 0 /* Use the first MX NIC if more than 1 avail */
#define MXLND_MX_EP_ID 0 /* MX endpoint ID */
-#define MXLND_COMM_TIMEOUT (20 * CFS_HZ) /* timeout for send/recv (jiffies) */
-#define MXLND_WAIT_TIMEOUT CFS_HZ /* timeout for wait (jiffies) */
-#define MXLND_CONNECT_TIMEOUT (5 * CFS_HZ) /* timeout for connections (jiffies) */
+#define MXLND_COMM_TIMEOUT (20 * HZ) /* timeout for send/recv (jiffies) */
+#define MXLND_WAIT_TIMEOUT HZ /* timeout for wait (jiffies) */
+#define MXLND_CONNECT_TIMEOUT (5 * HZ) /* timeout for connections (jiffies) */
#define MXLND_POLLING 1000 /* poll iterations before blocking */
#define MXLND_LOOKUP_COUNT 5 /* how many times to try to resolve MAC */
#define MXLND_MAX_PEERS 1024 /* number of nodes talking to me */
if (tmp_id != 0ULL)
ret = 0;
break;
- } else if (ret == -EHOSTUNREACH && try < tries) {
- /* add a little backoff */
- CDEBUG(D_NET, "sleeping for %d jiffies\n",
- CFS_HZ/4);
- mxlnd_sleep(CFS_HZ/4);
- }
+ } else if (ret == -EHOSTUNREACH && try < tries) {
+ /* add a little backoff */
+ CDEBUG(D_NET, "sleeping for %d jiffies\n",
+ HZ/4);
+ mxlnd_sleep(HZ/4);
+ }
}
} while (try++ < tries);
CDEBUG(D_NET, "done trying. ret = %d\n", ret);
mx_strerror(mxret), mxret, libcfs_nid2str(peer->mxp_nid));
mxlnd_conn_decref(conn);
}
- mx_set_request_timeout(kmxlnd_data.kmx_endpt, request,
- MXLND_CONNECT_TIMEOUT/CFS_HZ*1000);
- return;
+ mx_set_request_timeout(kmxlnd_data.kmx_endpt, request,
+ MXLND_CONNECT_TIMEOUT/HZ*1000);
+ return;
}
#define MXLND_STATS 0
}
#if MXLND_STATS
- if (cfs_time_after(jiffies, last)) {
- last = jiffies + CFS_HZ;
- CDEBUG(D_NET, "status= %s credits= %d outstanding= %d ntx_msgs= %d "
- "ntx_posted= %d ntx_data= %d data_posted= %d\n",
- mxlnd_connstatus_to_str(conn->mxk_status), conn->mxk_credits,
- conn->mxk_outstanding, conn->mxk_ntx_msgs, conn->mxk_ntx_posted,
- conn->mxk_ntx_data, conn->mxk_data_posted);
- }
+ if (cfs_time_after(jiffies, last)) {
+ last = jiffies + HZ;
+ CDEBUG(D_NET, "status= %s credits= %d outstanding= %d ntx_msgs= %d "
+ "ntx_posted= %d ntx_data= %d data_posted= %d\n",
+ mxlnd_connstatus_to_str(conn->mxk_status), conn->mxk_credits,
+ conn->mxk_outstanding, conn->mxk_ntx_msgs, conn->mxk_ntx_posted,
+ conn->mxk_ntx_data, conn->mxk_data_posted);
+ }
#endif
spin_lock(&conn->mxk_lock);
int
mxlnd_timeoutd(void *arg)
{
- int i = 0;
- long id = (long) arg;
- unsigned long now = 0;
- unsigned long next = 0;
- unsigned long delay = CFS_HZ;
- kmx_peer_t *peer = NULL;
- kmx_peer_t *temp = NULL;
- kmx_conn_t *conn = NULL;
+ int i = 0;
+ long id = (long) arg;
+ unsigned long now = 0;
+ unsigned long next = 0;
+ unsigned long delay = HZ;
+ kmx_peer_t *peer = NULL;
+ kmx_peer_t *temp = NULL;
+ kmx_conn_t *conn = NULL;
rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
CDEBUG(D_NET, "timeoutd starting\n");
continue;
}
- if ((conn->mxk_status == MXLND_CONN_READY ||
- conn->mxk_status == MXLND_CONN_FAIL) &&
- cfs_time_after(now,
- conn->mxk_last_tx +
- CFS_HZ)) {
+ if ((conn->mxk_status == MXLND_CONN_READY ||
+ conn->mxk_status == MXLND_CONN_FAIL) &&
+ cfs_time_after(now,
+ conn->mxk_last_tx +
+ HZ)) {
write_unlock(g_lock);
mxlnd_check_sends(peer);
write_lock(g_lock);
if (net == NULL)
goto failed;
- memset(net, 0, sizeof(*net));
+ memset(net, 0, sizeof(*net));
- cfs_gettimeofday(&tv);
- net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ do_gettimeofday(&tv);
+ net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
- return (*kiblnd_tunables.kib_keepalive > 0) &&
- cfs_time_after(jiffies, conn->ibc_last_send +
- *kiblnd_tunables.kib_keepalive*CFS_HZ);
+ return (*kiblnd_tunables.kib_keepalive > 0) &&
+ cfs_time_after(jiffies, conn->ibc_last_send +
+ *kiblnd_tunables.kib_keepalive*HZ);
}
static inline int
LASSERT (!tx->tx_queued); /* not queued for sending already */
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- tx->tx_queued = 1;
- tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * CFS_HZ);
+ tx->tx_queued = 1;
+ tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
if (tx->tx_conn == NULL) {
kiblnd_conn_addref(conn);
if (chunk == 0)
chunk = 1;
- for (i = 0; i < chunk; i++) {
- kiblnd_check_conns(peer_index);
- peer_index = (peer_index + 1) %
- kiblnd_data.kib_peer_hash_size;
- }
+ for (i = 0; i < chunk; i++) {
+ kiblnd_check_conns(peer_index);
+ peer_index = (peer_index + 1) %
+ kiblnd_data.kib_peer_hash_size;
+ }
- deadline += p * CFS_HZ;
+ deadline += p * HZ;
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
}
goto failed;
}
- /* Initialized the incarnation - it must be for-all-time unique, even
- * accounting for the fact that we increment it when we disconnect a
- * peer that's using it */
- cfs_gettimeofday(&tv);
- kptllnd_data.kptl_incarnation = (((__u64)tv.tv_sec) * 1000000) +
- tv.tv_usec;
- CDEBUG(D_NET, "Incarnation="LPX64"\n", kptllnd_data.kptl_incarnation);
+ /* Initialized the incarnation - it must be for-all-time unique, even
+ * accounting for the fact that we increment it when we disconnect a
+ * peer that's using it */
+ do_gettimeofday(&tv);
+ kptllnd_data.kptl_incarnation = (((__u64)tv.tv_sec) * 1000000) +
+ tv.tv_usec;
+ CDEBUG(D_NET, "Incarnation="LPX64"\n", kptllnd_data.kptl_incarnation);
target.nid = LNET_NID_ANY;
target.pid = LNET_PID_ANY; /* NB target for NAK doesn't matter */
/* lnet_finalize() will be called when tx is torn down, so I must
* return success from here on... */
- tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * CFS_HZ);
- tx->tx_rdma_mdh = mdh;
- tx->tx_active = 1;
- cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
+ tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * HZ);
+ tx->tx_rdma_mdh = mdh;
+ tx->tx_active = 1;
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
/* peer has now got my ref on 'tx' */
kptllnd_data.kptl_peer_hash_size;
}
- deadline += p * CFS_HZ;
- stamp++;
- continue;
+ deadline += p * HZ;
+ stamp++;
+ continue;
}
kptllnd_handle_closing_peers();
}
- tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * CFS_HZ);
- tx->tx_active = 1;
- tx->tx_msg_mdh = msg_mdh;
- kptllnd_queue_tx(peer, tx);
+ tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * HZ);
+ tx->tx_active = 1;
+ tx->tx_msg_mdh = msg_mdh;
+ kptllnd_queue_tx(peer, tx);
}
/* NB "restarts" comes from peer_sendq of a single peer */
MIN(peer->rap_reconnect_interval,
*kranal_tunables.kra_max_reconnect_interval);
- peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval *
- CFS_HZ;
+ peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ;
/* Grab all blocked packets while we have the global lock */
cfs_list_add(&zombies, &peer->rap_tx_queue);
ni->ni_data = &kranal_data;
kranal_data.kra_ni = ni;
- /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
- * a unique (for all time) connstamp so we can uniquely identify
- * the sender. The connstamp is an incrementing counter
- * initialised with seconds + microseconds at startup time. So we
- * rely on NOT creating connections more frequently on average than
- * 1MHz to ensure we don't use old connstamps when we reboot. */
- cfs_gettimeofday(&tv);
- kranal_data.kra_connstamp =
- kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
+ * a unique (for all time) connstamp so we can uniquely identify
+ * the sender. The connstamp is an incrementing counter
+ * initialised with seconds + microseconds at startup time. So we
+ * rely on NOT creating connections more frequently on average than
+ * 1MHz to ensure we don't use old connstamps when we reboot. */
+ do_gettimeofday(&tv);
+ kranal_data.kra_connstamp =
+ kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
rwlock_init(&kranal_data.kra_global_lock);
LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED ||
conn->rac_state == RANAL_CONN_CLOSING);
- if (!conn->rac_close_sent &&
- cfs_time_aftereq(now, conn->rac_last_tx + conn->rac_keepalive *
- CFS_HZ)) {
- /* not sent in a while; schedule conn so scheduler sends a keepalive */
- CDEBUG(D_NET, "Scheduling keepalive %p->%s\n",
- conn, libcfs_nid2str(conn->rac_peer->rap_nid));
- kranal_schedule_conn(conn);
- }
-
- timeout = conn->rac_timeout * CFS_HZ;
-
- if (!conn->rac_close_recvd &&
- cfs_time_aftereq(now, conn->rac_last_rx + timeout)) {
- CERROR("%s received from %s within %lu seconds\n",
- (conn->rac_state == RANAL_CONN_ESTABLISHED) ?
- "Nothing" : "CLOSE not",
- libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - conn->rac_last_rx)/CFS_HZ);
- return -ETIMEDOUT;
- }
+ if (!conn->rac_close_sent &&
+ cfs_time_aftereq(now, conn->rac_last_tx + conn->rac_keepalive *
+ HZ)) {
+ /* not sent in a while; schedule conn so scheduler sends a keepalive */
+ CDEBUG(D_NET, "Scheduling keepalive %p->%s\n",
+ conn, libcfs_nid2str(conn->rac_peer->rap_nid));
+ kranal_schedule_conn(conn);
+ }
+
+ timeout = conn->rac_timeout * HZ;
+
+ if (!conn->rac_close_recvd &&
+ cfs_time_aftereq(now, conn->rac_last_rx + timeout)) {
+ CERROR("%s received from %s within %lu seconds\n",
+ (conn->rac_state == RANAL_CONN_ESTABLISHED) ?
+ "Nothing" : "CLOSE not",
+ libcfs_nid2str(conn->rac_peer->rap_nid),
+ (now - conn->rac_last_rx)/HZ);
+ return -ETIMEDOUT;
+ }
if (conn->rac_state != RANAL_CONN_ESTABLISHED)
return 0;
spin_lock_irqsave(&conn->rac_lock, flags);
- cfs_list_for_each (ttmp, &conn->rac_fmaq) {
- tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
+ cfs_list_for_each (ttmp, &conn->rac_fmaq) {
+ tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
- if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+ if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
spin_unlock_irqrestore(&conn->rac_lock, flags);
- CERROR("tx on fmaq for %s blocked %lu seconds\n",
- libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/CFS_HZ);
- return -ETIMEDOUT;
- }
- }
+ CERROR("tx on fmaq for %s blocked %lu seconds\n",
+ libcfs_nid2str(conn->rac_peer->rap_nid),
+ (now - tx->tx_qtime)/HZ);
+ return -ETIMEDOUT;
+ }
+ }
- cfs_list_for_each (ttmp, &conn->rac_rdmaq) {
- tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
+ cfs_list_for_each (ttmp, &conn->rac_rdmaq) {
+ tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
- if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+ if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
spin_unlock_irqrestore(&conn->rac_lock, flags);
- CERROR("tx on rdmaq for %s blocked %lu seconds\n",
- libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/CFS_HZ);
- return -ETIMEDOUT;
- }
- }
+ CERROR("tx on rdmaq for %s blocked %lu seconds\n",
+ libcfs_nid2str(conn->rac_peer->rap_nid),
+ (now - tx->tx_qtime)/HZ);
+ return -ETIMEDOUT;
+ }
+ }
- cfs_list_for_each (ttmp, &conn->rac_replyq) {
- tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
+ cfs_list_for_each (ttmp, &conn->rac_replyq) {
+ tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
- if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
+ if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
spin_unlock_irqrestore(&conn->rac_lock, flags);
- CERROR("tx on replyq for %s blocked %lu seconds\n",
- libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/CFS_HZ);
- return -ETIMEDOUT;
- }
- }
+ CERROR("tx on replyq for %s blocked %lu seconds\n",
+ libcfs_nid2str(conn->rac_peer->rap_nid),
+ (now - tx->tx_qtime)/HZ);
+ return -ETIMEDOUT;
+ }
+ }
spin_unlock_irqrestore(&conn->rac_lock, flags);
return 0;
conn_index = (conn_index + 1) % conn_entries;
}
- next_check_time += p * CFS_HZ;
+ next_check_time += p * HZ;
spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
return 0;
case RAP_NOT_DONE:
- if (cfs_time_aftereq(jiffies,
- conn->rac_last_tx + conn->rac_keepalive *
- CFS_HZ))
- CWARN("EAGAIN sending %02x (idle %lu secs)\n",
- msg->ram_type,
- (jiffies - conn->rac_last_tx)/CFS_HZ);
- return -EAGAIN;
+ if (cfs_time_aftereq(jiffies,
+ conn->rac_last_tx + conn->rac_keepalive *
+ HZ))
+ CWARN("EAGAIN sending %02x (idle %lu secs)\n",
+ msg->ram_type,
+ (jiffies - conn->rac_last_tx)/HZ);
+ return -EAGAIN;
}
}
/* RDMAs in progress */
LASSERT (!conn->rac_close_sent);
- if (cfs_time_aftereq(jiffies,
- conn->rac_last_tx +
- conn->rac_keepalive * CFS_HZ)) {
- CDEBUG(D_NET, "sending NOOP (rdma in progress)\n");
- kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
- kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
- }
+ if (cfs_time_aftereq(jiffies,
+ conn->rac_last_tx +
+ conn->rac_keepalive * HZ)) {
+ CDEBUG(D_NET, "sending NOOP (rdma in progress)\n");
+ kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
+ kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
+ }
return;
}
spin_unlock_irqrestore(&conn->rac_lock, flags);
- if (cfs_time_aftereq(jiffies,
- conn->rac_last_tx + conn->rac_keepalive *
- CFS_HZ)) {
- CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%ld))\n",
- libcfs_nid2str(conn->rac_peer->rap_nid), conn,
- (jiffies - conn->rac_last_tx)/CFS_HZ,
- conn->rac_keepalive);
- kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
- kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
- }
+ if (cfs_time_aftereq(jiffies,
+ conn->rac_last_tx + conn->rac_keepalive *
+ HZ)) {
+ CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%ld))\n",
+ libcfs_nid2str(conn->rac_peer->rap_nid), conn,
+ (jiffies - conn->rac_last_tx)/HZ,
+ conn->rac_keepalive);
+ kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
+ kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
+ }
return;
}
conn, libcfs_nid2str(conn->rac_peer->rap_nid), nfma, nreplies);
}
-int
-kranal_process_new_conn (kra_conn_t *conn)
+int kranal_process_new_conn (kra_conn_t *conn)
{
- RAP_RETURN rrc;
+ RAP_RETURN rrc;
- rrc = RapkCompleteSync(conn->rac_rihandle, 1);
- if (rrc == RAP_SUCCESS)
- return 0;
+ rrc = RapkCompleteSync(conn->rac_rihandle, 1);
+ if (rrc == RAP_SUCCESS)
+ return 0;
- LASSERT (rrc == RAP_NOT_DONE);
- if (!cfs_time_aftereq(jiffies, conn->rac_last_tx +
- conn->rac_timeout * CFS_HZ))
- return -EAGAIN;
+ LASSERT (rrc == RAP_NOT_DONE);
+ if (!cfs_time_aftereq(jiffies, conn->rac_last_tx +
+ conn->rac_timeout * HZ))
+ return -EAGAIN;
- /* Too late */
- rrc = RapkCompleteSync(conn->rac_rihandle, 0);
- LASSERT (rrc == RAP_SUCCESS);
- return -ETIMEDOUT;
+ /* Too late */
+ rrc = RapkCompleteSync(conn->rac_rihandle, 0);
+ LASSERT (rrc == RAP_SUCCESS);
+ return -ETIMEDOUT;
}
int
continue;
}
- /* retry with exponential backoff until HZ */
- if (conn->rac_keepalive == 0)
- conn->rac_keepalive = 1;
- else if (conn->rac_keepalive <= CFS_HZ)
- conn->rac_keepalive *= 2;
- else
- conn->rac_keepalive += CFS_HZ;
-
- deadline = conn->rac_last_tx + conn->rac_keepalive;
+ /* retry with exponential backoff until HZ */
+ if (conn->rac_keepalive == 0)
+ conn->rac_keepalive = 1;
+ else if (conn->rac_keepalive <= HZ)
+ conn->rac_keepalive *= 2;
+ else
+ conn->rac_keepalive += HZ;
+
+ deadline = conn->rac_last_tx + conn->rac_keepalive;
spin_lock_irqsave(&dev->rad_lock, flags);
}
PORTAL_MODULE_UNUSE;
}
-__u64
-ksocknal_new_incarnation (void)
+__u64 ksocknal_new_incarnation (void)
{
- struct timeval tv;
+ struct timeval tv;
- /* The incarnation number is the time this module loaded and it
- * identifies this particular instance of the socknal. Hopefully
- * we won't be able to reboot more frequently than 1MHz for the
- * forseeable future :) */
+ /* The incarnation number is the time this module loaded and it
+ * identifies this particular instance of the socknal. Hopefully
+ * we won't be able to reboot more frequently than 1MHz for the
+ * forseeable future :) */
- cfs_gettimeofday(&tv);
+ do_gettimeofday(&tv);
- return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
}
int
#endif /* LNET_USE_LIB_FREELIST */
-__u64
-lnet_create_interface_cookie (void)
+__u64 lnet_create_interface_cookie (void)
{
- /* NB the interface cookie in wire handles guards against delayed
- * replies and ACKs appearing valid after reboot. Initialisation time,
- * even if it's only implemented to millisecond resolution is probably
- * easily good enough. */
- struct timeval tv;
- __u64 cookie;
+ /* NB the interface cookie in wire handles guards against delayed
+ * replies and ACKs appearing valid after reboot. Initialisation time,
+ * even if it's only implemented to millisecond resolution is probably
+ * easily good enough. */
+ struct timeval tv;
+ __u64 cookie;
#ifndef __KERNEL__
- int rc = gettimeofday (&tv, NULL);
- LASSERT (rc == 0);
+ int rc = gettimeofday (&tv, NULL);
+ LASSERT (rc == 0);
#else
- cfs_gettimeofday(&tv);
+ do_gettimeofday(&tv);
#endif
- cookie = tv.tv_sec;
- cookie *= 1000000;
- cookie += tv.tv_usec;
- return cookie;
+ cookie = tv.tv_sec;
+ cookie *= 1000000;
+ cookie += tv.tv_usec;
+ return cookie;
}
static char *
seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
}
- cfs_gettimeofday(&tv);
- cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
- seeded = 1;
- return;
+ do_gettimeofday(&tv);
+ cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+ seeded = 1;
+ return;
}
/* NB expects LNET_LOCK held */
#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
#define BRW_MSIZE sizeof(__u64)
-int
-brw_inject_one_error (void)
+int brw_inject_one_error (void)
{
- struct timeval tv;
+ struct timeval tv;
- if (brw_inject_errors <= 0) return 0;
+ if (brw_inject_errors <= 0) return 0;
#ifndef __KERNEL__
- gettimeofday(&tv, NULL);
+ gettimeofday(&tv, NULL);
#else
- cfs_gettimeofday(&tv);
+ do_gettimeofday(&tv);
#endif
- if ((tv.tv_usec & 1) == 0) return 0;
+ if ((tv.tv_usec & 1) == 0) return 0;
- return brw_inject_errors--;
+ return brw_inject_errors--;
}
void
struct lvfs_ucred {
__u32 luc_fsuid;
__u32 luc_fsgid;
- cfs_kernel_cap_t luc_cap;
+ kernel_cap_t luc_cap;
__u32 luc_uid;
__u32 luc_umask;
};
* (2) The type of child is in \a child_mode.
* (3) The result hint is stored in \a ah;
*/
- void (*do_ah_init)(const struct lu_env *env,
- struct dt_allocation_hint *ah,
- struct dt_object *parent,
+ void (*do_ah_init)(const struct lu_env *env,
+ struct dt_allocation_hint *ah,
+ struct dt_object *parent,
struct dt_object *child,
- cfs_umode_t child_mode);
+ umode_t child_mode);
/**
* Create new object on this device.
*
#define loff_t long long
#define ERESTART 2001
-typedef unsigned short cfs_umode_t;
+typedef unsigned short umode_t;
#endif
#ifndef ERESTARTSYS
#define ERESTARTSYS ERESTART
#endif
-#define CFS_HZ 1
+
+#ifdef HZ
+#undef HZ
+#endif
+#define HZ 1
/* random */
#define ATTR_FILE 0
struct iattr {
- unsigned int ia_valid;
- cfs_umode_t ia_mode;
- uid_t ia_uid;
- gid_t ia_gid;
- loff_t ia_size;
- time_t ia_atime;
- time_t ia_mtime;
- time_t ia_ctime;
- unsigned int ia_attr_flags;
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ loff_t ia_size;
+ time_t ia_atime;
+ time_t ia_mtime;
+ time_t ia_ctime;
+ unsigned int ia_attr_flags;
};
/* defined in kernel header include/linux/namei.h */
#define SIGNAL_MASK_ASSERT()
#define CFS_KERN_INFO
-#if CFS_HZ != 1
+#if HZ != 1
#error "liblustre's jiffies currently expects HZ to be 1"
#endif
#define jiffies \
#define __fsfilt_check_slow(obd, start, msg) \
do { \
- if (cfs_time_before(jiffies, start + 15 * CFS_HZ)) \
- break; \
- else if (cfs_time_before(jiffies, start + 30 * CFS_HZ)) \
- CDEBUG(D_VFSTRACE, "%s: slow %s %lus\n", obd->obd_name, \
- msg, (jiffies-start) / CFS_HZ); \
- else if (cfs_time_before(jiffies, start + DISK_TIMEOUT * CFS_HZ)) \
- CWARN("%s: slow %s %lus\n", obd->obd_name, msg, \
- (jiffies - start) / CFS_HZ); \
- else \
- CERROR("%s: slow %s %lus\n", obd->obd_name, msg, \
- (jiffies - start) / CFS_HZ); \
+ if (cfs_time_before(jiffies, start + 15 * HZ)) \
+ break; \
+ else if (cfs_time_before(jiffies, start + 30 * HZ)) \
+ CDEBUG(D_VFSTRACE, "%s: slow %s %lus\n", obd->obd_name, \
+ msg, (jiffies-start) / HZ); \
+ else if (cfs_time_before(jiffies, start + DISK_TIMEOUT * HZ)) \
+ CWARN("%s: slow %s %lus\n", obd->obd_name, msg, \
+ (jiffies - start) / HZ); \
+ else \
+ CERROR("%s: slow %s %lus\n", obd->obd_name, msg, \
+ (jiffies - start) / HZ); \
} while (0)
#define fsfilt_check_slow(obd, start, msg) \
/* simple.c */
struct lvfs_ucred {
- __u32 luc_uid;
- __u32 luc_gid;
- __u32 luc_fsuid;
- __u32 luc_fsgid;
- cfs_kernel_cap_t luc_cap;
- __u32 luc_umask;
- struct group_info *luc_ginfo;
- struct md_identity *luc_identity;
+ __u32 luc_uid;
+ __u32 luc_gid;
+ __u32 luc_fsuid;
+ __u32 luc_fsgid;
+ kernel_cap_t luc_cap;
+ __u32 luc_umask;
+ struct group_info *luc_ginfo;
+ struct md_identity *luc_identity;
};
struct lvfs_callback_ops {
#ifdef CLIENT_OBD_LIST_LOCK_DEBUG
static inline void __client_obd_list_lock(client_obd_lock_t *lock,
- const char *func, int line)
+ const char *func, int line)
{
unsigned long cur = jiffies;
while (1) {
if (spin_trylock(&lock->lock)) {
- LASSERT(lock->task == NULL);
- lock->task = current;
- lock->func = func;
- lock->line = line;
- lock->time = jiffies;
- break;
- }
-
- if ((jiffies - cur > 5 * CFS_HZ) &&
- (jiffies - lock->time > 5 * CFS_HZ)) {
+ LASSERT(lock->task == NULL);
+ lock->task = current;
+ lock->func = func;
+ lock->line = line;
+ lock->time = jiffies;
+ break;
+ }
+
+ if ((jiffies - cur > 5 * HZ) &&
+ (jiffies - lock->time > 5 * HZ)) {
struct task_struct *task = lock->task;
if (task == NULL)
continue;
- LCONSOLE_WARN("%s:%d: lock %p was acquired"
- " by <%s:%d:%s:%d> for %lu seconds.\n",
+ LCONSOLE_WARN("%s:%d: lock %p was acquired"
+ " by <%s:%d:%s:%d> for %lu seconds.\n",
current->comm, current->pid,
- lock, task->comm, task->pid,
- lock->func, lock->line,
- (jiffies - lock->time) / CFS_HZ);
- LCONSOLE_WARN("====== for process holding the "
- "lock =====\n");
- libcfs_debug_dumpstack(task);
- LCONSOLE_WARN("====== for current process =====\n");
- libcfs_debug_dumpstack(NULL);
- LCONSOLE_WARN("====== end =======\n");
- cfs_pause(1000 * CFS_HZ);
- }
+ lock, task->comm, task->pid,
+ lock->func, lock->line,
+ (jiffies - lock->time) / HZ);
+ LCONSOLE_WARN("====== for process holding the "
+ "lock =====\n");
+ libcfs_debug_dumpstack(task);
+ LCONSOLE_WARN("====== for current process =====\n");
+ libcfs_debug_dumpstack(NULL);
+ LCONSOLE_WARN("====== end =======\n");
+ cfs_pause(1000 * HZ);
+ }
cpu_relax();
- }
+ }
}
#define client_obd_list_lock(lock) \
target_request_copy_put(req);
}
- delta = (jiffies - delta) / CFS_HZ;
- CDEBUG(D_INFO,"4: recovery completed in %lus - %d/%d reqs/locks\n",
- delta, obd->obd_replayed_requests, obd->obd_replayed_locks);
- if (delta > OBD_RECOVERY_TIME_SOFT) {
- CWARN("too long recovery - read logs\n");
- libcfs_debug_dumplog();
- }
+ delta = (jiffies - delta) / HZ;
+ CDEBUG(D_INFO,"4: recovery completed in %lus - %d/%d reqs/locks\n",
+ delta, obd->obd_replayed_requests, obd->obd_replayed_locks);
+ if (delta > OBD_RECOVERY_TIME_SOFT) {
+ CWARN("too long recovery - read logs\n");
+ libcfs_debug_dumplog();
+ }
target_finish_recovery(obd);
"dlm namespace %s free waiting on refcount %d\n",
ldlm_ns_name(ns), cfs_atomic_read(&ns->ns_bref));
force_wait:
- if (force)
- lwi = LWI_TIMEOUT(obd_timeout * CFS_HZ / 4, NULL, NULL);
+ if (force)
+ lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
rc = l_wait_event(ns->ns_waitq,
cfs_atomic_read(&ns->ns_bref) == 0, &lwi);
#include <lustre_dlm.h>
#include <lustre_fid.h>
-#define HALF_SEC (CFS_HZ >> 1)
+#define HALF_SEC (HZ >> 1)
#define LFSCK_CHECKPOINT_INTERVAL 60
#define LFSCK_NAMEENTRY_DEAD 1 /* The object has been unlinked. */
des->lp_dir_cookie = cpu_to_le64(src->lp_dir_cookie);
}
-static inline cfs_umode_t lfsck_object_type(const struct dt_object *obj)
+static inline umode_t lfsck_object_type(const struct dt_object *obj)
{
return lu_object_attr(&obj->do_lu);
}
{
lfsck->li_bookmark_ram.lb_speed_limit = limit;
if (limit != LFSCK_SPEED_NO_LIMIT) {
- if (limit > CFS_HZ) {
- lfsck->li_sleep_rate = limit / CFS_HZ;
+ if (limit > HZ) {
+ lfsck->li_sleep_rate = limit / HZ;
lfsck->li_sleep_jif = 1;
} else {
lfsck->li_sleep_rate = 1;
- lfsck->li_sleep_jif = CFS_HZ / limit;
+ lfsck->li_sleep_jif = HZ / limit;
}
} else {
lfsck->li_sleep_jif = 0;
lfsck->li_time_last_checkpoint;
__u64 checked = ns->ln_items_checked + com->lc_new_checked;
__u64 speed = checked;
- __u64 new_checked = com->lc_new_checked * CFS_HZ;
+ __u64 new_checked = com->lc_new_checked * HZ;
__u32 rtime = ns->ln_run_time_phase1 +
cfs_duration_sec(duration + HALF_SEC);
com->lc_new_checked;
__u64 speed1 = ns->ln_items_checked;
__u64 speed2 = checked;
- __u64 new_checked = com->lc_new_checked * CFS_HZ;
+ __u64 new_checked = com->lc_new_checked * HZ;
__u32 rtime = ns->ln_run_time_phase2 +
cfs_duration_sec(duration + HALF_SEC);
struct timeval now;
struct ll_sb_info *sbi = seq->private;
struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- int k;
+ int k;
- cfs_gettimeofday(&now);
+ do_gettimeofday(&now);
- if (!sbi->ll_rw_stats_on) {
- seq_printf(seq, "disabled\n"
+ if (!sbi->ll_rw_stats_on) {
+ seq_printf(seq, "disabled\n"
"write anything in this file to activate, "
"then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
{
- struct timeval now;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
+ struct timeval now;
+ struct ll_sb_info *sbi = seq->private;
+ struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- cfs_gettimeofday(&now);
+ do_gettimeofday(&now);
- if (!sbi->ll_rw_stats_on) {
+ if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
"write anything in this file to activate, "
"then 0 or \"[D/d]isabled\" to deactivate\n");
struct ll_sb_info *sbi = seq->private;
struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
struct ll_rw_process_info *process = sbi->ll_rw_process_info;
- int i;
+ int i;
- cfs_gettimeofday(&now);
+ do_gettimeofday(&now);
- if (!sbi->ll_rw_stats_on) {
+ if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
"write anything in this file to activate, "
"then 0 or \"[D/d]isabled\" to deactivate\n");
}
}
- cfs_gettimeofday(&tv);
- cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+ do_gettimeofday(&tv);
+ cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
init_timer(&ll_capa_timer);
ll_capa_timer.function = ll_capa_timer_callback;
struct dt_allocation_hint *ah,
struct dt_object *parent,
struct dt_object *child,
- cfs_umode_t child_mode)
+ umode_t child_mode)
{
struct lod_device *d = lu2lod_dev(child->do_lu.lo_dev);
struct dt_object *nextp = NULL;
int orph_index_init(const struct lu_env *env, struct mdd_device *mdd);
void orph_index_fini(const struct lu_env *env, struct mdd_device *mdd);
int orph_declare_index_insert(const struct lu_env *, struct mdd_object *,
- cfs_umode_t mode, struct thandle *);
+ umode_t mode, struct thandle *);
int orph_declare_index_delete(const struct lu_env *, struct mdd_object *,
struct thandle *);
return lu_fid_eq(mdo2fid(obj), &LU_OBF_FID);
}
-static inline cfs_umode_t mdd_object_type(const struct mdd_object *obj)
+static inline umode_t mdd_object_type(const struct mdd_object *obj)
{
return lu_object_attr(&obj->mod_obj.mo_lu);
}
!md_capable(uc, CFS_CAP_FOWNER))
RETURN(-EPERM);
- if (la->la_mode == (cfs_umode_t) -1)
- la->la_mode = tmp_la->la_mode;
- else
- la->la_mode = (la->la_mode & S_IALLUGO) |
- (tmp_la->la_mode & ~S_IALLUGO);
+ if (la->la_mode == (umode_t) -1)
+ la->la_mode = tmp_la->la_mode;
+ else
+ la->la_mode = (la->la_mode & S_IALLUGO) |
+ (tmp_la->la_mode & ~S_IALLUGO);
/* Also check the setgid bit! */
if (!lustre_in_group_p(uc, (la->la_valid & LA_GID) ?
int orph_declare_index_insert(const struct lu_env *env,
struct mdd_object *obj,
- cfs_umode_t mode, struct thandle *th)
+ umode_t mode, struct thandle *th)
{
struct mdd_device *mdd = mdo2mdd(&obj->mod_obj);
struct dt_key *key;
static inline void set_capa_key_expiry(struct mdt_device *mdt)
{
- mdt->mdt_ck_expiry = jiffies + mdt->mdt_ck_timeout * CFS_HZ;
+ mdt->mdt_ck_expiry = jiffies + mdt->mdt_ck_timeout * HZ;
}
static void make_capa_key(struct lustre_capa_key *key,
DEBUG_CAPA_KEY(D_SEC, rkey, "new");
}
}
- if (rc) {
- DEBUG_CAPA_KEY(D_ERROR, rkey, "update failed for");
- /* next retry is in 300 sec */
- mdt->mdt_ck_expiry = jiffies + 300 * CFS_HZ;
- }
+ if (rc) {
+ DEBUG_CAPA_KEY(D_ERROR, rkey, "update failed for");
+ /* next retry is in 300 sec */
+ mdt->mdt_ck_expiry = jiffies + 300 * HZ;
+ }
cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
CDEBUG(D_SEC, "mdt_ck_timer %lu\n", mdt->mdt_ck_expiry);
argv[0] = cache->uc_upcall;
snprintf(keystr, sizeof(keystr), LPU64, entry->ue_key);
- cfs_gettimeofday(&start);
- rc = USERMODEHELPER(argv[0], argv, envp);
- cfs_gettimeofday(&end);
- if (rc < 0) {
+ do_gettimeofday(&start);
+ rc = USERMODEHELPER(argv[0], argv, envp);
+ do_gettimeofday(&end);
+ if (rc < 0) {
CERROR("%s: error invoking upcall %s %s %s: rc %d; "
"check /proc/fs/lustre/mdt/%s/identity_upcall, "
"time %ldus\n",
rq_state &= ~(RQ_NOW | RQ_LATER);
spin_unlock(&config_list_lock);
- /* Always wait a few seconds to allow the server who
- caused the lock revocation to finish its setup, plus some
- random so everyone doesn't try to reconnect at once. */
- to = MGC_TIMEOUT_MIN_SECONDS * CFS_HZ;
- to += rand * CFS_HZ / 100; /* rand is centi-seconds */
- lwi = LWI_TIMEOUT(to, NULL, NULL);
- l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi);
+ /* Always wait a few seconds to allow the server who
+ caused the lock revocation to finish its setup, plus some
+ random so everyone doesn't try to reconnect at once. */
+ to = MGC_TIMEOUT_MIN_SECONDS * HZ;
+ to += rand * HZ / 100; /* rand is centi-seconds */
+ lwi = LWI_TIMEOUT(to, NULL, NULL);
+ l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi);
/*
* iterate & processing through the list. for each cld, process
if (idx == 0) {
struct timeval now;
- cfs_gettimeofday(&now);
+ do_gettimeofday(&now);
rc = seq_printf(p, "%-25s %lu.%lu secs.usecs\n",
"snapshot_time", now.tv_sec, now.tv_usec);
if (rc < 0)
spin_lock_init(&bucket->lock);
}
- /** bug 21430: add randomness to the initial base */
- cfs_get_random_bytes(seed, sizeof(seed));
- cfs_gettimeofday(&tv);
- cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+ /** bug 21430: add randomness to the initial base */
+ cfs_get_random_bytes(seed, sizeof(seed));
+ do_gettimeofday(&tv);
+ cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
cfs_get_random_bytes(&handle_base, sizeof(handle_base));
LASSERT(handle_base != 0ULL);
RETURN(-EINVAL);
}
- cfs_gettimeofday(&start);
+ do_gettimeofday(&start);
rc = USERMODEHELPER(argv[0], argv, NULL);
- cfs_gettimeofday(&end);
+ do_gettimeofday(&end);
if (rc < 0) {
CERROR("lctl: error invoking upcall %s %s %s: rc = %d; "
{
struct echo_thread_info *info = echo_env_info(env);
struct lu_buf *buf = &info->eti_buf;
- cfs_umode_t mode = lu_object_attr(&next->mo_lu);
+ umode_t mode = lu_object_attr(&next->mo_lu);
int need = ma->ma_need;
int rc = 0, rc2;
struct ofd_device *ofd = ofd_dev(obd->obd_lu_dev);
int rc;
- rc = snprintf(page, count, "%ld\n", ofd->ofd_fmd_max_age / CFS_HZ);
+ rc = snprintf(page, count, "%ld\n", ofd->ofd_fmd_max_age / HZ);
return rc;
}
if (val > 65536 || val < 1)
return -EINVAL;
- ofd->ofd_fmd_max_age = val * CFS_HZ;
+ ofd->ofd_fmd_max_age = val * HZ;
return count;
}
};
#define OFD_FMD_MAX_NUM_DEFAULT 128
-#define OFD_FMD_MAX_AGE_DEFAULT ((obd_timeout + 10) * CFS_HZ)
+#define OFD_FMD_MAX_AGE_DEFAULT ((obd_timeout + 10) * HZ)
enum {
LPROC_OFD_READ_BYTES = 0,
struct obd_device *dev = seq->private;
struct client_obd *cli = &dev->u.cli;
unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
- int i;
+ int i;
- cfs_gettimeofday(&now);
+ do_gettimeofday(&now);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ client_obd_list_lock(&cli->cl_loi_list_lock);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
now.tv_sec, now.tv_usec);
static int osc_stats_seq_show(struct seq_file *seq, void *v)
{
- struct timeval now;
- struct obd_device *dev = seq->private;
- struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
-
- cfs_gettimeofday(&now);
-
- seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
- now.tv_sec, now.tv_usec);
- seq_printf(seq, "lockless_write_bytes\t\t"LPU64"\n",
- stats->os_lockless_writes);
- seq_printf(seq, "lockless_read_bytes\t\t"LPU64"\n",
- stats->os_lockless_reads);
- seq_printf(seq, "lockless_truncate\t\t"LPU64"\n",
- stats->os_lockless_truncates);
- return 0;
+ struct timeval now;
+ struct obd_device *dev = seq->private;
+ struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
+
+ do_gettimeofday(&now);
+
+ seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
+ now.tv_sec, now.tv_usec);
+ seq_printf(seq, "lockless_write_bytes\t\t"LPU64"\n",
+ stats->os_lockless_writes);
+ seq_printf(seq, "lockless_read_bytes\t\t"LPU64"\n",
+ stats->os_lockless_reads);
+ seq_printf(seq, "lockless_truncate\t\t"LPU64"\n",
+ stats->os_lockless_truncates);
+ return 0;
}
static ssize_t osc_stats_seq_write(struct file *file, const char *buf,
}
static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
- cfs_umode_t mode,
- struct dt_allocation_hint *hint,
- struct thandle *th)
+ umode_t mode, struct dt_allocation_hint *hint,
+ struct thandle *th)
{
int result;
struct osd_device *osd = osd_obj2dev(obj);
}
static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th)
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *th)
{
- cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
- int result;
+ umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
+ int result;
- LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_inode == NULL);
+ LINVRNT(osd_invariant(obj));
+ LASSERT(obj->oo_inode == NULL);
LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode));
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
struct dt_object *parent, struct dt_object *child,
- cfs_umode_t child_mode)
+ umode_t child_mode)
{
LASSERT(ah);
if (isize > osd->od_readcache_max_filesize)
cache = 0;
- cfs_gettimeofday(&start);
- for (i = 0; i < npages; i++) {
+ do_gettimeofday(&start);
+ for (i = 0; i < npages; i++) {
if (cache == 0)
generic_error_remove_page(inode->i_mapping,
if (off)
memset(p + off, 0, PAGE_CACHE_SIZE - off);
kunmap(lnb[i].page);
- }
- }
- cfs_gettimeofday(&end);
- timediff = cfs_timeval_sub(&end, &start, NULL);
- lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
+ }
+ }
+ do_gettimeofday(&end);
+ timediff = cfs_timeval_sub(&end, &start, NULL);
+ lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
if (iobuf->dr_npages) {
rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
if (i_size_read(inode) > osd->od_readcache_max_filesize)
cache = 0;
- cfs_gettimeofday(&start);
- for (i = 0; i < npages; i++) {
+ do_gettimeofday(&start);
+ for (i = 0; i < npages; i++) {
if (i_size_read(inode) <= lnb[i].lnb_file_offset)
/* If there's no more data, abort early.
LPROC_OSD_CACHE_MISS, 1);
osd_iobuf_add_page(iobuf, lnb[i].page);
}
- if (cache == 0)
- generic_error_remove_page(inode->i_mapping,lnb[i].page);
- }
- cfs_gettimeofday(&end);
- timediff = cfs_timeval_sub(&end, &start, NULL);
- lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
+ if (cache == 0)
+ generic_error_remove_page(inode->i_mapping,lnb[i].page);
+ }
+ do_gettimeofday(&end);
+ timediff = cfs_timeval_sub(&end, &start, NULL);
+ lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
if (iobuf->dr_npages) {
rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
static void brw_stats_show(struct seq_file *seq, struct brw_stats *brw_stats)
{
- struct timeval now;
+ struct timeval now;
- /* this sampling races with updates */
- cfs_gettimeofday(&now);
- seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
- now.tv_sec, now.tv_usec);
+ /* this sampling races with updates */
+ do_gettimeofday(&now);
+ seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
+ now.tv_sec, now.tv_usec);
display_brw_stats(seq, "pages per bulk r/w", "rpcs",
&brw_stats->hist[BRW_R_PAGES],
&brw_stats->hist[BRW_R_RPC_HIST],
&brw_stats->hist[BRW_W_RPC_HIST], 0);
- display_brw_stats(seq, "I/O time (1/1000s)", "ios",
- &brw_stats->hist[BRW_R_IO_TIME],
- &brw_stats->hist[BRW_W_IO_TIME], 1000 / CFS_HZ);
+ display_brw_stats(seq, "I/O time (1/1000s)", "ios",
+ &brw_stats->hist[BRW_R_IO_TIME],
+ &brw_stats->hist[BRW_W_IO_TIME], 1000 / HZ);
display_brw_stats(seq, "disk I/O size", "ios",
&brw_stats->hist[BRW_R_DISK_IOSIZE],
#include "osd_oi.h"
#include "osd_scrub.h"
-#define HALF_SEC (CFS_HZ >> 1)
+#define HALF_SEC (HZ >> 1)
#define OSD_OTABLE_MAX_HASH 0x00000000ffffffffULL
if (thread_is_running(&scrub->os_thread)) {
cfs_duration_t duration = cfs_time_current() -
scrub->os_time_last_checkpoint;
- __u64 new_checked = scrub->os_new_checked * CFS_HZ;
+ __u64 new_checked = scrub->os_new_checked * HZ;
__u32 rtime = sf->sf_run_time +
cfs_duration_sec(duration + HALF_SEC);
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
struct dt_object *parent, struct dt_object *child,
- cfs_umode_t child_mode)
+ umode_t child_mode)
{
LASSERT(ah);
struct dt_allocation_hint *ah,
struct dt_object *parent,
struct dt_object *child,
- cfs_umode_t child_mode)
+ umode_t child_mode)
{
LASSERT(ah);
RETURN(0);
}
- cfs_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
- if (obd->obd_svc_stats != NULL) {
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
- timediff);
- ptlrpc_lprocfs_rpc_sent(req, timediff);
- }
+ do_gettimeofday(&work_start);
+ timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
+ if (obd->obd_svc_stats != NULL) {
+ lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
+ timediff);
+ ptlrpc_lprocfs_rpc_sent(req, timediff);
+ }
if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
* size to non-zero if this was a successful receive. */
req->rq_xid = ev->match_bits;
req->rq_reqbuf = ev->md.start + ev->offset;
- if (ev->type == LNET_EVENT_PUT && ev->status == 0)
- req->rq_reqdata_len = ev->mlength;
- cfs_gettimeofday(&req->rq_arrival_time);
- req->rq_peer = ev->initiator;
- req->rq_self = ev->target.nid;
- req->rq_rqbd = rqbd;
+ if (ev->type == LNET_EVENT_PUT && ev->status == 0)
+ req->rq_reqdata_len = ev->mlength;
+ do_gettimeofday(&req->rq_arrival_time);
+ req->rq_peer = ev->initiator;
+ req->rq_self = ev->target.nid;
+ req->rq_rqbd = rqbd;
req->rq_phase = RQ_PHASE_NEW;
spin_lock_init(&req->rq_lock);
CFS_INIT_LIST_HEAD(&req->rq_timed_list);
key_revoke_locked(key);
}
-static
-void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
+static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
{
- struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
- struct timer_list *timer = gctx_kr->gck_timer;
+ struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
+ struct timer_list *timer = gctx_kr->gck_timer;
- LASSERT(timer);
+ LASSERT(timer);
- CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
- timeout = timeout * CFS_HZ + cfs_time_current();
+ CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
+ timeout = timeout * HZ + cfs_time_current();
- init_timer(timer);
- timer->expires = timeout;
- timer->data = (unsigned long ) ctx;
- timer->function = ctx_upcall_timeout_kr;
+ init_timer(timer);
+ timer->expires = timeout;
+ timer->data = (unsigned long ) ctx;
+ timer->function = ctx_upcall_timeout_kr;
- add_timer(timer);
+ add_timer(timer);
}
/*
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
read_unlock(&rsi_cache.hash_lock);
- if (valid == 0)
- cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
- CFS_HZ);
+ if (valid == 0)
+ cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
+ HZ);
- cache_get(&rsip->h);
- goto cache_check;
+ cache_get(&rsip->h);
+ goto cache_check;
}
CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
break;
* upcall issued before the channel be opened thus nfsv4 cache code will
* drop the request direclty, thus lead to unnecessary recovery time.
* here we wait at miximum 1.5 seconds. */
- for (i = 0; i < 6; i++) {
- if (atomic_read(&rsi_cache.readers) > 0)
- break;
- cfs_set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(CFS_HZ >= 4);
- cfs_schedule_timeout(CFS_HZ / 4);
- }
+ for (i = 0; i < 6; i++) {
+ if (atomic_read(&rsi_cache.readers) > 0)
+ break;
+ cfs_set_current_state(TASK_UNINTERRUPTIBLE);
+ LASSERT(HZ >= 4);
+ cfs_schedule_timeout(HZ / 4);
+ }
if (atomic_read(&rsi_cache.readers) == 0)
CWARN("Init channel is not opened by lsvcgssd, following "
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
cfs_atomic_read(&request->rq_import->imp_inflight));
- OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- cfs_gettimeofday(&request->rq_arrival_time);
- request->rq_sent = cfs_time_current_sec();
- /* We give the server rq_timeout secs to process the req, and
- add the network latency for our local timeout. */
+ do_gettimeofday(&request->rq_arrival_time);
+ request->rq_sent = cfs_time_current_sec();
+ /* We give the server rq_timeout secs to process the req, and
+ add the network latency for our local timeout. */
request->rq_deadline = request->rq_sent + request->rq_timeout +
ptlrpc_at_get_net_latency(request);
"ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
newctx, newctx->cc_flags);
- cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
- CFS_HZ);
- } else {
+ cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ HZ);
+ } else {
/*
* it's possible newctx == oldctx if we're switching
* subflavor with the same sec.
req->rq_restart = 0;
spin_unlock(&req->rq_lock);
- lwi = LWI_TIMEOUT_INTR(timeout * CFS_HZ, ctx_refresh_timeout,
- ctx_refresh_interrupt, req);
- rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
+ lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
+ ctx_refresh_interrupt, req);
+ rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
/*
* following cases could lead us here:
page_pools.epp_st_grow_fails,
page_pools.epp_st_shrinks,
page_pools.epp_st_access,
- page_pools.epp_st_missings,
- page_pools.epp_st_lowfree,
- page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait, CFS_HZ
- );
+ page_pools.epp_st_missings,
+ page_pools.epp_st_lowfree,
+ page_pools.epp_st_max_wqlen,
+ page_pools.epp_st_max_wait, HZ
+ );
spin_unlock(&page_pools.epp_lock);
return rc;
CFS_TIME_T"/%d\n",
page_pools.epp_st_max_pages, page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
- page_pools.epp_st_shrinks, page_pools.epp_st_access,
- page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait, CFS_HZ);
- }
+ page_pools.epp_st_shrinks, page_pools.epp_st_access,
+ page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
+ page_pools.epp_st_max_wait, HZ);
+ }
}
#else /* !__KERNEL__ */
}
mutex_unlock(&sec_gc_mutex);
- /* check ctx list again before sleep */
- sec_process_ctx_list();
-
- lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopping(thread) ||
- thread_is_signal(thread),
- &lwi);
+ /* check ctx list again before sleep */
+ sec_process_ctx_list();
+
+ lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopping(thread) ||
+ thread_is_signal(thread),
+ &lwi);
if (thread_test_and_clear_flags(thread, SVC_STOPPING))
break;
ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
- if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
- libcfs_debug_dumplog();
+ if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
+ libcfs_debug_dumplog();
- cfs_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
- if (likely(svc->srv_stats != NULL)) {
+ do_gettimeofday(&work_start);
+ timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
+ if (likely(svc->srv_stats != NULL)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
timediff);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
request->rq_deadline));
}
- cfs_gettimeofday(&work_end);
- timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
+ do_gettimeofday(&work_end);
+ timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
"%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
"%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
struct timeval right_now;
long timediff;
- cfs_gettimeofday(&right_now);
+ do_gettimeofday(&right_now);
spin_lock(&svcpt->scp_req_lock);
- /* How long has the next entry been waiting? */
+ /* How long has the next entry been waiting? */
if (ptlrpc_server_high_pending(svcpt, true))
request = ptlrpc_nrs_req_peek_nolock(svcpt, true);
else if (ptlrpc_server_normal_pending(svcpt, true))