*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/sched.h>
-
-#include <libcfs/libcfs.h>
+#ifdef HAVE_SCHED_HEADERS
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+#endif
+#include <linux/uaccess.h>
#if defined(CONFIG_KGDB)
#include <asm/kgdb.h>
#endif
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
+#include <libcfs/linux/linux-time.h>
-void
-cfs_waitq_init(cfs_waitq_t *waitq)
+#ifndef HAVE_KTIME_GET_TS64
+void ktime_get_ts64(struct timespec64 *ts)
{
- init_waitqueue_head(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_init);
+ struct timespec now;
-void
-cfs_waitlink_init(cfs_waitlink_t *link)
-{
- init_waitqueue_entry(LINUX_WAITQ(link), current);
+ ktime_get_ts(&now);
+ *ts = timespec_to_timespec64(now);
}
-EXPORT_SYMBOL(cfs_waitlink_init);
+EXPORT_SYMBOL(ktime_get_ts64);
+#endif /* HAVE_KTIME_GET_TS64 */
-void
-cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link)
+#ifndef HAVE_KTIME_GET_REAL_TS64
+void ktime_get_real_ts64(struct timespec64 *ts)
{
- add_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add);
-
-#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+ struct timespec now;
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait)
-{
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue(q, wait);
+ getnstimeofday(&now);
+ *ts = timespec_to_timespec64(now);
}
+EXPORT_SYMBOL(ktime_get_real_ts64);
+#endif /* HAVE_KTIME_GET_REAL_TS64 */
-#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
-
-void
-cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
-{
- add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive);
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if cfs_waitq_add_exclusive_head is used.
+#ifndef HAVE_KTIME_GET_REAL_SECONDS
+/*
+ * Get the seconds portion of CLOCK_REALTIME (wall clock).
+ * This is the clock that can be altered by NTP and is
+ * independent of a reboot.
*/
-void
-cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
- __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
- spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
-
-void
-cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
- remove_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_del);
-
-int
-cfs_waitq_active(cfs_waitq_t *waitq)
+time64_t ktime_get_real_seconds(void)
{
- return waitqueue_active(LINUX_WAITQ_HEAD(waitq));
+ return (time64_t)get_seconds();
}
-EXPORT_SYMBOL(cfs_waitq_active);
+EXPORT_SYMBOL(ktime_get_real_seconds);
+#endif /* HAVE_KTIME_GET_REAL_SECONDS */
-void
-cfs_waitq_signal(cfs_waitq_t *waitq)
-{
- wake_up(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_signal);
-
-void
-cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
-{
- wake_up_nr(LINUX_WAITQ_HEAD(waitq), nr);
-}
-EXPORT_SYMBOL(cfs_waitq_signal_nr);
-
-void
-cfs_waitq_broadcast(cfs_waitq_t *waitq)
-{
- wake_up_all(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_broadcast);
-
-void
-cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
-{
- schedule();
-}
-EXPORT_SYMBOL(cfs_waitq_wait);
-
-int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
- int64_t timeout)
-{
- return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_waitq_timedwait);
-
-void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
-{
- set_current_state(state);
- schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
-
-void
-cfs_schedule_timeout(int64_t timeout)
-{
- schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout);
-
-void
-cfs_schedule(void)
+#ifndef HAVE_KTIME_GET_SECONDS
+/*
+ * Get the seconds portion of CLOCK_MONOTONIC
+ * This clock is immutable and is reset across
+ * reboots. For older platforms this is a
+ * wrapper around get_seconds which is valid
+ * until 2038. By that time this will be gone
+ * one would hope.
+ */
+time64_t ktime_get_seconds(void)
{
- schedule();
-}
-EXPORT_SYMBOL(cfs_schedule);
+ struct timespec64 now;
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ticks);
+ ktime_get_ts64(&now);
+ return now.tv_sec;
}
-EXPORT_SYMBOL(cfs_pause);
+EXPORT_SYMBOL(ktime_get_seconds);
+#endif /* HAVE_KTIME_GET_SECONDS */
-int cfs_need_resched(void)
+int cfs_kernel_write(struct file *filp, const void *buf, size_t count,
+ loff_t *pos)
{
- return need_resched();
-}
-EXPORT_SYMBOL(cfs_need_resched);
+#ifdef HAVE_NEW_KERNEL_WRITE
+ return kernel_write(filp, buf, count, pos);
+#else
+ mm_segment_t __old_fs = get_fs();
+ int rc;
-void cfs_cond_resched(void)
-{
- cond_resched();
-}
-EXPORT_SYMBOL(cfs_cond_resched);
+ set_fs(get_ds());
+ rc = vfs_write(filp, (__force const char __user *)buf, count, pos);
+ set_fs(__old_fs);
-void cfs_init_timer(cfs_timer_t *t)
-{
- init_timer(t);
+ return rc;
+#endif
}
-EXPORT_SYMBOL(cfs_init_timer);
+EXPORT_SYMBOL(cfs_kernel_write);
-void cfs_timer_init(cfs_timer_t *t, cfs_timer_func_t *func, void *arg)
+ssize_t cfs_kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
- init_timer(t);
- t->function = func;
- t->data = (unsigned long)arg;
-}
-EXPORT_SYMBOL(cfs_timer_init);
+#ifdef HAVE_KERNEL_READ_LAST_POSP
+ return kernel_read(file, buf, count, pos);
+#else
+ ssize_t size = kernel_read(file, *pos, buf, count);
-void cfs_timer_done(cfs_timer_t *t)
-{
- return;
+ if (size > 0)
+ *pos += size;
+ return size;
+#endif
}
-EXPORT_SYMBOL(cfs_timer_done);
+EXPORT_SYMBOL(cfs_kernel_read);
-void cfs_timer_arm(cfs_timer_t *t, cfs_time_t deadline)
+#ifndef HAVE_KSET_FIND_OBJ
+struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
- mod_timer(t, deadline);
-}
-EXPORT_SYMBOL(cfs_timer_arm);
+ struct kobject *ret = NULL;
+ struct kobject *k;
-void cfs_timer_disarm(cfs_timer_t *t)
-{
- del_timer(t);
-}
-EXPORT_SYMBOL(cfs_timer_disarm);
+ spin_lock(&kset->list_lock);
-int cfs_timer_is_armed(cfs_timer_t *t)
-{
- return timer_pending(t);
-}
-EXPORT_SYMBOL(cfs_timer_is_armed);
+ list_for_each_entry(k, &kset->list, entry) {
+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+ if (kref_get_unless_zero(&k->kref))
+ ret = k;
+ break;
+ }
+ }
-cfs_time_t cfs_timer_deadline(cfs_timer_t *t)
-{
- return t->expires;
+ spin_unlock(&kset->list_lock);
+ return ret;
}
-EXPORT_SYMBOL(cfs_timer_deadline);
-
-void cfs_enter_debugger(void)
-{
-#if defined(CONFIG_KGDB)
-// BREAKPOINT();
-#elif defined(__arch_um__)
- asm("int $3");
-#else
- /* nothing */
+EXPORT_SYMBOL_GPL(kset_find_obj);
#endif
-}
-void cfs_daemonize(char *str) {
- unsigned long flags;
+#ifndef HAVE_KSTRTOBOOL_FROM_USER
+int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
+{
+ /* Longest string needed to differentiate, newline, terminator */
+ char buf[4];
- daemonize(str);
- SIGNAL_MASK_LOCK(current, flags);
- sigfillset(&current->blocked);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
-}
-
-int cfs_daemonize_ctxt(char *str) {
-
- cfs_daemonize(str);
-#ifndef HAVE_UNSHARE_FS_STRUCT
- {
- struct task_struct *tsk = current;
- struct fs_struct *fs = NULL;
- fs = copy_fs_struct(tsk->fs);
- if (fs == NULL)
- return -ENOMEM;
- exit_fs(tsk);
- tsk->fs = fs;
- }
-#else
- unshare_fs_struct();
-#endif
- return 0;
+ count = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, s, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ return strtobool(buf, res);
}
+EXPORT_SYMBOL(kstrtobool_from_user);
+#endif /* !HAVE_KSTRTOBOOL_FROM_USER */
sigset_t
cfs_block_allsigs(void)
{
- unsigned long flags;
- sigset_t old;
-
- SIGNAL_MASK_LOCK(current, flags);
- old = current->blocked;
- sigfillset(&current->blocked);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
+ unsigned long flags;
+ sigset_t old;
- return old;
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ old = current->blocked;
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ return old;
}
+EXPORT_SYMBOL(cfs_block_allsigs);
sigset_t cfs_block_sigs(unsigned long sigs)
{
unsigned long flags;
sigset_t old;
- SIGNAL_MASK_LOCK(current, flags);
+ spin_lock_irqsave(&current->sighand->siglock, flags);
old = current->blocked;
sigaddsetmask(&current->blocked, sigs);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
return old;
}
+EXPORT_SYMBOL(cfs_block_sigs);
/* Block all signals except for the @sigs */
sigset_t cfs_block_sigsinv(unsigned long sigs)
unsigned long flags;
sigset_t old;
- SIGNAL_MASK_LOCK(current, flags);
+ spin_lock_irqsave(&current->sighand->siglock, flags);
old = current->blocked;
sigaddsetmask(&current->blocked, ~sigs);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
-
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
return old;
}
+EXPORT_SYMBOL(cfs_block_sigsinv);
void
-cfs_restore_sigs (cfs_sigset_t old)
+cfs_restore_sigs(sigset_t old)
{
- unsigned long flags;
-
- SIGNAL_MASK_LOCK(current, flags);
- current->blocked = old;
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
-}
+ unsigned long flags;
-int
-cfs_signal_pending(void)
-{
- return signal_pending(current);
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ current->blocked = old;
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
+EXPORT_SYMBOL(cfs_restore_sigs);
void
cfs_clear_sigpending(void)
{
- unsigned long flags;
-
- SIGNAL_MASK_LOCK(current, flags);
- CLEAR_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
-}
-
-int
-libcfs_arch_init(void)
-{
- return 0;
-}
+ unsigned long flags;
-void
-libcfs_arch_cleanup(void)
-{
- return;
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ clear_tsk_thread_flag(current, TIF_SIGPENDING);
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
-
-EXPORT_SYMBOL(libcfs_arch_init);
-EXPORT_SYMBOL(libcfs_arch_cleanup);
-EXPORT_SYMBOL(cfs_enter_debugger);
-EXPORT_SYMBOL(cfs_daemonize);
-EXPORT_SYMBOL(cfs_daemonize_ctxt);
-EXPORT_SYMBOL(cfs_block_allsigs);
-EXPORT_SYMBOL(cfs_block_sigs);
-EXPORT_SYMBOL(cfs_block_sigsinv);
-EXPORT_SYMBOL(cfs_restore_sigs);
-EXPORT_SYMBOL(cfs_signal_pending);
EXPORT_SYMBOL(cfs_clear_sigpending);