Whamcloud - gitweb
LU-13783 libcfs: provide fallback kallsyms_lookup_name()
[fs/lustre-release.git] / libcfs / libcfs / linux / linux-prim.c
index e399b77..0940c80 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/kallsyms.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/fs_struct.h>
+#include <linux/fs.h>
 #include <linux/sched.h>
-
-#include <libcfs/libcfs.h>
+#ifdef HAVE_SCHED_HEADERS
+#include <linux/sched/mm.h>
+#endif
+#include <linux/uaccess.h>
 
 #if defined(CONFIG_KGDB)
 #include <asm/kgdb.h>
 #endif
 
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
+#include <libcfs/linux/linux-time.h>
+#include <libcfs/linux/linux-wait.h>
 
-void
-cfs_waitq_init(cfs_waitq_t *waitq)
+#ifndef HAVE_KTIME_GET_TS64
+void ktime_get_ts64(struct timespec64 *ts)
 {
-        init_waitqueue_head(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_init);
+       struct timespec now;
 
-void
-cfs_waitlink_init(cfs_waitlink_t *link)
-{
-        init_waitqueue_entry(LINUX_WAITQ(link), current);
+       ktime_get_ts(&now);
+       *ts = timespec_to_timespec64(now);
 }
-EXPORT_SYMBOL(cfs_waitlink_init);
+EXPORT_SYMBOL(ktime_get_ts64);
+#endif /* HAVE_KTIME_GET_TS64 */
 
-void
-cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link)
+#ifndef HAVE_KTIME_GET_REAL_TS64
+void ktime_get_real_ts64(struct timespec64 *ts)
 {
-        add_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add);
-
-#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+       struct timespec now;
 
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
-                                              wait_queue_t *wait)
-{
-        wait->flags |= WQ_FLAG_EXCLUSIVE;
-        __add_wait_queue(q, wait);
+       getnstimeofday(&now);
+       *ts = timespec_to_timespec64(now);
 }
+EXPORT_SYMBOL(ktime_get_real_ts64);
+#endif /* HAVE_KTIME_GET_REAL_TS64 */
 
-#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
-
-void
-cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
-                        cfs_waitlink_t *link)
-{
-        add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive);
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if cfs_waitq_add_exclusive_head is used.
+#ifndef HAVE_KTIME_GET_REAL_SECONDS
+/*
+ * Get the seconds portion of CLOCK_REALTIME (wall clock).
+ * This is the clock that can be altered by NTP and is
+ * independent of a reboot.
  */
-void
-cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-       __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-       spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
-
-void
-cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-        remove_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_del);
-
-int
-cfs_waitq_active(cfs_waitq_t *waitq)
-{
-        return waitqueue_active(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_active);
-
-void
-cfs_waitq_signal(cfs_waitq_t *waitq)
-{
-        wake_up(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_signal);
-
-void
-cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
-{
-        wake_up_nr(LINUX_WAITQ_HEAD(waitq), nr);
-}
-EXPORT_SYMBOL(cfs_waitq_signal_nr);
-
-void
-cfs_waitq_broadcast(cfs_waitq_t *waitq)
-{
-        wake_up_all(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_broadcast);
-
-void
-cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
-{
-        schedule();
-}
-EXPORT_SYMBOL(cfs_waitq_wait);
-
-int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
-                    int64_t timeout)
-{
-        return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_waitq_timedwait);
-
-void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
-{
-        set_current_state(state);
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
-
-void
-cfs_schedule_timeout(int64_t timeout)
-{
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout);
-
-void
-cfs_schedule(void)
-{
-        schedule();
-}
-EXPORT_SYMBOL(cfs_schedule);
-
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
-        set_current_state(TASK_UNINTERRUPTIBLE);
-        schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
-int cfs_need_resched(void)
-{
-        return need_resched();
-}
-EXPORT_SYMBOL(cfs_need_resched);
-
-void cfs_cond_resched(void)
+time64_t ktime_get_real_seconds(void)
 {
-        cond_resched();
+       return (time64_t)get_seconds();
 }
-EXPORT_SYMBOL(cfs_cond_resched);
+EXPORT_SYMBOL(ktime_get_real_seconds);
+#endif /* HAVE_KTIME_GET_REAL_SECONDS */
 
-void cfs_init_timer(cfs_timer_t *t)
-{
-        init_timer(t);
-}
-EXPORT_SYMBOL(cfs_init_timer);
-
-void cfs_timer_init(cfs_timer_t *t, cfs_timer_func_t *func, void *arg)
+#ifndef HAVE_KTIME_GET_SECONDS
+/*
+ * Get the seconds portion of CLOCK_MONOTONIC.
+ * This clock cannot be stepped by NTP, but it
+ * does reset across reboots. On older platforms
+ * this fallback wraps ktime_get_ts64(), which is
+ * valid until 2038. By that time this will be
+ * gone, one would hope.
+ */
+time64_t ktime_get_seconds(void)
 {
-        init_timer(t);
-        t->function = func;
-        t->data = (unsigned long)arg;
-}
-EXPORT_SYMBOL(cfs_timer_init);
+       struct timespec64 now;
 
-void cfs_timer_done(cfs_timer_t *t)
-{
-        return;
+       ktime_get_ts64(&now);
+       return now.tv_sec;
 }
-EXPORT_SYMBOL(cfs_timer_done);
+EXPORT_SYMBOL(ktime_get_seconds);
+#endif /* HAVE_KTIME_GET_SECONDS */
 
-void cfs_timer_arm(cfs_timer_t *t, cfs_time_t deadline)
-{
-        mod_timer(t, deadline);
-}
-EXPORT_SYMBOL(cfs_timer_arm);
+static int (*cfs_apply_workqueue_attrs_t)(struct workqueue_struct *wq,
+                                         const struct workqueue_attrs *attrs);
 
-void cfs_timer_disarm(cfs_timer_t *t)
+int cfs_apply_workqueue_attrs(struct workqueue_struct *wq,
+                             const struct workqueue_attrs *attrs)
 {
-        del_timer(t);
+       if (cfs_apply_workqueue_attrs_t)
+               return cfs_apply_workqueue_attrs_t(wq, attrs);
+       return 0;
 }
-EXPORT_SYMBOL(cfs_timer_disarm);
+EXPORT_SYMBOL_GPL(cfs_apply_workqueue_attrs);
 
-int  cfs_timer_is_armed(cfs_timer_t *t)
-{
-        return timer_pending(t);
-}
-EXPORT_SYMBOL(cfs_timer_is_armed);
-
-cfs_time_t cfs_timer_deadline(cfs_timer_t *t)
-{
-        return t->expires;
-}
-EXPORT_SYMBOL(cfs_timer_deadline);
+#ifndef HAVE_XARRAY_SUPPORT
+struct kmem_cache (*radix_tree_node_cachep);
+#endif
 
-void cfs_enter_debugger(void)
+void __init cfs_arch_init(void)
 {
-#if defined(CONFIG_KGDB)
-//        BREAKPOINT();
-#else
-        /* nothing */
+#ifndef HAVE_WAIT_VAR_EVENT
+       wait_bit_init();
+#endif
+       cfs_apply_workqueue_attrs_t =
+               (void *)kallsyms_lookup_name("apply_workqueue_attrs");
+#ifndef HAVE_XARRAY_SUPPORT
+       radix_tree_node_cachep =
+               (void *)kallsyms_lookup_name("radix_tree_node_cachep");
 #endif
 }
 
-sigset_t
-cfs_block_allsigs(void)
+int cfs_kernel_write(struct file *filp, const void *buf, size_t count,
+                    loff_t *pos)
 {
-        unsigned long          flags;
-        sigset_t        old;
-
-        SIGNAL_MASK_LOCK(current, flags);
-        old = current->blocked;
-        sigfillset(&current->blocked);
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
-
-        return old;
-}
+#ifdef HAVE_NEW_KERNEL_WRITE
+       return kernel_write(filp, buf, count, pos);
+#else
+       mm_segment_t __old_fs = get_fs();
+       int rc;
 
-sigset_t cfs_block_sigs(unsigned long sigs)
-{
-       unsigned long  flags;
-       sigset_t        old;
+       set_fs(KERNEL_DS);
+       rc = vfs_write(filp, (__force const char __user *)buf, count, pos);
+       set_fs(__old_fs);
 
-       SIGNAL_MASK_LOCK(current, flags);
-       old = current->blocked;
-       sigaddsetmask(&current->blocked, sigs);
-       RECALC_SIGPENDING;
-       SIGNAL_MASK_UNLOCK(current, flags);
-       return old;
+       return rc;
+#endif
 }
+EXPORT_SYMBOL(cfs_kernel_write);
 
-/* Block all signals except for the @sigs */
-sigset_t cfs_block_sigsinv(unsigned long sigs)
+ssize_t cfs_kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
 {
-       unsigned long flags;
-       sigset_t old;
-
-       SIGNAL_MASK_LOCK(current, flags);
-       old = current->blocked;
-       sigaddsetmask(&current->blocked, ~sigs);
-       RECALC_SIGPENDING;
-       SIGNAL_MASK_UNLOCK(current, flags);
+#ifdef HAVE_KERNEL_READ_LAST_POSP
+       return kernel_read(file, buf, count, pos);
+#else
+       ssize_t size = kernel_read(file, *pos, buf, count);
 
-       return old;
+       if (size > 0)
+               *pos += size;
+       return size;
+#endif
 }
+EXPORT_SYMBOL(cfs_kernel_read);
 
-void
-cfs_restore_sigs (cfs_sigset_t old)
+#ifndef HAVE_KSET_FIND_OBJ
+struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
-        unsigned long  flags;
+       struct kobject *ret = NULL;
+       struct kobject *k;
 
-        SIGNAL_MASK_LOCK(current, flags);
-        current->blocked = old;
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
-}
+       spin_lock(&kset->list_lock);
 
-int
-cfs_signal_pending(void)
-{
-        return signal_pending(current);
-}
+       list_for_each_entry(k, &kset->list, entry) {
+               if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+                       if (kref_get_unless_zero(&k->kref))
+                               ret = k;
+                       break;
+               }
+       }
 
-void
-cfs_clear_sigpending(void)
-{
-        unsigned long flags;
-
-        SIGNAL_MASK_LOCK(current, flags);
-        CLEAR_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
+       spin_unlock(&kset->list_lock);
+       return ret;
 }
+EXPORT_SYMBOL_GPL(kset_find_obj);
+#endif
 
-int
-libcfs_arch_init(void)
+#ifndef HAVE_KSTRTOBOOL_FROM_USER
+int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
 {
-        return 0;
-}
+       /* Longest string needed to differentiate, newline, terminator */
+       char buf[4];
 
-void
-libcfs_arch_cleanup(void)
-{
-        return;
+       count = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, s, count))
+               return -EFAULT;
+       buf[count] = '\0';
+       return strtobool(buf, res);
 }
-
-EXPORT_SYMBOL(libcfs_arch_init);
-EXPORT_SYMBOL(libcfs_arch_cleanup);
-EXPORT_SYMBOL(cfs_enter_debugger);
-EXPORT_SYMBOL(cfs_block_allsigs);
-EXPORT_SYMBOL(cfs_block_sigs);
-EXPORT_SYMBOL(cfs_block_sigsinv);
-EXPORT_SYMBOL(cfs_restore_sigs);
-EXPORT_SYMBOL(cfs_signal_pending);
-EXPORT_SYMBOL(cfs_clear_sigpending);
+EXPORT_SYMBOL(kstrtobool_from_user);
+#endif /* !HAVE_KSTRTOBOOL_FROM_USER */