X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=libcfs%2Flibcfs%2Flinux%2Flinux-prim.c;h=b6719d1bd82f18dbd271514f1165016d4276cb3f;hb=e089a515efae3391709b997be889ebe0f3306e9d;hp=ae7df198b9ae28b40f5cec6dcadef0a5ad197a57;hpb=cdaeb287ec52b54aab62ff465518c9d98ee3736b;p=fs%2Flustre-release.git

diff --git a/libcfs/libcfs/linux/linux-prim.c b/libcfs/libcfs/linux/linux-prim.c
index ae7df19..b6719d1 100644
--- a/libcfs/libcfs/linux/linux-prim.c
+++ b/libcfs/libcfs/linux/linux-prim.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -35,9 +35,6 @@
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
-#ifndef AUTOCONF_INCLUDED
-#include
-#endif
 #include
 #include
 #include
@@ -49,166 +46,6 @@
 #include
 #endif
 
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
-
-void
-cfs_waitq_init(cfs_waitq_t *waitq)
-{
-        init_waitqueue_head(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_init);
-
-void
-cfs_waitlink_init(cfs_waitlink_t *link)
-{
-        init_waitqueue_entry(LINUX_WAITQ(link), current);
-}
-EXPORT_SYMBOL(cfs_waitlink_init);
-
-void
-cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-        add_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add);
-
-#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
-
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
-                                              wait_queue_t *wait)
-{
-        wait->flags |= WQ_FLAG_EXCLUSIVE;
-        __add_wait_queue(q, wait);
-}
-
-#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
-
-void
-cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
-                        cfs_waitlink_t *link)
-{
-        add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive);
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if cfs_waitq_add_exclusive_head is used.
- */
-void
-cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-        __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-        spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
-
-void
-cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-        remove_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_del);
-
-int
-cfs_waitq_active(cfs_waitq_t *waitq)
-{
-        return waitqueue_active(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_active);
-
-void
-cfs_waitq_signal(cfs_waitq_t *waitq)
-{
-        wake_up(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_signal);
-
-void
-cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
-{
-        wake_up_nr(LINUX_WAITQ_HEAD(waitq), nr);
-}
-EXPORT_SYMBOL(cfs_waitq_signal_nr);
-
-void
-cfs_waitq_broadcast(cfs_waitq_t *waitq)
-{
-        wake_up_all(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_broadcast);
-
-void
-cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
-{
-        schedule();
-}
-EXPORT_SYMBOL(cfs_waitq_wait);
-
-int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
-                    int64_t timeout)
-{
-        return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_waitq_timedwait);
-
-void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
-{
-        set_current_state(state);
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
-
-void
-cfs_schedule_timeout(int64_t timeout)
-{
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout);
-
-void
-cfs_schedule(void)
-{
-        schedule();
-}
-EXPORT_SYMBOL(cfs_schedule);
-
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
-        set_current_state(TASK_UNINTERRUPTIBLE);
-        schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
-int cfs_need_resched(void)
-{
-        return need_resched();
-}
-EXPORT_SYMBOL(cfs_need_resched);
-
-void cfs_cond_resched(void)
-{
-        cond_resched();
-}
-EXPORT_SYMBOL(cfs_cond_resched);
-
 void cfs_init_timer(cfs_timer_t *t)
 {
         init_timer(t);
@@ -257,55 +94,23 @@
 void cfs_enter_debugger(void)
 {
 #if defined(CONFIG_KGDB)
         // BREAKPOINT();
-#elif defined(__arch_um__)
-        asm("int $3");
 #else
         /* nothing */
 #endif
 }
 
-void cfs_daemonize(char *str) {
-        unsigned long flags;
-
-        daemonize(str);
-        SIGNAL_MASK_LOCK(current, flags);
-        sigfillset(&current->blocked);
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
-}
-
-int cfs_daemonize_ctxt(char *str) {
-
-        cfs_daemonize(str);
-#ifndef HAVE_UNSHARE_FS_STRUCT
-        {
-                struct task_struct *tsk = current;
-                struct fs_struct *fs = NULL;
-                fs = copy_fs_struct(tsk->fs);
-                if (fs == NULL)
-                        return -ENOMEM;
-                exit_fs(tsk);
-                tsk->fs = fs;
-        }
-#else
-        unshare_fs_struct();
-#endif
-        return 0;
-}
-
 sigset_t cfs_block_allsigs(void)
 {
-        unsigned long flags;
-        sigset_t old;
-
-        SIGNAL_MASK_LOCK(current, flags);
-        old = current->blocked;
-        sigfillset(&current->blocked);
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
+        unsigned long flags;
+        sigset_t old;
 
-        return old;
+        spin_lock_irqsave(&current->sighand->siglock, flags);
+        old = current->blocked;
+        sigfillset(&current->blocked);
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
+        return old;
 }
 
 sigset_t cfs_block_sigs(unsigned long sigs)
@@ -313,11 +118,11 @@ sigset_t cfs_block_sigs(unsigned long sigs)
         unsigned long flags;
         sigset_t old;
 
-        SIGNAL_MASK_LOCK(current, flags);
+        spin_lock_irqsave(&current->sighand->siglock, flags);
         old = current->blocked;
         sigaddsetmask(&current->blocked, sigs);
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
         return old;
 }
@@ -327,40 +132,39 @@ sigset_t cfs_block_sigsinv(unsigned long sigs)
         unsigned long flags;
         sigset_t old;
 
-        SIGNAL_MASK_LOCK(current, flags);
+        spin_lock_irqsave(&current->sighand->siglock, flags);
         old = current->blocked;
         sigaddsetmask(&current->blocked, ~sigs);
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
-
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
         return old;
 }
 
 void
 cfs_restore_sigs (cfs_sigset_t old)
 {
-        unsigned long flags;
+        unsigned long flags;
 
-        SIGNAL_MASK_LOCK(current, flags);
-        current->blocked = old;
-        RECALC_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
+        spin_lock_irqsave(&current->sighand->siglock, flags);
+        current->blocked = old;
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
 int
 cfs_signal_pending(void)
 {
-        return signal_pending(current);
+        return signal_pending(current);
 }
 
 void
 cfs_clear_sigpending(void)
 {
-        unsigned long flags;
+        unsigned long flags;
 
-        SIGNAL_MASK_LOCK(current, flags);
-        CLEAR_SIGPENDING;
-        SIGNAL_MASK_UNLOCK(current, flags);
+        spin_lock_irqsave(&current->sighand->siglock, flags);
+        clear_tsk_thread_flag(current, TIF_SIGPENDING);
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
 int
@@ -378,8 +182,6 @@ libcfs_arch_cleanup(void)
 EXPORT_SYMBOL(libcfs_arch_init);
 EXPORT_SYMBOL(libcfs_arch_cleanup);
 EXPORT_SYMBOL(cfs_enter_debugger);
-EXPORT_SYMBOL(cfs_daemonize);
-EXPORT_SYMBOL(cfs_daemonize_ctxt);
 EXPORT_SYMBOL(cfs_block_allsigs);
 EXPORT_SYMBOL(cfs_block_sigs);
 EXPORT_SYMBOL(cfs_block_sigsinv);
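
The block comment deleted in the first large hunk explains why cfs_waitq_add_exclusive_head() linked exclusive waiters LIFO rather than FIFO: waking the most recently idle thread keeps its cache warm, and on kernels before 2.6.34 add_wait_queue_exclusive() only offered FIFO (tail) insertion. The sketch below is not part of the patch; it shows the same head-insertion idea against the plain wait-queue API of that era (pre-4.13 wait_queue_t), and the demo_* names are made up for illustration.

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Link an exclusive waiter at the head of the queue so that wake_up()
 * picks the most recently queued (cache-hot) thread first. */
static void demo_add_exclusive_head(wait_queue_head_t *waitq,
                                    wait_queue_t *link)
{
        unsigned long flags;

        spin_lock_irqsave(&waitq->lock, flags);
        link->flags |= WQ_FLAG_EXCLUSIVE;       /* wake_up() stops after one */
        __add_wait_queue(waitq, link);          /* head, not tail: LIFO */
        spin_unlock_irqrestore(&waitq->lock, flags);
}

/* A service thread parking itself until work shows up. */
static void demo_wait_for_work(wait_queue_head_t *waitq, int *have_work)
{
        wait_queue_t link;

        init_waitqueue_entry(&link, current);
        demo_add_exclusive_head(waitq, &link);
        while (!*have_work) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (*have_work)                 /* re-check after state change */
                        break;
                schedule();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(waitq, &link);
}

As the removed comment warns, mixing non-exclusive waiters with head-inserted exclusive waiters on the same queue breaks the usual wake-up ordering, so a queue used this way should carry exclusive waiters only.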
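
The later hunks retire the SIGNAL_MASK_LOCK()/SIGNAL_MASK_UNLOCK()/RECALC_SIGPENDING compatibility macros and instead take current->sighand->siglock directly before editing current->blocked, calling recalc_sigpending() under the lock. A minimal usage sketch of the reworked helpers follows; demo_critical_work() is a placeholder rather than a real Lustre symbol, and the include path assumes a Lustre source tree where these prototypes are reachable through libcfs.h.

#include <libcfs/libcfs.h>      /* cfs_block_allsigs(), cfs_restore_sigs() */

/* Stand-in for whatever must not be interrupted by signals. */
static int demo_critical_work(void)
{
        return 0;
}

static int demo_uninterruptible_region(void)
{
        cfs_sigset_t old;
        int rc;

        old = cfs_block_allsigs();      /* sigfillset() under ->sighand->siglock */
        rc = demo_critical_work();
        cfs_restore_sigs(old);          /* restore old mask, recalc_sigpending() */

        return rc;
}

cfs_block_sigsinv() is the inverse helper: per the sigaddsetmask(&current->blocked, ~sigs) line above, it blocks everything except the signals passed in, for example to let only a few fatal signals through while a thread waits.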