cfs_block_sigsinv() and cfs_restore_sigs() are simple
wrappers which save a couple of lines of code and
hurt readability for people not familiar with them.
They aren't used often enough to be worthwhile,
so discard them and open-code the functionality.
The sigorsets() call isn't needed as or-ing with current->blocked is
exactly what sigprocmask(SIG_BLOCK) does.
Linux-commit:
6afe572bc76688cd840032254217a4877b66e916
Change-Id: Ia9189e0885dffb098df7abef09db42ecb49196cd
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-on: https://review.whamcloud.com/38530
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
#endif
#endif
-#ifdef HAVE_FORCE_SIG_WITH_TASK
-#define cfs_force_sig(sig, task) force_sig((sig), (task))
-#else
-#define cfs_force_sig(sig, task) \
-do { \
- unsigned long flags; \
- \
- spin_lock_irqsave(&task->sighand->siglock, flags); \
- task->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; \
- send_sig(sig, task, 1); \
- spin_unlock_irqrestore(&task->sighand->siglock, flags); \
-} while (0)
-#endif
-
typedef s32 timeout_t;
-/* Block all signals except for the @sigs */
-static inline void cfs_block_sigsinv(unsigned long sigs, sigset_t *old)
-{
- sigset_t new;
-
- siginitsetinv(&new, sigs);
- sigorsets(&new, &current->blocked, &new);
- sigprocmask(SIG_BLOCK, &new, old);
-}
-
-static inline void
-cfs_restore_sigs(sigset_t *old)
-{
- sigprocmask(SIG_SETMASK, old, NULL);
-}
-
/* need both kernel and user-land acceptor */
#define LNET_ACCEPTOR_MIN_RESERVED_PORT 512
#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
* Defined by platform
*/
int unshare_fs_struct(void);
-void cfs_block_sigsinv(unsigned long sigs, sigset_t *sigset);
-void cfs_restore_sigs(sigset_t *sigset);
int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
}
#endif /* HAVE_KREF_READ */
+#ifdef HAVE_FORCE_SIG_WITH_TASK
+#define cfs_force_sig(sig, task) force_sig((sig), (task))
+#else
+#define cfs_force_sig(sig, task) \
+do { \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave(&task->sighand->siglock, flags); \
+ task->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; \
+ send_sig(sig, task, 1); \
+ spin_unlock_irqrestore(&task->sighand->siglock, flags); \
+} while (0)
+#endif
+
void cfs_arch_init(void);
#ifndef container_of_safe
wait_queue_entry_t __wq_entry; \
unsigned long flags; \
long __ret = ret; /* explicit shadow */ \
- sigset_t __old_blocked; \
+ sigset_t __old_blocked, __new_blocked; \
\
- cfs_block_sigsinv(0, &__old_blocked); \
+ siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
+ sigprocmask(0, &__new_blocked, &__old_blocked); \
init_wait(&__wq_entry); \
if (exclusive) \
__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
cmd; \
} \
finish_wait(&wq_head, &__wq_entry); \
- cfs_restore_sigs(&__old_blocked); \
+ sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
__ret; \
})
wait_queue_entry_t __wq_entry; \
unsigned long flags; \
long __ret = ret; /* explicit shadow */ \
- sigset_t __old_blocked; \
+ sigset_t __old_blocked, __new_blocked; \
\
- cfs_block_sigsinv(0, &__old_blocked); \
+ siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
+ sigprocmask(0, &__new_blocked, &__old_blocked); \
init_wait(&__wq_entry); \
__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
for (;;) { \
} \
cmd; \
} \
- cfs_restore_sigs(&__old_blocked); \
+ sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
finish_wait(&wq_head, &__wq_entry); \
__ret; \
})
struct vvp_io *vio;
int result;
__u16 refcheck;
- sigset_t set;
+ sigset_t old, new;
struct inode *inode = NULL;
struct ll_inode_info *lli;
ENTRY;
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = vmpage;
- cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM), &set);
+ siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
+ sigprocmask(SIG_BLOCK, &new, &old);
inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
result = cl_io_loop(env, io);
- cfs_restore_sigs(&set);
+ sigprocmask(SIG_SETMASK, &old, NULL);
if (result == 0) {
lock_page(vmpage);
bool cached;
vm_fault_t result;
ktime_t kstart = ktime_get();
- sigset_t set;
+ sigset_t old, new;
result = pcc_fault(vma, vmf, &cached);
if (cached)
/* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite
* so that it can be killed by admin but not cause segfault by
- * other signals. */
- cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM), &set);
+ * other signals.
+ */
+ siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
+ sigprocmask(SIG_BLOCK, &new, &old);
/* make sure offset is not a negative number */
if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
result |= VM_FAULT_LOCKED;
}
- cfs_restore_sigs(&set);
+ sigprocmask(SIG_SETMASK, &old, NULL);
out:
if (vmf->page && result == VM_FAULT_LOCKED) {
*/
if (rc == -ETIMEDOUT &&
signal_pending(current)) {
- sigset_t blocked_sigs;
+ sigset_t old, new;
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS,
- &blocked_sigs);
+ siginitset(&new, LUSTRE_FATAL_SIGS);
+ sigprocmask(SIG_BLOCK, &new, &old);
/*
* In fact we only interrupt for the
* "fatal" signals like SIGINT or
*/
if (signal_pending(current))
ptlrpc_interrupted_set(set);
- cfs_restore_sigs(&blocked_sigs);
+ sigprocmask(SIG_SETMASK, &old, NULL);
}
}