Threads started by kthread_run() ignore all signals,
as kthreadd() calls ignore_signals(), and this is
inherited by all children.
So there is no need to call cfs_block_allsigs() in functions
that are only run from kthread_run().
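
For reference, ignore_signals() in kernel/signal.c amounts to
the following (a paraphrased sketch, not the verbatim upstream
code):

    void ignore_signals(struct task_struct *t)
    {
            int i;

            /* Mark every signal as ignored for this thread;
             * kthreads forked from it inherit the dispositions,
             * so a signal is never delivered to them. */
            for (i = 0; i < _NSIG; ++i)
                    t->sighand->action[i].sa.sa_handler = SIG_IGN;

            /* Discard anything already pending. */
            flush_signals(t);
    }

kthreadd() applies this to itself at startup, so every thread
created via kthread_run() starts with all signals ignored.
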
lnet_ping_md_unlink() is not run from a kernel thread, but
nothing in that function is affected by signals, so the call
is safe to remove there as well.
For lnet_ping() we need to block signals manually, since
LNetEQPoll() can unconditionally abort its wait when a signal
is received.
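
The replacement is the usual in-kernel block/restore pairing;
a minimal sketch (only the blocking half appears in the hunk
below, the restore happens later in lnet_ping()):

    sigset_t set, blocked;

    /* Block every signal, saving the old mask in 'blocked'. */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, &blocked);

    /* ... wait that must not be interrupted,
     *     e.g. the LNetEQPoll() loop ... */

    /* Restore the caller's original signal mask. */
    sigprocmask(SIG_SETMASK, &blocked, NULL);
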
Linux-commit: 1b2dad1459e480028a2714439048d8a634132857
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Change-Id: I124dccf78a3187d5f4a31c7b76db5369aaafc369
Reviewed-on: https://review.whamcloud.com/35350
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Jian Yu <yujian@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
* Defined by platform
*/
int unshare_fs_struct(void);
-sigset_t cfs_block_allsigs(void);
sigset_t cfs_block_sigs(unsigned long sigs);
sigset_t cfs_block_sigsinv(unsigned long sigs);
void cfs_restore_sigs(sigset_t);
EXPORT_SYMBOL(kstrtobool_from_user);
#endif /* !HAVE_KSTRTOBOOL_FROM_USER */
-sigset_t
-cfs_block_allsigs(void)
-{
- unsigned long flags;
- sigset_t old;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- old = current->blocked;
- sigfillset(&current->blocked);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- return old;
-}
-EXPORT_SYMBOL(cfs_block_allsigs);
-
sigset_t cfs_block_sigs(unsigned long sigs)
{
unsigned long flags;
{
struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
- cfs_block_allsigs();
-
/* CPT affinity scheduler? */
if (sched->ws_cptab != NULL)
if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
struct timer_list timer;
DEFINE_WAIT(wait);
- cfs_block_allsigs();
-
/* all gnilnd threads need to run fairly urgently */
set_user_nice(current, *kgnilnd_tunables.kgn_nice);
spin_lock(&kgnilnd_data.kgn_reaper_lock);
dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
- cfs_block_allsigs();
-
/* all gnilnd threads need to run fairly urgently */
set_user_nice(current, *kgnilnd_tunables.kgn_sched_nice);
deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
DEFINE_WAIT(mover_done);
snprintf(name, sizeof(name), "kgnilnd_dgn_%02d", dev->gnd_id);
- cfs_block_allsigs();
/* all gnilnd threads need to run fairly urgently */
set_user_nice(current, *kgnilnd_tunables.kgn_nice);
DEFINE_WAIT(wait);
snprintf(name, sizeof(name), "kgnilnd_dg_%02d", dev->gnd_id);
- cfs_block_allsigs();
+
/* all gnilnd threads need to run fairly urgently */
set_user_nice(current, *kgnilnd_tunables.kgn_nice);
int i = 1;
DEFINE_WAIT(wait);
- cfs_block_allsigs();
set_user_nice(current, *kgnilnd_tunables.kgn_nice);
kgnilnd_data.kgn_ruhroh_running = 1;
rs_event_t event;
lnet_nid_t nid;
- cfs_block_allsigs();
-
/* all gnilnd threads need to run fairly urgently */
set_user_nice(current, *kgnilnd_tunables.kgn_nice);
int peer_index = 0;
unsigned long deadline = jiffies;
- cfs_block_allsigs();
-
init_waitqueue_entry(&wait, current);
kiblnd_data.kib_connd = current;
int busy_loops = 0;
int rc;
- cfs_block_allsigs();
-
init_waitqueue_entry(&wait, current);
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
- cfs_block_allsigs();
-
init_waitqueue_entry(&wait, current);
write_lock_irqsave(glock, flags);
return -ENOMEM;
}
- cfs_block_allsigs();
-
rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
if (rc != 0) {
CWARN("Can't set CPU partition affinity to %d: %d\n",
int nloops = 0;
int cons_retry = 0;
- cfs_block_allsigs();
-
init_waitqueue_entry(&wait, current);
spin_lock_bh(connd_lock);
int peer_index = 0;
time64_t deadline = ktime_get_seconds();
- cfs_block_allsigs ();
-
init_waitqueue_entry(&wait, current);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
LASSERT(lnet_acceptor_state.pta_sock == NULL);
- cfs_block_allsigs();
-
rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock,
0, accept_port, accept_backlog,
lnet_acceptor_state.pta_ns);
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
+#ifdef HAVE_SCHED_HEADERS
+#include <linux/sched/signal.h>
+#endif
#include <lnet/lib-lnet.h>
lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
struct lnet_handle_md *ping_mdh)
{
- sigset_t blocked = cfs_block_allsigs();
-
LNetMDUnlink(*ping_mdh);
LNetInvalidateMDHandle(ping_mdh);
CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
-
- cfs_restore_sigs(blocked);
}
static void
do {
/* MUST block for unlink to complete */
- if (unlinked)
- blocked = cfs_block_allsigs();
+ if (unlinked) {
+ sigset_t set;
+
+ sigfillset(&set);
+ sigprocmask(SIG_SETMASK, &set, &blocked);
+ }
rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
* 4. Checks if there are any NIs on the remote recovery queue
* and pings them.
*/
- cfs_block_allsigs();
-
while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
now = ktime_get_real_seconds();
wait_for_completion(&the_lnet.ln_started);
CDEBUG(D_NET, "started\n");
- cfs_block_allsigs();
for (;;) {
if (lnet_peer_discovery_wait_for_work())
{
int rc = 0;
- cfs_block_allsigs();
-
while (!stt_data.stt_shuttingdown) {
stt_check_timers(&stt_data.stt_prev_slot);