*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include "tracefile.h"
+#ifndef WITH_WATCHDOG
+#define WITH_WATCHDOG
+#endif
+
struct lc_watchdog {
- spinlock_t lcw_lock; /* check or change lcw_list */
- int lcw_refcount; /* must hold lcw_pending_timers_lock */
- cfs_timer_t lcw_timer; /* kernel timer */
- cfs_list_t lcw_list; /* chain on pending list */
- cfs_time_t lcw_last_touched; /* last touched stamp */
- cfs_task_t *lcw_task; /* owner task */
- void (*lcw_callback)(pid_t, void *);
- void *lcw_data;
-
- pid_t lcw_pid;
-
- enum {
- LC_WATCHDOG_DISABLED,
- LC_WATCHDOG_ENABLED,
- LC_WATCHDOG_EXPIRED
- } lcw_state;
+ spinlock_t lcw_lock; /* check or change lcw_list */
+ int lcw_refcount; /* must hold lcw_pending_timers_lock */
+ struct timer_list lcw_timer; /* kernel timer */
+ struct list_head lcw_list; /* chain on pending list */
+ ktime_t lcw_last_touched;/* last touched stamp */
+ struct task_struct *lcw_task; /* owner task */
+ void (*lcw_callback)(pid_t, void *);
+ void *lcw_data;
+
+ pid_t lcw_pid;
+
+ enum {
+ LC_WATCHDOG_DISABLED,
+ LC_WATCHDOG_ENABLED,
+ LC_WATCHDOG_EXPIRED
+ } lcw_state;
};
#ifdef WITH_WATCHDOG
*/
static struct completion lcw_start_completion;
static struct completion lcw_stop_completion;
-static cfs_waitq_t lcw_event_waitq;
+static wait_queue_head_t lcw_event_waitq;
/*
* Set this and wake lcw_event_waitq to stop the dispatcher.
*/
/* BH lock! */
static DEFINE_SPINLOCK(lcw_pending_timers_lock);
-static cfs_list_t lcw_pending_timers = CFS_LIST_HEAD_INIT(lcw_pending_timers);
+static struct list_head lcw_pending_timers = LIST_HEAD_INIT(lcw_pending_timers);
/* Last time a watchdog expired */
-static cfs_time_t lcw_last_watchdog_time;
+static time64_t lcw_last_watchdog_time;
static int lcw_recent_watchdog_count;
static void
ENTRY;
rcu_read_lock();
if (lcw->lcw_task == NULL) {
- LCONSOLE_WARN("Process " LPPID " was not found in the task "
+ LCONSOLE_WARN("Process %d was not found in the task "
"list; watchdog callback may be incomplete\n",
(int)lcw->lcw_pid);
} else {
EXIT;
}
-static void lcw_cb(ulong_ptr_t data)
+static void lcw_cb(cfs_timer_cb_arg_t data)
{
- struct lc_watchdog *lcw = (struct lc_watchdog *)data;
+ struct lc_watchdog *lcw = cfs_from_timer(lcw, data, lcw_timer);
ENTRY;
if (lcw->lcw_state != LC_WATCHDOG_ENABLED) {
lcw->lcw_state = LC_WATCHDOG_EXPIRED;
spin_lock_bh(&lcw->lcw_lock);
- LASSERT(cfs_list_empty(&lcw->lcw_list));
+ LASSERT(list_empty(&lcw->lcw_list));
spin_lock_bh(&lcw_pending_timers_lock);
lcw->lcw_refcount++; /* +1 for pending list */
- cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
- cfs_waitq_signal(&lcw_event_waitq);
+ list_add(&lcw->lcw_list, &lcw_pending_timers);
+ wake_up(&lcw_event_waitq);
spin_unlock_bh(&lcw_pending_timers_lock);
spin_unlock_bh(&lcw->lcw_lock);
return 1;
spin_lock_bh(&lcw_pending_timers_lock);
- rc = !cfs_list_empty(&lcw_pending_timers);
+ rc = !list_empty(&lcw_pending_timers);
spin_unlock_bh(&lcw_pending_timers_lock);
return rc;
}
static void lcw_dump_stack(struct lc_watchdog *lcw)
{
- cfs_time_t current_time;
- cfs_duration_t delta_time;
- struct timeval timediff;
-
- current_time = cfs_time_current();
- delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched);
- cfs_duration_usec(delta_time, &timediff);
-
- /*
- * Check to see if we should throttle the watchdog timer to avoid
- * too many dumps going to the console thus triggering an NMI.
- */
- delta_time = cfs_duration_sec(cfs_time_sub(current_time,
- lcw_last_watchdog_time));
-
- if (delta_time < libcfs_watchdog_ratelimit &&
- lcw_recent_watchdog_count > 3) {
- LCONSOLE_WARN("Service thread pid %u was inactive for "
- "%lu.%.02lus. Watchdog stack traces are limited "
- "to 3 per %d seconds, skipping this one.\n",
- (int)lcw->lcw_pid,
- timediff.tv_sec,
- timediff.tv_usec / 10000,
- libcfs_watchdog_ratelimit);
- } else {
- if (delta_time < libcfs_watchdog_ratelimit) {
- lcw_recent_watchdog_count++;
- } else {
- memcpy(&lcw_last_watchdog_time, &current_time,
- sizeof(current_time));
- lcw_recent_watchdog_count = 0;
- }
-
- LCONSOLE_WARN("Service thread pid %u was inactive for "
- "%lu.%.02lus. The thread might be hung, or it "
- "might only be slow and will resume later. "
- "Dumping the stack trace for debugging purposes:"
- "\n",
- (int)lcw->lcw_pid,
- timediff.tv_sec,
- timediff.tv_usec / 10000);
- lcw_dump(lcw);
- }
+ time64_t current_time = ktime_get_seconds();
+ struct timespec64 timediff;
+ time64_t delta_time;
+
+ timediff = ktime_to_timespec64(ktime_sub(ktime_get(),
+ lcw->lcw_last_touched));
+
+ /* LU-9235: Don't dump stack if the thread is just touched. */
+ if (timediff.tv_sec == 0)
+ return;
+
+ /*
+ * Check to see if we should throttle the watchdog timer to avoid
+ * too many dumps going to the console thus triggering an NMI.
+ */
+ delta_time = current_time - lcw_last_watchdog_time;
+ if (delta_time < libcfs_watchdog_ratelimit &&
+ lcw_recent_watchdog_count > 3) {
+ LCONSOLE_WARN("Service thread pid %u was inactive for %lu.%.02lus. Watchdog stack traces are limited to 3 per %d seconds, skipping this one.\n",
+ (int)lcw->lcw_pid,
+ timediff.tv_sec,
+ timediff.tv_nsec / (NSEC_PER_SEC / 100),
+ libcfs_watchdog_ratelimit);
+ } else {
+ if (delta_time < libcfs_watchdog_ratelimit) {
+ lcw_recent_watchdog_count++;
+ } else {
+ memcpy(&lcw_last_watchdog_time, &current_time,
+ sizeof(current_time));
+ lcw_recent_watchdog_count = 0;
+ }
+
+ LCONSOLE_WARN("Service thread pid %u was inactive for %lu.%.02lus. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:\n",
+ (int)lcw->lcw_pid,
+ timediff.tv_sec,
+ timediff.tv_nsec / (NSEC_PER_SEC / 100));
+ lcw_dump(lcw);
+ }
+}
+
+/*
+ * Provided watchdog handlers
+ */
+
+static void lc_watchdog_dumplog(pid_t pid, void *data)
+{
+ libcfs_debug_dumplog_internal((void *)((uintptr_t)pid));
}
static int lcw_dispatch_main(void *data)
{
int rc = 0;
struct lc_watchdog *lcw;
- CFS_LIST_HEAD (zombies);
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
ENTRY;
CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
spin_lock_bh(&lcw_pending_timers_lock);
- rc = !cfs_list_empty(&lcw_pending_timers);
+ rc = !list_empty(&lcw_pending_timers);
spin_unlock_bh(&lcw_pending_timers_lock);
if (rc) {
CERROR("pending timers list was not empty at "
}
spin_lock_bh(&lcw_pending_timers_lock);
- while (!cfs_list_empty(&lcw_pending_timers)) {
- int is_dumplog;
-
- lcw = cfs_list_entry(lcw_pending_timers.next,
- struct lc_watchdog, lcw_list);
- /* +1 ref for callback to make sure lwc wouldn't be
- * deleted after releasing lcw_pending_timers_lock */
- lcw->lcw_refcount++;
+ while (!list_empty(&lcw_pending_timers)) {
+ int is_dumplog;
+
+ lcw = list_entry(lcw_pending_timers.next,
+ struct lc_watchdog, lcw_list);
+ /* +1 ref for callback to make sure lwc wouldn't be
+ * deleted after releasing lcw_pending_timers_lock */
+ lcw->lcw_refcount++;
spin_unlock_bh(&lcw_pending_timers_lock);
/* lock ordering */
spin_lock_bh(&lcw->lcw_lock);
spin_lock_bh(&lcw_pending_timers_lock);
- if (cfs_list_empty(&lcw->lcw_list)) {
+ if (list_empty(&lcw->lcw_list)) {
/* already removed from pending list */
lcw->lcw_refcount--; /* -1 ref for callback */
if (lcw->lcw_refcount == 0)
- cfs_list_add(&lcw->lcw_list, &zombies);
+ list_add(&lcw->lcw_list, &zombies);
spin_unlock_bh(&lcw->lcw_lock);
- /* still hold lcw_pending_timers_lock */
- continue;
- }
+ /* still hold lcw_pending_timers_lock */
+ continue;
+ }
- cfs_list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
+ list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
spin_unlock_bh(&lcw_pending_timers_lock);
spin_unlock_bh(&lcw->lcw_lock);
- CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
+ CDEBUG(D_INFO, "found lcw for pid %d\n",
lcw->lcw_pid);
lcw_dump_stack(lcw);
spin_lock_bh(&lcw_pending_timers_lock);
lcw->lcw_refcount--; /* -1 ref for callback */
if (lcw->lcw_refcount == 0)
- cfs_list_add(&lcw->lcw_list, &zombies);
+ list_add(&lcw->lcw_list, &zombies);
}
spin_unlock_bh(&lcw_pending_timers_lock);
- while (!cfs_list_empty(&zombies)) {
- lcw = cfs_list_entry(lcw_pending_timers.next,
- struct lc_watchdog, lcw_list);
- cfs_list_del(&lcw->lcw_list);
- LIBCFS_FREE(lcw, sizeof(*lcw));
- }
- }
+ while (!list_empty(&zombies)) {
+ lcw = list_entry(zombies.next,
+ struct lc_watchdog, lcw_list);
+ list_del_init(&lcw->lcw_list);
+ LIBCFS_FREE(lcw, sizeof(*lcw));
+ }
+ }
complete(&lcw_stop_completion);
static void lcw_dispatch_start(void)
{
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
LASSERT(lcw_refcount == 1);
init_completion(&lcw_stop_completion);
init_completion(&lcw_start_completion);
- cfs_waitq_init(&lcw_event_waitq);
+ init_waitqueue_head(&lcw_event_waitq);
CDEBUG(D_INFO, "starting dispatch thread\n");
task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
set_bit(LCW_FLAG_STOP, &lcw_flags);
- cfs_waitq_signal(&lcw_event_waitq);
+ wake_up(&lcw_event_waitq);
wait_for_completion(&lcw_stop_completion);
+ clear_bit(LCW_FLAG_STOP, &lcw_flags);
CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
spin_lock_init(&lcw->lcw_lock);
lcw->lcw_refcount = 1; /* refcount for owner */
- lcw->lcw_task = cfs_current();
+ lcw->lcw_task = current;
lcw->lcw_pid = current_pid();
lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
lcw->lcw_data = data;
lcw->lcw_state = LC_WATCHDOG_DISABLED;
- CFS_INIT_LIST_HEAD(&lcw->lcw_list);
- cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
+ INIT_LIST_HEAD(&lcw->lcw_list);
+ cfs_timer_setup(&lcw->lcw_timer, lcw_cb, (unsigned long)lcw, 0);
mutex_lock(&lcw_refcount_mutex);
if (++lcw_refcount == 1)
lcw_dispatch_start();
mutex_unlock(&lcw_refcount_mutex);
- /* Keep this working in case we enable them by default */
- if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
- lcw->lcw_last_touched = cfs_time_current();
- cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
- cfs_time_current());
- }
+ /* Keep this working in case we enable them by default */
+ if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
+ lcw->lcw_last_touched = ktime_get();
+ mod_timer(&lcw->lcw_timer, cfs_time_seconds(timeout) +
+ jiffies);
+ }
RETURN(lcw);
}
static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
{
- cfs_time_t newtime = cfs_time_current();;
-
- if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
- struct timeval timediff;
- cfs_time_t delta_time = cfs_time_sub(newtime,
- lcw->lcw_last_touched);
- cfs_duration_usec(delta_time, &timediff);
-
- LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. "
- "This indicates the system was overloaded (too "
- "many service threads, or there were not enough "
- "hardware resources).\n",
- lcw->lcw_pid,
- message,
- timediff.tv_sec,
- timediff.tv_usec / 10000);
- }
- lcw->lcw_last_touched = newtime;
+ ktime_t newtime = ktime_get();
+
+ if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
+ ktime_t lapse = ktime_sub(newtime, lcw->lcw_last_touched);
+ struct timespec64 timediff;
+
+ timediff = ktime_to_timespec64(lapse);
+ LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).\n",
+ lcw->lcw_pid, message,
+ timediff.tv_sec,
+ timediff.tv_nsec / (NSEC_PER_SEC / 100));
+ }
+ lcw->lcw_last_touched = newtime;
}
static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
{
spin_lock_bh(&lcw->lcw_lock);
- if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+ if (unlikely(!list_empty(&lcw->lcw_list))) {
spin_lock_bh(&lcw_pending_timers_lock);
- cfs_list_del_init(&lcw->lcw_list);
+ list_del_init(&lcw->lcw_list);
lcw->lcw_refcount--; /* -1 ref for pending list */
spin_unlock_bh(&lcw_pending_timers_lock);
}
void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
{
- ENTRY;
- LASSERT(lcw != NULL);
+ ENTRY;
+ LASSERT(lcw != NULL);
- lc_watchdog_del_pending(lcw);
+ lc_watchdog_del_pending(lcw);
- lcw_update_time(lcw, "resumed");
- lcw->lcw_state = LC_WATCHDOG_ENABLED;
+ lcw_update_time(lcw, "resumed");
- cfs_timer_arm(&lcw->lcw_timer, cfs_time_current() +
- cfs_time_seconds(timeout));
+ mod_timer(&lcw->lcw_timer, jiffies + cfs_time_seconds(timeout));
+ lcw->lcw_state = LC_WATCHDOG_ENABLED;
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(lc_watchdog_touch);
ENTRY;
LASSERT(lcw != NULL);
- cfs_timer_disarm(&lcw->lcw_timer);
+ del_timer(&lcw->lcw_timer);
lcw_update_time(lcw, "stopped");
spin_lock_bh(&lcw->lcw_lock);
spin_lock_bh(&lcw_pending_timers_lock);
- if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
- cfs_list_del_init(&lcw->lcw_list);
+ if (unlikely(!list_empty(&lcw->lcw_list))) {
+ list_del_init(&lcw->lcw_list);
lcw->lcw_refcount--; /* -1 ref for pending list */
}
}
EXPORT_SYMBOL(lc_watchdog_delete);
-/*
- * Provided watchdog handlers
- */
-
-void lc_watchdog_dumplog(pid_t pid, void *data)
-{
- libcfs_debug_dumplog_internal((void *)((long_ptr_t)pid));
-}
-EXPORT_SYMBOL(lc_watchdog_dumplog);
-
#else /* !defined(WITH_WATCHDOG) */
struct lc_watchdog *lc_watchdog_add(int timeout,