4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/watchdog.c
38 * Author: Jacob Berkman <jacob@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_LNET
43 #include <linux/kthread.h>
44 #include <libcfs/libcfs.h>
45 #include "tracefile.h"
/* NOTE(review): this listing appears to be missing lines; these are fields
 * of struct lc_watchdog whose opening "struct ... {" line is not visible. */
spinlock_t lcw_lock;	/* check or change lcw_list */
int lcw_refcount;	/* reference count; must hold lcw_pending_timers_lock
			 * to modify (see lcw_cb/lc_watchdog_delete) */
struct timer_list lcw_timer;	/* kernel timer, fires lcw_cb() */
struct list_head lcw_list;	/* chain on lcw_pending_timers list */
cfs_time_t lcw_last_touched;	/* last touched stamp */
struct task_struct *lcw_task;	/* owner task */
void (*lcw_callback)(pid_t, void *);	/* run by the dispatcher thread
					 * (process context) on expiry */
/*
 * The dispatcher will complete lcw_start_completion when it starts,
 * and lcw_stop_completion when it exits.
 * Wake lcw_event_waitq to signal timer callback dispatches.
 */
static struct completion lcw_start_completion;
static struct completion lcw_stop_completion;
static wait_queue_head_t lcw_event_waitq;

/*
 * Set this (LCW_FLAG_STOP) and wake lcw_event_waitq to stop the dispatcher.
 */
static unsigned long lcw_flags = 0;

/*
 * Number of outstanding watchdogs.
 * When it hits 1, we start the dispatcher.
 * When it hits 0, we stop the dispatcher.
 */
static __u32 lcw_refcount = 0;
static DEFINE_MUTEX(lcw_refcount_mutex);	/* serializes lcw_refcount and
						 * dispatcher start/stop */

/*
 * List of timers that have fired that need their callbacks run by the
 * dispatcher thread.
 */
static DEFINE_SPINLOCK(lcw_pending_timers_lock);	/* taken with _bh:
							 * list is filled from
							 * timer context */
static struct list_head lcw_pending_timers = LIST_HEAD_INIT(lcw_pending_timers);

/* Last time a watchdog expired */
static cfs_time_t lcw_last_watchdog_time;
static int lcw_recent_watchdog_count;	/* dumps since lcw_last_watchdog_time,
					 * used for console rate limiting */
/*
 * Dump the stack of the watchdog owner's task to the console.
 * NOTE(review): the listing appears to be missing lines here (no body
 * braces, truncated LCONSOLE_WARN argument list); comments only added.
 */
lcw_dump(struct lc_watchdog *lcw)
	if (lcw->lcw_task == NULL) {
		/* owner task already exited before the watchdog fired */
		LCONSOLE_WARN("Process " LPPID " was not found in the task "
			      "list; watchdog callback may be incomplete\n",
	libcfs_debug_dumpstack(lcw->lcw_task);
/*
 * Timer callback: runs when a watchdog timer expires (timer/softirq
 * context).  Marks the watchdog expired and queues it on the pending
 * list; the actual lcw_callback is run later by the dispatcher thread
 * in process context.
 */
static void lcw_cb(ulong_ptr_t data)
	struct lc_watchdog *lcw = (struct lc_watchdog *)data;

	/* only an armed watchdog counts as a failure */
	if (lcw->lcw_state != LC_WATCHDOG_ENABLED) {
	lcw->lcw_state = LC_WATCHDOG_EXPIRED;

	spin_lock_bh(&lcw->lcw_lock);
	LASSERT(list_empty(&lcw->lcw_list));

	spin_lock_bh(&lcw_pending_timers_lock);
	lcw->lcw_refcount++; /* +1 for pending list */
	list_add(&lcw->lcw_list, &lcw_pending_timers);
	wake_up(&lcw_event_waitq);

	spin_unlock_bh(&lcw_pending_timers_lock);
	spin_unlock_bh(&lcw->lcw_lock);
/*
 * Wait condition for the dispatcher thread: true when a stop has been
 * requested (LCW_FLAG_STOP) or at least one expired watchdog is queued
 * on lcw_pending_timers.
 */
static int is_watchdog_fired(void)
	if (test_bit(LCW_FLAG_STOP, &lcw_flags))

	spin_lock_bh(&lcw_pending_timers_lock);
	rc = !list_empty(&lcw_pending_timers);
	spin_unlock_bh(&lcw_pending_timers_lock);
159 static void lcw_dump_stack(struct lc_watchdog *lcw)
161 cfs_time_t current_time;
162 cfs_duration_t delta_time;
163 struct timeval timediff;
165 current_time = cfs_time_current();
166 delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched);
167 cfs_duration_usec(delta_time, &timediff);
170 * Check to see if we should throttle the watchdog timer to avoid
171 * too many dumps going to the console thus triggering an NMI.
173 delta_time = cfs_duration_sec(cfs_time_sub(current_time,
174 lcw_last_watchdog_time));
176 if (delta_time < libcfs_watchdog_ratelimit &&
177 lcw_recent_watchdog_count > 3) {
178 LCONSOLE_WARN("Service thread pid %u was inactive for "
179 "%lu.%.02lus. Watchdog stack traces are limited "
180 "to 3 per %d seconds, skipping this one.\n",
183 timediff.tv_usec / 10000,
184 libcfs_watchdog_ratelimit);
186 if (delta_time < libcfs_watchdog_ratelimit) {
187 lcw_recent_watchdog_count++;
189 memcpy(&lcw_last_watchdog_time, ¤t_time,
190 sizeof(current_time));
191 lcw_recent_watchdog_count = 0;
194 LCONSOLE_WARN("Service thread pid %u was inactive for "
195 "%lu.%.02lus. The thread might be hung, or it "
196 "might only be slow and will resume later. "
197 "Dumping the stack trace for debugging purposes:"
201 timediff.tv_usec / 10000);
207 * Provided watchdog handlers
/*
 * Default watchdog callback: dump the libcfs debug log on behalf of the
 * stuck thread @pid.  @data is unused here.
 */
static void lc_watchdog_dumplog(pid_t pid, void *data)
	libcfs_debug_dumplog_internal((void *)((long_ptr_t)pid));
/*
 * Dispatcher thread body ("lc_watchdogd"): sleeps until lcw_cb() queues an
 * expired watchdog (or a stop is requested), then runs the callbacks in
 * process context.  Watchdogs whose refcount drops to zero while queued
 * are collected on a local "zombies" list and freed outside the locks.
 * Signals startup/shutdown via lcw_start_completion/lcw_stop_completion.
 * NOTE(review): the listing appears to be missing lines (loop header,
 * some braces/else branches); comments only added.
 */
static int lcw_dispatch_main(void *data)
	struct lc_watchdog *lcw;
	struct list_head zombies = LIST_HEAD_INIT(zombies);

	/* let lcw_dispatch_start() return */
	complete(&lcw_start_completion);

		rc = wait_event_interruptible(lcw_event_waitq,
					      is_watchdog_fired());
		CDEBUG(D_INFO, "Watchdog got woken up...\n");
		if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
			CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");

			spin_lock_bh(&lcw_pending_timers_lock);
			rc = !list_empty(&lcw_pending_timers);
			spin_unlock_bh(&lcw_pending_timers_lock);
				CERROR("pending timers list was not empty at "
				       "time of watchdog dispatch shutdown\n");

		spin_lock_bh(&lcw_pending_timers_lock);
		while (!list_empty(&lcw_pending_timers)) {

			lcw = list_entry(lcw_pending_timers.next,
					 struct lc_watchdog, lcw_list);
			/* +1 ref for callback to make sure lwc wouldn't be
			 * deleted after releasing lcw_pending_timers_lock */
			spin_unlock_bh(&lcw_pending_timers_lock);

			/* lock order: lcw_lock, then lcw_pending_timers_lock */
			spin_lock_bh(&lcw->lcw_lock);
			spin_lock_bh(&lcw_pending_timers_lock);

			if (list_empty(&lcw->lcw_list)) {
				/* already removed from pending list */
				lcw->lcw_refcount--; /* -1 ref for callback */
				if (lcw->lcw_refcount == 0)
					list_add(&lcw->lcw_list, &zombies);
				spin_unlock_bh(&lcw->lcw_lock);
				/* still hold lcw_pending_timers_lock */

			list_del_init(&lcw->lcw_list);
			lcw->lcw_refcount--; /* -1 ref for pending list */

			spin_unlock_bh(&lcw_pending_timers_lock);
			spin_unlock_bh(&lcw->lcw_lock);

			CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",

			/* suppress repeated dumplog callbacks when dumplog
			 * throttling is in effect (dumplog flag) */
			is_dumplog = lcw->lcw_callback == lc_watchdog_dumplog;
			if (lcw->lcw_state != LC_WATCHDOG_DISABLED &&
			    (dumplog || !is_dumplog)) {
				lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
				if (dumplog && is_dumplog)

			spin_lock_bh(&lcw_pending_timers_lock);
			lcw->lcw_refcount--; /* -1 ref for callback */
			if (lcw->lcw_refcount == 0)
				list_add(&lcw->lcw_list, &zombies);

		spin_unlock_bh(&lcw_pending_timers_lock);

		/* free watchdogs deleted while their callback was pending */
		while (!list_empty(&zombies)) {
			lcw = list_entry(zombies.next,
					 struct lc_watchdog, lcw_list);
			list_del_init(&lcw->lcw_list);
			LIBCFS_FREE(lcw, sizeof(*lcw));

	/* let lcw_dispatch_stop() return */
	complete(&lcw_stop_completion);
/*
 * Spawn the "lc_watchdogd" dispatcher thread and wait until it is
 * running.  Called (with lcw_refcount_mutex held by the caller) when the
 * first watchdog in the system is added.
 */
static void lcw_dispatch_start(void)
	struct task_struct *task;

	LASSERT(lcw_refcount == 1);

	init_completion(&lcw_stop_completion);
	init_completion(&lcw_start_completion);
	init_waitqueue_head(&lcw_event_waitq);

	CDEBUG(D_INFO, "starting dispatch thread\n");
	task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
		CERROR("error spawning watchdog dispatch thread: %ld\n",

	/* don't return until lcw_dispatch_main() has actually started */
	wait_for_completion(&lcw_start_completion);
	CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
/*
 * Ask the dispatcher thread to exit and wait until it has done so.
 * Called (with lcw_refcount_mutex held by the caller) when the last
 * watchdog in the system is deleted.
 */
static void lcw_dispatch_stop(void)
	LASSERT(lcw_refcount == 0);

	CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");

	/* the flag plus the wake-up makes is_watchdog_fired() return true */
	set_bit(LCW_FLAG_STOP, &lcw_flags);
	wake_up(&lcw_event_waitq);

	wait_for_completion(&lcw_stop_completion);

	CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
/*
 * Allocate and initialize a watchdog for the current task.
 * @timeout: seconds of inactivity before the watchdog fires
 * @callback: run by the dispatcher on expiry; NULL selects the default
 *            lc_watchdog_dumplog()
 * Returns the new watchdog (created LC_WATCHDOG_DISABLED; arm it with
 * lc_watchdog_touch()), or ERR_PTR(-ENOMEM) on allocation failure.
 * The caller owns one reference, released via lc_watchdog_delete().
 */
struct lc_watchdog *lc_watchdog_add(int timeout,
				    void (*callback)(pid_t, void *),
	struct lc_watchdog *lcw = NULL;

	LIBCFS_ALLOC(lcw, sizeof(*lcw));
		CDEBUG(D_INFO, "Could not allocate new lc_watchdog\n");
		RETURN(ERR_PTR(-ENOMEM));

	spin_lock_init(&lcw->lcw_lock);
	lcw->lcw_refcount = 1; /* refcount for owner */
	lcw->lcw_task = current;
	lcw->lcw_pid = current_pid();
	lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
	lcw->lcw_data = data;
	lcw->lcw_state = LC_WATCHDOG_DISABLED;

	INIT_LIST_HEAD(&lcw->lcw_list);
	cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);

	/* the first watchdog in the system starts the dispatcher thread */
	mutex_lock(&lcw_refcount_mutex);
	if (++lcw_refcount == 1)
		lcw_dispatch_start();
	mutex_unlock(&lcw_refcount_mutex);

	/* Keep this working in case we enable them by default */
	if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
		lcw->lcw_last_touched = cfs_time_current();
		cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
EXPORT_SYMBOL(lc_watchdog_add);
/*
 * Refresh lcw->lcw_last_touched to "now".  If the watchdog had already
 * expired, log how long the thread was inactive; @message describes the
 * transition ("resumed", "completed" or "stopped" from the callers here).
 */
static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
	cfs_time_t newtime = cfs_time_current();

	if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
		struct timeval timediff;
		cfs_time_t delta_time = cfs_time_sub(newtime,
						     lcw->lcw_last_touched);
		cfs_duration_usec(delta_time, &timediff);

		LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. "
			      "This indicates the system was overloaded (too "
			      "many service threads, or there were not enough "
			      "hardware resources).\n",
			      timediff.tv_usec / 10000);
	lcw->lcw_last_touched = newtime;
/*
 * If lcw_cb() queued @lcw on the pending-callback list, remove it and
 * drop the reference the pending list held.  No-op otherwise.
 */
static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
	spin_lock_bh(&lcw->lcw_lock);
	/* non-empty lcw_list means the watchdog is on lcw_pending_timers */
	if (unlikely(!list_empty(&lcw->lcw_list))) {
		spin_lock_bh(&lcw_pending_timers_lock);
		list_del_init(&lcw->lcw_list);
		lcw->lcw_refcount--; /* -1 ref for pending list */
		spin_unlock_bh(&lcw_pending_timers_lock);
	spin_unlock_bh(&lcw->lcw_lock);
/*
 * Mark the service thread as alive: cancel any pending expiry callback,
 * record the touch time (logging if the watchdog had expired), and re-arm
 * the timer to fire @timeout seconds from now.
 */
void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
	LASSERT(lcw != NULL);

	lc_watchdog_del_pending(lcw);

	lcw_update_time(lcw, "resumed");

	cfs_timer_arm(&lcw->lcw_timer, cfs_time_current() +
		      cfs_time_seconds(timeout));
	lcw->lcw_state = LC_WATCHDOG_ENABLED;

EXPORT_SYMBOL(lc_watchdog_touch);
/*
 * Disable @lcw: cancel any pending expiry callback, record the time
 * (logging if the watchdog had expired) and mark it LC_WATCHDOG_DISABLED
 * so a later timer expiry is ignored by lcw_cb()/the dispatcher.
 */
void lc_watchdog_disable(struct lc_watchdog *lcw)
	LASSERT(lcw != NULL);

	lc_watchdog_del_pending(lcw);

	lcw_update_time(lcw, "completed");
	lcw->lcw_state = LC_WATCHDOG_DISABLED;

EXPORT_SYMBOL(lc_watchdog_disable);
/*
 * Release the owner's reference to @lcw: disarm the timer, dequeue any
 * pending callback, and free the watchdog once no dispatcher callback
 * still holds a reference (otherwise the dispatcher frees it via its
 * zombies list).  Stops the dispatcher thread when the last watchdog in
 * the system goes away.
 */
void lc_watchdog_delete(struct lc_watchdog *lcw)
	LASSERT(lcw != NULL);

	cfs_timer_disarm(&lcw->lcw_timer);

	lcw_update_time(lcw, "stopped");

	spin_lock_bh(&lcw->lcw_lock);
	spin_lock_bh(&lcw_pending_timers_lock);
	if (unlikely(!list_empty(&lcw->lcw_list))) {
		list_del_init(&lcw->lcw_list);
		lcw->lcw_refcount--; /* -1 ref for pending list */

	lcw->lcw_refcount--; /* -1 ref for owner */
	dead = lcw->lcw_refcount == 0;
	spin_unlock_bh(&lcw_pending_timers_lock);
	spin_unlock_bh(&lcw->lcw_lock);

		LIBCFS_FREE(lcw, sizeof(*lcw));

	/* last watchdog gone: shut the dispatcher thread down */
	mutex_lock(&lcw_refcount_mutex);
	if (--lcw_refcount == 0)
	mutex_unlock(&lcw_refcount_mutex);

EXPORT_SYMBOL(lc_watchdog_delete);
#else /* !defined(WITH_WATCHDOG) */

/*
 * Stub implementations used when the watchdog support is compiled out:
 * lc_watchdog_add() hands back a static dummy object and the remaining
 * entry points do nothing, so callers need no #ifdefs.
 */
struct lc_watchdog *lc_watchdog_add(int timeout,
				    void (*callback)(pid_t pid, void *),
	static struct lc_watchdog watchdog;
EXPORT_SYMBOL(lc_watchdog_add);

void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
EXPORT_SYMBOL(lc_watchdog_touch);

void lc_watchdog_disable(struct lc_watchdog *lcw)
EXPORT_SYMBOL(lc_watchdog_disable);

void lc_watchdog_delete(struct lc_watchdog *lcw)
EXPORT_SYMBOL(lc_watchdog_delete);