4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/watchdog.c
38 * Author: Jacob Berkman <jacob@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_LNET
43 #include <libcfs/libcfs.h>
44 #include "tracefile.h"
/*
 * Per-watchdog state (interior of struct lc_watchdog).
 * NOTE(review): this listing is fragmented -- fields referenced later
 * (lcw_pid, lcw_data, lcw_state) are not visible here; the struct is
 * larger than what is shown.
 */
47 spinlock_t lcw_lock; /* check or change lcw_list */
48 int lcw_refcount; /* must hold lcw_pending_timers_lock */
49 cfs_timer_t lcw_timer; /* kernel timer */
50 cfs_list_t lcw_list; /* chain on pending list */
51 cfs_time_t lcw_last_touched; /* last touched stamp */
52 cfs_task_t *lcw_task; /* owner task */
/* expiry callback, invoked by the dispatcher thread with (lcw_pid, lcw_data) */
53 void (*lcw_callback)(pid_t, void *);
/* Module-level state shared between timer callbacks and the dispatcher. */
67 * The dispatcher will complete lcw_start_completion when it starts,
68 * and lcw_stop_completion when it exits.
69 * Wake lcw_event_waitq to signal timer callback dispatches.
71 static struct completion lcw_start_completion;
72 static struct completion lcw_stop_completion;
73 static cfs_waitq_t lcw_event_waitq;
76 * Set this and wake lcw_event_waitq to stop the dispatcher.
/* bit flags for the dispatcher; LCW_FLAG_STOP requests shutdown */
81 static unsigned long lcw_flags = 0;
84 * Number of outstanding watchdogs.
85 * When it hits 1, we start the dispatcher.
86 * When it hits 0, we stop the dispatcher.
/* protected by lcw_refcount_mutex; distinct from per-lcw lcw_refcount */
88 static __u32 lcw_refcount = 0;
89 static DEFINE_MUTEX(lcw_refcount_mutex);
92 * List of timers that have fired that need their callbacks run by the
/* dispatcher; entries hold a +1 reference while queued */
96 static DEFINE_SPINLOCK(lcw_pending_timers_lock);
97 static cfs_list_t lcw_pending_timers = CFS_LIST_HEAD_INIT(lcw_pending_timers);
99 /* Last time a watchdog expired */
100 static cfs_time_t lcw_last_watchdog_time;
/* how many watchdogs fired inside the current ratelimit window */
101 static int lcw_recent_watchdog_count;
/*
 * Dump the stack of the task owning an expired watchdog.
 * Holds tasklist_lock (when the kernel exposes it) so lcw_task cannot
 * be reaped while we walk its stack; if the task is already gone, only
 * a console warning is emitted.
 */
104 lcw_dump(struct lc_watchdog *lcw)
107 #if defined(HAVE_TASKLIST_LOCK)
108 read_lock(&tasklist_lock);
112 if (lcw->lcw_task == NULL) {
113 LCONSOLE_WARN("Process " LPPID " was not found in the task "
114 "list; watchdog callback may be incomplete\n",
/* task still alive: dump its kernel stack to the debug log */
117 libcfs_debug_dumpstack(lcw->lcw_task);
120 #if defined(HAVE_TASKLIST_LOCK)
121 read_unlock(&tasklist_lock);
/*
 * Timer expiry callback (timer/softirq context, hence the _bh locks).
 * Marks the watchdog EXPIRED, queues it on lcw_pending_timers with an
 * extra reference, and wakes the dispatcher thread which will run the
 * heavyweight lcw_callback from process context.
 */
128 static void lcw_cb(ulong_ptr_t data)
130 struct lc_watchdog *lcw = (struct lc_watchdog *)data;
/* ignore stale expiry if the owner already disabled/expired it */
133 if (lcw->lcw_state != LC_WATCHDOG_ENABLED) {
138 lcw->lcw_state = LC_WATCHDOG_EXPIRED;
/* lock order: lcw->lcw_lock, then lcw_pending_timers_lock */
140 spin_lock_bh(&lcw->lcw_lock);
141 LASSERT(cfs_list_empty(&lcw->lcw_list));
143 spin_lock_bh(&lcw_pending_timers_lock);
144 lcw->lcw_refcount++; /* +1 for pending list */
145 cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
146 cfs_waitq_signal(&lcw_event_waitq);
148 spin_unlock_bh(&lcw_pending_timers_lock);
149 spin_unlock_bh(&lcw->lcw_lock);
/*
 * Wait condition for the dispatcher: true when a shutdown was requested
 * (LCW_FLAG_STOP) or at least one expired watchdog is queued.
 */
153 static int is_watchdog_fired(void)
157 if (test_bit(LCW_FLAG_STOP, &lcw_flags))
160 spin_lock_bh(&lcw_pending_timers_lock);
161 rc = !cfs_list_empty(&lcw_pending_timers);
162 spin_unlock_bh(&lcw_pending_timers_lock);
/*
 * Report an expired watchdog on the console, rate-limited to avoid
 * flooding (which could itself trigger an NMI watchdog): at most 3
 * stack dumps per libcfs_watchdog_ratelimit seconds.
 */
166 static void lcw_dump_stack(struct lc_watchdog *lcw)
168 cfs_time_t current_time;
169 cfs_duration_t delta_time;
170 struct timeval timediff;
/* how long since the owner last touched this watchdog */
172 current_time = cfs_time_current();
173 delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched);
174 cfs_duration_usec(delta_time, &timediff);
177 * Check to see if we should throttle the watchdog timer to avoid
178 * too many dumps going to the console thus triggering an NMI.
/* seconds elapsed since the previous watchdog expiry */
180 delta_time = cfs_duration_sec(cfs_time_sub(current_time,
181 lcw_last_watchdog_time));
183 if (delta_time < libcfs_watchdog_ratelimit &&
184 lcw_recent_watchdog_count > 3) {
185 LCONSOLE_WARN("Service thread pid %u was inactive for "
186 "%lu.%.02lus. Watchdog stack traces are limited "
187 "to 3 per %d seconds, skipping this one.\n",
190 timediff.tv_usec / 10000,
191 libcfs_watchdog_ratelimit);
/* still inside the window: count it; otherwise start a new window */
193 if (delta_time < libcfs_watchdog_ratelimit) {
194 lcw_recent_watchdog_count++;
/*
 * BUG(review): "¤t_time" is mojibake for "&current_time"
 * (HTML-entity corruption of the source); must be repaired before
 * this file can compile.
 */
196 memcpy(&lcw_last_watchdog_time, ¤t_time,
197 sizeof(current_time));
198 lcw_recent_watchdog_count = 0;
201 LCONSOLE_WARN("Service thread pid %u was inactive for "
202 "%lu.%.02lus. The thread might be hung, or it "
203 "might only be slow and will resume later. "
204 "Dumping the stack trace for debugging purposes:"
208 timediff.tv_usec / 10000);
/*
 * Dispatcher thread: sleeps until a watchdog fires (or shutdown is
 * requested), then drains lcw_pending_timers, running each expired
 * watchdog's callback from process context.  Watchdogs whose refcount
 * drops to zero here are collected on a local "zombies" list and freed.
 */
213 static int lcw_dispatch_main(void *data)
217 struct lc_watchdog *lcw;
218 CFS_LIST_HEAD (zombies);
222 cfs_daemonize("lc_watchdogd");
/*
 * BUG(review): "¤t->blocked" below is mojibake for
 * "&current->blocked" (HTML-entity corruption); block all signals so
 * the daemon is not killed by them.
 */
224 SIGNAL_MASK_LOCK(current, flags);
225 sigfillset(¤t->blocked);
227 SIGNAL_MASK_UNLOCK(current, flags);
/* tell lcw_dispatch_start() we are up */
229 complete(&lcw_start_completion);
234 cfs_wait_event_interruptible(lcw_event_waitq,
235 is_watchdog_fired(), rc);
236 CDEBUG(D_INFO, "Watchdog got woken up...\n");
237 if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
238 CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
/* on shutdown the pending list should already be empty */
240 spin_lock_bh(&lcw_pending_timers_lock);
241 rc = !cfs_list_empty(&lcw_pending_timers);
242 spin_unlock_bh(&lcw_pending_timers_lock);
244 CERROR("pending timers list was not empty at "
245 "time of watchdog dispatch shutdown\n");
250 spin_lock_bh(&lcw_pending_timers_lock);
251 while (!cfs_list_empty(&lcw_pending_timers)) {
254 lcw = cfs_list_entry(lcw_pending_timers.next,
255 struct lc_watchdog, lcw_list);
256 /* +1 ref for callback to make sure lwc wouldn't be
257 * deleted after releasing lcw_pending_timers_lock */
/* drop list lock so we can take lcw_lock first (lock order) */
259 spin_unlock_bh(&lcw_pending_timers_lock);
262 spin_lock_bh(&lcw->lcw_lock);
263 spin_lock_bh(&lcw_pending_timers_lock);
265 if (cfs_list_empty(&lcw->lcw_list)) {
266 /* already removed from pending list */
267 lcw->lcw_refcount--; /* -1 ref for callback */
268 if (lcw->lcw_refcount == 0)
269 cfs_list_add(&lcw->lcw_list, &zombies);
270 spin_unlock_bh(&lcw->lcw_lock);
271 /* still hold lcw_pending_timers_lock */
275 cfs_list_del_init(&lcw->lcw_list);
276 lcw->lcw_refcount--; /* -1 ref for pending list */
278 spin_unlock_bh(&lcw_pending_timers_lock);
279 spin_unlock_bh(&lcw->lcw_lock);
281 CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
/* only the first expiry triggers a debug-log dump (dumplog once) */
285 is_dumplog = lcw->lcw_callback == lc_watchdog_dumplog;
286 if (lcw->lcw_state != LC_WATCHDOG_DISABLED &&
287 (dumplog || !is_dumplog)) {
288 lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
289 if (dumplog && is_dumplog)
293 spin_lock_bh(&lcw_pending_timers_lock);
294 lcw->lcw_refcount--; /* -1 ref for callback */
295 if (lcw->lcw_refcount == 0)
296 cfs_list_add(&lcw->lcw_list, &zombies);
298 spin_unlock_bh(&lcw_pending_timers_lock);
300 while (!cfs_list_empty(&zombies)) {
/*
 * BUG(review): this takes the entry from lcw_pending_timers.next
 * while the loop condition tests the zombies list; it should be
 * cfs_list_entry(zombies.next, ...).  As written it can free a
 * watchdog still on the pending list and spin forever once zombies
 * is non-empty while the pending list is empty.
 */
301 lcw = cfs_list_entry(lcw_pending_timers.next,
302 struct lc_watchdog, lcw_list);
303 cfs_list_del(&lcw->lcw_list);
304 LIBCFS_FREE(lcw, sizeof(*lcw));
/* tell lcw_dispatch_stop() we are done */
308 complete(&lcw_stop_completion);
/*
 * Spawn the dispatcher thread; called under lcw_refcount_mutex when the
 * first watchdog is added (lcw_refcount just became 1).  Blocks until
 * the thread signals lcw_start_completion.
 */
313 static void lcw_dispatch_start(void)
318 LASSERT(lcw_refcount == 1);
320 init_completion(&lcw_stop_completion);
321 init_completion(&lcw_start_completion);
322 cfs_waitq_init(&lcw_event_waitq);
324 CDEBUG(D_INFO, "starting dispatch thread\n");
325 rc = cfs_create_thread(lcw_dispatch_main, NULL, 0);
327 CERROR("error spawning watchdog dispatch thread: %d\n", rc);
/* wait until lcw_dispatch_main() has initialized itself */
331 wait_for_completion(&lcw_start_completion);
332 CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
/*
 * Stop the dispatcher thread; called under lcw_refcount_mutex when the
 * last watchdog is deleted (lcw_refcount just became 0).  Sets the stop
 * flag, wakes the thread, and waits for it to exit.
 */
337 static void lcw_dispatch_stop(void)
340 LASSERT(lcw_refcount == 0);
342 CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
344 set_bit(LCW_FLAG_STOP, &lcw_flags);
345 cfs_waitq_signal(&lcw_event_waitq);
347 wait_for_completion(&lcw_stop_completion);
349 CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
/*
 * Allocate and register a new watchdog for the calling task.
 * @timeout   seconds of inactivity before the watchdog fires
 * @callback  invoked with (pid, data) on expiry; NULL selects the
 *            default lc_watchdog_dumplog
 * @data      opaque cookie passed to @callback
 * Returns the new watchdog, or ERR_PTR(-ENOMEM) on allocation failure.
 * The watchdog starts DISABLED; callers arm it via lc_watchdog_touch().
 */
354 struct lc_watchdog *lc_watchdog_add(int timeout,
355 void (*callback)(pid_t, void *),
358 struct lc_watchdog *lcw = NULL;
361 LIBCFS_ALLOC(lcw, sizeof(*lcw));
363 CDEBUG(D_INFO, "Could not allocate new lc_watchdog\n");
364 RETURN(ERR_PTR(-ENOMEM));
367 spin_lock_init(&lcw->lcw_lock);
368 lcw->lcw_refcount = 1; /* refcount for owner */
369 lcw->lcw_task = cfs_current();
370 lcw->lcw_pid = cfs_curproc_pid();
371 lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
372 lcw->lcw_data = data;
373 lcw->lcw_state = LC_WATCHDOG_DISABLED;
375 CFS_INIT_LIST_HEAD(&lcw->lcw_list);
376 cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
/* first watchdog in the system starts the dispatcher thread */
378 mutex_lock(&lcw_refcount_mutex);
379 if (++lcw_refcount == 1)
380 lcw_dispatch_start();
381 mutex_unlock(&lcw_refcount_mutex);
383 /* Keep this working in case we enable them by default */
/* dead branch today: lcw_state was just set to DISABLED above */
384 if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
385 lcw->lcw_last_touched = cfs_time_current();
386 cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
392 EXPORT_SYMBOL(lc_watchdog_add);
/*
 * Record "now" as the last-touched time; if the watchdog had already
 * EXPIRED, log how long the service thread was inactive (@message is
 * "resumed"/"completed"/"stopped" depending on the caller).
 */
394 static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
/* NOTE(review): stray double semicolon below -- harmless, but tidy up */
396 cfs_time_t newtime = cfs_time_current();;
398 if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
399 struct timeval timediff;
400 cfs_time_t delta_time = cfs_time_sub(newtime,
401 lcw->lcw_last_touched);
402 cfs_duration_usec(delta_time, &timediff);
404 LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. "
405 "This indicates the system was overloaded (too "
406 "many service threads, or there were not enough "
407 "hardware resources).\n",
411 timediff.tv_usec / 10000);
413 lcw->lcw_last_touched = newtime;
/*
 * Remove @lcw from the pending-callback list if it is queued there,
 * dropping the reference the pending list held.  Safe to call when the
 * watchdog is not pending (the list_empty check makes it a no-op).
 */
416 static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
418 spin_lock_bh(&lcw->lcw_lock);
419 if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
420 spin_lock_bh(&lcw_pending_timers_lock);
421 cfs_list_del_init(&lcw->lcw_list);
422 lcw->lcw_refcount--; /* -1 ref for pending list */
423 spin_unlock_bh(&lcw_pending_timers_lock);
426 spin_unlock_bh(&lcw->lcw_lock);
/*
 * Reset a watchdog: dequeue any pending expiry, log if it had expired
 * ("resumed"), re-enable it, and re-arm the timer for @timeout seconds
 * from now.  Service threads call this at the top of each request.
 */
429 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
432 LASSERT(lcw != NULL);
434 lc_watchdog_del_pending(lcw);
436 lcw_update_time(lcw, "resumed");
437 lcw->lcw_state = LC_WATCHDOG_ENABLED;
439 cfs_timer_arm(&lcw->lcw_timer, cfs_time_current() +
440 cfs_time_seconds(timeout));
444 EXPORT_SYMBOL(lc_watchdog_touch);
/*
 * Disable a watchdog without deleting it: dequeue any pending expiry,
 * log if it had expired ("completed"), and mark it DISABLED so a later
 * timer fire is ignored.  The timer itself is not disarmed here.
 */
446 void lc_watchdog_disable(struct lc_watchdog *lcw)
449 LASSERT(lcw != NULL);
451 lc_watchdog_del_pending(lcw);
453 lcw_update_time(lcw, "completed");
454 lcw->lcw_state = LC_WATCHDOG_DISABLED;
458 EXPORT_SYMBOL(lc_watchdog_disable);
/*
 * Tear down a watchdog: disarm the timer, drop the pending-list and
 * owner references, and free the structure once no references remain
 * (the dispatcher may still hold a transient callback reference, in
 * which case it frees the watchdog via its zombies list).  Stops the
 * dispatcher thread when the last watchdog in the system goes away.
 */
460 void lc_watchdog_delete(struct lc_watchdog *lcw)
465 LASSERT(lcw != NULL);
467 cfs_timer_disarm(&lcw->lcw_timer);
469 lcw_update_time(lcw, "stopped");
471 spin_lock_bh(&lcw->lcw_lock);
472 spin_lock_bh(&lcw_pending_timers_lock);
473 if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
474 cfs_list_del_init(&lcw->lcw_list);
475 lcw->lcw_refcount--; /* -1 ref for pending list */
478 lcw->lcw_refcount--; /* -1 ref for owner */
479 dead = lcw->lcw_refcount == 0;
480 spin_unlock_bh(&lcw_pending_timers_lock);
481 spin_unlock_bh(&lcw->lcw_lock);
/* free only if no dispatcher callback reference is outstanding */
484 LIBCFS_FREE(lcw, sizeof(*lcw));
486 mutex_lock(&lcw_refcount_mutex);
487 if (--lcw_refcount == 0)
489 mutex_unlock(&lcw_refcount_mutex);
493 EXPORT_SYMBOL(lc_watchdog_delete);
496 * Provided watchdog handlers
/*
 * Default expiry callback: dump the libcfs debug log, tagging the dump
 * file with the hung thread's pid.  @data is unused.
 */
499 void lc_watchdog_dumplog(pid_t pid, void *data)
501 libcfs_debug_dumplog_internal((void *)((long_ptr_t)pid));
503 EXPORT_SYMBOL(lc_watchdog_dumplog);
505 #else /* !defined(WITH_WATCHDOG) */
/*
 * No-op stubs used when the build is configured without watchdog
 * support: lc_watchdog_add() hands back a shared static dummy, and the
 * touch/disable/delete entry points do nothing.
 */
507 struct lc_watchdog *lc_watchdog_add(int timeout,
508 void (*callback)(pid_t pid, void *),
511 static struct lc_watchdog watchdog;
514 EXPORT_SYMBOL(lc_watchdog_add);
516 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
519 EXPORT_SYMBOL(lc_watchdog_touch);
521 void lc_watchdog_disable(struct lc_watchdog *lcw)
524 EXPORT_SYMBOL(lc_watchdog_disable);
526 void lc_watchdog_delete(struct lc_watchdog *lcw)
529 EXPORT_SYMBOL(lc_watchdog_delete);