4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/watchdog.c
38 * Author: Jacob Berkman <jacob@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_LNET
43 #include <libcfs/libcfs.h>
44 #include "tracefile.h"
/*
 * Per-watchdog state.
 * NOTE(review): the struct opener and the trailing members (apparently a
 * pid, callback data pointer and an enabled/disabled/expired state, judging
 * by uses of lcw_pid/lcw_data/lcw_state below) are not visible in this
 * chunk — confirm against the full file.
 */
47 spinlock_t lcw_lock; /* check or change lcw_list */
48 int lcw_refcount; /* must hold lcw_pending_timers_lock */
49 cfs_timer_t lcw_timer; /* kernel timer */
50 cfs_list_t lcw_list; /* chain on pending list */
51 cfs_time_t lcw_last_touched; /* last touched stamp */
52 cfs_task_t *lcw_task; /* owner task */
/* invoked by the dispatcher thread when the watchdog expires */
53 void (*lcw_callback)(pid_t, void *);
67 * The dispatcher will complete lcw_start_completion when it starts,
68 * and lcw_stop_completion when it exits.
69 * Wake lcw_event_waitq to signal timer callback dispatches.
71 static struct completion lcw_start_completion;
72 static struct completion lcw_stop_completion;
/* dispatcher sleeps here until a watchdog fires or a stop is requested */
73 static cfs_waitq_t lcw_event_waitq;
76 * Set this and wake lcw_event_waitq to stop the dispatcher.
/* bit flags for the dispatcher; LCW_FLAG_STOP lives here */
81 static unsigned long lcw_flags = 0;
84 * Number of outstanding watchdogs.
85 * When it hits 1, we start the dispatcher.
86 * When it hits 0, we stop the dispatcher.
/* dispatcher-thread refcount, guarded by lcw_refcount_mutex — distinct
 * from the per-watchdog lcw_refcount field of struct lc_watchdog */
88 static __u32 lcw_refcount = 0;
89 static DEFINE_MUTEX(lcw_refcount_mutex);
92 * List of timers that have fired that need their callbacks run by the
/* protects lcw_pending_timers and the per-watchdog lcw_refcount field */
96 static DEFINE_SPINLOCK(lcw_pending_timers_lock);
97 static cfs_list_t lcw_pending_timers = CFS_LIST_HEAD_INIT(lcw_pending_timers);
99 /* Last time a watchdog expired */
100 static cfs_time_t lcw_last_watchdog_time;
/* consecutive expiries inside the rate-limit window; used for throttling */
101 static int lcw_recent_watchdog_count;
104 lcw_dump(struct lc_watchdog *lcw)
107 #if defined(HAVE_TASKLIST_LOCK)
108 read_lock(&tasklist_lock);
112 if (lcw->lcw_task == NULL) {
113 LCONSOLE_WARN("Process " LPPID " was not found in the task "
114 "list; watchdog callback may be incomplete\n",
117 libcfs_debug_dumpstack(lcw->lcw_task);
120 #if defined(HAVE_TASKLIST_LOCK)
121 read_unlock(&tasklist_lock);
128 static void lcw_cb(ulong_ptr_t data)
130 struct lc_watchdog *lcw = (struct lc_watchdog *)data;
133 if (lcw->lcw_state != LC_WATCHDOG_ENABLED) {
138 lcw->lcw_state = LC_WATCHDOG_EXPIRED;
140 spin_lock_bh(&lcw->lcw_lock);
141 LASSERT(cfs_list_empty(&lcw->lcw_list));
143 spin_lock_bh(&lcw_pending_timers_lock);
144 lcw->lcw_refcount++; /* +1 for pending list */
145 cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
146 cfs_waitq_signal(&lcw_event_waitq);
148 spin_unlock_bh(&lcw_pending_timers_lock);
149 spin_unlock_bh(&lcw->lcw_lock);
153 static int is_watchdog_fired(void)
157 if (test_bit(LCW_FLAG_STOP, &lcw_flags))
160 spin_lock_bh(&lcw_pending_timers_lock);
161 rc = !cfs_list_empty(&lcw_pending_timers);
162 spin_unlock_bh(&lcw_pending_timers_lock);
166 static void lcw_dump_stack(struct lc_watchdog *lcw)
168 cfs_time_t current_time;
169 cfs_duration_t delta_time;
170 struct timeval timediff;
172 current_time = cfs_time_current();
173 delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched);
174 cfs_duration_usec(delta_time, &timediff);
177 * Check to see if we should throttle the watchdog timer to avoid
178 * too many dumps going to the console thus triggering an NMI.
180 delta_time = cfs_duration_sec(cfs_time_sub(current_time,
181 lcw_last_watchdog_time));
183 if (delta_time < libcfs_watchdog_ratelimit &&
184 lcw_recent_watchdog_count > 3) {
185 LCONSOLE_WARN("Service thread pid %u was inactive for "
186 "%lu.%.02lus. Watchdog stack traces are limited "
187 "to 3 per %d seconds, skipping this one.\n",
190 timediff.tv_usec / 10000,
191 libcfs_watchdog_ratelimit);
193 if (delta_time < libcfs_watchdog_ratelimit) {
194 lcw_recent_watchdog_count++;
196 memcpy(&lcw_last_watchdog_time, ¤t_time,
197 sizeof(current_time));
198 lcw_recent_watchdog_count = 0;
201 LCONSOLE_WARN("Service thread pid %u was inactive for "
202 "%lu.%.02lus. The thread might be hung, or it "
203 "might only be slow and will resume later. "
204 "Dumping the stack trace for debugging purposes:"
208 timediff.tv_usec / 10000);
213 static int lcw_dispatch_main(void *data)
216 struct lc_watchdog *lcw;
217 CFS_LIST_HEAD (zombies);
221 complete(&lcw_start_completion);
226 cfs_wait_event_interruptible(lcw_event_waitq,
227 is_watchdog_fired(), rc);
228 CDEBUG(D_INFO, "Watchdog got woken up...\n");
229 if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
230 CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
232 spin_lock_bh(&lcw_pending_timers_lock);
233 rc = !cfs_list_empty(&lcw_pending_timers);
234 spin_unlock_bh(&lcw_pending_timers_lock);
236 CERROR("pending timers list was not empty at "
237 "time of watchdog dispatch shutdown\n");
242 spin_lock_bh(&lcw_pending_timers_lock);
243 while (!cfs_list_empty(&lcw_pending_timers)) {
246 lcw = cfs_list_entry(lcw_pending_timers.next,
247 struct lc_watchdog, lcw_list);
248 /* +1 ref for callback to make sure lwc wouldn't be
249 * deleted after releasing lcw_pending_timers_lock */
251 spin_unlock_bh(&lcw_pending_timers_lock);
254 spin_lock_bh(&lcw->lcw_lock);
255 spin_lock_bh(&lcw_pending_timers_lock);
257 if (cfs_list_empty(&lcw->lcw_list)) {
258 /* already removed from pending list */
259 lcw->lcw_refcount--; /* -1 ref for callback */
260 if (lcw->lcw_refcount == 0)
261 cfs_list_add(&lcw->lcw_list, &zombies);
262 spin_unlock_bh(&lcw->lcw_lock);
263 /* still hold lcw_pending_timers_lock */
267 cfs_list_del_init(&lcw->lcw_list);
268 lcw->lcw_refcount--; /* -1 ref for pending list */
270 spin_unlock_bh(&lcw_pending_timers_lock);
271 spin_unlock_bh(&lcw->lcw_lock);
273 CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
277 is_dumplog = lcw->lcw_callback == lc_watchdog_dumplog;
278 if (lcw->lcw_state != LC_WATCHDOG_DISABLED &&
279 (dumplog || !is_dumplog)) {
280 lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
281 if (dumplog && is_dumplog)
285 spin_lock_bh(&lcw_pending_timers_lock);
286 lcw->lcw_refcount--; /* -1 ref for callback */
287 if (lcw->lcw_refcount == 0)
288 cfs_list_add(&lcw->lcw_list, &zombies);
290 spin_unlock_bh(&lcw_pending_timers_lock);
292 while (!cfs_list_empty(&zombies)) {
293 lcw = cfs_list_entry(lcw_pending_timers.next,
294 struct lc_watchdog, lcw_list);
295 cfs_list_del(&lcw->lcw_list);
296 LIBCFS_FREE(lcw, sizeof(*lcw));
300 complete(&lcw_stop_completion);
305 static void lcw_dispatch_start(void)
310 LASSERT(lcw_refcount == 1);
312 init_completion(&lcw_stop_completion);
313 init_completion(&lcw_start_completion);
314 cfs_waitq_init(&lcw_event_waitq);
316 CDEBUG(D_INFO, "starting dispatch thread\n");
317 task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
319 CERROR("error spawning watchdog dispatch thread: %ld\n",
324 wait_for_completion(&lcw_start_completion);
325 CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
330 static void lcw_dispatch_stop(void)
333 LASSERT(lcw_refcount == 0);
335 CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
337 set_bit(LCW_FLAG_STOP, &lcw_flags);
338 cfs_waitq_signal(&lcw_event_waitq);
340 wait_for_completion(&lcw_stop_completion);
342 CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
347 struct lc_watchdog *lc_watchdog_add(int timeout,
348 void (*callback)(pid_t, void *),
351 struct lc_watchdog *lcw = NULL;
354 LIBCFS_ALLOC(lcw, sizeof(*lcw));
356 CDEBUG(D_INFO, "Could not allocate new lc_watchdog\n");
357 RETURN(ERR_PTR(-ENOMEM));
360 spin_lock_init(&lcw->lcw_lock);
361 lcw->lcw_refcount = 1; /* refcount for owner */
362 lcw->lcw_task = cfs_current();
363 lcw->lcw_pid = cfs_curproc_pid();
364 lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
365 lcw->lcw_data = data;
366 lcw->lcw_state = LC_WATCHDOG_DISABLED;
368 CFS_INIT_LIST_HEAD(&lcw->lcw_list);
369 cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
371 mutex_lock(&lcw_refcount_mutex);
372 if (++lcw_refcount == 1)
373 lcw_dispatch_start();
374 mutex_unlock(&lcw_refcount_mutex);
376 /* Keep this working in case we enable them by default */
377 if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
378 lcw->lcw_last_touched = cfs_time_current();
379 cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
385 EXPORT_SYMBOL(lc_watchdog_add);
387 static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
389 cfs_time_t newtime = cfs_time_current();;
391 if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
392 struct timeval timediff;
393 cfs_time_t delta_time = cfs_time_sub(newtime,
394 lcw->lcw_last_touched);
395 cfs_duration_usec(delta_time, &timediff);
397 LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. "
398 "This indicates the system was overloaded (too "
399 "many service threads, or there were not enough "
400 "hardware resources).\n",
404 timediff.tv_usec / 10000);
406 lcw->lcw_last_touched = newtime;
409 static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
411 spin_lock_bh(&lcw->lcw_lock);
412 if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
413 spin_lock_bh(&lcw_pending_timers_lock);
414 cfs_list_del_init(&lcw->lcw_list);
415 lcw->lcw_refcount--; /* -1 ref for pending list */
416 spin_unlock_bh(&lcw_pending_timers_lock);
419 spin_unlock_bh(&lcw->lcw_lock);
422 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
425 LASSERT(lcw != NULL);
427 lc_watchdog_del_pending(lcw);
429 lcw_update_time(lcw, "resumed");
430 lcw->lcw_state = LC_WATCHDOG_ENABLED;
432 cfs_timer_arm(&lcw->lcw_timer, cfs_time_current() +
433 cfs_time_seconds(timeout));
437 EXPORT_SYMBOL(lc_watchdog_touch);
439 void lc_watchdog_disable(struct lc_watchdog *lcw)
442 LASSERT(lcw != NULL);
444 lc_watchdog_del_pending(lcw);
446 lcw_update_time(lcw, "completed");
447 lcw->lcw_state = LC_WATCHDOG_DISABLED;
451 EXPORT_SYMBOL(lc_watchdog_disable);
453 void lc_watchdog_delete(struct lc_watchdog *lcw)
458 LASSERT(lcw != NULL);
460 cfs_timer_disarm(&lcw->lcw_timer);
462 lcw_update_time(lcw, "stopped");
464 spin_lock_bh(&lcw->lcw_lock);
465 spin_lock_bh(&lcw_pending_timers_lock);
466 if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
467 cfs_list_del_init(&lcw->lcw_list);
468 lcw->lcw_refcount--; /* -1 ref for pending list */
471 lcw->lcw_refcount--; /* -1 ref for owner */
472 dead = lcw->lcw_refcount == 0;
473 spin_unlock_bh(&lcw_pending_timers_lock);
474 spin_unlock_bh(&lcw->lcw_lock);
477 LIBCFS_FREE(lcw, sizeof(*lcw));
479 mutex_lock(&lcw_refcount_mutex);
480 if (--lcw_refcount == 0)
482 mutex_unlock(&lcw_refcount_mutex);
486 EXPORT_SYMBOL(lc_watchdog_delete);
489 * Provided watchdog handlers
492 void lc_watchdog_dumplog(pid_t pid, void *data)
494 libcfs_debug_dumplog_internal((void *)((long_ptr_t)pid));
496 EXPORT_SYMBOL(lc_watchdog_dumplog);
498 #else /* !defined(WITH_WATCHDOG) */
500 struct lc_watchdog *lc_watchdog_add(int timeout,
501 void (*callback)(pid_t pid, void *),
504 static struct lc_watchdog watchdog;
507 EXPORT_SYMBOL(lc_watchdog_add);
509 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
512 EXPORT_SYMBOL(lc_watchdog_touch);
514 void lc_watchdog_disable(struct lc_watchdog *lcw)
517 EXPORT_SYMBOL(lc_watchdog_disable);
519 void lc_watchdog_delete(struct lc_watchdog *lcw)
522 EXPORT_SYMBOL(lc_watchdog_delete);