4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/watchdog.c
38 * Author: Jacob Berkman <jacob@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_LNET
43 #include <libcfs/libcfs.h>
44 #include "tracefile.h"
/*
 * NOTE(review): this extraction shows only interior fields of
 * struct lc_watchdog -- the "struct lc_watchdog {" opener and the
 * trailing lcw_data/lcw_pid/lcw_state members are elided here; keep
 * the full declaration intact in the real file.
 */
47 spinlock_t lcw_lock; /* check or change lcw_list */
48 int lcw_refcount; /* must hold lcw_pending_timers_lock */
49 struct timer_list lcw_timer; /* kernel timer */
50 cfs_list_t lcw_list; /* chain on pending list */
51 cfs_time_t lcw_last_touched; /* last touched stamp */
52 struct task_struct *lcw_task; /* owner task */
53 void (*lcw_callback)(pid_t, void *); /* run by dispatcher on expiry */
/*
 * The dispatcher will complete lcw_start_completion when it starts,
 * and lcw_stop_completion when it exits.
 * Wake lcw_event_waitq to signal timer callback dispatches.
 */
71 static struct completion lcw_start_completion;
72 static struct completion lcw_stop_completion;
73 static wait_queue_head_t lcw_event_waitq;
/*
 * Set this bit (LCW_FLAG_STOP) and wake lcw_event_waitq to stop the
 * dispatcher thread.
 */
81 static unsigned long lcw_flags = 0;
/*
 * Number of outstanding watchdogs.
 * When it hits 1, we start the dispatcher.
 * When it hits 0, we stop the dispatcher.
 */
88 static __u32 lcw_refcount = 0;
89 static DEFINE_MUTEX(lcw_refcount_mutex); /* serializes dispatcher start/stop */
/*
 * List of timers that have fired that need their callbacks run by the
 * dispatcher; protected by lcw_pending_timers_lock.
 */
96 static DEFINE_SPINLOCK(lcw_pending_timers_lock);
97 static cfs_list_t lcw_pending_timers = CFS_LIST_HEAD_INIT(lcw_pending_timers);
99 /* Last time a watchdog expired */
100 static cfs_time_t lcw_last_watchdog_time;
101 static int lcw_recent_watchdog_count; /* dumps since lcw_last_watchdog_time */
/*
 * Dump the stack of the task that owns @lcw; if the owner task cannot
 * be resolved, warn that the watchdog callback may be incomplete.
 * (Elided in this extraction: the "static void" line, the pid argument
 * of the warning, and the closing lines of the function.)
 */
104 lcw_dump(struct lc_watchdog *lcw)
108 if (lcw->lcw_task == NULL) {
109 LCONSOLE_WARN("Process " LPPID " was not found in the task "
110 "list; watchdog callback may be incomplete\n",
113 libcfs_debug_dumpstack(lcw->lcw_task);
/*
 * Kernel timer callback, fired when a watchdog's timeout elapses.
 * Runs in timer (bottom-half) context, hence spin_lock_bh and no
 * blocking work here: it only marks the watchdog EXPIRED, queues it on
 * lcw_pending_timers with a +1 reference, and wakes the dispatcher
 * thread which runs the actual callback in process context.
 * Disabled watchdogs (state != LC_WATCHDOG_ENABLED) are ignored.
 */
120 static void lcw_cb(ulong_ptr_t data)
122 struct lc_watchdog *lcw = (struct lc_watchdog *)data;
125 if (lcw->lcw_state != LC_WATCHDOG_ENABLED) {
130 lcw->lcw_state = LC_WATCHDOG_EXPIRED;
132 spin_lock_bh(&lcw->lcw_lock);
133 LASSERT(cfs_list_empty(&lcw->lcw_list));
135 spin_lock_bh(&lcw_pending_timers_lock);
136 lcw->lcw_refcount++; /* +1 for pending list */
137 cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
138 wake_up(&lcw_event_waitq);
140 spin_unlock_bh(&lcw_pending_timers_lock);
141 spin_unlock_bh(&lcw->lcw_lock);
/*
 * Wait condition for the dispatcher: nonzero when a stop was requested
 * (LCW_FLAG_STOP) or at least one expired watchdog is queued on
 * lcw_pending_timers.
 */
145 static int is_watchdog_fired(void)
149 if (test_bit(LCW_FLAG_STOP, &lcw_flags))
152 spin_lock_bh(&lcw_pending_timers_lock);
153 rc = !cfs_list_empty(&lcw_pending_timers);
154 spin_unlock_bh(&lcw_pending_timers_lock);
158 static void lcw_dump_stack(struct lc_watchdog *lcw)
160 cfs_time_t current_time;
161 cfs_duration_t delta_time;
162 struct timeval timediff;
164 current_time = cfs_time_current();
165 delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched);
166 cfs_duration_usec(delta_time, &timediff);
169 * Check to see if we should throttle the watchdog timer to avoid
170 * too many dumps going to the console thus triggering an NMI.
172 delta_time = cfs_duration_sec(cfs_time_sub(current_time,
173 lcw_last_watchdog_time));
175 if (delta_time < libcfs_watchdog_ratelimit &&
176 lcw_recent_watchdog_count > 3) {
177 LCONSOLE_WARN("Service thread pid %u was inactive for "
178 "%lu.%.02lus. Watchdog stack traces are limited "
179 "to 3 per %d seconds, skipping this one.\n",
182 timediff.tv_usec / 10000,
183 libcfs_watchdog_ratelimit);
185 if (delta_time < libcfs_watchdog_ratelimit) {
186 lcw_recent_watchdog_count++;
188 memcpy(&lcw_last_watchdog_time, ¤t_time,
189 sizeof(current_time));
190 lcw_recent_watchdog_count = 0;
193 LCONSOLE_WARN("Service thread pid %u was inactive for "
194 "%lu.%.02lus. The thread might be hung, or it "
195 "might only be slow and will resume later. "
196 "Dumping the stack trace for debugging purposes:"
200 timediff.tv_usec / 10000);
205 static int lcw_dispatch_main(void *data)
208 struct lc_watchdog *lcw;
209 CFS_LIST_HEAD (zombies);
213 complete(&lcw_start_completion);
218 rc = wait_event_interruptible(lcw_event_waitq,
219 is_watchdog_fired());
220 CDEBUG(D_INFO, "Watchdog got woken up...\n");
221 if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
222 CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
224 spin_lock_bh(&lcw_pending_timers_lock);
225 rc = !cfs_list_empty(&lcw_pending_timers);
226 spin_unlock_bh(&lcw_pending_timers_lock);
228 CERROR("pending timers list was not empty at "
229 "time of watchdog dispatch shutdown\n");
234 spin_lock_bh(&lcw_pending_timers_lock);
235 while (!cfs_list_empty(&lcw_pending_timers)) {
238 lcw = cfs_list_entry(lcw_pending_timers.next,
239 struct lc_watchdog, lcw_list);
240 /* +1 ref for callback to make sure lwc wouldn't be
241 * deleted after releasing lcw_pending_timers_lock */
243 spin_unlock_bh(&lcw_pending_timers_lock);
246 spin_lock_bh(&lcw->lcw_lock);
247 spin_lock_bh(&lcw_pending_timers_lock);
249 if (cfs_list_empty(&lcw->lcw_list)) {
250 /* already removed from pending list */
251 lcw->lcw_refcount--; /* -1 ref for callback */
252 if (lcw->lcw_refcount == 0)
253 cfs_list_add(&lcw->lcw_list, &zombies);
254 spin_unlock_bh(&lcw->lcw_lock);
255 /* still hold lcw_pending_timers_lock */
259 cfs_list_del_init(&lcw->lcw_list);
260 lcw->lcw_refcount--; /* -1 ref for pending list */
262 spin_unlock_bh(&lcw_pending_timers_lock);
263 spin_unlock_bh(&lcw->lcw_lock);
265 CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
269 is_dumplog = lcw->lcw_callback == lc_watchdog_dumplog;
270 if (lcw->lcw_state != LC_WATCHDOG_DISABLED &&
271 (dumplog || !is_dumplog)) {
272 lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
273 if (dumplog && is_dumplog)
277 spin_lock_bh(&lcw_pending_timers_lock);
278 lcw->lcw_refcount--; /* -1 ref for callback */
279 if (lcw->lcw_refcount == 0)
280 cfs_list_add(&lcw->lcw_list, &zombies);
282 spin_unlock_bh(&lcw_pending_timers_lock);
284 while (!cfs_list_empty(&zombies)) {
285 lcw = cfs_list_entry(lcw_pending_timers.next,
286 struct lc_watchdog, lcw_list);
287 cfs_list_del(&lcw->lcw_list);
288 LIBCFS_FREE(lcw, sizeof(*lcw));
292 complete(&lcw_stop_completion);
/*
 * Spawn the "lc_watchdogd" dispatcher kthread.  Called with
 * lcw_refcount_mutex held when the first watchdog is added
 * (lcw_refcount just became 1); waits on lcw_start_completion until
 * the thread has actually started before returning.
 */
297 static void lcw_dispatch_start(void)
299 struct task_struct *task;
302 LASSERT(lcw_refcount == 1);
304 init_completion(&lcw_stop_completion);
305 init_completion(&lcw_start_completion);
306 init_waitqueue_head(&lcw_event_waitq);
308 CDEBUG(D_INFO, "starting dispatch thread\n");
309 task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
311 CERROR("error spawning watchdog dispatch thread: %ld\n",
316 wait_for_completion(&lcw_start_completion);
317 CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
/*
 * Ask the dispatcher thread to exit (set LCW_FLAG_STOP and wake it)
 * and block on lcw_stop_completion until it has shut down.  Called
 * with lcw_refcount_mutex held when the last watchdog is deleted
 * (lcw_refcount just dropped to 0).
 */
322 static void lcw_dispatch_stop(void)
325 LASSERT(lcw_refcount == 0);
327 CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
329 set_bit(LCW_FLAG_STOP, &lcw_flags);
330 wake_up(&lcw_event_waitq);
332 wait_for_completion(&lcw_stop_completion);
334 CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
/*
 * Allocate and initialize a watchdog owned by the current task.
 * The watchdog starts in LC_WATCHDOG_DISABLED state; arm it with
 * lc_watchdog_touch().  Adding the first watchdog also starts the
 * dispatcher thread.
 *
 * \param timeout   seconds of inactivity before the watchdog fires
 * \param callback  run by the dispatcher on expiry; NULL selects the
 *                  default lc_watchdog_dumplog handler
 * \param data      opaque argument passed to the callback
 * \return new watchdog, or ERR_PTR(-ENOMEM) on allocation failure
 */
339 struct lc_watchdog *lc_watchdog_add(int timeout,
340 void (*callback)(pid_t, void *),
343 struct lc_watchdog *lcw = NULL;
346 LIBCFS_ALLOC(lcw, sizeof(*lcw));
348 CDEBUG(D_INFO, "Could not allocate new lc_watchdog\n");
349 RETURN(ERR_PTR(-ENOMEM));
352 spin_lock_init(&lcw->lcw_lock);
353 lcw->lcw_refcount = 1; /* refcount for owner */
354 lcw->lcw_task = current;
355 lcw->lcw_pid = current_pid();
356 lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
357 lcw->lcw_data = data;
358 lcw->lcw_state = LC_WATCHDOG_DISABLED;
360 CFS_INIT_LIST_HEAD(&lcw->lcw_list);
361 cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
363 mutex_lock(&lcw_refcount_mutex);
364 if (++lcw_refcount == 1)
365 lcw_dispatch_start();
366 mutex_unlock(&lcw_refcount_mutex);
368 /* Keep this working in case we enable them by default */
369 if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
370 lcw->lcw_last_touched = cfs_time_current();
371 cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
377 EXPORT_SYMBOL(lc_watchdog_add);
379 static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
381 cfs_time_t newtime = cfs_time_current();;
383 if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
384 struct timeval timediff;
385 cfs_time_t delta_time = cfs_time_sub(newtime,
386 lcw->lcw_last_touched);
387 cfs_duration_usec(delta_time, &timediff);
389 LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. "
390 "This indicates the system was overloaded (too "
391 "many service threads, or there were not enough "
392 "hardware resources).\n",
396 timediff.tv_usec / 10000);
398 lcw->lcw_last_touched = newtime;
/*
 * If @lcw fired but the dispatcher has not consumed it yet, remove it
 * from lcw_pending_timers and drop the pending-list reference.  Lock
 * order matches the dispatcher: lcw_lock first, then
 * lcw_pending_timers_lock.
 */
401 static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
403 spin_lock_bh(&lcw->lcw_lock);
404 if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
405 spin_lock_bh(&lcw_pending_timers_lock);
406 cfs_list_del_init(&lcw->lcw_list);
407 lcw->lcw_refcount--; /* -1 ref for pending list */
408 spin_unlock_bh(&lcw_pending_timers_lock);
411 spin_unlock_bh(&lcw->lcw_lock);
/*
 * Ping the watchdog: dequeue any pending expiry, log a "resumed" event
 * if it had already expired, re-enable it, and re-arm the timer for
 * @timeout seconds from now.
 */
414 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
417 LASSERT(lcw != NULL);
419 lc_watchdog_del_pending(lcw);
421 lcw_update_time(lcw, "resumed");
422 lcw->lcw_state = LC_WATCHDOG_ENABLED;
424 cfs_timer_arm(&lcw->lcw_timer, cfs_time_current() +
425 cfs_time_seconds(timeout));
429 EXPORT_SYMBOL(lc_watchdog_touch);
/*
 * Disable the watchdog: dequeue any pending expiry and log a
 * "completed" event if it had expired.  The timer is not disarmed
 * here; lcw_cb() ignores a watchdog whose state is not ENABLED.
 */
431 void lc_watchdog_disable(struct lc_watchdog *lcw)
434 LASSERT(lcw != NULL);
436 lc_watchdog_del_pending(lcw);
438 lcw_update_time(lcw, "completed");
439 lcw->lcw_state = LC_WATCHDOG_DISABLED;
443 EXPORT_SYMBOL(lc_watchdog_disable);
/*
 * Tear down the watchdog: disarm the timer, log a "stopped" event,
 * drop the pending-list reference (if still queued) and the owner
 * reference, and free the watchdog only when the refcount hits zero --
 * the dispatcher may still hold a transient callback reference, in
 * which case it frees the watchdog via its zombies list instead.
 * Deleting the last watchdog also stops the dispatcher thread.
 */
445 void lc_watchdog_delete(struct lc_watchdog *lcw)
450 LASSERT(lcw != NULL);
452 cfs_timer_disarm(&lcw->lcw_timer);
454 lcw_update_time(lcw, "stopped");
456 spin_lock_bh(&lcw->lcw_lock);
457 spin_lock_bh(&lcw_pending_timers_lock);
458 if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
459 cfs_list_del_init(&lcw->lcw_list);
460 lcw->lcw_refcount--; /* -1 ref for pending list */
463 lcw->lcw_refcount--; /* -1 ref for owner */
464 dead = lcw->lcw_refcount == 0;
465 spin_unlock_bh(&lcw_pending_timers_lock);
466 spin_unlock_bh(&lcw->lcw_lock);
469 LIBCFS_FREE(lcw, sizeof(*lcw));
471 mutex_lock(&lcw_refcount_mutex);
472 if (--lcw_refcount == 0)
474 mutex_unlock(&lcw_refcount_mutex);
478 EXPORT_SYMBOL(lc_watchdog_delete);
/*
 * Provided watchdog handlers
 */
/*
 * Default expiry callback: dump the libcfs debug log, tagged with the
 * hung thread's pid.  @data is unused.
 */
484 void lc_watchdog_dumplog(pid_t pid, void *data)
486 libcfs_debug_dumplog_internal((void *)((long_ptr_t)pid));
488 EXPORT_SYMBOL(lc_watchdog_dumplog);
490 #else /* !defined(WITH_WATCHDOG) */
/*
 * No-op stubs used when the watchdog is compiled out: lc_watchdog_add
 * returns a pointer to a static dummy watchdog and the remaining entry
 * points do nothing, so callers need no #ifdef guards.
 */
492 struct lc_watchdog *lc_watchdog_add(int timeout,
493 void (*callback)(pid_t pid, void *),
496 static struct lc_watchdog watchdog;
499 EXPORT_SYMBOL(lc_watchdog_add);
501 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
504 EXPORT_SYMBOL(lc_watchdog_touch);
506 void lc_watchdog_disable(struct lc_watchdog *lcw)
509 EXPORT_SYMBOL(lc_watchdog_disable);
511 void lc_watchdog_delete(struct lc_watchdog *lcw)
514 EXPORT_SYMBOL(lc_watchdog_delete);