diff --git a/libcfs/libcfs/watchdog.c b/libcfs/libcfs/watchdog.c
index 4e5cc42..131daa3 100644
--- a/libcfs/libcfs/watchdog.c
+++ b/libcfs/libcfs/watchdog.c
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -44,16 +44,16 @@
 #include "tracefile.h"
 
 struct lc_watchdog {
-        cfs_timer_t       lcw_timer; /* kernel timer */
-        struct list_head  lcw_list;
-        struct timeval    lcw_last_touched;
-        cfs_task_t       *lcw_task;
+        cfs_timer_t       lcw_timer; /* kernel timer */
+        cfs_list_t        lcw_list;
+        cfs_time_t        lcw_last_touched;
+        cfs_task_t       *lcw_task;
+        cfs_atomic_t      lcw_refcount;
 
-        void            (*lcw_callback)(pid_t, void *);
-        void             *lcw_data;
+        void            (*lcw_callback)(pid_t, void *);
+        void             *lcw_data;
 
-        pid_t             lcw_pid;
-        cfs_duration_t    lcw_time; /* time until watchdog fires, jiffies */
+        pid_t             lcw_pid;
 
         enum {
                 LC_WATCHDOG_DISABLED,
@@ -68,8 +68,8 @@ struct lc_watchdog {
  * and lcw_stop_completion when it exits.
  * Wake lcw_event_waitq to signal timer callback dispatches.
  */
-static struct completion lcw_start_completion;
-static struct completion lcw_stop_completion;
+static cfs_completion_t lcw_start_completion;
+static cfs_completion_t lcw_stop_completion;
 static cfs_waitq_t lcw_event_waitq;
 
 /*
@@ -86,49 +86,43 @@ static unsigned long lcw_flags = 0;
  * When it hits 0, we stop the distpatcher.
  */
 static __u32 lcw_refcount = 0;
-static DECLARE_MUTEX(lcw_refcount_sem);
+static CFS_DECLARE_MUTEX(lcw_refcount_sem);
 
 /*
  * List of timers that have fired that need their callbacks run by the
  * dispatcher.
  */
-static spinlock_t lcw_pending_timers_lock = SPIN_LOCK_UNLOCKED; /* BH lock! */
-static struct list_head lcw_pending_timers = \
+/* BH lock! */
+static cfs_spinlock_t lcw_pending_timers_lock = CFS_SPIN_LOCK_UNLOCKED;
+static cfs_list_t lcw_pending_timers = \
         CFS_LIST_HEAD_INIT(lcw_pending_timers);
 
 /* Last time a watchdog expired */
 static cfs_time_t lcw_last_watchdog_time;
 static int lcw_recent_watchdog_count;
-static spinlock_t lcw_last_watchdog_lock = SPIN_LOCK_UNLOCKED;
 
 static void
 lcw_dump(struct lc_watchdog *lcw)
 {
-        cfs_task_t *tsk;
+        ENTRY;
 #if defined(HAVE_TASKLIST_LOCK)
-        read_lock(&tasklist_lock);
+        cfs_read_lock(&tasklist_lock);
 #elif defined(HAVE_TASK_RCU)
         rcu_read_lock();
 #else
         CERROR("unable to dump stack because of missing export\n");
-        return;
+        RETURN_EXIT;
 #endif
-        ENTRY;
-
-        tsk = find_task_by_pid(lcw->lcw_pid);
-
-        if (tsk == NULL) {
-                CWARN("Process " LPPID " was not found in the task list; "
-                      "watchdog callback may be incomplete\n", lcw->lcw_pid);
-        } else if (tsk != lcw->lcw_task) {
-                CWARN("The current process " LPPID " did not set the watchdog; "
-                      "watchdog callback may be incomplete\n", lcw->lcw_pid);
+        if (lcw->lcw_task == NULL) {
+                LCONSOLE_WARN("Process " LPPID " was not found in the task "
+                              "list; watchdog callback may be incomplete\n",
+                              (int)lcw->lcw_pid);
         } else {
-                libcfs_debug_dumpstack(tsk);
+                libcfs_debug_dumpstack(lcw->lcw_task);
         }
 
 #if defined(HAVE_TASKLIST_LOCK)
-        read_unlock(&tasklist_lock);
+        cfs_read_unlock(&tasklist_lock);
 #elif defined(HAVE_TASK_RCU)
         rcu_read_unlock();
 #endif
@@ -138,9 +132,6 @@ lcw_dump(struct lc_watchdog *lcw)
 static void lcw_cb(ulong_ptr_t data)
 {
         struct lc_watchdog *lcw = (struct lc_watchdog *)data;
-        cfs_time_t current_time;
-        cfs_duration_t delta_time;
-
         ENTRY;
 
         if (lcw->lcw_state != LC_WATCHDOG_ENABLED) {
@@ -149,68 +140,93 @@
         }
 
         lcw->lcw_state = LC_WATCHDOG_EXPIRED;
-        current_time = cfs_time_current();
 
-        /* Check to see if we should throttle the watchdog timer to avoid
-         * too many dumps going to the console thus triggering an NMI.
-         * Normally we would not hold the spin lock over the CWARN but in
-         * this case we hold it to ensure non ratelimited lcw_dumps are not
-         * interleaved on the console making them hard to read. */
-        spin_lock_bh(&lcw_last_watchdog_lock);
-        delta_time = cfs_duration_sec(current_time - lcw_last_watchdog_time);
-
-        if (delta_time < libcfs_watchdog_ratelimit && lcw_recent_watchdog_count > 3) {
-                CWARN("Refusing to fire watchdog for pid %d: it was inactive "
-                      "for %ldms. Rate limiting 1 per %d seconds.\n",
-                      (int)lcw->lcw_pid,cfs_duration_sec(lcw->lcw_time) * 1000,
-                      libcfs_watchdog_ratelimit);
-        } else {
-                if (delta_time < libcfs_watchdog_ratelimit) {
-                        lcw_recent_watchdog_count++;
-                } else {
-                        memcpy(&lcw_last_watchdog_time, &current_time,
-                               sizeof(current_time));
-                        lcw_recent_watchdog_count = 0;
-                }
+        cfs_spin_lock_bh(&lcw_pending_timers_lock);
+        cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
+        cfs_waitq_signal(&lcw_event_waitq);
+        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
 
-                /* This warning should appear on the console, but may not get
-                 * into the logs since we're running in a softirq handler */
-                CWARN("Watchdog triggered for pid %d: it was inactive for %lds\n",
-                      (int)lcw->lcw_pid, cfs_duration_sec(lcw->lcw_time));
-                lcw_dump(lcw);
-        }
+        EXIT;
+}
 
-        spin_unlock_bh(&lcw_last_watchdog_lock);
-        spin_lock_bh(&lcw_pending_timers_lock);
+static inline void lcw_get(struct lc_watchdog *lcw)
+{
+        cfs_atomic_inc(&lcw->lcw_refcount);
+}
 
-        if (list_empty(&lcw->lcw_list)) {
-                list_add(&lcw->lcw_list, &lcw_pending_timers);
-                cfs_waitq_signal(&lcw_event_waitq);
+static inline void lcw_put(struct lc_watchdog *lcw)
+{
+        if (cfs_atomic_dec_and_test(&lcw->lcw_refcount)) {
+                LASSERT(cfs_list_empty(&lcw->lcw_list));
+                LIBCFS_FREE(lcw, sizeof(*lcw));
         }
-
-        spin_unlock_bh(&lcw_pending_timers_lock);
-
-        EXIT;
 }
 
 static int is_watchdog_fired(void)
 {
         int rc;
 
-        if (test_bit(LCW_FLAG_STOP, &lcw_flags))
+        if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags))
                 return 1;
 
-        spin_lock_bh(&lcw_pending_timers_lock);
-        rc = !list_empty(&lcw_pending_timers);
-        spin_unlock_bh(&lcw_pending_timers_lock);
+        cfs_spin_lock_bh(&lcw_pending_timers_lock);
+        rc = !cfs_list_empty(&lcw_pending_timers);
+        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
 
         return rc;
 }
 
+static void lcw_dump_stack(struct lc_watchdog *lcw)
+{
+        cfs_time_t      current_time;
+        cfs_duration_t  delta_time;
+        struct timeval  timediff;
+
+        current_time = cfs_time_current();
+        delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched);
+        cfs_duration_usec(delta_time, &timediff);
+
+        /*
+         * Check to see if we should throttle the watchdog timer to avoid
+         * too many dumps going to the console thus triggering an NMI.
+         */
+        delta_time = cfs_duration_sec(cfs_time_sub(current_time,
+                                                   lcw_last_watchdog_time));
+
+        if (delta_time < libcfs_watchdog_ratelimit &&
+            lcw_recent_watchdog_count > 3) {
+                LCONSOLE_WARN("Service thread pid %u was inactive for "
+                              "%lu.%.02lus. Watchdog stack traces are limited "
+                              "to 3 per %d seconds, skipping this one.\n",
+                              (int)lcw->lcw_pid,
+                              timediff.tv_sec,
+                              timediff.tv_usec / 10000,
+                              libcfs_watchdog_ratelimit);
+        } else {
+                if (delta_time < libcfs_watchdog_ratelimit) {
+                        lcw_recent_watchdog_count++;
+                } else {
+                        memcpy(&lcw_last_watchdog_time, &current_time,
+                               sizeof(current_time));
+                        lcw_recent_watchdog_count = 0;
+                }
+
+                LCONSOLE_WARN("Service thread pid %u was inactive for "
+                              "%lu.%.02lus. The thread might be hung, or it "
+                              "might only be slow and will resume later. "
" + "Dumping the stack trace for debugging purposes:" + "\n", + (int)lcw->lcw_pid, + timediff.tv_sec, + timediff.tv_usec / 10000); + lcw_dump(lcw); + } +} + static int lcw_dispatch_main(void *data) { int rc = 0; unsigned long flags; - struct lc_watchdog *lcw; + struct lc_watchdog *lcw, *lcwcb; ENTRY; @@ -221,17 +237,18 @@ static int lcw_dispatch_main(void *data) RECALC_SIGPENDING; SIGNAL_MASK_UNLOCK(current, flags); - complete(&lcw_start_completion); + cfs_complete(&lcw_start_completion); while (1) { - cfs_wait_event_interruptible(lcw_event_waitq, is_watchdog_fired(), rc); + cfs_wait_event_interruptible(lcw_event_waitq, + is_watchdog_fired(), rc); CDEBUG(D_INFO, "Watchdog got woken up...\n"); - if (test_bit(LCW_FLAG_STOP, &lcw_flags)) { + if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) { CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n"); - spin_lock_bh(&lcw_pending_timers_lock); - rc = !list_empty(&lcw_pending_timers); - spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + rc = !cfs_list_empty(&lcw_pending_timers); + cfs_spin_unlock_bh(&lcw_pending_timers_lock); if (rc) { CERROR("pending timers list was not empty at " "time of watchdog dispatch shutdown\n"); @@ -239,27 +256,38 @@ static int lcw_dispatch_main(void *data) break; } - spin_lock_bh(&lcw_pending_timers_lock); - while (!list_empty(&lcw_pending_timers)) { + lcwcb = NULL; + cfs_spin_lock_bh(&lcw_pending_timers_lock); + while (!cfs_list_empty(&lcw_pending_timers)) { - lcw = list_entry(lcw_pending_timers.next, + lcw = cfs_list_entry(lcw_pending_timers.next, struct lc_watchdog, lcw_list); - list_del_init(&lcw->lcw_list); - spin_unlock_bh(&lcw_pending_timers_lock); - - CDEBUG(D_INFO, "found lcw for pid " LPPID ": inactive for " - "%lds\n", lcw->lcw_pid, cfs_duration_sec(lcw->lcw_time)); - - if (lcw->lcw_state != LC_WATCHDOG_DISABLED) - lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data); + lcw_get(lcw); + cfs_list_del_init(&lcw->lcw_list); + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + + CDEBUG(D_INFO, "found lcw for pid " LPPID "\n", + lcw->lcw_pid); + lcw_dump_stack(lcw); + + if (lcwcb == NULL && + lcw->lcw_state != LC_WATCHDOG_DISABLED) + lcwcb = lcw; + else + lcw_put(lcw); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + } + cfs_spin_unlock_bh(&lcw_pending_timers_lock); - spin_lock_bh(&lcw_pending_timers_lock); + /* only do callback once for this batch of lcws */ + if (lcwcb != NULL) { + lcwcb->lcw_callback(lcwcb->lcw_pid, lcwcb->lcw_data); + lcw_put(lcwcb); } - spin_unlock_bh(&lcw_pending_timers_lock); } - complete(&lcw_stop_completion); + cfs_complete(&lcw_stop_completion); RETURN(rc); } @@ -271,18 +299,18 @@ static void lcw_dispatch_start(void) ENTRY; LASSERT(lcw_refcount == 1); - init_completion(&lcw_stop_completion); - init_completion(&lcw_start_completion); + cfs_init_completion(&lcw_stop_completion); + cfs_init_completion(&lcw_start_completion); cfs_waitq_init(&lcw_event_waitq); CDEBUG(D_INFO, "starting dispatch thread\n"); - rc = kernel_thread(lcw_dispatch_main, NULL, 0); + rc = cfs_kernel_thread(lcw_dispatch_main, NULL, 0); if (rc < 0) { CERROR("error spawning watchdog dispatch thread: %d\n", rc); EXIT; return; } - wait_for_completion(&lcw_start_completion); + cfs_wait_for_completion(&lcw_start_completion); CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n"); EXIT; @@ -295,17 +323,17 @@ static void lcw_dispatch_stop(void) CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n"); - set_bit(LCW_FLAG_STOP, &lcw_flags); + cfs_set_bit(LCW_FLAG_STOP, &lcw_flags); 
 
         cfs_waitq_signal(&lcw_event_waitq);
 
-        wait_for_completion(&lcw_stop_completion);
+        cfs_wait_for_completion(&lcw_stop_completion);
 
         CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
 
         EXIT;
 }
 
-struct lc_watchdog *lc_watchdog_add(int timeout_ms,
+struct lc_watchdog *lc_watchdog_add(int timeout,
                                     void (*callback)(pid_t, void *),
                                     void *data)
 {
@@ -320,23 +348,23 @@ struct lc_watchdog *lc_watchdog_add(int timeout_ms,
 
         lcw->lcw_task     = cfs_current();
         lcw->lcw_pid      = cfs_curproc_pid();
-        lcw->lcw_time     = cfs_time_seconds(timeout_ms) / 1000;
         lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
         lcw->lcw_data     = data;
         lcw->lcw_state    = LC_WATCHDOG_DISABLED;
 
         CFS_INIT_LIST_HEAD(&lcw->lcw_list);
         cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
+        cfs_atomic_set(&lcw->lcw_refcount, 1);
 
-        down(&lcw_refcount_sem);
+        cfs_down(&lcw_refcount_sem);
         if (++lcw_refcount == 1)
                 lcw_dispatch_start();
-        up(&lcw_refcount_sem);
+        cfs_up(&lcw_refcount_sem);
 
         /* Keep this working in case we enable them by default */
         if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
-                do_gettimeofday(&lcw->lcw_last_touched);
-                cfs_timer_arm(&lcw->lcw_timer, lcw->lcw_time +
+                lcw->lcw_last_touched = cfs_time_current();
+                cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
                               cfs_time_current());
         }
 
@@ -346,58 +374,58 @@ EXPORT_SYMBOL(lc_watchdog_add);
 
 static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
 {
-        struct timeval newtime;
-        struct timeval timediff;
+        cfs_time_t newtime = cfs_time_current();
 
-        do_gettimeofday(&newtime);
         if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
-                cfs_timeval_sub(&newtime, &lcw->lcw_last_touched, &timediff);
-                CWARN("Expired watchdog for pid " LPPID " %s after %lu.%.4lus\n",
-                      lcw->lcw_pid,
-                      message,
-                      timediff.tv_sec,
-                      timediff.tv_usec / 100);
+                struct timeval timediff;
+                cfs_time_t delta_time = cfs_time_sub(newtime,
+                                                     lcw->lcw_last_touched);
+                cfs_duration_usec(delta_time, &timediff);
+
+                LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. "
" + "This indicates the system was overloaded (too " + "many service threads, or there were not enough " + "hardware resources).\n", + lcw->lcw_pid, + message, + timediff.tv_sec, + timediff.tv_usec / 10000); } lcw->lcw_last_touched = newtime; } -void lc_watchdog_touch_ms(struct lc_watchdog *lcw, int timeout_ms) +void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout) { ENTRY; LASSERT(lcw != NULL); + LASSERT(cfs_atomic_read(&lcw->lcw_refcount) > 0); - spin_lock_bh(&lcw_pending_timers_lock); - list_del_init(&lcw->lcw_list); - spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + cfs_list_del_init(&lcw->lcw_list); + cfs_spin_unlock_bh(&lcw_pending_timers_lock); - lcw_update_time(lcw, "touched"); + lcw_update_time(lcw, "resumed"); lcw->lcw_state = LC_WATCHDOG_ENABLED; cfs_timer_arm(&lcw->lcw_timer, cfs_time_current() + - cfs_time_seconds(timeout_ms) / 1000); + cfs_time_seconds(timeout)); EXIT; } -EXPORT_SYMBOL(lc_watchdog_touch_ms); - -/* deprecated - use above instead */ -void lc_watchdog_touch(struct lc_watchdog *lcw) -{ - lc_watchdog_touch_ms(lcw, (int)cfs_duration_sec(lcw->lcw_time) * 1000); -} EXPORT_SYMBOL(lc_watchdog_touch); void lc_watchdog_disable(struct lc_watchdog *lcw) { ENTRY; LASSERT(lcw != NULL); + LASSERT(cfs_atomic_read(&lcw->lcw_refcount) > 0); - spin_lock_bh(&lcw_pending_timers_lock); - if (!list_empty(&lcw->lcw_list)) - list_del_init(&lcw->lcw_list); - spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + if (!cfs_list_empty(&lcw->lcw_list)) + cfs_list_del_init(&lcw->lcw_list); + cfs_spin_unlock_bh(&lcw_pending_timers_lock); - lcw_update_time(lcw, "disabled"); + lcw_update_time(lcw, "completed"); lcw->lcw_state = LC_WATCHDOG_DISABLED; EXIT; @@ -408,22 +436,22 @@ void lc_watchdog_delete(struct lc_watchdog *lcw) { ENTRY; LASSERT(lcw != NULL); + LASSERT(cfs_atomic_read(&lcw->lcw_refcount) > 0); cfs_timer_disarm(&lcw->lcw_timer); - lcw_update_time(lcw, "deleted"); + lcw_update_time(lcw, "stopped"); - spin_lock_bh(&lcw_pending_timers_lock); - if (!list_empty(&lcw->lcw_list)) - list_del_init(&lcw->lcw_list); - spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + if (!cfs_list_empty(&lcw->lcw_list)) + cfs_list_del_init(&lcw->lcw_list); + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + lcw_put(lcw); - down(&lcw_refcount_sem); + cfs_down(&lcw_refcount_sem); if (--lcw_refcount == 0) lcw_dispatch_stop(); - up(&lcw_refcount_sem); - - LIBCFS_FREE(lcw, sizeof(*lcw)); + cfs_up(&lcw_refcount_sem); EXIT; } @@ -441,7 +469,7 @@ EXPORT_SYMBOL(lc_watchdog_dumplog); #else /* !defined(WITH_WATCHDOG) */ -struct lc_watchdog *lc_watchdog_add(int timeout_ms, +struct lc_watchdog *lc_watchdog_add(int timeout, void (*callback)(pid_t pid, void *), void *data) { @@ -450,12 +478,7 @@ struct lc_watchdog *lc_watchdog_add(int timeout_ms, } EXPORT_SYMBOL(lc_watchdog_add); -void lc_watchdog_touch_ms(struct lc_watchdog *lcw, int timeout_ms) -{ -} -EXPORT_SYMBOL(lc_watchdog_touch_ms); - -void lc_watchdog_touch(struct lc_watchdog *lcw) +void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout) { } EXPORT_SYMBOL(lc_watchdog_touch);