X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=libcfs%2Flibcfs%2Fwatchdog.c;h=b9f488ff59102a46bffd55d71e8baffbcc1866da;hb=fd79a11a9f4c9526af33a68c77a28437b3c29dfa;hp=f6de3a0aee7fe28d81fe5f774583e2cc822bc96c;hpb=3ce7fb1d3006d6e2488d81d03246c32b69a76ec6;p=fs%2Flustre-release.git diff --git a/libcfs/libcfs/watchdog.c b/libcfs/libcfs/watchdog.c index f6de3a0..b9f488f 100644 --- a/libcfs/libcfs/watchdog.c +++ b/libcfs/libcfs/watchdog.c @@ -26,7 +26,7 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. */ /* @@ -44,15 +44,16 @@ #include "tracefile.h" struct lc_watchdog { - cfs_timer_t lcw_timer; /* kernel timer */ - struct list_head lcw_list; - cfs_time_t lcw_last_touched; - cfs_task_t *lcw_task; + cfs_spinlock_t lcw_lock; /* check or change lcw_list */ + int lcw_refcount; /* must hold lcw_pending_timers_lock */ + cfs_timer_t lcw_timer; /* kernel timer */ + cfs_list_t lcw_list; /* chain on pending list */ + cfs_time_t lcw_last_touched; /* last touched stamp */ + cfs_task_t *lcw_task; /* owner task */ + void (*lcw_callback)(pid_t, void *); + void *lcw_data; - void (*lcw_callback)(pid_t, void *); - void *lcw_data; - - pid_t lcw_pid; + pid_t lcw_pid; enum { LC_WATCHDOG_DISABLED, @@ -67,8 +68,8 @@ struct lc_watchdog { * and lcw_stop_completion when it exits. * Wake lcw_event_waitq to signal timer callback dispatches. */ -static struct completion lcw_start_completion; -static struct completion lcw_stop_completion; +static cfs_completion_t lcw_start_completion; +static cfs_completion_t lcw_stop_completion; static cfs_waitq_t lcw_event_waitq; /* @@ -82,37 +83,37 @@ static unsigned long lcw_flags = 0; /* * Number of outstanding watchdogs. * When it hits 1, we start the dispatcher. - * When it hits 0, we stop the distpatcher. + * When it hits 0, we stop the dispatcher. 
*/ static __u32 lcw_refcount = 0; -static DECLARE_MUTEX(lcw_refcount_sem); +static CFS_DECLARE_MUTEX(lcw_refcount_sem); /* * List of timers that have fired that need their callbacks run by the * dispatcher. */ -static spinlock_t lcw_pending_timers_lock = SPIN_LOCK_UNLOCKED; /* BH lock! */ -static struct list_head lcw_pending_timers = \ +/* BH lock! */ +static cfs_spinlock_t lcw_pending_timers_lock = CFS_SPIN_LOCK_UNLOCKED; +static cfs_list_t lcw_pending_timers = \ CFS_LIST_HEAD_INIT(lcw_pending_timers); /* Last time a watchdog expired */ static cfs_time_t lcw_last_watchdog_time; static int lcw_recent_watchdog_count; -static spinlock_t lcw_last_watchdog_lock = SPIN_LOCK_UNLOCKED; static void lcw_dump(struct lc_watchdog *lcw) { ENTRY; #if defined(HAVE_TASKLIST_LOCK) - read_lock(&tasklist_lock); + cfs_read_lock(&tasklist_lock); #elif defined(HAVE_TASK_RCU) rcu_read_lock(); #else - CERROR("unable to dump stack because of missing export\n"); + CERROR("unable to dump stack because of missing export\n"); RETURN_EXIT; #endif - if (lcw->lcw_task == NULL) { + if (lcw->lcw_task == NULL) { LCONSOLE_WARN("Process " LPPID " was not found in the task " "list; watchdog callback may be incomplete\n", (int)lcw->lcw_pid); @@ -121,7 +122,7 @@ lcw_dump(struct lc_watchdog *lcw) } #if defined(HAVE_TASKLIST_LOCK) - read_unlock(&tasklist_lock); + cfs_read_unlock(&tasklist_lock); #elif defined(HAVE_TASK_RCU) rcu_read_unlock(); #endif @@ -131,10 +132,6 @@ lcw_dump(struct lc_watchdog *lcw) static void lcw_cb(ulong_ptr_t data) { struct lc_watchdog *lcw = (struct lc_watchdog *)data; - cfs_time_t current_time; - cfs_duration_t delta_time; - struct timeval timediff; - ENTRY; if (lcw->lcw_state != LC_WATCHDOG_ENABLED) { @@ -143,17 +140,47 @@ static void lcw_cb(ulong_ptr_t data) } lcw->lcw_state = LC_WATCHDOG_EXPIRED; - current_time = cfs_time_current(); + cfs_spin_lock_bh(&lcw->lcw_lock); + LASSERT(cfs_list_empty(&lcw->lcw_list)); + + cfs_spin_lock_bh(&lcw_pending_timers_lock); + 
lcw->lcw_refcount++; /* +1 for pending list */ + cfs_list_add(&lcw->lcw_list, &lcw_pending_timers); + cfs_waitq_signal(&lcw_event_waitq); + + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_unlock_bh(&lcw->lcw_lock); + EXIT; +} + +static int is_watchdog_fired(void) +{ + int rc; + + if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) + return 1; + + cfs_spin_lock_bh(&lcw_pending_timers_lock); + rc = !cfs_list_empty(&lcw_pending_timers); + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + return rc; +} + +static void lcw_dump_stack(struct lc_watchdog *lcw) +{ + cfs_time_t current_time; + cfs_duration_t delta_time; + struct timeval timediff; + + current_time = cfs_time_current(); delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched); cfs_duration_usec(delta_time, &timediff); - /* Check to see if we should throttle the watchdog timer to avoid + /* + * Check to see if we should throttle the watchdog timer to avoid * too many dumps going to the console thus triggering an NMI. - * Normally we would not hold the spin lock over the CWARN but in - * this case we hold it to ensure non ratelimited lcw_dumps are not - * interleaved on the console making them hard to read. */ - spin_lock_bh(&lcw_last_watchdog_lock); + */ delta_time = cfs_duration_sec(cfs_time_sub(current_time, lcw_last_watchdog_time)); @@ -175,8 +202,6 @@ static void lcw_cb(ulong_ptr_t data) lcw_recent_watchdog_count = 0; } - /* This warning should appear on the console, but may not get - * into the logs since we're running in a softirq handler */ LCONSOLE_WARN("Service thread pid %u was inactive for " "%lu.%.02lus. The thread might be hung, or it " "might only be slow and will resume later. 
" @@ -186,32 +211,7 @@ static void lcw_cb(ulong_ptr_t data) timediff.tv_sec, timediff.tv_usec / 10000); lcw_dump(lcw); - } - - spin_unlock_bh(&lcw_last_watchdog_lock); - spin_lock_bh(&lcw_pending_timers_lock); - - if (list_empty(&lcw->lcw_list)) { - list_add(&lcw->lcw_list, &lcw_pending_timers); - cfs_waitq_signal(&lcw_event_waitq); } - - spin_unlock_bh(&lcw_pending_timers_lock); - - EXIT; -} - -static int is_watchdog_fired(void) -{ - int rc; - - if (test_bit(LCW_FLAG_STOP, &lcw_flags)) - return 1; - - spin_lock_bh(&lcw_pending_timers_lock); - rc = !list_empty(&lcw_pending_timers); - spin_unlock_bh(&lcw_pending_timers_lock); - return rc; } static int lcw_dispatch_main(void *data) @@ -219,6 +219,7 @@ static int lcw_dispatch_main(void *data) int rc = 0; unsigned long flags; struct lc_watchdog *lcw; + CFS_LIST_HEAD (zombies); ENTRY; @@ -229,17 +230,20 @@ static int lcw_dispatch_main(void *data) RECALC_SIGPENDING; SIGNAL_MASK_UNLOCK(current, flags); - complete(&lcw_start_completion); + cfs_complete(&lcw_start_completion); while (1) { - cfs_wait_event_interruptible(lcw_event_waitq, is_watchdog_fired(), rc); + int dumplog = 1; + + cfs_wait_event_interruptible(lcw_event_waitq, + is_watchdog_fired(), rc); CDEBUG(D_INFO, "Watchdog got woken up...\n"); - if (test_bit(LCW_FLAG_STOP, &lcw_flags)) { + if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) { CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n"); - spin_lock_bh(&lcw_pending_timers_lock); - rc = !list_empty(&lcw_pending_timers); - spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + rc = !cfs_list_empty(&lcw_pending_timers); + cfs_spin_unlock_bh(&lcw_pending_timers_lock); if (rc) { CERROR("pending timers list was not empty at " "time of watchdog dispatch shutdown\n"); @@ -247,26 +251,65 @@ static int lcw_dispatch_main(void *data) break; } - spin_lock_bh(&lcw_pending_timers_lock); - while (!list_empty(&lcw_pending_timers)) { + cfs_spin_lock_bh(&lcw_pending_timers_lock); + while 
(!cfs_list_empty(&lcw_pending_timers)) { + int is_dumplog; + + lcw = cfs_list_entry(lcw_pending_timers.next, + struct lc_watchdog, lcw_list); + /* +1 ref for callback to make sure lcw wouldn't be + * deleted after releasing lcw_pending_timers_lock */ + lcw->lcw_refcount++; + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + + /* lock ordering */ + cfs_spin_lock_bh(&lcw->lcw_lock); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + + if (cfs_list_empty(&lcw->lcw_list)) { + /* already removed from pending list */ + lcw->lcw_refcount--; /* -1 ref for callback */ + if (lcw->lcw_refcount == 0) + cfs_list_add(&lcw->lcw_list, &zombies); + cfs_spin_unlock_bh(&lcw->lcw_lock); + /* still hold lcw_pending_timers_lock */ + continue; + } + + cfs_list_del_init(&lcw->lcw_list); + lcw->lcw_refcount--; /* -1 ref for pending list */ - lcw = list_entry(lcw_pending_timers.next, - struct lc_watchdog, - lcw_list); - list_del_init(&lcw->lcw_list); - spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_unlock_bh(&lcw->lcw_lock); - CDEBUG(D_INFO, "found lcw for pid " LPPID "\n", lcw->lcw_pid); + CDEBUG(D_INFO, "found lcw for pid " LPPID "\n", + lcw->lcw_pid); + lcw_dump_stack(lcw); - if (lcw->lcw_state != LC_WATCHDOG_DISABLED) + is_dumplog = lcw->lcw_callback == lc_watchdog_dumplog; + if (lcw->lcw_state != LC_WATCHDOG_DISABLED && + (dumplog || !is_dumplog)) { lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data); + if (dumplog && is_dumplog) + dumplog = 0; + } - spin_lock_bh(&lcw_pending_timers_lock); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + lcw->lcw_refcount--; /* -1 ref for callback */ + if (lcw->lcw_refcount == 0) + cfs_list_add(&lcw->lcw_list, &zombies); + } + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + + while (!cfs_list_empty(&zombies)) { + lcw = cfs_list_entry(zombies.next, + struct lc_watchdog, lcw_list); + cfs_list_del(&lcw->lcw_list); + LIBCFS_FREE(lcw, sizeof(*lcw)); + } - spin_unlock_bh(&lcw_pending_timers_lock); } - 
complete(&lcw_stop_completion); + cfs_complete(&lcw_stop_completion); RETURN(rc); } @@ -278,18 +321,18 @@ static void lcw_dispatch_start(void) ENTRY; LASSERT(lcw_refcount == 1); - init_completion(&lcw_stop_completion); - init_completion(&lcw_start_completion); + cfs_init_completion(&lcw_stop_completion); + cfs_init_completion(&lcw_start_completion); cfs_waitq_init(&lcw_event_waitq); CDEBUG(D_INFO, "starting dispatch thread\n"); - rc = kernel_thread(lcw_dispatch_main, NULL, 0); + rc = cfs_create_thread(lcw_dispatch_main, NULL, 0); if (rc < 0) { CERROR("error spawning watchdog dispatch thread: %d\n", rc); EXIT; return; } - wait_for_completion(&lcw_start_completion); + cfs_wait_for_completion(&lcw_start_completion); CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n"); EXIT; @@ -302,10 +345,10 @@ static void lcw_dispatch_stop(void) CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n"); - set_bit(LCW_FLAG_STOP, &lcw_flags); + cfs_set_bit(LCW_FLAG_STOP, &lcw_flags); cfs_waitq_signal(&lcw_event_waitq); - wait_for_completion(&lcw_stop_completion); + cfs_wait_for_completion(&lcw_stop_completion); CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n"); @@ -325,6 +368,8 @@ struct lc_watchdog *lc_watchdog_add(int timeout, RETURN(ERR_PTR(-ENOMEM)); } + cfs_spin_lock_init(&lcw->lcw_lock); + lcw->lcw_refcount = 1; /* refcount for owner */ lcw->lcw_task = cfs_current(); lcw->lcw_pid = cfs_curproc_pid(); lcw->lcw_callback = (callback != NULL) ? 
callback : lc_watchdog_dumplog; @@ -334,10 +379,10 @@ struct lc_watchdog *lc_watchdog_add(int timeout, CFS_INIT_LIST_HEAD(&lcw->lcw_list); cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw); - down(&lcw_refcount_sem); + cfs_down(&lcw_refcount_sem); if (++lcw_refcount == 1) lcw_dispatch_start(); - up(&lcw_refcount_sem); + cfs_up(&lcw_refcount_sem); /* Keep this working in case we enable them by default */ if (lcw->lcw_state == LC_WATCHDOG_ENABLED) { @@ -372,14 +417,25 @@ static void lcw_update_time(struct lc_watchdog *lcw, const char *message) lcw->lcw_last_touched = newtime; } +static void lc_watchdog_del_pending(struct lc_watchdog *lcw) +{ + cfs_spin_lock_bh(&lcw->lcw_lock); + if (unlikely(!cfs_list_empty(&lcw->lcw_list))) { + cfs_spin_lock_bh(&lcw_pending_timers_lock); + cfs_list_del_init(&lcw->lcw_list); + lcw->lcw_refcount--; /* -1 ref for pending list */ + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + } + + cfs_spin_unlock_bh(&lcw->lcw_lock); +} + void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout) { ENTRY; LASSERT(lcw != NULL); - spin_lock_bh(&lcw_pending_timers_lock); - list_del_init(&lcw->lcw_list); - spin_unlock_bh(&lcw_pending_timers_lock); + lc_watchdog_del_pending(lcw); lcw_update_time(lcw, "resumed"); lcw->lcw_state = LC_WATCHDOG_ENABLED; @@ -396,10 +452,7 @@ void lc_watchdog_disable(struct lc_watchdog *lcw) ENTRY; LASSERT(lcw != NULL); - spin_lock_bh(&lcw_pending_timers_lock); - if (!list_empty(&lcw->lcw_list)) - list_del_init(&lcw->lcw_list); - spin_unlock_bh(&lcw_pending_timers_lock); + lc_watchdog_del_pending(lcw); lcw_update_time(lcw, "completed"); lcw->lcw_state = LC_WATCHDOG_DISABLED; @@ -410,6 +463,8 @@ EXPORT_SYMBOL(lc_watchdog_disable); void lc_watchdog_delete(struct lc_watchdog *lcw) { + int dead; + ENTRY; LASSERT(lcw != NULL); @@ -417,17 +472,25 @@ void lc_watchdog_delete(struct lc_watchdog *lcw) lcw_update_time(lcw, "stopped"); - spin_lock_bh(&lcw_pending_timers_lock); - if (!list_empty(&lcw->lcw_list)) - 
list_del_init(&lcw->lcw_list); - spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_lock_bh(&lcw->lcw_lock); + cfs_spin_lock_bh(&lcw_pending_timers_lock); + if (unlikely(!cfs_list_empty(&lcw->lcw_list))) { + cfs_list_del_init(&lcw->lcw_list); + lcw->lcw_refcount--; /* -1 ref for pending list */ + } + + lcw->lcw_refcount--; /* -1 ref for owner */ + dead = lcw->lcw_refcount == 0; + cfs_spin_unlock_bh(&lcw_pending_timers_lock); + cfs_spin_unlock_bh(&lcw->lcw_lock); + + if (dead) + LIBCFS_FREE(lcw, sizeof(*lcw)); - down(&lcw_refcount_sem); + cfs_down(&lcw_refcount_sem); if (--lcw_refcount == 0) lcw_dispatch_stop(); - up(&lcw_refcount_sem); - - LIBCFS_FREE(lcw, sizeof(*lcw)); + cfs_up(&lcw_refcount_sem); EXIT; }