* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include "tracefile.h"
struct lc_watchdog {
- cfs_timer_t lcw_timer; /* kernel timer */
- cfs_list_t lcw_list;
- cfs_time_t lcw_last_touched;
- cfs_task_t *lcw_task;
- cfs_atomic_t lcw_refcount;
+ cfs_spinlock_t lcw_lock; /* check or change lcw_list */
+ int lcw_refcount; /* must hold lcw_pending_timers_lock */
+ cfs_timer_t lcw_timer; /* kernel timer */
+ cfs_list_t lcw_list; /* chain on pending list */
+ cfs_time_t lcw_last_touched; /* last touched stamp */
+ cfs_task_t *lcw_task; /* owner task */
+ void (*lcw_callback)(pid_t, void *);
+ void *lcw_data;
- void (*lcw_callback)(pid_t, void *);
- void *lcw_data;
-
- pid_t lcw_pid;
+ pid_t lcw_pid;
enum {
LC_WATCHDOG_DISABLED,
/*
* Number of outstanding watchdogs.
* When it hits 1, we start the dispatcher.
- * When it hits 0, we stop the distpatcher.
+ * When it hits 0, we stop the dispatcher.
*/
static __u32 lcw_refcount = 0;
static CFS_DECLARE_MUTEX(lcw_refcount_sem);
#elif defined(HAVE_TASK_RCU)
rcu_read_lock();
#else
- CERROR("unable to dump stack because of missing export\n");
+ CERROR("unable to dump stack because of missing export\n");
RETURN_EXIT;
#endif
- if (lcw->lcw_task == NULL) {
+ if (lcw->lcw_task == NULL) {
LCONSOLE_WARN("Process " LPPID " was not found in the task "
"list; watchdog callback may be incomplete\n",
(int)lcw->lcw_pid);
lcw->lcw_state = LC_WATCHDOG_EXPIRED;
+ cfs_spin_lock_bh(&lcw->lcw_lock);
+ LASSERT(cfs_list_empty(&lcw->lcw_list));
+
cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ lcw->lcw_refcount++; /* +1 for pending list */
cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
cfs_waitq_signal(&lcw_event_waitq);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_unlock_bh(&lcw->lcw_lock);
EXIT;
}
-static inline void lcw_get(struct lc_watchdog *lcw)
-{
- cfs_atomic_inc(&lcw->lcw_refcount);
-}
-
-static inline void lcw_put(struct lc_watchdog *lcw)
-{
- if (cfs_atomic_dec_and_test(&lcw->lcw_refcount)) {
- LASSERT(cfs_list_empty(&lcw->lcw_list));
- LIBCFS_FREE(lcw, sizeof(*lcw));
- }
-}
-
static int is_watchdog_fired(void)
{
int rc;
{
int rc = 0;
unsigned long flags;
- struct lc_watchdog *lcw, *lcwcb;
+ struct lc_watchdog *lcw;
+ CFS_LIST_HEAD (zombies);
ENTRY;
cfs_complete(&lcw_start_completion);
while (1) {
+ int dumplog = 1;
+
cfs_wait_event_interruptible(lcw_event_waitq,
is_watchdog_fired(), rc);
CDEBUG(D_INFO, "Watchdog got woken up...\n");
break;
}
- lcwcb = NULL;
cfs_spin_lock_bh(&lcw_pending_timers_lock);
while (!cfs_list_empty(&lcw_pending_timers)) {
+ int is_dumplog;
lcw = cfs_list_entry(lcw_pending_timers.next,
- struct lc_watchdog,
- lcw_list);
- lcw_get(lcw);
+ struct lc_watchdog, lcw_list);
+                        /* +1 ref for callback to make sure lcw wouldn't be
+                         * deleted after releasing lcw_pending_timers_lock */
+ lcw->lcw_refcount++;
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+
+ /* lock ordering */
+ cfs_spin_lock_bh(&lcw->lcw_lock);
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
+
+ if (cfs_list_empty(&lcw->lcw_list)) {
+ /* already removed from pending list */
+ lcw->lcw_refcount--; /* -1 ref for callback */
+ if (lcw->lcw_refcount == 0)
+ cfs_list_add(&lcw->lcw_list, &zombies);
+ cfs_spin_unlock_bh(&lcw->lcw_lock);
+ /* still hold lcw_pending_timers_lock */
+ continue;
+ }
+
cfs_list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
+
cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+ cfs_spin_unlock_bh(&lcw->lcw_lock);
CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
lcw->lcw_pid);
lcw_dump_stack(lcw);
- if (lcwcb == NULL &&
- lcw->lcw_state != LC_WATCHDOG_DISABLED)
- lcwcb = lcw;
- else
- lcw_put(lcw);
+ is_dumplog = lcw->lcw_callback == lc_watchdog_dumplog;
+ if (lcw->lcw_state != LC_WATCHDOG_DISABLED &&
+ (dumplog || !is_dumplog)) {
+ lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
+ if (dumplog && is_dumplog)
+ dumplog = 0;
+ }
+
cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ lcw->lcw_refcount--; /* -1 ref for callback */
+ if (lcw->lcw_refcount == 0)
+ cfs_list_add(&lcw->lcw_list, &zombies);
}
cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- /* only do callback once for this batch of lcws */
- if (lcwcb != NULL) {
- lcwcb->lcw_callback(lcwcb->lcw_pid, lcwcb->lcw_data);
- lcw_put(lcwcb);
+                /* Free watchdogs whose last reference dropped inside the
+                 * locked region above; they were parked on the local
+                 * 'zombies' list so LIBCFS_FREE runs without any lock held. */
+                while (!cfs_list_empty(&zombies)) {
+                        /* must walk 'zombies', not 'lcw_pending_timers':
+                         * the loop condition tests 'zombies', and entries on
+                         * the pending list are still live */
+                        lcw = cfs_list_entry(zombies.next,
+                                             struct lc_watchdog, lcw_list);
+                        cfs_list_del(&lcw->lcw_list);
+                        LIBCFS_FREE(lcw, sizeof(*lcw));
                 }
}
RETURN(ERR_PTR(-ENOMEM));
}
+ cfs_spin_lock_init(&lcw->lcw_lock);
+ lcw->lcw_refcount = 1; /* refcount for owner */
lcw->lcw_task = cfs_current();
lcw->lcw_pid = cfs_curproc_pid();
lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
CFS_INIT_LIST_HEAD(&lcw->lcw_list);
cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
- cfs_atomic_set(&lcw->lcw_refcount, 1);
cfs_down(&lcw_refcount_sem);
if (++lcw_refcount == 1)
lcw->lcw_last_touched = newtime;
}
+static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
+{
+ cfs_spin_lock_bh(&lcw->lcw_lock);
+ if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+ cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ cfs_list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
+ cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+ }
+
+ cfs_spin_unlock_bh(&lcw->lcw_lock);
+}
+
void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
{
ENTRY;
LASSERT(lcw != NULL);
- LASSERT(cfs_atomic_read(&lcw->lcw_refcount) > 0);
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- cfs_list_del_init(&lcw->lcw_list);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+ lc_watchdog_del_pending(lcw);
lcw_update_time(lcw, "resumed");
lcw->lcw_state = LC_WATCHDOG_ENABLED;
{
ENTRY;
LASSERT(lcw != NULL);
- LASSERT(cfs_atomic_read(&lcw->lcw_refcount) > 0);
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- if (!cfs_list_empty(&lcw->lcw_list))
- cfs_list_del_init(&lcw->lcw_list);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+ lc_watchdog_del_pending(lcw);
lcw_update_time(lcw, "completed");
lcw->lcw_state = LC_WATCHDOG_DISABLED;
void lc_watchdog_delete(struct lc_watchdog *lcw)
{
+ int dead;
+
ENTRY;
LASSERT(lcw != NULL);
- LASSERT(cfs_atomic_read(&lcw->lcw_refcount) > 0);
cfs_timer_disarm(&lcw->lcw_timer);
lcw_update_time(lcw, "stopped");
+ cfs_spin_lock_bh(&lcw->lcw_lock);
cfs_spin_lock_bh(&lcw_pending_timers_lock);
- if (!cfs_list_empty(&lcw->lcw_list))
+ if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
cfs_list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
+ }
+
+ lcw->lcw_refcount--; /* -1 ref for owner */
+ dead = lcw->lcw_refcount == 0;
cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- lcw_put(lcw);
+ cfs_spin_unlock_bh(&lcw->lcw_lock);
+
+ if (dead)
+ LIBCFS_FREE(lcw, sizeof(*lcw));
cfs_down(&lcw_refcount_sem);
if (--lcw_refcount == 0)