LU-1198 idl: move FID VER to DLM resource name
[fs/lustre-release.git] lustre/ptlrpc/sec_gc.c
index 930a7dc..e408270 100644
@@ -1,23 +1,43 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (C) 2007 Cluster File Systems, Inc.
- *   Author: Eric Mei <ericm@clusterfs.com>
+ * GPL HEADER START
  *
- *   This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   Lustre is free software; you can redistribute it and/or
- *   modify it under the terms of version 2 of the GNU General Public
- *   License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   Lustre is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Whamcloud, Inc.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/sec_gc.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
  */
 
 #ifndef EXPORT_SYMTAB
 
 #ifdef __KERNEL__
 
-static DECLARE_MUTEX(sec_gc_mutex);
+static cfs_mutex_t sec_gc_mutex;
 static CFS_LIST_HEAD(sec_gc_list);
-static spinlock_t sec_gc_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t sec_gc_list_lock;
 
 static CFS_LIST_HEAD(sec_gc_ctx_list);
-static spinlock_t sec_gc_ctx_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t sec_gc_ctx_list_lock;
 
 static struct ptlrpc_thread sec_gc_thread;
-static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
+static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
 
 
 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
 {
         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
         LASSERT(sec->ps_gc_interval > 0);
-        LASSERT(list_empty(&sec->ps_gc_list));
+        LASSERT(cfs_list_empty(&sec->ps_gc_list));
 
         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
 
-        spin_lock(&sec_gc_list_lock);
-        list_add_tail(&sec_gc_list, &sec->ps_gc_list);
-        spin_unlock(&sec_gc_list_lock);
+        cfs_spin_lock(&sec_gc_list_lock);
+        cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
+        cfs_spin_unlock(&sec_gc_list_lock);
 
         CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
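
This hunk drops the compile-time lock initializers (DECLARE_MUTEX, SPIN_LOCK_UNLOCKED) in favour of libcfs wrapper types; the actual initialization moves to run time, in the sptlrpc_gc_init() hunk further down. A minimal sketch of the same pattern in plain Linux kernel C, using hypothetical demo_* names rather than the Lustre/libcfs API:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static struct mutex demo_gc_mutex;          /* initialized below, not statically */
static spinlock_t   demo_gc_list_lock;      /* ditto; no SPIN_LOCK_UNLOCKED here */

static int demo_gc_init(void)
{
        /* initialize the locks before any other thread can reach them */
        mutex_init(&demo_gc_mutex);
        spin_lock_init(&demo_gc_list_lock);
        return 0;
}
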
@@ -69,23 +89,23 @@ EXPORT_SYMBOL(sptlrpc_gc_add_sec);
 
 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
 {
-        if (list_empty(&sec->ps_gc_list))
+        if (cfs_list_empty(&sec->ps_gc_list))
                 return;
 
-        might_sleep();
+        cfs_might_sleep();
 
         /* signal before list_del to make iteration in gc thread safe */
-        atomic_inc(&sec_gc_wait_del);
+        cfs_atomic_inc(&sec_gc_wait_del);
 
-        spin_lock(&sec_gc_list_lock);
-        list_del_init(&sec->ps_gc_list);
-        spin_unlock(&sec_gc_list_lock);
+        cfs_spin_lock(&sec_gc_list_lock);
+        cfs_list_del_init(&sec->ps_gc_list);
+        cfs_spin_unlock(&sec_gc_list_lock);
 
         /* barrier */
-        mutex_down(&sec_gc_mutex);
-        mutex_up(&sec_gc_mutex);
+        cfs_mutex_lock(&sec_gc_mutex);
+        cfs_mutex_unlock(&sec_gc_mutex);
 
-        atomic_dec(&sec_gc_wait_del);
+        cfs_atomic_dec(&sec_gc_wait_del);
 
         CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
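
The lock/unlock pair on sec_gc_mutex above acts purely as a barrier: the GC thread holds that mutex while it walks sec_gc_list, so acquiring and immediately releasing it guarantees that any walk started before the deletion has either finished or seen sec_gc_wait_del and restarted. A userspace pthread sketch of the same idiom, with hypothetical names:

#include <pthread.h>

static pthread_mutex_t walker_mutex = PTHREAD_MUTEX_INITIALIZER;

/* the walker thread holds walker_mutex for the whole list traversal */

static void wait_for_current_walk(void)
{
        /* once we obtain the mutex, any traversal that was in progress
         * when we were called has completed; we do not need to keep
         * holding it, so drop it immediately */
        pthread_mutex_lock(&walker_mutex);
        pthread_mutex_unlock(&walker_mutex);
}
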
@@ -93,15 +113,15 @@ EXPORT_SYMBOL(sptlrpc_gc_del_sec);
 
 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(list_empty(&ctx->cc_gc_chain));
+        LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
 
         CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
                ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
-        spin_lock(&sec_gc_ctx_list_lock);
-        list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
-        spin_unlock(&sec_gc_ctx_list_lock);
+        cfs_spin_lock(&sec_gc_ctx_list_lock);
+        cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+        cfs_spin_unlock(&sec_gc_ctx_list_lock);
 
-        sec_gc_thread.t_flags |= SVC_SIGNAL;
+        thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
         cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
 }
 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
@@ -110,24 +130,24 @@ static void sec_process_ctx_list(void)
 {
         struct ptlrpc_cli_ctx *ctx;
 
-        spin_lock(&sec_gc_ctx_list_lock);
+        cfs_spin_lock(&sec_gc_ctx_list_lock);
 
-        while (!list_empty(&sec_gc_ctx_list)) {
-                ctx = list_entry(sec_gc_ctx_list.next,
-                                 struct ptlrpc_cli_ctx, cc_gc_chain);
-                list_del_init(&ctx->cc_gc_chain);
-                spin_unlock(&sec_gc_ctx_list_lock);
+        while (!cfs_list_empty(&sec_gc_ctx_list)) {
+                ctx = cfs_list_entry(sec_gc_ctx_list.next,
+                                     struct ptlrpc_cli_ctx, cc_gc_chain);
+                cfs_list_del_init(&ctx->cc_gc_chain);
+                cfs_spin_unlock(&sec_gc_ctx_list_lock);
 
                 LASSERT(ctx->cc_sec);
-                LASSERT(atomic_read(&ctx->cc_refcount) == 1);
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
                 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
                        ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
                 sptlrpc_cli_ctx_put(ctx, 1);
 
-                spin_lock(&sec_gc_ctx_list_lock);
+                cfs_spin_lock(&sec_gc_ctx_list_lock);
         }
 
-        spin_unlock(&sec_gc_ctx_list_lock);
+        cfs_spin_unlock(&sec_gc_ctx_list_lock);
 }
 
 static void sec_do_gc(struct ptlrpc_sec *sec)
@@ -135,7 +155,7 @@ static void sec_do_gc(struct ptlrpc_sec *sec)
         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
 
         if (unlikely(sec->ps_gc_next == 0)) {
-                CWARN("sec %p(%s) has 0 gc time\n",
+                CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
                       sec, sec->ps_policy->sp_name);
                 return;
         }
@@ -154,16 +174,16 @@ static int sec_gc_main(void *arg)
         struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
         struct l_wait_info    lwi;
 
-        cfs_daemonize("sptlrpc_gc");
+        cfs_daemonize_ctxt("sptlrpc_gc");
 
         /* Record that the thread is running */
-        thread->t_flags = SVC_RUNNING;
+        thread_set_flags(thread, SVC_RUNNING);
         cfs_waitq_signal(&thread->t_ctl_waitq);
 
         while (1) {
                 struct ptlrpc_sec *sec;
 
-                thread->t_flags &= ~SVC_SIGNAL;
+                thread_clear_flags(thread, SVC_SIGNAL);
                 sec_process_ctx_list();
 again:
                 /* go through sec list do gc.
@@ -172,69 +192,71 @@ again:
                  * to trace each sec as order of expiry time.
                  * another issue here is we wakeup as fixed interval instead of
                  * according to each sec's expiry time */
-                mutex_down(&sec_gc_mutex);
-                list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+                cfs_mutex_lock(&sec_gc_mutex);
+                cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                         /* if someone is waiting to be deleted, let it
                          * proceed as soon as possible. */
-                        if (atomic_read(&sec_gc_wait_del)) {
-                                CWARN("deletion pending, start over\n");
-                                mutex_up(&sec_gc_mutex);
+                        if (cfs_atomic_read(&sec_gc_wait_del)) {
+                                CDEBUG(D_SEC, "deletion pending, start over\n");
+                                cfs_mutex_unlock(&sec_gc_mutex);
                                 goto again;
                         }
 
                         sec_do_gc(sec);
                 }
-                mutex_up(&sec_gc_mutex);
+                cfs_mutex_unlock(&sec_gc_mutex);
 
                 /* check ctx list again before sleep */
                 sec_process_ctx_list();
 
-                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
+                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
                 l_wait_event(thread->t_ctl_waitq,
-                             thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
+                             thread_is_stopping(thread) ||
+                             thread_is_signal(thread),
                              &lwi);
 
-                if (thread->t_flags & SVC_STOPPING) {
-                        thread->t_flags &= ~SVC_STOPPING;
+                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                         break;
-                }
         }
 
-        thread->t_flags = SVC_STOPPED;
+        thread_set_flags(thread, SVC_STOPPED);
         cfs_waitq_signal(&thread->t_ctl_waitq);
         return 0;
 }
 
-int sptlrpc_gc_start_thread(void)
+int sptlrpc_gc_init(void)
 {
         struct l_wait_info lwi = { 0 };
         int                rc;
 
+        cfs_mutex_init(&sec_gc_mutex);
+        cfs_spin_lock_init(&sec_gc_list_lock);
+        cfs_spin_lock_init(&sec_gc_ctx_list_lock);
+
         /* initialize thread control */
         memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
         cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
 
-        rc = cfs_kernel_thread(sec_gc_main, &sec_gc_thread,
-                               CLONE_VM | CLONE_FILES);
+        rc = cfs_create_thread(sec_gc_main, &sec_gc_thread, CFS_DAEMON_FLAGS);
         if (rc < 0) {
                 CERROR("can't start gc thread: %d\n", rc);
                 return rc;
         }
 
         l_wait_event(sec_gc_thread.t_ctl_waitq,
-                     sec_gc_thread.t_flags & SVC_RUNNING, &lwi);
+                     thread_is_running(&sec_gc_thread), &lwi);
         return 0;
 }
 
-void sptlrpc_gc_stop_thread(void)
+void sptlrpc_gc_fini(void)
 {
         struct l_wait_info lwi = { 0 };
 
-        sec_gc_thread.t_flags = SVC_STOPPING;
+        thread_set_flags(&sec_gc_thread, SVC_STOPPING);
         cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
 
         l_wait_event(sec_gc_thread.t_ctl_waitq,
-                     sec_gc_thread.t_flags & SVC_STOPPED, &lwi);
+                     thread_is_stopped(&sec_gc_thread), &lwi);
 }
 
 #else /* !__KERNEL__ */
@@ -245,11 +267,11 @@ void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
 {
 }
-int sptlrpc_gc_start_thread(void)
+int sptlrpc_gc_init(void)
 {
         return 0;
 }
-void sptlrpc_gc_stop_thread(void)
+void sptlrpc_gc_fini(void)
 {
 }
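
The FIXME comment in sec_gc_main() notes that the GC thread scans the whole sec_gc_list on a fixed SEC_GC_INTERVAL instead of tracking each sec's expiry time, and suggests a priority queue implemented as a heap. A hypothetical userspace sketch of that idea, a binary min-heap keyed by expiry time (none of these names exist in Lustre; ge_next and ge_sec stand in for ps_gc_next and struct ptlrpc_sec *):

#include <stddef.h>
#include <time.h>

struct gc_entry {
        time_t  ge_next;        /* analogous to sec->ps_gc_next */
        void   *ge_sec;         /* analogous to struct ptlrpc_sec * */
};

struct gc_heap {
        struct gc_entry *gh_ent;
        int              gh_count;
        int              gh_size;
};

static void gc_heap_swap(struct gc_entry *a, struct gc_entry *b)
{
        struct gc_entry tmp = *a;

        *a = *b;
        *b = tmp;
}

/* push one entry, then restore the heap property by sifting up */
static int gc_heap_push(struct gc_heap *h, time_t next, void *sec)
{
        int i;

        if (h->gh_count == h->gh_size)
                return -1;              /* caller would grow the array */

        i = h->gh_count++;
        h->gh_ent[i].ge_next = next;
        h->gh_ent[i].ge_sec  = sec;

        while (i > 0 && h->gh_ent[(i - 1) / 2].ge_next > h->gh_ent[i].ge_next) {
                gc_heap_swap(&h->gh_ent[(i - 1) / 2], &h->gh_ent[i]);
                i = (i - 1) / 2;
        }
        return 0;
}

/* peek the earliest expiry; the GC thread would sleep until this time
 * instead of waking on a fixed interval */
static struct gc_entry *gc_heap_min(struct gc_heap *h)
{
        return h->gh_count > 0 ? &h->gh_ent[0] : NULL;
}
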