LU-13004 ptlrpc: Allow BULK_BUF_KIOV to accept a kvec
diff --git a/lustre/ptlrpc/sec_gc.c b/lustre/ptlrpc/sec_gc.c
index 40f16eb..652a11b 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 #define DEBUG_SUBSYSTEM S_SEC
 
-#ifndef __KERNEL__
-#include <liblustre.h>
-#else
+#include <linux/workqueue.h>
 #include <libcfs/libcfs.h>
-#endif
 
 #include <obd_support.h>
 #include <obd_class.h>
 #include <lustre_net.h>
 #include <lustre_sec.h>
 
-#define SEC_GC_INTERVAL (30 * 60)
-
-#ifdef __KERNEL__
-
-static struct mutex sec_gc_mutex;
-static CFS_LIST_HEAD(sec_gc_list);
-static spinlock_t sec_gc_list_lock;
+#include "ptlrpc_internal.h"
 
-static CFS_LIST_HEAD(sec_gc_ctx_list);
-static spinlock_t sec_gc_ctx_list_lock;
+#define SEC_GC_INTERVAL (30 * 60)
 
-static struct ptlrpc_thread sec_gc_thread;
-static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
+static DEFINE_MUTEX(sec_gc_mutex);
+static DEFINE_SPINLOCK(sec_gc_list_lock);
+static DEFINE_SPINLOCK(sec_gc_ctx_list_lock);
+static LIST_HEAD(sec_gc_list);
+static LIST_HEAD(sec_gc_ctx_list);
 
+static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
 
 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
 {
-        LASSERT(sec->ps_policy->sp_cops->gc_ctx);
-        LASSERT(sec->ps_gc_interval > 0);
-        LASSERT(cfs_list_empty(&sec->ps_gc_list));
+       LASSERT(sec->ps_policy->sp_cops->gc_ctx);
+       LASSERT(sec->ps_gc_interval > 0);
+       LASSERT(list_empty(&sec->ps_gc_list));
 
-        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+       sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
 
        spin_lock(&sec_gc_list_lock);
-       cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
+       list_add_tail(&sec->ps_gc_list, &sec_gc_list);
        spin_unlock(&sec_gc_list_lock);
 
        CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
-EXPORT_SYMBOL(sptlrpc_gc_add_sec);
 
 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
 {
-        if (cfs_list_empty(&sec->ps_gc_list))
-                return;
+       if (list_empty(&sec->ps_gc_list))
+               return;
 
-        cfs_might_sleep();
+       might_sleep();
 
-        /* signal before list_del to make iteration in gc thread safe */
-        cfs_atomic_inc(&sec_gc_wait_del);
+       /* signal before list_del to make iteration in gc thread safe */
+       atomic_inc(&sec_gc_wait_del);
 
        spin_lock(&sec_gc_list_lock);
-       cfs_list_del_init(&sec->ps_gc_list);
+       list_del_init(&sec->ps_gc_list);
        spin_unlock(&sec_gc_list_lock);
 
        /* barrier */
        mutex_lock(&sec_gc_mutex);
        mutex_unlock(&sec_gc_mutex);
 
-       cfs_atomic_dec(&sec_gc_wait_del);
+       atomic_dec(&sec_gc_wait_del);
 
        CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
-EXPORT_SYMBOL(sptlrpc_gc_del_sec);
+
+static void sec_gc_main(struct work_struct *ws);
+static DECLARE_DELAYED_WORK(sec_gc_work, sec_gc_main);
 
 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
 {
-       LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
+       LASSERT(list_empty(&ctx->cc_gc_chain));
 
        CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        spin_lock(&sec_gc_ctx_list_lock);
-       cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+       list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
        spin_unlock(&sec_gc_ctx_list_lock);
 
-       thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
-       cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+       mod_delayed_work(system_wq, &sec_gc_work, 0);
 }
 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
 
@@ -127,14 +117,14 @@ static void sec_process_ctx_list(void)
 
        spin_lock(&sec_gc_ctx_list_lock);
 
-       while (!cfs_list_empty(&sec_gc_ctx_list)) {
-               ctx = cfs_list_entry(sec_gc_ctx_list.next,
+       while (!list_empty(&sec_gc_ctx_list)) {
+               ctx = list_entry(sec_gc_ctx_list.next,
                                     struct ptlrpc_cli_ctx, cc_gc_chain);
-               cfs_list_del_init(&ctx->cc_gc_chain);
+               list_del_init(&ctx->cc_gc_chain);
                spin_unlock(&sec_gc_ctx_list_lock);
 
                LASSERT(ctx->cc_sec);
-               LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
+               LASSERT(atomic_read(&ctx->cc_refcount) == 1);
                CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
                sptlrpc_cli_ctx_put(ctx, 1);
@@ -147,127 +137,65 @@ static void sec_process_ctx_list(void)
 
 static void sec_do_gc(struct ptlrpc_sec *sec)
 {
-        LASSERT(sec->ps_policy->sp_cops->gc_ctx);
+       LASSERT(sec->ps_policy->sp_cops->gc_ctx);
 
-        if (unlikely(sec->ps_gc_next == 0)) {
-                CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
-                      sec, sec->ps_policy->sp_name);
-                return;
-        }
+       if (unlikely(sec->ps_gc_next == 0)) {
+               CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
+                      sec, sec->ps_policy->sp_name);
+               return;
+       }
 
-        CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+       CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 
-        if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
-                return;
+       if (sec->ps_gc_next > ktime_get_real_seconds())
+               return;
 
-        sec->ps_policy->sp_cops->gc_ctx(sec);
-        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+       sec->ps_policy->sp_cops->gc_ctx(sec);
+       sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
 }
 
-static int sec_gc_main(void *arg)
+static void sec_gc_main(struct work_struct *ws)
 {
-        struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
-        struct l_wait_info    lwi;
-
-        cfs_daemonize_ctxt("sptlrpc_gc");
+       struct ptlrpc_sec *sec;
 
-        /* Record that the thread is running */
-        thread_set_flags(thread, SVC_RUNNING);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
-
-        while (1) {
-                struct ptlrpc_sec *sec;
-
-                thread_clear_flags(thread, SVC_SIGNAL);
-                sec_process_ctx_list();
+       sec_process_ctx_list();
 again:
-                /* go through sec list do gc.
-                 * FIXME here we iterate through the whole list each time which
-                 * is not optimal. we perhaps want to use balanced binary tree
-                 * to trace each sec as order of expiry time.
-                 * another issue here is we wakeup as fixed interval instead of
-                 * according to each sec's expiry time */
-               mutex_lock(&sec_gc_mutex);
-                cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
-                        /* if someone is waiting to be deleted, let it
-                         * proceed as soon as possible. */
-                        if (cfs_atomic_read(&sec_gc_wait_del)) {
-                                CDEBUG(D_SEC, "deletion pending, start over\n");
-                               mutex_unlock(&sec_gc_mutex);
-                                goto again;
-                        }
-
-                        sec_do_gc(sec);
-                }
-               mutex_unlock(&sec_gc_mutex);
-
-                /* check ctx list again before sleep */
-                sec_process_ctx_list();
-
-                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
-                l_wait_event(thread->t_ctl_waitq,
-                             thread_is_stopping(thread) ||
-                             thread_is_signal(thread),
-                             &lwi);
-
-                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
-                        break;
-        }
-
-        thread_set_flags(thread, SVC_STOPPED);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
-        return 0;
-}
-
-int sptlrpc_gc_init(void)
-{
-       struct l_wait_info lwi = { 0 };
-       int                rc;
-
-       mutex_init(&sec_gc_mutex);
-       spin_lock_init(&sec_gc_list_lock);
-       spin_lock_init(&sec_gc_ctx_list_lock);
-
-        /* initialize thread control */
-        memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
-        cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
-
-        rc = cfs_create_thread(sec_gc_main, &sec_gc_thread, CFS_DAEMON_FLAGS);
-        if (rc < 0) {
-                CERROR("can't start gc thread: %d\n", rc);
-                return rc;
-        }
-
-        l_wait_event(sec_gc_thread.t_ctl_waitq,
-                     thread_is_running(&sec_gc_thread), &lwi);
-        return 0;
-}
-
-void sptlrpc_gc_fini(void)
-{
-        struct l_wait_info lwi = { 0 };
-
-        thread_set_flags(&sec_gc_thread, SVC_STOPPING);
-        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+       /*
+        * go through sec list do gc.
+        * FIXME here we iterate through the whole list each time which
+        * is not optimal. we perhaps want to use balanced binary tree
+        * to trace each sec as order of expiry time.
+        * another issue here is we wakeup as fixed interval instead of
+        * according to each sec's expiry time
+        */
+       mutex_lock(&sec_gc_mutex);
+       list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+               /*
+                * if someone is waiting to be deleted, let it
+                * proceed as soon as possible.
+                */
+               if (atomic_read(&sec_gc_wait_del)) {
+                       CDEBUG(D_SEC, "deletion pending, start over\n");
+                       mutex_unlock(&sec_gc_mutex);
+                       goto again;
+               }
+
+               sec_do_gc(sec);
+       }
+       mutex_unlock(&sec_gc_mutex);
 
-        l_wait_event(sec_gc_thread.t_ctl_waitq,
-                     thread_is_stopped(&sec_gc_thread), &lwi);
+       /* check ctx list again before sleep */
+       sec_process_ctx_list();
+       schedule_delayed_work(&sec_gc_work, cfs_time_seconds(SEC_GC_INTERVAL));
 }
 
-#else /* !__KERNEL__ */
-
-void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
-{
-}
-void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
-{
-}
 int sptlrpc_gc_init(void)
 {
-        return 0;
+       schedule_delayed_work(&sec_gc_work, cfs_time_seconds(SEC_GC_INTERVAL));
+       return 0;
 }
+
 void sptlrpc_gc_fini(void)
 {
+       cancel_delayed_work_sync(&sec_gc_work);
 }
-
-#endif /* __KERNEL__ */
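
For reference, the self-rearming delayed_work pattern this patch adopts for sec_gc can be sketched as a standalone module roughly as follows. All demo_* names are illustrative only (not Lustre symbols), and plain HZ arithmetic stands in for the cfs_time_seconds() helper used in the patch.

/*
 * Minimal sketch of a garbage-collection pass driven by a delayed work
 * item that re-queues itself, replacing a dedicated kthread loop.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_GC_INTERVAL	(30 * 60)	/* seconds, like SEC_GC_INTERVAL */

static void demo_gc_main(struct work_struct *ws);
static DECLARE_DELAYED_WORK(demo_gc_work, demo_gc_main);

static void demo_gc_main(struct work_struct *ws)
{
	/* ... walk the tracked list and expire stale entries here ... */

	/* re-arm; this replaces the old kthread's fixed-interval wait */
	schedule_delayed_work(&demo_gc_work, DEMO_GC_INTERVAL * HZ);
}

static int __init demo_gc_init(void)
{
	schedule_delayed_work(&demo_gc_work, DEMO_GC_INTERVAL * HZ);
	return 0;
}

static void __exit demo_gc_exit(void)
{
	/*
	 * Cancel a pending run and wait for a running one; this is safe
	 * even though the work item re-queues itself.
	 */
	cancel_delayed_work_sync(&demo_gc_work);
}

module_init(demo_gc_init);
module_exit(demo_gc_exit);
MODULE_LICENSE("GPL");

A producer that needs an immediate pass can call mod_delayed_work(system_wq, &demo_gc_work, 0), which is the same mechanism sptlrpc_gc_add_ctx() uses in the patch to kick the GC as soon as a context is handed over.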