LU-13004 ptlrpc: Allow BULK_BUF_KIOV to accept a kvec
[fs/lustre-release.git] lustre/ptlrpc/sec_gc.c
index c03eb0e..652a11b 100644
@@ -36,7 +36,7 @@
 
 #define DEBUG_SUBSYSTEM S_SEC
 
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <libcfs/libcfs.h>
 
 #include <obd_support.h>
 
 #define SEC_GC_INTERVAL (30 * 60)
 
+static DEFINE_MUTEX(sec_gc_mutex);
+static DEFINE_SPINLOCK(sec_gc_list_lock);
+static DEFINE_SPINLOCK(sec_gc_ctx_list_lock);
+static LIST_HEAD(sec_gc_list);
+static LIST_HEAD(sec_gc_ctx_list);
 
-static struct mutex sec_gc_mutex;
-static spinlock_t sec_gc_list_lock;
-static struct list_head sec_gc_list;
-
-static spinlock_t sec_gc_ctx_list_lock;
-static struct list_head sec_gc_ctx_list;
-
-static struct ptlrpc_thread sec_gc_thread;
 static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
 
-
 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
 {
-        LASSERT(sec->ps_policy->sp_cops->gc_ctx);
-        LASSERT(sec->ps_gc_interval > 0);
+       LASSERT(sec->ps_policy->sp_cops->gc_ctx);
+       LASSERT(sec->ps_gc_interval > 0);
        LASSERT(list_empty(&sec->ps_gc_list));
 
-        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+       sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
 
        spin_lock(&sec_gc_list_lock);
        list_add_tail(&sec->ps_gc_list, &sec_gc_list);
@@ -98,6 +94,9 @@ void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
        CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
 
+static void sec_gc_main(struct work_struct *ws);
+static DECLARE_DELAYED_WORK(sec_gc_work, sec_gc_main);
+
 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
 {
        LASSERT(list_empty(&ctx->cc_gc_chain));
@@ -108,8 +107,7 @@ void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
        list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
        spin_unlock(&sec_gc_ctx_list_lock);
 
-       thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
-       wake_up(&sec_gc_thread.t_ctl_waitq);
+       mod_delayed_work(system_wq, &sec_gc_work, 0);
 }
 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
 
@@ -139,114 +137,65 @@ static void sec_process_ctx_list(void)
 
 static void sec_do_gc(struct ptlrpc_sec *sec)
 {
-        LASSERT(sec->ps_policy->sp_cops->gc_ctx);
+       LASSERT(sec->ps_policy->sp_cops->gc_ctx);
 
-        if (unlikely(sec->ps_gc_next == 0)) {
-                CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
-                      sec, sec->ps_policy->sp_name);
-                return;
-        }
+       if (unlikely(sec->ps_gc_next == 0)) {
+               CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
+                      sec, sec->ps_policy->sp_name);
+               return;
+       }
 
-        CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+       CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 
-        if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
-                return;
+       if (sec->ps_gc_next > ktime_get_real_seconds())
+               return;
 
-        sec->ps_policy->sp_cops->gc_ctx(sec);
-        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+       sec->ps_policy->sp_cops->gc_ctx(sec);
+       sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
 }
 
-static int sec_gc_main(void *arg)
+static void sec_gc_main(struct work_struct *ws)
 {
-       struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
-       struct l_wait_info    lwi;
-
-       unshare_fs_struct();
-
-       /* Record that the thread is running */
-       thread_set_flags(thread, SVC_RUNNING);
-       wake_up(&thread->t_ctl_waitq);
+       struct ptlrpc_sec *sec;
 
-       while (1) {
-               struct ptlrpc_sec *sec;
-
-               thread_clear_flags(thread, SVC_SIGNAL);
-               sec_process_ctx_list();
+       sec_process_ctx_list();
 again:
-               /* go through sec list do gc.
-                * FIXME here we iterate through the whole list each time which
-                * is not optimal. we perhaps want to use balanced binary tree
-                * to trace each sec as order of expiry time.
-                * another issue here is we wakeup as fixed interval instead of
-                * according to each sec's expiry time */
-               mutex_lock(&sec_gc_mutex);
-               list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
-                       /* if someone is waiting to be deleted, let it
-                        * proceed as soon as possible. */
-                       if (atomic_read(&sec_gc_wait_del)) {
-                               CDEBUG(D_SEC, "deletion pending, start over\n");
-                               mutex_unlock(&sec_gc_mutex);
-                               goto again;
-                       }
-
-                       sec_do_gc(sec);
+       /*
+        * go through sec list do gc.
+        * FIXME here we iterate through the whole list each time which
+        * is not optimal. we perhaps want to use balanced binary tree
+        * to trace each sec as order of expiry time.
+        * another issue here is we wakeup as fixed interval instead of
+        * according to each sec's expiry time
+        */
+       mutex_lock(&sec_gc_mutex);
+       list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+               /*
+                * if someone is waiting to be deleted, let it
+                * proceed as soon as possible.
+                */
+               if (atomic_read(&sec_gc_wait_del)) {
+                       CDEBUG(D_SEC, "deletion pending, start over\n");
+                       mutex_unlock(&sec_gc_mutex);
+                       goto again;
                }
-               mutex_unlock(&sec_gc_mutex);
-
-               /* check ctx list again before sleep */
-               sec_process_ctx_list();
 
-               lwi = LWI_TIMEOUT(msecs_to_jiffies(SEC_GC_INTERVAL *
-                                                  MSEC_PER_SEC),
-                                 NULL, NULL);
-               l_wait_event(thread->t_ctl_waitq,
-                            thread_is_stopping(thread) ||
-                            thread_is_signal(thread),
-                            &lwi);
-
-               if (thread_test_and_clear_flags(thread, SVC_STOPPING))
-                       break;
+               sec_do_gc(sec);
        }
+       mutex_unlock(&sec_gc_mutex);
 
-       thread_set_flags(thread, SVC_STOPPED);
-       wake_up(&thread->t_ctl_waitq);
-       return 0;
+       /* check ctx list again before sleep */
+       sec_process_ctx_list();
+       schedule_delayed_work(&sec_gc_work, cfs_time_seconds(SEC_GC_INTERVAL));
 }
 
 int sptlrpc_gc_init(void)
 {
-       struct l_wait_info lwi = { 0 };
-       struct task_struct *task;
-
-       mutex_init(&sec_gc_mutex);
-       spin_lock_init(&sec_gc_list_lock);
-       spin_lock_init(&sec_gc_ctx_list_lock);
-
-       INIT_LIST_HEAD(&sec_gc_list);
-       INIT_LIST_HEAD(&sec_gc_ctx_list);
-
-       /* initialize thread control */
-       memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
-       init_waitqueue_head(&sec_gc_thread.t_ctl_waitq);
-
-       task = kthread_run(sec_gc_main, &sec_gc_thread, "sptlrpc_gc");
-       if (IS_ERR(task)) {
-               CERROR("can't start gc thread: %ld\n", PTR_ERR(task));
-               return PTR_ERR(task);
-       }
-
-       l_wait_event(sec_gc_thread.t_ctl_waitq,
-                    thread_is_running(&sec_gc_thread), &lwi);
+       schedule_delayed_work(&sec_gc_work, cfs_time_seconds(SEC_GC_INTERVAL));
        return 0;
 }
 
 void sptlrpc_gc_fini(void)
 {
-       struct l_wait_info lwi = { 0 };
-
-       thread_set_flags(&sec_gc_thread, SVC_STOPPING);
-       wake_up(&sec_gc_thread.t_ctl_waitq);
-
-       l_wait_event(sec_gc_thread.t_ctl_waitq,
-                    thread_is_stopped(&sec_gc_thread), &lwi);
+       cancel_delayed_work_sync(&sec_gc_work);
 }
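
The patch above replaces the dedicated "sptlrpc_gc" kthread with a delayed work item on the system workqueue: the work function re-arms itself every SEC_GC_INTERVAL, callers that queue a dead context force an immediate pass with mod_delayed_work(..., 0), and teardown collapses to a single cancel_delayed_work_sync(). The sketch below is a minimal, self-contained illustration of that same lifecycle in a generic kernel module; the names gc_work, gc_main, gc_kick and GC_INTERVAL_SECS are illustrative only and are not part of the Lustre code.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define GC_INTERVAL_SECS	(30 * 60)	/* same 30-minute cadence as SEC_GC_INTERVAL */

static void gc_main(struct work_struct *ws);
static DECLARE_DELAYED_WORK(gc_work, gc_main);	/* compile-time init, no thread state to set up */

static void gc_main(struct work_struct *ws)
{
	/* ... walk the GC lists and reclaim expired entries here ... */

	/* re-arm: this replaces the old thread's timed l_wait_event() loop */
	schedule_delayed_work(&gc_work, msecs_to_jiffies(GC_INTERVAL_SECS * 1000));
}

/* force an immediate pass, as sptlrpc_gc_add_ctx() does with mod_delayed_work() */
static void __maybe_unused gc_kick(void)
{
	/* a delay of 0 pulls any pending timer forward to "run as soon as possible" */
	mod_delayed_work(system_wq, &gc_work, 0);
}

static int __init gc_example_init(void)
{
	/* replaces kthread_run() plus the wait-until-running handshake */
	schedule_delayed_work(&gc_work, msecs_to_jiffies(GC_INTERVAL_SECS * 1000));
	return 0;
}

static void __exit gc_example_exit(void)
{
	/*
	 * replaces the SVC_STOPPING / wait-until-stopped dance: this drops
	 * any pending timer and waits for a running gc_main() to finish
	 */
	cancel_delayed_work_sync(&gc_work);
}

module_init(gc_example_init);
module_exit(gc_example_exit);
MODULE_LICENSE("GPL");

One point worth noting about the teardown path: cancel_delayed_work_sync() is documented to work even when the work item re-queues itself, so the self-rearming call inside gc_main() is safe, and no extra stop flag or wait queue is needed at module unload.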