1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2007 Cluster File Systems, Inc.
5 * Author: Eric Mei <ericm@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
24 # define EXPORT_SYMTAB
26 #define DEBUG_SUBSYSTEM S_SEC
29 #include <liblustre.h>
31 #include <libcfs/libcfs.h>
34 #include <obd_support.h>
35 #include <obd_class.h>
36 #include <lustre_net.h>
37 #include <lustre_sec.h>
39 #define SEC_GC_INTERVAL (30 * 60)
43 static DECLARE_MUTEX(sec_gc_mutex);
44 static CFS_LIST_HEAD(sec_gc_list);
45 static spinlock_t sec_gc_list_lock = SPIN_LOCK_UNLOCKED;
47 static CFS_LIST_HEAD(sec_gc_ctx_list);
48 static spinlock_t sec_gc_ctx_list_lock = SPIN_LOCK_UNLOCKED;
50 static struct ptlrpc_thread sec_gc_thread;
51 static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
54 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
56 LASSERT(sec->ps_policy->sp_cops->gc_ctx);
57 LASSERT(sec->ps_gc_interval > 0);
58 LASSERT(list_empty(&sec->ps_gc_list));
60 sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
62 spin_lock(&sec_gc_list_lock);
63 list_add_tail(&sec_gc_list, &sec->ps_gc_list);
64 spin_unlock(&sec_gc_list_lock);
66 CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
68 EXPORT_SYMBOL(sptlrpc_gc_add_sec);
70 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
72 if (list_empty(&sec->ps_gc_list))
77 /* signal before list_del to make iteration in gc thread safe */
78 atomic_inc(&sec_gc_wait_del);
80 spin_lock(&sec_gc_list_lock);
81 list_del_init(&sec->ps_gc_list);
82 spin_unlock(&sec_gc_list_lock);
85 mutex_down(&sec_gc_mutex);
86 mutex_up(&sec_gc_mutex);
88 atomic_dec(&sec_gc_wait_del);
90 CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
92 EXPORT_SYMBOL(sptlrpc_gc_del_sec);
94 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
96 LASSERT(list_empty(&ctx->cc_gc_chain));
98 CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
99 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
100 spin_lock(&sec_gc_ctx_list_lock);
101 list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
102 spin_unlock(&sec_gc_ctx_list_lock);
104 sec_gc_thread.t_flags |= SVC_SIGNAL;
105 cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
107 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
109 static void sec_process_ctx_list(void)
111 struct ptlrpc_cli_ctx *ctx;
113 spin_lock(&sec_gc_ctx_list_lock);
115 while (!list_empty(&sec_gc_ctx_list)) {
116 ctx = list_entry(sec_gc_ctx_list.next,
117 struct ptlrpc_cli_ctx, cc_gc_chain);
118 list_del_init(&ctx->cc_gc_chain);
119 spin_unlock(&sec_gc_ctx_list_lock);
121 LASSERT(ctx->cc_sec);
122 LASSERT(atomic_read(&ctx->cc_refcount) == 1);
123 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
124 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
125 sptlrpc_cli_ctx_put(ctx, 1);
127 spin_lock(&sec_gc_ctx_list_lock);
130 spin_unlock(&sec_gc_ctx_list_lock);
133 static void sec_do_gc(struct ptlrpc_sec *sec)
135 LASSERT(sec->ps_policy->sp_cops->gc_ctx);
137 if (unlikely(sec->ps_gc_next == 0)) {
138 CWARN("sec %p(%s) has 0 gc time\n",
139 sec, sec->ps_policy->sp_name);
143 CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
145 if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
148 sec->ps_policy->sp_cops->gc_ctx(sec);
149 sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
/*
 * Body of the sptlrpc gc kernel thread.
 *
 * Loop forever: destroy contexts handed over via sptlrpc_gc_add_ctx(),
 * walk every registered sec and run its gc hook when due, then sleep up
 * to SEC_GC_INTERVAL seconds.  The sleep ends early when woken with
 * SVC_SIGNAL (new dying ctx queued) or SVC_STOPPING (shutdown request
 * from sptlrpc_gc_stop_thread()).  Always returns 0.
 */
static int sec_gc_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
        struct l_wait_info lwi;

        cfs_daemonize("sptlrpc_gc");

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        cfs_waitq_signal(&thread->t_ctl_waitq);

        while (1) {
                struct ptlrpc_sec *sec;

                /* consume any pending signal before processing, so a
                 * wakeup arriving during the pass is not lost */
                thread->t_flags &= ~SVC_SIGNAL;
                sec_process_ctx_list();
again:
                /* go through sec list do gc.
                 * FIXME here we iterate through the whole list each time which
                 * is not optimal. we perhaps want to use balanced binary tree
                 * to trace each sec as order of expiry time.
                 * another issue here is we wakeup as fixed interval instead of
                 * according to each sec's expiry time */
                mutex_down(&sec_gc_mutex);
                list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                        /* if someone is waiting to be deleted, let it
                         * proceed as soon as possible. */
                        if (atomic_read(&sec_gc_wait_del)) {
                                CWARN("deletion pending, start over\n");
                                mutex_up(&sec_gc_mutex);
                                /* restart the walk: the deleter may have
                                 * unlinked the entry we were standing on */
                                goto again;
                        }

                        sec_do_gc(sec);
                }
                mutex_up(&sec_gc_mutex);

                /* check ctx list again before sleep */
                sec_process_ctx_list();

                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
                             &lwi);

                if (thread->t_flags & SVC_STOPPING) {
                        thread->t_flags &= ~SVC_STOPPING;
                        break;
                }
        }

        /* acknowledge shutdown to sptlrpc_gc_stop_thread() */
        thread->t_flags = SVC_STOPPED;
        cfs_waitq_signal(&thread->t_ctl_waitq);
        return 0;
}
208 int sptlrpc_gc_start_thread(void)
210 struct l_wait_info lwi = { 0 };
213 /* initialize thread control */
214 memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
215 cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
217 rc = cfs_kernel_thread(sec_gc_main, &sec_gc_thread,
218 CLONE_VM | CLONE_FILES);
220 CERROR("can't start gc thread: %d\n", rc);
224 l_wait_event(sec_gc_thread.t_ctl_waitq,
225 sec_gc_thread.t_flags & SVC_RUNNING, &lwi);
229 void sptlrpc_gc_stop_thread(void)
231 struct l_wait_info lwi = { 0 };
233 sec_gc_thread.t_flags = SVC_STOPPING;
234 cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
236 l_wait_event(sec_gc_thread.t_ctl_waitq,
237 sec_gc_thread.t_flags & SVC_STOPPED, &lwi);
240 #else /* !__KERNEL__ */
/* Userspace (liblustre) builds have no gc thread: every entry point is
 * a no-op and thread startup trivially succeeds. */
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
}

void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
}

int sptlrpc_gc_start_thread(void)
{
        return 0;
}

void sptlrpc_gc_stop_thread(void)
{
}
256 #endif /* __KERNEL__ */