1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2007 Cluster File Systems, Inc.
5 * Author: Eric Mei <ericm@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 # define EXPORT_SYMTAB
26 #define DEBUG_SUBSYSTEM S_SEC
29 #include <liblustre.h>
32 #include <obd_support.h>
33 #include <obd_class.h>
34 #include <lustre_net.h>
35 #include <lustre_sec.h>
/* how often the GC thread wakes up on its own, in seconds */
#define SEC_GC_INTERVAL (30 * 60)

/* serializes a GC pass over sec_gc_list against sptlrpc_gc_del_sec() */
static DECLARE_MUTEX(sec_gc_mutex);
/* all secs registered for periodic context GC; protected by sec_gc_list_lock */
static CFS_LIST_HEAD(sec_gc_list);
static spinlock_t sec_gc_list_lock = SPIN_LOCK_UNLOCKED;

/* dead client contexts handed over for destruction by the GC thread;
 * protected by sec_gc_ctx_list_lock */
static CFS_LIST_HEAD(sec_gc_ctx_list);
static spinlock_t sec_gc_ctx_list_lock = SPIN_LOCK_UNLOCKED;

/* control block of the single GC thread */
static struct ptlrpc_thread sec_gc_thread;
/* number of callers blocked in sptlrpc_gc_del_sec() waiting for the
 * current GC pass to drop its reference to the list */
static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
52 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
54 if (!list_empty(&sec->ps_gc_list)) {
55 CERROR("sec %p(%s) already in gc list\n",
56 sec, sec->ps_policy->sp_name);
60 spin_lock(&sec_gc_list_lock);
61 list_add_tail(&sec_gc_list, &sec->ps_gc_list);
62 spin_unlock(&sec_gc_list_lock);
64 CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
66 EXPORT_SYMBOL(sptlrpc_gc_add_sec);
68 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
70 if (list_empty(&sec->ps_gc_list))
75 spin_lock(&sec_gc_list_lock);
76 list_del_init(&sec->ps_gc_list);
77 spin_unlock(&sec_gc_list_lock);
80 atomic_inc(&sec_gc_wait_del);
81 mutex_down(&sec_gc_mutex);
82 mutex_up(&sec_gc_mutex);
83 atomic_dec(&sec_gc_wait_del);
85 CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
87 EXPORT_SYMBOL(sptlrpc_gc_del_sec);
89 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
91 LASSERT(list_empty(&ctx->cc_gc_chain));
93 CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
94 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
95 spin_lock(&sec_gc_ctx_list_lock);
96 list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
97 spin_unlock(&sec_gc_ctx_list_lock);
99 sec_gc_thread.t_flags |= SVC_SIGNAL;
100 cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
102 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
104 static void sec_process_ctx_list(void)
106 struct ptlrpc_cli_ctx *ctx;
108 spin_lock(&sec_gc_ctx_list_lock);
110 while (!list_empty(&sec_gc_ctx_list)) {
111 ctx = list_entry(sec_gc_ctx_list.next,
112 struct ptlrpc_cli_ctx, cc_gc_chain);
113 list_del_init(&ctx->cc_gc_chain);
114 spin_unlock(&sec_gc_ctx_list_lock);
116 LASSERT(ctx->cc_sec);
117 LASSERT(atomic_read(&ctx->cc_refcount) == 1);
118 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
119 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
120 sptlrpc_cli_ctx_put(ctx, 1);
122 spin_lock(&sec_gc_ctx_list_lock);
125 spin_unlock(&sec_gc_ctx_list_lock);
128 static void sec_do_gc(struct ptlrpc_sec *sec)
130 cfs_time_t now = cfs_time_current_sec();
132 if (unlikely(sec->ps_gc_next == 0)) {
133 CWARN("sec %p(%s) has 0 gc time\n",
134 sec, sec->ps_policy->sp_name);
138 if (unlikely(sec->ps_policy->sp_cops->gc_ctx == NULL)) {
139 CWARN("sec %p(%s) is not prepared for gc\n",
140 sec, sec->ps_policy->sp_name);
144 CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
146 if (time_after(sec->ps_gc_next, now))
149 sec->ps_policy->sp_cops->gc_ctx(sec);
150 sec->ps_gc_next = now + sec->ps_gc_interval;
153 static int sec_gc_main(void *arg)
155 struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
156 struct l_wait_info lwi;
158 cfs_daemonize("sptlrpc_ctx_gc");
160 /* Record that the thread is running */
161 thread->t_flags = SVC_RUNNING;
162 cfs_waitq_signal(&thread->t_ctl_waitq);
165 struct ptlrpc_sec *sec, *next;
167 thread->t_flags &= ~SVC_SIGNAL;
168 sec_process_ctx_list();
170 mutex_down(&sec_gc_mutex);
171 list_for_each_entry_safe(sec, next, &sec_gc_list, ps_gc_list) {
172 /* if someone is waiting to be deleted, let it
173 * proceed as soon as possible. */
174 if (atomic_read(&sec_gc_wait_del)) {
175 CWARN("deletion pending, retry\n");
176 mutex_up(&sec_gc_mutex);
182 mutex_up(&sec_gc_mutex);
184 lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
185 l_wait_event(thread->t_ctl_waitq,
186 thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
189 if (thread->t_flags & SVC_STOPPING) {
190 thread->t_flags &= ~SVC_STOPPING;
195 thread->t_flags = SVC_STOPPED;
196 cfs_waitq_signal(&thread->t_ctl_waitq);
200 int sptlrpc_gc_start_thread(void)
202 struct l_wait_info lwi = { 0 };
205 /* initialize thread control */
206 memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
207 cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
209 rc = cfs_kernel_thread(sec_gc_main, &sec_gc_thread,
210 CLONE_VM | CLONE_FILES);
212 CERROR("can't start gc thread: %d\n", rc);
216 l_wait_event(sec_gc_thread.t_ctl_waitq,
217 sec_gc_thread.t_flags & SVC_RUNNING, &lwi);
221 void sptlrpc_gc_stop_thread(void)
223 struct l_wait_info lwi = { 0 };
225 sec_gc_thread.t_flags = SVC_STOPPING;
226 cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
228 l_wait_event(sec_gc_thread.t_ctl_waitq,
229 sec_gc_thread.t_flags & SVC_STOPPED, &lwi);
232 #else /* !__KERNEL__ */
/* userspace (liblustre) build: no kernel threads, so sec GC is a no-op */

void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
}

void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
}

int sptlrpc_gc_start_thread(void)
{
        return 0;
}

void sptlrpc_gc_stop_thread(void)
{
}
248 #endif /* __KERNEL__ */