1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2007 Cluster File Systems, Inc.
5 * Author: Eric Mei <ericm@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 # define EXPORT_SYMTAB
26 #define DEBUG_SUBSYSTEM S_SEC
29 #include <liblustre.h>
32 #include <obd_support.h>
33 #include <obd_class.h>
34 #include <lustre_net.h>
35 #include <lustre_sec.h>
37 #define SEC_GC_INTERVAL (30 * 60)
/*
 * Global state for the sptlrpc security garbage-collection (gc) thread.
 * NOTE(review): this appears to be a line-numbered listing with some
 * source lines dropped; code left byte-identical, comments only.
 */
/* serializes gc passes against sptlrpc_gc_del_sec() (see barrier there) */
41 static DECLARE_MUTEX(sec_gc_mutex);
/* list of ptlrpc_sec objects registered for periodic gc, plus its lock */
42 static CFS_LIST_HEAD(sec_gc_list);
/* NOTE(review): SPIN_LOCK_UNLOCKED initializer is deprecated/removed in
 * later kernels in favour of DEFINE_SPINLOCK()/spin_lock_init() --
 * confirm the target kernel range before changing. */
43 static spinlock_t sec_gc_list_lock = SPIN_LOCK_UNLOCKED;
/* list of dying client contexts handed over for final release, plus lock */
45 static CFS_LIST_HEAD(sec_gc_ctx_list);
46 static spinlock_t sec_gc_ctx_list_lock = SPIN_LOCK_UNLOCKED;
/* control block for the single gc thread */
48 static struct ptlrpc_thread sec_gc_thread;
/* nonzero while a deleter waits in sptlrpc_gc_del_sec(); the gc loop
 * checks it and drops sec_gc_mutex as soon as possible */
49 static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
52 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
54 if (!list_empty(&sec->ps_gc_list)) {
55 CERROR("sec %p(%s) already in gc list\n",
56 sec, sec->ps_policy->sp_name);
60 spin_lock(&sec_gc_list_lock);
61 list_add_tail(&sec_gc_list, &sec->ps_gc_list);
62 spin_unlock(&sec_gc_list_lock);
64 CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
66 EXPORT_SYMBOL(sptlrpc_gc_add_sec);
/*
 * Unregister \a sec from the gc list and guarantee that, by the time we
 * return, the gc thread can no longer be touching it.
 * NOTE(review): the listing appears to have dropped lines here (e.g. an
 * early "return" for the empty-list case) -- verify against full source.
 */
68 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
/* never registered (or already deleted): nothing to do */
70 if (list_empty(&sec->ps_gc_list))
75 spin_lock(&sec_gc_list_lock);
76 list_del_init(&sec->ps_gc_list);
77 spin_unlock(&sec_gc_list_lock);
/* barrier: taking then releasing sec_gc_mutex waits out any gc pass
 * that was already iterating the list when we unlinked above.
 * sec_gc_wait_del tells the gc loop a deleter is waiting, so it bails
 * out of its walk and drops the mutex as soon as possible. */
80 atomic_inc(&sec_gc_wait_del);
81 mutex_down(&sec_gc_mutex);
82 mutex_up(&sec_gc_mutex);
83 atomic_dec(&sec_gc_wait_del);
85 CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
87 EXPORT_SYMBOL(sptlrpc_gc_del_sec);
/*
 * Hand a dying client context over to the gc thread for its final
 * release.  The caller transfers its (last) reference; see the
 * refcount == 1 assertion in sec_process_ctx_list().
 */
89 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
/* must not already be queued for gc */
91 LASSERT(list_empty(&ctx->cc_gc_chain));
93 CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
94 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
95 spin_lock(&sec_gc_ctx_list_lock);
96 list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
97 spin_unlock(&sec_gc_ctx_list_lock);
/* flag + wake the gc thread so the context is destroyed promptly
 * rather than waiting for the next SEC_GC_INTERVAL timeout */
99 sec_gc_thread.t_flags |= SVC_SIGNAL;
100 cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
102 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
/*
 * Drain sec_gc_ctx_list: pop contexts queued by sptlrpc_gc_add_ctx()
 * and drop their last reference.
 * NOTE(review): lines appear to be missing from this listing -- the
 * original presumably loops (re-taking the lock) until the list is
 * empty; confirm against full source.
 */
104 static void sec_process_ctx_list(void)
106 struct ptlrpc_cli_ctx *ctx;
109 spin_lock(&sec_gc_ctx_list_lock);
110 if (!list_empty(&sec_gc_ctx_list)) {
111 ctx = list_entry(sec_gc_ctx_list.next,
112 struct ptlrpc_cli_ctx, cc_gc_chain);
113 list_del_init(&ctx->cc_gc_chain);
/* drop the spinlock before releasing the context, which may sleep */
114 spin_unlock(&sec_gc_ctx_list_lock);
116 LASSERT(ctx->cc_sec);
/* the owner handed us the final reference */
117 LASSERT(atomic_read(&ctx->cc_refcount) == 1);
118 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
119 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
/* final put (sync flag set): destroys the context now */
120 sptlrpc_cli_ctx_put(ctx, 1);
124 spin_unlock(&sec_gc_ctx_list_lock);
/*
 * Run one gc pass on \a sec if its deadline (ps_gc_next) has arrived,
 * then re-arm the deadline.
 * NOTE(review): the listing seems to omit the early-return lines after
 * the two sanity CWARNs and the not-yet-due check -- confirm with full
 * source.
 */
127 static void sec_do_gc(struct ptlrpc_sec *sec)
129 cfs_time_t now = cfs_time_current_sec();
/* sanity: a sec on the gc list should carry a valid next-gc time */
131 if (unlikely(sec->ps_gc_next == 0)) {
132 CWARN("sec %p(%s) has 0 gc time\n",
133 sec, sec->ps_policy->sp_name);
/* sanity: the policy must supply a gc_ctx method to be gc-able */
137 if (unlikely(sec->ps_policy->sp_cops->gc_ctx == NULL)) {
138 CWARN("sec %p(%s) is not prepared for gc\n",
139 sec, sec->ps_policy->sp_name);
143 CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
/* deadline not reached yet */
145 if (time_after(sec->ps_gc_next, now))
148 sec->ps_policy->sp_cops->gc_ctx(sec);
/* schedule the next pass one ps_gc_interval from now */
149 sec->ps_gc_next = now + sec->ps_gc_interval;
/*
 * Body of the dedicated gc kernel thread.  Each iteration: drain the
 * dying-context list, walk the registered-sec list under sec_gc_mutex
 * running sec_do_gc() on each, then sleep up to SEC_GC_INTERVAL or
 * until signalled (new ctx queued) or asked to stop.
 * NOTE(review): the main loop construct (e.g. "while (1) {"), the
 * loop-exit "break", and some continuation lines appear to be missing
 * from this listing -- confirm against full source.
 */
152 static int sec_gc_main(void *arg)
154 struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
155 struct l_wait_info lwi;
157 cfs_daemonize("sptlrpc_ctx_gc");
159 /* Record that the thread is running */
160 thread->t_flags = SVC_RUNNING;
161 cfs_waitq_signal(&thread->t_ctl_waitq);
164 struct ptlrpc_sec *sec, *next;
/* clear the signal bit before working, so a wakeup that arrives
 * during this pass is not lost */
166 thread->t_flags &= ~SVC_SIGNAL;
167 sec_process_ctx_list();
169 mutex_down(&sec_gc_mutex);
170 list_for_each_entry_safe(sec, next, &sec_gc_list, ps_gc_list) {
172 * if someone is waiting to be deleted, let it
173 * proceed as soon as possible.
175 if (atomic_read(&sec_gc_wait_del)) {
176 CWARN("deletion pending, retry\n");
177 mutex_up(&sec_gc_mutex);
183 mutex_up(&sec_gc_mutex);
/* sleep until timeout, new gc work (SVC_SIGNAL) or shutdown */
185 lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
186 l_wait_event(thread->t_ctl_waitq,
187 thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
190 if (thread->t_flags & SVC_STOPPING) {
191 thread->t_flags &= ~SVC_STOPPING;
/* report shutdown completion to sptlrpc_gc_stop_thread() */
196 thread->t_flags = SVC_STOPPED;
197 cfs_waitq_signal(&thread->t_ctl_waitq);
/*
 * Spawn the gc thread and block until it has reported SVC_RUNNING.
 * Returns 0 on success, negative errno if the thread could not be
 * created.
 * NOTE(review): the declaration of "rc", the "if (rc < 0)" error
 * branch and the return statements appear to be dropped from this
 * listing -- confirm against full source.
 */
201 int sptlrpc_gc_start_thread(void)
203 struct l_wait_info lwi = { 0 };
206 /* initialize thread control */
207 memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
208 cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
210 rc = cfs_kernel_thread(sec_gc_main, &sec_gc_thread,
211 CLONE_VM | CLONE_FILES);
213 CERROR("can't start gc thread: %d\n", rc);
/* wait for the thread to come up before returning success */
217 l_wait_event(sec_gc_thread.t_ctl_waitq,
218 sec_gc_thread.t_flags & SVC_RUNNING, &lwi);
/*
 * Ask the gc thread to stop and wait until it has fully exited
 * (sec_gc_main() sets SVC_STOPPED just before returning).
 */
222 void sptlrpc_gc_stop_thread(void)
224 struct l_wait_info lwi = { 0 };
226 sec_gc_thread.t_flags = SVC_STOPPING;
/* wake it out of its LWI_TIMEOUT sleep so it notices SVC_STOPPING */
227 cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
229 l_wait_event(sec_gc_thread.t_ctl_waitq,
230 sec_gc_thread.t_flags & SVC_STOPPED, &lwi);
/*
 * Userspace (liblustre) builds have no kernel gc thread: all entry
 * points are no-op stubs.
 * NOTE(review): the stub bodies themselves are not visible in this
 * listing -- presumably empty / "return 0" -- confirm with full source.
 */
233 #else /* !__KERNEL__ */
235 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
238 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
241 int sptlrpc_gc_start_thread(void)
245 void sptlrpc_gc_stop_thread(void)
249 #endif /* __KERNEL__ */