Whamcloud - gitweb
b=16098
[fs/lustre-release.git] / lustre / ptlrpc / sec_gc.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec_gc.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #ifndef EXPORT_SYMTAB
42 # define EXPORT_SYMTAB
43 #endif
44 #define DEBUG_SUBSYSTEM S_SEC
45
46 #ifndef __KERNEL__
47 #include <liblustre.h>
48 #else
49 #include <libcfs/libcfs.h>
50 #endif
51
52 #include <obd_support.h>
53 #include <obd_class.h>
54 #include <lustre_net.h>
55 #include <lustre_sec.h>
56
57 #define SEC_GC_INTERVAL (30 * 60)
58
59 #ifdef __KERNEL__
60
61 static DECLARE_MUTEX(sec_gc_mutex);
62 static CFS_LIST_HEAD(sec_gc_list);
63 static spinlock_t sec_gc_list_lock = SPIN_LOCK_UNLOCKED;
64
65 static CFS_LIST_HEAD(sec_gc_ctx_list);
66 static spinlock_t sec_gc_ctx_list_lock = SPIN_LOCK_UNLOCKED;
67
68 static struct ptlrpc_thread sec_gc_thread;
69 static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
70
71
72 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
73 {
74         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
75         LASSERT(sec->ps_gc_interval > 0);
76         LASSERT(list_empty(&sec->ps_gc_list));
77
78         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
79
80         spin_lock(&sec_gc_list_lock);
81         list_add_tail(&sec_gc_list, &sec->ps_gc_list);
82         spin_unlock(&sec_gc_list_lock);
83
84         CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
85 }
86 EXPORT_SYMBOL(sptlrpc_gc_add_sec);
87
/*
 * Unregister @sec from the gc list.  No-op if it was never added.
 * May sleep (takes sec_gc_mutex) — must not be called from atomic context.
 * On return the gc thread is guaranteed not to be referencing @sec.
 */
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
        /* never registered (or already deleted): nothing to do */
        if (list_empty(&sec->ps_gc_list))
                return;

        might_sleep();

        /* signal before list_del to make iteration in gc thread safe */
        atomic_inc(&sec_gc_wait_del);

        spin_lock(&sec_gc_list_lock);
        list_del_init(&sec->ps_gc_list);
        spin_unlock(&sec_gc_list_lock);

        /* barrier: the gc thread holds sec_gc_mutex for the whole list walk
         * (restarting when it sees sec_gc_wait_del != 0), so taking and
         * releasing the mutex here waits out any walk that could still be
         * looking at this sec */
        mutex_down(&sec_gc_mutex);
        mutex_up(&sec_gc_mutex);

        atomic_dec(&sec_gc_wait_del);

        CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_del_sec);
111
/*
 * Hand a client context over to the gc thread for destruction: queue it
 * on sec_gc_ctx_list and wake the thread.  The context must not already
 * be on a gc chain; sec_process_ctx_list() asserts its refcount is 1, so
 * the caller is expected to be transferring the last reference here.
 */
void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(list_empty(&ctx->cc_gc_chain));

        CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        spin_lock(&sec_gc_ctx_list_lock);
        list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
        spin_unlock(&sec_gc_ctx_list_lock);

        /* NOTE(review): this read-modify-write of t_flags is done without a
         * lock, racing with the gc thread's own t_flags updates; presumably
         * tolerated because a lost SVC_SIGNAL only delays the gc pass until
         * the next SEC_GC_INTERVAL timeout — confirm */
        sec_gc_thread.t_flags |= SVC_SIGNAL;
        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
}
EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
126
/*
 * Drain sec_gc_ctx_list, dropping the final reference on each queued
 * context.  The spinlock is released around sptlrpc_cli_ctx_put() —
 * presumably because destroying a context can sleep — and re-taken
 * before re-checking emptiness, so entries added concurrently by
 * sptlrpc_gc_add_ctx() are also picked up.
 */
static void sec_process_ctx_list(void)
{
        struct ptlrpc_cli_ctx *ctx;

        spin_lock(&sec_gc_ctx_list_lock);

        while (!list_empty(&sec_gc_ctx_list)) {
                ctx = list_entry(sec_gc_ctx_list.next,
                                 struct ptlrpc_cli_ctx, cc_gc_chain);
                /* detach under the lock, then drop the lock for the put */
                list_del_init(&ctx->cc_gc_chain);
                spin_unlock(&sec_gc_ctx_list_lock);

                /* the gc queue is expected to hold the very last reference */
                LASSERT(ctx->cc_sec);
                LASSERT(atomic_read(&ctx->cc_refcount) == 1);
                CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
                sptlrpc_cli_ctx_put(ctx, 1);

                spin_lock(&sec_gc_ctx_list_lock);
        }

        spin_unlock(&sec_gc_ctx_list_lock);
}
150
151 static void sec_do_gc(struct ptlrpc_sec *sec)
152 {
153         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
154
155         if (unlikely(sec->ps_gc_next == 0)) {
156                 CWARN("sec %p(%s) has 0 gc time\n",
157                       sec, sec->ps_policy->sp_name);
158                 return;
159         }
160
161         CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
162
163         if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
164                 return;
165
166         sec->ps_policy->sp_cops->gc_ctx(sec);
167         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
168 }
169
/*
 * Body of the sptlrpc gc thread.  Forever: drain the dead-context list,
 * walk every registered sec letting its policy collect expired contexts,
 * drain the context list again, then sleep for SEC_GC_INTERVAL or until
 * signalled (new dead context) or asked to stop.
 */
static int sec_gc_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
        struct l_wait_info    lwi;

        cfs_daemonize("sptlrpc_gc");

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        cfs_waitq_signal(&thread->t_ctl_waitq);

        while (1) {
                struct ptlrpc_sec *sec;

                /* consume any pending wakeup request before doing the work
                 * it asked for */
                thread->t_flags &= ~SVC_SIGNAL;
                sec_process_ctx_list();
again:
                /* go through sec list do gc.
                 * FIXME here we iterate through the whole list each time which
                 * is not optimal. we perhaps want to use balanced binary tree
                 * to trace each sec as order of expiry time.
                 * another issue here is we wakeup as fixed interval instead of
                 * according to each sec's expiry time */
                mutex_down(&sec_gc_mutex);
                list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                        /* if someone is waiting to be deleted, let it
                         * proceed as soon as possible. */
                        if (atomic_read(&sec_gc_wait_del)) {
                                CWARN("deletion pending, start over\n");
                                /* drop the mutex (sptlrpc_gc_del_sec blocks
                                 * on it as its barrier) and restart the walk
                                 * from scratch, since the list may change */
                                mutex_up(&sec_gc_mutex);
                                goto again;
                        }

                        sec_do_gc(sec);
                }
                mutex_up(&sec_gc_mutex);

                /* check ctx list again before sleep */
                sec_process_ctx_list();

                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
                             &lwi);

                if (thread->t_flags & SVC_STOPPING) {
                        thread->t_flags &= ~SVC_STOPPING;
                        break;
                }
        }

        /* let sptlrpc_gc_stop_thread() know we are gone */
        thread->t_flags = SVC_STOPPED;
        cfs_waitq_signal(&thread->t_ctl_waitq);
        return 0;
}
225
226 int sptlrpc_gc_start_thread(void)
227 {
228         struct l_wait_info lwi = { 0 };
229         int                rc;
230
231         /* initialize thread control */
232         memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
233         cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
234
235         rc = cfs_kernel_thread(sec_gc_main, &sec_gc_thread,
236                                CLONE_VM | CLONE_FILES);
237         if (rc < 0) {
238                 CERROR("can't start gc thread: %d\n", rc);
239                 return rc;
240         }
241
242         l_wait_event(sec_gc_thread.t_ctl_waitq,
243                      sec_gc_thread.t_flags & SVC_RUNNING, &lwi);
244         return 0;
245 }
246
247 void sptlrpc_gc_stop_thread(void)
248 {
249         struct l_wait_info lwi = { 0 };
250
251         sec_gc_thread.t_flags = SVC_STOPPING;
252         cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
253
254         l_wait_event(sec_gc_thread.t_ctl_waitq,
255                      sec_gc_thread.t_flags & SVC_STOPPED, &lwi);
256 }
257
258 #else /* !__KERNEL__ */
259
/* liblustre (userspace): no gc thread exists, registration is a no-op */
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
}
/* liblustre (userspace): no gc thread exists, deregistration is a no-op */
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
}
/* liblustre (userspace): nothing to start, always reports success */
int sptlrpc_gc_start_thread(void)
{
        return 0;
}
/* liblustre (userspace): nothing to stop, no-op */
void sptlrpc_gc_stop_thread(void)
{
}
273
274 #endif /* __KERNEL__ */