/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_gc.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#ifndef __KERNEL__
#include <liblustre.h>
#else
#include <libcfs/libcfs.h>
#endif

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>

#define SEC_GC_INTERVAL (30 * 60)

#ifdef __KERNEL__

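/*
 * Garbage-collection state shared by this file:
 *  - sec_gc_list holds every registered ptlrpc_sec, protected by
 *    sec_gc_list_lock; sec_gc_mutex serializes a full GC pass against
 *    sptlrpc_gc_del_sec().
 *  - sec_gc_ctx_list holds dead client contexts handed over by
 *    sptlrpc_gc_add_ctx() for the GC thread to release.
 *  - sec_gc_wait_del flags a pending sptlrpc_gc_del_sec() so a running
 *    GC pass backs off early.
 */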
static cfs_mutex_t sec_gc_mutex;
static CFS_LIST_HEAD(sec_gc_list);
static cfs_spinlock_t sec_gc_list_lock;

static CFS_LIST_HEAD(sec_gc_ctx_list);
static cfs_spinlock_t sec_gc_ctx_list_lock;

static struct ptlrpc_thread sec_gc_thread;
static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);


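/*
 * Register a sec for periodic garbage collection.  The first GC run is
 * scheduled ps_gc_interval seconds from now; the policy must provide a
 * gc_ctx callback and a non-zero interval.
 */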
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
        LASSERT(sec->ps_policy->sp_cops->gc_ctx);
        LASSERT(sec->ps_gc_interval > 0);
        LASSERT(cfs_list_empty(&sec->ps_gc_list));

        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;

        cfs_spin_lock(&sec_gc_list_lock);
        cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
        cfs_spin_unlock(&sec_gc_list_lock);

        CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_add_sec);

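/*
 * Unregister a sec from garbage collection.  sec_gc_wait_del is raised
 * before unlinking so a GC pass in progress restarts its iteration, and
 * the lock/unlock of sec_gc_mutex waits out any pass still touching this
 * sec.  May sleep.
 */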
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
        if (cfs_list_empty(&sec->ps_gc_list))
                return;

        cfs_might_sleep();

        /* signal before list_del to make the iteration in the gc thread safe */
        cfs_atomic_inc(&sec_gc_wait_del);

        cfs_spin_lock(&sec_gc_list_lock);
        cfs_list_del_init(&sec->ps_gc_list);
        cfs_spin_unlock(&sec_gc_list_lock);

        /* barrier: wait for any GC pass holding sec_gc_mutex to finish */
        cfs_mutex_lock(&sec_gc_mutex);
        cfs_mutex_unlock(&sec_gc_mutex);

        cfs_atomic_dec(&sec_gc_wait_del);

        CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_del_sec);

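/*
 * Hand a dead context over to the GC thread, then wake the thread up.
 * The GC thread drops the final reference from sec_process_ctx_list(),
 * which expects cc_refcount == 1 at that point.
 */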
void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(cfs_list_empty(&ctx->cc_gc_chain));

        CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        cfs_spin_lock(&sec_gc_ctx_list_lock);
        cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
        cfs_spin_unlock(&sec_gc_ctx_list_lock);

        thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
}
EXPORT_SYMBOL(sptlrpc_gc_add_ctx);

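/*
 * Drain sec_gc_ctx_list, dropping the last reference on each context.
 * The spinlock is released around sptlrpc_cli_ctx_put(), presumably
 * because destroying the context may block.
 */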
static void sec_process_ctx_list(void)
{
        struct ptlrpc_cli_ctx *ctx;

        cfs_spin_lock(&sec_gc_ctx_list_lock);

        while (!cfs_list_empty(&sec_gc_ctx_list)) {
                ctx = cfs_list_entry(sec_gc_ctx_list.next,
                                     struct ptlrpc_cli_ctx, cc_gc_chain);
                cfs_list_del_init(&ctx->cc_gc_chain);
                cfs_spin_unlock(&sec_gc_ctx_list_lock);

                LASSERT(ctx->cc_sec);
                LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
                CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
                sptlrpc_cli_ctx_put(ctx, 1);

                cfs_spin_lock(&sec_gc_ctx_list_lock);
        }

        cfs_spin_unlock(&sec_gc_ctx_list_lock);
}

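/*
 * Run the policy's gc_ctx callback on one sec if its deadline has passed,
 * then push ps_gc_next out by another ps_gc_interval.
 */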
static void sec_do_gc(struct ptlrpc_sec *sec)
{
        LASSERT(sec->ps_policy->sp_cops->gc_ctx);

        if (unlikely(sec->ps_gc_next == 0)) {
                CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
                      sec, sec->ps_policy->sp_name);
                return;
        }

        CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);

        if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
                return;

        sec->ps_policy->sp_cops->gc_ctx(sec);
        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
}

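/*
 * Main loop of the sptlrpc_gc kernel thread: release handed-over dead
 * contexts, walk every registered sec under sec_gc_mutex, then sleep for
 * up to SEC_GC_INTERVAL seconds or until signalled or told to stop.
 */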
static int sec_gc_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
        struct l_wait_info    lwi;

        cfs_daemonize_ctxt("sptlrpc_gc");

        /* Record that the thread is running */
        thread_set_flags(thread, SVC_RUNNING);
        cfs_waitq_signal(&thread->t_ctl_waitq);

        while (1) {
                struct ptlrpc_sec *sec;

                thread_clear_flags(thread, SVC_SIGNAL);
                sec_process_ctx_list();
again:
                /* go through the sec list and do gc.
                 * FIXME: we iterate through the whole list each time, which
                 * is not optimal; we may want a balanced binary tree to track
                 * each sec in order of expiry time.
                 * another issue is that we wake up at a fixed interval instead
                 * of according to each sec's expiry time */
                cfs_mutex_lock(&sec_gc_mutex);
                cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                        /* if someone is waiting to be deleted, let it
                         * proceed as soon as possible. */
                        if (cfs_atomic_read(&sec_gc_wait_del)) {
                                CDEBUG(D_SEC, "deletion pending, start over\n");
                                cfs_mutex_unlock(&sec_gc_mutex);
                                goto again;
                        }

                        sec_do_gc(sec);
                }
                cfs_mutex_unlock(&sec_gc_mutex);

                /* check ctx list again before sleep */
                sec_process_ctx_list();

                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_signal(thread),
                             &lwi);

                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
        }

        thread_set_flags(thread, SVC_STOPPED);
        cfs_waitq_signal(&thread->t_ctl_waitq);
        return 0;
}

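/*
 * Initialize the GC locks and thread control structure, start the
 * sptlrpc_gc thread, and wait until it reports itself running.
 */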
int sptlrpc_gc_init(void)
{
        struct l_wait_info lwi = { 0 };
        int                rc;

        cfs_mutex_init(&sec_gc_mutex);
        cfs_spin_lock_init(&sec_gc_list_lock);
        cfs_spin_lock_init(&sec_gc_ctx_list_lock);

        /* initialize thread control */
        memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
        cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);

        rc = cfs_create_thread(sec_gc_main, &sec_gc_thread, CFS_DAEMON_FLAGS);
        if (rc < 0) {
                CERROR("can't start gc thread: %d\n", rc);
                return rc;
        }

        l_wait_event(sec_gc_thread.t_ctl_waitq,
                     thread_is_running(&sec_gc_thread), &lwi);
        return 0;
}

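/*
 * Ask the GC thread to stop and wait for it to reach the stopped state.
 */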
void sptlrpc_gc_fini(void)
{
        struct l_wait_info lwi = { 0 };

        thread_set_flags(&sec_gc_thread, SVC_STOPPING);
        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);

        l_wait_event(sec_gc_thread.t_ctl_waitq,
                     thread_is_stopped(&sec_gc_thread), &lwi);
}

#else /* !__KERNEL__ */

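/*
 * liblustre (userspace) builds have no GC thread; these entry points are
 * no-op stubs.
 */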
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
}
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
}
int sptlrpc_gc_init(void)
{
        return 0;
}
void sptlrpc_gc_fini(void)
{
}

#endif /* __KERNEL__ */