/* Source: lustre-release.git, lustre/ptlrpc/sec_gc.c
 * (gitweb snapshot, commit f6cc912e0c2aaa73da70c64911036c1b9a47c99f) */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * Copyright (c) 2011 Whamcloud, Inc.
34  */
35 /*
36  * This file is part of Lustre, http://www.lustre.org/
37  * Lustre is a trademark of Sun Microsystems, Inc.
38  *
39  * lustre/ptlrpc/sec_gc.c
40  *
41  * Author: Eric Mei <ericm@clusterfs.com>
42  */
43
44 #ifndef EXPORT_SYMTAB
45 # define EXPORT_SYMTAB
46 #endif
47 #define DEBUG_SUBSYSTEM S_SEC
48
49 #ifndef __KERNEL__
50 #include <liblustre.h>
51 #else
52 #include <libcfs/libcfs.h>
53 #endif
54
55 #include <obd_support.h>
56 #include <obd_class.h>
57 #include <lustre_net.h>
58 #include <lustre_sec.h>
59
/* period of the gc thread's fixed wakeup, in seconds (multiplied by
 * CFS_HZ when armed as a timeout in sec_gc_main) */
#define SEC_GC_INTERVAL (30 * 60)

#ifdef __KERNEL__

/* held by the gc thread while it walks sec_gc_list; sptlrpc_gc_del_sec()
 * takes an empty lock/unlock turn on it as a barrier */
static cfs_mutex_t sec_gc_mutex;
/* all ptlrpc_sec's registered for gc, chained via ps_gc_list */
static CFS_LIST_HEAD(sec_gc_list);
static cfs_spinlock_t sec_gc_list_lock;

/* dead contexts handed over by sptlrpc_gc_add_ctx(), chained via
 * cc_gc_chain, released by sec_process_ctx_list() */
static CFS_LIST_HEAD(sec_gc_ctx_list);
static cfs_spinlock_t sec_gc_ctx_list_lock;

static struct ptlrpc_thread sec_gc_thread;
/* non-zero while some caller is inside sptlrpc_gc_del_sec(); the gc
 * loop restarts its list walk when it sees this set */
static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
73
74
75 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
76 {
77         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
78         LASSERT(sec->ps_gc_interval > 0);
79         LASSERT(cfs_list_empty(&sec->ps_gc_list));
80
81         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
82
83         cfs_spin_lock(&sec_gc_list_lock);
84         cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
85         cfs_spin_unlock(&sec_gc_list_lock);
86
87         CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
88 }
89 EXPORT_SYMBOL(sptlrpc_gc_add_sec);
90
/**
 * Unregister \a sec from the gc list.  May sleep: after unlinking, it
 * waits out any gc pass that might still be looking at \a sec, so the
 * caller can safely free the sec afterwards.
 */
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
        /* never registered, or already removed -- nothing to do */
        if (cfs_list_empty(&sec->ps_gc_list))
                return;

        cfs_might_sleep();

        /* signal before list_del to make iteration in gc thread safe */
        cfs_atomic_inc(&sec_gc_wait_del);

        cfs_spin_lock(&sec_gc_list_lock);
        cfs_list_del_init(&sec->ps_gc_list);
        cfs_spin_unlock(&sec_gc_list_lock);

        /* barrier: the gc thread holds sec_gc_mutex for the whole list
         * walk, so taking an empty lock/unlock turn here guarantees any
         * walk that started before the list_del above has finished (or
         * restarted, having seen sec_gc_wait_del) before we return */
        cfs_mutex_lock(&sec_gc_mutex);
        cfs_mutex_unlock(&sec_gc_mutex);

        cfs_atomic_dec(&sec_gc_wait_del);

        CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_del_sec);
114
/**
 * Hand a context over to the gc thread for release, then wake the
 * thread so the context is freed promptly rather than at the next
 * periodic wakeup.  The gc thread asserts cc_refcount == 1 when it
 * picks the ctx up, i.e. the caller passes in the last reference.
 */
void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(cfs_list_empty(&ctx->cc_gc_chain));

        CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        cfs_spin_lock(&sec_gc_ctx_list_lock);
        cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
        cfs_spin_unlock(&sec_gc_ctx_list_lock);

        /* ctx must be queued before the wakeup, so the gc thread is
         * guaranteed to see it when it drains the list */
        thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
}
EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
129
130 static void sec_process_ctx_list(void)
131 {
132         struct ptlrpc_cli_ctx *ctx;
133
134         cfs_spin_lock(&sec_gc_ctx_list_lock);
135
136         while (!cfs_list_empty(&sec_gc_ctx_list)) {
137                 ctx = cfs_list_entry(sec_gc_ctx_list.next,
138                                      struct ptlrpc_cli_ctx, cc_gc_chain);
139                 cfs_list_del_init(&ctx->cc_gc_chain);
140                 cfs_spin_unlock(&sec_gc_ctx_list_lock);
141
142                 LASSERT(ctx->cc_sec);
143                 LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
144                 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
145                        ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
146                 sptlrpc_cli_ctx_put(ctx, 1);
147
148                 cfs_spin_lock(&sec_gc_ctx_list_lock);
149         }
150
151         cfs_spin_unlock(&sec_gc_ctx_list_lock);
152 }
153
154 static void sec_do_gc(struct ptlrpc_sec *sec)
155 {
156         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
157
158         if (unlikely(sec->ps_gc_next == 0)) {
159                 CWARN("sec %p(%s) has 0 gc time\n",
160                       sec, sec->ps_policy->sp_name);
161                 return;
162         }
163
164         CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
165
166         if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
167                 return;
168
169         sec->ps_policy->sp_cops->gc_ctx(sec);
170         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
171 }
172
/*
 * Main loop of the security gc thread: drain the dead-context list,
 * walk all registered secs running their per-policy gc, then sleep
 * until the next periodic interval, a context hand-over (SVC_SIGNAL)
 * or a stop request (SVC_STOPPING).
 */
static int sec_gc_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
        struct l_wait_info    lwi;

        cfs_daemonize_ctxt("sptlrpc_gc");

        /* Record that the thread is running */
        thread_set_flags(thread, SVC_RUNNING);
        cfs_waitq_signal(&thread->t_ctl_waitq);

        while (1) {
                struct ptlrpc_sec *sec;

                /* clear before draining, so a signal arriving after this
                 * point is not lost: it will keep the next wait awake */
                thread_clear_flags(thread, SVC_SIGNAL);
                sec_process_ctx_list();
again:
                /* go through sec list do gc.
                 * FIXME here we iterate through the whole list each time which
                 * is not optimal. we perhaps want to use balanced binary tree
                 * to trace each sec as order of expiry time.
                 * another issue here is we wakeup as fixed interval instead of
                 * according to each sec's expiry time */
                cfs_mutex_lock(&sec_gc_mutex);
                cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                        /* if someone is waiting to be deleted, let it
                         * proceed as soon as possible. */
                        if (cfs_atomic_read(&sec_gc_wait_del)) {
                                CWARN("deletion pending, start over\n");
                                /* dropping the mutex is what unblocks the
                                 * barrier in sptlrpc_gc_del_sec(); the walk
                                 * restarts from scratch since the list may
                                 * change while unlocked */
                                cfs_mutex_unlock(&sec_gc_mutex);
                                goto again;
                        }

                        sec_do_gc(sec);
                }
                cfs_mutex_unlock(&sec_gc_mutex);

                /* check ctx list again before sleep */
                sec_process_ctx_list();

                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_signal(thread),
                             &lwi);

                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
        }

        /* let sptlrpc_gc_fini() know we are gone */
        thread_set_flags(thread, SVC_STOPPED);
        cfs_waitq_signal(&thread->t_ctl_waitq);
        return 0;
}
227
228 int sptlrpc_gc_init(void)
229 {
230         struct l_wait_info lwi = { 0 };
231         int                rc;
232
233         cfs_mutex_init(&sec_gc_mutex);
234         cfs_spin_lock_init(&sec_gc_list_lock);
235         cfs_spin_lock_init(&sec_gc_ctx_list_lock);
236
237         /* initialize thread control */
238         memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
239         cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
240
241         rc = cfs_create_thread(sec_gc_main, &sec_gc_thread, CFS_DAEMON_FLAGS);
242         if (rc < 0) {
243                 CERROR("can't start gc thread: %d\n", rc);
244                 return rc;
245         }
246
247         l_wait_event(sec_gc_thread.t_ctl_waitq,
248                      thread_is_running(&sec_gc_thread), &lwi);
249         return 0;
250 }
251
/*
 * Ask the gc thread to stop and block until it has fully exited
 * (sec_gc_main() sets SVC_STOPPED just before returning).
 */
void sptlrpc_gc_fini(void)
{
        struct l_wait_info lwi = { 0 };

        /* flag must be set before the wakeup, or the thread could wake,
         * see no stop request, and go back to sleep */
        thread_set_flags(&sec_gc_thread, SVC_STOPPING);
        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);

        l_wait_event(sec_gc_thread.t_ctl_waitq,
                     thread_is_stopped(&sec_gc_thread), &lwi);
}
262
263 #else /* !__KERNEL__ */
264
/* Userspace (liblustre) builds run no gc thread; the public gc entry
 * points become no-ops.  NOTE(review): there is no stub for
 * sptlrpc_gc_add_ctx() here -- presumably no userspace caller exists;
 * verify before adding a new call site. */
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
}
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
}
int sptlrpc_gc_init(void)
{
        return 0;
}
void sptlrpc_gc_fini(void)
{
}
278
279 #endif /* __KERNEL__ */