/* Source viewed via Whamcloud gitweb: fs/lustre-release.git
 * lustre/ptlrpc/sec_gc.c
 * (LU-1146 build: batch update copyright messages)
 */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2012, Whamcloud, Inc.
33  */
34 /*
35  * This file is part of Lustre, http://www.lustre.org/
36  * Lustre is a trademark of Sun Microsystems, Inc.
37  *
38  * lustre/ptlrpc/sec_gc.c
39  *
40  * Author: Eric Mei <ericm@clusterfs.com>
41  */
42
43 #ifndef EXPORT_SYMTAB
44 # define EXPORT_SYMTAB
45 #endif
46 #define DEBUG_SUBSYSTEM S_SEC
47
48 #ifndef __KERNEL__
49 #include <liblustre.h>
50 #else
51 #include <libcfs/libcfs.h>
52 #endif
53
54 #include <obd_support.h>
55 #include <obd_class.h>
56 #include <lustre_net.h>
57 #include <lustre_sec.h>
58
/* Periodic wakeup interval of the GC thread, in seconds (30 minutes). */
#define SEC_GC_INTERVAL (30 * 60)

#ifdef __KERNEL__

/* Held by the GC thread while it walks sec_gc_list; sptlrpc_gc_del_sec()
 * takes/releases it as a barrier to wait out an in-progress iteration. */
static cfs_mutex_t sec_gc_mutex;
/* List of all ptlrpc_sec objects registered for periodic GC,
 * linked through ptlrpc_sec::ps_gc_list, protected by sec_gc_list_lock. */
static CFS_LIST_HEAD(sec_gc_list);
static cfs_spinlock_t sec_gc_list_lock;

/* Dead client contexts handed over for destruction by the GC thread,
 * linked through ptlrpc_cli_ctx::cc_gc_chain, protected by
 * sec_gc_ctx_list_lock. */
static CFS_LIST_HEAD(sec_gc_ctx_list);
static cfs_spinlock_t sec_gc_ctx_list_lock;

/* Control block of the single "sptlrpc_gc" kernel thread. */
static struct ptlrpc_thread sec_gc_thread;
/* Non-zero while some caller is inside sptlrpc_gc_del_sec(); tells the GC
 * thread to drop sec_gc_mutex and restart its scan as soon as possible. */
static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);

73
74 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
75 {
76         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
77         LASSERT(sec->ps_gc_interval > 0);
78         LASSERT(cfs_list_empty(&sec->ps_gc_list));
79
80         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
81
82         cfs_spin_lock(&sec_gc_list_lock);
83         cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
84         cfs_spin_unlock(&sec_gc_list_lock);
85
86         CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
87 }
88 EXPORT_SYMBOL(sptlrpc_gc_add_sec);
89
/**
 * Unregister \a sec from periodic GC.
 *
 * Safe against a concurrently running GC thread: the atomic flag plus the
 * mutex lock/unlock pair below guarantee that by the time this returns, the
 * GC thread no longer holds a reference into this sec's list linkage.
 * May sleep (on sec_gc_mutex); must not be called from atomic context.
 */
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
        /* Never registered (or already removed) - nothing to do. */
        if (cfs_list_empty(&sec->ps_gc_list))
                return;

        cfs_might_sleep();

        /* signal before list_del to make iteration in gc thread safe */
        cfs_atomic_inc(&sec_gc_wait_del);

        cfs_spin_lock(&sec_gc_list_lock);
        cfs_list_del_init(&sec->ps_gc_list);
        cfs_spin_unlock(&sec_gc_list_lock);

        /* barrier */
        /* The GC thread holds sec_gc_mutex for the whole of each list scan;
         * acquiring and releasing it here blocks until any scan that might
         * still be touching this sec has finished (or restarted due to the
         * sec_gc_wait_del flag above). */
        cfs_mutex_lock(&sec_gc_mutex);
        cfs_mutex_unlock(&sec_gc_mutex);

        cfs_atomic_dec(&sec_gc_wait_del);

        CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_del_sec);
113
/**
 * Hand a dying client context over to the GC thread for destruction.
 *
 * The caller transfers its (last) reference on \a ctx; the GC thread drops
 * it in sec_process_ctx_list().  The ctx must not already be queued.
 * Wakes the GC thread immediately rather than waiting for the next
 * periodic interval.
 */
void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(cfs_list_empty(&ctx->cc_gc_chain));

        CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        cfs_spin_lock(&sec_gc_ctx_list_lock);
        cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
        cfs_spin_unlock(&sec_gc_ctx_list_lock);

        /* Set the flag before signalling so the GC thread's wait condition
         * (thread_is_signal) observes it when it wakes up. */
        thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
}
EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
128
129 static void sec_process_ctx_list(void)
130 {
131         struct ptlrpc_cli_ctx *ctx;
132
133         cfs_spin_lock(&sec_gc_ctx_list_lock);
134
135         while (!cfs_list_empty(&sec_gc_ctx_list)) {
136                 ctx = cfs_list_entry(sec_gc_ctx_list.next,
137                                      struct ptlrpc_cli_ctx, cc_gc_chain);
138                 cfs_list_del_init(&ctx->cc_gc_chain);
139                 cfs_spin_unlock(&sec_gc_ctx_list_lock);
140
141                 LASSERT(ctx->cc_sec);
142                 LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
143                 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
144                        ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
145                 sptlrpc_cli_ctx_put(ctx, 1);
146
147                 cfs_spin_lock(&sec_gc_ctx_list_lock);
148         }
149
150         cfs_spin_unlock(&sec_gc_ctx_list_lock);
151 }
152
153 static void sec_do_gc(struct ptlrpc_sec *sec)
154 {
155         LASSERT(sec->ps_policy->sp_cops->gc_ctx);
156
157         if (unlikely(sec->ps_gc_next == 0)) {
158                 CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
159                       sec, sec->ps_policy->sp_name);
160                 return;
161         }
162
163         CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
164
165         if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
166                 return;
167
168         sec->ps_policy->sp_cops->gc_ctx(sec);
169         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
170 }
171
/**
 * Main loop of the "sptlrpc_gc" kernel thread.
 *
 * Alternates between draining the dead-context list and walking the
 * registered-sec list (under sec_gc_mutex), then sleeps up to
 * SEC_GC_INTERVAL seconds or until signalled.  Exits when SVC_STOPPING
 * is set by sptlrpc_gc_fini().
 */
static int sec_gc_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
        struct l_wait_info    lwi;

        cfs_daemonize_ctxt("sptlrpc_gc");

        /* Record that the thread is running */
        thread_set_flags(thread, SVC_RUNNING);
        cfs_waitq_signal(&thread->t_ctl_waitq);

        while (1) {
                struct ptlrpc_sec *sec;

                /* Clear SVC_SIGNAL before processing so a signal arriving
                 * during this iteration is not lost: it will re-set the flag
                 * and the wait below will return immediately. */
                thread_clear_flags(thread, SVC_SIGNAL);
                sec_process_ctx_list();
again:
                /* go through sec list do gc.
                 * FIXME here we iterate through the whole list each time which
                 * is not optimal. we perhaps want to use balanced binary tree
                 * to trace each sec as order of expiry time.
                 * another issue here is we wakeup as fixed interval instead of
                 * according to each sec's expiry time */
                cfs_mutex_lock(&sec_gc_mutex);
                cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                        /* if someone is waiting to be deleted, let it
                         * proceed as soon as possible. */
                        if (cfs_atomic_read(&sec_gc_wait_del)) {
                                CDEBUG(D_SEC, "deletion pending, start over\n");
                                cfs_mutex_unlock(&sec_gc_mutex);
                                goto again;
                        }

                        sec_do_gc(sec);
                }
                cfs_mutex_unlock(&sec_gc_mutex);

                /* check ctx list again before sleep */
                sec_process_ctx_list();

                /* Sleep until the next periodic interval, or earlier if
                 * woken by sptlrpc_gc_add_ctx() (SVC_SIGNAL) or by
                 * sptlrpc_gc_fini() (SVC_STOPPING). */
                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_signal(thread),
                             &lwi);

                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
        }

        /* Let sptlrpc_gc_fini() know we are gone. */
        thread_set_flags(thread, SVC_STOPPED);
        cfs_waitq_signal(&thread->t_ctl_waitq);
        return 0;
}
226
227 int sptlrpc_gc_init(void)
228 {
229         struct l_wait_info lwi = { 0 };
230         int                rc;
231
232         cfs_mutex_init(&sec_gc_mutex);
233         cfs_spin_lock_init(&sec_gc_list_lock);
234         cfs_spin_lock_init(&sec_gc_ctx_list_lock);
235
236         /* initialize thread control */
237         memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
238         cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
239
240         rc = cfs_create_thread(sec_gc_main, &sec_gc_thread, CFS_DAEMON_FLAGS);
241         if (rc < 0) {
242                 CERROR("can't start gc thread: %d\n", rc);
243                 return rc;
244         }
245
246         l_wait_event(sec_gc_thread.t_ctl_waitq,
247                      thread_is_running(&sec_gc_thread), &lwi);
248         return 0;
249 }
250
251 void sptlrpc_gc_fini(void)
252 {
253         struct l_wait_info lwi = { 0 };
254
255         thread_set_flags(&sec_gc_thread, SVC_STOPPING);
256         cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
257
258         l_wait_event(sec_gc_thread.t_ctl_waitq,
259                      thread_is_stopped(&sec_gc_thread), &lwi);
260 }
261
262 #else /* !__KERNEL__ */
263
/* Userspace (liblustre) builds have no kernel threads, so security GC is
 * a no-op: these stubs keep the sptlrpc API link-compatible. */
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
}
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
}
int sptlrpc_gc_init(void)
{
        return 0;
}
void sptlrpc_gc_fini(void)
{
}
277
278 #endif /* __KERNEL__ */