lustre/ptlrpc/sec_gc.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2007 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#ifndef __KERNEL__
#include <liblustre.h>
#else
#include <libcfs/libcfs.h>
#endif

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
/* the gc thread wakes up at least this often (in seconds) to scan the sec list */
#define SEC_GC_INTERVAL (30 * 60)

#ifdef __KERNEL__

static DECLARE_MUTEX(sec_gc_mutex);
static CFS_LIST_HEAD(sec_gc_list);
static spinlock_t sec_gc_list_lock = SPIN_LOCK_UNLOCKED;

static CFS_LIST_HEAD(sec_gc_ctx_list);
static spinlock_t sec_gc_ctx_list_lock = SPIN_LOCK_UNLOCKED;

static struct ptlrpc_thread sec_gc_thread;
static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);


void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
        LASSERT(sec->ps_policy->sp_cops->gc_ctx);
        LASSERT(sec->ps_gc_interval > 0);
        LASSERT(list_empty(&sec->ps_gc_list));

        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;

        spin_lock(&sec_gc_list_lock);
        list_add_tail(&sec->ps_gc_list, &sec_gc_list);
        spin_unlock(&sec_gc_list_lock);

        CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_add_sec);
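
/*
 * Illustrative only: a minimal sketch (not part of this file) of how a
 * security policy might register its newly created sec with the GC above.
 * "my_policy_create_sec" and "MY_GC_INTERVAL" are hypothetical names; the
 * only requirements visible here are a non-zero ps_gc_interval, an
 * initialized ps_gc_list, and a gc_ctx hook in the policy's client ops.
 */
#if 0
static struct ptlrpc_sec *my_policy_create_sec(void)
{
        struct ptlrpc_sec *sec;

        OBD_ALLOC_PTR(sec);
        if (sec == NULL)
                return NULL;

        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = MY_GC_INTERVAL;   /* seconds between ctx scans */

        /* ... policy-specific initialization ... */

        sptlrpc_gc_add_sec(sec);
        return sec;
}
#endif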

void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
        if (list_empty(&sec->ps_gc_list))
                return;

        might_sleep();

        /* signal before list_del to make the gc thread's list iteration safe */
        atomic_inc(&sec_gc_wait_del);

        spin_lock(&sec_gc_list_lock);
        list_del_init(&sec->ps_gc_list);
        spin_unlock(&sec_gc_list_lock);

        /* barrier: if the gc thread is in the middle of a pass, wait here
         * until it has dropped sec_gc_mutex, so the caller can safely free
         * this sec once we return. */
        mutex_down(&sec_gc_mutex);
        mutex_up(&sec_gc_mutex);

        atomic_dec(&sec_gc_wait_del);

        CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_del_sec);

void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(list_empty(&ctx->cc_gc_chain));

        CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        spin_lock(&sec_gc_ctx_list_lock);
        list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
        spin_unlock(&sec_gc_ctx_list_lock);

        sec_gc_thread.t_flags |= SVC_SIGNAL;
        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
}
EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
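
/*
 * Illustrative only: a hypothetical final-release path showing the intended
 * use of sptlrpc_gc_add_ctx().  A caller holding the last reference in a
 * context where it cannot sleep can hand the ctx over to the GC thread,
 * which performs the (possibly blocking) release later.
 * "my_ctx_release_nonblock" is not a real Lustre function.
 */
#if 0
static void my_ctx_release_nonblock(struct ptlrpc_cli_ctx *ctx)
{
        /* transfer our (last) reference to the GC thread instead of
         * dropping it here; the GC thread asserts cc_refcount == 1 and
         * then calls sptlrpc_cli_ctx_put(ctx, 1) in process context */
        LASSERT(atomic_read(&ctx->cc_refcount) == 1);
        sptlrpc_gc_add_ctx(ctx);
}
#endif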

static void sec_process_ctx_list(void)
{
        struct ptlrpc_cli_ctx *ctx;

        spin_lock(&sec_gc_ctx_list_lock);

        while (!list_empty(&sec_gc_ctx_list)) {
                ctx = list_entry(sec_gc_ctx_list.next,
                                 struct ptlrpc_cli_ctx, cc_gc_chain);
                list_del_init(&ctx->cc_gc_chain);

                /* drop the list lock across the final put: releasing the
                 * last reference may sleep */
                spin_unlock(&sec_gc_ctx_list_lock);

                LASSERT(ctx->cc_sec);
                LASSERT(atomic_read(&ctx->cc_refcount) == 1);
                CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
                sptlrpc_cli_ctx_put(ctx, 1);

                spin_lock(&sec_gc_ctx_list_lock);
        }

        spin_unlock(&sec_gc_ctx_list_lock);
}

static void sec_do_gc(struct ptlrpc_sec *sec)
{
        LASSERT(sec->ps_policy->sp_cops->gc_ctx);

        if (unlikely(sec->ps_gc_next == 0)) {
                CWARN("sec %p(%s) has 0 gc time\n",
                      sec, sec->ps_policy->sp_name);
                return;
        }

        CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);

        if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
                return;

        sec->ps_policy->sp_cops->gc_ctx(sec);
        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
}
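
/*
 * Illustrative only: a rough sketch of the shape of a policy's gc_ctx hook
 * invoked by sec_do_gc() above.  The hook is expected to walk the policy's
 * own context cache and retire contexts that have expired.  Every name
 * below ("my_sec", "my_ctx", the fields, the expiry test) is hypothetical
 * and not the actual GSS implementation.
 */
#if 0
struct my_ctx {
        struct ptlrpc_cli_ctx   mc_base;
        struct list_head        mc_chain;      /* linkage in ms_ctx_list */
        time_t                  mc_expire;     /* absolute expiry, in seconds */
};

struct my_sec {
        struct ptlrpc_sec       ms_base;
        spinlock_t              ms_lock;
        struct list_head        ms_ctx_list;   /* cached contexts */
};

static void my_policy_gc_ctx(struct ptlrpc_sec *sec)
{
        struct my_sec *msec = container_of(sec, struct my_sec, ms_base);
        struct my_ctx *mctx, *next;

        spin_lock(&msec->ms_lock);
        list_for_each_entry_safe(mctx, next, &msec->ms_ctx_list, mc_chain) {
                /* retire contexts whose lifetime has passed; the actual
                 * release is deferred to the GC thread so it can sleep */
                if (cfs_time_after(cfs_time_current_sec(), mctx->mc_expire)) {
                        list_del_init(&mctx->mc_chain);
                        sptlrpc_gc_add_ctx(&mctx->mc_base);
                }
        }
        spin_unlock(&msec->ms_lock);
}
#endif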

static int sec_gc_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
        struct l_wait_info    lwi;

        cfs_daemonize("sptlrpc_gc");

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        cfs_waitq_signal(&thread->t_ctl_waitq);

        while (1) {
                struct ptlrpc_sec *sec;

                thread->t_flags &= ~SVC_SIGNAL;
                sec_process_ctx_list();
again:
                /* go through the sec list and do gc on each sec.
                 * FIXME: we iterate through the whole list on every pass,
                 * which is not optimal; a balanced binary tree keyed on each
                 * sec's expiry time would be better.  Another issue is that
                 * we wake up at a fixed interval instead of according to
                 * each sec's next expiry time. */
                mutex_down(&sec_gc_mutex);
                list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                        /* if someone is waiting to be deleted, let it
                         * proceed as soon as possible. */
                        if (atomic_read(&sec_gc_wait_del)) {
                                CWARN("deletion pending, start over\n");
                                mutex_up(&sec_gc_mutex);
                                goto again;
                        }

                        sec_do_gc(sec);
                }
                mutex_up(&sec_gc_mutex);

                /* check the ctx list again before going to sleep */
                sec_process_ctx_list();

                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
                             &lwi);

                if (thread->t_flags & SVC_STOPPING) {
                        thread->t_flags &= ~SVC_STOPPING;
                        break;
                }
        }

        thread->t_flags = SVC_STOPPED;
        cfs_waitq_signal(&thread->t_ctl_waitq);
        return 0;
}
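
/*
 * Illustrative only: one way to address the FIXME above without a tree is
 * to compute the nearest ps_gc_next while holding sec_gc_mutex and sleep
 * only until then, instead of a fixed SEC_GC_INTERVAL.  This helper is a
 * hypothetical sketch, not part of the current implementation.
 */
#if 0
static cfs_duration_t sec_gc_next_timeout(void)
{
        struct ptlrpc_sec *sec;
        time_t             now  = cfs_time_current_sec();
        time_t             next = now + SEC_GC_INTERVAL;

        mutex_down(&sec_gc_mutex);
        list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                if (sec->ps_gc_next != 0 && sec->ps_gc_next < next)
                        next = sec->ps_gc_next;
        }
        mutex_up(&sec_gc_mutex);

        /* sleep at least one second to avoid busy looping */
        return cfs_time_seconds(max_t(time_t, next - now, 1));
}
#endif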

int sptlrpc_gc_start_thread(void)
{
        struct l_wait_info lwi = { 0 };
        int                rc;

        /* initialize thread control */
        memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
        cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);

        rc = cfs_kernel_thread(sec_gc_main, &sec_gc_thread,
                               CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("can't start gc thread: %d\n", rc);
                return rc;
        }

        l_wait_event(sec_gc_thread.t_ctl_waitq,
                     sec_gc_thread.t_flags & SVC_RUNNING, &lwi);
        return 0;
}

void sptlrpc_gc_stop_thread(void)
{
        struct l_wait_info lwi = { 0 };

        sec_gc_thread.t_flags = SVC_STOPPING;
        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);

        l_wait_event(sec_gc_thread.t_ctl_waitq,
                     sec_gc_thread.t_flags & SVC_STOPPED, &lwi);
}
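
/*
 * Illustrative only: the expected pairing of the two entry points above.
 * The caller names below are hypothetical; in practice the sptlrpc layer's
 * init/fini code is responsible for starting and stopping the GC thread
 * exactly once.
 */
#if 0
static int my_sptlrpc_module_init(void)
{
        int rc;

        rc = sptlrpc_gc_start_thread();
        if (rc)
                return rc;

        /* ... other sptlrpc initialization ... */
        return 0;
}

static void my_sptlrpc_module_fini(void)
{
        /* ... other sptlrpc cleanup ... */
        sptlrpc_gc_stop_thread();
}
#endif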

#else /* !__KERNEL__ */

void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
}
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
}
int sptlrpc_gc_start_thread(void)
{
        return 0;
}
void sptlrpc_gc_stop_thread(void)
{
}

#endif /* __KERNEL__ */