/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ptlrpc/sec_gc.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
36 #define DEBUG_SUBSYSTEM S_SEC
38 #include <linux/workqueue.h>
39 #include <libcfs/libcfs.h>
41 #include <obd_support.h>
42 #include <obd_class.h>
43 #include <lustre_net.h>
44 #include <lustre_sec.h>
46 #include "ptlrpc_internal.h"
/* how often the GC work re-arms itself: every 30 minutes */
#define SEC_GC_INTERVAL (30 * 60)

/* serializes the GC walk of sec_gc_list against sptlrpc_gc_del_sec() */
static DEFINE_MUTEX(sec_gc_mutex);
/* protects sec_gc_list */
static DEFINE_SPINLOCK(sec_gc_list_lock);
/* protects sec_gc_ctx_list */
static DEFINE_SPINLOCK(sec_gc_ctx_list_lock);
/* all ptlrpc_sec instances registered for periodic GC */
static LIST_HEAD(sec_gc_list);
/* dead client contexts handed over for deferred release */
static LIST_HEAD(sec_gc_ctx_list);

/* non-zero while a deleter is waiting in sptlrpc_gc_del_sec();
 * raised *before* the list_del so the GC walk can bail out early */
static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
58 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
60 LASSERT(sec->ps_policy->sp_cops->gc_ctx);
61 LASSERT(sec->ps_gc_interval > 0);
62 LASSERT(list_empty(&sec->ps_gc_list));
64 sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
66 spin_lock(&sec_gc_list_lock);
67 list_add_tail(&sec->ps_gc_list, &sec_gc_list);
68 spin_unlock(&sec_gc_list_lock);
70 CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
73 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
75 if (list_empty(&sec->ps_gc_list))
78 /* signal before list_del to make iteration in gc thread safe */
79 atomic_inc(&sec_gc_wait_del);
81 spin_lock(&sec_gc_list_lock);
82 list_del_init(&sec->ps_gc_list);
83 spin_unlock(&sec_gc_list_lock);
86 mutex_lock(&sec_gc_mutex);
87 mutex_unlock(&sec_gc_mutex);
89 atomic_dec(&sec_gc_wait_del);
91 CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
/* the GC runs as delayed work (sec_gc_main) on the system workqueue */
static void sec_gc_main(struct work_struct *ws);
static DECLARE_DELAYED_WORK(sec_gc_work, sec_gc_main);
97 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
99 LASSERT(list_empty(&ctx->cc_gc_chain));
101 CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
102 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
103 spin_lock(&sec_gc_ctx_list_lock);
104 list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
105 spin_unlock(&sec_gc_ctx_list_lock);
107 mod_delayed_work(system_wq, &sec_gc_work, 0);
109 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
111 static void sec_process_ctx_list(void)
113 struct ptlrpc_cli_ctx *ctx;
115 spin_lock(&sec_gc_ctx_list_lock);
117 while ((ctx = list_first_entry_or_null(&sec_gc_ctx_list,
118 struct ptlrpc_cli_ctx,
119 cc_gc_chain)) != NULL) {
120 list_del_init(&ctx->cc_gc_chain);
121 spin_unlock(&sec_gc_ctx_list_lock);
123 LASSERT(ctx->cc_sec);
124 LASSERT(atomic_read(&ctx->cc_refcount) == 1);
125 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
126 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
127 sptlrpc_cli_ctx_put(ctx, 1);
129 spin_lock(&sec_gc_ctx_list_lock);
132 spin_unlock(&sec_gc_ctx_list_lock);
135 static void sec_do_gc(struct ptlrpc_sec *sec)
137 LASSERT(sec->ps_policy->sp_cops->gc_ctx);
139 if (unlikely(sec->ps_gc_next == 0)) {
140 CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
141 sec, sec->ps_policy->sp_name);
145 CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
147 if (sec->ps_gc_next > ktime_get_real_seconds())
150 sec->ps_policy->sp_cops->gc_ctx(sec);
151 sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
154 static void sec_gc_main(struct work_struct *ws)
156 struct ptlrpc_sec *sec;
158 sec_process_ctx_list();
161 * go through sec list do gc.
162 * FIXME here we iterate through the whole list each time which
163 * is not optimal. we perhaps want to use balanced binary tree
164 * to trace each sec as order of expiry time.
165 * another issue here is we wakeup as fixed interval instead of
166 * according to each sec's expiry time
168 mutex_lock(&sec_gc_mutex);
169 list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
171 * if someone is waiting to be deleted, let it
172 * proceed as soon as possible.
174 if (atomic_read(&sec_gc_wait_del)) {
175 CDEBUG(D_SEC, "deletion pending, start over\n");
176 mutex_unlock(&sec_gc_mutex);
182 mutex_unlock(&sec_gc_mutex);
184 /* check ctx list again before sleep */
185 sec_process_ctx_list();
186 schedule_delayed_work(&sec_gc_work, cfs_time_seconds(SEC_GC_INTERVAL));
189 int sptlrpc_gc_init(void)
191 schedule_delayed_work(&sec_gc_work, cfs_time_seconds(SEC_GC_INTERVAL));
195 void sptlrpc_gc_fini(void)
197 cancel_delayed_work_sync(&sec_gc_work);