diff --git a/lustre/ptlrpc/sec_gc.c b/lustre/ptlrpc/sec_gc.c
index 296b8eb..344711c 100644
--- a/lustre/ptlrpc/sec_gc.c
+++ b/lustre/ptlrpc/sec_gc.c
@@ -1,23 +1,41 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (C) 2007 Cluster File Systems, Inc.
- * Author: Eric Mei
+ * GPL HEADER START
  *
- * This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/sec_gc.c
+ *
+ * Author: Eric Mei
  */
 
 #ifndef EXPORT_SYMTAB
@@ -27,6 +45,8 @@
 #ifndef __KERNEL__
 #include <liblustre.h>
+#else
+#include <libcfs/libcfs.h>
 #endif
 
 #include <obd_support.h>
@@ -38,28 +58,28 @@
 #ifdef __KERNEL__
 
-static DECLARE_MUTEX(sec_gc_mutex);
+static cfs_mutex_t sec_gc_mutex;
 static CFS_LIST_HEAD(sec_gc_list);
-static spinlock_t sec_gc_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t sec_gc_list_lock;
 
 static CFS_LIST_HEAD(sec_gc_ctx_list);
-static spinlock_t sec_gc_ctx_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t sec_gc_ctx_list_lock;
 
 static struct ptlrpc_thread sec_gc_thread;
-static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
+static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
 
 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
 {
-        if (!list_empty(&sec->ps_gc_list)) {
-                CERROR("sec %p(%s) already in gc list\n",
-                       sec, sec->ps_policy->sp_name);
-                return;
-        }
+        LASSERT(sec->ps_policy->sp_cops->gc_ctx);
+        LASSERT(sec->ps_gc_interval > 0);
+        LASSERT(cfs_list_empty(&sec->ps_gc_list));
+
+        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
 
-        spin_lock(&sec_gc_list_lock);
-        list_add_tail(&sec_gc_list, &sec->ps_gc_list);
-        spin_unlock(&sec_gc_list_lock);
+        cfs_spin_lock(&sec_gc_list_lock);
+        cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
+        cfs_spin_unlock(&sec_gc_list_lock);
 
         CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
@@ -67,20 +87,23 @@
 EXPORT_SYMBOL(sptlrpc_gc_add_sec);
 
 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
 {
-        if (list_empty(&sec->ps_gc_list))
+        if (cfs_list_empty(&sec->ps_gc_list))
                 return;
 
-        might_sleep();
+        cfs_might_sleep();
 
-        spin_lock(&sec_gc_list_lock);
-        list_del_init(&sec->ps_gc_list);
-        spin_unlock(&sec_gc_list_lock);
+        /* signal before list_del to make iteration in the gc thread safe */
+        cfs_atomic_inc(&sec_gc_wait_del);
+
+        cfs_spin_lock(&sec_gc_list_lock);
+        cfs_list_del_init(&sec->ps_gc_list);
+        cfs_spin_unlock(&sec_gc_list_lock);
 
         /* barrier */
-        atomic_inc(&sec_gc_wait_del);
-
-        mutex_down(&sec_gc_mutex);
-        mutex_up(&sec_gc_mutex);
-        atomic_dec(&sec_gc_wait_del);
+        cfs_mutex_lock(&sec_gc_mutex);
+        cfs_mutex_unlock(&sec_gc_mutex);
+
+        cfs_atomic_dec(&sec_gc_wait_del);
 
         CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
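
The hunk above relies on an empty lock/unlock of sec_gc_mutex as a barrier: raising sec_gc_wait_del first tells the gc thread to back off, and once the deleter acquires and releases the mutex, any gc pass that could still see the unlinked sec has finished. A minimal standalone sketch of the same idiom in plain POSIX threads rather than the cfs_ wrappers; every name in it (gc_mutex, gc_wait_del, gc_walk, gc_unlink_and_wait) is illustrative, not from the patch:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t gc_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int gc_wait_del;                  /* deleters waiting */

/* gc thread side: holds gc_mutex for the whole walk */
static void gc_walk(void)
{
        pthread_mutex_lock(&gc_mutex);
        /* ... scan entries; bail out early when gc_wait_del != 0 ... */
        pthread_mutex_unlock(&gc_mutex);
}

/* deleter side: mirrors sptlrpc_gc_del_sec() above */
static void gc_unlink_and_wait(void)
{
        atomic_fetch_add(&gc_wait_del, 1);      /* ask the walker to yield */

        /* ... unlink the entry under the list spinlock ... */

        /*
         * Empty critical section as a barrier: once we acquire the
         * mutex, any walk that could still see the unlinked entry
         * has already finished.
         */
        pthread_mutex_lock(&gc_mutex);
        pthread_mutex_unlock(&gc_mutex);

        atomic_fetch_sub(&gc_wait_del, 1);
}

The barrier only works because the gc thread rechecks gc_wait_del inside its critical section, so it never starts a long walk while a deleter is waiting on the mutex.
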
@@ -88,13 +111,13 @@
 EXPORT_SYMBOL(sptlrpc_gc_del_sec);
 
 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(list_empty(&ctx->cc_gc_chain));
+        LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
 
         CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
                ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
 
-        spin_lock(&sec_gc_ctx_list_lock);
-        list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
-        spin_unlock(&sec_gc_ctx_list_lock);
+        cfs_spin_lock(&sec_gc_ctx_list_lock);
+        cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+        cfs_spin_unlock(&sec_gc_ctx_list_lock);
 
         sec_gc_thread.t_flags |= SVC_SIGNAL;
         cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
@@ -105,28 +128,29 @@ static void sec_process_ctx_list(void)
 {
         struct ptlrpc_cli_ctx *ctx;
 
-again:
-        spin_lock(&sec_gc_ctx_list_lock);
-        if (!list_empty(&sec_gc_ctx_list)) {
-                ctx = list_entry(sec_gc_ctx_list.next,
-                                 struct ptlrpc_cli_ctx, cc_gc_chain);
-                list_del_init(&ctx->cc_gc_chain);
-                spin_unlock(&sec_gc_ctx_list_lock);
+        cfs_spin_lock(&sec_gc_ctx_list_lock);
+
+        while (!cfs_list_empty(&sec_gc_ctx_list)) {
+                ctx = cfs_list_entry(sec_gc_ctx_list.next,
+                                     struct ptlrpc_cli_ctx, cc_gc_chain);
+                cfs_list_del_init(&ctx->cc_gc_chain);
+                cfs_spin_unlock(&sec_gc_ctx_list_lock);
 
                 LASSERT(ctx->cc_sec);
-                LASSERT(atomic_read(&ctx->cc_refcount) == 1);
+                LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
 
                 CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
                        ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
                 sptlrpc_cli_ctx_put(ctx, 1);
-                goto again;
+
+                cfs_spin_lock(&sec_gc_ctx_list_lock);
         }
-        spin_unlock(&sec_gc_ctx_list_lock);
+
+        cfs_spin_unlock(&sec_gc_ctx_list_lock);
 }
 
 static void sec_do_gc(struct ptlrpc_sec *sec)
 {
-        cfs_time_t now = cfs_time_current_sec();
+        LASSERT(sec->ps_policy->sp_cops->gc_ctx);
 
         if (unlikely(sec->ps_gc_next == 0)) {
                 CWARN("sec %p(%s) has 0 gc time\n",
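
The reworked sec_process_ctx_list() above drains a shared list while dropping the spinlock around each sptlrpc_cli_ctx_put(), since releasing the last reference may sleep and a spinlock must not be held across that. A rough sketch of the same drain pattern, again with illustrative names and a pthread mutex standing in for the cfs_ spinlock:

#include <pthread.h>
#include <stddef.h>

struct node {
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;                 /* singly linked for brevity */

/* free_node() stands in for sptlrpc_cli_ctx_put(); it may sleep */
extern void free_node(struct node *n);

static void drain_list(void)
{
        struct node *n;

        pthread_mutex_lock(&list_lock);
        while (head != NULL) {
                n = head;
                head = n->next;           /* unlink the first entry */

                /* drop the lock: releasing the entry may sleep */
                pthread_mutex_unlock(&list_lock);
                free_node(n);
                pthread_mutex_lock(&list_lock);
        }
        pthread_mutex_unlock(&list_lock);
}

Reacquiring the lock before re-testing the list head keeps the emptiness check race-free even when other threads add entries between iterations.
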
@@ -134,19 +158,13 @@ static void sec_do_gc(struct ptlrpc_sec *sec)
                 return;
         }
 
-        if (unlikely(sec->ps_policy->sp_cops->gc_ctx == NULL)) {
-                CWARN("sec %p(%s) is not prepared for gc\n",
-                      sec, sec->ps_policy->sp_name);
-                return;
-        }
-
         CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
-        if (time_after(sec->ps_gc_next, now))
+        if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
                 return;
 
         sec->ps_policy->sp_cops->gc_ctx(sec);
-        sec->ps_gc_next = now + sec->ps_gc_interval;
+        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
 }
 
 static int sec_gc_main(void *arg)
@@ -154,35 +172,42 @@
         struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
         struct l_wait_info lwi;
 
-        cfs_daemonize("sptlrpc_ctx_gc");
+        cfs_daemonize_ctxt("sptlrpc_gc");
 
         /* Record that the thread is running */
         thread->t_flags = SVC_RUNNING;
         cfs_waitq_signal(&thread->t_ctl_waitq);
 
         while (1) {
-                struct ptlrpc_sec *sec, *next;
+                struct ptlrpc_sec *sec;
 
                 thread->t_flags &= ~SVC_SIGNAL;
                 sec_process_ctx_list();
 again:
-                mutex_down(&sec_gc_mutex);
-                list_for_each_entry_safe(sec, next, &sec_gc_list, ps_gc_list) {
-                        /*
-                         * if someone is waiting to be deleted, let it
-                         * proceed as soon as possible.
-                         */
-                        if (atomic_read(&sec_gc_wait_del)) {
-                                CWARN("deletion pending, retry\n");
-                                mutex_up(&sec_gc_mutex);
+                /* walk the sec list and do gc on each sec.
+                 * FIXME: we iterate the whole list on every pass, which
+                 * is not optimal; a balanced binary tree ordered by each
+                 * sec's expiry time would serve better. Another issue is
+                 * that we wake up at a fixed interval instead of
+                 * according to each sec's expiry time. */
+                cfs_mutex_lock(&sec_gc_mutex);
+                cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+                        /* if someone is waiting to be deleted, let it
+                         * proceed as soon as possible. */
+                        if (cfs_atomic_read(&sec_gc_wait_del)) {
+                                CWARN("deletion pending, start over\n");
+                                cfs_mutex_unlock(&sec_gc_mutex);
                                 goto again;
                         }
 
                         sec_do_gc(sec);
                 }
-                mutex_up(&sec_gc_mutex);
+                cfs_mutex_unlock(&sec_gc_mutex);
 
-                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
+                /* check ctx list again before going to sleep */
+                sec_process_ctx_list();
+
+                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
                 l_wait_event(thread->t_ctl_waitq,
                              thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
                              &lwi);
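
sec_gc_main() above restarts its scan from the list head whenever sec_gc_wait_del is raised, because a pending sptlrpc_gc_del_sec() may unlink the very entry the scan is standing on; losing some progress is accepted so deleters never block for a whole pass. A condensed control-flow sketch of that restart pattern, with hypothetical helpers (lock, unlock, next_entry, do_gc) standing in for the real ones:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

extern atomic_int wait_del;               /* raised by deleters */
extern void lock(void);                   /* stand-ins for the gc mutex */
extern void unlock(void);
extern bool next_entry(void **pos);       /* advances *pos; false at end */
extern void do_gc(void *entry);

static void gc_pass(void)
{
again:
        lock();
        for (void *pos = NULL; next_entry(&pos); ) {
                /* a pending deleter invalidates the iteration: start over */
                if (atomic_load(&wait_del)) {
                        unlock();
                        goto again;
                }
                do_gc(pos);
        }
        unlock();
}
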
@@ -198,11 +223,15 @@
         return 0;
 }
 
-int sptlrpc_gc_start_thread(void)
+int sptlrpc_gc_init(void)
 {
         struct l_wait_info lwi = { 0 };
         int rc;
 
+        cfs_mutex_init(&sec_gc_mutex);
+        cfs_spin_lock_init(&sec_gc_list_lock);
+        cfs_spin_lock_init(&sec_gc_ctx_list_lock);
+
         /* initialize thread control */
         memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
         cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
@@ -219,7 +248,7 @@ int sptlrpc_gc_start_thread(void)
         return 0;
 }
 
-void sptlrpc_gc_stop_thread(void)
+void sptlrpc_gc_fini(void)
 {
         struct l_wait_info lwi = { 0 };
 
@@ -238,11 +267,11 @@ void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
 {
 }
-int sptlrpc_gc_start_thread(void)
+int sptlrpc_gc_init(void)
 {
         return 0;
 }
-void sptlrpc_gc_stop_thread(void)
+void sptlrpc_gc_fini(void)
 {
 }
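
The final hunks rename sptlrpc_gc_start_thread()/sptlrpc_gc_stop_thread() to sptlrpc_gc_init()/sptlrpc_gc_fini(), reflecting that the entry point now does more than start a thread: it must initialize the mutex and spinlocks at runtime once the static DECLARE_MUTEX/SPIN_LOCK_UNLOCKED initializers are gone. A sketch of the expected call pattern from a caller's point of view; the outer wrapper names are hypothetical, only sptlrpc_gc_init()/sptlrpc_gc_fini() come from the patch:

int sptlrpc_gc_init(void);
void sptlrpc_gc_fini(void);

/* hypothetical caller, e.g. a subsystem setup path */
static int my_sec_setup(void)
{
        int rc;

        /* initializes locks, then starts the "sptlrpc_gc" thread */
        rc = sptlrpc_gc_init();
        if (rc != 0)
                return rc;        /* gc thread failed to start */

        return 0;
}

static void my_sec_teardown(void)
{
        /* sets SVC_STOPPING and waits for the gc thread to exit */
        sptlrpc_gc_fini();
}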