X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_keyring.c;h=465e714211cc2f4508ddcf2e23b396efe2ab9355;hb=09803193a151902acc39720946b831b90655c4a8;hp=23a684ef0362a36d1e2555fd303dede122ce57d5;hpb=de3c3fdf2597ba110a5247e271470b5dd626df75;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/gss/gss_keyring.c b/lustre/ptlrpc/gss/gss_keyring.c index 23a684ef..465e714 100644 --- a/lustre/ptlrpc/gss/gss_keyring.c +++ b/lustre/ptlrpc/gss/gss_keyring.c @@ -1,23 +1,41 @@ /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- * vim:expandtab:shiftwidth=8:tabstop=8: * - * Copyright (C) 2007 Cluster File Systems, Inc. - * Author: Eric Mei + * GPL HEADER START * - * This file is part of Lustre, http://www.lustre.org. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * Lustre is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. * - * Lustre is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Use is subject to license terms. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * lustre/ptlrpc/gss/gss_keyring.c + * + * Author: Eric Mei */ #ifndef EXPORT_SYMTAB @@ -59,10 +77,6 @@ static struct key_type gss_key_type; static int sec_install_rctx_kr(struct ptlrpc_sec *sec, struct ptlrpc_svc_ctx *svc_ctx); -#ifndef task_aux -#define task_aux(tsk) (tsk) -#endif - /* * the timeout is only for the case that upcall child process die abnormally. * in any other cases it should finally update kernel key. @@ -85,10 +99,10 @@ static int sec_install_rctx_kr(struct ptlrpc_sec *sec, tsk->comm, tsk->pid, tsk->uid, tsk->fsuid, \ tsk->parent->comm, tsk->parent->pid, \ tsk->parent->uid, tsk->parent->fsuid, \ - task_aux(tsk)->request_key_auth ? \ - task_aux(tsk)->request_key_auth->serial : 0, \ - task_aux(tsk)->thread_keyring ? \ - task_aux(tsk)->thread_keyring->serial : 0, \ + tsk->request_key_auth ? \ + tsk->request_key_auth->serial : 0, \ + tsk->thread_keyring ? \ + tsk->thread_keyring->serial : 0, \ tsk->signal->process_keyring ? 
\ tsk->signal->process_keyring->serial : 0, \ tsk->signal->session_keyring ? \ @@ -97,7 +111,7 @@ static int sec_install_rctx_kr(struct ptlrpc_sec *sec, tsk->user->uid_keyring->serial : 0, \ tsk->user->session_keyring ? \ tsk->user->session_keyring->serial : 0, \ - task_aux(tsk)->jit_keyring \ + tsk->jit_keyring \ ); \ } @@ -114,14 +128,14 @@ static int sec_install_rctx_kr(struct ptlrpc_sec *sec, static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr) { #ifdef HAVE_KEYRING_UPCALL_SERIALIZED - mutex_lock(&gsec_kr->gsk_uc_lock); + cfs_mutex_lock(&gsec_kr->gsk_uc_lock); #endif } static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr) { #ifdef HAVE_KEYRING_UPCALL_SERIALIZED - mutex_unlock(&gsec_kr->gsk_uc_lock); + cfs_mutex_unlock(&gsec_kr->gsk_uc_lock); #endif } @@ -141,7 +155,6 @@ static void ctx_upcall_timeout_kr(unsigned long data) cli_ctx_expire(ctx); key_revoke_locked(key); - sptlrpc_cli_ctx_wakeup(ctx); } static @@ -153,7 +166,7 @@ void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout) LASSERT(timer); CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout); - timeout = timeout * HZ + cfs_time_current(); + timeout = timeout * CFS_HZ + cfs_time_current(); init_timer(timer); timer->expires = timeout; @@ -211,8 +224,8 @@ struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec, } ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT; - clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags); - atomic_inc(&ctx->cc_refcount); /* for the caller */ + cfs_clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags); + cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */ return ctx; } @@ -226,9 +239,9 @@ static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx) /* at this time the association with key has been broken. */ LASSERT(sec); - LASSERT(atomic_read(&sec->ps_refcount) > 0); - LASSERT(atomic_read(&sec->ps_nctx) > 0); - LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0); + LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0); + LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0); + LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0); LASSERT(gctx_kr->gck_key == NULL); ctx_clear_timer_kr(ctx); @@ -239,7 +252,7 @@ static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx) OBD_FREE_PTR(gctx_kr); - atomic_dec(&sec->ps_nctx); + cfs_atomic_dec(&sec->ps_nctx); sptlrpc_sec_put(sec); } @@ -248,16 +261,16 @@ static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync) if (sync) { ctx_destroy_kr(ctx); } else { - atomic_inc(&ctx->cc_refcount); + cfs_atomic_inc(&ctx->cc_refcount); sptlrpc_gc_add_ctx(ctx); } } static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync) { - LASSERT(atomic_read(&ctx->cc_refcount) > 0); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); - if (atomic_dec_and_test(&ctx->cc_refcount)) + if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) ctx_release_kr(ctx, sync); } @@ -275,16 +288,16 @@ static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync) * - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key */ -static inline void spin_lock_if(spinlock_t *lock, int condition) +static inline void spin_lock_if(cfs_spinlock_t *lock, int condition) { if (condition) - spin_lock(lock); + cfs_spin_lock(lock); } -static inline void spin_unlock_if(spinlock_t *lock, int condition) +static inline void spin_unlock_if(cfs_spinlock_t *lock, int condition) { if (condition) - spin_unlock(lock); + cfs_spin_unlock(lock); } static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked) @@ -292,14 +305,14 @@ static void 
ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked) struct ptlrpc_sec *sec = ctx->cc_sec; struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec); - LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)); - LASSERT(atomic_read(&ctx->cc_refcount) > 0); + LASSERT(!cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); spin_lock_if(&sec->ps_lock, !locked); - atomic_inc(&ctx->cc_refcount); - set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags); - hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist); + cfs_atomic_inc(&ctx->cc_refcount); + cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags); + cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist); if (is_root) gsec_kr->gsk_root_ctx = ctx; @@ -319,7 +332,7 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked) struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec); /* if hashed bit has gone, leave the job to somebody who is doing it */ - if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0) + if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0) return 0; /* drop ref inside spin lock to prevent race with other operations */ @@ -327,8 +340,8 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked) if (gsec_kr->gsk_root_ctx == ctx) gsec_kr->gsk_root_ctx = NULL; - hlist_del_init(&ctx->cc_cache); - atomic_dec(&ctx->cc_refcount); + cfs_hlist_del_init(&ctx->cc_cache); + cfs_atomic_dec(&ctx->cc_refcount); spin_unlock_if(&sec->ps_lock, !locked); @@ -341,14 +354,14 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked) */ static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx) { - LASSERT(atomic_read(&ctx->cc_refcount) > 0); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); LASSERT(atomic_read(&key->usage) > 0); LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL); LASSERT(key->payload.data == NULL); /* at this time context may or may not in list. */ key_get(key); - atomic_inc(&ctx->cc_refcount); + cfs_atomic_inc(&ctx->cc_refcount); ctx2gctx_keyring(ctx)->gck_key = key; key->payload.data = ctx; } @@ -360,7 +373,7 @@ static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx) static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx) { LASSERT(key->payload.data == ctx); - LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0); + LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0); /* must revoke the key, or others may treat it as newly created */ key_revoke_locked(key); @@ -431,14 +444,14 @@ static void kill_key_locked(struct key *key) /* * caller should hold one ref on contexts in freelist. */ -static void dispose_ctx_list_kr(struct hlist_head *freelist) +static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist) { - struct hlist_node *pos, *next; + cfs_hlist_node_t *pos, *next; struct ptlrpc_cli_ctx *ctx; struct gss_cli_ctx *gctx; - hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) { - hlist_del_init(&ctx->cc_cache); + cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) { + cfs_hlist_del_init(&ctx->cc_cache); /* reverse ctx: update current seq to buddy svcctx if exist. 
* ideally this should be done at gss_cli_ctx_finalize(), but @@ -452,7 +465,7 @@ static void dispose_ctx_list_kr(struct hlist_head *freelist) if (!rawobj_empty(&gctx->gc_svc_handle) && sec_is_reverse(gctx->gc_base.cc_sec)) { gss_svc_upcall_update_sequence(&gctx->gc_svc_handle, - (__u32) atomic_read(&gctx->gc_seq)); + (__u32) cfs_atomic_read(&gctx->gc_seq)); } /* we need to wakeup waiting reqs here. the context might @@ -475,18 +488,19 @@ struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec) struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec); struct ptlrpc_cli_ctx *ctx = NULL; - spin_lock(&sec->ps_lock); + cfs_spin_lock(&sec->ps_lock); ctx = gsec_kr->gsk_root_ctx; if (ctx == NULL && unlikely(sec_is_reverse(sec))) { - struct hlist_node *node; + cfs_hlist_node_t *node; struct ptlrpc_cli_ctx *tmp; /* reverse ctx, search root ctx in list, choose the one * with shortest expire time, which is most possibly have * an established peer ctx at client side. */ - hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, cc_cache) { + cfs_hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, + cc_cache) { if (ctx == NULL || ctx->cc_expire == 0 || ctx->cc_expire > tmp->cc_expire) { ctx = tmp; @@ -497,12 +511,12 @@ struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec) } if (ctx) { - LASSERT(atomic_read(&ctx->cc_refcount) > 0); - LASSERT(!hlist_empty(&gsec_kr->gsk_clist)); - atomic_inc(&ctx->cc_refcount); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); + LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist)); + cfs_atomic_inc(&ctx->cc_refcount); } - spin_unlock(&sec->ps_lock); + cfs_spin_unlock(&sec->ps_lock); return ctx; } @@ -515,19 +529,19 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec, struct key *key) { struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec); - struct hlist_node *hnode; + cfs_hlist_node_t *hnode; struct ptlrpc_cli_ctx *ctx; cfs_time_t now; ENTRY; LASSERT(sec_is_reverse(sec)); - spin_lock(&sec->ps_lock); + cfs_spin_lock(&sec->ps_lock); now = cfs_time_current_sec(); /* set all existing ctxs short expiry */ - hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) { + cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) { if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) { ctx->cc_early_expire = 1; ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE; @@ -543,7 +557,7 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec, if (key) bind_key_ctx(key, new_ctx); - spin_unlock(&sec->ps_lock); + cfs_spin_unlock(&sec->ps_lock); } static void construct_key_desc(void *buf, int bufsize, @@ -571,9 +585,9 @@ struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp, CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist); gsec_kr->gsk_root_ctx = NULL; - mutex_init(&gsec_kr->gsk_root_uc_lock); + cfs_mutex_init(&gsec_kr->gsk_root_uc_lock); #ifdef HAVE_KEYRING_UPCALL_SERIALIZED - mutex_init(&gsec_kr->gsk_uc_lock); + cfs_mutex_init(&gsec_kr->gsk_uc_lock); #endif if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring, @@ -601,7 +615,7 @@ void gss_sec_destroy_kr(struct ptlrpc_sec *sec) CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec); - LASSERT(hlist_empty(&gsec_kr->gsk_clist)); + LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist)); LASSERT(gsec_kr->gsk_root_ctx == NULL); gss_sec_destroy_common(gsec); @@ -630,10 +644,10 @@ static void request_key_unlink(struct key *key) struct task_struct *tsk = current; struct key *ring; - switch (task_aux(tsk)->jit_keyring) { + switch (tsk->jit_keyring) { case KEY_REQKEY_DEFL_DEFAULT: case 
KEY_REQKEY_DEFL_THREAD_KEYRING: - ring = key_get(task_aux(tsk)->thread_keyring); + ring = key_get(tsk->thread_keyring); if (ring) break; case KEY_REQKEY_DEFL_PROCESS_KEYRING: @@ -699,7 +713,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec, * the root upcall lock, make sure nobody else populated new root * context after last check. */ if (is_root) { - mutex_lock(&gsec_kr->gsk_root_uc_lock); + cfs_mutex_lock(&gsec_kr->gsk_root_uc_lock); ctx = sec_lookup_root_ctx_kr(sec); if (ctx) @@ -737,7 +751,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec, co_flags, import_to_gss_svc(imp), imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name); - CDEBUG(D_SEC, "requesting key for %s", desc); + CDEBUG(D_SEC, "requesting key for %s\n", desc); keyring_upcall_lock(gsec_kr); key = request_key(&gss_key_type, desc, coinfo); @@ -749,7 +763,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec, CERROR("failed request key: %ld\n", PTR_ERR(key)); goto out; } - CDEBUG(D_SEC, "obtained key %08x for %s", key->serial, desc); + CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc); /* once payload.data was pointed to a ctx, it never changes until * we de-associate them; but parallel request_key() may return @@ -760,13 +774,13 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec, if (likely(key->payload.data != NULL)) { ctx = key->payload.data; - LASSERT(atomic_read(&ctx->cc_refcount) >= 1); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 1); LASSERT(ctx2gctx_keyring(ctx)->gck_key == key); LASSERT(atomic_read(&key->usage) >= 2); /* simply take a ref and return. it's upper layer's * responsibility to detect & replace dead ctx. */ - atomic_inc(&ctx->cc_refcount); + cfs_atomic_inc(&ctx->cc_refcount); } else { /* pre initialization with a cli_ctx. this can't be done in * key_instantiate() because we'v no enough information @@ -797,7 +811,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec, key_put(key); out: if (is_root) - mutex_unlock(&gsec_kr->gsk_root_uc_lock); + cfs_mutex_unlock(&gsec_kr->gsk_root_uc_lock); RETURN(ctx); } @@ -806,8 +820,8 @@ void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx, int sync) { - LASSERT(atomic_read(&sec->ps_refcount) > 0); - LASSERT(atomic_read(&ctx->cc_refcount) == 0); + LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0); ctx_release_kr(ctx, sync); } @@ -865,46 +879,46 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, int grace, int force) { struct gss_sec_keyring *gsec_kr; - struct hlist_head freelist = CFS_HLIST_HEAD_INIT; - struct hlist_node *pos, *next; + cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT; + cfs_hlist_node_t *pos, *next; struct ptlrpc_cli_ctx *ctx; ENTRY; gsec_kr = sec2gsec_keyring(sec); - spin_lock(&sec->ps_lock); - hlist_for_each_entry_safe(ctx, pos, next, - &gsec_kr->gsk_clist, cc_cache) { - LASSERT(atomic_read(&ctx->cc_refcount) > 0); + cfs_spin_lock(&sec->ps_lock); + cfs_hlist_for_each_entry_safe(ctx, pos, next, + &gsec_kr->gsk_clist, cc_cache) { + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); if (uid != -1 && uid != ctx->cc_vcred.vc_uid) continue; /* at this moment there's at least 2 base reference: * key association and in-list. 
*/ - if (atomic_read(&ctx->cc_refcount) > 2) { + if (cfs_atomic_read(&ctx->cc_refcount) > 2) { if (!force) continue; CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n", ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), - atomic_read(&ctx->cc_refcount) - 2); + cfs_atomic_read(&ctx->cc_refcount) - 2); } - set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags); + cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags); if (!grace) - clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags); + cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags); - atomic_inc(&ctx->cc_refcount); + cfs_atomic_inc(&ctx->cc_refcount); if (ctx_unlist_kr(ctx, 1)) { - hlist_add_head(&ctx->cc_cache, &freelist); + cfs_hlist_add_head(&ctx->cc_cache, &freelist); } else { - LASSERT(atomic_read(&ctx->cc_refcount) >= 2); - atomic_dec(&ctx->cc_refcount); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2); + cfs_atomic_dec(&ctx->cc_refcount); } } - spin_unlock(&sec->ps_lock); + cfs_spin_unlock(&sec->ps_lock); dispose_ctx_list_kr(&freelist); EXIT; @@ -918,7 +932,8 @@ int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec, ENTRY; CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n", - sec, atomic_read(&sec->ps_refcount), atomic_read(&sec->ps_nctx), + sec, cfs_atomic_read(&sec->ps_refcount), + cfs_atomic_read(&sec->ps_nctx), uid, grace, force); if (uid != -1 && uid != 0) @@ -933,29 +948,29 @@ static void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec) { struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec); - struct hlist_head freelist = CFS_HLIST_HEAD_INIT; - struct hlist_node *pos, *next; + cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT; + cfs_hlist_node_t *pos, *next; struct ptlrpc_cli_ctx *ctx; ENTRY; CWARN("running gc\n"); - spin_lock(&sec->ps_lock); - hlist_for_each_entry_safe(ctx, pos, next, - &gsec_kr->gsk_clist, cc_cache) { - LASSERT(atomic_read(&ctx->cc_refcount) > 0); + cfs_spin_lock(&sec->ps_lock); + cfs_hlist_for_each_entry_safe(ctx, pos, next, + &gsec_kr->gsk_clist, cc_cache) { + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); - atomic_inc(&ctx->cc_refcount); + cfs_atomic_inc(&ctx->cc_refcount); if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) { - hlist_add_head(&ctx->cc_cache, &freelist); + cfs_hlist_add_head(&ctx->cc_cache, &freelist); CWARN("unhashed ctx %p\n", ctx); } else { - LASSERT(atomic_read(&ctx->cc_refcount) >= 2); - atomic_dec(&ctx->cc_refcount); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2); + cfs_atomic_dec(&ctx->cc_refcount); } } - spin_unlock(&sec->ps_lock); + cfs_spin_unlock(&sec->ps_lock); dispose_ctx_list_kr(&freelist); EXIT; @@ -966,14 +981,14 @@ static int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq) { struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec); - struct hlist_node *pos, *next; + cfs_hlist_node_t *pos, *next; struct ptlrpc_cli_ctx *ctx; struct gss_cli_ctx *gctx; time_t now = cfs_time_current_sec(); ENTRY; - spin_lock(&sec->ps_lock); - hlist_for_each_entry_safe(ctx, pos, next, + cfs_spin_lock(&sec->ps_lock); + cfs_hlist_for_each_entry_safe(ctx, pos, next, &gsec_kr->gsk_clist, cc_cache) { struct key *key; char flags_str[40]; @@ -995,11 +1010,11 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq) "seq %d, win %u, key %08x(ref %d), " "hdl "LPX64":"LPX64", mech: %s\n", ctx, ctx->cc_vcred.vc_uid, - atomic_read(&ctx->cc_refcount), + cfs_atomic_read(&ctx->cc_refcount), ctx->cc_expire, ctx->cc_expire ? ctx->cc_expire - now : 0, flags_str, - atomic_read(&gctx->gc_seq), + cfs_atomic_read(&gctx->gc_seq), gctx->gc_win, key ? key->serial : 0, key ? 
atomic_read(&key->usage) : 0, @@ -1007,7 +1022,7 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq) gss_handle_to_u64(&gctx->gc_svc_handle), mech); } - spin_unlock(&sec->ps_lock); + cfs_spin_unlock(&sec->ps_lock); RETURN(0); } @@ -1026,7 +1041,7 @@ int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx) static int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx) { - LASSERT(atomic_read(&ctx->cc_refcount) > 0); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); LASSERT(ctx->cc_sec); if (cli_ctx_check_death(ctx)) { @@ -1042,7 +1057,7 @@ int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx) static void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace) { - LASSERT(atomic_read(&ctx->cc_refcount) > 0); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); LASSERT(ctx->cc_sec); cli_ctx_expire(ctx); @@ -1197,7 +1212,7 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen) ENTRY; if (data != NULL || datalen != 0) { - CERROR("invalid: data %p, len %d\n", data, datalen); + CERROR("invalid: data %p, len %lu\n", data, (long)datalen); RETURN(-EINVAL); } @@ -1218,7 +1233,9 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen) */ LASSERT(cfs_current()->signal->session_keyring); + cfs_lockdep_off(); rc = key_link(cfs_current()->signal->session_keyring, key); + cfs_lockdep_on(); if (unlikely(rc)) { CERROR("failed to link key %08x to keyring %08x: %d\n", key->serial, @@ -1240,21 +1257,22 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen) struct ptlrpc_cli_ctx *ctx = key->payload.data; struct gss_cli_ctx *gctx; rawobj_t tmpobj = RAWOBJ_EMPTY; + __u32 datalen32 = (__u32) datalen; int rc; ENTRY; if (data == NULL || datalen == 0) { - CWARN("invalid: data %p, len %d\n", data, datalen); + CWARN("invalid: data %p, len %lu\n", data, (long)datalen); RETURN(-EINVAL); } - /* there's a race between userspace parent - child processes. if - * child finish negotiation too fast and call kt_update(), the ctx + /* if upcall finished negotiation too fast (mostly likely because + * of local error happened) and call kt_update(), the ctx * might be still NULL. but the key will finally be associate * with a context, or be revoked. if key status is fine, return * -EAGAIN to allow userspace sleep a while and call again. */ if (ctx == NULL) { - CWARN("race in userspace. 
key %p(%x) flags %lx\n", + CDEBUG(D_SEC, "update too soon: key %p(%x) flags %lx\n", key, key->serial, key->flags); rc = key_validate(key); @@ -1264,7 +1282,7 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen) RETURN(rc); } - LASSERT(atomic_read(&ctx->cc_refcount) > 0); + LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0); LASSERT(ctx->cc_sec); ctx_clear_timer_kr(ctx); @@ -1272,14 +1290,13 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen) /* don't proceed if already refreshed */ if (cli_ctx_is_refreshed(ctx)) { CWARN("ctx already done refresh\n"); - sptlrpc_cli_ctx_wakeup(ctx); RETURN(0); } sptlrpc_cli_ctx_get(ctx); gctx = ctx2gctx(ctx); - rc = buffer_extract_bytes(&data, &datalen, &gctx->gc_win, + rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win, sizeof(gctx->gc_win)); if (rc) { CERROR("failed extract seq_win\n"); @@ -1289,14 +1306,14 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen) if (gctx->gc_win == 0) { __u32 nego_rpc_err, nego_gss_err; - rc = buffer_extract_bytes(&data, &datalen, &nego_rpc_err, + rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err, sizeof(nego_rpc_err)); if (rc) { CERROR("failed to extrace rpc rc\n"); goto out; } - rc = buffer_extract_bytes(&data, &datalen, &nego_gss_err, + rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err, sizeof(nego_gss_err)); if (rc) { CERROR("failed to extrace gss rc\n"); @@ -1309,13 +1326,13 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen) rc = nego_rpc_err ? nego_rpc_err : -EACCES; } else { rc = rawobj_extract_local_alloc(&gctx->gc_handle, - (__u32 **) &data, &datalen); + (__u32 **) &data, &datalen32); if (rc) { CERROR("failed extract handle\n"); goto out; } - rc = rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen); + rc = rawobj_extract_local(&tmpobj, (__u32 **) &data,&datalen32); if (rc) { CERROR("failed extract mech\n"); goto out; @@ -1343,11 +1360,9 @@ out: cli_ctx_expire(ctx); if (rc != -ERESTART) - set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags); + cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags); } - sptlrpc_cli_ctx_wakeup(ctx); - /* let user space think it's a success */ sptlrpc_cli_ctx_put(ctx, 1); RETURN(0); @@ -1429,6 +1444,7 @@ static struct ptlrpc_sec_sops gss_sec_keyring_sops = { .authorize = gss_svc_authorize, .free_rs = gss_svc_free_rs, .free_ctx = gss_svc_free_ctx, + .prep_bulk = gss_svc_prep_bulk, .unwrap_bulk = gss_svc_unwrap_bulk, .wrap_bulk = gss_svc_wrap_bulk, .install_rctx = gss_svc_install_rctx_kr,
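
Notes:

Most of the mechanical churn in this patch swaps bare Linux primitives
(atomic_*, spin_*, mutex_*, hlist_*, HZ) for their libcfs cfs_*
counterparts, so the file goes through the portability layer instead of
calling the kernel API directly.  On Linux the wrappers are expected to
be thin aliases; the sketch below shows the kind of mapping assumed
here (illustrative definitions, not the verbatim libcfs headers):

    #include <linux/spinlock.h>
    #include <linux/mutex.h>
    #include <linux/list.h>
    #include <linux/bitops.h>
    #include <asm/atomic.h>

    /* illustrative only -- the real definitions live in libcfs */
    typedef spinlock_t        cfs_spinlock_t;
    typedef struct hlist_head cfs_hlist_head_t;
    typedef struct hlist_node cfs_hlist_node_t;

    #define cfs_spin_lock(l)            spin_lock(l)
    #define cfs_spin_unlock(l)          spin_unlock(l)
    #define cfs_mutex_lock(m)           mutex_lock(m)
    #define cfs_mutex_unlock(m)         mutex_unlock(m)
    #define cfs_atomic_read(a)          atomic_read(a)
    #define cfs_atomic_inc(a)           atomic_inc(a)
    #define cfs_atomic_dec(a)           atomic_dec(a)
    #define cfs_atomic_dec_and_test(a)  atomic_dec_and_test(a)
    #define cfs_set_bit(b, p)           set_bit(b, p)
    #define cfs_clear_bit(b, p)         clear_bit(b, p)
    #define cfs_test_bit(b, p)          test_bit(b, p)
    #define cfs_hlist_empty(h)          hlist_empty(h)
    #define cfs_hlist_add_head(n, h)    hlist_add_head(n, h)
    #define cfs_hlist_del_init(n)       hlist_del_init(n)
    #define CFS_HZ                      HZ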
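
The upcall watchdog (ctx_start_timer_kr()/ctx_upcall_timeout_kr())
exists only for the case where the gssd child dies without ever
instantiating the key; when the timer fires it expires the context and
revokes the key so waiting requests unblock.  A self-contained sketch
of the pattern, using the pre-timer_setup() timer API this code
targets (the callback and helper names here are illustrative):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void upcall_timeout_cb(unsigned long data)
    {
            /* the real callback expires the ctx and revokes the key */
    }

    static void start_upcall_timer(struct timer_list *timer,
                                   long timeout_sec, unsigned long data)
    {
            init_timer(timer);
            /* cfs_time_current() is jiffies; CFS_HZ is HZ on Linux */
            timer->expires  = jiffies + timeout_sec * HZ;
            timer->data     = data;
            timer->function = upcall_timeout_cb;
            add_timer(timer);
    }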
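
gss_kt_update() is the downcall half of the negotiation: gssd pushes
the result back with keyctl_update(), and the payload is consumed in
order with buffer_extract_bytes()/rawobj_extract_local().  The implied
wire layout is sketched below (field names are descriptive only; a
rawobj is assumed to be a __u32 length followed by that many bytes):

    /*
     * __u32 seq_win;                negotiated sequence window
     * if (seq_win == 0) {           negotiation failed:
     *         __u32 rpc_err;        rpc-level status
     *         __u32 gss_err;        gss-level status
     * } else {                      negotiation succeeded:
     *         __u32 handle_len;     gc_handle rawobj
     *         __u8  handle[handle_len];
     *         __u32 mech_len;       mech name rawobj (e.g. "krb5")
     *         __u8  mech[mech_len];
     * }
     */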
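
The new local __u32 datalen32 in gss_kt_update() exists because the
extraction helpers walk the buffer through a 32-bit remaining-length
counter, while key_type.update() hands in a size_t; passing the address
of a size_t where a __u32 * is expected would be a type-punning bug on
64-bit kernels.  A sketch of the helper as its call sites imply
(signature assumed from usage, not copied from the file):

    #include <linux/string.h>
    #include <linux/errno.h>

    static int buffer_extract_bytes(const void **buf, __u32 *buflen,
                                    void *res, __u32 reslen)
    {
            if (*buflen < reslen)
                    return -EINVAL;                  /* short payload */

            memcpy(res, *buf, reslen);               /* copy field out */
            *buf     = (const char *) *buf + reslen; /* advance cursor */
            *buflen -= reslen;                       /* shrink remainder */
            return 0;
    }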