X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_keyring.c;h=204cf7cb989acb00a32120fb98b5212c07259a1b;hp=c90ff98e3b5d5c3752e22a94823775058a875898;hb=97301a491d46cf2cf829185b52b8690287ab7ed6;hpb=e3a7c58aebafce40323db54bf6056029e5af4a70;ds=sidebyside

diff --git a/lustre/ptlrpc/gss/gss_keyring.c b/lustre/ptlrpc/gss/gss_keyring.c
index c90ff98..204cf7c 100644
--- a/lustre/ptlrpc/gss/gss_keyring.c
+++ b/lustre/ptlrpc/gss/gss_keyring.c
@@ -15,17 +15,15 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /*
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -36,11 +34,7 @@
  * Author: Eric Mei
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
 #include
 #include
 #include
@@ -49,16 +43,15 @@
 #include
 #include
 #include
+#include
 #include
 #include
-#else
-#include
-#endif
+#include
 
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -67,6 +60,10 @@
 #include "gss_internal.h"
 #include "gss_api.h"
 
+#ifdef HAVE_GET_REQUEST_KEY_AUTH
+#include
+#endif
+
 static struct ptlrpc_sec_policy gss_policy_keyring;
 static struct ptlrpc_ctx_ops gss_keyring_ctxops;
 static struct key_type gss_key_type;
@@ -77,7 +74,7 @@ static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
 /*
  * the timeout is only for the case that upcall child process die abnormally.
  * in any other cases it should finally update kernel key.
- * 
+ *
  * FIXME we'd better to incorporate the client & server side upcall timeouts
  * into the framework of Adaptive Timeouts, but we need to figure out how to
  * make sure that kernel knows the upcall processes is in-progress or died
@@ -89,50 +86,17 @@ static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
  * internal helpers                     *
  ****************************************/
 
-#define DUMP_PROCESS_KEYRINGS(tsk) \
-{ \
-        CWARN("DUMP PK: %s[%u,%u/%u](<-%s[%u,%u/%u]): " \
-              "a %d, t %d, p %d, s %d, u %d, us %d, df %d\n", \
-              tsk->comm, tsk->pid, tsk->uid, tsk->fsuid, \
-              tsk->parent->comm, tsk->parent->pid, \
-              tsk->parent->uid, tsk->parent->fsuid, \
-              tsk->request_key_auth ? \
-              tsk->request_key_auth->serial : 0, \
-              tsk->thread_keyring ? \
-              tsk->thread_keyring->serial : 0, \
-              tsk->signal->process_keyring ? \
-              tsk->signal->process_keyring->serial : 0, \
-              tsk->signal->session_keyring ? \
-              tsk->signal->session_keyring->serial : 0, \
-              tsk->user->uid_keyring ? \
-              tsk->user->uid_keyring->serial : 0, \
-              tsk->user->session_keyring ? \
-              tsk->user->session_keyring->serial : 0, \
-              tsk->jit_keyring \
-             ); \
-}
-
-#define DUMP_KEY(key) \
-{ \
-        CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n", \
-              key, key->serial, atomic_read(&key->usage), \
-              key->uid, key->gid, \
-              key->description ? key->description : "n/a" \
-             ); \
-}
-
-
 static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
 {
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        cfs_mutex_lock(&gsec_kr->gsk_uc_lock);
+	mutex_lock(&gsec_kr->gsk_uc_lock);
 #endif
 }
 
 static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
 {
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        cfs_mutex_unlock(&gsec_kr->gsk_uc_lock);
+	mutex_unlock(&gsec_kr->gsk_uc_lock);
 #endif
 }
 
@@ -141,10 +105,12 @@ static inline void key_revoke_locked(struct key *key)
 {
 	set_bit(KEY_FLAG_REVOKED, &key->flags);
 }
 
-static void ctx_upcall_timeout_kr(unsigned long data)
+static void ctx_upcall_timeout_kr(cfs_timer_cb_arg_t data)
 {
-        struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *) data;
-        struct key            *key = ctx2gctx_keyring(ctx)->gck_key;
+	struct gss_cli_ctx_keyring *gctx_kr = cfs_from_timer(gctx_kr,
+							     data, gck_timer);
+	struct ptlrpc_cli_ctx *ctx = &(gctx_kr->gck_base.gc_base);
+	struct key *key = gctx_kr->gck_key;
 
 	CWARN("ctx %p, key %p\n", ctx, key);
@@ -154,23 +120,19 @@ static void ctx_upcall_timeout_kr(unsigned long data)
 	key_revoke_locked(key);
 }
 
-static
-void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
+static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, time64_t timeout)
 {
-        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
-        struct timer_list          *timer = gctx_kr->gck_timer;
+	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
+	struct timer_list *timer = &gctx_kr->gck_timer;
 
-        LASSERT(timer);
+	LASSERT(timer);
 
-        CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
-        timeout = timeout * CFS_HZ + cfs_time_current();
+	CDEBUG(D_SEC, "ctx %p: start timer %llds\n", ctx, timeout);
 
-        init_timer(timer);
-        timer->expires = timeout;
-        timer->data = (unsigned long ) ctx;
-        timer->function = ctx_upcall_timeout_kr;
-
-        add_timer(timer);
+	cfs_timer_setup(timer, ctx_upcall_timeout_kr,
+			(unsigned long)gctx_kr, 0);
+	timer->expires = cfs_time_seconds(timeout) + jiffies;
+	add_timer(timer);
 }
 
 /*
@@ -180,95 +142,81 @@ static
 void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
 {
 	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
-        struct timer_list          *timer = gctx_kr->gck_timer;
-
-        if (timer == NULL)
-                return;
+	struct timer_list *timer = &gctx_kr->gck_timer;
 
 	CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);
 
-        gctx_kr->gck_timer = NULL;
 	del_singleshot_timer_sync(timer);
-
-        OBD_FREE_PTR(timer);
 }
 
 static
 struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
                                      struct vfs_cred *vcred)
 {
-        struct ptlrpc_cli_ctx      *ctx;
-        struct gss_cli_ctx_keyring *gctx_kr;
+	struct ptlrpc_cli_ctx *ctx;
+	struct gss_cli_ctx_keyring *gctx_kr;
 
-        OBD_ALLOC_PTR(gctx_kr);
-        if (gctx_kr == NULL)
-                return NULL;
+	OBD_ALLOC_PTR(gctx_kr);
+	if (gctx_kr == NULL)
+		return NULL;
 
-        OBD_ALLOC_PTR(gctx_kr->gck_timer);
-        if (gctx_kr->gck_timer == NULL) {
-                OBD_FREE_PTR(gctx_kr);
-                return NULL;
-        }
-        init_timer(gctx_kr->gck_timer);
+	cfs_timer_setup(&gctx_kr->gck_timer, NULL, 0, 0);
 
-        ctx = &gctx_kr->gck_base.gc_base;
+	ctx = &gctx_kr->gck_base.gc_base;
 
-        if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
-                OBD_FREE_PTR(gctx_kr->gck_timer);
-                OBD_FREE_PTR(gctx_kr);
-                return NULL;
-        }
+	if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
+		OBD_FREE_PTR(gctx_kr);
+		return NULL;
+	}
 
-        ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
-        cfs_clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
-        cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */
+	ctx->cc_expire = ktime_get_real_seconds() + KEYRING_UPCALL_TIMEOUT;
+	clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
+	atomic_inc(&ctx->cc_refcount); /* for the caller */
 
-        return ctx;
+	return ctx;
 }
 
 static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
 {
-        struct ptlrpc_sec          *sec = ctx->cc_sec;
-        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
+	struct ptlrpc_sec *sec = ctx->cc_sec;
+	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
 
-        CDEBUG(D_SEC, "destroying ctx %p\n", ctx);
+	CDEBUG(D_SEC, "destroying ctx %p\n", ctx);
 
 	/* at this time the association with key has been broken. */
 	LASSERT(sec);
-        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
-        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
-        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+	LASSERT(atomic_read(&sec->ps_refcount) > 0);
+	LASSERT(atomic_read(&sec->ps_nctx) > 0);
+	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
 	LASSERT(gctx_kr->gck_key == NULL);
 
-        ctx_clear_timer_kr(ctx);
-        LASSERT(gctx_kr->gck_timer == NULL);
+	ctx_clear_timer_kr(ctx);
 
-        if (gss_cli_ctx_fini_common(sec, ctx))
-                return;
+	if (gss_cli_ctx_fini_common(sec, ctx))
+		return;
 
-        OBD_FREE_PTR(gctx_kr);
+	OBD_FREE_PTR(gctx_kr);
 
-        cfs_atomic_dec(&sec->ps_nctx);
-        sptlrpc_sec_put(sec);
+	atomic_dec(&sec->ps_nctx);
+	sptlrpc_sec_put(sec);
 }
 
 static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
 {
-        if (sync) {
-                ctx_destroy_kr(ctx);
-        } else {
-                cfs_atomic_inc(&ctx->cc_refcount);
-                sptlrpc_gc_add_ctx(ctx);
-        }
+	if (sync) {
+		ctx_destroy_kr(ctx);
+	} else {
+		atomic_inc(&ctx->cc_refcount);
+		sptlrpc_gc_add_ctx(ctx);
+	}
 }
 
 static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
 {
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
 
-        if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
-                ctx_release_kr(ctx, sync);
+	if (atomic_dec_and_test(&ctx->cc_refcount))
+		ctx_release_kr(ctx, sync);
 }
 
 /*
@@ -285,35 +233,35 @@ static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
  * - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
  */
 
-static inline void spin_lock_if(cfs_spinlock_t *lock, int condition)
+static inline void spin_lock_if(spinlock_t *lock, int condition)
 {
-        if (condition)
-                cfs_spin_lock(lock);
+	if (condition)
+		spin_lock(lock);
 }
 
-static inline void spin_unlock_if(cfs_spinlock_t *lock, int condition)
+static inline void spin_unlock_if(spinlock_t *lock, int condition)
 {
-        if (condition)
-                cfs_spin_unlock(lock);
+	if (condition)
+		spin_unlock(lock);
 }
 
 static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
 {
-        struct ptlrpc_sec      *sec = ctx->cc_sec;
-        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+	struct ptlrpc_sec *sec = ctx->cc_sec;
+	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
 
-        LASSERT(!cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+	LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
 
-        spin_lock_if(&sec->ps_lock, !locked);
+	spin_lock_if(&sec->ps_lock, !locked);
 
-        cfs_atomic_inc(&ctx->cc_refcount);
-        cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
-        cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
-        if (is_root)
-                gsec_kr->gsk_root_ctx = ctx;
+	atomic_inc(&ctx->cc_refcount);
+	set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+	hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+	if (is_root)
+		gsec_kr->gsk_root_ctx = ctx;
 
-        spin_unlock_if(&sec->ps_lock, !locked);
+	spin_unlock_if(&sec->ps_lock, !locked);
 }
 
 /*
@@ -325,24 +273,61 @@ static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
  */
 static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
 {
-        struct ptlrpc_sec       *sec = ctx->cc_sec;
-        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
+	struct ptlrpc_sec *sec = ctx->cc_sec;
+	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
 
-        /* if hashed bit has gone, leave the job to somebody who is doing it */
-        if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
-                return 0;
+	/* if hashed bit has gone, leave the job to somebody who is doing it */
+	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
+		return 0;
 
-        /* drop ref inside spin lock to prevent race with other operations */
-        spin_lock_if(&sec->ps_lock, !locked);
+	/* drop ref inside spin lock to prevent race with other operations */
+	spin_lock_if(&sec->ps_lock, !locked);
 
-        if (gsec_kr->gsk_root_ctx == ctx)
-                gsec_kr->gsk_root_ctx = NULL;
-        cfs_hlist_del_init(&ctx->cc_cache);
-        cfs_atomic_dec(&ctx->cc_refcount);
+	if (gsec_kr->gsk_root_ctx == ctx)
+		gsec_kr->gsk_root_ctx = NULL;
+	hlist_del_init(&ctx->cc_cache);
+	atomic_dec(&ctx->cc_refcount);
 
-        spin_unlock_if(&sec->ps_lock, !locked);
+	spin_unlock_if(&sec->ps_lock, !locked);
 
-        return 1;
+	return 1;
+}
+
+/*
+ * Get specific payload. Newer kernels support 4 slots.
+ */
+static void *
+key_get_payload(struct key *key, unsigned int index)
+{
+	void *key_ptr = NULL;
+
+#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
+	key_ptr = key->payload.data[index];
+#else
+	if (!index)
+		key_ptr = key->payload.data;
+#endif
+	return key_ptr;
+}
+
+/*
+ * Set specific payload. Newer kernels support 4 slots.
+ */
+static int key_set_payload(struct key *key, unsigned int index,
+			   struct ptlrpc_cli_ctx *ctx)
+{
+	int rc = -EINVAL;
+
+#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
+	if (index < 4) {
+		key->payload.data[index] = ctx;
+#else
+	if (!index) {
+		key->payload.data = ctx;
+#endif
+		rc = 0;
+	}
+	return rc;
 }
 
 /*
@@ -351,16 +336,16 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
  */
 static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-        LASSERT(atomic_read(&key->usage) > 0);
-        LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
-        LASSERT(key->payload.data == NULL);
-
-        /* at this time context may or may not in list. */
-        key_get(key);
-        cfs_atomic_inc(&ctx->cc_refcount);
-        ctx2gctx_keyring(ctx)->gck_key = key;
-        key->payload.data = ctx;
+	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+	LASSERT(ll_read_key_usage(key) > 0);
+	LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
+	LASSERT(!key_get_payload(key, 0));
+
+	/* at this time context may or may not in list. */
+	key_get(key);
+	atomic_inc(&ctx->cc_refcount);
+	ctx2gctx_keyring(ctx)->gck_key = key;
+	LASSERT(!key_set_payload(key, 0, ctx));
 }
 
 /*
@@ -369,13 +354,13 @@ static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
  */
 static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(key->payload.data == ctx);
-        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+	LASSERT(key_get_payload(key, 0) == ctx);
+	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
 
 	/* must revoke the key, or others may treat it as newly created */
 	key_revoke_locked(key);
 
-        key->payload.data = NULL;
+	key_set_payload(key, 0, NULL);
 	ctx2gctx_keyring(ctx)->gck_key = NULL;
 
 	/* once ctx get split from key, the timer is meaningless */
@@ -395,7 +380,7 @@ static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
 	struct key *key = ctx2gctx_keyring(ctx)->gck_key;
 
 	if (key) {
-                LASSERT(key->payload.data == ctx);
+		LASSERT(key_get_payload(key, 0) == ctx);
 
 		key_get(key);
 		down_write(&key->sem);
@@ -411,7 +396,7 @@ static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
  */
 static void unbind_key_locked(struct key *key)
 {
-        struct ptlrpc_cli_ctx *ctx = key->payload.data;
+	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);
 
 	if (ctx)
 		unbind_key_ctx(key, ctx);
@@ -432,7 +417,7 @@ static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
  */
 static void kill_key_locked(struct key *key)
 {
-        struct ptlrpc_cli_ctx *ctx = key->payload.data;
+	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);
 
 	if (ctx && ctx_unlist_kr(ctx, 0))
 		unbind_key_locked(key);
@@ -441,38 +426,38 @@ static void kill_key_locked(struct key *key)
 /*
  * caller should hold one ref on contexts in freelist.
  */
-static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
+static void dispose_ctx_list_kr(struct hlist_head *freelist)
 {
-        cfs_hlist_node_t       *pos, *next;
-        struct ptlrpc_cli_ctx  *ctx;
-        struct gss_cli_ctx     *gctx;
-
-        cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
-                cfs_hlist_del_init(&ctx->cc_cache);
-
-                /* reverse ctx: update current seq to buddy svcctx if exist.
-                 * ideally this should be done at gss_cli_ctx_finalize(), but
-                 * the ctx destroy could be delayed by:
-                 *  1) ctx still has reference;
-                 *  2) ctx destroy is asynchronous;
-                 * and reverse import call inval_all_ctx() require this be done
-                 *_immediately_ otherwise newly created reverse ctx might copy
-                 * the very old sequence number from svcctx. */
-                gctx = ctx2gctx(ctx);
-                if (!rawobj_empty(&gctx->gc_svc_handle) &&
-                    sec_is_reverse(gctx->gc_base.cc_sec)) {
-                        gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
-                                        (__u32) cfs_atomic_read(&gctx->gc_seq));
-                }
-
-                /* we need to wakeup waiting reqs here. the context might
-                 * be forced released before upcall finished, then the
-                 * late-arrived downcall can't find the ctx even. */
-                sptlrpc_cli_ctx_wakeup(ctx);
-
-                unbind_ctx_kr(ctx);
-                ctx_put_kr(ctx, 0);
-        }
+	struct hlist_node __maybe_unused *pos, *next;
+	struct ptlrpc_cli_ctx *ctx;
+	struct gss_cli_ctx *gctx;
+
+	cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
+		hlist_del_init(&ctx->cc_cache);
+
+		/* reverse ctx: update current seq to buddy svcctx if exist.
+		 * ideally this should be done at gss_cli_ctx_finalize(), but
+		 * the ctx destroy could be delayed by:
+		 *  1) ctx still has reference;
+		 *  2) ctx destroy is asynchronous;
+		 * and reverse import call inval_all_ctx() require this be done
+		 * _immediately_ otherwise newly created reverse ctx might copy
+		 * the very old sequence number from svcctx. */
+		gctx = ctx2gctx(ctx);
+		if (!rawobj_empty(&gctx->gc_svc_handle) &&
+		    sec_is_reverse(gctx->gc_base.cc_sec)) {
+			gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
+				(__u32) atomic_read(&gctx->gc_seq));
+		}
+
+		/* we need to wakeup waiting reqs here. the context might
+		 * be forced released before upcall finished, then the
+		 * late-arrived downcall can't find the ctx even. */
+		sptlrpc_cli_ctx_wakeup(ctx);
+
+		unbind_ctx_kr(ctx);
+		ctx_put_kr(ctx, 0);
+	}
 }
 
 /*
@@ -482,16 +467,16 @@ static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
 static struct ptlrpc_cli_ctx *
 sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
 {
-        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
-        struct ptlrpc_cli_ctx   *ctx = NULL;
+	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+	struct ptlrpc_cli_ctx *ctx = NULL;
 
-        cfs_spin_lock(&sec->ps_lock);
+	spin_lock(&sec->ps_lock);
 
 	ctx = gsec_kr->gsk_root_ctx;
 
 	if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
-                cfs_hlist_node_t       *node;
-                struct ptlrpc_cli_ctx  *tmp;
+		struct hlist_node __maybe_unused *node;
+		struct ptlrpc_cli_ctx *tmp;
 
 		/* reverse ctx, search root ctx in list, choose the one
 		 * with shortest expire time, which is most possibly have
@@ -507,15 +492,15 @@ struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
 		}
 	}
 
-        if (ctx) {
-                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-                LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
-                cfs_atomic_inc(&ctx->cc_refcount);
-        }
+	if (ctx) {
+		LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+		LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
+		atomic_inc(&ctx->cc_refcount);
+	}
 
-        cfs_spin_unlock(&sec->ps_lock);
+	spin_unlock(&sec->ps_lock);
 
-        return ctx;
+	return ctx;
 }
 
 #define RVS_CTX_EXPIRE_NICE    (10)
@@ -525,17 +510,17 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
                                  struct ptlrpc_cli_ctx *new_ctx,
                                  struct key *key)
 {
-        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
-        cfs_hlist_node_t       *hnode;
-        struct ptlrpc_cli_ctx  *ctx;
-        cfs_time_t              now;
-        ENTRY;
+	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+	struct hlist_node __maybe_unused *hnode;
+	struct ptlrpc_cli_ctx *ctx;
+	time64_t now;
 
-        LASSERT(sec_is_reverse(sec));
+	ENTRY;
+	LASSERT(sec_is_reverse(sec));
 
-        cfs_spin_lock(&sec->ps_lock);
+	spin_lock(&sec->ps_lock);
 
-        now = cfs_time_current_sec();
+	now = ktime_get_real_seconds();
 
 	/* set all existing ctxs short expiry */
 	cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
@@ -554,7 +539,7 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
 	if (key)
 		bind_key_ctx(key, new_ctx);
 
-        cfs_spin_unlock(&sec->ps_lock);
+	spin_unlock(&sec->ps_lock);
 }
 
 static void construct_key_desc(void *buf, int bufsize,
@@ -580,11 +565,11 @@ struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
 	if (gsec_kr == NULL)
 		RETURN(NULL);
 
-        CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
+	INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
 	gsec_kr->gsk_root_ctx = NULL;
-        cfs_mutex_init(&gsec_kr->gsk_root_uc_lock);
+	mutex_init(&gsec_kr->gsk_root_uc_lock);
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        cfs_mutex_init(&gsec_kr->gsk_uc_lock);
+	mutex_init(&gsec_kr->gsk_uc_lock);
 #endif
 
 	if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
@@ -612,7 +597,7 @@ void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
 
 	CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
 
-        LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist));
+	LASSERT(hlist_empty(&gsec_kr->gsk_clist));
 	LASSERT(gsec_kr->gsk_root_ctx == NULL);
 
 	gss_sec_destroy_common(gsec);
@@ -631,46 +616,112 @@ static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
 }
 
 /*
+ * kernel 5.3: commit 0f44e4d976f96c6439da0d6717238efa4b91196e
+ * keys: Move the user and user-session keyrings to the user_namespace
+ *
+ * When lookup_user_key is available use the kernel API rather than directly
+ * accessing the uid_keyring and session_keyring via the current process
+ * credentials.
+ */
+#ifdef HAVE_LOOKUP_USER_KEY
+
+/* from Linux security/keys/internal.h: */
+#ifndef KEY_LOOKUP_FOR_UNLINK
+#define KEY_LOOKUP_FOR_UNLINK 0x04
+#endif
+
+static struct key *_user_key(key_serial_t id)
+{
+	key_ref_t ref;
+
+	might_sleep();
+	ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
+	if (IS_ERR(ref))
+		return NULL;
+	return key_ref_to_ptr(ref);
+}
+
+static inline struct key *get_user_session_keyring(const struct cred *cred)
+{
+	return _user_key(KEY_SPEC_USER_SESSION_KEYRING);
+}
+
+static inline struct key *get_user_keyring(const struct cred *cred)
+{
+	return _user_key(KEY_SPEC_USER_KEYRING);
+}
+#else
+static inline struct key *get_user_session_keyring(const struct cred *cred)
+{
+	return key_get(cred->user->session_keyring);
+}
+
+static inline struct key *get_user_keyring(const struct cred *cred)
+{
+	return key_get(cred->user->uid_keyring);
+}
+#endif
+
+/*
  * unlink request key from it's ring, which is linked during request_key().
  * sadly, we have to 'guess' which keyring it's linked to.
  *
- * FIXME this code is fragile, depend on how request_key_link() is implemented.
+ * FIXME this code is fragile, it depends on how request_key() is implemented.
  */
 static void request_key_unlink(struct key *key)
 {
-        struct task_struct *tsk = current;
-        struct key *ring;
-
-        switch (tsk->jit_keyring) {
-        case KEY_REQKEY_DEFL_DEFAULT:
-        case KEY_REQKEY_DEFL_THREAD_KEYRING:
-                ring = key_get(tsk->thread_keyring);
-                if (ring)
-                        break;
-        case KEY_REQKEY_DEFL_PROCESS_KEYRING:
-                ring = key_get(tsk->signal->process_keyring);
-                if (ring)
-                        break;
-        case KEY_REQKEY_DEFL_SESSION_KEYRING:
-                rcu_read_lock();
-                ring = key_get(rcu_dereference(tsk->signal->session_keyring));
-                rcu_read_unlock();
-                if (ring)
-                        break;
-        case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
-                ring = key_get(tsk->user->session_keyring);
-                break;
-        case KEY_REQKEY_DEFL_USER_KEYRING:
-                ring = key_get(tsk->user->uid_keyring);
-                break;
-        case KEY_REQKEY_DEFL_GROUP_KEYRING:
-        default:
-                LBUG();
-        }
-
-        LASSERT(ring);
-        key_unlink(ring, key);
-        key_put(ring);
+	const struct cred *cred = current_cred();
+	struct key *ring = NULL;
+
+	switch (cred->jit_keyring) {
+	case KEY_REQKEY_DEFL_DEFAULT:
+	case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
+#ifdef HAVE_GET_REQUEST_KEY_AUTH
+		if (cred->request_key_auth) {
+			struct request_key_auth *rka;
+			struct key *authkey = cred->request_key_auth;
+
+			down_read(&authkey->sem);
+			rka = get_request_key_auth(authkey);
+			if (!test_bit(KEY_FLAG_REVOKED, &authkey->flags))
+				ring = key_get(rka->dest_keyring);
+			up_read(&authkey->sem);
+			if (ring)
+				break;
+		}
+#endif
+		/* fall through */
+	case KEY_REQKEY_DEFL_THREAD_KEYRING:
+		ring = key_get(cred->thread_keyring);
+		if (ring)
+			break;
+		/* fallthrough */
+	case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+		ring = key_get(cred->process_keyring);
+		if (ring)
+			break;
+		/* fallthrough */
+	case KEY_REQKEY_DEFL_SESSION_KEYRING:
+		rcu_read_lock();
+		ring = key_get(rcu_dereference(cred->session_keyring));
+		rcu_read_unlock();
+		if (ring)
+			break;
+		/* fallthrough */
+	case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+		ring = get_user_session_keyring(cred);
+		break;
+	case KEY_REQKEY_DEFL_USER_KEYRING:
+		ring = get_user_keyring(cred);
+		break;
+	case KEY_REQKEY_DEFL_GROUP_KEYRING:
+	default:
+		LBUG();
+	}
+
+	LASSERT(ring);
+	key_unlink(ring, key);
+	key_put(ring);
 }
 
 static
@@ -686,7 +737,8 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
 	char desc[24];
 	char *coinfo;
 	int coinfo_size;
-        char *co_flags = "";
+	const char *sec_part_flags = "";
+	char svc_flag = '-';
 	ENTRY;
 
 	LASSERT(imp != NULL);
@@ -710,7 +762,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
 	 * the root upcall lock, make sure nobody else populated new root
 	 * context after last check. */
 	if (is_root) {
-                cfs_mutex_lock(&gsec_kr->gsk_root_uc_lock);
+		mutex_lock(&gsec_kr->gsk_root_uc_lock);
 
 		ctx = sec_lookup_root_ctx_kr(sec);
 		if (ctx)
@@ -719,50 +771,87 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
 		/* update reverse handle for root user */
 		sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();
 
-                switch (sec->ps_part) {
-                case LUSTRE_SP_MDT:
-                        co_flags = "m";
-                        break;
-                case LUSTRE_SP_OST:
-                        co_flags = "o";
-                        break;
-                case LUSTRE_SP_MGC:
-                        co_flags = "rmo";
-                        break;
-                case LUSTRE_SP_CLI:
-                        co_flags = "r";
-                        break;
-                case LUSTRE_SP_MGS:
-                default:
-                        LBUG();
+		switch (sec->ps_part) {
+		case LUSTRE_SP_MDT:
+			sec_part_flags = "m";
+			break;
+		case LUSTRE_SP_OST:
+			sec_part_flags = "o";
+			break;
+		case LUSTRE_SP_MGC:
+			sec_part_flags = "rmo";
+			break;
+		case LUSTRE_SP_CLI:
+			sec_part_flags = "r";
+			break;
+		case LUSTRE_SP_MGS:
+		default:
+			LBUG();
 		}
-        }
-
-        /* in case of setuid, key will be constructed as owner of fsuid/fsgid,
-         * but we do authentication based on real uid/gid. the key permission
-         * bits will be exactly as POS_ALL, so only processes who subscribed
-         * this key could have the access, although the quota might be counted
-         * on others (fsuid/fsgid).
-         *
-         * keyring will use fsuid/fsgid as upcall parameters, so we have to
-         * encode real uid/gid into callout info.
-         */
-
-        construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);
-
-        /* callout info format:
-         * secid:mech:uid:gid:flags:svc_type:peer_nid:target_uuid
-         */
-        coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
-        OBD_ALLOC(coinfo, coinfo_size);
-        if (coinfo == NULL)
-                goto out;
 
-        snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%d:"LPX64":%s",
-                 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
-                 vcred->vc_uid, vcred->vc_gid,
-                 co_flags, import_to_gss_svc(imp),
-                 imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name);
+		switch (SPTLRPC_FLVR_SVC(sec->ps_flvr.sf_rpc)) {
+		case SPTLRPC_SVC_NULL:
+			svc_flag = 'n';
+			break;
+		case SPTLRPC_SVC_AUTH:
+			svc_flag = 'a';
+			break;
+		case SPTLRPC_SVC_INTG:
+			svc_flag = 'i';
+			break;
+		case SPTLRPC_SVC_PRIV:
+			svc_flag = 'p';
+			break;
+		default:
+			LBUG();
+		}
+	}
+
+	/* in case of setuid, key will be constructed as owner of fsuid/fsgid,
+	 * but we do authentication based on real uid/gid. the key permission
+	 * bits will be exactly as POS_ALL, so only processes who subscribed
+	 * this key could have the access, although the quota might be counted
+	 * on others (fsuid/fsgid).
+	 *
+	 * keyring will use fsuid/fsgid as upcall parameters, so we have to
+	 * encode real uid/gid into callout info.
+	 */
+
+	/* But first we need to make sure the obd type is supported */
+	if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
+	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MGC_NAME) &&
+	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
+	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSP_NAME)) {
+		CERROR("obd %s is not a supported device\n",
+		       imp->imp_obd->obd_name);
+		GOTO(out, ctx = NULL);
+	}
+
+	construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);
+
+	/* callout info format:
+	 * secid:mech:uid:gid:sec_flags:svc_flag:svc_type:peer_nid:target_uuid:
+	 * self_nid:pid
+	 */
+	coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
+	OBD_ALLOC(coinfo, coinfo_size);
+	if (coinfo == NULL)
+		goto out;
+
+	/* Last callout parameter is pid of process whose namespace will be used
+	 * for credentials' retrieval.
+	 * For user's credentials (in which case sec_part_flags is empty), use
+	 * current PID instead of import's reference PID to get reference
+	 * namespace. */
+	snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%c:%d:%#llx:%s:%#llx:%d",
+		 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
+		 vcred->vc_uid, vcred->vc_gid,
+		 sec_part_flags, svc_flag, import_to_gss_svc(imp),
+		 imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name,
+		 imp->imp_connection->c_self,
+		 sec_part_flags[0] == '\0' ?
		 current_pid() : imp->imp_sec_refpid);
 
 	CDEBUG(D_SEC, "requesting key for %s\n", desc);
@@ -784,37 +873,36 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
 	 * need wirtelock of key->sem to serialize them. */
 	down_write(&key->sem);
 
-        if (likely(key->payload.data != NULL)) {
-                ctx = key->payload.data;
-
-                LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 1);
-                LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
-                LASSERT(atomic_read(&key->usage) >= 2);
-
-                /* simply take a ref and return. it's upper layer's
-                 * responsibility to detect & replace dead ctx. */
-                cfs_atomic_inc(&ctx->cc_refcount);
-        } else {
-                /* pre initialization with a cli_ctx. this can't be done in
-                 * key_instantiate() because we'v no enough information
-                 * there. */
-                ctx = ctx_create_kr(sec, vcred);
-                if (ctx != NULL) {
-                        ctx_enlist_kr(ctx, is_root, 0);
-                        bind_key_ctx(key, ctx);
-
-                        ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);
-
-                        CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
-                               key, ctx, sec);
-                } else {
-                        /* we'd prefer to call key_revoke(), but we more like
-                         * to revoke it within this key->sem locked period. */
-                        key_revoke_locked(key);
-                }
-
-                create_new = 1;
-        }
+	ctx = key_get_payload(key, 0);
+	if (likely(ctx)) {
+		LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
+		LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
+		LASSERT(ll_read_key_usage(key) >= 2);
+
+		/* simply take a ref and return. it's upper layer's
+		 * responsibility to detect & replace dead ctx. */
+		atomic_inc(&ctx->cc_refcount);
+	} else {
+		/* pre initialization with a cli_ctx. this can't be done in
+		 * key_instantiate() because we'v no enough information
+		 * there. */
+		ctx = ctx_create_kr(sec, vcred);
+		if (ctx != NULL) {
+			ctx_enlist_kr(ctx, is_root, 0);
+			bind_key_ctx(key, ctx);
+
+			ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);
+
+			CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
+			       key, ctx, sec);
+		} else {
+			/* we'd prefer to call key_revoke(), but we more like
+			 * to revoke it within this key->sem locked period. */
+			key_revoke_locked(key);
+		}
+
+		create_new = 1;
+	}
 
 	up_write(&key->sem);
 
@@ -824,7 +912,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
 	key_put(key);
 out:
 	if (is_root)
-                cfs_mutex_unlock(&gsec_kr->gsk_root_uc_lock);
+		mutex_unlock(&gsec_kr->gsk_root_uc_lock);
 	RETURN(ctx);
 }
 
@@ -833,8 +921,8 @@ void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
                             struct ptlrpc_cli_ctx *ctx,
                             int sync)
 {
-        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
+	LASSERT(atomic_read(&sec->ps_refcount) > 0);
+	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
 	ctx_release_kr(ctx, sync);
 }
 
@@ -859,149 +947,149 @@ void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
 
 	construct_key_desc(desc, sizeof(desc), sec, uid);
 
-        /* there should be only one valid key, but we put it in the
-         * loop in case of any weird cases */
-        for (;;) {
-                key = request_key(&gss_key_type, desc, NULL);
-                if (IS_ERR(key)) {
-                        CDEBUG(D_SEC, "No more key found for current user\n");
-                        break;
-                }
+	/* there should be only one valid key, but we put it in the
+	 * loop in case of any weird cases */
+	for (;;) {
+		key = request_key(&gss_key_type, desc, NULL);
+		if (IS_ERR(key)) {
+			CDEBUG(D_SEC, "No more key found for current user\n");
+			break;
+		}
 
-                down_write(&key->sem);
+		down_write(&key->sem);
 
-                kill_key_locked(key);
+		kill_key_locked(key);
 
-                /* kill_key_locked() should usually revoke the key, but we
-                 * revoke it again to make sure, e.g. some case the key may
-                 * not well coupled with a context. */
-                key_revoke_locked(key);
+		/* kill_key_locked() should usually revoke the key, but we
+		 * revoke it again to make sure, e.g. some case the key may
+		 * not well coupled with a context. */
+		key_revoke_locked(key);
 
-                up_write(&key->sem);
+		up_write(&key->sem);
 
-                key_put(key);
-        }
+		request_key_unlink(key);
+
+		key_put(key);
+	}
 }
 
 /*
  * flush context of root or all, we iterate through the list.
  */
 static
-void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
-                             uid_t uid,
-                             int grace, int force)
+void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace,
			     int force)
 {
-        struct gss_sec_keyring *gsec_kr;
-        cfs_hlist_head_t        freelist = CFS_HLIST_HEAD_INIT;
-        cfs_hlist_node_t       *pos, *next;
-        struct ptlrpc_cli_ctx  *ctx;
-        ENTRY;
+	struct gss_sec_keyring *gsec_kr;
+	struct hlist_head freelist = HLIST_HEAD_INIT;
+	struct hlist_node __maybe_unused *pos, *next;
+	struct ptlrpc_cli_ctx *ctx;
+	ENTRY;
 
 	gsec_kr = sec2gsec_keyring(sec);
 
-        cfs_spin_lock(&sec->ps_lock);
-        cfs_hlist_for_each_entry_safe(ctx, pos, next,
-                                      &gsec_kr->gsk_clist, cc_cache) {
-                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-
-                if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
-                        continue;
-
-                /* at this moment there's at least 2 base reference:
-                 * key association and in-list. */
-                if (cfs_atomic_read(&ctx->cc_refcount) > 2) {
-                        if (!force)
-                                continue;
-                        CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
-                              ctx, ctx->cc_vcred.vc_uid,
-                              sec2target_str(ctx->cc_sec),
-                              cfs_atomic_read(&ctx->cc_refcount) - 2);
-                }
-
-                cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
-                if (!grace)
-                        cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
-
-                cfs_atomic_inc(&ctx->cc_refcount);
-
-                if (ctx_unlist_kr(ctx, 1)) {
-                        cfs_hlist_add_head(&ctx->cc_cache, &freelist);
-                } else {
-                        LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
-                        cfs_atomic_dec(&ctx->cc_refcount);
-                }
-        }
-        cfs_spin_unlock(&sec->ps_lock);
-
-        dispose_ctx_list_kr(&freelist);
-        EXIT;
+	spin_lock(&sec->ps_lock);
+	cfs_hlist_for_each_entry_safe(ctx, pos, next,
+				      &gsec_kr->gsk_clist, cc_cache) {
+		LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+		if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
+			continue;
+
+		/* at this moment there's at least 2 base reference:
+		 * key association and in-list. */
+		if (atomic_read(&ctx->cc_refcount) > 2) {
+			if (!force)
+				continue;
+			CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
+			      ctx, ctx->cc_vcred.vc_uid,
+			      sec2target_str(ctx->cc_sec),
+			      atomic_read(&ctx->cc_refcount) - 2);
+		}
+
+		set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+		if (!grace)
+			clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+
+		atomic_inc(&ctx->cc_refcount);
+
+		if (ctx_unlist_kr(ctx, 1)) {
+			hlist_add_head(&ctx->cc_cache, &freelist);
+		} else {
+			LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
+			atomic_dec(&ctx->cc_refcount);
+		}
+	}
+	spin_unlock(&sec->ps_lock);
+
+	dispose_ctx_list_kr(&freelist);
+	EXIT;
 }
 
 static
 int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
                                uid_t uid, int grace, int force)
 {
-        ENTRY;
+	ENTRY;
 
-        CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
-               sec, cfs_atomic_read(&sec->ps_refcount),
-               cfs_atomic_read(&sec->ps_nctx),
-               uid, grace, force);
+	CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
+	       sec, atomic_read(&sec->ps_refcount),
+	       atomic_read(&sec->ps_nctx),
+	       uid, grace, force);
 
-        if (uid != -1 && uid != 0)
-                flush_user_ctx_cache_kr(sec, uid, grace, force);
-        else
-                flush_spec_ctx_cache_kr(sec, uid, grace, force);
+	if (uid != -1 && uid != 0)
+		flush_user_ctx_cache_kr(sec, uid, grace, force);
+	else
+		flush_spec_ctx_cache_kr(sec, uid, grace, force);
 
-        RETURN(0);
+	RETURN(0);
 }
 
 static
 void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
 {
-        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
-        cfs_hlist_head_t        freelist = CFS_HLIST_HEAD_INIT;
-        cfs_hlist_node_t       *pos, *next;
-        struct ptlrpc_cli_ctx  *ctx;
-        ENTRY;
-
-        CWARN("running gc\n");
-
-        cfs_spin_lock(&sec->ps_lock);
-        cfs_hlist_for_each_entry_safe(ctx, pos, next,
-                                      &gsec_kr->gsk_clist, cc_cache) {
-                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-
-                cfs_atomic_inc(&ctx->cc_refcount);
-
-                if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
-                        cfs_hlist_add_head(&ctx->cc_cache, &freelist);
-                        CWARN("unhashed ctx %p\n", ctx);
-                } else {
-                        LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
-                        cfs_atomic_dec(&ctx->cc_refcount);
-                }
-        }
-        cfs_spin_unlock(&sec->ps_lock);
-
-        dispose_ctx_list_kr(&freelist);
-        EXIT;
-        return;
+	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+	struct hlist_head freelist = HLIST_HEAD_INIT;
+	struct hlist_node __maybe_unused *pos, *next;
+	struct ptlrpc_cli_ctx *ctx;
+	ENTRY;
+
+	CWARN("running gc\n");
+
+	spin_lock(&sec->ps_lock);
+	cfs_hlist_for_each_entry_safe(ctx, pos, next,
+				      &gsec_kr->gsk_clist, cc_cache) {
+		LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+		atomic_inc(&ctx->cc_refcount);
+
+		if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
+			hlist_add_head(&ctx->cc_cache, &freelist);
+			CWARN("unhashed ctx %p\n", ctx);
+		} else {
+			LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
+			atomic_dec(&ctx->cc_refcount);
+		}
+	}
+	spin_unlock(&sec->ps_lock);
+
+	dispose_ctx_list_kr(&freelist);
+	EXIT;
 }
 
 static
 int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
 {
-        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
-        cfs_hlist_node_t       *pos, *next;
-        struct ptlrpc_cli_ctx  *ctx;
-        struct gss_cli_ctx     *gctx;
-        time_t                  now = cfs_time_current_sec();
-        ENTRY;
-
-        cfs_spin_lock(&sec->ps_lock);
+	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+	struct hlist_node __maybe_unused *pos, *next;
+	struct ptlrpc_cli_ctx *ctx;
+	struct gss_cli_ctx *gctx;
+	time64_t now = ktime_get_real_seconds();
+
+	ENTRY;
+	spin_lock(&sec->ps_lock);
 	cfs_hlist_for_each_entry_safe(ctx, pos, next,
-                                      &gsec_kr->gsk_clist, cc_cache) {
+				      &gsec_kr->gsk_clist, cc_cache) {
 		struct key *key;
 		char flags_str[40];
 		char mech[40];
@@ -1018,25 +1106,24 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
 			snprintf(mech, sizeof(mech), "N/A");
 		mech[sizeof(mech) - 1] = '\0';
 
-                seq_printf(seq, "%p: uid %u, ref %d, expire %ld(%+ld), fl %s, "
-                           "seq %d, win %u, key %08x(ref %d), "
-                           "hdl "LPX64":"LPX64", mech: %s\n",
-                           ctx, ctx->cc_vcred.vc_uid,
-                           cfs_atomic_read(&ctx->cc_refcount),
-                           ctx->cc_expire,
-                           ctx->cc_expire ? ctx->cc_expire - now : 0,
-                           flags_str,
-                           cfs_atomic_read(&gctx->gc_seq),
-                           gctx->gc_win,
-                           key ? key->serial : 0,
-                           key ? atomic_read(&key->usage) : 0,
-                           gss_handle_to_u64(&gctx->gc_handle),
-                           gss_handle_to_u64(&gctx->gc_svc_handle),
-                           mech);
-        }
-        cfs_spin_unlock(&sec->ps_lock);
-
-        RETURN(0);
+		seq_printf(seq,
+			   "%p: uid %u, ref %d, expire %lld(%+lld), fl %s, seq %d, win %u, key %08x(ref %d), hdl %#llx:%#llx, mech: %s\n",
+			   ctx, ctx->cc_vcred.vc_uid,
+			   atomic_read(&ctx->cc_refcount),
+			   ctx->cc_expire,
+			   ctx->cc_expire ? ctx->cc_expire - now : 0,
+			   flags_str,
+			   atomic_read(&gctx->gc_seq),
+			   gctx->gc_win,
+			   key ? key->serial : 0,
+			   key ? ll_read_key_usage(key) : 0,
+			   gss_handle_to_u64(&gctx->gc_handle),
+			   gss_handle_to_u64(&gctx->gc_svc_handle),
+			   mech);
+	}
+	spin_unlock(&sec->ps_lock);
+
+	RETURN(0);
 }
 
 /****************************************
@@ -1053,27 +1140,27 @@ int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
 static
 int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-        LASSERT(ctx->cc_sec);
+	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+	LASSERT(ctx->cc_sec);
 
-        if (cli_ctx_check_death(ctx)) {
-                kill_ctx_kr(ctx);
-                return 1;
-        }
+	if (cli_ctx_check_death(ctx)) {
+		kill_ctx_kr(ctx);
+		return 1;
+	}
 
-        if (cli_ctx_is_ready(ctx))
-                return 0;
-        return 1;
+	if (cli_ctx_is_ready(ctx))
+		return 0;
+	return 1;
 }
 
 static
 void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
 {
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-        LASSERT(ctx->cc_sec);
+	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+	LASSERT(ctx->cc_sec);
 
-        cli_ctx_expire(ctx);
-        kill_ctx_kr(ctx);
+	cli_ctx_expire(ctx);
+	kill_ctx_kr(ctx);
 }
 
 /****************************************
@@ -1090,11 +1177,11 @@ void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
 
 static
 int sec_install_rctx_kr(struct ptlrpc_sec *sec,
-                        struct ptlrpc_svc_ctx *svc_ctx)
+			struct ptlrpc_svc_ctx *svc_ctx)
 {
-        struct ptlrpc_cli_ctx   *cli_ctx;
-        struct vfs_cred          vcred = { 0, 0 };
-        int                      rc;
+	struct ptlrpc_cli_ctx *cli_ctx;
+	struct vfs_cred vcred = { .vc_uid = 0 };
+	int rc;
 
 	LASSERT(sec);
 	LASSERT(svc_ctx);
@@ -1122,13 +1209,13 @@ int sec_install_rctx_kr(struct ptlrpc_sec *sec,
 
 static
 int sec_install_rctx_kr(struct ptlrpc_sec *sec,
-                        struct ptlrpc_svc_ctx *svc_ctx)
+			struct ptlrpc_svc_ctx *svc_ctx)
 {
-        struct ptlrpc_cli_ctx   *cli_ctx = NULL;
-        struct key              *key;
-        struct vfs_cred          vcred = { 0, 0 };
-        char                     desc[64];
-        int                      rc;
+	struct ptlrpc_cli_ctx *cli_ctx = NULL;
+	struct key *key;
+	struct vfs_cred vcred = { .vc_uid = 0 };
+	char desc[64];
+	int rc;
 
 	LASSERT(sec);
 	LASSERT(svc_ctx);
@@ -1151,7 +1238,7 @@ int sec_install_rctx_kr(struct ptlrpc_sec *sec,
 
 	down_write(&key->sem);
 
-        LASSERT(key->payload.data == NULL);
+	LASSERT(!key_get_payload(key, 0));
 
 	cli_ctx = ctx_create_kr(sec, &vcred);
 	if (cli_ctx == NULL) {
@@ -1218,8 +1305,15 @@ int gss_svc_install_rctx_kr(struct obd_import *imp,
  ****************************************/
 
 static
+#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
+int gss_kt_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+	const void *data = prep->data;
+	size_t datalen = prep->datalen;
+#else
 int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
 {
+#endif
 	int rc;
 	ENTRY;
 
@@ -1228,7 +1322,7 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
 		RETURN(-EINVAL);
 	}
 
-        if (key->payload.data != 0) {
+	if (key_get_payload(key, 0)) {
 		CERROR("key already have payload\n");
 		RETURN(-EINVAL);
 	}
@@ -1243,20 +1337,21 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
 	 * the session keyring is created upon upcall, and don't change all
 	 * the way until upcall finished, so rcu lock is not needed here.
 	 */
-        LASSERT(cfs_current()->signal->session_keyring);
-
-        cfs_lockdep_off();
-        rc = key_link(cfs_current()->signal->session_keyring, key);
-        cfs_lockdep_on();
-        if (unlikely(rc)) {
-                CERROR("failed to link key %08x to keyring %08x: %d\n",
-                       key->serial,
-                       cfs_current()->signal->session_keyring->serial, rc);
-                RETURN(rc);
-        }
-
-        CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
-        RETURN(0);
+	LASSERT(current_cred()->session_keyring);
+
+	lockdep_off();
+	rc = key_link(current_cred()->session_keyring, key);
+	lockdep_on();
+	if (unlikely(rc)) {
+		CERROR("failed to link key %08x to keyring %08x: %d\n",
+		       key->serial,
+		       current_cred()->session_keyring->serial, rc);
+		RETURN(rc);
+	}
+
+	CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key,
+	       key_get_payload(key, 0));
+	RETURN(0);
 }
 
 /*
@@ -1264,19 +1359,26 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
  * on the context without fear of loosing refcount.
  */
 static
+#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
+int gss_kt_update(struct key *key, struct key_preparsed_payload *prep)
+{
+	const void *data = prep->data;
+	__u32 datalen32 = (__u32) prep->datalen;
+#else
 int gss_kt_update(struct key *key, const void *data, size_t datalen)
 {
-        struct ptlrpc_cli_ctx   *ctx = key->payload.data;
+	__u32 datalen32 = (__u32) datalen;
+#endif
+	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);
 	struct gss_cli_ctx *gctx;
 	rawobj_t tmpobj = RAWOBJ_EMPTY;
-        __u32                    datalen32 = (__u32) datalen;
 	int rc;
 	ENTRY;
 
-        if (data == NULL || datalen == 0) {
-                CWARN("invalid: data %p, len %lu\n", data, (long) datalen);
-                RETURN(-EINVAL);
-        }
+	if (data == NULL || datalen32 == 0) {
+		CWARN("invalid: data %p, len %lu\n", data, (long)datalen32);
+		RETURN(-EINVAL);
+	}
 
 	/* if upcall finished negotiation too fast (mostly likely because
 	 * of local error happened) and call kt_update(), the ctx
@@ -1294,10 +1396,10 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
 		RETURN(rc);
 	}
 
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-        LASSERT(ctx->cc_sec);
+	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+	LASSERT(ctx->cc_sec);
 
-        ctx_clear_timer_kr(ctx);
+	ctx_clear_timer_kr(ctx);
 
 	/* don't proceed if already refreshed */
 	if (cli_ctx_is_refreshed(ctx)) {
@@ -1315,49 +1417,50 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
 		goto out;
 	}
 
-        if (gctx->gc_win == 0) {
-                __u32   nego_rpc_err, nego_gss_err;
-
-                rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
-                                          sizeof(nego_rpc_err));
-                if (rc) {
-                        CERROR("failed to extrace rpc rc\n");
-                        goto out;
-                }
-
-                rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
-                                          sizeof(nego_gss_err));
-                if (rc) {
-                        CERROR("failed to extrace gss rc\n");
-                        goto out;
-                }
-
-                CERROR("negotiation: rpc err %d, gss err %x\n",
-                       nego_rpc_err, nego_gss_err);
-
-                rc = nego_rpc_err ? nego_rpc_err : -EACCES;
-        } else {
-                rc = rawobj_extract_local_alloc(&gctx->gc_handle,
-                                                (__u32 **) &data, &datalen32);
-                if (rc) {
-                        CERROR("failed extract handle\n");
-                        goto out;
-                }
-
-                rc = rawobj_extract_local(&tmpobj, (__u32 **) &data,&datalen32);
-                if (rc) {
-                        CERROR("failed extract mech\n");
-                        goto out;
-                }
-
-                rc = lgss_import_sec_context(&tmpobj,
-                                             sec2gsec(ctx->cc_sec)->gs_mech,
-                                             &gctx->gc_mechctx);
-                if (rc != GSS_S_COMPLETE)
-                        CERROR("failed import context\n");
-                else
-                        rc = 0;
-        }
+	if (gctx->gc_win == 0) {
+		__u32 nego_rpc_err, nego_gss_err;
+
+		rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
+					  sizeof(nego_rpc_err));
+		if (rc) {
+			CERROR("cannot extract RPC: rc = %d\n", rc);
+			goto out;
+		}
+
+		rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
+					  sizeof(nego_gss_err));
+		if (rc) {
+			CERROR("failed to extract gss rc = %d\n", rc);
+			goto out;
+		}
+
+		CERROR("negotiation: rpc err %d, gss err %x\n",
+		       nego_rpc_err, nego_gss_err);
+
+		rc = nego_rpc_err ? nego_rpc_err : -EACCES;
+	} else {
+		rc = rawobj_extract_local_alloc(&gctx->gc_handle,
+						(__u32 **) &data, &datalen32);
+		if (rc) {
+			CERROR("failed extract handle\n");
+			goto out;
+		}
+
+		rc = rawobj_extract_local(&tmpobj,
+					  (__u32 **) &data, &datalen32);
+		if (rc) {
+			CERROR("failed extract mech\n");
+			goto out;
+		}
+
+		rc = lgss_import_sec_context(&tmpobj,
+					     sec2gsec(ctx->cc_sec)->gs_mech,
+					     &gctx->gc_mechctx);
+		if (rc != GSS_S_COMPLETE)
+			CERROR("failed import context\n");
+		else
+			rc = 0;
+	}
 out:
 	/* we don't care what current status of this ctx, even someone else
 	 * is operating on the ctx at the same time. we just add up our own
@@ -1372,7 +1475,7 @@ out:
 		cli_ctx_expire(ctx);
 
 		if (rc != -ERESTART)
-                        cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+			set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
 	}
 
 	/* let user space think it's a success */
@@ -1380,17 +1483,39 @@ out:
 	RETURN(0);
 }
 
-static
-int gss_kt_match(const struct key *key, const void *desc)
+#ifndef HAVE_KEY_MATCH_DATA
+static int
+gss_kt_match(const struct key *key, const void *desc)
+{
+	return strcmp(key->description, (const char *) desc) == 0 &&
+	       !test_bit(KEY_FLAG_REVOKED, &key->flags);
+}
+#else /* ! HAVE_KEY_MATCH_DATA */
+static bool
+gss_kt_match(const struct key *key, const struct key_match_data *match_data)
 {
-        return (strcmp(key->description, (const char *) desc) == 0);
+	const char *desc = match_data->raw_data;
+
+	return strcmp(key->description, desc) == 0 &&
+	       !test_bit(KEY_FLAG_REVOKED, &key->flags);
 }
 
+/*
+ * Preparse the match criterion.
+ */
+static int gss_kt_match_preparse(struct key_match_data *match_data)
+{
+	match_data->lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT;
+	match_data->cmp = gss_kt_match;
+	return 0;
+}
+#endif /* HAVE_KEY_MATCH_DATA */
+
 static
 void gss_kt_destroy(struct key *key)
 {
 	ENTRY;
-        LASSERT(key->payload.data == NULL);
+	LASSERT(!key_get_payload(key, 0));
 	CDEBUG(D_SEC, "destroy key %p\n", key);
 	EXIT;
 }
@@ -1406,13 +1531,17 @@ void gss_kt_describe(const struct key *key, struct seq_file *s)
 
 static struct key_type gss_key_type =
 {
-        .name           = "lgssc",
-        .def_datalen    = 0,
-        .instantiate    = gss_kt_instantiate,
-        .update         = gss_kt_update,
-        .match          = gss_kt_match,
-        .destroy        = gss_kt_destroy,
-        .describe       = gss_kt_describe,
+	.name		= "lgssc",
+	.def_datalen	= 0,
+	.instantiate	= gss_kt_instantiate,
+	.update		= gss_kt_update,
+#ifdef HAVE_KEY_MATCH_DATA
+	.match_preparse = gss_kt_match_preparse,
+#else
+	.match		= gss_kt_match,
+#endif
+	.destroy	= gss_kt_destroy,
+	.describe	= gss_kt_describe,
 };
 
 /****************************************
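
Note on the timer API change: the move in ctx_start_timer_kr()/ctx_upcall_timeout_kr() from init_timer() plus timer->data to cfs_timer_setup()/cfs_from_timer() tracks the v4.15 kernel timer rework, where a callback receives the timer_list itself and recovers its container via container_of(). A minimal sketch of the underlying upstream pattern on a v4.15+ kernel; the demo_* names are illustrative, not part of the patch:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/time64.h>
#include <linux/printk.h>

struct demo_ctx {
	struct timer_list dc_timer;	/* embedded, not separately allocated */
	int dc_id;
};

/* v4.15+ callback signature: the expired timer is the only argument */
static void demo_timeout(struct timer_list *t)
{
	struct demo_ctx *ctx = from_timer(ctx, t, dc_timer);

	pr_warn("demo ctx %d: upcall timed out\n", ctx->dc_id);
}

static void demo_arm(struct demo_ctx *ctx, time64_t seconds)
{
	timer_setup(&ctx->dc_timer, demo_timeout, 0);
	mod_timer(&ctx->dc_timer, jiffies + seconds * HZ);	/* absolute expiry, in jiffies */
}

Embedding the timer in its containing structure is what makes from_timer() work, and is why the patch turns the separately allocated gck_timer pointer into an embedded struct timer_list and drops its OBD_ALLOC_PTR()/OBD_FREE_PTR() pair.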
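
Note on ll_read_key_usage(): it replaces atomic_read(&key->usage) in the LASSERTs and in gss_sec_display_kr() but is defined elsewhere in the tree; it exists because key->usage was converted from atomic_t to refcount_t around v4.11. A plausible shape for such a wrapper is sketched below; the HAVE_KEY_USAGE_REFCOUNT guard is an assumption about Lustre's autoconf, named in the style of the other HAVE_* macros in this file:

#include <linux/key.h>

static inline int demo_read_key_usage(struct key *key)
{
#ifdef HAVE_KEY_USAGE_REFCOUNT
	return refcount_read(&key->usage);	/* v4.11+: refcount_t */
#else
	return atomic_read(&key->usage);	/* older kernels: atomic_t */
#endif
}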
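
Note on the hlist cursors: the struct hlist_node __maybe_unused *pos variables kept in dispose_ctx_list_kr(), flush_spec_ctx_cache_kr(), gss_sec_gc_ctx_kr() and gss_sec_display_kr() exist because cfs_hlist_for_each_entry_safe() must span the v3.9 change (commit b67bfe0d42ca) that dropped the node parameter from the hlist iterators; on newer kernels the extra cursor is simply unused. For comparison, the bare post-v3.9 form, with illustrative demo_* names:

#include <linux/list.h>

struct demo_ctx {
	struct hlist_node cc_cache;
};

static void demo_drain(struct hlist_head *freelist)
{
	struct demo_ctx *ctx;
	struct hlist_node *next;	/* only the lookahead cursor survives */

	hlist_for_each_entry_safe(ctx, next, freelist, cc_cache) {
		hlist_del_init(&ctx->cc_cache);
		/* ... release ctx ... */
	}
}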
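
Note on request_key_unlink(): it has to guess which keyring request_key() linked the key into, walking cred->jit_keyring with the same precedence the kernel's own lookup uses, now starting at the requestor's destination keyring when HAVE_GET_REQUEST_KEY_AUTH is set. The userspace equivalent of the final unlink is a keyctl search-and-unlink; a sketch against libkeyutils follows (link with -lkeyutils). The key description string is built by construct_key_desc(), whose format is not shown in this hunk, so a placeholder stands in for it:

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	key_serial_t key;

	/* "lgssc" is the key type this file registers */
	key = keyctl_search(KEY_SPEC_SESSION_KEYRING, "lgssc",
			    "<desc from construct_key_desc()>", 0);
	if (key == -1) {
		perror("keyctl_search");
		return 1;
	}
	if (keyctl_unlink(key, KEY_SPEC_SESSION_KEYRING) == -1)
		perror("keyctl_unlink");
	return 0;
}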
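
Note on the callout info: it grows from the old eight colon-separated fields (secid:mech:uid:gid:flags:svc_type:peer_nid:target_uuid) to eleven, splitting flags into sec_part_flags plus a one-character svc_flag and appending self_nid and a reference pid. A hypothetical userspace-side split of one such string; every field value below is invented for illustration (the real consumer is the lgss_keyring upcall):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* secid:mech:uid:gid:sec_flags:svc_flag:svc_type:peer_nid:
	 * target_uuid:self_nid:pid -- sec_flags is empty here, as it
	 * would be for a regular user's context */
	char buf[] = "1:krb5:500:500::i:2:0x50000c0a80102:lustre-MDT0000_UUID:0x50000c0a80101:4242";
	char *p = buf, *tok;
	int field = 0;

	while ((tok = strsep(&p, ":")) != NULL)
		printf("field %2d: '%s'\n", field++, tok);
	return 0;
}

strsep() is used rather than strtok() so that the empty sec_flags field is preserved instead of being collapsed into its neighbour.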