/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_keyring.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#include <libcfs/linux/linux-list.h>
#include <obd_class.h>
#include <obd_support.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include "gss_internal.h"

#ifdef HAVE_GET_REQUEST_KEY_AUTH
#include <keys/request_key_auth-type.h>
#endif
static struct ptlrpc_sec_policy gss_policy_keyring;
static struct ptlrpc_ctx_ops gss_keyring_ctxops;
static struct key_type gss_key_type;

static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			       struct ptlrpc_svc_ctx *svc_ctx);
/*
 * The timeout only covers the case where the upcall child process dies
 * abnormally; in all other cases the upcall should eventually update the
 * kernel key.
 *
 * FIXME we had better incorporate the client & server side upcall timeouts
 * into the framework of Adaptive Timeouts, but we need to figure out how to
 * make sure the kernel knows whether an upcall process is still in progress
 * or has died unexpectedly.
 */
#define KEYRING_UPCALL_TIMEOUT	(obd_timeout + obd_timeout)
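/*
 * In other words the upcall gets two full obd_timeout periods, in seconds
 * (e.g. 200s with the common obd_timeout default of 100s), before the
 * single-shot timer armed in ctx_start_timer_kr() below fires and revokes
 * the key.
 */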
/****************************************
 * internal helpers			*
 ****************************************/
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}
static inline void key_revoke_locked(struct key *key)
{
	set_bit(KEY_FLAG_REVOKED, &key->flags);
}
static void ctx_upcall_timeout_kr(cfs_timer_cb_arg_t data)
{
	struct gss_cli_ctx_keyring *gctx_kr = cfs_from_timer(gctx_kr,
							     data, gck_timer);
	struct ptlrpc_cli_ctx *ctx = &(gctx_kr->gck_base.gc_base);
	struct key *key = gctx_kr->gck_key;

	CWARN("ctx %p, key %p\n", ctx, key);

	cli_ctx_expire(ctx);
	key_revoke_locked(key);
}
static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, time64_t timeout)
{
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
	struct timer_list *timer = &gctx_kr->gck_timer;

	LASSERT(timer);

	CDEBUG(D_SEC, "ctx %p: start timer %llds\n", ctx, timeout);

	cfs_timer_setup(timer, ctx_upcall_timeout_kr,
			(unsigned long)gctx_kr, 0);
	timer->expires = cfs_time_seconds(timeout) + jiffies;
	add_timer(timer);
}
/*
 * The caller must make sure there is no race with other threads.
 */
static
void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
	struct timer_list *timer = &gctx_kr->gck_timer;

	CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);

	del_singleshot_timer_sync(timer);
}
static
struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
				     struct vfs_cred *vcred)
{
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx_keyring *gctx_kr;

	OBD_ALLOC_PTR(gctx_kr);
	if (gctx_kr == NULL)
		return NULL;

	cfs_timer_setup(&gctx_kr->gck_timer, NULL, 0, 0);

	ctx = &gctx_kr->gck_base.gc_base;

	if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
		OBD_FREE_PTR(gctx_kr);
		return NULL;
	}

	ctx->cc_expire = ktime_get_real_seconds() + KEYRING_UPCALL_TIMEOUT;
	clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
	atomic_inc(&ctx->cc_refcount); /* for the caller */

	return ctx;
}
static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);

	CDEBUG(D_SEC, "destroying ctx %p\n", ctx);

	/* by this time the association with the key has been broken. */
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
	LASSERT(gctx_kr->gck_key == NULL);

	ctx_clear_timer_kr(ctx);

	if (gss_cli_ctx_fini_common(sec, ctx))
		return;

	OBD_FREE_PTR(gctx_kr);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}
static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
	if (sync) {
		ctx_destroy_kr(ctx);
	} else {
		atomic_inc(&ctx->cc_refcount);
		sptlrpc_gc_add_ctx(ctx);
	}
}

static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	if (atomic_dec_and_test(&ctx->cc_refcount))
		ctx_release_kr(ctx, sync);
}
/*
 * key <-> ctx association and rules:
 * - a ctx might not be bound to any key
 * - the key/ctx binding is protected by the key semaphore (if a key is
 *   present)
 * - key and ctx each take a reference on the other
 * - ctx enlist/unlist is protected by the ctx spinlock
 * - never enlist a ctx after it has been unlisted
 * - whoever enlists should also bind; lock the key before enlisting:
 *   - lock key -> lock ctx -> enlist -> unlock ctx -> bind -> unlock key
 * - whoever unlists should also unbind:
 *   - lock key -> lock ctx -> unlist -> unlock ctx -> unbind -> unlock key
 *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
 */
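/*
 * For illustration, the enlist+bind rule above corresponds to a call
 * sequence like the following (a sketch only; the actual callers are
 * gss_sec_lookup_ctx_kr() and rvs_sec_install_root_ctx_kr() below):
 *
 *	down_write(&key->sem);		// lock key
 *	ctx_enlist_kr(ctx, is_root, 0);	// takes sec->ps_lock internally
 *	bind_key_ctx(key, ctx);		// bind while key sem is still held
 *	up_write(&key->sem);		// unlock key
 */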
static inline void spin_lock_if(spinlock_t *lock, int condition)
{
	if (condition)
		spin_lock(lock);
}

static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
	if (condition)
		spin_unlock(lock);
}
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	spin_lock_if(&sec->ps_lock, !locked);

	atomic_inc(&ctx->cc_refcount);
	set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
	hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
	if (is_root)
		gsec_kr->gsk_root_ctx = ctx;

	spin_unlock_if(&sec->ps_lock, !locked);
}
/*
 * Note: after this is called, the caller must not access the ctx again,
 * because it might have been freed, unless the caller holds at least one
 * extra refcount on the ctx.
 *
 * Returns non-zero if we indeed unlisted the ctx.
 */
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	/* if the cached bit is already gone, leave the job to whoever is
	 * doing it */
	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
		return 0;

	/* drop the ref inside the spin lock to prevent races with other
	 * operations */
	spin_lock_if(&sec->ps_lock, !locked);

	if (gsec_kr->gsk_root_ctx == ctx)
		gsec_kr->gsk_root_ctx = NULL;
	hlist_del_init(&ctx->cc_cache);
	atomic_dec(&ctx->cc_refcount);

	spin_unlock_if(&sec->ps_lock, !locked);

	return 1;
}
/*
 * Get a specific payload slot. Newer kernels support 4 slots.
 */
static void *key_get_payload(struct key *key, unsigned int index)
{
	void *key_ptr = NULL;

#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
	key_ptr = key->payload.data[index];
#else
	if (!index)
		key_ptr = key->payload.data;
#endif
	return key_ptr;
}
/*
 * Set a specific payload slot. Newer kernels support 4 slots.
 */
static int key_set_payload(struct key *key, unsigned int index,
			   struct ptlrpc_cli_ctx *ctx)
{
	int rc = -EINVAL;

#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
	if (index < 4) {
		key->payload.data[index] = ctx;
#else
	if (!index) {
		key->payload.data = ctx;
#endif
		rc = 0;
	}
	return rc;
}
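/*
 * Only slot 0 is ever used in this file: HAVE_KEY_PAYLOAD_DATA_ARRAY-era
 * kernels turned key->payload.data into a 4-entry array, and the two helpers
 * above hide that difference so every caller can simply say
 * key_get_payload(key, 0) / key_set_payload(key, 0, ctx).
 */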
/*
 * Bind a key and a ctx together.
 * The caller must hold the write lock of the key, as well as a ref on both
 * key and ctx.
 */
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ll_read_key_usage(key) > 0);
	LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
	LASSERT(!key_get_payload(key, 0));

	/* at this point the context may or may not be in the list. */
	key_get(key);
	atomic_inc(&ctx->cc_refcount);
	ctx2gctx_keyring(ctx)->gck_key = key;
	LASSERT(!key_set_payload(key, 0, ctx));
}
/*
 * Unbind a key and a ctx.
 * The caller must hold the write lock, as well as a ref of the key.
 */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(key_get_payload(key, 0) == ctx);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);

	/* must revoke the key, or others may treat it as newly created */
	key_revoke_locked(key);

	key_set_payload(key, 0, NULL);
	ctx2gctx_keyring(ctx)->gck_key = NULL;

	/* once the ctx is detached from the key, the timer is meaningless */
	ctx_clear_timer_kr(ctx);

	ctx_put_kr(ctx, 1);
	key_put(key);
}
/*
 * Given a ctx, unbind it from its coupled key, if any.
 * Unbind can only be called once, so we need not worry about the key being
 * released by someone else in the meantime.
 */
static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct key *key = ctx2gctx_keyring(ctx)->gck_key;

	if (key) {
		LASSERT(key_get_payload(key, 0) == ctx);

		key_get(key);
		down_write(&key->sem);
		unbind_key_ctx(key, ctx);
		up_write(&key->sem);
		key_put(key);
	}
}
/*
 * Given a key, unbind it from its coupled ctx, if any.
 * The caller must hold the write lock, as well as a ref of the key.
 */
static void unbind_key_locked(struct key *key)
{
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);

	if (ctx)
		unbind_key_ctx(key, ctx);
}
/*
 * Unlist a ctx, and unbind it from its coupled key.
 */
static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
	if (ctx_unlist_kr(ctx, 0))
		unbind_ctx_kr(ctx);
}
/*
 * Given a key, unlist and unbind the coupled ctx (if any).
 * The caller must hold the write lock, as well as a ref of the key.
 */
static void kill_key_locked(struct key *key)
{
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);

	if (ctx && ctx_unlist_kr(ctx, 0))
		unbind_key_locked(key);
}
/*
 * The caller should hold one ref on each context in the freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
	struct hlist_node __maybe_unused *pos, *next;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx;

	cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
		hlist_del_init(&ctx->cc_cache);

		/* reverse ctx: update the current seq to the buddy svcctx if
		 * it exists. Ideally this should be done in
		 * gss_cli_ctx_finalize(), but ctx destruction may be delayed
		 * because:
		 * 1) the ctx still has references;
		 * 2) ctx destruction is asynchronous;
		 * and the reverse import's call to inval_all_ctx() requires
		 * this be done _immediately_, otherwise a newly created
		 * reverse ctx might copy a very old sequence number from the
		 * svcctx. */
		gctx = ctx2gctx(ctx);
		if (!rawobj_empty(&gctx->gc_svc_handle) &&
		    sec_is_reverse(gctx->gc_base.cc_sec)) {
			gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
					(__u32) atomic_read(&gctx->gc_seq));
		}

		/* we need to wake up waiting reqs here: the context might be
		 * force-released before the upcall finished, in which case
		 * the late-arriving downcall cannot find the ctx at all. */
		sptlrpc_cli_ctx_wakeup(ctx);

		unbind_ctx_kr(ctx);
		ctx_put_kr(ctx, 0);
	}
}
/*
 * Look up a root context directly in a sec. Returns the root ctx with a
 * reference taken, or NULL.
 */
static
struct ptlrpc_cli_ctx *sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct ptlrpc_cli_ctx *ctx = NULL;

	spin_lock(&sec->ps_lock);

	ctx = gsec_kr->gsk_root_ctx;

	if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
		struct hlist_node __maybe_unused *node;
		struct ptlrpc_cli_ctx *tmp;

		/* reverse ctx: search for the root ctx in the list and choose
		 * the one with the shortest expiry time, which most likely
		 * has an established peer ctx on the client side. */
		cfs_hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist,
					 cc_cache) {
			if (ctx == NULL || ctx->cc_expire == 0 ||
			    ctx->cc_expire > tmp->cc_expire) {
				ctx = tmp;
				/* promote to be root_ctx */
				gsec_kr->gsk_root_ctx = ctx;
			}
		}
	}

	if (ctx) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);
		LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
		atomic_inc(&ctx->cc_refcount);
	}

	spin_unlock(&sec->ps_lock);

	return ctx;
}
#define RVS_CTX_EXPIRE_NICE	(10)

static
void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
				 struct ptlrpc_cli_ctx *new_ctx,
				 struct key *key)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct hlist_node __maybe_unused *hnode;
	struct ptlrpc_cli_ctx *ctx;
	time64_t now;

	LASSERT(sec_is_reverse(sec));

	spin_lock(&sec->ps_lock);

	now = ktime_get_real_seconds();

	/* set all existing ctxs short expiry */
	cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
		if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
			ctx->cc_early_expire = 1;
			ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
		}
	}

	/* if there is already a root_ctx, instead of obsoleting the current
	 * one immediately we let it keep operating for a little while;
	 * hopefully by the time the first backward rpc with the newest ctx is
	 * sent out, the client side already has the peer ctx well
	 * established. */
	ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);

	if (key)
		bind_key_ctx(key, new_ctx);

	spin_unlock(&sec->ps_lock);
}
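/*
 * construct_key_desc() below formats the key description as
 * "<uid>@<secid-in-hex>". For example (hypothetical values), uid 1000 on a
 * sec with ps_id 0x1f yields "1000@1f"; this is the string passed to
 * request_key() and seen by the userspace upcall as the key description.
 */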
static void construct_key_desc(void *buf, int bufsize,
			       struct ptlrpc_sec *sec, uid_t uid)
{
	snprintf(buf, bufsize, "%d@%x", uid, sec->ps_id);
	((char *)buf)[bufsize - 1] = '\0';
}
/****************************************
 * sec apis				*
 ****************************************/
static
struct ptlrpc_sec *gss_sec_create_kr(struct obd_import *imp,
				     struct ptlrpc_svc_ctx *svcctx,
				     struct sptlrpc_flavor *sf)
{
	struct gss_sec_keyring *gsec_kr;
	ENTRY;

	OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
	if (gsec_kr == NULL)
		RETURN(NULL);

	INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
	gsec_kr->gsk_root_ctx = NULL;
	mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_init(&gsec_kr->gsk_uc_lock);
#endif

	if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
				  imp, svcctx, sf))
		goto err_free;

	if (svcctx != NULL &&
	    sec_install_rctx_kr(&gsec_kr->gsk_base.gs_base, svcctx)) {
		gss_sec_destroy_common(&gsec_kr->gsk_base);
		goto err_free;
	}

	RETURN(&gsec_kr->gsk_base.gs_base);

err_free:
	OBD_FREE(gsec_kr, sizeof(*gsec_kr));
	RETURN(NULL);
}
static
void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec *gsec = sec2gsec(sec);
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);

	LASSERT(hlist_empty(&gsec_kr->gsk_clist));
	LASSERT(gsec_kr->gsk_root_ctx == NULL);

	gss_sec_destroy_common(gsec);

	OBD_FREE(gsec_kr, sizeof(*gsec_kr));
}
static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
{
	/* apart from the ROOTONLY flag, treat it as the root user only if the
	 * real uid is 0; euid/fsuid being 0 is handled as a setuid scenario */
	if (sec_is_rootonly(sec) || (vcred->vc_uid == 0))
		return 1;
	else
		return 0;
}
/*
 * kernel 5.3: commit 0f44e4d976f96c6439da0d6717238efa4b91196e
 * keys: Move the user and user-session keyrings to the user_namespace
 *
 * When lookup_user_key is available use the kernel API rather than directly
 * accessing the uid_keyring and session_keyring via the current process
 * credentials.
 */
#ifdef HAVE_LOOKUP_USER_KEY

/* from Linux security/keys/internal.h: */
#ifndef KEY_LOOKUP_FOR_UNLINK
#define KEY_LOOKUP_FOR_UNLINK	0x04
#endif

static struct key *_user_key(key_serial_t id)
{
	key_ref_t ref;

	ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
	if (IS_ERR(ref))
		return NULL;
	return key_ref_to_ptr(ref);
}

static inline struct key *get_user_session_keyring(const struct cred *cred)
{
	return _user_key(KEY_SPEC_USER_SESSION_KEYRING);
}

static inline struct key *get_user_keyring(const struct cred *cred)
{
	return _user_key(KEY_SPEC_USER_KEYRING);
}
#else
static inline struct key *get_user_session_keyring(const struct cred *cred)
{
	return key_get(cred->user->session_keyring);
}

static inline struct key *get_user_keyring(const struct cred *cred)
{
	return key_get(cred->user->uid_keyring);
}
#endif
/*
 * Unlink the request key from the ring it was linked to during request_key().
 * Sadly, we have to 'guess' which keyring it is linked to.
 *
 * FIXME this code is fragile, it depends on how request_key() is implemented.
 */
static void request_key_unlink(struct key *key)
{
	const struct cred *cred = current_cred();
	struct key *ring = NULL;

	switch (cred->jit_keyring) {
	case KEY_REQKEY_DEFL_DEFAULT:
	case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
#ifdef HAVE_GET_REQUEST_KEY_AUTH
		if (cred->request_key_auth) {
			struct request_key_auth *rka;
			struct key *authkey = cred->request_key_auth;

			down_read(&authkey->sem);
			rka = get_request_key_auth(authkey);
			if (!test_bit(KEY_FLAG_REVOKED, &authkey->flags))
				ring = key_get(rka->dest_keyring);
			up_read(&authkey->sem);
			if (ring)
				break;
		}
#endif
		/* fall through */
	case KEY_REQKEY_DEFL_THREAD_KEYRING:
		ring = key_get(cred->thread_keyring);
		if (ring)
			break;
		/* fall through */
	case KEY_REQKEY_DEFL_PROCESS_KEYRING:
		ring = key_get(cred->process_keyring);
		if (ring)
			break;
		/* fall through */
	case KEY_REQKEY_DEFL_SESSION_KEYRING:
		rcu_read_lock();
		ring = key_get(rcu_dereference(cred->session_keyring));
		rcu_read_unlock();
		if (ring)
			break;
		/* fall through */
	case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
		ring = get_user_session_keyring(cred);
		break;
	case KEY_REQKEY_DEFL_USER_KEYRING:
		ring = get_user_keyring(cred);
		break;
	case KEY_REQKEY_DEFL_GROUP_KEYRING:
	default:
		LBUG();
	}

	if (ring) {
		key_unlink(ring, key);
		key_put(ring);
	}
}
static
struct ptlrpc_cli_ctx *gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
					     struct vfs_cred *vcred,
					     int create, int remove_dead)
{
	struct obd_import *imp = sec->ps_import;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct ptlrpc_cli_ctx *ctx = NULL;
	unsigned int is_root = 0, create_new = 0;
	struct key *key;
	char desc[24];
	char *coinfo;
	int coinfo_size;
	const char *sec_part_flags = "";
	char svc_flag = '-';
	ENTRY;

	LASSERT(imp != NULL);
	is_root = user_is_root(sec, vcred);

	/* a small optimization for the root context */
	if (is_root) {
		ctx = sec_lookup_root_ctx_kr(sec);
		/*
		 * Only lookup directly for REVERSE sec, which should never
		 * trigger the keyring upcall.
		 */
		if (ctx || sec_is_reverse(sec))
			RETURN(ctx);
	}

	LASSERT(create != 0);
	/* for the root context, obtain the lock and check again: this time
	 * hold the root upcall lock to make sure nobody else populated a new
	 * root context after the last check. */
	if (is_root) {
		mutex_lock(&gsec_kr->gsk_root_uc_lock);
		/* re-check */
		ctx = sec_lookup_root_ctx_kr(sec);
		if (ctx)
			goto out;

		/* update reverse handle for root user */
		sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();

		switch (sec->ps_part) {
		case LUSTRE_SP_MDT:
			sec_part_flags = "m";
			break;
		case LUSTRE_SP_OST:
			sec_part_flags = "o";
			break;
		case LUSTRE_SP_MGC:
			sec_part_flags = "rmo";
			break;
		case LUSTRE_SP_CLI:
			sec_part_flags = "r";
			break;
		default:
			LBUG();
		}

		switch (SPTLRPC_FLVR_SVC(sec->ps_flvr.sf_rpc)) {
		case SPTLRPC_SVC_NULL:
			svc_flag = 'n';
			break;
		case SPTLRPC_SVC_AUTH:
			svc_flag = 'a';
			break;
		case SPTLRPC_SVC_INTG:
			svc_flag = 'i';
			break;
		case SPTLRPC_SVC_PRIV:
			svc_flag = 'p';
			break;
		default:
			LBUG();
		}
	}
	/* in the setuid case the key will be constructed as owned by
	 * fsuid/fsgid, but we do authentication based on the real uid/gid.
	 * The key permission bits will be exactly POS_ALL, so only processes
	 * that possess this key can access it, although the quota might be
	 * counted against others (fsuid/fsgid).
	 *
	 * the keyring will use fsuid/fsgid as upcall parameters, so we have
	 * to encode the real uid/gid into the callout info.
	 */

	/* But first we need to make sure the obd type is supported */
	if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MGC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSP_NAME)) {
		CERROR("obd %s is not a supported device\n",
		       imp->imp_obd->obd_name);
		GOTO(out, ctx = NULL);
	}
	construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

	/* callout info format:
	 * secid:mech:uid:gid:sec_flags:svc_flag:svc_type:peer_nid:target_uuid:
	 * self_nid:pid
	 */
	coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
	OBD_ALLOC(coinfo, coinfo_size);
	if (coinfo == NULL)
		goto out;

	/* The last callout parameter is the pid of the process whose
	 * namespace will be used for credentials' retrieval.
	 * For a user's credentials (in which case sec_part_flags is empty),
	 * use the current PID instead of the import's reference PID to get
	 * the reference namespace. */
	snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%c:%d:%#llx:%s:%#llx:%d",
		 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
		 vcred->vc_uid, vcred->vc_gid,
		 sec_part_flags, svc_flag, import_to_gss_svc(imp),
		 imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name,
		 imp->imp_connection->c_self,
		 sec_part_flags[0] == '\0' ?
		       current_pid() : imp->imp_sec_refpid);
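	/* With made-up values the resulting callout info looks like
	 * "5:krb5:1000:1000::-:2:0x12345:lustre-MDT0000-mdc:0x678:4321"
	 * (for a non-root user sec_part_flags stays empty and svc_flag stays
	 * '-'); the userspace upcall splits these fields back apart. */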
	CDEBUG(D_SEC, "requesting key for %s\n", desc);

	keyring_upcall_lock(gsec_kr);
	key = request_key(&gss_key_type, desc, coinfo);
	keyring_upcall_unlock(gsec_kr);

	OBD_FREE(coinfo, coinfo_size);

	if (IS_ERR(key)) {
		CERROR("failed request key: %ld\n", PTR_ERR(key));
		goto out;
	}
	CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc);

	/* once payload.data is pointed at a ctx, it never changes until we
	 * de-associate them; but a parallel request_key() may return a key
	 * with payload.data == NULL at the same time, so we still need the
	 * write lock of key->sem to serialize them. */
	down_write(&key->sem);
	ctx = key_get_payload(key, 0);
	if (likely(ctx)) {
		LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
		LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
		LASSERT(ll_read_key_usage(key) >= 2);

		/* simply take a ref and return. It is the upper layer's
		 * responsibility to detect & replace a dead ctx. */
		atomic_inc(&ctx->cc_refcount);
	} else {
		/* pre-initialize with a cli_ctx. This can't be done in
		 * key_instantiate() because we don't have enough information
		 * there. */
		ctx = ctx_create_kr(sec, vcred);
		if (ctx != NULL) {
			ctx_enlist_kr(ctx, is_root, 0);
			bind_key_ctx(key, ctx);

			ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);

			CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
			       key, ctx, sec);
		} else {
			/* we would prefer to call key_revoke(), but we want
			 * to revoke the key within this key->sem locked
			 * period. */
			key_revoke_locked(key);
		}

		create_new = 1;
	}

	up_write(&key->sem);

	if (is_root && create_new)
		request_key_unlink(key);

	key_put(key);
out:
	if (is_root)
		mutex_unlock(&gsec_kr->gsk_root_uc_lock);
	RETURN(ctx);
}
static
void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
			    struct ptlrpc_cli_ctx *ctx,
			    int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);

	ctx_release_kr(ctx, sync);
}
/*
 * Flush the context of a normal user: we have to resort to the keyring
 * itself to find out which contexts belong to us.
 *
 * Note we are only expected to flush _our own_ context: the "uid" is
 * ignored in the search.
 */
static
void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
			     uid_t uid, int grace, int force)
{
	struct key *key;
	char desc[24];

	/* nothing to do for reverse or rootonly sec */
	if (sec_is_reverse(sec) || sec_is_rootonly(sec))
		return;

	construct_key_desc(desc, sizeof(desc), sec, uid);

	/* there should be only one valid key, but we loop just in case of
	 * any weirdness */
	for (;;) {
		key = request_key(&gss_key_type, desc, NULL);
		if (IS_ERR(key)) {
			CDEBUG(D_SEC, "No more key found for current user\n");
			break;
		}

		down_write(&key->sem);

		kill_key_locked(key);

		/* kill_key_locked() should usually revoke the key, but we
		 * revoke it again to make sure, e.g. in some cases the key
		 * may not be well coupled with a context. */
		key_revoke_locked(key);

		up_write(&key->sem);

		request_key_unlink(key);

		key_put(key);
	}
}
/*
 * Flush the context of root or all users: we iterate through the list.
 */
static
void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace,
			     int force)
{
	struct gss_sec_keyring *gsec_kr;
	struct hlist_head freelist = HLIST_HEAD_INIT;
	struct hlist_node __maybe_unused *pos, *next;
	struct ptlrpc_cli_ctx *ctx;
	ENTRY;

	gsec_kr = sec2gsec_keyring(sec);

	spin_lock(&sec->ps_lock);
	cfs_hlist_for_each_entry_safe(ctx, pos, next,
				      &gsec_kr->gsk_clist, cc_cache) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);

		if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
			continue;

		/* at this moment there are at least 2 base references:
		 * the key association and being in-list. */
		if (atomic_read(&ctx->cc_refcount) > 2) {
			if (!force)
				continue;
			CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
			      ctx, ctx->cc_vcred.vc_uid,
			      sec2target_str(ctx->cc_sec),
			      atomic_read(&ctx->cc_refcount) - 2);
		}

		set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
		if (!grace)
			clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

		atomic_inc(&ctx->cc_refcount);

		if (ctx_unlist_kr(ctx, 1)) {
			hlist_add_head(&ctx->cc_cache, &freelist);
		} else {
			LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
			atomic_dec(&ctx->cc_refcount);
		}
	}
	spin_unlock(&sec->ps_lock);

	dispose_ctx_list_kr(&freelist);
	EXIT;
}
static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
			       uid_t uid, int grace, int force)
{
	ENTRY;

	CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
	       sec, atomic_read(&sec->ps_refcount),
	       atomic_read(&sec->ps_nctx),
	       uid, grace, force);

	if (uid != -1 && uid != 0)
		flush_user_ctx_cache_kr(sec, uid, grace, force);
	else
		flush_spec_ctx_cache_kr(sec, uid, grace, force);

	RETURN(0);
}
static
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct hlist_head freelist = HLIST_HEAD_INIT;
	struct hlist_node __maybe_unused *pos, *next;
	struct ptlrpc_cli_ctx *ctx;
	ENTRY;

	CWARN("running gc\n");

	spin_lock(&sec->ps_lock);
	cfs_hlist_for_each_entry_safe(ctx, pos, next,
				      &gsec_kr->gsk_clist, cc_cache) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);

		atomic_inc(&ctx->cc_refcount);

		if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
			hlist_add_head(&ctx->cc_cache, &freelist);
			CWARN("unhashed ctx %p\n", ctx);
		} else {
			LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
			atomic_dec(&ctx->cc_refcount);
		}
	}
	spin_unlock(&sec->ps_lock);

	dispose_ctx_list_kr(&freelist);
	EXIT;
}
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct hlist_node __maybe_unused *pos, *next;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx;
	time64_t now = ktime_get_real_seconds();
	ENTRY;

	spin_lock(&sec->ps_lock);
	cfs_hlist_for_each_entry_safe(ctx, pos, next,
				      &gsec_kr->gsk_clist, cc_cache) {
		struct key *key;
		char flags_str[40];
		char mech[40];

		gctx = ctx2gctx(ctx);
		key = ctx2gctx_keyring(ctx)->gck_key;

		gss_cli_ctx_flags2str(ctx->cc_flags,
				      flags_str, sizeof(flags_str));

		if (gctx->gc_mechctx)
			lgss_display(gctx->gc_mechctx, mech, sizeof(mech));
		else
			snprintf(mech, sizeof(mech), "N/A");
		mech[sizeof(mech) - 1] = '\0';

		seq_printf(seq,
			   "%p: uid %u, ref %d, expire %lld(%+lld), fl %s, seq %d, win %u, key %08x(ref %d), hdl %#llx:%#llx, mech: %s\n",
			   ctx, ctx->cc_vcred.vc_uid,
			   atomic_read(&ctx->cc_refcount),
			   ctx->cc_expire,
			   ctx->cc_expire ? ctx->cc_expire - now : 0,
			   flags_str,
			   atomic_read(&gctx->gc_seq),
			   gctx->gc_win,
			   key ? key->serial : 0,
			   key ? ll_read_key_usage(key) : 0,
			   gss_handle_to_u64(&gctx->gc_handle),
			   gss_handle_to_u64(&gctx->gc_svc_handle),
			   mech);
	}
	spin_unlock(&sec->ps_lock);

	RETURN(0);
}
/****************************************
 * cli_ctx apis				*
 ****************************************/
static
int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
{
	/* upcall is already on the way */
	return 0;
}
static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	if (cli_ctx_check_death(ctx)) {
		kill_ctx_kr(ctx);
		return 1;
	}

	if (cli_ctx_is_ready(ctx))
		return 0;
	return 1;
}
static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	cli_ctx_expire(ctx);
	kill_ctx_kr(ctx);
}
/****************************************
 * (reverse) service			*
 ****************************************/

/*
 * A reverse context could have nothing to do with keyrings; here we still
 * keep the version that binds to a key, for future reference.
 */
#define HAVE_REVERSE_CTX_NOKEY

#ifdef HAVE_REVERSE_CTX_NOKEY
static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_cli_ctx *cli_ctx;
	struct vfs_cred vcred = { .vc_uid = 0 };
	int rc;

	cli_ctx = ctx_create_kr(sec, &vcred);
	if (cli_ctx == NULL)
		return -ENOMEM;

	rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
	if (rc) {
		CERROR("failed copy reverse cli ctx: %d\n", rc);

		ctx_put_kr(cli_ctx, 1);
		return rc;
	}

	rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);

	ctx_put_kr(cli_ctx, 1);

	return 0;
}
#else /* ! HAVE_REVERSE_CTX_NOKEY */

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_cli_ctx *cli_ctx = NULL;
	struct key *key;
	struct vfs_cred vcred = { .vc_uid = 0 };
	char desc[64];
	int rc;

	construct_key_desc(desc, sizeof(desc), sec, 0);

	key = key_alloc(&gss_key_type, desc, 0, 0,
			KEY_POS_ALL | KEY_USR_ALL, 1);
	if (IS_ERR(key)) {
		CERROR("failed to alloc key: %ld\n", PTR_ERR(key));
		return PTR_ERR(key);
	}

	rc = key_instantiate_and_link(key, NULL, 0, NULL, NULL);
	if (rc) {
		CERROR("failed to instantiate key: %d\n", rc);
		goto err_revoke;
	}

	down_write(&key->sem);

	LASSERT(!key_get_payload(key, 0));

	cli_ctx = ctx_create_kr(sec, &vcred);
	if (cli_ctx == NULL) {
		rc = -ENOMEM;
		goto err_up;
	}

	rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
	if (rc) {
		CERROR("failed copy reverse cli ctx: %d\n", rc);
		goto err_put;
	}

	rvs_sec_install_root_ctx_kr(sec, cli_ctx, key);

	ctx_put_kr(cli_ctx, 1);
	up_write(&key->sem);

	rc = 0;
	goto out;

err_put:
	ctx_put_kr(cli_ctx, 1);
err_up:
	up_write(&key->sem);
err_revoke:
	key_revoke(key);
out:
	key_put(key);
	return rc;
}

#endif /* HAVE_REVERSE_CTX_NOKEY */
/****************************************
 * service apis				*
 ****************************************/
static
int gss_svc_accept_kr(struct ptlrpc_request *req)
{
	return gss_svc_accept(&gss_policy_keyring, req);
}

static
int gss_svc_install_rctx_kr(struct obd_import *imp,
			    struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_sec *sec;
	int rc;

	sec = sptlrpc_import_sec_ref(imp);

	rc = sec_install_rctx_kr(sec, svc_ctx);
	sptlrpc_sec_put(sec);

	return rc;
}
/****************************************
 * key apis				*
 ****************************************/
#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
static
int gss_kt_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
	const void *data = prep->data;
	size_t datalen = prep->datalen;
#else
static
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
#endif
	int rc;
	ENTRY;

	if (data != NULL || datalen != 0) {
		CERROR("invalid: data %p, len %lu\n", data, (long)datalen);
		RETURN(-EINVAL);
	}

	if (key_get_payload(key, 0)) {
		CERROR("key already has payload\n");
		RETURN(-EINVAL);
	}

	/* link the key to the session keyring, so the context negotiation
	 * rpc subsequently fired from user space can find this key. The link
	 * will be removed automatically when the upcall processes die.
	 *
	 * we can't do this through keyctl from userspace, because the upcall
	 * might be neither possessor nor owner of the key (setuid).
	 *
	 * the session keyring is created upon upcall, and doesn't change all
	 * the way until the upcall finishes, so the rcu lock is not needed
	 * here.
	 */
	LASSERT(current_cred()->session_keyring);

	rc = key_link(current_cred()->session_keyring, key);
	if (rc) {
		CERROR("failed to link key %08x to keyring %08x: %d\n",
		       key->serial,
		       current_cred()->session_keyring->serial, rc);
		RETURN(rc);
	}

	CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key,
	       key_get_payload(key, 0));
	RETURN(0);
}
/*
 * Called with the key semaphore write-locked, which means we can operate on
 * the context without fear of losing the refcount.
 */
#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
static
int gss_kt_update(struct key *key, struct key_preparsed_payload *prep)
{
	const void *data = prep->data;
	__u32 datalen32 = (__u32) prep->datalen;
#else
static
int gss_kt_update(struct key *key, const void *data, size_t datalen)
{
	__u32 datalen32 = (__u32) datalen;
#endif
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);
	struct gss_cli_ctx *gctx;
	rawobj_t tmpobj = RAWOBJ_EMPTY;
	int rc;
	ENTRY;

	if (data == NULL || datalen32 == 0) {
		CWARN("invalid: data %p, len %lu\n", data, (long)datalen32);
		RETURN(-EINVAL);
	}
	/* if the upcall finished negotiation too fast (most likely because a
	 * local error happened) and called kt_update(), the ctx might still
	 * be NULL. But the key will eventually be associated with a context,
	 * or be revoked. If the key status is fine, return -EAGAIN to let
	 * userspace sleep a while and call again. */
	if (ctx == NULL) {
		CDEBUG(D_SEC, "update too soon: key %p(%x) flags %lx\n",
		       key, key->serial, key->flags);

		rc = key_validate(key);
		if (rc == 0)
			RETURN(-EAGAIN);
		else
			RETURN(rc);
	}

	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	ctx_clear_timer_kr(ctx);

	/* don't proceed if already refreshed */
	if (cli_ctx_is_refreshed(ctx)) {
		CWARN("ctx already done refresh\n");
		RETURN(0);
	}

	sptlrpc_cli_ctx_get(ctx);
	gctx = ctx2gctx(ctx);
	rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win,
				  sizeof(gctx->gc_win));
	if (rc) {
		CERROR("failed extract seq_win\n");
		goto out;
	}

	if (gctx->gc_win == 0) {
		__u32 nego_rpc_err, nego_gss_err;

		rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
					  sizeof(nego_rpc_err));
		if (rc) {
			CERROR("cannot extract RPC: rc = %d\n", rc);
			goto out;
		}

		rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
					  sizeof(nego_gss_err));
		if (rc) {
			CERROR("failed to extract gss rc = %d\n", rc);
			goto out;
		}

		CERROR("negotiation: rpc err %d, gss err %x\n",
		       nego_rpc_err, nego_gss_err);

		rc = nego_rpc_err ? nego_rpc_err : -EACCES;
	} else {
		rc = rawobj_extract_local_alloc(&gctx->gc_handle,
						(__u32 **) &data, &datalen32);
		if (rc) {
			CERROR("failed extract handle\n");
			goto out;
		}

		rc = rawobj_extract_local(&tmpobj,
					  (__u32 **) &data, &datalen32);
		if (rc) {
			CERROR("failed extract mech\n");
			goto out;
		}

		rc = lgss_import_sec_context(&tmpobj,
					     sec2gsec(ctx->cc_sec)->gs_mech,
					     &gctx->gc_mechctx);
		if (rc != GSS_S_COMPLETE)
			CERROR("failed import context\n");
		else
			rc = 0;
	}
out:
	/* we don't care what the current status of this ctx is, even if
	 * someone else is operating on the ctx at the same time. We just add
	 * our own opinion here. */
	if (rc == 0) {
		gss_cli_ctx_uptodate(gctx);
	} else {
		/* this will also revoke the key. It has to be done before
		 * waking up waiters, otherwise they could find the stale
		 * key */
		kill_key_locked(key);

		cli_ctx_expire(ctx);

		if (rc != -ERESTART)
			set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
	}

	/* let user space think it's a success */
	sptlrpc_cli_ctx_put(ctx, 1);
	RETURN(0);
}
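/*
 * The downcall buffer consumed by gss_kt_update() above is, as read by that
 * code, laid out as:
 *
 *	__u32 seq_win;		// sequence window; 0 signals an error
 *	-- if seq_win == 0 --
 *	__u32 rpc_err;		// negotiation rpc error
 *	__u32 gss_err;		// negotiation gss error
 *	-- else --
 *	rawobj_t handle;	// context handle (length + data)
 *	rawobj_t mech_token;	// mech context token (length + data)
 *
 * This is just a reading of the extraction sequence above, not a normative
 * description of the userspace downcall format.
 */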
#ifndef HAVE_KEY_MATCH_DATA
static int
gss_kt_match(const struct key *key, const void *desc)
{
	return strcmp(key->description, (const char *) desc) == 0 &&
	       !test_bit(KEY_FLAG_REVOKED, &key->flags);
}
#else /* ! HAVE_KEY_MATCH_DATA */
static bool
gss_kt_match(const struct key *key, const struct key_match_data *match_data)
{
	const char *desc = match_data->raw_data;

	return strcmp(key->description, desc) == 0 &&
	       !test_bit(KEY_FLAG_REVOKED, &key->flags);
}

/*
 * Preparse the match criterion.
 */
static int gss_kt_match_preparse(struct key_match_data *match_data)
{
	match_data->lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT;
	match_data->cmp = gss_kt_match;
	return 0;
}
#endif /* HAVE_KEY_MATCH_DATA */
static
void gss_kt_destroy(struct key *key)
{
	LASSERT(!key_get_payload(key, 0));
	CDEBUG(D_SEC, "destroy key %p\n", key);
}

static
void gss_kt_describe(const struct key *key, struct seq_file *s)
{
	if (key->description == NULL)
		seq_puts(s, "[null]");
	else
		seq_puts(s, key->description);
}
static struct key_type gss_key_type =
{
	.name		= "lgssc",
	.def_datalen	= 0,
	.instantiate	= gss_kt_instantiate,
	.update		= gss_kt_update,
#ifdef HAVE_KEY_MATCH_DATA
	.match_preparse	= gss_kt_match_preparse,
#else
	.match		= gss_kt_match,
#endif
	.destroy	= gss_kt_destroy,
	.describe	= gss_kt_describe,
};
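/*
 * For this key type to work end to end, userspace needs a request-key
 * handler registered for "lgssc". With stock Lustre packaging this is
 * typically a line along the lines of (exact path and arguments may vary by
 * installation):
 *
 *	create lgssc * * /usr/sbin/lgss_keyring %o %k %t %d %c %u %g %T %P %S
 *
 * in /etc/request-key.conf or /etc/request-key.d/lgssc.conf, so that
 * request_key(&gss_key_type, ...) above spawns the negotiation upcall.
 */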
/****************************************
 * lustre gss keyring policy		*
 ****************************************/

static struct ptlrpc_ctx_ops gss_keyring_ctxops = {
	.match			= gss_cli_ctx_match,
	.refresh		= gss_cli_ctx_refresh_kr,
	.validate		= gss_cli_ctx_validate_kr,
	.die			= gss_cli_ctx_die_kr,
	.sign			= gss_cli_ctx_sign,
	.verify			= gss_cli_ctx_verify,
	.seal			= gss_cli_ctx_seal,
	.unseal			= gss_cli_ctx_unseal,
	.wrap_bulk		= gss_cli_ctx_wrap_bulk,
	.unwrap_bulk		= gss_cli_ctx_unwrap_bulk,
};
static struct ptlrpc_sec_cops gss_sec_keyring_cops = {
	.create_sec		= gss_sec_create_kr,
	.destroy_sec		= gss_sec_destroy_kr,
	.kill_sec		= gss_sec_kill,
	.lookup_ctx		= gss_sec_lookup_ctx_kr,
	.release_ctx		= gss_sec_release_ctx_kr,
	.flush_ctx_cache	= gss_sec_flush_ctx_cache_kr,
	.gc_ctx			= gss_sec_gc_ctx_kr,
	.install_rctx		= gss_sec_install_rctx,
	.alloc_reqbuf		= gss_alloc_reqbuf,
	.free_reqbuf		= gss_free_reqbuf,
	.alloc_repbuf		= gss_alloc_repbuf,
	.free_repbuf		= gss_free_repbuf,
	.enlarge_reqbuf		= gss_enlarge_reqbuf,
	.display		= gss_sec_display_kr,
};
static struct ptlrpc_sec_sops gss_sec_keyring_sops = {
	.accept			= gss_svc_accept_kr,
	.invalidate_ctx		= gss_svc_invalidate_ctx,
	.alloc_rs		= gss_svc_alloc_rs,
	.authorize		= gss_svc_authorize,
	.free_rs		= gss_svc_free_rs,
	.free_ctx		= gss_svc_free_ctx,
	.prep_bulk		= gss_svc_prep_bulk,
	.unwrap_bulk		= gss_svc_unwrap_bulk,
	.wrap_bulk		= gss_svc_wrap_bulk,
	.install_rctx		= gss_svc_install_rctx_kr,
};
static struct ptlrpc_sec_policy gss_policy_keyring = {
	.sp_owner		= THIS_MODULE,
	.sp_name		= "gss.keyring",
	.sp_policy		= SPTLRPC_POLICY_GSS,
	.sp_cops		= &gss_sec_keyring_cops,
	.sp_sops		= &gss_sec_keyring_sops,
};
int __init gss_init_keyring(void)
{
	int rc;

	rc = register_key_type(&gss_key_type);
	if (rc) {
		CERROR("failed to register keyring type: %d\n", rc);
		return rc;
	}

	rc = sptlrpc_register_policy(&gss_policy_keyring);
	if (rc) {
		unregister_key_type(&gss_key_type);
		return rc;
	}

	return 0;
}

void __exit gss_exit_keyring(void)
{
	unregister_key_type(&gss_key_type);
	sptlrpc_unregister_policy(&gss_policy_keyring);
}