// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/atomic.h>

#include <obd_class.h>
#include <obd_support.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>

#include "gss_internal.h"

#ifdef HAVE_GET_REQUEST_KEY_AUTH
#include <keys/request_key_auth-type.h>
#endif
static struct ptlrpc_sec_policy gss_policy_keyring;
static struct ptlrpc_ctx_ops gss_keyring_ctxops;
static struct key_type gss_key_type;

static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			       struct ptlrpc_svc_ctx *svc_ctx);
static void request_key_unlink(struct key *key, bool fullsearch);
/*
 * The timeout is only for the case where the upcall child process dies
 * abnormally; in any other case it will eventually update the kernel key.
 *
 * FIXME: we had better incorporate the client & server side upcall timeouts
 * into the Adaptive Timeouts framework, but we need to figure out how to make
 * sure the kernel knows whether an upcall process is still in progress or has
 * died unexpectedly.
 */
#define KEYRING_UPCALL_TIMEOUT	(obd_timeout + obd_timeout)
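/*
 * Worked example (an assumption for illustration, not a tuning
 * recommendation): with the default obd_timeout of 100 seconds, the
 * negotiation timer armed in ctx_start_timer_kr() fires after
 * 100 + 100 = 200 seconds, at which point ctx_upcall_timeout_kr()
 * expires the context and invalidates the still-uninstantiated key.
 */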
/* Check caller's namespace in gss_keyring upcall */
unsigned int gss_check_upcall_ns = 1;
/****************************************
 * internal helpers                     *
 ****************************************/
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void key_invalidate_locked(struct key *key)
{
	set_bit(KEY_FLAG_INVALIDATED, &key->flags);
}
static void ctx_upcall_timeout_kr(cfs_timer_cb_arg_t data)
{
	struct gss_cli_ctx_keyring *gctx_kr = cfs_from_timer(gctx_kr,
							     data, gck_timer);
	struct ptlrpc_cli_ctx *ctx = &(gctx_kr->gck_base.gc_base);
	struct obd_import *imp = ctx->cc_sec->ps_import;
	struct key *key = gctx_kr->gck_key;

	if (key)
		CDEBUG(D_SEC,
		       "%s: GSS context (%p) negotiation timeout, invalidating key (%p)\n",
		       imp->imp_obd->obd_name, ctx, key);
	else
		CDEBUG(D_SEC,
		       "%s: GSS context (%p) negotiation timeout, ignoring already unlinked key\n",
		       imp->imp_obd->obd_name, ctx);

	cli_ctx_expire(ctx);
	if (key)
		key_invalidate_locked(key);
}
static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, time64_t timeout)
{
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
	struct timer_list *timer = &gctx_kr->gck_timer;

	CDEBUG(D_SEC, "ctx %p: start timer %llds\n", ctx, timeout);

	cfs_timer_setup(timer, ctx_upcall_timeout_kr,
			(unsigned long)gctx_kr, 0);
	timer->expires = cfs_time_seconds(timeout) + jiffies;
	add_timer(timer);
}
/*
 * Caller should make sure there is no race with other threads.
 */
static
void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
	struct timer_list *timer = &gctx_kr->gck_timer;

	CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);

	timer_delete_sync(timer);
}
static
struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
				     struct vfs_cred *vcred)
{
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx_keyring *gctx_kr;

	OBD_ALLOC_PTR(gctx_kr);
	if (gctx_kr == NULL)
		return NULL;

	cfs_timer_setup(&gctx_kr->gck_timer, NULL, 0, 0);

	ctx = &gctx_kr->gck_base.gc_base;

	if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
		OBD_FREE_PTR(gctx_kr);
		return NULL;
	}

	ctx->cc_expire = ktime_get_real_seconds() + KEYRING_UPCALL_TIMEOUT;
	clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
	atomic_inc(&ctx->cc_refcount); /* for the caller */

	return ctx;
}
static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);

	CDEBUG(D_SEC, "destroying ctx %p\n", ctx);

	/* at this time the association with the key has been broken. */
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
	LASSERT(gctx_kr->gck_key == NULL);

	ctx_clear_timer_kr(ctx);

	if (gss_cli_ctx_fini_common(sec, ctx))
		return;

	OBD_FREE_PTR(gctx_kr);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}
static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
	if (sync) {
		ctx_destroy_kr(ctx);
	} else {
		atomic_inc(&ctx->cc_refcount);
		sptlrpc_gc_add_ctx(ctx);
	}
}
static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	if (atomic_dec_and_test(&ctx->cc_refcount))
		ctx_release_kr(ctx, sync);
}
/*
 * key <-> ctx association and rules:
 * - a ctx might not be bound to any key
 * - key/ctx binding is protected by the key semaphore (if the key is present)
 * - key and ctx each take a reference on the other
 * - ctx enlist/unlist is protected by the ctx spinlock
 * - never enlist a ctx after it has been unlisted
 * - whoever does enlist should also do bind; lock the key before enlisting:
 *   - lock key -> lock ctx -> enlist -> unlock ctx -> bind -> unlock key
 * - whoever does unlist should also unbind, in one of two orders:
 *   - lock key -> lock ctx -> unlist -> unlock ctx -> unbind -> unlock key
 *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
 */
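/*
 * For illustration only, a sketch (not compiled) of the enlist+bind
 * ordering prescribed above, as it effectively happens in
 * gss_sec_lookup_ctx_kr() when a new context is installed:
 *
 *	down_write(&key->sem);		// lock key
 *	ctx_enlist_kr(ctx, is_root, 0);	// takes/releases sec->ps_lock itself
 *	bind_key_ctx(key, ctx);		// bind under the key semaphore
 *	up_write(&key->sem);		// unlock key
 */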
static inline void spin_lock_if(spinlock_t *lock, int condition)
{
	if (condition)
		spin_lock(lock);
}

static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
	if (condition)
		spin_unlock(lock);
}
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	spin_lock_if(&sec->ps_lock, !locked);

	atomic_inc(&ctx->cc_refcount);
	set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
	hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
	if (is_root)
		gsec_kr->gsk_root_ctx = ctx;

	spin_unlock_if(&sec->ps_lock, !locked);
}
/*
 * Note: after this gets called, the caller should not access the ctx again
 * because it might have been freed, unless the caller holds at least one
 * refcount of its own.
 *
 * Returns non-zero if we indeed unlisted this ctx.
 */
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	/* if the cached bit has gone, leave the job to whoever is doing it */
	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
		return 0;

	/* drop ref inside spin lock to prevent race with other operations */
	spin_lock_if(&sec->ps_lock, !locked);

	if (gsec_kr->gsk_root_ctx == ctx)
		gsec_kr->gsk_root_ctx = NULL;
	hlist_del_init(&ctx->cc_cache);
	atomic_dec(&ctx->cc_refcount);

	spin_unlock_if(&sec->ps_lock, !locked);

	return 1;
}
/*
 * Get specific payload. Newer kernels support 4 slots.
 */
static void *
key_get_payload(struct key *key, unsigned int index)
{
	void *key_ptr = NULL;

#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
	key_ptr = key->payload.data[index];
#else
	if (!index)
		key_ptr = key->payload.data;
#endif
	return key_ptr;
}
/*
 * Set specific payload. Newer kernels support 4 slots.
 */
static int key_set_payload(struct key *key, unsigned int index,
			   struct ptlrpc_cli_ctx *ctx)
{
	int rc = -EINVAL;

#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
	if (index < 4) {
		key->payload.data[index] = ctx;
#else
	if (!index) {
		key->payload.data = ctx;
#endif
		rc = 0;
	}
	return rc;
}
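/*
 * Note: this file only ever uses payload slot 0, which holds the
 * ptlrpc_cli_ctx bound to the key. A minimal usage sketch:
 *
 *	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);
 *	if (ctx == NULL)
 *		;	// key not (yet) bound to a context
 */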
/*
 * Bind a key and a ctx together.
 * Caller must hold the write lock of the key, as well as a ref on key & ctx.
 */
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ll_read_key_usage(key) > 0);
	LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
	LASSERT(!key_get_payload(key, 0));

	/* at this time the context may or may not be in the list. */
	key_get(key);
	atomic_inc(&ctx->cc_refcount);
	ctx2gctx_keyring(ctx)->gck_key = key;
	LASSERT(!key_set_payload(key, 0, ctx));
}
/*
 * Unbind a key and a ctx.
 * Caller must hold the write lock, as well as a ref on the key.
 */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
	/* give up on an invalidated or empty key,
	 * someone else already took care of it
	 */
	if (test_bit(KEY_FLAG_INVALIDATED, &key->flags) ||
	    key_get_payload(key, 0) != ctx) {
		CDEBUG(D_SEC, "key %08x already handled\n", key->serial);
		return;
	}

	/* must invalidate the key, or others may find it during lookup */
	key_invalidate_locked(key);
	request_key_unlink(key, false);

	key_set_payload(key, 0, NULL);
	ctx2gctx_keyring(ctx)->gck_key = NULL;

	/* once the ctx is split from the key, the timer is meaningless */
	ctx_clear_timer_kr(ctx);

	/* key and ctx each drop the reference taken on the other at bind */
	ctx_put_kr(ctx, 1);
	key_put(key);
}
/*
 * Given a ctx, unbind it from its coupled key, if any.
 * Unbind can only be called once, so we don't worry about the key being
 * released by somebody else.
 */
static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct key *key = ctx2gctx_keyring(ctx)->gck_key;

	if (key) {
		LASSERT(key_get_payload(key, 0) == ctx);

		key_get(key);
		down_write(&key->sem);
		unbind_key_ctx(key, ctx);
		up_write(&key->sem);
		key_put(key);
	}
}
/*
 * Given a key, unbind it from its coupled ctx, if any.
 * Caller must hold the write lock, as well as a ref on the key.
 */
static void unbind_key_locked(struct key *key)
{
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);

	if (ctx)
		unbind_key_ctx(key, ctx);
}
/*
 * Unlist a ctx, and unbind it from its coupled key.
 */
static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
	if (ctx_unlist_kr(ctx, 0))
		unbind_ctx_kr(ctx);
}
/*
 * Given a key, unlist and unbind it from the coupled ctx (if any).
 * Caller must hold the write lock, as well as a ref on the key.
 */
static void kill_key_locked(struct key *key)
{
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);

	if (ctx && ctx_unlist_kr(ctx, 0))
		unbind_key_locked(key);
}
/*
 * Caller should hold one ref on contexts in freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
	struct hlist_node *next;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx;

	hlist_for_each_entry_safe(ctx, next, freelist, cc_cache) {
		hlist_del_init(&ctx->cc_cache);

		/* reverse ctx: update current seq to the buddy svcctx if it
		 * exists. Ideally this should be done at
		 * gss_cli_ctx_finalize(), but ctx destroy could be delayed
		 * because:
		 * 1) the ctx still has references;
		 * 2) ctx destroy is asynchronous;
		 * and the reverse import's inval_all_ctx() requires this be
		 * done _immediately_, otherwise a newly created reverse ctx
		 * might copy a very old sequence number from the svcctx.
		 */
		gctx = ctx2gctx(ctx);
		if (!rawobj_empty(&gctx->gc_svc_handle) &&
		    sec_is_reverse(gctx->gc_base.cc_sec)) {
			gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
					(__u32) atomic_read(&gctx->gc_seq));
		}

		/* we need to wake up waiting reqs here. The context might
		 * be force-released before the upcall finishes, and then
		 * the late-arriving downcall could not find the ctx at all.
		 */
		sptlrpc_cli_ctx_wakeup(ctx);

		unbind_ctx_kr(ctx);
		ctx_put_kr(ctx, 0);
	}
}
/*
 * Lookup a root context directly in a sec; return the root ctx with a
 * reference taken, or NULL.
 */
static
struct ptlrpc_cli_ctx *sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct ptlrpc_cli_ctx *ctx = NULL;
	time64_t now = ktime_get_real_seconds();

	spin_lock(&sec->ps_lock);

	ctx = gsec_kr->gsk_root_ctx;

	/* Need to find a valid rev ctx if we do not have one yet,
	 * or if it is expired.
	 */
	if (unlikely(sec_is_reverse(sec)) &&
	    (ctx == NULL || ctx->cc_expire < now)) {
		struct ptlrpc_cli_ctx *tmp;

		/* For a reverse context, browse the list and pick the one
		 * with the shortest expire time that has not expired yet.
		 * This one is most likely to have an established peer context
		 * on the client side.
		 */
		hlist_for_each_entry(tmp, &gsec_kr->gsk_clist, cc_cache) {
			if (ctx == NULL || ctx->cc_expire == 0 ||
			    (tmp->cc_expire > now &&
			     tmp->cc_expire < ctx->cc_expire) ||
			    (ctx->cc_expire < now &&
			     tmp->cc_expire > ctx->cc_expire)) {
				ctx = tmp;
				/* promote to be root_ctx */
				gsec_kr->gsk_root_ctx = ctx;
			}
		}
	}

	if (ctx) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);
		LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
		atomic_inc(&ctx->cc_refcount);
	}

	spin_unlock(&sec->ps_lock);

	return ctx;
}
#define RVS_CTX_EXPIRE_NICE (10)
static
void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
				 struct ptlrpc_cli_ctx *new_ctx,
				 struct key *key)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct ptlrpc_cli_ctx *ctx;
	struct hlist_node *next;
	time64_t now;

	LASSERT(sec_is_reverse(sec));

	spin_lock(&sec->ps_lock);

	now = ktime_get_real_seconds();

	/* set all existing ctxs short expiry */
	hlist_for_each_entry_safe(ctx, next, &gsec_kr->gsk_clist, cc_cache) {
		if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
			ctx->cc_early_expire = 1;
			ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
		} else if (ctx != gsec_kr->gsk_root_ctx &&
			   ctx->cc_expire < now) {
			/* unlist expired context to remove it from gsk_clist */
			if (ctx_unlist_kr(ctx, 1)) {
				/* release unlisted ctx to destroy it */
				set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
				ctx_release_kr(ctx, 1);
			}
		}
	}

	/* If a root_ctx is already there, instead of obsoleting it
	 * immediately, we let it keep operating for a little while;
	 * hopefully by the time the first backward RPC with the newest ctx
	 * is sent out, the client side already has the peer ctx well
	 * established.
	 */
	ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);

	if (key)
		bind_key_ctx(key, new_ctx);

	spin_unlock(&sec->ps_lock);
}
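/*
 * Worked example (made-up numbers, for illustration): if a new reverse
 * context is installed at t=1000s while an old root_ctx still has
 * cc_expire=1800s, the loop above clamps the old ctx to
 * cc_expire = 1000 + RVS_CTX_EXPIRE_NICE = 1010s, so it keeps serving
 * backward RPCs for up to ~10 more seconds while the peer establishes
 * the new context.
 */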
static void construct_key_desc(void *buf, int bufsize,
			       struct ptlrpc_sec *sec, uid_t uid)
{
	snprintf(buf, bufsize, "%d@%x", uid, sec->ps_id);
	((char *)buf)[bufsize - 1] = '\0';
}
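/*
 * For illustration (made-up values): for uid 1000 and a sec with
 * ps_id 0x2a, the resulting key description is "1000@2a"; the root
 * context of that sec would be described as "0@2a".
 */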
/****************************************
 * sec apis                             *
 ****************************************/
static
struct ptlrpc_sec *gss_sec_create_kr(struct obd_import *imp,
				     struct ptlrpc_svc_ctx *svcctx,
				     struct sptlrpc_flavor *sf)
{
	struct gss_sec_keyring *gsec_kr;
	ENTRY;

	OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
	if (gsec_kr == NULL)
		RETURN(NULL);

	INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
	gsec_kr->gsk_root_ctx = NULL;
	mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_init(&gsec_kr->gsk_uc_lock);
#endif

	if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
				  imp, svcctx, sf))
		goto err_free;

	if (svcctx != NULL &&
	    sec_install_rctx_kr(&gsec_kr->gsk_base.gs_base, svcctx)) {
		gss_sec_destroy_common(&gsec_kr->gsk_base);
		goto err_free;
	}

	RETURN(&gsec_kr->gsk_base.gs_base);

err_free:
	OBD_FREE(gsec_kr, sizeof(*gsec_kr));
	RETURN(NULL);
}
static
void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec *gsec = sec2gsec(sec);
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);

	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(hlist_empty(&gsec_kr->gsk_clist));
	LASSERT(gsec_kr->gsk_root_ctx == NULL);

	gss_sec_destroy_common(gsec);

	OBD_FREE(gsec_kr, sizeof(*gsec_kr));
}
static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
{
	/* except for the ROOTONLY flag, treat it as root user only if the
	 * real uid is 0; euid/fsuid being 0 is handled as a setuid scenario
	 */
	if (sec_is_rootonly(sec) || (vcred->vc_uid == 0))
		return 1;

	return 0;
}
/*
 * When lookup_user_key is available, use the kernel API rather than directly
 * accessing the uid_keyring and session_keyring via the current process
 * credentials.
 */
#ifdef HAVE_LOOKUP_USER_KEY

#ifdef HAVE_KEY_NEED_UNLINK
/* from Linux security/keys/internal.h: */
# ifndef KEY_LOOKUP_PARTIAL
#  define KEY_LOOKUP_PARTIAL 0x2
# endif
#else
# define KEY_NEED_UNLINK 0
# ifndef KEY_LOOKUP_FOR_UNLINK
#  define KEY_LOOKUP_FOR_UNLINK 0x4
# endif
# define KEY_LOOKUP_PARTIAL KEY_LOOKUP_FOR_UNLINK
#endif /* HAVE_KEY_NEED_UNLINK */

static struct key *_user_key(key_serial_t id)
{
	key_ref_t ref;

	ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, KEY_NEED_UNLINK);
	if (IS_ERR(ref))
		return NULL;
	return key_ref_to_ptr(ref);
}

static inline struct key *get_user_session_keyring(const struct cred *cred)
{
	return _user_key(KEY_SPEC_USER_SESSION_KEYRING);
}

static inline struct key *get_user_keyring(const struct cred *cred)
{
	return _user_key(KEY_SPEC_USER_KEYRING);
}

static inline struct key *get_session_keyring(const struct cred *cred)
{
	return _user_key(KEY_SPEC_SESSION_KEYRING);
}

#else /* ! HAVE_LOOKUP_USER_KEY */

static inline struct key *get_user_session_keyring(const struct cred *cred)
{
	return key_get(cred->user->session_keyring);
}

static inline struct key *get_user_keyring(const struct cred *cred)
{
	return key_get(cred->user->uid_keyring);
}

static inline struct key *get_session_keyring(const struct cred *cred)
{
	return key_get(cred->session_keyring);
}

#endif /* HAVE_LOOKUP_USER_KEY */
/*
 * Get the appropriate destination keyring for the request.
 *
 * The keyring selected is returned with an extra reference upon it which the
 * caller must release.
 *
 * Inspired by the kernel's construct_get_dest_keyring(), which unfortunately
 * is not exported.
 */
static int construct_get_dest_keyring(struct key **_dest_keyring)
{
	struct key *dest_keyring = *_dest_keyring;
	const struct cred *cred = current_cred();

	if (dest_keyring) {
		/* the caller supplied one */
		key_get(dest_keyring);
		goto out;
	}

	switch (cred->jit_keyring) {
	case KEY_REQKEY_DEFL_DEFAULT:
	case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
#ifdef HAVE_GET_REQUEST_KEY_AUTH
		if (cred->request_key_auth) {
			struct request_key_auth *rka;
			struct key *authkey = cred->request_key_auth;

			down_read(&authkey->sem);
			rka = get_request_key_auth(authkey);
			if (!test_bit(KEY_FLAG_REVOKED, &authkey->flags))
				dest_keyring = key_get(rka->dest_keyring);
			up_read(&authkey->sem);
			if (dest_keyring)
				break;
		}
#endif
		fallthrough;
	case KEY_REQKEY_DEFL_THREAD_KEYRING:
		dest_keyring = key_get(cred->thread_keyring);
		if (dest_keyring)
			break;
		fallthrough;
	case KEY_REQKEY_DEFL_PROCESS_KEYRING:
		dest_keyring = key_get(cred->process_keyring);
		if (dest_keyring)
			break;
		fallthrough;
	case KEY_REQKEY_DEFL_SESSION_KEYRING:
		dest_keyring = get_session_keyring(cred);
		if (dest_keyring) {
			if (!test_bit(KEY_FLAG_REVOKED, &dest_keyring->flags))
				break;
			key_put(dest_keyring);
		}
		fallthrough;
	case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
		dest_keyring = get_user_session_keyring(cred);
		break;
	case KEY_REQKEY_DEFL_USER_KEYRING:
		dest_keyring = get_user_keyring(cred);
		break;
	case KEY_REQKEY_DEFL_GROUP_KEYRING:
	default:
		return -EINVAL;
	}

out:
	*_dest_keyring = dest_keyring;
	return 0;
}
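/*
 * Summary of the fallback order implemented above (mirroring the kernel's
 * request_key() destination selection): requestor keyring -> thread
 * keyring -> process keyring -> session keyring -> user-session keyring,
 * with KEY_REQKEY_DEFL_USER_KEYRING selecting the user keyring directly,
 * and group keyrings unsupported.
 */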
/*
 * Unlink key from its keyring, into which it was linked during request_key().
 */
static void request_key_unlink(struct key *key, bool fullsearch)
{
	kuid_t kuid_orig = current_cred()->user->uid;
#ifdef HAVE_USER_UID_KEYRING
	struct key *root_uid_keyring = NULL;
#endif
	const struct cred *old_cred = NULL;
	struct cred *new_cred = NULL;
	struct key *ring = NULL;
	uid_t uid, key_uid;
	int res;

	uid = from_kuid(current_user_ns(), kuid_orig);
	key_uid = from_kuid(&init_user_ns, key->uid);
	/* unlink key with user's creds if it's a user key */
	if (key_uid != uid) {
		new_cred = prepare_creds();
		if (new_cred == NULL)
			goto search;

		new_cred->uid = key->uid;
		new_cred->user->uid = key->uid;
		if (new_cred->user_ns != &init_user_ns) {
			put_user_ns(new_cred->user_ns);
			new_cred->user_ns = get_user_ns(&init_user_ns);
		}
#ifdef HAVE_USER_UID_KEYRING
		root_uid_keyring = current_cred()->user->uid_keyring;
		new_cred->user->uid_keyring = NULL;
#endif
		old_cred = override_creds(new_cred);
	}

search:
	/* User keys are linked to the user keyring. So get it now. */
	if (key_uid && !fullsearch) {
		/* Getting a key(ring) normally increases its refcount by 1.
		 * But if we overrode creds above, calling get_user_keyring()
		 * will add one more ref, because of the user switch.
		 */
		ring = get_user_keyring(current_cred());
	} else {
		if (construct_get_dest_keyring(&ring))
			ring = NULL;
	}

	if (ring) {
		res = key_unlink(ring, key);
		CDEBUG(D_SEC,
		       "Unlink key %08x (%p) from keyring %08x: %d\n",
		       key->serial, key, ring->serial, res);
		/* matches key_get()/get_user_keyring() above */
		key_put(ring);
	} else {
		CDEBUG(D_SEC,
		       "Missing keyring, key %08x (%p) could not be unlinked, ignored\n",
		       key->serial, key);
	}

	if (old_cred) {
		revert_creds(old_cred);
		/* restore the original user's uid */
		current_cred()->user->uid = kuid_orig;
#ifdef HAVE_USER_UID_KEYRING
		/* We are switching creds back, so need to drop ref on keyring
		 * for kernel implementation based on user keyring pinned from
		 * the user_struct struct.
		 */
		if (key_uid && !fullsearch)
			key_put(ring);
		if (root_uid_keyring)
			current_cred()->user->uid_keyring = root_uid_keyring;
#endif
		put_cred(new_cred);
	}
}
/*
 * \retval a valid context on success
 * \retval error pointer or NULL on error
 */
static
struct ptlrpc_cli_ctx *gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
					     struct vfs_cred *vcred,
					     int create, int remove_dead)
{
	struct obd_import *imp = sec->ps_import;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct ptlrpc_cli_ctx *ctx = NULL;
	unsigned int is_root = 0, create_new = 0;
	const struct cred *old_cred = NULL;
	struct cred *new_cred = NULL;
	struct key *key;
	char desc[24];
	char *coinfo;
	int coinfo_size;
	const char *sec_part_flags = "";
	char svc_flag = '-';
	pid_t caller_pid;
	struct lnet_nid primary;
	ENTRY;

	LASSERT(imp != NULL);

	is_root = user_is_root(sec, vcred);

	/* a little bit of optimization for root context */
	if (is_root) {
		ctx = sec_lookup_root_ctx_kr(sec);
		/*
		 * Only lookup directly for REVERSE sec, which should
		 * never trigger an upcall.
		 */
		if (ctx || sec_is_reverse(sec))
			RETURN(ctx);
	}

	if (!create)
		RETURN(ERR_PTR(-ENODATA));
	/* for root context, obtain the lock and check again; this time hold
	 * the root upcall lock and make sure nobody else populated a new
	 * root context after the last check.
	 */
	if (is_root) {
		mutex_lock(&gsec_kr->gsk_root_uc_lock);

		ctx = sec_lookup_root_ctx_kr(sec);
		if (ctx)
			goto out;

		/* update reverse handle for root user */
		sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();

		switch (sec->ps_part) {
		case LUSTRE_SP_MDT:
			sec_part_flags = "m";
			break;
		case LUSTRE_SP_OST:
			sec_part_flags = "o";
			break;
		case LUSTRE_SP_MGC:
			sec_part_flags = "rmo";
			break;
		case LUSTRE_SP_CLI:
			sec_part_flags = "r";
			break;
		case LUSTRE_SP_MGS:
		default:
			LBUG();
		}

		switch (SPTLRPC_FLVR_SVC(sec->ps_flvr.sf_rpc)) {
		case SPTLRPC_SVC_NULL:
			svc_flag = 'n';
			break;
		case SPTLRPC_SVC_AUTH:
			svc_flag = 'a';
			break;
		case SPTLRPC_SVC_INTG:
			svc_flag = 'i';
			break;
		case SPTLRPC_SVC_PRIV:
			svc_flag = 'p';
			break;
		default:
			LBUG();
		}
	}
	/* In case of setuid, the key will be constructed as owned by
	 * fsuid/fsgid, but we do authentication based on the real uid/gid.
	 * The key permission bits will be exactly POS_ALL, so only processes
	 * possessing this key can access it, although the quota might be
	 * counted against others (fsuid/fsgid).
	 *
	 * keyring will use fsuid/fsgid as upcall parameters, so we have to
	 * encode the real uid/gid into the callout info.
	 */

	/* But first we need to make sure the obd type is supported */
	if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MGC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSP_NAME)) {
		CERROR("obd %s is not a supported device\n",
		       imp->imp_obd->obd_name);
		GOTO(out, ctx = NULL);
	}
	construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

	/* callout info format:
	 * secid:mech:uid:gid:sec_flags:svc_flag:svc_type:peer_nid:target_uuid:
	 * self_nid:pid
	 */
	coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
	OBD_ALLOC(coinfo, coinfo_size);
	if (coinfo == NULL)
		goto out;
	/* Last callout parameter is the pid of the process whose namespace
	 * will be used for credentials' retrieval.
	 */
	if (gss_check_upcall_ns) {
		/* For user's credentials (in which case sec_part_flags is
		 * empty), use the current PID instead of the import's
		 * reference PID to get the reference namespace.
		 */
		if (sec_part_flags[0] == '\0')
			caller_pid = current->pid;
		else
			caller_pid = imp->imp_sec_refpid;
	} else {
		/* Do not switch namespace in gss keyring upcall. */
		caller_pid = 0;
	}
	primary = imp->imp_connection->c_self;
	LNetPrimaryNID(&primary);
	/* FIXME !! Needs to support larger NIDs */
	snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%c:%d:%#llx:%s:%#llx:%d",
		 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
		 vcred->vc_uid, vcred->vc_gid,
		 sec_part_flags, svc_flag, import_to_gss_svc(imp),
		 lnet_nid_to_nid4(&imp->imp_connection->c_peer.nid),
		 imp->imp_obd->obd_name,
		 lnet_nid_to_nid4(&primary),
		 caller_pid);

	CDEBUG(D_SEC, "requesting key for %s\n", desc);
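	/*
	 * For illustration, with made-up values, a root context on an MGC
	 * using krb5n over tcp could yield a callout info string like:
	 *   "1:krb5:0:0:rmo:n:1:0x50000c0a80a02:MGC192.168.10.1@tcp:0x50000c0a80a01:0"
	 * (secid 1, mech krb5, uid/gid 0, flags "rmo", svc flag 'n',
	 * svc type 1, peer NID, target obd name, self NID, pid 0).
	 */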
	/* If the session keyring is revoked, it must not be used by
	 * request_key(), otherwise we would get -EKEYREVOKED and
	 * the user keyring would not even be searched.
	 * So prepare new creds with no session keyring.
	 */
	if (current_cred()->session_keyring &&
	    test_bit(KEY_FLAG_REVOKED,
		     &current_cred()->session_keyring->flags)) {
		new_cred = prepare_creds();
		if (new_cred) {
			new_cred->session_keyring = NULL;
			old_cred = override_creds(new_cred);
		}
	}

	keyring_upcall_lock(gsec_kr);
	key = request_key(&gss_key_type, desc, coinfo);
	keyring_upcall_unlock(gsec_kr);
	if (old_cred) {
		revert_creds(old_cred);
		put_cred(new_cred);
	}

	OBD_FREE(coinfo, coinfo_size);
	if (IS_ERR(key)) {
		CERROR("%s: request key failed for uid %d: rc = %ld\n",
		       imp->imp_obd->obd_name, vcred->vc_uid,
		       PTR_ERR(key));
		ctx = ERR_CAST(key);
		goto out;
	}

	CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc);
	/* once payload.data is pointed at a ctx, it never changes until
	 * we de-associate them; but a parallel request_key() may return
	 * a key with payload.data == NULL at the same time, so we still
	 * need the write lock of key->sem to serialize them.
	 */
	down_write(&key->sem);

	ctx = key_get_payload(key, 0);
	if (likely(ctx)) {
		LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
		LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
		LASSERT(ll_read_key_usage(key) >= 2);

		/* simply take a ref and return. it's the upper layer's
		 * responsibility to detect & replace a dead ctx.
		 */
		atomic_inc(&ctx->cc_refcount);
	} else {
		/* pre-initialization with a cli_ctx. This can't be done in
		 * key_instantiate() because we don't have enough information
		 * there.
		 */
		ctx = ctx_create_kr(sec, vcred);
		if (ctx != NULL) {
			ctx_enlist_kr(ctx, is_root, 0);
			bind_key_ctx(key, ctx);

			ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);

			CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
			       key, ctx, sec);
		} else {
			CDEBUG(D_SEC, "invalidating key %08x (%p)\n",
			       key->serial, key);
			key_invalidate_locked(key);
		}

		create_new = 1;
	}

	up_write(&key->sem);
	/* We want user keys to be linked to the user keyring (see the call to
	 * keyctl_instantiate() from prepare_and_instantiate() in userspace).
	 * But internally request_key() links the key to the session or
	 * user-session keyring, depending on the jit_keyring value. Avoid
	 * that by unlinking the key from this keyring. It will spare us pain
	 * when we need to remove the key later on.
	 */
	if (!is_root || create_new)
		request_key_unlink(key, true);

	key_put(key);
out:
	if (is_root)
		mutex_unlock(&gsec_kr->gsk_root_uc_lock);
	RETURN(ctx);
}
static
void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
			    struct ptlrpc_cli_ctx *ctx,
			    int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	ctx_release_kr(ctx, sync);
}
/*
 * To flush the context of a normal user we must resort to the keyring itself
 * to find out which contexts belong to me.
 *
 * Note: we only intend to flush _my_ contexts here; the "uid" is ignored in
 * the search.
 */
static void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid,
				    int grace, int force)
{
	const struct cred *old_cred = NULL;
	struct cred *new_cred = NULL;
	struct key *key;
	char desc[24];

	/* nothing to do for reverse or rootonly sec */
	if (sec_is_reverse(sec) || sec_is_rootonly(sec))
		return;

	construct_key_desc(desc, sizeof(desc), sec, uid);

	/* If the session keyring is revoked, it must not be used by
	 * request_key(), otherwise we would get -EKEYREVOKED and
	 * the user keyring would not even be searched.
	 * So prepare new creds with no session keyring.
	 */
	if (current_cred()->session_keyring &&
	    test_bit(KEY_FLAG_REVOKED,
		     &current_cred()->session_keyring->flags)) {
		new_cred = prepare_creds();
		if (new_cred) {
			new_cred->session_keyring = NULL;
			old_cred = override_creds(new_cred);
		}
	}

	/* there should be only one valid key, but we put it in a
	 * loop just in case of any weird cases
	 */
	for (;;) {
		key = request_key(&gss_key_type, desc, NULL);
		if (IS_ERR(key)) {
			CDEBUG(D_SEC,
			       "No more key found for current user: rc=%ld\n",
			       PTR_ERR(key));
			break;
		}

		down_write(&key->sem);

		kill_key_locked(key);

		/* kill_key_locked() should usually revoke the key, but we
		 * invalidate it as well to completely get rid of it.
		 */
		key_invalidate_locked(key);

		up_write(&key->sem);

		key_put(key);
	}

	if (old_cred) {
		revert_creds(old_cred);
		put_cred(new_cred);
	}
}
/*
 * Flush the context of root or all; we iterate through the list.
 */
static
void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace,
			     int force)
{
	struct gss_sec_keyring *gsec_kr;
	struct hlist_head freelist = HLIST_HEAD_INIT;
	struct hlist_node *next;
	struct ptlrpc_cli_ctx *ctx;
	ENTRY;

	gsec_kr = sec2gsec_keyring(sec);

	spin_lock(&sec->ps_lock);
	hlist_for_each_entry_safe(ctx, next, &gsec_kr->gsk_clist,
				  cc_cache) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);

		if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
			continue;

		/* at this moment there are at least 2 base references:
		 * key association and in-list.
		 */
		if (atomic_read(&ctx->cc_refcount) > 2) {
			if (!force)
				continue;
			CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
			      ctx, ctx->cc_vcred.vc_uid,
			      sec2target_str(ctx->cc_sec),
			      atomic_read(&ctx->cc_refcount) - 2);
		}

		set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
		if (!grace)
			clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

		atomic_inc(&ctx->cc_refcount);

		if (ctx_unlist_kr(ctx, 1)) {
			hlist_add_head(&ctx->cc_cache, &freelist);
		} else {
			LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
			atomic_dec(&ctx->cc_refcount);
		}
	}
	spin_unlock(&sec->ps_lock);

	dispose_ctx_list_kr(&freelist);
	EXIT;
}
static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
			       uid_t uid, int grace, int force)
{
	ENTRY;

	CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
	       sec, atomic_read(&sec->ps_refcount),
	       atomic_read(&sec->ps_nctx),
	       uid, grace, force);

	if (uid != -1 && uid != 0)
		flush_user_ctx_cache_kr(sec, uid, grace, force);
	else
		flush_spec_ctx_cache_kr(sec, uid, grace, force);

	RETURN(0);
}
static
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct hlist_head freelist = HLIST_HEAD_INIT;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx;
	struct hlist_node *next;
	ENTRY;

	CDEBUG(D_SEC, "running gc\n");

	spin_lock(&sec->ps_lock);
	hlist_for_each_entry_safe(ctx, next, &gsec_kr->gsk_clist,
				  cc_cache) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);

		atomic_inc(&ctx->cc_refcount);

		if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
			gctx = ctx2gctx(ctx);

			hlist_add_head(&ctx->cc_cache, &freelist);
			CWARN("%s: cleaning gss ctx hdl %#llx:%#llx\n",
			      ctx->cc_sec->ps_import->imp_obd->obd_name,
			      gss_handle_to_u64(&gctx->gc_handle),
			      gss_handle_to_u64(&gctx->gc_svc_handle));
		} else {
			LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
			atomic_dec(&ctx->cc_refcount);
		}
	}
	spin_unlock(&sec->ps_lock);

	dispose_ctx_list_kr(&freelist);
	EXIT;
}
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct hlist_node *next;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx;
	struct ptlrpc_connection *conn;
	time64_t now = ktime_get_real_seconds();
	ENTRY;

	spin_lock(&sec->ps_lock);
	hlist_for_each_entry_safe(ctx, next, &gsec_kr->gsk_clist,
				  cc_cache) {
		struct key *key;
		char flags_str[40];
		char mech[40];

		gctx = ctx2gctx(ctx);
		key = ctx2gctx_keyring(ctx)->gck_key;
		if (sec_is_reverse(sec) &&
		    ctx->cc_sec && ctx->cc_sec->ps_import &&
		    ctx->cc_sec->ps_import->imp_connection)
			conn = ctx->cc_sec->ps_import->imp_connection;
		else
			conn = NULL;

		gss_cli_ctx_flags2str(ctx->cc_flags,
				      flags_str, sizeof(flags_str));

		if (gctx->gc_mechctx)
			lgss_display(gctx->gc_mechctx, mech, sizeof(mech));
		else
			snprintf(mech, sizeof(mech), "N/A");
		mech[sizeof(mech) - 1] = '\0';

		seq_printf(seq,
			   "- { %s%s%suid: %u, ctxref: %d, expire: %lld, delta: %lld, flags: [%s], seq: %d, win: %u, key: %08x, keyref: %d, hdl: \"%#llx:%#llx\", mech: \"%s\" }\n",
			   conn ? "peer_nid: " : "",
			   conn ? libcfs_nidstr(&conn->c_peer.nid) : "",
			   conn ? ", " : "",
			   ctx->cc_vcred.vc_uid,
			   atomic_read(&ctx->cc_refcount),
			   ctx->cc_expire,
			   ctx->cc_expire ? ctx->cc_expire - now : 0,
			   flags_str, atomic_read(&gctx->gc_seq),
			   gctx->gc_win, key ? key->serial : 0,
			   key ? ll_read_key_usage(key) : 0,
			   gss_handle_to_u64(&gctx->gc_handle),
			   gss_handle_to_u64(&gctx->gc_svc_handle),
			   mech);
	}
	spin_unlock(&sec->ps_lock);

	RETURN(0);
}
/****************************************
 * cli_ctx apis                         *
 ****************************************/
static
int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
{
	/* upcall is already on the way */
	struct gss_cli_ctx *gctx = ctx ? ctx2gctx(ctx) : NULL;

	/* record the latest sequence number in the buddy svcctx */
	if (gctx && !rawobj_empty(&gctx->gc_svc_handle) &&
	    sec_is_reverse(gctx->gc_base.cc_sec)) {
		return gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
					(__u32)atomic_read(&gctx->gc_seq));
	}
	return 0;
}
static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	if (cli_ctx_check_death(ctx)) {
		kill_ctx_kr(ctx);
		return 1;
	}

	if (cli_ctx_is_ready(ctx))
		return 0;
	return 1;
}
static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	cli_ctx_expire(ctx);
	kill_ctx_kr(ctx);
}
/****************************************
 * (reverse) service                    *
 ****************************************/
/*
 * A reverse context could have nothing to do with keyrings. Here we still
 * keep the version which binds to a key, for future reference.
 */
#define HAVE_REVERSE_CTX_NOKEY
#ifdef HAVE_REVERSE_CTX_NOKEY

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_cli_ctx *cli_ctx;
	struct vfs_cred vcred = { .vc_uid = 0 };
	int rc;

	LASSERT(sec);
	LASSERT(svc_ctx);

	cli_ctx = ctx_create_kr(sec, &vcred);
	if (cli_ctx == NULL)
		return -ENOMEM;

	rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
	if (rc) {
		CERROR("failed copy reverse cli ctx: %d\n", rc);

		ctx_put_kr(cli_ctx, 1);
		return rc;
	}

	rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);

	ctx_put_kr(cli_ctx, 1);

	return 0;
}
#else /* ! HAVE_REVERSE_CTX_NOKEY */

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_cli_ctx *cli_ctx = NULL;
	struct key *key;
	struct vfs_cred vcred = { .vc_uid = 0 };
	char desc[64];
	int rc;

	LASSERT(sec);
	LASSERT(svc_ctx);

	construct_key_desc(desc, sizeof(desc), sec, 0);

	key = key_alloc(&gss_key_type, desc, 0, 0,
			KEY_POS_ALL | KEY_USR_ALL, 1);
	if (IS_ERR(key)) {
		CERROR("failed to alloc key: %ld\n", PTR_ERR(key));
		return PTR_ERR(key);
	}

	rc = key_instantiate_and_link(key, NULL, 0, NULL, NULL);
	if (rc) {
		CERROR("failed to instantiate key: %d\n", rc);
		goto err_put_key;
	}

	down_write(&key->sem);

	LASSERT(!key_get_payload(key, 0));

	cli_ctx = ctx_create_kr(sec, &vcred);
	if (cli_ctx == NULL) {
		rc = -ENOMEM;
		goto err_up;
	}

	rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
	if (rc) {
		CERROR("failed copy reverse cli ctx: %d\n", rc);
		goto err_put_ctx;
	}

	rvs_sec_install_root_ctx_kr(sec, cli_ctx, key);

	ctx_put_kr(cli_ctx, 1);
	up_write(&key->sem);

	key_put(key);
	return 0;

err_put_ctx:
	ctx_put_kr(cli_ctx, 1);
err_up:
	up_write(&key->sem);
err_put_key:
	key_put(key);
	return rc;
}

#endif /* HAVE_REVERSE_CTX_NOKEY */
/****************************************
 * service apis                         *
 ****************************************/
static
int gss_svc_accept_kr(struct ptlrpc_request *req)
{
	return gss_svc_accept(&gss_policy_keyring, req);
}
static
int gss_svc_install_rctx_kr(struct obd_import *imp,
			    struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_sec *sec;
	int rc;

	sec = sptlrpc_import_sec_ref(imp);

	rc = sec_install_rctx_kr(sec, svc_ctx);
	sptlrpc_sec_put(sec);

	return rc;
}
/****************************************
 * key apis                             *
 ****************************************/
#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
static int gss_kt_instantiate(struct key *key,
			      struct key_preparsed_payload *prep)
{
	const void *data = prep->data;
	size_t datalen = prep->datalen;
#else
static int gss_kt_instantiate(struct key *key, const void *data,
			      size_t datalen)
{
#endif
	struct key *keyring;
	int uid, rc;
	ENTRY;

	CDEBUG(D_SEC, "instantiating key %08x (%p)\n", key->serial, key);

	if (data != NULL || datalen != 0) {
		CERROR("invalid: data %p, len %lu\n", data, (long)datalen);
		RETURN(-EINVAL);
	}

	if (key_get_payload(key, 0)) {
		CERROR("key already has payload\n");
		RETURN(-EINVAL);
	}

	/* Link the key to the session keyring, so the context negotiation
	 * RPC fired from user space can find this key. The link will be
	 * removed automatically when the upcall processes die.
	 *
	 * We can't do this through keyctl from userspace, because the upcall
	 * might be neither possessor nor owner of the key (setuid).
	 *
	 * The session keyring is created upon upcall and doesn't change
	 * until the upcall finishes, so an rcu lock is not needed here.
	 *
	 * But for end users, link to the user keyring. This simplifies key
	 * management, makes keys shared across all user sessions, and avoids
	 * unfortunate key leaks if lfs flushctx is not called at user logout.
	 */
	uid = from_kuid(&init_user_ns, current_uid());
	if (uid == 0)
		keyring = get_session_keyring(current_cred());
	else
		keyring = get_user_keyring(current_cred());

	rc = key_link(keyring, key);
	if (rc) {
		CERROR("failed to link key %08x to keyring %08x: %d\n",
		       key->serial, keyring->serial, rc);
	} else {
		CDEBUG(D_SEC,
		       "key %08x (%p) linked to keyring %08x and instantiated, ctx %p\n",
		       key->serial, key, keyring->serial,
		       key_get_payload(key, 0));
	}

	key_put(keyring);
	RETURN(rc);
}
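/*
 * For illustration (all serials and the sec id below are made-up values):
 * after a successful negotiation for a regular user, the instantiated key
 * is visible in that user's keyring, e.g.
 *
 *	$ keyctl show @u
 *	Keyring
 *	 123456789 --alswrv   1000 65534  keyring: _uid.1000
 *	 987654321 --alsw-v   1000  1000   \_ lgssc: 1000@2a
 *
 * assuming the gss key type is named "lgssc" (see gss_key_type below) and
 * the description was built by construct_key_desc().
 */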
/*
 * Called with the key semaphore write-locked. That means we can operate
 * on the context without fear of losing the refcount.
 */
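/*
 * Downcall payload layout, as a sketch inferred from the extraction calls
 * in gss_kt_update() below (not an authoritative wire specification):
 *
 *	__u32 seq_win;			// sequence window
 *	if (seq_win == 0) {		// negotiation failed
 *		__u32 nego_rpc_err;
 *		__u32 nego_gss_err;
 *	} else {			// negotiation succeeded
 *		rawobj_t handle;	// context handle -> gc_handle
 *		rawobj_t mech_token;	// fed to lgss_import_sec_context()
 *	}
 */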
#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
static int gss_kt_update(struct key *key, struct key_preparsed_payload *prep)
{
	const void *data = prep->data;
	__u32 datalen32 = (__u32) prep->datalen;
#else
static int gss_kt_update(struct key *key, const void *data, size_t datalen)
{
	__u32 datalen32 = (__u32) datalen;
#endif
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);
	struct gss_cli_ctx *gctx;
	rawobj_t tmpobj = RAWOBJ_EMPTY;
	int rc;
	ENTRY;

	CDEBUG(D_SEC, "updating key %08x (%p)\n", key->serial, key);

	if (data == NULL || datalen32 == 0) {
		CWARN("invalid: data %p, len %lu\n", data, (long)datalen32);
		RETURN(-EINVAL);
	}

	/* if the upcall finished negotiation too fast (most likely because
	 * a local error happened) and called kt_update(), the ctx might
	 * still be NULL, but the key will eventually be associated with a
	 * context or be revoked. If the key status is fine, return -EAGAIN
	 * to let userspace sleep a while and call again.
	 */
	if (ctx == NULL) {
		CDEBUG(D_SEC, "update too soon: key %08x (%p) flags %lx\n",
		       key->serial, key, key->flags);

		rc = key_validate(key);
		if (rc == 0)
			RETURN(-EAGAIN);
		else
			RETURN(rc);
	}

	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	ctx_clear_timer_kr(ctx);

	/* don't proceed if already refreshed */
	if (cli_ctx_is_refreshed(ctx)) {
		CWARN("ctx already done refresh\n");
		RETURN(0);
	}

	sptlrpc_cli_ctx_get(ctx);
	gctx = ctx2gctx(ctx);

	rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win,
				  sizeof(gctx->gc_win));
	if (rc) {
		CERROR("failed extract seq_win\n");
		goto out;
	}

	if (gctx->gc_win == 0) {
		__u32 nego_rpc_err, nego_gss_err;

		rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
					  sizeof(nego_rpc_err));
		if (rc) {
			CERROR("cannot extract RPC: rc = %d\n", rc);
			goto out;
		}

		rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
					  sizeof(nego_gss_err));
		if (rc) {
			CERROR("failed to extract gss rc = %d\n", rc);
			goto out;
		}

		CERROR("negotiation: rpc err %d, gss err %x\n",
		       nego_rpc_err, nego_gss_err);

		rc = nego_rpc_err ? nego_rpc_err : -EACCES;
	} else {
		rc = rawobj_extract_local_alloc(&gctx->gc_handle,
						(__u32 **) &data, &datalen32);
		if (rc) {
			CERROR("failed extract handle\n");
			goto out;
		}

		rc = rawobj_extract_local(&tmpobj,
					  (__u32 **) &data, &datalen32);
		if (rc) {
			CERROR("failed extract mech\n");
			goto out;
		}

		rc = lgss_import_sec_context(&tmpobj,
					     sec2gsec(ctx->cc_sec)->gs_mech,
					     &gctx->gc_mechctx);
		if (rc != GSS_S_COMPLETE)
			CERROR("failed import context\n");
		else
			rc = 0;
	}

out:
	CDEBUG(D_SEC, "update of key %08x (%p): %d\n", key->serial, key, rc);
	/* we don't care what the current status of this ctx is, even if
	 * someone else is operating on it at the same time; we just add
	 * our own opinion here.
	 */
	if (rc == 0) {
		gss_cli_ctx_uptodate(gctx);
		/* In case of success, only the companion key of a root ctx
		 * can be unbound. User keys are required to be able to
		 * retrieve the associated gss context.
		 */
		if (ctx->cc_vcred.vc_uid == 0)
			unbind_key_ctx(key, ctx);
	} else {
		/* In case of failure, unbind the companion key for all
		 * contexts, i.e. root and regular users. It will also
		 * invalidate the key.
		 */
		unbind_key_ctx(key, ctx);
		if (rc != -ERESTART)
			set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
		cli_ctx_expire(ctx);
	}

	/* let user space think it's a success */
	sptlrpc_cli_ctx_put(ctx, 1);
	RETURN(0);
}
#ifndef HAVE_KEY_MATCH_DATA
static int
gss_kt_match(const struct key *key, const void *desc)
{
	return strcmp(key->description, (const char *) desc) == 0 &&
	       !test_bit(KEY_FLAG_REVOKED, &key->flags);
}
#else /* ! HAVE_KEY_MATCH_DATA */
static bool
gss_kt_match(const struct key *key, const struct key_match_data *match_data)
{
	const char *desc = match_data->raw_data;

	return strcmp(key->description, desc) == 0 &&
	       !test_bit(KEY_FLAG_REVOKED, &key->flags);
}

/*
 * Preparse the match criterion.
 */
static int gss_kt_match_preparse(struct key_match_data *match_data)
{
	match_data->lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT;
	match_data->cmp = gss_kt_match;
	return 0;
}
#endif /* HAVE_KEY_MATCH_DATA */
static
void gss_kt_destroy(struct key *key)
{
	ENTRY;
	LASSERT(!key_get_payload(key, 0));
	CDEBUG(D_SEC, "destroy key %08x %p\n", key->serial, key);
	EXIT;
}
static
void gss_kt_describe(const struct key *key, struct seq_file *s)
{
	if (key->description == NULL)
		seq_puts(s, "[null]");
	else
		seq_puts(s, key->description);
}
static void gss_kt_revoke(struct key *key)
{
	CDEBUG(D_SEC, "revoking key %08x (%p) ref %d\n",
	       key->serial, key, ll_read_key_usage(key));
	kill_key_locked(key);
	CDEBUG(D_SEC, "key %08x (%p) revoked ref %d\n",
	       key->serial, key, ll_read_key_usage(key));
}
static struct key_type gss_key_type =
{
	.name		= "lgssc",
	.def_datalen	= 0,
	.instantiate	= gss_kt_instantiate,
	.update		= gss_kt_update,
#ifdef HAVE_KEY_MATCH_DATA
	.match_preparse	= gss_kt_match_preparse,
#else
	.match		= gss_kt_match,
#endif
	.destroy	= gss_kt_destroy,
	.describe	= gss_kt_describe,
	.revoke		= gss_kt_revoke,
};
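/*
 * Userspace counterpart, for reference: request_key() above triggers
 * /sbin/request-key, which dispatches on the key type name via
 * /etc/request-key.d (or /etc/request-key.conf). A typical Lustre
 * configuration line (shown here for illustration) looks like:
 *
 *	create lgssc * * /usr/sbin/lgss_keyring %o %k %t %d %c %u %g %T %P %S
 *
 * lgss_keyring then performs the GSS negotiation and finishes by writing
 * the downcall buffer parsed by gss_kt_update() via keyctl_instantiate().
 */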
/****************************************
 * lustre gss keyring policy            *
 ****************************************/
static struct ptlrpc_ctx_ops gss_keyring_ctxops = {
	.match			= gss_cli_ctx_match,
	.refresh		= gss_cli_ctx_refresh_kr,
	.validate		= gss_cli_ctx_validate_kr,
	.die			= gss_cli_ctx_die_kr,
	.sign			= gss_cli_ctx_sign,
	.verify			= gss_cli_ctx_verify,
	.seal			= gss_cli_ctx_seal,
	.unseal			= gss_cli_ctx_unseal,
	.wrap_bulk		= gss_cli_ctx_wrap_bulk,
	.unwrap_bulk		= gss_cli_ctx_unwrap_bulk,
};
static struct ptlrpc_sec_cops gss_sec_keyring_cops = {
	.create_sec		= gss_sec_create_kr,
	.destroy_sec		= gss_sec_destroy_kr,
	.kill_sec		= gss_sec_kill,
	.lookup_ctx		= gss_sec_lookup_ctx_kr,
	.release_ctx		= gss_sec_release_ctx_kr,
	.flush_ctx_cache	= gss_sec_flush_ctx_cache_kr,
	.gc_ctx			= gss_sec_gc_ctx_kr,
	.install_rctx		= gss_sec_install_rctx,
	.alloc_reqbuf		= gss_alloc_reqbuf,
	.free_reqbuf		= gss_free_reqbuf,
	.alloc_repbuf		= gss_alloc_repbuf,
	.free_repbuf		= gss_free_repbuf,
	.enlarge_reqbuf		= gss_enlarge_reqbuf,
	.display		= gss_sec_display_kr,
};
static struct ptlrpc_sec_sops gss_sec_keyring_sops = {
	.accept			= gss_svc_accept_kr,
	.invalidate_ctx		= gss_svc_invalidate_ctx,
	.alloc_rs		= gss_svc_alloc_rs,
	.authorize		= gss_svc_authorize,
	.free_rs		= gss_svc_free_rs,
	.free_ctx		= gss_svc_free_ctx,
	.prep_bulk		= gss_svc_prep_bulk,
	.unwrap_bulk		= gss_svc_unwrap_bulk,
	.wrap_bulk		= gss_svc_wrap_bulk,
	.install_rctx		= gss_svc_install_rctx_kr,
};
static struct ptlrpc_sec_policy gss_policy_keyring = {
	.sp_owner		= THIS_MODULE,
	.sp_name		= "gss.keyring",
	.sp_policy		= SPTLRPC_POLICY_GSS,
	.sp_cops		= &gss_sec_keyring_cops,
	.sp_sops		= &gss_sec_keyring_sops,
};
int __init gss_init_keyring(void)
{
	int rc;

	rc = register_key_type(&gss_key_type);
	if (rc) {
		CERROR("failed to register keyring type: %d\n", rc);
		return rc;
	}

	rc = sptlrpc_register_policy(&gss_policy_keyring);
	if (rc) {
		unregister_key_type(&gss_key_type);
		return rc;
	}

	return 0;
}
void __exit gss_exit_keyring(void)
{
	unregister_key_type(&gss_key_type);
	sptlrpc_unregister_policy(&gss_policy_keyring);
}