/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ptlrpc/gss/gss_keyring.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#include <libcfs/linux/linux-list.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

#ifdef HAVE_GET_REQUEST_KEY_AUTH
#include <keys/request_key_auth-type.h>
#endif

static struct ptlrpc_sec_policy gss_policy_keyring;
static struct ptlrpc_ctx_ops gss_keyring_ctxops;
static struct key_type gss_key_type;

static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			       struct ptlrpc_svc_ctx *svc_ctx);
static void request_key_unlink(struct key *key);

/*
 * The timeout applies only to the case where the upcall child process dies
 * abnormally; in any other case it should eventually update the kernel key.
 *
 * FIXME: we had better incorporate the client & server side upcall timeouts
 * into the framework of Adaptive Timeouts, but we need to figure out how to
 * make sure the kernel knows whether an upcall process is still in progress
 * or has died unexpectedly.
 */
#define KEYRING_UPCALL_TIMEOUT	(obd_timeout + obd_timeout)

/* Check caller's namespace in gss_keyring upcall */
unsigned int gss_check_upcall_ns = 1;

/****************************************
 * internal helpers                      *
 ****************************************/

static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void key_revoke_locked(struct key *key)
{
	set_bit(KEY_FLAG_REVOKED, &key->flags);
}

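/*
 * Timer callback fired when the gss upcall does not complete within
 * KEYRING_UPCALL_TIMEOUT: expire the context and revoke the key (if it is
 * still bound) so that waiting requests are not stuck forever.
 */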
static void ctx_upcall_timeout_kr(cfs_timer_cb_arg_t data)
{
	struct gss_cli_ctx_keyring *gctx_kr = cfs_from_timer(gctx_kr,
							     data, gck_timer);
	struct ptlrpc_cli_ctx *ctx = &(gctx_kr->gck_base.gc_base);
	struct obd_import *imp = ctx->cc_sec->ps_import;
	struct key *key = gctx_kr->gck_key;

	if (key)
		CWARN("%s: GSS context (%p) negotiation timeout, revoking key (%p)\n",
		      imp->imp_obd->obd_name, ctx, key);
	else
		CWARN("%s: GSS context (%p) negotiation timeout, ignoring already unlinked key\n",
		      imp->imp_obd->obd_name, ctx);

	cli_ctx_expire(ctx);
	if (key)
		key_revoke_locked(key);
}

static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, time64_t timeout)
{
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
	struct timer_list *timer = &gctx_kr->gck_timer;

	CDEBUG(D_SEC, "ctx %p: start timer %llds\n", ctx, timeout);

	cfs_timer_setup(timer, ctx_upcall_timeout_kr,
			(unsigned long)gctx_kr, 0);
	timer->expires = cfs_time_seconds(timeout) + jiffies;
	add_timer(timer);
}

/*
 * Caller should make sure there is no race with other threads.
 */
static
void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
	struct timer_list *timer = &gctx_kr->gck_timer;

	CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);

	timer_delete_sync(timer);
}

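/*
 * Allocate a keyring-backed client context and initialize the common gss
 * ctx state; returns the ctx with one reference held for the caller, or
 * NULL on allocation/initialization failure.
 */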
static
struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
				     struct vfs_cred *vcred)
{
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx_keyring *gctx_kr;

	OBD_ALLOC_PTR(gctx_kr);
	if (gctx_kr == NULL)
		return NULL;

	cfs_timer_setup(&gctx_kr->gck_timer, NULL, 0, 0);

	ctx = &gctx_kr->gck_base.gc_base;

	if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
		OBD_FREE_PTR(gctx_kr);
		return NULL;
	}

	ctx->cc_expire = ktime_get_real_seconds() + KEYRING_UPCALL_TIMEOUT;
	clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
	atomic_inc(&ctx->cc_refcount); /* for the caller */

	return ctx;
}

static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);

	CDEBUG(D_SEC, "destroying ctx %p\n", ctx);

	/* at this time the association with the key has been broken. */
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
	LASSERT(gctx_kr->gck_key == NULL);

	ctx_clear_timer_kr(ctx);

	if (gss_cli_ctx_fini_common(sec, ctx))
		return;

	OBD_FREE_PTR(gctx_kr);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}

static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
	if (sync) {
		ctx_destroy_kr(ctx);
	} else {
		atomic_inc(&ctx->cc_refcount);
		sptlrpc_gc_add_ctx(ctx);
	}
}

static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	if (atomic_dec_and_test(&ctx->cc_refcount))
		ctx_release_kr(ctx, sync);
}

/*
 * key <-> ctx association and rules:
 * - a ctx might not be bound to any key
 * - key/ctx binding is protected by the key semaphore (if the key is present)
 * - key and ctx each take a reference on the other
 * - ctx enlist/unlist is protected by the ctx spinlock
 * - never enlist a ctx after it has been unlisted
 * - whoever does enlist should also do bind; lock the key before enlisting:
 *   - lock key -> lock ctx -> enlist -> unlock ctx -> bind -> unlock key
 * - whoever does unlist should also do unbind:
 *   - lock key -> lock ctx -> unlist -> unlock ctx -> unbind -> unlock key
 *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
 */

static inline void spin_lock_if(spinlock_t *lock, int condition)
{
	if (condition)
		spin_lock(lock);
}

static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
	if (condition)
		spin_unlock(lock);
}

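/*
 * Add a ctx to the sec's context list, taking a list reference; if is_root
 * is set it also becomes the cached root context. ps_lock is taken here
 * unless the caller already holds it (locked != 0).
 */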
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	spin_lock_if(&sec->ps_lock, !locked);

	atomic_inc(&ctx->cc_refcount);
	set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
	hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
	if (is_root)
		gsec_kr->gsk_root_ctx = ctx;

	spin_unlock_if(&sec->ps_lock, !locked);
}

/*
 * Note that after this is called, the caller should not access the ctx again
 * because it might have been freed, unless the caller holds at least one
 * refcount on the ctx.
 *
 * Return non-zero if we indeed unlist this ctx.
 */
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	/* if hashed bit has gone, leave the job to somebody who is doing it */
	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
		return 0;

	/* drop ref inside spin lock to prevent race with other operations */
	spin_lock_if(&sec->ps_lock, !locked);

	if (gsec_kr->gsk_root_ctx == ctx)
		gsec_kr->gsk_root_ctx = NULL;
	hlist_del_init(&ctx->cc_cache);
	atomic_dec(&ctx->cc_refcount);

	spin_unlock_if(&sec->ps_lock, !locked);
	return 1;
}

/*
 * Get specific payload. Newer kernels support 4 slots.
 */
static void *
key_get_payload(struct key *key, unsigned int index)
{
	void *key_ptr = NULL;

#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
	key_ptr = key->payload.data[index];
#else
	if (!index)
		key_ptr = key->payload.data;
#endif
	return key_ptr;
}

/*
 * Set specific payload. Newer kernels support 4 slots.
 */
static int key_set_payload(struct key *key, unsigned int index,
			   struct ptlrpc_cli_ctx *ctx)
{
	int rc = -EINVAL;

#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
	if (index < 4) {
		key->payload.data[index] = ctx;
#else
	if (!index) {
		key->payload.data = ctx;
#endif
		rc = 0;
	}
	return rc;
}

/*
 * bind a key with a ctx together.
 * caller must hold write lock of the key, as well as ref on key & ctx.
 */
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ll_read_key_usage(key) > 0);
	LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
	LASSERT(!key_get_payload(key, 0));

	/* at this time the context may or may not be in the list. */
	key_get(key);
	atomic_inc(&ctx->cc_refcount);
	ctx2gctx_keyring(ctx)->gck_key = key;
	LASSERT(!key_set_payload(key, 0, ctx));
}

/*
 * unbind a key and a ctx.
 * caller must hold write lock, as well as a ref of the key.
 */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(key_get_payload(key, 0) == ctx);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);

	/* must revoke the key, or others may treat it as newly created */
	key_revoke_locked(key);
	request_key_unlink(key);

	key_set_payload(key, 0, NULL);
	ctx2gctx_keyring(ctx)->gck_key = NULL;

	/* once the ctx is split from the key, the timer is meaningless */
	ctx_clear_timer_kr(ctx);

	ctx_put_kr(ctx, 1);
	key_put(key);
}

/*
 * given a ctx, unbind from its coupled key, if any.
 * unbind can only be called once, so we need not worry about the key being
 * released by someone else.
 */
static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
	struct key *key = ctx2gctx_keyring(ctx)->gck_key;

	if (key) {
		LASSERT(key_get_payload(key, 0) == ctx);

		key_get(key);
		down_write(&key->sem);
		unbind_key_ctx(key, ctx);
		up_write(&key->sem);
		key_put(key);
	}
}

/*
 * given a key, unbind from its coupled ctx, if any.
 * caller must hold write lock, as well as a ref of the key.
 */
static void unbind_key_locked(struct key *key)
{
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);

	if (ctx)
		unbind_key_ctx(key, ctx);
}

/*
 * unlist a ctx, and unbind from the coupled key
 */
static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
	if (ctx_unlist_kr(ctx, 0))
		unbind_ctx_kr(ctx);
}

/*
 * given a key, unlist and unbind from the coupled ctx (if any).
 * caller must hold write lock, as well as a ref of the key.
 */
static void kill_key_locked(struct key *key)
{
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);

	if (ctx && ctx_unlist_kr(ctx, 0))
		unbind_key_locked(key);
}

/*
 * caller should hold one ref on contexts in freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
	struct hlist_node *next;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx;

	hlist_for_each_entry_safe(ctx, next, freelist, cc_cache) {
		hlist_del_init(&ctx->cc_cache);

		/* reverse ctx: update current seq to buddy svcctx if it
		 * exists. ideally this should be done at
		 * gss_cli_ctx_finalize(), but the ctx destroy could be
		 * delayed by:
		 *  1) ctx still has reference;
		 *  2) ctx destroy is asynchronous;
		 * and the reverse import's call to inval_all_ctx() requires
		 * this be done _immediately_, otherwise a newly created
		 * reverse ctx might copy the very old sequence number from
		 * the svcctx.
		 */
		gctx = ctx2gctx(ctx);
		if (!rawobj_empty(&gctx->gc_svc_handle) &&
		    sec_is_reverse(gctx->gc_base.cc_sec)) {
			gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
					(__u32) atomic_read(&gctx->gc_seq));
		}

		/* we need to wake up waiting reqs here. the context might
		 * be force-released before the upcall finished, and then the
		 * late-arrived downcall can't find the ctx at all.
		 */
		sptlrpc_cli_ctx_wakeup(ctx);

		unbind_ctx_kr(ctx);
		ctx_put_kr(ctx, 0);
	}
}

/*
 * lookup a root context directly in a sec, return root ctx with a
 * reference taken or NULL.
 */
static
struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct ptlrpc_cli_ctx *ctx = NULL;
	time64_t now = ktime_get_real_seconds();

	spin_lock(&sec->ps_lock);

	ctx = gsec_kr->gsk_root_ctx;

	if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
		struct ptlrpc_cli_ctx *tmp;

		/* For reverse context, browse list and pick the one with
		 * shortest expire time and that has not expired yet.
		 * This one is most likely to have an established peer context
		 * on client side.
		 */
		hlist_for_each_entry(tmp, &gsec_kr->gsk_clist, cc_cache) {
			if (ctx == NULL || ctx->cc_expire == 0 ||
			    (tmp->cc_expire > now &&
			     tmp->cc_expire < ctx->cc_expire) ||
			    (ctx->cc_expire < now &&
			     tmp->cc_expire > ctx->cc_expire)) {
				ctx = tmp;
				/* promote to be root_ctx */
				gsec_kr->gsk_root_ctx = ctx;
			}
		}
	}

	if (ctx) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);
		LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
		atomic_inc(&ctx->cc_refcount);
	}

	spin_unlock(&sec->ps_lock);

	return ctx;
}

#define RVS_CTX_EXPIRE_NICE	(10)

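/*
 * Install a freshly built reverse context as the root context of a reverse
 * sec: existing contexts are given a short grace period (RVS_CTX_EXPIRE_NICE)
 * and the new ctx is enlisted, optionally bound to @key.
 */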
static
void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
				 struct ptlrpc_cli_ctx *new_ctx,
				 struct key *key)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct ptlrpc_cli_ctx *ctx;
	time64_t now;

	LASSERT(sec_is_reverse(sec));

	spin_lock(&sec->ps_lock);
	now = ktime_get_real_seconds();

	/* set all existing ctxs short expiry */
	hlist_for_each_entry(ctx, &gsec_kr->gsk_clist, cc_cache) {
		if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
			ctx->cc_early_expire = 1;
			ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
		}
	}

	/* if there's a root_ctx there, instead of obsoleting the current one
	 * immediately, we let it keep operating for a little while; hopefully
	 * by the time the first backward rpc with the newest ctx is sent out,
	 * the client side already has the peer ctx well established.
	 */
	ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);

	if (key)
		bind_key_ctx(key, new_ctx);

	spin_unlock(&sec->ps_lock);
}

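/* Build the "uid@secid" description string used to request and search gss keys */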
static void construct_key_desc(void *buf, int bufsize,
			       struct ptlrpc_sec *sec, uid_t uid)
{
	snprintf(buf, bufsize, "%d@%x", uid, sec->ps_id);
	((char *)buf)[bufsize - 1] = '\0';
}

/****************************************
 * sec apis                              *
 ****************************************/

static
struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
				      struct ptlrpc_svc_ctx *svcctx,
				      struct sptlrpc_flavor *sf)
{
	struct gss_sec_keyring *gsec_kr;
	ENTRY;

	OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
	if (gsec_kr == NULL)
		RETURN(NULL);

	INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
	gsec_kr->gsk_root_ctx = NULL;
	mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
	mutex_init(&gsec_kr->gsk_uc_lock);
#endif

	if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
				  imp, svcctx, sf))
		goto err_free;

	if (svcctx != NULL &&
	    sec_install_rctx_kr(&gsec_kr->gsk_base.gs_base, svcctx)) {
		gss_sec_destroy_common(&gsec_kr->gsk_base);
		goto err_free;
	}

	RETURN(&gsec_kr->gsk_base.gs_base);

err_free:
	OBD_FREE(gsec_kr, sizeof(*gsec_kr));
	RETURN(NULL);
}

static
void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec *gsec = sec2gsec(sec);
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

	CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);

	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(hlist_empty(&gsec_kr->gsk_clist));
	LASSERT(gsec_kr->gsk_root_ctx == NULL);

	gss_sec_destroy_common(gsec);

	OBD_FREE(gsec_kr, sizeof(*gsec_kr));
}

static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
{
	/* except for the ROOTONLY flag, treat it as the root user only if the
	 * real uid is 0; euid/fsuid being 0 is handled as a setuid scenario
	 */
	if (sec_is_rootonly(sec) || (vcred->vc_uid == 0))
		return 1;
	else
		return 0;
}

/*
 * When lookup_user_key is available use the kernel API rather than directly
 * accessing the uid_keyring and session_keyring via the current process
 * credentials.
 */
#ifdef HAVE_LOOKUP_USER_KEY

#ifdef HAVE_KEY_NEED_UNLINK
/* from Linux security/keys/internal.h: */
# ifndef KEY_LOOKUP_PARTIAL
#  define KEY_LOOKUP_PARTIAL 0x2
# endif
#else
# define KEY_NEED_UNLINK 0
# ifndef KEY_LOOKUP_FOR_UNLINK
#  define KEY_LOOKUP_FOR_UNLINK 0x4
# endif
# define KEY_LOOKUP_PARTIAL KEY_LOOKUP_FOR_UNLINK
#endif /* HAVE_KEY_NEED_UNLINK */

static struct key *_user_key(key_serial_t id)
{
	key_ref_t ref;

	ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, KEY_NEED_UNLINK);
	if (IS_ERR(ref))
		return NULL;
	return key_ref_to_ptr(ref);
}

static inline struct key *get_user_session_keyring(const struct cred *cred)
{
	return _user_key(KEY_SPEC_USER_SESSION_KEYRING);
}

static inline struct key *get_user_keyring(const struct cred *cred)
{
	return _user_key(KEY_SPEC_USER_KEYRING);
}
#else /* !HAVE_LOOKUP_USER_KEY */
static inline struct key *get_user_session_keyring(const struct cred *cred)
{
	return key_get(cred->user->session_keyring);
}

static inline struct key *get_user_keyring(const struct cred *cred)
{
	return key_get(cred->user->uid_keyring);
}
#endif /* HAVE_LOOKUP_USER_KEY */

/*
 * unlink request key from its ring, which is linked during request_key().
 * sadly, we have to 'guess' which keyring it's linked to.
 */
static void request_key_unlink(struct key *key)
{
	struct cred *cred = (struct cred *)current_cred(), *new_cred = NULL;
#ifdef HAVE_USER_UID_KEYRING
	struct key *root_uid_keyring = NULL;
#endif
	const struct cred *old_cred = NULL;
	kuid_t uid = current_uid();
	struct key *ring = NULL;
	int res;

	/* unlink key with user's creds if it's a user key */
	if (!uid_eq(key->uid, current_uid())) {
		new_cred = prepare_creds();
		if (new_cred == NULL)
			goto search;

		new_cred->uid = key->uid;
		new_cred->user->uid = key->uid;
#ifdef HAVE_USER_UID_KEYRING
		root_uid_keyring = current_cred()->user->uid_keyring;
		new_cred->user->uid_keyring = NULL;
#endif
		old_cred = override_creds(new_cred);
		cred = new_cred;
	}

	/* User keys are linked to the user keyring. So get it now. */
	if (from_kuid(&init_user_ns, key->uid)) {
		/* Getting a key(ring) normally increases its refcount by 1.
		 * But if we overrode creds above, calling get_user_keyring()
		 * will add one more ref, because of the user switch.
		 */
		ring = get_user_keyring(cred);
	} else {
search:
		switch (cred->jit_keyring) {
		case KEY_REQKEY_DEFL_DEFAULT:
		case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
#ifdef HAVE_GET_REQUEST_KEY_AUTH
			if (cred->request_key_auth) {
				struct request_key_auth *rka;
				struct key *authkey = cred->request_key_auth;

				down_read(&authkey->sem);
				rka = get_request_key_auth(authkey);
				if (!test_bit(KEY_FLAG_REVOKED, &authkey->flags))
					ring = key_get(rka->dest_keyring);
				up_read(&authkey->sem);
				if (ring)
					break;
			}
#endif
			fallthrough;
		case KEY_REQKEY_DEFL_THREAD_KEYRING:
			ring = key_get(cred->thread_keyring);
			if (ring)
				break;
			fallthrough;
		case KEY_REQKEY_DEFL_PROCESS_KEYRING:
			ring = key_get(cred->process_keyring);
			if (ring)
				break;
			fallthrough;
		case KEY_REQKEY_DEFL_SESSION_KEYRING:
			rcu_read_lock();
			ring = key_get(rcu_dereference(cred->session_keyring));
			rcu_read_unlock();
			if (ring)
				break;
			fallthrough;
		case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
			ring = get_user_session_keyring(cred);
			break;
		case KEY_REQKEY_DEFL_USER_KEYRING:
			ring = get_user_keyring(cred);
			break;
		case KEY_REQKEY_DEFL_GROUP_KEYRING:
		default:
			LBUG();
		}
	}

	if (ring) {
		res = key_unlink(ring, key);
		CDEBUG(D_SEC,
		       "Unlink key %08x (%p) from keyring %08x: %d\n",
		       key->serial, key, ring->serial, res);
		/* matches key_get()/get_user_keyring() above */
		key_put(ring);
		/* If this is a user key, it added an extra ref on the user
		 * keyring at link/instantiate stage. This ref needs to be
		 * removed now that the key has been unlinked.
		 */
		if (from_kuid(&init_user_ns, key->uid))
			key_put(ring);
	} else {
		CDEBUG(D_SEC,
		       "Missing keyring, key %08x (%p) could not be unlinked, ignored\n",
		       key->serial, key);
	}

	if (old_cred) {
		revert_creds(old_cred);
		put_cred(new_cred);
		current_cred()->user->uid = uid;
#ifdef HAVE_USER_UID_KEYRING
		/* We are switching creds back, so need to drop ref on keyring
		 * for kernel implementation based on user keyring pinned from
		 * the user_struct struct.
		 */
		key_put(current_cred()->user->uid_keyring);
		if (root_uid_keyring)
			current_cred()->user->uid_keyring = root_uid_keyring;
#endif
	}
}

/**
 * \retval	a valid context on success
 * \retval	-ev error number or NULL on error
 */
static
struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
					      struct vfs_cred *vcred,
					      int create, int remove_dead)
{
	struct obd_import *imp = sec->ps_import;
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct ptlrpc_cli_ctx *ctx = NULL;
	unsigned int is_root = 0, create_new = 0;
	struct key *key;
	char desc[24];
	char *coinfo;
	int coinfo_size;
	const char *sec_part_flags = "";
	char svc_flag = '-';
	pid_t caller_pid;
	struct lnet_nid primary;
	ENTRY;

	LASSERT(imp != NULL);

	is_root = user_is_root(sec, vcred);

	/* a little bit optimization for root context */
	if (is_root) {
		ctx = sec_lookup_root_ctx_kr(sec);
		/*
		 * Only lookup directly for REVERSE sec, which should
		 * always succeed.
		 */
		if (ctx || sec_is_reverse(sec))
			RETURN(ctx);
	}

	LASSERT(create != 0);

	/* for root context, obtain lock and check again, this time hold
	 * the root upcall lock, make sure nobody else populated new root
	 * context after last check.
	 */
	if (is_root) {
		mutex_lock(&gsec_kr->gsk_root_uc_lock);

		ctx = sec_lookup_root_ctx_kr(sec);
		if (ctx)
			goto out;

		/* update reverse handle for root user */
		sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();

		switch (sec->ps_part) {
		case LUSTRE_SP_MDT:
			sec_part_flags = "m";
			break;
		case LUSTRE_SP_OST:
			sec_part_flags = "o";
			break;
		case LUSTRE_SP_MGC:
			sec_part_flags = "rmo";
			break;
		case LUSTRE_SP_CLI:
			sec_part_flags = "r";
			break;
		case LUSTRE_SP_MGS:
		default:
			LBUG();
		}

		switch (SPTLRPC_FLVR_SVC(sec->ps_flvr.sf_rpc)) {
		case SPTLRPC_SVC_NULL:
			svc_flag = 'n';
			break;
		case SPTLRPC_SVC_AUTH:
			svc_flag = 'a';
			break;
		case SPTLRPC_SVC_INTG:
			svc_flag = 'i';
			break;
		case SPTLRPC_SVC_PRIV:
			svc_flag = 'p';
			break;
		default:
			LBUG();
		}
	}

	/* in case of setuid, the key will be constructed as owner of
	 * fsuid/fsgid, but we do authentication based on real uid/gid. the
	 * key permission bits will be exactly POS_ALL, so only processes
	 * that subscribe to this key have access, although the quota might
	 * be counted against others (fsuid/fsgid).
	 *
	 * keyring will use fsuid/fsgid as upcall parameters, so we have to
	 * encode real uid/gid into callout info.
	 */

	/* But first we need to make sure the obd type is supported */
	if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MGC_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
	    strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSP_NAME)) {
		CERROR("obd %s is not a supported device\n",
		       imp->imp_obd->obd_name);
		GOTO(out, ctx = NULL);
	}

	construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

	/* callout info format:
	 * secid:mech:uid:gid:sec_flags:svc_flag:svc_type:peer_nid:target_uuid:
	 * self_nid:pid
	 */
	coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
	OBD_ALLOC(coinfo, coinfo_size);
	if (coinfo == NULL)
		goto out;

	/* Last callout parameter is pid of the process whose namespace will
	 * be used for credentials' retrieval.
	 */
	if (gss_check_upcall_ns) {
		/* For user's credentials (in which case sec_part_flags is
		 * empty), use current PID instead of import's reference
		 * PID to get reference namespace.
		 */
		if (sec_part_flags[0] == '\0')
			caller_pid = current->pid;
		else
			caller_pid = imp->imp_sec_refpid;
	} else {
		/* Do not switch namespace in gss keyring upcall. */
		caller_pid = 0;
	}
	primary = imp->imp_connection->c_self;
	LNetPrimaryNID(&primary);

	/* FIXME !! Needs to support larger NIDs */
	snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%c:%d:%#llx:%s:%#llx:%d",
		 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
		 vcred->vc_uid, vcred->vc_gid,
		 sec_part_flags, svc_flag, import_to_gss_svc(imp),
		 lnet_nid_to_nid4(&imp->imp_connection->c_peer.nid),
		 imp->imp_obd->obd_name,
		 lnet_nid_to_nid4(&primary),
		 caller_pid);

	CDEBUG(D_SEC, "requesting key for %s\n", desc);

	keyring_upcall_lock(gsec_kr);
	key = request_key(&gss_key_type, desc, coinfo);
	keyring_upcall_unlock(gsec_kr);

	OBD_FREE(coinfo, coinfo_size);

	if (IS_ERR(key)) {
		CERROR("%s: request key failed for uid %d: rc = %ld\n",
		       imp->imp_obd->obd_name, vcred->vc_uid,
		       PTR_ERR(key));
		ctx = NULL;
		goto out;
	}
	CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc);

	/* We want user keys to be linked to the user keyring (see call to
	 * keyctl_instantiate() from prepare_and_instantiate() in userspace).
	 * But internally request_key() tends to also link the key to the
	 * session keyring. So do our best to avoid that by trying to unlink
	 * the key from the session keyring right now. It will spare us pain
	 * when we need to remove the key later on.
	 */
	if (!is_root && current_cred()->session_keyring) {
		key_get(current_cred()->session_keyring);
		(void)key_unlink(current_cred()->session_keyring, key);
		key_put(current_cred()->session_keyring);
	}

	/* once payload.data has been pointed to a ctx, it never changes until
	 * we de-associate them; but a parallel request_key() may return
	 * a key with payload.data == NULL at the same time. so we still
	 * need the write lock of key->sem to serialize them.
	 */
	down_write(&key->sem);

	ctx = key_get_payload(key, 0);
	if (likely(ctx)) {
		LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
		LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
		LASSERT(ll_read_key_usage(key) >= 2);

		/* simply take a ref and return. it's the upper layer's
		 * responsibility to detect & replace dead ctx.
		 */
		atomic_inc(&ctx->cc_refcount);
	} else {
		/* pre initialization with a cli_ctx. this can't be done in
		 * key_instantiate() because we don't have enough information
		 * there.
		 */
		ctx = ctx_create_kr(sec, vcred);
		if (ctx != NULL) {
			ctx_enlist_kr(ctx, is_root, 0);
			bind_key_ctx(key, ctx);

			ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);

			CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
			       key, ctx, sec);
		} else {
			/* we'd prefer to call key_revoke(), but we'd rather
			 * revoke it within this key->sem locked period.
			 */
			CDEBUG(D_SEC, "revoking key %08x (%p)\n",
			       key->serial, key);
			key_revoke_locked(key);
		}

		create_new = 1;
	}

	up_write(&key->sem);

	if (is_root && create_new)
		request_key_unlink(key);

	key_put(key);
out:
	if (is_root)
		mutex_unlock(&gsec_kr->gsk_root_uc_lock);
	RETURN(ctx);
}

static
void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
			    struct ptlrpc_cli_ctx *ctx,
			    int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	ctx_release_kr(ctx, sync);
}

/*
 * To flush the contexts of a normal user we must resort to the keyring
 * itself to find the contexts that belong to that user.
 *
 * Note that we expect to flush only _my_ contexts; the "uid" is ignored
 * in the search.
 */
static
void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
			     uid_t uid, int grace, int force)
{
	struct key *key;
	char desc[24];

	/* nothing to do for reverse or rootonly sec */
	if (sec_is_reverse(sec) || sec_is_rootonly(sec))
		return;

	construct_key_desc(desc, sizeof(desc), sec, uid);

	/* there should be only one valid key, but we put it in a
	 * loop in case of any weird cases
	 */
	for (;;) {
		key = request_key(&gss_key_type, desc, NULL);
		if (IS_ERR(key)) {
			CDEBUG(D_SEC, "No more key found for current user\n");
			break;
		}

		down_write(&key->sem);

		kill_key_locked(key);

		/* kill_key_locked() should usually revoke the key, but we
		 * revoke it again to make sure, e.g. in some cases the key
		 * may not be well coupled with a context.
		 */
		key_revoke_locked(key);

		up_write(&key->sem);
		key_put(key);
	}
}

/*
 * flush contexts of root or of all users; we iterate through the list.
 */
static
void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace,
			     int force)
{
	struct gss_sec_keyring *gsec_kr;
	struct hlist_head freelist = HLIST_HEAD_INIT;
	struct hlist_node *next;
	struct ptlrpc_cli_ctx *ctx;

	gsec_kr = sec2gsec_keyring(sec);

	spin_lock(&sec->ps_lock);
	hlist_for_each_entry_safe(ctx, next, &gsec_kr->gsk_clist,
				  cc_cache) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);

		if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
			continue;

		/* at this moment there are at least 2 base references:
		 * key association and in-list.
		 */
		if (atomic_read(&ctx->cc_refcount) > 2) {
			if (!force)
				continue;
			CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
			      ctx, ctx->cc_vcred.vc_uid,
			      sec2target_str(ctx->cc_sec),
			      atomic_read(&ctx->cc_refcount) - 2);
		}

		set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
		if (!grace)
			clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

		atomic_inc(&ctx->cc_refcount);

		if (ctx_unlist_kr(ctx, 1)) {
			hlist_add_head(&ctx->cc_cache, &freelist);
		} else {
			LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
			atomic_dec(&ctx->cc_refcount);
		}
	}
	spin_unlock(&sec->ps_lock);

	dispose_ctx_list_kr(&freelist);
}

static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
			       uid_t uid, int grace, int force)
{
	ENTRY;

	CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
	       sec, atomic_read(&sec->ps_refcount),
	       atomic_read(&sec->ps_nctx),
	       uid, grace, force);

	if (uid != -1 && uid != 0)
		flush_user_ctx_cache_kr(sec, uid, grace, force);
	else
		flush_spec_ctx_cache_kr(sec, uid, grace, force);

	RETURN(0);
}

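/*
 * Garbage collector hook: walk the context list, unlist contexts that are
 * found dead, and dispose of them outside ps_lock.
 */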
static
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct hlist_head freelist = HLIST_HEAD_INIT;
	struct hlist_node *next;
	struct ptlrpc_cli_ctx *ctx;

	CDEBUG(D_SEC, "running gc\n");

	spin_lock(&sec->ps_lock);
	hlist_for_each_entry_safe(ctx, next, &gsec_kr->gsk_clist,
				  cc_cache) {
		LASSERT(atomic_read(&ctx->cc_refcount) > 0);

		atomic_inc(&ctx->cc_refcount);

		if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
			hlist_add_head(&ctx->cc_cache, &freelist);
			CWARN("unhashed ctx %p\n", ctx);
		} else {
			LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
			atomic_dec(&ctx->cc_refcount);
		}
	}
	spin_unlock(&sec->ps_lock);

	dispose_ctx_list_kr(&freelist);
}

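/* Dump one line of state per cached context into the provided seq_file */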
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
	struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
	struct hlist_node *next;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx;
	struct ptlrpc_connection *conn;
	time64_t now = ktime_get_real_seconds();

	spin_lock(&sec->ps_lock);
	hlist_for_each_entry_safe(ctx, next, &gsec_kr->gsk_clist,
				  cc_cache) {
		struct key *key;
		char flags_str[40];
		char mech[64];

		gctx = ctx2gctx(ctx);
		key = ctx2gctx_keyring(ctx)->gck_key;
		if (sec_is_reverse(sec) &&
		    ctx->cc_sec && ctx->cc_sec->ps_import &&
		    ctx->cc_sec->ps_import->imp_connection)
			conn = ctx->cc_sec->ps_import->imp_connection;
		else
			conn = NULL;

		gss_cli_ctx_flags2str(ctx->cc_flags,
				      flags_str, sizeof(flags_str));

		if (gctx->gc_mechctx)
			lgss_display(gctx->gc_mechctx, mech, sizeof(mech));
		else
			snprintf(mech, sizeof(mech), "N/A");
		mech[sizeof(mech) - 1] = '\0';

		seq_printf(seq,
			   "- { %s%s%suid: %u, ctxref: %d, expire: %lld, delta: %lld, flags: [%s], seq: %d, win: %u, key: %08x, keyref: %d, hdl: \"%#llx:%#llx\", mech: \"%s\" }\n",
			   conn ? "peer_nid: " : "",
			   conn ? libcfs_nidstr(&conn->c_peer.nid) : "",
			   conn ? ", " : "",
			   ctx->cc_vcred.vc_uid, atomic_read(&ctx->cc_refcount),
			   ctx->cc_expire,
			   ctx->cc_expire ? ctx->cc_expire - now : 0,
			   flags_str, atomic_read(&gctx->gc_seq),
			   gctx->gc_win, key ? key->serial : 0,
			   key ? ll_read_key_usage(key) : 0,
			   gss_handle_to_u64(&gctx->gc_handle),
			   gss_handle_to_u64(&gctx->gc_svc_handle),
			   mech);
	}
	spin_unlock(&sec->ps_lock);

	return 0;
}

/****************************************
 * cli_ctx apis                          *
 ****************************************/

static
int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
{
	/* upcall is already on the way */
	struct gss_cli_ctx *gctx = ctx ? ctx2gctx(ctx) : NULL;

	/* record latest sequence number in buddy svcctx */
	if (gctx && !rawobj_empty(&gctx->gc_svc_handle) &&
	    sec_is_reverse(gctx->gc_base.cc_sec)) {
		return gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
					(__u32)atomic_read(&gctx->gc_seq));
	}
	return 0;
}

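/*
 * Return 0 if the context is ready for use and non-zero otherwise; a context
 * found dead here is also unlisted and unbound from its key.
 */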
static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	if (cli_ctx_check_death(ctx)) {
		kill_ctx_kr(ctx);
		return 1;
	}

	if (cli_ctx_is_ready(ctx))
		return 0;
	return 1;
}

static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	cli_ctx_expire(ctx);
	kill_ctx_kr(ctx);
}

/****************************************
 * (reverse) service                     *
 ****************************************/

/*
 * reverse contexts could have nothing to do with keyrings. here we still
 * keep the version which binds to a key, for future reference.
 */
#define HAVE_REVERSE_CTX_NOKEY

#ifdef HAVE_REVERSE_CTX_NOKEY

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_cli_ctx *cli_ctx;
	struct vfs_cred vcred = { .vc_uid = 0 };
	int rc;

	LASSERT(sec);
	LASSERT(svc_ctx);

	cli_ctx = ctx_create_kr(sec, &vcred);
	if (cli_ctx == NULL)
		return -ENOMEM;

	rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
	if (rc) {
		CERROR("failed copy reverse cli ctx: %d\n", rc);

		ctx_put_kr(cli_ctx, 1);
		return rc;
	}

	rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);

	ctx_put_kr(cli_ctx, 1);

	return 0;
}

#else /* ! HAVE_REVERSE_CTX_NOKEY */

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
			struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_cli_ctx *cli_ctx = NULL;
	struct key *key;
	struct vfs_cred vcred = { .vc_uid = 0 };
	char desc[64];
	int rc;

	LASSERT(sec);
	LASSERT(svc_ctx);

	construct_key_desc(desc, sizeof(desc), sec, 0);

	key = key_alloc(&gss_key_type, desc, 0, 0,
			KEY_POS_ALL | KEY_USR_ALL, 1);
	if (IS_ERR(key)) {
		CERROR("failed to alloc key: %ld\n", PTR_ERR(key));
		return PTR_ERR(key);
	}

	rc = key_instantiate_and_link(key, NULL, 0, NULL, NULL);
	if (rc) {
		CERROR("failed to instantiate key: %d\n", rc);
		goto err_revoke;
	}

	down_write(&key->sem);

	LASSERT(!key_get_payload(key, 0));

	cli_ctx = ctx_create_kr(sec, &vcred);
	if (cli_ctx == NULL) {
		rc = -ENOMEM;
		goto err_up;
	}

	rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
	if (rc) {
		CERROR("failed copy reverse cli ctx: %d\n", rc);
		goto err_put;
	}

	rvs_sec_install_root_ctx_kr(sec, cli_ctx, key);

	ctx_put_kr(cli_ctx, 1);
	up_write(&key->sem);

	rc = 0;
	goto out;

err_put:
	ctx_put_kr(cli_ctx, 1);
err_up:
	up_write(&key->sem);
err_revoke:
	key_revoke(key);
out:
	key_put(key);
	return rc;
}

#endif /* HAVE_REVERSE_CTX_NOKEY */

/****************************************
 * service apis                          *
 ****************************************/

static
int gss_svc_accept_kr(struct ptlrpc_request *req)
{
	return gss_svc_accept(&gss_policy_keyring, req);
}

static
int gss_svc_install_rctx_kr(struct obd_import *imp,
			    struct ptlrpc_svc_ctx *svc_ctx)
{
	struct ptlrpc_sec *sec;
	int rc;

	sec = sptlrpc_import_sec_ref(imp);
	LASSERT(sec);

	rc = sec_install_rctx_kr(sec, svc_ctx);
	sptlrpc_sec_put(sec);

	return rc;
}

/****************************************
 * key apis                              *
 ****************************************/

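/*
 * key_type->instantiate hook: called from the userspace upcall with no
 * payload; it only links the new key to the session keyring (for root) or
 * the user keyring (for regular users) so the follow-up negotiation can
 * find it.
 */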
#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
int gss_kt_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
	const void *data = prep->data;
	size_t datalen = prep->datalen;
#else
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
#endif
	struct key *keyring;
	int uid, rc;
	ENTRY;

	CDEBUG(D_SEC, "instantiating key %08x (%p)\n", key->serial, key);

	if (data != NULL || datalen != 0) {
		CERROR("invalid: data %p, len %lu\n", data, (long)datalen);
		RETURN(-EINVAL);
	}

	if (key_get_payload(key, 0)) {
		CERROR("key already has payload\n");
		RETURN(-EINVAL);
	}

	/* link the key to the session keyring, so the following context
	 * negotiation rpc fired from user space could find this key. This
	 * will be unlinked automatically when the upcall process dies.
	 *
	 * we can't do this through keyctl from userspace, because the upcall
	 * might be neither possessor nor owner of the key (setuid).
	 *
	 * the session keyring is created upon upcall, and doesn't change
	 * until the upcall finishes, so the rcu lock is not needed here.
	 *
	 * But for end users, link to the user keyring. This simplifies key
	 * management, makes keys shared across all user sessions, and avoids
	 * an unfortunate key leak if lfs flushctx is not called at user
	 * logout.
	 */
	uid = from_kuid(&init_user_ns, current_uid());
	if (uid == 0)
		keyring = current_cred()->session_keyring;
	else
		keyring = get_user_keyring(current_cred());

	rc = key_link(keyring, key);
	if (rc) {
		CERROR("failed to link key %08x to keyring %08x: %d\n",
		       key->serial, keyring->serial, rc);
		RETURN(rc);
	}

	CDEBUG(D_SEC,
	       "key %08x (%p) linked to keyring %08x and instantiated, ctx %p\n",
	       key->serial, key, keyring->serial, key_get_payload(key, 0));

	RETURN(0);
}

/*
 * called with key semaphore write locked. it means we can operate
 * on the context without fear of losing the refcount.
 */
#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
int gss_kt_update(struct key *key, struct key_preparsed_payload *prep)
{
	const void *data = prep->data;
	__u32 datalen32 = (__u32) prep->datalen;
#else
int gss_kt_update(struct key *key, const void *data, size_t datalen)
{
	__u32 datalen32 = (__u32) datalen;
#endif
	struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);
	struct gss_cli_ctx *gctx;
	rawobj_t tmpobj = RAWOBJ_EMPTY;
	int rc;
	ENTRY;

	CDEBUG(D_SEC, "updating key %08x (%p)\n", key->serial, key);

	if (data == NULL || datalen32 == 0) {
		CWARN("invalid: data %p, len %lu\n", data, (long)datalen32);
		RETURN(-EINVAL);
	}

	/* if the upcall finished negotiation too fast (most likely because a
	 * local error happened) and called kt_update(), the ctx might still
	 * be NULL. but the key will finally be associated with a context, or
	 * be revoked. if the key status is fine, return -EAGAIN to let
	 * userspace sleep a while and call again.
	 */
	if (ctx == NULL) {
		CDEBUG(D_SEC, "update too soon: key %08x (%p) flags %lx\n",
		       key->serial, key, key->flags);

		rc = key_validate(key);
		if (rc == 0)
			RETURN(-EAGAIN);
		else
			RETURN(rc);
	}

	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(ctx->cc_sec);

	ctx_clear_timer_kr(ctx);

	/* don't proceed if already refreshed */
	if (cli_ctx_is_refreshed(ctx)) {
		CWARN("ctx already done refresh\n");
		RETURN(0);
	}

	sptlrpc_cli_ctx_get(ctx);
	gctx = ctx2gctx(ctx);

	rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win,
				  sizeof(gctx->gc_win));
	if (rc) {
		CERROR("failed extract seq_win\n");
		goto out;
	}

	if (gctx->gc_win == 0) {
		__u32 nego_rpc_err, nego_gss_err;

		rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
					  sizeof(nego_rpc_err));
		if (rc) {
			CERROR("cannot extract RPC: rc = %d\n", rc);
			goto out;
		}

		rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
					  sizeof(nego_gss_err));
		if (rc) {
			CERROR("failed to extract gss rc = %d\n", rc);
			goto out;
		}

		CERROR("negotiation: rpc err %d, gss err %x\n",
		       nego_rpc_err, nego_gss_err);

		rc = nego_rpc_err ? nego_rpc_err : -EACCES;
	} else {
		rc = rawobj_extract_local_alloc(&gctx->gc_handle,
						(__u32 **) &data, &datalen32);
		if (rc) {
			CERROR("failed extract handle\n");
			goto out;
		}

		rc = rawobj_extract_local(&tmpobj,
					  (__u32 **) &data, &datalen32);
		if (rc) {
			CERROR("failed extract mech\n");
			goto out;
		}

		rc = lgss_import_sec_context(&tmpobj,
					     sec2gsec(ctx->cc_sec)->gs_mech,
					     &gctx->gc_mechctx);
		if (rc != GSS_S_COMPLETE)
			CERROR("failed import context\n");
		else
			rc = 0;
	}
out:
	CDEBUG(D_SEC, "update of key %08x (%p): %d\n", key->serial, key, rc);
	/* we don't care what the current status of this ctx is, even if
	 * someone else is operating on the ctx at the same time. we just add
	 * our own opinion here.
	 */
	if (rc == 0) {
		gss_cli_ctx_uptodate(gctx);
	} else {
		/* this will also revoke the key. it has to be done before
		 * waking up waiters, otherwise they can find the stale key.
		 */
		kill_key_locked(key);

		cli_ctx_expire(ctx);

		if (rc != -ERESTART)
			set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
	}

	/* let user space think it's a success */
	sptlrpc_cli_ctx_put(ctx, 1);
	RETURN(0);
}

#ifndef HAVE_KEY_MATCH_DATA
static int
gss_kt_match(const struct key *key, const void *desc)
{
	return strcmp(key->description, (const char *) desc) == 0 &&
	       !test_bit(KEY_FLAG_REVOKED, &key->flags);
}
#else /* ! HAVE_KEY_MATCH_DATA */
static bool
gss_kt_match(const struct key *key, const struct key_match_data *match_data)
{
	const char *desc = match_data->raw_data;

	return strcmp(key->description, desc) == 0 &&
	       !test_bit(KEY_FLAG_REVOKED, &key->flags);
}

/*
 * Preparse the match criterion.
 */
static int gss_kt_match_preparse(struct key_match_data *match_data)
{
	match_data->lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT;
	match_data->cmp = gss_kt_match;
	return 0;
}
#endif /* HAVE_KEY_MATCH_DATA */

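/* key_type->destroy hook: by now the payload must already be detached */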
static
void gss_kt_destroy(struct key *key)
{
	LASSERT(!key_get_payload(key, 0));
	CDEBUG(D_SEC, "destroy key %08x %p\n", key->serial, key);
}

static
void gss_kt_describe(const struct key *key, struct seq_file *s)
{
	if (key->description == NULL)
		seq_puts(s, "[null]");
	else
		seq_puts(s, key->description);
}

static struct key_type gss_key_type =
{
	.name		= "lgssc",
	.def_datalen	= 0,
	.instantiate	= gss_kt_instantiate,
	.update		= gss_kt_update,
#ifdef HAVE_KEY_MATCH_DATA
	.match_preparse	= gss_kt_match_preparse,
#else
	.match		= gss_kt_match,
#endif
	.destroy	= gss_kt_destroy,
	.describe	= gss_kt_describe,
};

/****************************************
 * lustre gss keyring policy             *
 ****************************************/

static struct ptlrpc_ctx_ops gss_keyring_ctxops = {
	.match			= gss_cli_ctx_match,
	.refresh		= gss_cli_ctx_refresh_kr,
	.validate		= gss_cli_ctx_validate_kr,
	.die			= gss_cli_ctx_die_kr,
	.sign			= gss_cli_ctx_sign,
	.verify			= gss_cli_ctx_verify,
	.seal			= gss_cli_ctx_seal,
	.unseal			= gss_cli_ctx_unseal,
	.wrap_bulk		= gss_cli_ctx_wrap_bulk,
	.unwrap_bulk		= gss_cli_ctx_unwrap_bulk,
};

static struct ptlrpc_sec_cops gss_sec_keyring_cops = {
	.create_sec		= gss_sec_create_kr,
	.destroy_sec		= gss_sec_destroy_kr,
	.kill_sec		= gss_sec_kill,
	.lookup_ctx		= gss_sec_lookup_ctx_kr,
	.release_ctx		= gss_sec_release_ctx_kr,
	.flush_ctx_cache	= gss_sec_flush_ctx_cache_kr,
	.gc_ctx			= gss_sec_gc_ctx_kr,
	.install_rctx		= gss_sec_install_rctx,
	.alloc_reqbuf		= gss_alloc_reqbuf,
	.free_reqbuf		= gss_free_reqbuf,
	.alloc_repbuf		= gss_alloc_repbuf,
	.free_repbuf		= gss_free_repbuf,
	.enlarge_reqbuf		= gss_enlarge_reqbuf,
	.display		= gss_sec_display_kr,
};

static struct ptlrpc_sec_sops gss_sec_keyring_sops = {
	.accept			= gss_svc_accept_kr,
	.invalidate_ctx		= gss_svc_invalidate_ctx,
	.alloc_rs		= gss_svc_alloc_rs,
	.authorize		= gss_svc_authorize,
	.free_rs		= gss_svc_free_rs,
	.free_ctx		= gss_svc_free_ctx,
	.prep_bulk		= gss_svc_prep_bulk,
	.unwrap_bulk		= gss_svc_unwrap_bulk,
	.wrap_bulk		= gss_svc_wrap_bulk,
	.install_rctx		= gss_svc_install_rctx_kr,
};

static struct ptlrpc_sec_policy gss_policy_keyring = {
	.sp_owner		= THIS_MODULE,
	.sp_name		= "gss.keyring",
	.sp_policy		= SPTLRPC_POLICY_GSS,
	.sp_cops		= &gss_sec_keyring_cops,
	.sp_sops		= &gss_sec_keyring_sops,
};

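/*
 * Register the gss key type and the "gss.keyring" sptlrpc policy; both are
 * torn down again in gss_exit_keyring().
 */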
int __init gss_init_keyring(void)
{
	int rc;

	rc = register_key_type(&gss_key_type);
	if (rc) {
		CERROR("failed to register keyring type: %d\n", rc);
		return rc;
	}

	rc = sptlrpc_register_policy(&gss_policy_keyring);
	if (rc) {
		unregister_key_type(&gss_key_type);
		return rc;
	}

	return 0;
}

void __exit gss_exit_keyring(void)
{
	unregister_key_type(&gss_key_type);
	sptlrpc_unregister_policy(&gss_policy_keyring);
}