/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_keyring.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#include <libcfs/linux/linux-list.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>

#include "gss_internal.h"
static struct ptlrpc_sec_policy gss_policy_keyring;
static struct ptlrpc_ctx_ops gss_keyring_ctxops;
static struct key_type gss_key_type;

static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                               struct ptlrpc_svc_ctx *svc_ctx);
/*
 * the timeout is only for the case where the upcall child process dies
 * abnormally; in any other case it should eventually update the kernel key.
 *
 * FIXME we'd better incorporate the client & server side upcall timeouts
 * into the framework of Adaptive Timeouts, but we need to figure out how to
 * make sure the kernel knows whether an upcall process is in progress or has
 * died unexpectedly.
 */
#define KEYRING_UPCALL_TIMEOUT  (obd_timeout + obd_timeout)
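
/*
 * Note: with the default obd_timeout of 100 seconds (an assumption; the
 * value is tunable per system) this gives the upcall up to 200 seconds
 * before ctx_upcall_timeout_kr() expires the context and revokes the key.
 */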
/****************************************
 * internal helpers                     *
 ****************************************/
#define DUMP_PROCESS_KEYRINGS(tsk)                                      \
{                                                                       \
        CWARN("DUMP PK: %s[%u,%u/%u](<-%s[%u,%u/%u]): "                 \
              "a %d, t %d, p %d, s %d, u %d, us %d, df %d\n",           \
              tsk->comm, tsk->pid, tsk->uid, tsk->fsuid,                \
              tsk->parent->comm, tsk->parent->pid,                      \
              tsk->parent->uid, tsk->parent->fsuid,                     \
              tsk->request_key_auth ?                                   \
              tsk->request_key_auth->serial : 0,                        \
              key_cred(tsk)->thread_keyring ?                           \
              key_cred(tsk)->thread_keyring->serial : 0,                \
              key_tgcred(tsk)->process_keyring ?                        \
              key_tgcred(tsk)->process_keyring->serial : 0,             \
              key_tgcred(tsk)->session_keyring ?                        \
              key_tgcred(tsk)->session_keyring->serial : 0,             \
              key_cred(tsk)->user->uid_keyring ?                        \
              key_cred(tsk)->user->uid_keyring->serial : 0,             \
              key_cred(tsk)->user->session_keyring ?                    \
              key_cred(tsk)->user->session_keyring->serial : 0,         \
              key_cred(tsk)->jit_keyring                                \
             );                                                         \
}
#define DUMP_KEY(key)                                                   \
{                                                                       \
        CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n",              \
              key, key->serial, atomic_read(&key->usage),               \
              key->uid, key->gid,                                       \
              key->description ? key->description : "n/a"               \
             );                                                         \
}
#define key_cred(tsk)   ((tsk)->cred)
#ifdef HAVE_CRED_TGCRED
#define key_tgcred(tsk) ((tsk)->cred->tgcred)
#else
#define key_tgcred(tsk) key_cred(tsk)
#endif
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}
static inline void key_revoke_locked(struct key *key)
{
        set_bit(KEY_FLAG_REVOKED, &key->flags);
}
static void ctx_upcall_timeout_kr(unsigned long data)
{
        struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *) data;
        struct key            *key = ctx2gctx_keyring(ctx)->gck_key;

        CWARN("ctx %p, key %p\n", ctx, key);

        LASSERT(key);

        cli_ctx_expire(ctx);
        key_revoke_locked(key);
}
static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
{
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
        struct timer_list          *timer = gctx_kr->gck_timer;

        LASSERT(timer);

        CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
        timeout = msecs_to_jiffies(timeout * MSEC_PER_SEC) +
                  cfs_time_current();

        init_timer(timer);
        timer->expires = timeout;
        timer->data = (unsigned long) ctx;
        timer->function = ctx_upcall_timeout_kr;

        add_timer(timer);
}
/*
 * caller should make sure no race with other threads
 */
static
void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
        struct timer_list          *timer = gctx_kr->gck_timer;

        if (timer == NULL)
                return;

        CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);

        gctx_kr->gck_timer = NULL;

        del_singleshot_timer_sync(timer);

        OBD_FREE_PTR(timer);
}
static
struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
                                     struct vfs_cred *vcred)
{
        struct ptlrpc_cli_ctx      *ctx;
        struct gss_cli_ctx_keyring *gctx_kr;

        OBD_ALLOC_PTR(gctx_kr);
        if (gctx_kr == NULL)
                return NULL;

        OBD_ALLOC_PTR(gctx_kr->gck_timer);
        if (gctx_kr->gck_timer == NULL) {
                OBD_FREE_PTR(gctx_kr);
                return NULL;
        }
        init_timer(gctx_kr->gck_timer);

        ctx = &gctx_kr->gck_base.gc_base;

        if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
                OBD_FREE_PTR(gctx_kr->gck_timer);
                OBD_FREE_PTR(gctx_kr);
                return NULL;
        }

        ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
        clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
        atomic_inc(&ctx->cc_refcount); /* for the caller */

        return ctx;
}
static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct ptlrpc_sec          *sec = ctx->cc_sec;
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);

        CDEBUG(D_SEC, "destroying ctx %p\n", ctx);

        /* at this time the association with key has been broken. */
        LASSERT(sec);
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
        LASSERT(gctx_kr->gck_key == NULL);

        ctx_clear_timer_kr(ctx);
        LASSERT(gctx_kr->gck_timer == NULL);

        if (gss_cli_ctx_fini_common(sec, ctx))
                return;

        OBD_FREE_PTR(gctx_kr);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}
static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
        if (sync) {
                ctx_destroy_kr(ctx);
        } else {
                atomic_inc(&ctx->cc_refcount);
                sptlrpc_gc_add_ctx(ctx);
        }
}
static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        if (atomic_dec_and_test(&ctx->cc_refcount))
                ctx_release_kr(ctx, sync);
}
/*
 * key <-> ctx association and rules:
 * - ctx might not bind with any key
 * - key/ctx binding is protected by key semaphore (if the key present)
 * - key and ctx each take a reference of the other
 * - ctx enlist/unlist is protected by ctx spinlock
 * - never enlist a ctx after it's been unlisted
 * - whoever does enlist should also do bind; lock key before enlist:
 *   - lock key -> lock ctx -> enlist -> unlock ctx -> bind -> unlock key
 * - whoever does unlist should also do unbind:
 *   - lock key -> lock ctx -> unlist -> unlock ctx -> unbind -> unlock key
 *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
 * (an illustrative sketch of the enlist+bind ordering follows
 * bind_key_ctx() below)
 */
static inline void spin_lock_if(spinlock_t *lock, int condition)
{
        if (condition)
                spin_lock(lock);
}

static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
        if (condition)
                spin_unlock(lock);
}
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
        struct ptlrpc_sec       *sec = ctx->cc_sec;
        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);

        LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        spin_lock_if(&sec->ps_lock, !locked);

        atomic_inc(&ctx->cc_refcount);
        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
        hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
        if (is_root)
                gsec_kr->gsk_root_ctx = ctx;

        spin_unlock_if(&sec->ps_lock, !locked);
}
/*
 * Note: after this gets called, the caller should not access ctx again
 * because it might have been freed, unless the caller holds at least one
 * refcount of the ctx.
 *
 * return non-zero if we indeed unlist this ctx.
 */
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
        struct ptlrpc_sec       *sec = ctx->cc_sec;
        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);

        /* if hashed bit has gone, leave the job to somebody who is doing it */
        if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
                return 0;

        /* drop ref inside spin lock to prevent race with other operations */
        spin_lock_if(&sec->ps_lock, !locked);

        if (gsec_kr->gsk_root_ctx == ctx)
                gsec_kr->gsk_root_ctx = NULL;
        hlist_del_init(&ctx->cc_cache);
        atomic_dec(&ctx->cc_refcount);

        spin_unlock_if(&sec->ps_lock, !locked);

        return 1;
}
/*
 * Get specific payload. Newer kernels support 4 slots.
 */
static void *
key_get_payload(struct key *key, unsigned int index)
{
        void *key_ptr = NULL;

#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
        key_ptr = key->payload.data[index];
#else
        if (!index)
                key_ptr = key->payload.data;
#endif
        return key_ptr;
}
/*
 * Set specific payload. Newer kernels support 4 slots.
 */
static int key_set_payload(struct key *key, unsigned int index,
                           struct ptlrpc_cli_ctx *ctx)
{
        int rc = -EINVAL;

#ifdef HAVE_KEY_PAYLOAD_DATA_ARRAY
        if (index < 4) {
                key->payload.data[index] = ctx;
#else
        if (!index) {
                key->payload.data = ctx;
#endif
                rc = 0;
        }
        return rc;
}
/*
 * bind a key with a ctx together.
 * caller must hold write lock of the key, as well as ref on key & ctx.
 */
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(atomic_read(&key->usage) > 0);
        LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
        LASSERT(!key_get_payload(key, 0));

        /* at this time context may or may not be in list. */
        key_get(key);
        atomic_inc(&ctx->cc_refcount);
        ctx2gctx_keyring(ctx)->gck_key = key;
        LASSERT(!key_set_payload(key, 0, ctx));
}
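
/*
 * Illustrative sketch of the lock ordering described in the rules comment
 * above. This helper is hypothetical (not part of the original code and
 * never called); it assumes the caller already holds a reference on both
 * @key and @ctx.
 */
static void __maybe_unused example_enlist_and_bind(struct key *key,
                                                   struct ptlrpc_cli_ctx *ctx,
                                                   int is_root)
{
        down_write(&key->sem);          /* lock key */
        ctx_enlist_kr(ctx, is_root, 0); /* lock ctx -> enlist -> unlock ctx */
        bind_key_ctx(key, ctx);         /* bind while key sem is held */
        up_write(&key->sem);            /* unlock key */
}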
/*
 * unbind a key and a ctx.
 * caller must hold write lock, as well as a ref of the key.
 */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(key_get_payload(key, 0) == ctx);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);

        /* must revoke the key, or others may treat it as newly created */
        key_revoke_locked(key);

        key_set_payload(key, 0, NULL);
        ctx2gctx_keyring(ctx)->gck_key = NULL;

        /* once ctx gets split from key, the timer is meaningless */
        ctx_clear_timer_kr(ctx);

        ctx_put_kr(ctx, 1);
        key_put(key);
}
/*
 * given a ctx, unbind with its coupled key, if any.
 * unbind can only be called once, so we don't worry about the key being
 * released by somebody else.
 */
static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct key      *key = ctx2gctx_keyring(ctx)->gck_key;

        if (key) {
                LASSERT(key_get_payload(key, 0) == ctx);

                key_get(key);
                down_write(&key->sem);
                unbind_key_ctx(key, ctx);
                up_write(&key->sem);
                key_put(key);
        }
}
/*
 * given a key, unbind with its coupled ctx, if any.
 * caller must hold write lock, as well as a ref of the key.
 */
static void unbind_key_locked(struct key *key)
{
        struct ptlrpc_cli_ctx   *ctx = key_get_payload(key, 0);

        if (ctx)
                unbind_key_ctx(key, ctx);
}
/*
 * unlist a ctx, and unbind from coupled key
 */
static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
        if (ctx_unlist_kr(ctx, 0))
                unbind_ctx_kr(ctx);
}
/*
 * given a key, unlist and unbind with the coupled ctx (if any).
 * caller must hold write lock, as well as a ref of the key.
 */
static void kill_key_locked(struct key *key)
{
        struct ptlrpc_cli_ctx *ctx = key_get_payload(key, 0);

        if (ctx && ctx_unlist_kr(ctx, 0))
                unbind_key_locked(key);
}
/*
 * caller should hold one ref on contexts in freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
        struct hlist_node __maybe_unused *pos, *next;
        struct ptlrpc_cli_ctx   *ctx;
        struct gss_cli_ctx      *gctx;

        cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
                hlist_del_init(&ctx->cc_cache);

                /* reverse ctx: update current seq to buddy svcctx if it
                 * exists. ideally this should be done at
                 * gss_cli_ctx_finalize(), but the ctx destroy could be
                 * delayed by:
                 *  1) ctx still has reference;
                 *  2) ctx destroy is asynchronous;
                 * and the reverse import's call to inval_all_ctx() requires
                 * this be done _immediately_, otherwise a newly created
                 * reverse ctx might copy the very old sequence number from
                 * the svcctx. */
                gctx = ctx2gctx(ctx);
                if (!rawobj_empty(&gctx->gc_svc_handle) &&
                    sec_is_reverse(gctx->gc_base.cc_sec)) {
                        gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
                                        (__u32) atomic_read(&gctx->gc_seq));
                }

                /* we need to wake up waiting reqs here. the context might
                 * be force-released before the upcall finishes, and then the
                 * late-arriving downcall can't find the ctx at all. */
                sptlrpc_cli_ctx_wakeup(ctx);

                unbind_ctx_kr(ctx);
                ctx_put_kr(ctx, 0);
        }
}
/*
 * lookup a root context directly in a sec, return root ctx with a
 * reference taken or NULL.
 */
static
struct ptlrpc_cli_ctx *sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx   *ctx = NULL;

        spin_lock(&sec->ps_lock);

        ctx = gsec_kr->gsk_root_ctx;

        if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
                struct hlist_node __maybe_unused *node;
                struct ptlrpc_cli_ctx *tmp;

                /* reverse ctx, search root ctx in list, choose the one
                 * with shortest expire time, which most likely has an
                 * established peer ctx on the client side. */
                cfs_hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist,
                                         cc_cache) {
                        if (ctx == NULL || ctx->cc_expire == 0 ||
                            ctx->cc_expire > tmp->cc_expire) {
                                ctx = tmp;
                                /* promote to be root_ctx */
                                gsec_kr->gsk_root_ctx = ctx;
                        }
                }
        }

        if (ctx) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);
                LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
                atomic_inc(&ctx->cc_refcount);
        }

        spin_unlock(&sec->ps_lock);

        return ctx;
}
#define RVS_CTX_EXPIRE_NICE    (10)

static
void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
                                 struct ptlrpc_cli_ctx *new_ctx,
                                 struct key *key)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct hlist_node __maybe_unused *hnode;
        struct ptlrpc_cli_ctx *ctx;
        time_t now;

        LASSERT(sec_is_reverse(sec));

        spin_lock(&sec->ps_lock);

        now = cfs_time_current_sec();

        /* set all existing ctxs short expiry */
        cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
                if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
                        ctx->cc_early_expire = 1;
                        ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
                }
        }

        /* if there's already a root_ctx, rather than obsoleting the current
         * one immediately, we let it keep operating for a little while;
         * hopefully by the time the first backward rpc with the newest ctx
         * is sent out, the client side already has the peer ctx well
         * established. */
        ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);

        if (key)
                bind_key_ctx(key, new_ctx);

        spin_unlock(&sec->ps_lock);
}
static void construct_key_desc(void *buf, int bufsize,
                               struct ptlrpc_sec *sec, uid_t uid)
{
        snprintf(buf, bufsize, "%d@%x", uid, sec->ps_id);
        ((char *)buf)[bufsize - 1] = '\0';
}
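
/*
 * Example (hypothetical values): uid 500 and a sec with ps_id 0x2a yield
 * the key description "500@2a"; the same description is later passed to
 * request_key() in gss_sec_lookup_ctx_kr() and flush_user_ctx_cache_kr().
 */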
/****************************************
 * sec apis                             *
 ****************************************/
static
struct ptlrpc_sec *gss_sec_create_kr(struct obd_import *imp,
                                     struct ptlrpc_svc_ctx *svcctx,
                                     struct sptlrpc_flavor *sf)
{
        struct gss_sec_keyring  *gsec_kr;
        ENTRY;

        OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
        if (gsec_kr == NULL)
                RETURN(NULL);

        INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
        gsec_kr->gsk_root_ctx = NULL;
        mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_init(&gsec_kr->gsk_uc_lock);
#endif

        if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
                                  imp, svcctx, sf))
                goto err_free;

        if (svcctx != NULL &&
            sec_install_rctx_kr(&gsec_kr->gsk_base.gs_base, svcctx)) {
                gss_sec_destroy_common(&gsec_kr->gsk_base);
                goto err_free;
        }

        RETURN(&gsec_kr->gsk_base.gs_base);

err_free:
        OBD_FREE(gsec_kr, sizeof(*gsec_kr));
        RETURN(NULL);
}
static
void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec          *gsec = sec2gsec(sec);
        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);

        CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);

        LASSERT(hlist_empty(&gsec_kr->gsk_clist));
        LASSERT(gsec_kr->gsk_root_ctx == NULL);

        gss_sec_destroy_common(gsec);

        OBD_FREE(gsec_kr, sizeof(*gsec_kr));
}
static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
{
        /* apart from the ROOTONLY flag, treat it as a root user only if the
         * real uid is 0; euid/fsuid being 0 is handled as a setuid scenario */
        if (sec_is_rootonly(sec) || (vcred->vc_uid == 0))
                return 1;
        else
                return 0;
}
/*
 * unlink request key from its ring, which is linked during request_key().
 * sadly, we have to 'guess' which keyring it's linked to.
 *
 * FIXME this code is fragile, it depends on how request_key_link() is
 * implemented.
 */
static void request_key_unlink(struct key *key)
{
        struct task_struct *tsk = current;
        struct key *ring;

        switch (key_cred(tsk)->jit_keyring) {
        case KEY_REQKEY_DEFL_DEFAULT:
        case KEY_REQKEY_DEFL_THREAD_KEYRING:
                ring = key_get(key_cred(tsk)->thread_keyring);
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_PROCESS_KEYRING:
                ring = key_get(key_tgcred(tsk)->process_keyring);
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_SESSION_KEYRING:
                rcu_read_lock();
                ring = key_get(rcu_dereference(key_tgcred(tsk)
                                               ->session_keyring));
                rcu_read_unlock();
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
                ring = key_get(key_cred(tsk)->user->session_keyring);
                break;
        case KEY_REQKEY_DEFL_USER_KEYRING:
                ring = key_get(key_cred(tsk)->user->uid_keyring);
                break;
        case KEY_REQKEY_DEFL_GROUP_KEYRING:
        default:
                LBUG();
        }

        LASSERT(ring);
        key_unlink(ring, key);
        key_put(ring);
}
static
struct ptlrpc_cli_ctx *gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
                                             struct vfs_cred *vcred,
                                             int create, int remove_dead)
{
        struct obd_import       *imp = sec->ps_import;
        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx   *ctx = NULL;
        unsigned int             is_root = 0, create_new = 0;
        struct key              *key;
        char                     desc[24];
        char                    *coinfo;
        int                      coinfo_size;
        const char              *sec_part_flags = "";
        char                     svc_flag = '-';
        ENTRY;
        LASSERT(imp != NULL);

        is_root = user_is_root(sec, vcred);

        /* a little bit optimization for root context */
        if (is_root) {
                ctx = sec_lookup_root_ctx_kr(sec);
                /*
                 * Only lookup directly for REVERSE sec, which should
                 * always succeed.
                 */
                if (ctx || sec_is_reverse(sec))
                        RETURN(ctx);
        }

        LASSERT(create != 0);

        /* for root context, obtain lock and check again, this time hold
         * the root upcall lock, make sure nobody else populated new root
         * context after last check. */
        if (is_root) {
                mutex_lock(&gsec_kr->gsk_root_uc_lock);

                ctx = sec_lookup_root_ctx_kr(sec);
                if (ctx)
                        goto out;

                /* update reverse handle for root user */
                sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();
                switch (sec->ps_part) {
                case LUSTRE_SP_MDT:
                        sec_part_flags = "m";
                        break;
                case LUSTRE_SP_OST:
                        sec_part_flags = "o";
                        break;
                case LUSTRE_SP_MGC:
                        sec_part_flags = "rmo";
                        break;
                case LUSTRE_SP_CLI:
                        sec_part_flags = "r";
                        break;
                case LUSTRE_SP_MGS:
                default:
                        LBUG();
                }

                switch (SPTLRPC_FLVR_SVC(sec->ps_flvr.sf_rpc)) {
                case SPTLRPC_SVC_NULL:
                        svc_flag = 'n';
                        break;
                case SPTLRPC_SVC_AUTH:
                        svc_flag = 'a';
                        break;
                case SPTLRPC_SVC_INTG:
                        svc_flag = 'i';
                        break;
                case SPTLRPC_SVC_PRIV:
                        svc_flag = 'p';
                        break;
                default:
                        LBUG();
                }
        }
        /* in case of setuid, key will be constructed as owner of fsuid/fsgid,
         * but we do authentication based on real uid/gid. the key permission
         * bits will be exactly as POS_ALL, so only processes who subscribed
         * this key could have the access, although the quota might be counted
         * on others (fsuid/fsgid).
         *
         * keyring will use fsuid/fsgid as upcall parameters, so we have to
         * encode real uid/gid into callout info.
         */

        /* But first we need to make sure the obd type is supported */
        if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
            strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
            strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MGC_NAME) &&
            strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
            strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSP_NAME)) {
                CERROR("obd %s is not a supported device\n",
                       imp->imp_obd->obd_name);
                GOTO(out, ctx = NULL);
        }
        construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

        /* callout info format:
         * secid:mech:uid:gid:sec_flags:svc_flag:svc_type:peer_nid:
         * target_uuid:self_nid
         */
        coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
        OBD_ALLOC(coinfo, coinfo_size);
        if (coinfo == NULL)
                goto out;

        snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%c:%d:%#llx:%s:%#llx",
                 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
                 vcred->vc_uid, vcred->vc_gid,
                 sec_part_flags, svc_flag, import_to_gss_svc(imp),
                 imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name,
                 imp->imp_connection->c_self);
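
        /*
         * Example of the resulting callout info (hypothetical values):
         * "1:krb5:500:500:r:i:0:0x20000ac100010a:lustre-MDT0000-mdc:
         * 0x20000ac100010b" -- the userspace upcall parses these fields
         * to run the actual GSS negotiation.
         */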
        CDEBUG(D_SEC, "requesting key for %s\n", desc);

        keyring_upcall_lock(gsec_kr);
        key = request_key(&gss_key_type, desc, coinfo);
        keyring_upcall_unlock(gsec_kr);

        OBD_FREE(coinfo, coinfo_size);

        if (IS_ERR(key)) {
                CERROR("failed request key: %ld\n", PTR_ERR(key));
                goto out;
        }
        CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc);
        /* once payload.data was pointed to a ctx, it never changes until
         * we de-associate them; but parallel request_key() may return
         * a key with payload.data == NULL at the same time. so we still
         * need the write lock of key->sem to serialize them. */
        down_write(&key->sem);

        ctx = key_get_payload(key, 0);
        if (likely(ctx)) {
                LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
                LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
                LASSERT(atomic_read(&key->usage) >= 2);

                /* simply take a ref and return. it's upper layer's
                 * responsibility to detect & replace dead ctx. */
                atomic_inc(&ctx->cc_refcount);
        } else {
                /* pre initialization with a cli_ctx. this can't be done in
                 * key_instantiate() because we don't have enough information
                 * there. */
                ctx = ctx_create_kr(sec, vcred);
                if (ctx != NULL) {
                        ctx_enlist_kr(ctx, is_root, 0);
                        bind_key_ctx(key, ctx);

                        ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);

                        CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
                               key, ctx, sec);
                } else {
                        /* we'd prefer to call key_revoke(), but we'd rather
                         * revoke it within this key->sem locked period. */
                        key_revoke_locked(key);
                }

                create_new = 1;
        }

        up_write(&key->sem);

        if (is_root && create_new)
                request_key_unlink(key);

        key_put(key);
out:
        if (is_root)
                mutex_unlock(&gsec_kr->gsk_root_uc_lock);
        RETURN(ctx);
}
static
void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx,
                            int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        ctx_release_kr(ctx, sync);
}
/*
 * flush context of normal user, we must resort to keyring itself to find out
 * contexts which belong to me.
 *
 * Note here we suppose only to flush _my_ context, the "uid" will
 * be ignored in the search.
 */
static
void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
                             uid_t uid,
                             int grace, int force)
{
        struct key      *key;
        char             desc[24];

        /* nothing to do for reverse or rootonly sec */
        if (sec_is_reverse(sec) || sec_is_rootonly(sec))
                return;

        construct_key_desc(desc, sizeof(desc), sec, uid);

        /* there should be only one valid key, but we put it in the
         * loop in case of any weird cases */
        for (;;) {
                key = request_key(&gss_key_type, desc, NULL);
                if (IS_ERR(key)) {
                        CDEBUG(D_SEC,
                               "No more key found for current user\n");
                        break;
                }

                down_write(&key->sem);

                kill_key_locked(key);

                /* kill_key_locked() should usually revoke the key, but we
                 * revoke it again to make sure, e.g. in some cases the key
                 * may not be well coupled with a context. */
                key_revoke_locked(key);

                up_write(&key->sem);

                request_key_unlink(key);

                key_put(key);
        }
}
/*
 * flush context of root or all, we iterate through the list.
 */
static
void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace,
                             int force)
{
        struct gss_sec_keyring  *gsec_kr;
        struct hlist_head        freelist = HLIST_HEAD_INIT;
        struct hlist_node __maybe_unused *pos, *next;
        struct ptlrpc_cli_ctx   *ctx;
        ENTRY;

        gsec_kr = sec2gsec_keyring(sec);

        spin_lock(&sec->ps_lock);
        cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                      &gsec_kr->gsk_clist, cc_cache) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);

                if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
                        continue;

                /* at this moment there are at least 2 base references:
                 * key association and in-list. */
                if (atomic_read(&ctx->cc_refcount) > 2) {
                        if (!force)
                                continue;
                        CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
                              ctx, ctx->cc_vcred.vc_uid,
                              sec2target_str(ctx->cc_sec),
                              atomic_read(&ctx->cc_refcount) - 2);
                }

                set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
                if (!grace)
                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                atomic_inc(&ctx->cc_refcount);

                if (ctx_unlist_kr(ctx, 1)) {
                        hlist_add_head(&ctx->cc_cache, &freelist);
                } else {
                        LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
                        atomic_dec(&ctx->cc_refcount);
                }
        }
        spin_unlock(&sec->ps_lock);

        dispose_ctx_list_kr(&freelist);
        EXIT;
}
static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
                               uid_t uid, int grace, int force)
{
        ENTRY;

        CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
               sec, atomic_read(&sec->ps_refcount),
               atomic_read(&sec->ps_nctx),
               uid, grace, force);

        if (uid != -1 && uid != 0)
                flush_user_ctx_cache_kr(sec, uid, grace, force);
        else
                flush_spec_ctx_cache_kr(sec, uid, grace, force);

        RETURN(0);
}
static
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
        struct hlist_head        freelist = HLIST_HEAD_INIT;
        struct hlist_node __maybe_unused *pos, *next;
        struct ptlrpc_cli_ctx   *ctx;
        ENTRY;

        CWARN("running gc\n");

        spin_lock(&sec->ps_lock);
        cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                      &gsec_kr->gsk_clist, cc_cache) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);

                atomic_inc(&ctx->cc_refcount);

                if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
                        hlist_add_head(&ctx->cc_cache, &freelist);
                        CWARN("unhashed ctx %p\n", ctx);
                } else {
                        LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
                        atomic_dec(&ctx->cc_refcount);
                }
        }
        spin_unlock(&sec->ps_lock);

        dispose_ctx_list_kr(&freelist);
        EXIT;
}
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
        struct hlist_node __maybe_unused *pos, *next;
        struct ptlrpc_cli_ctx   *ctx;
        struct gss_cli_ctx      *gctx;
        time_t                   now = cfs_time_current_sec();
        ENTRY;

        spin_lock(&sec->ps_lock);
        cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                      &gsec_kr->gsk_clist, cc_cache) {
                struct key      *key;
                char             flags_str[40];
                char             mech[40];

                gctx = ctx2gctx(ctx);
                key = ctx2gctx_keyring(ctx)->gck_key;

                gss_cli_ctx_flags2str(ctx->cc_flags,
                                      flags_str, sizeof(flags_str));

                if (gctx->gc_mechctx)
                        lgss_display(gctx->gc_mechctx, mech, sizeof(mech));
                else
                        snprintf(mech, sizeof(mech), "N/A");
                mech[sizeof(mech) - 1] = '\0';

                seq_printf(seq, "%p: uid %u, ref %d, expire %lu(%+ld), fl %s, "
                           "seq %d, win %u, key %08x(ref %d), "
                           "hdl %#llx:%#llx, mech: %s\n",
                           ctx, ctx->cc_vcred.vc_uid,
                           atomic_read(&ctx->cc_refcount),
                           ctx->cc_expire,
                           ctx->cc_expire ? ctx->cc_expire - now : 0,
                           flags_str,
                           atomic_read(&gctx->gc_seq),
                           gctx->gc_win,
                           key ? key->serial : 0,
                           key ? atomic_read(&key->usage) : 0,
                           gss_handle_to_u64(&gctx->gc_handle),
                           gss_handle_to_u64(&gctx->gc_svc_handle),
                           mech);
        }
        spin_unlock(&sec->ps_lock);

        RETURN(0);
}
/****************************************
 * cli_ctx apis                         *
 ****************************************/
static
int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
{
        /* upcall is already on the way */
        return 0;
}
static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        if (cli_ctx_check_death(ctx)) {
                kill_ctx_kr(ctx);
                return 1;
        }

        if (cli_ctx_is_ready(ctx))
                return 0;
        return 1;
}
static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        cli_ctx_expire(ctx);
        kill_ctx_kr(ctx);
}
/****************************************
 * (reverse) service                    *
 ****************************************/

/*
 * reverse context could have nothing to do with keyrings. here we still keep
 * the version which binds to a key, for future reference.
 */
#define HAVE_REVERSE_CTX_NOKEY
#ifdef HAVE_REVERSE_CTX_NOKEY

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                        struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_cli_ctx   *cli_ctx;
        struct vfs_cred          vcred = { 0, 0 };
        int                      rc;

        LASSERT(sec);
        LASSERT(svc_ctx);

        cli_ctx = ctx_create_kr(sec, &vcred);
        if (cli_ctx == NULL)
                return -ENOMEM;

        rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
        if (rc) {
                CERROR("failed copy reverse cli ctx: %d\n", rc);

                ctx_put_kr(cli_ctx, 1);
                return rc;
        }

        rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);

        ctx_put_kr(cli_ctx, 1);

        return 0;
}
#else /* ! HAVE_REVERSE_CTX_NOKEY */

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                        struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_cli_ctx   *cli_ctx = NULL;
        struct key              *key;
        struct vfs_cred          vcred = { 0, 0 };
        char                     desc[64];
        int                      rc;

        LASSERT(sec);
        LASSERT(svc_ctx);

        construct_key_desc(desc, sizeof(desc), sec, 0);

        key = key_alloc(&gss_key_type, desc, 0, 0,
                        KEY_POS_ALL | KEY_USR_ALL, 1);
        if (IS_ERR(key)) {
                CERROR("failed to alloc key: %ld\n", PTR_ERR(key));
                return PTR_ERR(key);
        }

        rc = key_instantiate_and_link(key, NULL, 0, NULL, NULL);
        if (rc) {
                CERROR("failed to instantiate key: %d\n", rc);
                goto err_revoke;
        }

        down_write(&key->sem);

        LASSERT(!key_get_payload(key, 0));

        cli_ctx = ctx_create_kr(sec, &vcred);
        if (cli_ctx == NULL) {
                rc = -ENOMEM;
                goto err_up;
        }

        rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
        if (rc) {
                CERROR("failed copy reverse cli ctx: %d\n", rc);
                goto err_put;
        }

        rvs_sec_install_root_ctx_kr(sec, cli_ctx, key);

        ctx_put_kr(cli_ctx, 1);
        up_write(&key->sem);

        key_put(key);
        return 0;

err_put:
        ctx_put_kr(cli_ctx, 1);
err_up:
        up_write(&key->sem);
err_revoke:
        key_revoke(key);
        key_put(key);
        return rc;
}

#endif /* HAVE_REVERSE_CTX_NOKEY */
/****************************************
 * service apis                         *
 ****************************************/
static
int gss_svc_accept_kr(struct ptlrpc_request *req)
{
        return gss_svc_accept(&gss_policy_keyring, req);
}
static
int gss_svc_install_rctx_kr(struct obd_import *imp,
                            struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_sec *sec;
        int                rc;

        sec = sptlrpc_import_sec_ref(imp);
        LASSERT(sec);

        rc = sec_install_rctx_kr(sec, svc_ctx);
        sptlrpc_sec_put(sec);

        return rc;
}
/****************************************
 * key apis                             *
 ****************************************/
static
#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
int gss_kt_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
        const void     *data = prep->data;
        size_t          datalen = prep->datalen;
#else
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
#endif
        int             rc;
        ENTRY;

        if (data != NULL || datalen != 0) {
                CERROR("invalid: data %p, len %lu\n", data, (long) datalen);
                RETURN(-EINVAL);
        }

        if (key_get_payload(key, 0)) {
                CERROR("key already has payload\n");
                RETURN(-EINVAL);
        }

        /* link the key to the session keyring, so that the following context
         * negotiation rpc fired from user space can find this key. This will
         * be unlinked automatically when the upcall process dies.
         *
         * we can't do this through keyctl from userspace, because the upcall
         * might be neither possessor nor owner of the key (setuid).
         *
         * the session keyring is created upon upcall, and doesn't change all
         * the way until the upcall finishes, so the rcu lock is not needed
         * here.
         */
        LASSERT(key_tgcred(current)->session_keyring);

        rc = key_link(key_tgcred(current)->session_keyring, key);
        if (unlikely(rc)) {
                CERROR("failed to link key %08x to keyring %08x: %d\n",
                       key->serial,
                       key_tgcred(current)->session_keyring->serial, rc);
                RETURN(rc);
        }

        CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key,
               key_get_payload(key, 0));
        RETURN(0);
}
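
/*
 * Note on the expected flow (a sketch of the userspace interplay, not
 * something enforced in this file): request_key() in gss_sec_lookup_ctx_kr()
 * triggers the userspace upcall, which performs the GSS negotiation and
 * feeds the result back via keyctl_update(), arriving in gss_kt_update()
 * below.
 */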
/*
 * called with key semaphore write locked. it means we can operate
 * on the context without fear of losing refcount.
 */
static
#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
int gss_kt_update(struct key *key, struct key_preparsed_payload *prep)
{
        const void              *data = prep->data;
        __u32                    datalen32 = (__u32) prep->datalen;
#else
int gss_kt_update(struct key *key, const void *data, size_t datalen)
{
        __u32                    datalen32 = (__u32) datalen;
#endif
        struct ptlrpc_cli_ctx   *ctx = key_get_payload(key, 0);
        struct gss_cli_ctx      *gctx;
        rawobj_t                 tmpobj = RAWOBJ_EMPTY;
        int                      rc;
        ENTRY;

        if (data == NULL || datalen32 == 0) {
                CWARN("invalid: data %p, len %lu\n", data, (long) datalen32);
                RETURN(-EINVAL);
        }

        /* if the upcall finished negotiation too fast (most likely because
         * a local error happened) and called kt_update(), the ctx might
         * still be NULL. but the key will finally be associated with a
         * context, or be revoked. if key status is fine, return -EAGAIN
         * to let userspace sleep a while and call again. */
        if (ctx == NULL) {
                CDEBUG(D_SEC, "update too soon: key %p(%x) flags %lx\n",
                       key, key->serial, key->flags);

                rc = key_validate(key);
                if (rc == 0)
                        RETURN(-EAGAIN);
                else
                        RETURN(rc);
        }

        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        ctx_clear_timer_kr(ctx);

        /* don't proceed if already refreshed */
        if (cli_ctx_is_refreshed(ctx)) {
                CWARN("ctx already done refresh\n");
                RETURN(0);
        }

        sptlrpc_cli_ctx_get(ctx);
        gctx = ctx2gctx(ctx);

        rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win,
                                  sizeof(gctx->gc_win));
        if (rc) {
                CERROR("failed extract seq_win\n");
                goto out;
        }

        if (gctx->gc_win == 0) {
                __u32   nego_rpc_err, nego_gss_err;

                rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
                                          sizeof(nego_rpc_err));
                if (rc) {
                        CERROR("cannot extract RPC: rc = %d\n", rc);
                        goto out;
                }

                rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
                                          sizeof(nego_gss_err));
                if (rc) {
                        CERROR("failed to extract gss rc = %d\n", rc);
                        goto out;
                }

                CERROR("negotiation: rpc err %d, gss err %x\n",
                       nego_rpc_err, nego_gss_err);

                rc = nego_rpc_err ? nego_rpc_err : -EACCES;
        } else {
                rc = rawobj_extract_local_alloc(&gctx->gc_handle,
                                                (__u32 **) &data, &datalen32);
                if (rc) {
                        CERROR("failed extract handle\n");
                        goto out;
                }

                rc = rawobj_extract_local(&tmpobj,
                                          (__u32 **) &data, &datalen32);
                if (rc) {
                        CERROR("failed extract mech\n");
                        goto out;
                }

                rc = lgss_import_sec_context(&tmpobj,
                                             sec2gsec(ctx->cc_sec)->gs_mech,
                                             &gctx->gc_mechctx);
                if (rc != GSS_S_COMPLETE)
                        CERROR("failed import context\n");
                else
                        rc = 0;
        }
out:
        /* we don't care what the current status of this ctx is, even if
         * somebody else is operating on the ctx at the same time. we just
         * add up our own opinions here. */
        if (rc == 0) {
                gss_cli_ctx_uptodate(gctx);
        } else {
                /* this will also revoke the key. has to be done before
                 * wakeup waiters otherwise they can find the stale key */
                kill_key_locked(key);

                cli_ctx_expire(ctx);

                if (rc != -ERESTART)
                        set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
        }

        /* let user space think it's a success */
        sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}
#ifndef HAVE_KEY_MATCH_DATA
static int
gss_kt_match(const struct key *key, const void *desc)
{
        return strcmp(key->description, (const char *) desc) == 0 &&
               !test_bit(KEY_FLAG_REVOKED, &key->flags);
}
#else /* ! HAVE_KEY_MATCH_DATA */
static bool
gss_kt_match(const struct key *key, const struct key_match_data *match_data)
{
        const char *desc = match_data->raw_data;

        return strcmp(key->description, desc) == 0 &&
               !test_bit(KEY_FLAG_REVOKED, &key->flags);
}

/*
 * Preparse the match criterion.
 */
static int gss_kt_match_preparse(struct key_match_data *match_data)
{
        match_data->lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT;
        match_data->cmp = gss_kt_match;
        return 0;
}
#endif /* HAVE_KEY_MATCH_DATA */
static
void gss_kt_destroy(struct key *key)
{
        ENTRY;
        LASSERT(!key_get_payload(key, 0));
        CDEBUG(D_SEC, "destroy key %p\n", key);
        EXIT;
}
static
void gss_kt_describe(const struct key *key, struct seq_file *s)
{
        if (key->description == NULL)
                seq_puts(s, "[null]");
        else
                seq_puts(s, key->description);
}
static struct key_type gss_key_type =
{
        .name           = "lgssc",
        .def_datalen    = 0,
        .instantiate    = gss_kt_instantiate,
        .update         = gss_kt_update,
#ifdef HAVE_KEY_MATCH_DATA
        .match_preparse = gss_kt_match_preparse,
#else
        .match          = gss_kt_match,
#endif
        .destroy        = gss_kt_destroy,
        .describe       = gss_kt_describe,
};
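
/*
 * The "lgssc" key type above is what userspace keys on: the kernel's
 * request_key() dispatches to a matching handler, typically wired up under
 * /etc/request-key.d (an assumption about packaging; in Lustre
 * distributions the handler is the lgss_keyring utility).
 */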
/****************************************
 * lustre gss keyring policy            *
 ****************************************/
static struct ptlrpc_ctx_ops gss_keyring_ctxops = {
        .match                  = gss_cli_ctx_match,
        .refresh                = gss_cli_ctx_refresh_kr,
        .validate               = gss_cli_ctx_validate_kr,
        .die                    = gss_cli_ctx_die_kr,
        .sign                   = gss_cli_ctx_sign,
        .verify                 = gss_cli_ctx_verify,
        .seal                   = gss_cli_ctx_seal,
        .unseal                 = gss_cli_ctx_unseal,
        .wrap_bulk              = gss_cli_ctx_wrap_bulk,
        .unwrap_bulk            = gss_cli_ctx_unwrap_bulk,
};
static struct ptlrpc_sec_cops gss_sec_keyring_cops = {
        .create_sec             = gss_sec_create_kr,
        .destroy_sec            = gss_sec_destroy_kr,
        .kill_sec               = gss_sec_kill,
        .lookup_ctx             = gss_sec_lookup_ctx_kr,
        .release_ctx            = gss_sec_release_ctx_kr,
        .flush_ctx_cache        = gss_sec_flush_ctx_cache_kr,
        .gc_ctx                 = gss_sec_gc_ctx_kr,
        .install_rctx           = gss_sec_install_rctx,
        .alloc_reqbuf           = gss_alloc_reqbuf,
        .free_reqbuf            = gss_free_reqbuf,
        .alloc_repbuf           = gss_alloc_repbuf,
        .free_repbuf            = gss_free_repbuf,
        .enlarge_reqbuf         = gss_enlarge_reqbuf,
        .display                = gss_sec_display_kr,
};
static struct ptlrpc_sec_sops gss_sec_keyring_sops = {
        .accept                 = gss_svc_accept_kr,
        .invalidate_ctx         = gss_svc_invalidate_ctx,
        .alloc_rs               = gss_svc_alloc_rs,
        .authorize              = gss_svc_authorize,
        .free_rs                = gss_svc_free_rs,
        .free_ctx               = gss_svc_free_ctx,
        .prep_bulk              = gss_svc_prep_bulk,
        .unwrap_bulk            = gss_svc_unwrap_bulk,
        .wrap_bulk              = gss_svc_wrap_bulk,
        .install_rctx           = gss_svc_install_rctx_kr,
};
static struct ptlrpc_sec_policy gss_policy_keyring = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "gss.keyring",
        .sp_policy              = SPTLRPC_POLICY_GSS,
        .sp_cops                = &gss_sec_keyring_cops,
        .sp_sops                = &gss_sec_keyring_sops,
};
int __init gss_init_keyring(void)
{
        int rc;

        rc = register_key_type(&gss_key_type);
        if (rc) {
                CERROR("failed to register keyring type: %d\n", rc);
                return rc;
        }

        rc = sptlrpc_register_policy(&gss_policy_keyring);
        if (rc) {
                unregister_key_type(&gss_key_type);
                return rc;
        }

        return 0;
}
void __exit gss_exit_keyring(void)
{
        unregister_key_type(&gss_key_type);
        sptlrpc_unregister_policy(&gss_policy_keyring);
}