 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_keyring.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#ifdef HAVE_LINUX_KEYTYPE_H
#include <linux/key-type.h>
#endif
#include <linux/mutex.h>
#include <asm/atomic.h>

#include <liblustre.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>

#include "gss_internal.h"
static struct ptlrpc_sec_policy gss_policy_keyring;
static struct ptlrpc_ctx_ops gss_keyring_ctxops;
static struct key_type gss_key_type;

static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                               struct ptlrpc_svc_ctx *svc_ctx);
/*
 * The timeout is only for the case where the upcall child process dies
 * abnormally; in all other cases the upcall should eventually update the
 * kernel key.
 *
 * FIXME we'd better incorporate the client & server side upcall timeouts
 * into the framework of Adaptive Timeouts, but we need to figure out how
 * to make sure the kernel knows whether an upcall process is still in
 * progress or has died unexpectedly.
 */
#define KEYRING_UPCALL_TIMEOUT  (obd_timeout + obd_timeout)
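/*
 * A worked example of the timeout above, assuming the common default
 * obd_timeout of 100 seconds: the timer armed by ctx_start_timer_kr() below
 * fires 200 seconds after the upcall starts, at which point
 * ctx_upcall_timeout_kr() expires the ctx and revokes the key, so a stuck
 * negotiation cannot be mistaken for a live one.
 */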
/****************************************
 * internal helpers                     *
 ****************************************/
#define DUMP_PROCESS_KEYRINGS(tsk)                                      \
{                                                                       \
        CWARN("DUMP PK: %s[%u,%u/%u](<-%s[%u,%u/%u]): "                 \
              "a %d, t %d, p %d, s %d, u %d, us %d, df %d\n",           \
              tsk->comm, tsk->pid, tsk->uid, tsk->fsuid,                \
              tsk->parent->comm, tsk->parent->pid,                      \
              tsk->parent->uid, tsk->parent->fsuid,                     \
              tsk->request_key_auth ?                                   \
              tsk->request_key_auth->serial : 0,                        \
              key_cred(tsk)->thread_keyring ?                           \
              key_cred(tsk)->thread_keyring->serial : 0,                \
              key_tgcred(tsk)->process_keyring ?                        \
              key_tgcred(tsk)->process_keyring->serial : 0,             \
              key_tgcred(tsk)->session_keyring ?                        \
              key_tgcred(tsk)->session_keyring->serial : 0,             \
              key_cred(tsk)->user->uid_keyring ?                        \
              key_cred(tsk)->user->uid_keyring->serial : 0,             \
              key_cred(tsk)->user->session_keyring ?                    \
              key_cred(tsk)->user->session_keyring->serial : 0,         \
              key_cred(tsk)->jit_keyring                                \
             );                                                         \
}
#define DUMP_KEY(key)                                                   \
{                                                                       \
        CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n",              \
              key, key->serial, atomic_read(&key->usage),               \
              key->uid, key->gid,                                       \
              key->description ? key->description : "n/a"               \
             );                                                         \
}
#ifdef HAVE_STRUCT_CRED /* Since 2.6.29 */
#define key_cred(tsk)   ((tsk)->cred)
#define key_tgcred(tsk) ((tsk)->cred->tgcred)
#else
#define key_cred(tsk)   (tsk)
#define key_tgcred(tsk) ((tsk)->signal)
#endif
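/*
 * Example of the mapping above (illustrative only): on kernels with
 * struct cred (>= 2.6.29), key_tgcred(current)->session_keyring resolves to
 * current->cred->tgcred->session_keyring, while on older kernels the same
 * expression resolves to current->signal->session_keyring.
 */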
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}
static inline void key_revoke_locked(struct key *key)
{
        set_bit(KEY_FLAG_REVOKED, &key->flags);
}
static void ctx_upcall_timeout_kr(unsigned long data)
{
        struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *) data;
        struct key            *key = ctx2gctx_keyring(ctx)->gck_key;

        CWARN("ctx %p, key %p\n", ctx, key);

        cli_ctx_expire(ctx);
        key_revoke_locked(key);
}
static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
{
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
        struct timer_list          *timer = gctx_kr->gck_timer;

        LASSERT(timer);

        CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
        timeout = timeout * HZ + cfs_time_current();

        init_timer(timer);
        timer->expires = timeout;
        timer->data = (unsigned long) ctx;
        timer->function = ctx_upcall_timeout_kr;

        add_timer(timer);
}
/*
 * caller should make sure there is no race with other threads
 */
static
void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
        struct timer_list          *timer = gctx_kr->gck_timer;

        if (timer == NULL)
                return;

        CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);

        gctx_kr->gck_timer = NULL;

        del_singleshot_timer_sync(timer);

        OBD_FREE_PTR(timer);
}
static
struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
                                     struct vfs_cred *vcred)
{
        struct ptlrpc_cli_ctx      *ctx;
        struct gss_cli_ctx_keyring *gctx_kr;

        OBD_ALLOC_PTR(gctx_kr);
        if (gctx_kr == NULL)
                return NULL;

        OBD_ALLOC_PTR(gctx_kr->gck_timer);
        if (gctx_kr->gck_timer == NULL) {
                OBD_FREE_PTR(gctx_kr);
                return NULL;
        }
        init_timer(gctx_kr->gck_timer);

        ctx = &gctx_kr->gck_base.gc_base;

        if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
                OBD_FREE_PTR(gctx_kr->gck_timer);
                OBD_FREE_PTR(gctx_kr);
                return NULL;
        }

        ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
        clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
        cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */

        return ctx;
}
static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct ptlrpc_sec          *sec = ctx->cc_sec;
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);

        CDEBUG(D_SEC, "destroying ctx %p\n", ctx);

        /* at this time the association with the key has been broken. */
        LASSERT(sec);
        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
        LASSERT(gctx_kr->gck_key == NULL);

        ctx_clear_timer_kr(ctx);
        LASSERT(gctx_kr->gck_timer == NULL);

        if (gss_cli_ctx_fini_common(sec, ctx))
                return;

        OBD_FREE_PTR(gctx_kr);

        cfs_atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}
static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
        if (sync) {
                ctx_destroy_kr(ctx);
        } else {
                cfs_atomic_inc(&ctx->cc_refcount);
                sptlrpc_gc_add_ctx(ctx);
        }
}
static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);

        if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
                ctx_release_kr(ctx, sync);
}
/*
 * key <-> ctx association and rules:
 * - ctx might not bind with any key
 * - key/ctx binding is protected by the key semaphore (if the key is present)
 * - key and ctx each take a reference of the other
 * - ctx enlist/unlist is protected by the ctx spinlock
 * - never enlist a ctx after it's been unlisted
 * - whoever does enlist should also do bind, and lock the key before enlist:
 *   - lock key -> lock ctx -> enlist -> unlock ctx -> bind -> unlock key
 * - whoever does unlist should also do unbind:
 *   - lock key -> lock ctx -> unlist -> unlock ctx -> unbind -> unlock key
 *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
 */
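/*
 * For illustration, gss_sec_lookup_ctx_kr() below follows the enlist rule
 * this way (a sketch of the call sequence, not standalone code):
 *
 *      down_write(&key->sem);            lock key
 *      ctx_enlist_kr(ctx, is_root, 0);   lock ctx, enlist, unlock ctx
 *      bind_key_ctx(key, ctx);           bind
 *      up_write(&key->sem);              unlock key
 */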
static inline void spin_lock_if(spinlock_t *lock, int condition)
{
        if (condition)
                spin_lock(lock);
}

static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
        if (condition)
                spin_unlock(lock);
}
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
        struct ptlrpc_sec      *sec = ctx->cc_sec;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);

        spin_lock_if(&sec->ps_lock, !locked);

        cfs_atomic_inc(&ctx->cc_refcount);
        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
        cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
        if (is_root)
                gsec_kr->gsk_root_ctx = ctx;

        spin_unlock_if(&sec->ps_lock, !locked);
}
/*
 * Note: after this gets called, the caller should not access the ctx again
 * because it might have been freed, unless the caller holds at least one
 * reference on the ctx.
 *
 * Return non-zero if we indeed unlisted this ctx.
 */
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
        struct ptlrpc_sec      *sec = ctx->cc_sec;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        /* if hashed bit has gone, leave the job to somebody who is doing it */
        if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
                return 0;

        /* drop ref inside spin lock to prevent race with other operations */
        spin_lock_if(&sec->ps_lock, !locked);

        if (gsec_kr->gsk_root_ctx == ctx)
                gsec_kr->gsk_root_ctx = NULL;
        cfs_hlist_del_init(&ctx->cc_cache);
        cfs_atomic_dec(&ctx->cc_refcount);

        spin_unlock_if(&sec->ps_lock, !locked);

        return 1;
}
/*
 * Bind a key and a ctx together.
 * Caller must hold the write lock of the key, as well as a ref on both the
 * key and the ctx.
 */
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(atomic_read(&key->usage) > 0);
        LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
        LASSERT(key->payload.data == NULL);

        /* at this time the context may or may not be in the list. */

        key_get(key);
        cfs_atomic_inc(&ctx->cc_refcount);
        ctx2gctx_keyring(ctx)->gck_key = key;
        key->payload.data = ctx;
}
/*
 * Unbind a key and a ctx.
 * Caller must hold the write lock, as well as a ref on the key.
 */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(key->payload.data == ctx);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);

        /* must revoke the key, or others may treat it as newly created */
        key_revoke_locked(key);

        key->payload.data = NULL;
        ctx2gctx_keyring(ctx)->gck_key = NULL;

        /* once the ctx is split from the key, the timer is meaningless */
        ctx_clear_timer_kr(ctx);

        ctx_put_kr(ctx, 1);
        key_put(key);
}
/*
 * Given a ctx, unbind it from its coupled key, if any.
 * Unbind can only be called once, so we don't need to worry about the key
 * being released by someone else in the middle.
 */
static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct key *key = ctx2gctx_keyring(ctx)->gck_key;

        if (key) {
                LASSERT(key->payload.data == ctx);

                key_get(key);
                down_write(&key->sem);
                unbind_key_ctx(key, ctx);
                up_write(&key->sem);
                key_put(key);
        }
}
/*
 * Given a key, unbind it from its coupled ctx, if any.
 * Caller must hold the write lock, as well as a ref on the key.
 */
static void unbind_key_locked(struct key *key)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;

        if (ctx)
                unbind_key_ctx(key, ctx);
}
/*
 * Unlist a ctx, and unbind it from its coupled key.
 */
static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
        if (ctx_unlist_kr(ctx, 0))
                unbind_ctx_kr(ctx);
}
/*
 * Given a key, unlist and unbind it from the coupled ctx (if any).
 * Caller must hold the write lock, as well as a ref on the key.
 */
static void kill_key_locked(struct key *key)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;

        if (ctx && ctx_unlist_kr(ctx, 0))
                unbind_key_locked(key);
}
/*
 * caller should hold one ref on contexts in freelist.
 */
static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
{
        cfs_hlist_node_t      *pos, *next;
        struct ptlrpc_cli_ctx *ctx;
        struct gss_cli_ctx    *gctx;

        cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
                cfs_hlist_del_init(&ctx->cc_cache);

                /* reverse ctx: update the current seq to the buddy svcctx if
                 * it exists. ideally this should be done at
                 * gss_cli_ctx_finalize(), but the ctx destroy could be
                 * delayed by:
                 *  1) ctx still has a reference;
                 *  2) ctx destroy is asynchronous;
                 * and the reverse import's call to inval_all_ctx() requires
                 * this to be done _immediately_, otherwise a newly created
                 * reverse ctx might copy a very old sequence number from the
                 * svcctx. */
                gctx = ctx2gctx(ctx);
                if (!rawobj_empty(&gctx->gc_svc_handle) &&
                    sec_is_reverse(gctx->gc_base.cc_sec)) {
                        gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
                                        (__u32) cfs_atomic_read(&gctx->gc_seq));
                }

                /* we need to wake up waiting reqs here. the context might
                 * be forced released before the upcall finished, and then
                 * the late-arrived downcall can't even find the ctx. */
                sptlrpc_cli_ctx_wakeup(ctx);

                unbind_ctx_kr(ctx);
                ctx_put_kr(ctx, 0);
        }
}
/*
 * lookup a root context directly in a sec, return the root ctx with a
 * reference taken, or NULL.
 */
static
struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx  *ctx = NULL;

        spin_lock(&sec->ps_lock);

        ctx = gsec_kr->gsk_root_ctx;

        if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
                cfs_hlist_node_t      *node;
                struct ptlrpc_cli_ctx *tmp;

                /* reverse ctx, search the root ctx in the list, and choose
                 * the one with the shortest expire time, which most likely
                 * has an established peer ctx on the client side. */
                cfs_hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist,
                                         cc_cache) {
                        if (ctx == NULL || ctx->cc_expire == 0 ||
                            ctx->cc_expire > tmp->cc_expire) {
                                ctx = tmp;
                                /* promote to be root_ctx */
                                gsec_kr->gsk_root_ctx = ctx;
                        }
                }
        }

        if (ctx) {
                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
                LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
                cfs_atomic_inc(&ctx->cc_refcount);
        }

        spin_unlock(&sec->ps_lock);

        return ctx;
}
#define RVS_CTX_EXPIRE_NICE (10)
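/*
 * Example of the knob above: when rvs_sec_install_root_ctx_kr() installs a
 * new reverse root ctx, any existing ctx whose expiry lies more than 10
 * seconds away is clamped to now + 10s, so in-flight reverse RPCs get a
 * short grace window instead of an immediate cutoff.
 */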
static
void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
                                 struct ptlrpc_cli_ctx *new_ctx,
                                 struct key *key)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        cfs_hlist_node_t       *hnode;
        struct ptlrpc_cli_ctx  *ctx;
        cfs_time_t              now;

        LASSERT(sec_is_reverse(sec));

        spin_lock(&sec->ps_lock);

        now = cfs_time_current_sec();

        /* set all existing ctxs short expiry */
        cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
                if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
                        ctx->cc_early_expire = 1;
                        ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
                }
        }

        /* if there's a root_ctx there already, instead of obsoleting the
         * current one immediately, we leave it running for a little while.
         * hopefully by the time the first backward rpc with the newest ctx
         * is sent out, the client side already has the peer ctx well
         * established. */
        ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);

        if (key)
                bind_key_ctx(key, new_ctx);

        spin_unlock(&sec->ps_lock);
}
static void construct_key_desc(void *buf, int bufsize,
                               struct ptlrpc_sec *sec, uid_t uid)
{
        snprintf(buf, bufsize, "%d@%x", uid, sec->ps_id);
        ((char *)buf)[bufsize - 1] = '\0';
}
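/*
 * Example (hypothetical values): for uid 500 on a sec whose ps_id is 0x2a,
 * the description built above is "500@2a". This same string is later passed
 * to request_key()/key_alloc() and compared verbatim by gss_kt_match().
 */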
/****************************************
 * sec apis                             *
 ****************************************/
static
struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
                                      struct ptlrpc_svc_ctx *svcctx,
                                      struct sptlrpc_flavor *sf)
{
        struct gss_sec_keyring *gsec_kr;
        ENTRY;

        OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
        if (gsec_kr == NULL)
                RETURN(NULL);

        CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
        gsec_kr->gsk_root_ctx = NULL;
        mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_init(&gsec_kr->gsk_uc_lock);
#endif

        if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
                                  imp, svcctx, sf))
                goto err_free;

        if (svcctx != NULL &&
            sec_install_rctx_kr(&gsec_kr->gsk_base.gs_base, svcctx)) {
                gss_sec_destroy_common(&gsec_kr->gsk_base);
                goto err_free;
        }

        RETURN(&gsec_kr->gsk_base.gs_base);

err_free:
        OBD_FREE(gsec_kr, sizeof(*gsec_kr));
        RETURN(NULL);
}
static
void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec         *gsec = sec2gsec(sec);
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);

        LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist));
        LASSERT(gsec_kr->gsk_root_ctx == NULL);

        gss_sec_destroy_common(gsec);

        OBD_FREE(gsec_kr, sizeof(*gsec_kr));
}
static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
{
        /* apart from the ROOTONLY flag, treat it as the root user only if
         * the real uid is 0; euid/fsuid being 0 is handled as a setuid
         * scenario */
        if (sec_is_rootonly(sec) || (vcred->vc_uid == 0))
                return 1;
        else
                return 0;
}
/*
 * Unlink the request key from its ring, which is linked during request_key().
 * Sadly, we have to 'guess' which keyring it's linked to.
 *
 * FIXME this code is fragile, it depends on how request_key_link() is
 * implemented.
 */
static void request_key_unlink(struct key *key)
{
        struct task_struct *tsk = current;
        struct key         *ring;

        switch (key_cred(tsk)->jit_keyring) {
        case KEY_REQKEY_DEFL_DEFAULT:
        case KEY_REQKEY_DEFL_THREAD_KEYRING:
                ring = key_get(key_cred(tsk)->thread_keyring);
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_PROCESS_KEYRING:
                ring = key_get(key_tgcred(tsk)->process_keyring);
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_SESSION_KEYRING:
                rcu_read_lock();
                ring = key_get(rcu_dereference(key_tgcred(tsk)
                                               ->session_keyring));
                rcu_read_unlock();
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
                ring = key_get(key_cred(tsk)->user->session_keyring);
                break;
        case KEY_REQKEY_DEFL_USER_KEYRING:
                ring = key_get(key_cred(tsk)->user->uid_keyring);
                break;
        case KEY_REQKEY_DEFL_GROUP_KEYRING:
        default:
                LBUG();
        }

        LASSERT(ring);
        key_unlink(ring, key);
        key_put(ring);
}
static
struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
                                              struct vfs_cred *vcred,
                                              int create, int remove_dead)
{
        struct obd_import      *imp = sec->ps_import;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx  *ctx = NULL;
        unsigned int            is_root = 0, create_new = 0;
        struct key             *key;
        char                    desc[24];
        char                   *coinfo;
        int                     coinfo_size;
        char                   *co_flags = "";
        ENTRY;

        LASSERT(imp != NULL);

        is_root = user_is_root(sec, vcred);

        /* a little bit of optimization for the root context */
        if (is_root) {
                ctx = sec_lookup_root_ctx_kr(sec);
                /*
                 * Only lookup directly for REVERSE sec, which should
                 * always succeed.
                 */
                if (ctx || sec_is_reverse(sec))
                        RETURN(ctx);
        }

        LASSERT(create != 0);
        /* for root context, obtain the lock and check again, this time
         * holding the root upcall lock, to make sure nobody else populated
         * a new root context after the last check. */
        if (is_root) {
                mutex_lock(&gsec_kr->gsk_root_uc_lock);

                ctx = sec_lookup_root_ctx_kr(sec);
                if (ctx)
                        goto out;

                /* update reverse handle for root user */
                sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();

                switch (sec->ps_part) {
                case LUSTRE_SP_MDT:
                        co_flags = "m";
                        break;
                case LUSTRE_SP_OST:
                        co_flags = "o";
                        break;
                case LUSTRE_SP_MGC:
                        co_flags = "rmo";
                        break;
                case LUSTRE_SP_CLI:
                        co_flags = "r";
                        break;
                case LUSTRE_SP_MGS:
                default:
                        LBUG();
                }
        }
        /* in case of setuid, the key will be constructed as the owner of
         * fsuid/fsgid, but we do authentication based on the real uid/gid.
         * the key permission bits will be exactly POS_ALL, so only processes
         * which possess this key can access it, although the quota might be
         * counted on others (fsuid/fsgid).
         *
         * keyring will use fsuid/fsgid as upcall parameters, so we have to
         * encode the real uid/gid into the callout info.
         */

        construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

        /* callout info format:
         * secid:mech:uid:gid:flags:svc_type:peer_nid:target_uuid
         */
        coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
        OBD_ALLOC(coinfo, coinfo_size);
        if (coinfo == NULL)
                goto out;

        snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%d:"LPX64":%s",
                 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
                 vcred->vc_uid, vcred->vc_gid,
                 co_flags, import_to_gss_svc(imp),
                 imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name);
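        /* a hypothetical rendering of the callout info built above (all
         * values invented for illustration):
         *
         *   "5:krb5:500:500:r:2:0x200000bd00f0e:lustre-MDT0000-mdc"
         *
         * the userspace upcall splits on ':' to recover secid, mech, uid,
         * gid, flags, svc type, peer nid and target uuid. */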
        CDEBUG(D_SEC, "requesting key for %s\n", desc);

        keyring_upcall_lock(gsec_kr);
        key = request_key(&gss_key_type, desc, coinfo);
        keyring_upcall_unlock(gsec_kr);

        OBD_FREE(coinfo, coinfo_size);

        if (IS_ERR(key)) {
                CERROR("failed request key: %ld\n", PTR_ERR(key));
                goto out;
        }
        CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc);
        /* once payload.data has been pointed to a ctx, it never changes
         * until we de-associate them; but a parallel request_key() may
         * return a key with payload.data == NULL at the same time. so we
         * still need the write lock of key->sem to serialize them. */
        down_write(&key->sem);

        if (likely(key->payload.data != NULL)) {
                ctx = key->payload.data;

                LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 1);
                LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
                LASSERT(atomic_read(&key->usage) >= 2);

                /* simply take a ref and return. it's the upper layer's
                 * responsibility to detect & replace dead ctx. */
                cfs_atomic_inc(&ctx->cc_refcount);
        } else {
                /* pre-initialize with a cli_ctx. this can't be done in
                 * key_instantiate() because we don't have enough
                 * information there. */
                ctx = ctx_create_kr(sec, vcred);
                if (ctx != NULL) {
                        ctx_enlist_kr(ctx, is_root, 0);
                        bind_key_ctx(key, ctx);

                        ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);

                        CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
                               key, ctx, sec);
                } else {
                        /* we'd prefer to call key_revoke(), but we'd rather
                         * have the revocation happen within this key->sem
                         * locked period, so do it directly. */
                        key_revoke_locked(key);
                }

                create_new = 1;
        }

        up_write(&key->sem);

        if (is_root && create_new)
                request_key_unlink(key);

        key_put(key);
out:
        if (is_root)
                mutex_unlock(&gsec_kr->gsk_root_uc_lock);
        RETURN(ctx);
}
static
void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx,
                            int sync)
{
        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
        ctx_release_kr(ctx, sync);
}
/*
 * flush contexts of a normal user. we must resort to the keyring itself to
 * find out the contexts which belong to me.
 *
 * Note here we are supposed to flush only _my_ contexts; the "uid" is
 * ignored in the search.
 */
static
void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
                             uid_t uid,
                             int grace, int force)
{
        struct key *key;
        char        desc[24];

        /* nothing to do for reverse or rootonly sec */
        if (sec_is_reverse(sec) || sec_is_rootonly(sec))
                return;

        construct_key_desc(desc, sizeof(desc), sec, uid);

        /* there should be only one valid key, but we loop just in case of
         * any weird cases */
        for (;;) {
                key = request_key(&gss_key_type, desc, NULL);
                if (IS_ERR(key)) {
                        CDEBUG(D_SEC, "No more key found for current user\n");
                        break;
                }

                down_write(&key->sem);

                kill_key_locked(key);

                /* kill_key_locked() should usually revoke the key, but we
                 * revoke it again to make sure, e.g. in some cases the key
                 * may not be well coupled with a context. */
                key_revoke_locked(key);

                up_write(&key->sem);

                request_key_unlink(key);
                key_put(key);
        }
}
/*
 * flush contexts of root or of everyone; we iterate through the list.
 */
static
void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
                             uid_t uid,
                             int grace, int force)
{
        struct gss_sec_keyring *gsec_kr;
        cfs_hlist_head_t        freelist = CFS_HLIST_HEAD_INIT;
        cfs_hlist_node_t       *pos, *next;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        gsec_kr = sec2gsec_keyring(sec);

        spin_lock(&sec->ps_lock);
        cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                      &gsec_kr->gsk_clist, cc_cache) {
                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);

                if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
                        continue;

                /* at this moment there are at least 2 base references:
                 * key association and in-list. */
                if (cfs_atomic_read(&ctx->cc_refcount) > 2) {
                        if (!force)
                                continue;
                        CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
                              ctx, ctx->cc_vcred.vc_uid,
                              sec2target_str(ctx->cc_sec),
                              cfs_atomic_read(&ctx->cc_refcount) - 2);
                }

                set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
                if (!grace)
                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                cfs_atomic_inc(&ctx->cc_refcount);

                if (ctx_unlist_kr(ctx, 1)) {
                        cfs_hlist_add_head(&ctx->cc_cache, &freelist);
                } else {
                        LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
                        cfs_atomic_dec(&ctx->cc_refcount);
                }
        }
        spin_unlock(&sec->ps_lock);

        dispose_ctx_list_kr(&freelist);
        EXIT;
}
static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
                               uid_t uid, int grace, int force)
{
        ENTRY;

        CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
               sec, cfs_atomic_read(&sec->ps_refcount),
               cfs_atomic_read(&sec->ps_nctx),
               uid, grace, force);

        if (uid != -1 && uid != 0)
                flush_user_ctx_cache_kr(sec, uid, grace, force);
        else
                flush_spec_ctx_cache_kr(sec, uid, grace, force);

        RETURN(0);
}
static
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        cfs_hlist_head_t        freelist = CFS_HLIST_HEAD_INIT;
        cfs_hlist_node_t       *pos, *next;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        CWARN("running gc\n");

        spin_lock(&sec->ps_lock);
        cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                      &gsec_kr->gsk_clist, cc_cache) {
                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);

                cfs_atomic_inc(&ctx->cc_refcount);

                if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
                        cfs_hlist_add_head(&ctx->cc_cache, &freelist);
                        CWARN("unhashed ctx %p\n", ctx);
                } else {
                        LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
                        cfs_atomic_dec(&ctx->cc_refcount);
                }
        }
        spin_unlock(&sec->ps_lock);

        dispose_ctx_list_kr(&freelist);
        EXIT;
}
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        cfs_hlist_node_t       *pos, *next;
        struct ptlrpc_cli_ctx  *ctx;
        struct gss_cli_ctx     *gctx;
        time_t                  now = cfs_time_current_sec();
        ENTRY;

        spin_lock(&sec->ps_lock);
        cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                      &gsec_kr->gsk_clist, cc_cache) {
                struct key *key;
                char        flags_str[40];
                char        mech[40];

                gctx = ctx2gctx(ctx);
                key = ctx2gctx_keyring(ctx)->gck_key;

                gss_cli_ctx_flags2str(ctx->cc_flags,
                                      flags_str, sizeof(flags_str));

                if (gctx->gc_mechctx)
                        lgss_display(gctx->gc_mechctx, mech, sizeof(mech));
                else
                        snprintf(mech, sizeof(mech), "N/A");
                mech[sizeof(mech) - 1] = '\0';

                seq_printf(seq, "%p: uid %u, ref %d, expire %ld(%+ld), fl %s, "
                           "seq %d, win %u, key %08x(ref %d), "
                           "hdl "LPX64":"LPX64", mech: %s\n",
                           ctx, ctx->cc_vcred.vc_uid,
                           cfs_atomic_read(&ctx->cc_refcount),
                           ctx->cc_expire,
                           ctx->cc_expire ? ctx->cc_expire - now : 0,
                           flags_str,
                           cfs_atomic_read(&gctx->gc_seq),
                           gctx->gc_win,
                           key ? key->serial : 0,
                           key ? atomic_read(&key->usage) : 0,
                           gss_handle_to_u64(&gctx->gc_handle),
                           gss_handle_to_u64(&gctx->gc_svc_handle),
                           mech);
        }
        spin_unlock(&sec->ps_lock);

        RETURN(0);
}
/****************************************
 * cli_ctx apis                         *
 ****************************************/
static
int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
{
        /* upcall is already on the way */
        return 0;
}
static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        if (cli_ctx_check_death(ctx)) {
                kill_ctx_kr(ctx);
                return 1;
        }

        if (cli_ctx_is_ready(ctx))
                return 0;
        return 1;
}
static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        cli_ctx_expire(ctx);
        kill_ctx_kr(ctx);
}
/****************************************
 * (reverse) service                    *
 ****************************************/
/*
 * a reverse context could have nothing to do with keyrings. here we still
 * keep the version which binds to a key, for future reference.
 */
#define HAVE_REVERSE_CTX_NOKEY
#ifdef HAVE_REVERSE_CTX_NOKEY

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                        struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_cli_ctx *cli_ctx;
        struct vfs_cred        vcred = { 0, 0 };
        int                    rc;
        ENTRY;

        LASSERT(sec);
        LASSERT(svc_ctx);

        cli_ctx = ctx_create_kr(sec, &vcred);
        if (cli_ctx == NULL)
                RETURN(-ENOMEM);

        rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
        if (rc) {
                CERROR("failed copy reverse cli ctx: %d\n", rc);

                ctx_put_kr(cli_ctx, 1);
                RETURN(rc);
        }

        rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);

        ctx_put_kr(cli_ctx, 1);

        RETURN(0);
}
#else /* ! HAVE_REVERSE_CTX_NOKEY */

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                        struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_cli_ctx *cli_ctx = NULL;
        struct key            *key;
        struct vfs_cred        vcred = { 0, 0 };
        char                   desc[64];
        int                    rc;
        ENTRY;

        LASSERT(sec);
        LASSERT(svc_ctx);

        construct_key_desc(desc, sizeof(desc), sec, 0);

        key = key_alloc(&gss_key_type, desc, 0, 0,
                        KEY_POS_ALL | KEY_USR_ALL, 1);
        if (IS_ERR(key)) {
                CERROR("failed to alloc key: %ld\n", PTR_ERR(key));
                return PTR_ERR(key);
        }

        rc = key_instantiate_and_link(key, NULL, 0, NULL, NULL);
        if (rc) {
                CERROR("failed to instantiate key: %d\n", rc);
                goto err_revoke;
        }

        down_write(&key->sem);

        LASSERT(key->payload.data == NULL);

        cli_ctx = ctx_create_kr(sec, &vcred);
        if (cli_ctx == NULL) {
                rc = -ENOMEM;
                goto err_up;
        }

        rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
        if (rc) {
                CERROR("failed copy reverse cli ctx: %d\n", rc);
                goto err_put;
        }

        rvs_sec_install_root_ctx_kr(sec, cli_ctx, key);

        ctx_put_kr(cli_ctx, 1);
        up_write(&key->sem);

        RETURN(0);

err_put:
        ctx_put_kr(cli_ctx, 1);
err_up:
        up_write(&key->sem);
err_revoke:
        key_revoke(key);
        key_put(key);
        RETURN(rc);
}
/****************************************
 * service apis                         *
 ****************************************/
static
int gss_svc_accept_kr(struct ptlrpc_request *req)
{
        return gss_svc_accept(&gss_policy_keyring, req);
}

static
int gss_svc_install_rctx_kr(struct obd_import *imp,
                            struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_sec *sec;
        int                rc;

        sec = sptlrpc_import_sec_ref(imp);
        LASSERT(sec);

        rc = sec_install_rctx_kr(sec, svc_ctx);
        sptlrpc_sec_put(sec);

        return rc;
}
/****************************************
 * key apis                             *
 ****************************************/
static
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
        int rc;
        ENTRY;

        if (data != NULL || datalen != 0) {
                CERROR("invalid: data %p, len %lu\n", data, (long) datalen);
                RETURN(-EINVAL);
        }

        if (key->payload.data != NULL) {
                CERROR("key already has payload\n");
                RETURN(-EINVAL);
        }

        /* link the key to the session keyring, so the following context
         * negotiation rpc fired from user space can find this key. This
         * will be unlinked automatically when the upcall process dies.
         *
         * we can't do this through keyctl from userspace, because the upcall
         * might be neither possessor nor owner of the key (setuid).
         *
         * the session keyring is created upon upcall, and doesn't change
         * until the upcall finishes, so the rcu lock is not needed here.
         */
        LASSERT(key_tgcred(cfs_current())->session_keyring);

        rc = key_link(key_tgcred(cfs_current())->session_keyring, key);
        if (rc) {
                CERROR("failed to link key %08x to keyring %08x: %d\n",
                       key->serial,
                       key_tgcred(cfs_current())->session_keyring->serial, rc);
                RETURN(rc);
        }

        CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
        RETURN(0);
}
/*
 * called with the key semaphore write-locked. this means we can operate
 * on the context without fear of losing a refcount.
 */
static
int gss_kt_update(struct key *key, const void *data, size_t datalen)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;
        struct gss_cli_ctx    *gctx;
        rawobj_t               tmpobj = RAWOBJ_EMPTY;
        __u32                  datalen32 = (__u32) datalen;
        int                    rc;
        ENTRY;
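        /* layout of the downcall buffer parsed below (a sketch inferred
         * from the extraction sequence, not a normative description):
         *
         *   __u32 seq_win
         *   if seq_win == 0 (negotiation failed):
         *       __u32 rpc_err, __u32 gss_err
         *   else:
         *       rawobj gss handle, rawobj mech token
         */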
        if (data == NULL || datalen == 0) {
                CWARN("invalid: data %p, len %lu\n", data, (long) datalen);
                RETURN(-EINVAL);
        }

        /* if the upcall finished negotiation too fast (most likely because
         * a local error happened) and called kt_update(), the ctx might
         * still be NULL. but the key will finally be associated with a
         * context, or be revoked. if the key status is fine, return -EAGAIN
         * to let userspace sleep a while and call again. */
        if (ctx == NULL) {
                CDEBUG(D_SEC, "update too soon: key %p(%x) flags %lx\n",
                       key, key->serial, key->flags);

                rc = key_validate(key);
                if (rc == 0)
                        RETURN(-EAGAIN);
                else
                        RETURN(rc);
        }

        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        ctx_clear_timer_kr(ctx);

        /* don't proceed if already refreshed */
        if (cli_ctx_is_refreshed(ctx)) {
                CWARN("ctx already done refresh\n");
                RETURN(0);
        }

        sptlrpc_cli_ctx_get(ctx);
        gctx = ctx2gctx(ctx);

        rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win,
                                  sizeof(gctx->gc_win));
        if (rc) {
                CERROR("failed extract seq_win\n");
                goto out;
        }

        if (gctx->gc_win == 0) {
                __u32 nego_rpc_err, nego_gss_err;

                rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
                                          sizeof(nego_rpc_err));
                if (rc) {
                        CERROR("failed to extract rpc rc\n");
                        goto out;
                }

                rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
                                          sizeof(nego_gss_err));
                if (rc) {
                        CERROR("failed to extract gss rc\n");
                        goto out;
                }

                CERROR("negotiation: rpc err %d, gss err %x\n",
                       nego_rpc_err, nego_gss_err);

                rc = nego_rpc_err ? nego_rpc_err : -EACCES;
        } else {
                rc = rawobj_extract_local_alloc(&gctx->gc_handle,
                                                (__u32 **) &data, &datalen32);
                if (rc) {
                        CERROR("failed extract handle\n");
                        goto out;
                }

                rc = rawobj_extract_local(&tmpobj,
                                          (__u32 **) &data, &datalen32);
                if (rc) {
                        CERROR("failed extract mech\n");
                        goto out;
                }

                rc = lgss_import_sec_context(&tmpobj,
                                             sec2gsec(ctx->cc_sec)->gs_mech,
                                             &gctx->gc_mechctx);
                if (rc != GSS_S_COMPLETE)
                        CERROR("failed import context\n");
                else
                        rc = 0;
        }
out:
        /* we don't care about the current status of this ctx, even if
         * someone else is operating on it at the same time. we just add
         * our own opinion here. */
        if (rc == 0) {
                gss_cli_ctx_uptodate(gctx);
        } else {
                /* this will also revoke the key. it has to be done before
                 * waking up waiters, otherwise they can find the stale key */
                kill_key_locked(key);

                cli_ctx_expire(ctx);

                if (rc != -ERESTART)
                        set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
        }

        /* let user space think it's a success */
        sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}
static
int gss_kt_match(const struct key *key, const void *desc)
{
        return (strcmp(key->description, (const char *) desc) == 0);
}
static
void gss_kt_destroy(struct key *key)
{
        ENTRY;
        LASSERT(key->payload.data == NULL);
        CDEBUG(D_SEC, "destroy key %p\n", key);
        EXIT;
}
static
void gss_kt_describe(const struct key *key, struct seq_file *s)
{
        if (key->description == NULL)
                seq_puts(s, "[null]");
        else
                seq_puts(s, key->description);
}
static struct key_type gss_key_type =
{
        .name           = "lgssc",
        .def_datalen    = 0,
        .instantiate    = gss_kt_instantiate,
        .update         = gss_kt_update,
        .match          = gss_kt_match,
        .destroy        = gss_kt_destroy,
        .describe       = gss_kt_describe,
};
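/*
 * Note on the upcall path (a summary, not code from this file): request_key()
 * on this key type drops into the kernel's request-key mechanism, which runs
 * /sbin/request-key; that consults /etc/request-key.conf (or request-key.d)
 * and is expected to invoke the Lustre gss upcall with the callout info built
 * in gss_sec_lookup_ctx_kr(), whose downcall lands in gss_kt_update() above.
 */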
/****************************************
 * lustre gss keyring policy            *
 ****************************************/
static struct ptlrpc_ctx_ops gss_keyring_ctxops = {
        .match                  = gss_cli_ctx_match,
        .refresh                = gss_cli_ctx_refresh_kr,
        .validate               = gss_cli_ctx_validate_kr,
        .die                    = gss_cli_ctx_die_kr,
        .sign                   = gss_cli_ctx_sign,
        .verify                 = gss_cli_ctx_verify,
        .seal                   = gss_cli_ctx_seal,
        .unseal                 = gss_cli_ctx_unseal,
        .wrap_bulk              = gss_cli_ctx_wrap_bulk,
        .unwrap_bulk            = gss_cli_ctx_unwrap_bulk,
};
static struct ptlrpc_sec_cops gss_sec_keyring_cops = {
        .create_sec             = gss_sec_create_kr,
        .destroy_sec            = gss_sec_destroy_kr,
        .kill_sec               = gss_sec_kill,
        .lookup_ctx             = gss_sec_lookup_ctx_kr,
        .release_ctx            = gss_sec_release_ctx_kr,
        .flush_ctx_cache        = gss_sec_flush_ctx_cache_kr,
        .gc_ctx                 = gss_sec_gc_ctx_kr,
        .install_rctx           = gss_sec_install_rctx,
        .alloc_reqbuf           = gss_alloc_reqbuf,
        .free_reqbuf            = gss_free_reqbuf,
        .alloc_repbuf           = gss_alloc_repbuf,
        .free_repbuf            = gss_free_repbuf,
        .enlarge_reqbuf         = gss_enlarge_reqbuf,
        .display                = gss_sec_display_kr,
};
static struct ptlrpc_sec_sops gss_sec_keyring_sops = {
        .accept                 = gss_svc_accept_kr,
        .invalidate_ctx         = gss_svc_invalidate_ctx,
        .alloc_rs               = gss_svc_alloc_rs,
        .authorize              = gss_svc_authorize,
        .free_rs                = gss_svc_free_rs,
        .free_ctx               = gss_svc_free_ctx,
        .prep_bulk              = gss_svc_prep_bulk,
        .unwrap_bulk            = gss_svc_unwrap_bulk,
        .wrap_bulk              = gss_svc_wrap_bulk,
        .install_rctx           = gss_svc_install_rctx_kr,
};
static struct ptlrpc_sec_policy gss_policy_keyring = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "gss.keyring",
        .sp_policy              = SPTLRPC_POLICY_GSS,
        .sp_cops                = &gss_sec_keyring_cops,
        .sp_sops                = &gss_sec_keyring_sops,
};
int __init gss_init_keyring(void)
{
        int rc;

        rc = register_key_type(&gss_key_type);
        if (rc) {
                CERROR("failed to register keyring type: %d\n", rc);
                return rc;
        }

        rc = sptlrpc_register_policy(&gss_policy_keyring);
        if (rc) {
                unregister_key_type(&gss_key_type);
                return rc;
        }

        return 0;
}
void __exit gss_exit_keyring(void)
{
        unregister_key_type(&gss_key_type);
        sptlrpc_unregister_policy(&gss_policy_keyring);
}