/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2007 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/random.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>

#include "gss_internal.h"
static struct ptlrpc_sec_policy gss_policy_keyring;
static struct ptlrpc_ctx_ops gss_keyring_ctxops;
static struct key_type gss_key_type;

static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                               struct ptlrpc_svc_ctx *svc_ctx);

#define task_aux(tsk)           (tsk)
/*
 * the timeout covers only the case in which the upcall child process dies
 * abnormally; in any other case it will eventually update the kernel key.
 * so we set this timeout value excessively long.
 */
#define KEYRING_UPCALL_TIMEOUT  (obd_timeout + obd_timeout)
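/* for illustration: with a default obd_timeout of 100 seconds this allows
 * the upcall roughly 200 seconds before the timer fires and the key is
 * revoked; the exact value depends on the obd_timeout configured on this
 * node. */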
/****************************************
 * internal helpers                     *
 ****************************************/

#define DUMP_PROCESS_KEYRINGS(tsk)                                      \
{                                                                       \
        CWARN("DUMP PK: %s[%u,%u/%u](<-%s[%u,%u/%u]): "                 \
              "a %d, t %d, p %d, s %d, u %d, us %d, df %d\n",           \
              tsk->comm, tsk->pid, tsk->uid, tsk->fsuid,                \
              tsk->parent->comm, tsk->parent->pid,                      \
              tsk->parent->uid, tsk->parent->fsuid,                     \
              task_aux(tsk)->request_key_auth ?                         \
              task_aux(tsk)->request_key_auth->serial : 0,              \
              task_aux(tsk)->thread_keyring ?                           \
              task_aux(tsk)->thread_keyring->serial : 0,                \
              tsk->signal->process_keyring ?                            \
              tsk->signal->process_keyring->serial : 0,                 \
              tsk->signal->session_keyring ?                            \
              tsk->signal->session_keyring->serial : 0,                 \
              tsk->user->uid_keyring ?                                  \
              tsk->user->uid_keyring->serial : 0,                       \
              tsk->user->session_keyring ?                              \
              tsk->user->session_keyring->serial : 0,                   \
              task_aux(tsk)->jit_keyring                                \
             );                                                         \
}

#define DUMP_KEY(key)                                                   \
{                                                                       \
        CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n",              \
              key, key->serial, atomic_read(&key->usage),               \
              key->uid, key->gid,                                       \
              key->description ? key->description : "n/a"               \
             );                                                         \
}
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void key_revoke_locked(struct key *key)
{
        set_bit(KEY_FLAG_REVOKED, &key->flags);
}
static void ctx_upcall_timeout_kr(unsigned long data)
{
        struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *) data;
        struct key            *key = ctx2gctx_keyring(ctx)->gck_key;

        CWARN("ctx %p, key %p\n", ctx, key);

        LASSERT(key);

        key_revoke_locked(key);
        sptlrpc_cli_ctx_wakeup(ctx);
}
static
void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
{
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
        struct timer_list          *timer = gctx_kr->gck_timer;

        LASSERT(timer);

        CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
        timeout = timeout * HZ + cfs_time_current();

        init_timer(timer);
        timer->expires = timeout;
        timer->data = (unsigned long ) ctx;
        timer->function = ctx_upcall_timeout_kr;

        add_timer(timer);
}

/*
 * caller should make sure there is no race with other threads
 */
static
void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
        struct timer_list          *timer = gctx_kr->gck_timer;

        if (timer == NULL)
                return;

        CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);

        gctx_kr->gck_timer = NULL;

        del_singleshot_timer_sync(timer);

        OBD_FREE_PTR(timer);
}
static
struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
                                     struct vfs_cred *vcred)
{
        struct ptlrpc_cli_ctx      *ctx;
        struct gss_cli_ctx_keyring *gctx_kr;

        OBD_ALLOC_PTR(gctx_kr);
        if (gctx_kr == NULL)
                return NULL;

        OBD_ALLOC_PTR(gctx_kr->gck_timer);
        if (gctx_kr->gck_timer == NULL) {
                OBD_FREE_PTR(gctx_kr);
                return NULL;
        }
        init_timer(gctx_kr->gck_timer);

        ctx = &gctx_kr->gck_base.gc_base;

        if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
                OBD_FREE_PTR(gctx_kr->gck_timer);
                OBD_FREE_PTR(gctx_kr);
                return NULL;
        }

        ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
        clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
        atomic_inc(&ctx->cc_refcount);          /* for the caller */

        return ctx;
}
static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct ptlrpc_sec          *sec = ctx->cc_sec;
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);

        CDEBUG(D_SEC, "destroying ctx %p\n", ctx);

        /* at this point the association with the key has been broken. */
        LASSERT(sec);
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
        LASSERT(gctx_kr->gck_key == NULL);

        ctx_clear_timer_kr(ctx);
        LASSERT(gctx_kr->gck_timer == NULL);

        if (gss_cli_ctx_fini_common(sec, ctx))
                return;

        OBD_FREE_PTR(gctx_kr);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}
static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
        if (sync) {
                ctx_destroy_kr(ctx);
        } else {
                atomic_inc(&ctx->cc_refcount);
                sptlrpc_gc_add_ctx(ctx);
        }
}

static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        if (atomic_dec_and_test(&ctx->cc_refcount))
                ctx_release_kr(ctx, sync);
}
/*
 * key <-> ctx association and rules:
 * - a ctx might not be bound to any key
 * - key/ctx binding is protected by the key semaphore (if the key is present)
 * - key and ctx each take a reference on the other
 * - ctx enlist/unlist is protected by the ctx spinlock
 * - never enlist a ctx after it has been unlisted
 * - whoever does enlist should also do bind; lock the key before enlisting:
 *   - lock key -> lock ctx -> enlist -> unlock ctx -> bind -> unlock key
 * - whoever does unlist should also do unbind:
 *   - lock key -> lock ctx -> unlist -> unlock ctx -> unbind -> unlock key
 *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
 */
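/*
 * as an illustrative sketch (not a new code path), the enlist+bind rule
 * above corresponds to the sequence used later in gss_sec_lookup_ctx_kr():
 *
 *         down_write(&key->sem);           // lock key
 *         ctx_enlist_kr(ctx, is_root, 0);  // takes sec->ps_lock internally
 *         bind_key_ctx(key, ctx);          // bind while key is still locked
 *         up_write(&key->sem);             // unlock key
 */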
static inline void spin_lock_if(spinlock_t *lock, int condition)
{
        if (condition)
                spin_lock(lock);
}

static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
        if (condition)
                spin_unlock(lock);
}
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
        struct ptlrpc_sec      *sec = ctx->cc_sec;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        spin_lock_if(&sec->ps_lock, !locked);

        atomic_inc(&ctx->cc_refcount);
        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
        hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
        if (is_root)
                gsec_kr->gsk_root_ctx = ctx;

        spin_unlock_if(&sec->ps_lock, !locked);
}
/*
 * Note: after this is called, the caller should not access ctx again, since
 * it might have been freed, unless the caller holds at least one extra
 * reference on the ctx.
 *
 * return non-zero if we indeed unlisted this ctx.
 */
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
        struct ptlrpc_sec      *sec = ctx->cc_sec;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        /* if the hashed bit has gone, leave the job to whoever is doing it */
        if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
                return 0;

        /* drop ref inside spin lock to prevent race with other operations */
        spin_lock_if(&sec->ps_lock, !locked);

        if (gsec_kr->gsk_root_ctx == ctx)
                gsec_kr->gsk_root_ctx = NULL;
        hlist_del_init(&ctx->cc_cache);
        atomic_dec(&ctx->cc_refcount);

        spin_unlock_if(&sec->ps_lock, !locked);

        return 1;
}
/*
 * bind a key and a ctx together.
 * caller must hold the write lock of the key, as well as a ref on key & ctx.
 */
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(atomic_read(&key->usage) > 0);
        LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
        LASSERT(key->payload.data == NULL);

        /* at this point the context may or may not be in the list. */
        key_get(key);
        atomic_inc(&ctx->cc_refcount);
        ctx2gctx_keyring(ctx)->gck_key = key;
        key->payload.data = ctx;
}
/*
 * unbind a key and a ctx.
 * caller must hold the write lock, as well as a ref of the key.
 */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(key->payload.data == ctx);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);

        /* must revoke the key, or others may treat it as newly created */
        key_revoke_locked(key);

        key->payload.data = NULL;
        ctx2gctx_keyring(ctx)->gck_key = NULL;

        /* once the ctx is separated from the key, the timer is meaningless */
        ctx_clear_timer_kr(ctx);

        ctx_put_kr(ctx, 1);
        key_put(key);
}
/*
 * given a ctx, unbind it from its coupled key, if any.
 * unbind can only be called once, so we don't need to worry about the key
 * being released by someone else.
 */
static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct key *key = ctx2gctx_keyring(ctx)->gck_key;

        if (key) {
                LASSERT(key->payload.data == ctx);

                key_get(key);
                down_write(&key->sem);
                unbind_key_ctx(key, ctx);
                up_write(&key->sem);
                key_put(key);
        }
}
/*
 * given a key, unbind it from its coupled ctx, if any.
 * caller must hold the write lock, as well as a ref of the key.
 */
static void unbind_key_locked(struct key *key)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;

        if (ctx)
                unbind_key_ctx(key, ctx);
}
/*
 * unlist a ctx, and unbind it from its coupled key
 */
static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
        if (ctx_unlist_kr(ctx, 0))
                unbind_ctx_kr(ctx);
}
/*
 * given a key, unlist and unbind the coupled ctx (if any).
 * caller must hold the write lock, as well as a ref of the key.
 */
static void kill_key_locked(struct key *key)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;

        if (ctx && ctx_unlist_kr(ctx, 0))
                unbind_key_locked(key);
}
/*
 * caller should hold one ref on contexts in freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
        struct hlist_node     *pos, *next;
        struct ptlrpc_cli_ctx *ctx;

        hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
                hlist_del_init(&ctx->cc_cache);

                /* we need to wake up waiting reqs here: the context might
                 * be force-released before the upcall finished, in which
                 * case the late-arriving downcall can't find the ctx at
                 * all. */
                sptlrpc_cli_ctx_wakeup(ctx);

                unbind_ctx_kr(ctx);
                ctx_put_kr(ctx, 0);
        }
}
/*
 * lookup a root context directly in a sec; return the root ctx with a
 * reference taken, or NULL.
 */
static
struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx  *ctx = NULL;

        spin_lock(&sec->ps_lock);

        ctx = gsec_kr->gsk_root_ctx;

        if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
                struct hlist_node     *node;
                struct ptlrpc_cli_ctx *tmp;

                /* for a reverse sec, search the root ctx in the list and
                 * choose the one with the shortest expiry time, which most
                 * likely has an established peer ctx on the client side. */
                hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, cc_cache) {
                        if (ctx == NULL || ctx->cc_expire == 0 ||
                            ctx->cc_expire > tmp->cc_expire) {
                                ctx = tmp;
                                /* promote to be root_ctx */
                                gsec_kr->gsk_root_ctx = ctx;
                        }
                }
        }

        if (ctx) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);
                LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
                atomic_inc(&ctx->cc_refcount);
        }

        spin_unlock(&sec->ps_lock);

        return ctx;
}
#define RVS_CTX_EXPIRE_NICE     (10)

static
void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
                                 struct ptlrpc_cli_ctx *new_ctx,
                                 struct key *key)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct hlist_node      *hnode;
        struct ptlrpc_cli_ctx  *ctx;
        time_t                  now;

        LASSERT(sec_is_reverse(sec));

        spin_lock(&sec->ps_lock);

        now = cfs_time_current_sec();

        /* set all existing ctxs short expiry */
        hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
                if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
                        ctx->cc_early_expire = 1;
                        ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
                }
        }

        /* if there's already a root_ctx, instead of obsoleting the current
         * one immediately, we let it keep operating for a little while;
         * hopefully by the time the first backward rpc with the newest ctx
         * is sent out, the client side already has the peer ctx well
         * established. */
        ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);

        if (key)
                bind_key_ctx(key, new_ctx);

        spin_unlock(&sec->ps_lock);
}
static void construct_key_desc(void *buf, int bufsize,
                               struct ptlrpc_sec *sec, uid_t uid)
{
        snprintf(buf, bufsize, "%d@%x", uid, sec->ps_id);
        ((char *) buf)[bufsize - 1] = '\0';
}
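/* for illustration: a uid of 500 on a sec with ps_id 0x1a yields the key
 * description "500@1a" (the sec id is formatted in hex). the values here
 * are made up; the format string above is authoritative. */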
/****************************************
 * sec apis                             *
 ****************************************/

static
struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
                                      struct ptlrpc_svc_ctx *svcctx,
                                      struct sptlrpc_flavor *sf)
{
        struct gss_sec_keyring *gsec_kr;
        ENTRY;

        OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
        if (gsec_kr == NULL)
                RETURN(NULL);

        CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
        gsec_kr->gsk_root_ctx = NULL;
        mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_init(&gsec_kr->gsk_uc_lock);
#endif

        if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
                                  imp, svcctx, sf))
                goto err_free;

        if (svcctx != NULL &&
            sec_install_rctx_kr(&gsec_kr->gsk_base.gs_base, svcctx)) {
                gss_sec_destroy_common(&gsec_kr->gsk_base);
                goto err_free;
        }

        RETURN(&gsec_kr->gsk_base.gs_base);

err_free:
        OBD_FREE(gsec_kr, sizeof(*gsec_kr));
        RETURN(NULL);
}
static
void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec         *gsec = sec2gsec(sec);
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);

        LASSERT(hlist_empty(&gsec_kr->gsk_clist));
        LASSERT(gsec_kr->gsk_root_ctx == NULL);

        gss_sec_destroy_common(gsec);

        OBD_FREE(gsec_kr, sizeof(*gsec_kr));
}
static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
{
        /* apart from the ROOTONLY flag, treat it as the root user only if
         * the real uid is 0; euid/fsuid being 0 is handled as a setuid
         * scenario */
        if (sec_is_rootonly(sec) || (vcred->vc_uid == 0))
                return 1;
        else
                return 0;
}
/*
 * unlink the request key from its ring, to which it was linked during
 * request_key(). sadly, we have to 'guess' which keyring it was linked to.
 *
 * FIXME this code is fragile; it depends on how request_key_link() is
 * implemented.
 */
static void request_key_unlink(struct key *key)
{
        struct task_struct *tsk = current;
        struct key         *ring;

        switch (task_aux(tsk)->jit_keyring) {
        case KEY_REQKEY_DEFL_DEFAULT:
        case KEY_REQKEY_DEFL_THREAD_KEYRING:
                ring = key_get(task_aux(tsk)->thread_keyring);
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_PROCESS_KEYRING:
                ring = key_get(tsk->signal->process_keyring);
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_SESSION_KEYRING:
                rcu_read_lock();
                ring = key_get(rcu_dereference(tsk->signal->session_keyring));
                rcu_read_unlock();
                if (ring)
                        break;
        case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
                ring = key_get(tsk->user->session_keyring);
                break;
        case KEY_REQKEY_DEFL_USER_KEYRING:
                ring = key_get(tsk->user->uid_keyring);
                break;
        case KEY_REQKEY_DEFL_GROUP_KEYRING:
        default:
                LBUG();
        }

        LASSERT(ring);
        key_unlink(ring, key);
        key_put(ring);
}
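/* the deliberate fall-through above mirrors the kernel's default keyring
 * selection for instantiated keys: try the thread keyring, then the process
 * keyring, then the session keyrings. this describes the guess being made
 * here, not a guarantee that holds for every kernel version. */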
static
struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
                                              struct vfs_cred *vcred,
                                              int create, int remove_dead)
{
        struct obd_import      *imp = sec->ps_import;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx  *ctx = NULL;
        unsigned int            is_root = 0, create_new = 0;
        struct key             *key;
        char                    desc[24];
        char                   *coinfo;
        int                     coinfo_size;
        char                   *co_flags = "";
        ENTRY;

        LASSERT(imp != NULL);

        is_root = user_is_root(sec, vcred);

        /* a little bit of optimization for root contexts */
        if (is_root) {
                ctx = sec_lookup_root_ctx_kr(sec);
                /*
                 * Only lookup directly for REVERSE sec, which should
                 * always succeed.
                 */
                if (ctx || sec_is_reverse(sec))
                        RETURN(ctx);
        }

        LASSERT(create != 0);

        /* for a root context, obtain the lock and check again, this time
         * holding the root upcall lock, to make sure nobody else populated
         * a new root context after the last check. */
        if (is_root) {
                mutex_lock(&gsec_kr->gsk_root_uc_lock);

                ctx = sec_lookup_root_ctx_kr(sec);
                if (ctx)
                        goto out;

                /* update reverse handle for root user */
                sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();

                co_flags = "r";
        }

        /* in case of setuid, the key will be constructed as owned by
         * fsuid/fsgid, but we do authentication based on the real uid/gid.
         * the key permission bits will be exactly POS_ALL, so only processes
         * which possess this key can access it, although the quota might be
         * counted against others (fsuid/fsgid).
         *
         * the keyring will use fsuid/fsgid as upcall parameters, so we have
         * to encode the real uid/gid into the callout info.
         */

        construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

        /* callout info format:
         * secid:mech:uid:gid:flags:svc_type:peer_nid:target_uuid
         */
        coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
        OBD_ALLOC(coinfo, coinfo_size);
        if (coinfo == NULL)
                goto out;

        snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%d:"LPX64":%s",
                 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
                 vcred->vc_uid, vcred->vc_gid,
                 co_flags, import_to_gss_svc(imp),
                 imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name);
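        /* for illustration, with made-up values the callout info looks
         * something like "5:krb5:500:500:r:2:0x200000401:lustre-MDT0000_UUID"
         * -- sec id 5, mech krb5, uid/gid 500, the "r" root flag, svc type 2,
         * then the peer nid in hex and the target obd name. none of these
         * literal values is taken from real output; the snprintf format
         * above is authoritative. */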
        keyring_upcall_lock(gsec_kr);
        key = request_key(&gss_key_type, desc, coinfo);
        keyring_upcall_unlock(gsec_kr);

        OBD_FREE(coinfo, coinfo_size);

        if (IS_ERR(key)) {
                CERROR("failed request key: %ld\n", PTR_ERR(key));
                goto out;
        }

        /* once payload.data is pointed to a ctx, it never changes until we
         * de-associate them; but a parallel request_key() may return a key
         * with payload.data == NULL at the same time, so we still need the
         * write lock of key->sem to serialize them. */
        down_write(&key->sem);

        if (likely(key->payload.data != NULL)) {
                ctx = key->payload.data;

                LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
                LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
                LASSERT(atomic_read(&key->usage) >= 2);

                /* simply take a ref and return. it's the upper layer's
                 * responsibility to detect & replace dead ctxs. */
                atomic_inc(&ctx->cc_refcount);
        } else {
                /* pre-initialize with a cli_ctx. this can't be done in
                 * key_instantiate() because we don't have enough information
                 * there. */
                ctx = ctx_create_kr(sec, vcred);
                if (ctx != NULL) {
                        ctx_enlist_kr(ctx, is_root, 0);
                        bind_key_ctx(key, ctx);

                        ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);

                        CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
                               key, ctx, sec);
                } else {
                        /* we'd prefer to call key_revoke(), but we'd rather
                         * revoke it within this key->sem locked period. */
                        key_revoke_locked(key);
                }

                create_new = 1;
        }

        up_write(&key->sem);

        if (is_root && create_new)
                request_key_unlink(key);

        key_put(key);
out:
        if (is_root)
                mutex_unlock(&gsec_kr->gsk_root_uc_lock);
        RETURN(ctx);
}
static
void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx,
                            int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        ctx_release_kr(ctx, sync);
}
/*
 * flush the context of a normal user: we must resort to the keyring itself
 * to find out which contexts belong to us.
 *
 * Note here we intend only to flush _our own_ context; the "uid" is
 * ignored in the search.
 */
static
void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
                             uid_t uid,
                             int grace, int force)
{
        struct key *key;
        char        desc[24];

        /* nothing to do for reverse or rootonly sec */
        if (sec_is_reverse(sec) || sec_is_rootonly(sec))
                return;

        construct_key_desc(desc, sizeof(desc), sec, uid);

        /* there should be only one valid key, but we loop anyway in case
         * of any weird cases */
        for (;;) {
                key = request_key(&gss_key_type, desc, NULL);
                if (IS_ERR(key)) {
                        CWARN("No more key found for current user\n");
                        break;
                }

                down_write(&key->sem);

                kill_key_locked(key);

                /* kill_key_locked() should usually revoke the key, but we
                 * revoke it again to make sure, since in some cases the key
                 * may not be well coupled with a context. */
                key_revoke_locked(key);

                up_write(&key->sem);

                key_put(key);
        }
}
/*
 * flush the context of root, or of all users; we iterate through the list.
 */
static
void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
                             uid_t uid,
                             int grace, int force)
{
        struct gss_sec_keyring *gsec_kr;
        struct hlist_head       freelist = CFS_HLIST_HEAD_INIT;
        struct hlist_node      *pos, *next;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        gsec_kr = sec2gsec_keyring(sec);

        spin_lock(&sec->ps_lock);
        hlist_for_each_entry_safe(ctx, pos, next,
                                  &gsec_kr->gsk_clist, cc_cache) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);

                if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
                        continue;

                /* at this moment there are at least 2 base references:
                 * the key association and being in-list. */
                if (atomic_read(&ctx->cc_refcount) > 2) {
                        if (!force)
                                continue;
                        CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
                              ctx, ctx->cc_vcred.vc_uid,
                              sec2target_str(ctx->cc_sec),
                              atomic_read(&ctx->cc_refcount) - 2);
                }

                set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
                if (!grace)
                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                atomic_inc(&ctx->cc_refcount);

                if (ctx_unlist_kr(ctx, 1)) {
                        hlist_add_head(&ctx->cc_cache, &freelist);
                } else {
                        LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
                        atomic_dec(&ctx->cc_refcount);
                }
        }
        spin_unlock(&sec->ps_lock);

        dispose_ctx_list_kr(&freelist);
        EXIT;
}
static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
                               uid_t uid,
                               int grace, int force)
{
        ENTRY;

        CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
               sec, atomic_read(&sec->ps_refcount), atomic_read(&sec->ps_nctx),
               uid, grace, force);

        if (uid != -1 && uid != 0)
                flush_user_ctx_cache_kr(sec, uid, grace, force);
        else
                flush_spec_ctx_cache_kr(sec, uid, grace, force);

        RETURN(0);
}
static
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct hlist_head       freelist = CFS_HLIST_HEAD_INIT;
        struct hlist_node      *pos, *next;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        CWARN("running gc\n");

        spin_lock(&sec->ps_lock);
        hlist_for_each_entry_safe(ctx, pos, next,
                                  &gsec_kr->gsk_clist, cc_cache) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);

                atomic_inc(&ctx->cc_refcount);

                if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
                        hlist_add_head(&ctx->cc_cache, &freelist);
                        CWARN("unhashed ctx %p\n", ctx);
                } else {
                        LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
                        atomic_dec(&ctx->cc_refcount);
                }
        }
        spin_unlock(&sec->ps_lock);

        dispose_ctx_list_kr(&freelist);
        EXIT;
}
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct hlist_node      *pos, *next;
        struct ptlrpc_cli_ctx  *ctx;
        struct gss_cli_ctx     *gctx;
        time_t                  now = cfs_time_current_sec();
        ENTRY;

        spin_lock(&sec->ps_lock);
        hlist_for_each_entry_safe(ctx, pos, next,
                                  &gsec_kr->gsk_clist, cc_cache) {
                struct key *key;
                char        flags_str[40];
                char        mech[40];

                gctx = ctx2gctx(ctx);
                key = ctx2gctx_keyring(ctx)->gck_key;

                gss_cli_ctx_flags2str(ctx->cc_flags,
                                      flags_str, sizeof(flags_str));

                if (gctx->gc_mechctx)
                        lgss_display(gctx->gc_mechctx, mech, sizeof(mech));
                else
                        snprintf(mech, sizeof(mech), "N/A");
                mech[sizeof(mech) - 1] = '\0';

                seq_printf(seq, "%p: uid %u, ref %d, expire %ld(%+ld), fl %s, "
                           "seq %d, win %u, key %08x(ref %d), "
                           "hdl "LPX64":"LPX64", mech: %s\n",
                           ctx, ctx->cc_vcred.vc_uid,
                           atomic_read(&ctx->cc_refcount),
                           ctx->cc_expire,
                           ctx->cc_expire ? ctx->cc_expire - now : 0,
                           flags_str,
                           atomic_read(&gctx->gc_seq),
                           gctx->gc_win,
                           key ? key->serial : 0,
                           key ? atomic_read(&key->usage) : 0,
                           gss_handle_to_u64(&gctx->gc_handle),
                           gss_handle_to_u64(&gctx->gc_svc_handle),
                           mech);
        }
        spin_unlock(&sec->ps_lock);

        RETURN(0);
}
/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
{
        /* upcall is already on the way */
        return 0;
}

static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        if (cli_ctx_check_death(ctx)) {
                kill_ctx_kr(ctx);
                return 1;
        }

        if (cli_ctx_is_ready(ctx))
                return 0;
        return 1;
}

static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        CWARN("ctx %p(%d)\n", ctx, atomic_read(&ctx->cc_refcount));
        cli_ctx_expire(ctx);
        kill_ctx_kr(ctx);
}
/****************************************
 * (reverse) service                    *
 ****************************************/

/*
 * a reverse context need not be tied to a keyring at all. here we still
 * keep the variant which binds to a key, for future reference.
 */
#define HAVE_REVERSE_CTX_NOKEY

#ifdef HAVE_REVERSE_CTX_NOKEY

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                        struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_cli_ctx *cli_ctx;
        struct vfs_cred        vcred = { 0, 0 };
        int                    rc;

        LASSERT(sec);
        LASSERT(svc_ctx);

        cli_ctx = ctx_create_kr(sec, &vcred);
        if (cli_ctx == NULL)
                return -ENOMEM;

        rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
        if (rc) {
                CERROR("failed copy reverse cli ctx: %d\n", rc);

                ctx_put_kr(cli_ctx, 1);
                return rc;
        }

        rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);

        ctx_put_kr(cli_ctx, 1);

        return 0;
}
#else /* ! HAVE_REVERSE_CTX_NOKEY */

static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                        struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_cli_ctx *cli_ctx = NULL;
        struct key            *key;
        struct vfs_cred        vcred = { 0, 0 };
        char                   desc[64];
        int                    rc;

        LASSERT(sec);
        LASSERT(svc_ctx);

        construct_key_desc(desc, sizeof(desc), sec, 0);

        key = key_alloc(&gss_key_type, desc, 0, 0,
                        KEY_POS_ALL | KEY_USR_ALL, 1);
        if (IS_ERR(key)) {
                CERROR("failed to alloc key: %ld\n", PTR_ERR(key));
                return PTR_ERR(key);
        }

        rc = key_instantiate_and_link(key, NULL, 0, NULL, NULL);
        if (rc) {
                CERROR("failed to instantiate key: %d\n", rc);
                goto err_revoke;
        }

        down_write(&key->sem);

        LASSERT(key->payload.data == NULL);

        cli_ctx = ctx_create_kr(sec, &vcred);
        if (cli_ctx == NULL) {
                rc = -ENOMEM;
                goto err_up;
        }

        rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
        if (rc) {
                CERROR("failed copy reverse cli ctx: %d\n", rc);
                goto err_put;
        }

        rvs_sec_install_root_ctx_kr(sec, cli_ctx, key);

        ctx_put_kr(cli_ctx, 1);
        up_write(&key->sem);

        key_put(key);
        return 0;

err_put:
        ctx_put_kr(cli_ctx, 1);
err_up:
        up_write(&key->sem);
err_revoke:
        key_revoke(key);
        key_put(key);
        return rc;
}

#endif /* HAVE_REVERSE_CTX_NOKEY */
/****************************************
 * service apis                         *
 ****************************************/

static
int gss_svc_accept_kr(struct ptlrpc_request *req)
{
        return gss_svc_accept(&gss_policy_keyring, req);
}

static
int gss_svc_install_rctx_kr(struct obd_import *imp,
                            struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_sec *sec;
        int                rc;

        sec = sptlrpc_import_sec_ref(imp);
        LASSERT(sec);

        rc = sec_install_rctx_kr(sec, svc_ctx);
        sptlrpc_sec_put(sec);

        return rc;
}
/****************************************
 * key apis                             *
 ****************************************/

static
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
        int rc;
        ENTRY;

        if (data != NULL || datalen != 0) {
                CERROR("invalid: data %p, len %lu\n",
                       data, (unsigned long) datalen);
                RETURN(-EINVAL);
        }

        if (key->payload.data != NULL) {
                CERROR("key already has payload\n");
                RETURN(-EINVAL);
        }

        /* link the key to the session keyring, so that the context
         * negotiation rpc fired from user space can find this key. the link
         * will be removed automatically when the upcall processes die.
         *
         * we can't do this through keyctl from userspace, because the upcall
         * might be neither possessor nor owner of the key (setuid).
         *
         * the session keyring is created upon upcall, and doesn't change all
         * the way until the upcall finishes, so the rcu lock is not needed
         * here.
         */
        LASSERT(cfs_current()->signal->session_keyring);

        rc = key_link(cfs_current()->signal->session_keyring, key);
        if (rc) {
                CERROR("failed to link key %08x to keyring %08x: %d\n",
                       key->serial,
                       cfs_current()->signal->session_keyring->serial, rc);
                RETURN(rc);
        }

        CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
        RETURN(0);
}
/*
 * called with the key semaphore write-locked, which means we can operate
 * on the context without fear of losing the refcount.
 */
static
int gss_kt_update(struct key *key, const void *data, size_t datalen)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;
        struct gss_cli_ctx    *gctx;
        rawobj_t               tmpobj = RAWOBJ_EMPTY;
        int                    rc;
        ENTRY;

        if (data == NULL || datalen == 0) {
                CWARN("invalid: data %p, len %lu\n",
                      data, (unsigned long) datalen);
                RETURN(-EINVAL);
        }

        /* there's a race between the userspace parent and child processes.
         * if the child finishes negotiation too fast and calls kt_update(),
         * the ctx might still be NULL. but the key will eventually be
         * associated with a context, or be revoked. if the key status is
         * fine, return -EAGAIN to let userspace sleep a while and call
         * again. */
        if (ctx == NULL) {
                CWARN("race in userspace. key %p(%x) flags %lx\n",
                      key, key->serial, key->flags);

                rc = key_validate(key);
                if (rc == 0)
                        RETURN(-EAGAIN);
                else
                        RETURN(rc);
        }

        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        ctx_clear_timer_kr(ctx);

        /* don't proceed if already refreshed */
        if (cli_ctx_is_refreshed(ctx)) {
                CWARN("ctx already done refresh\n");
                sptlrpc_cli_ctx_wakeup(ctx);
                RETURN(0);
        }

        sptlrpc_cli_ctx_get(ctx);
        gctx = ctx2gctx(ctx);

        rc = buffer_extract_bytes(&data, &datalen, &gctx->gc_win,
                                  sizeof(gctx->gc_win));
        if (rc) {
                CERROR("failed extract seq_win\n");
                goto out;
        }

        if (gctx->gc_win == 0) {
                __u32 nego_rpc_err, nego_gss_err;

                rc = buffer_extract_bytes(&data, &datalen, &nego_rpc_err,
                                          sizeof(nego_rpc_err));
                if (rc) {
                        CERROR("failed to extract rpc rc\n");
                        goto out;
                }

                rc = buffer_extract_bytes(&data, &datalen, &nego_gss_err,
                                          sizeof(nego_gss_err));
                if (rc) {
                        CERROR("failed to extract gss rc\n");
                        goto out;
                }

                CERROR("negotiation: rpc err %d, gss err %x\n",
                       nego_rpc_err, nego_gss_err);

                rc = nego_rpc_err ? nego_rpc_err : -EACCES;
        } else {
                rc = rawobj_extract_local_alloc(&gctx->gc_handle,
                                                (__u32 **) &data, &datalen);
                if (rc) {
                        CERROR("failed extract handle\n");
                        goto out;
                }

                rc = rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen);
                if (rc) {
                        CERROR("failed extract mech\n");
                        goto out;
                }

                rc = lgss_import_sec_context(&tmpobj,
                                             sec2gsec(ctx->cc_sec)->gs_mech,
                                             &gctx->gc_mechctx);
                if (rc != GSS_S_COMPLETE)
                        CERROR("failed import context\n");
                else
                        rc = 0;
        }
out:
        /* we don't care what the current status of this ctx is, even if
         * someone else is operating on the ctx at the same time. we just
         * add our own actions here. */
        if (rc == 0) {
                gss_cli_ctx_uptodate(gctx);
        } else {
                /* this will also revoke the key. it has to be done before
                 * waking up waiters, otherwise they can find the stale key */
                kill_key_locked(key);

                cli_ctx_expire(ctx);

                if (rc != -ERESTART)
                        set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
        }

        sptlrpc_cli_ctx_wakeup(ctx);

        /* let user space think it's a success */
        sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}
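/*
 * for reference, the downcall buffer parsed by gss_kt_update() above is
 * laid out by the userspace side as:
 *
 *   seq_win (u32)
 *   if seq_win == 0:  rpc_err (u32), gss_err (u32)      -- negotiation failed
 *   else:             gc_handle (rawobj), mech context (rawobj)
 *
 * this is simply a restatement of the extraction sequence in the code
 * above, not a separate protocol specification.
 */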
static
int gss_kt_match(const struct key *key, const void *desc)
{
        return (strcmp(key->description, (const char *) desc) == 0);
}

static
void gss_kt_destroy(struct key *key)
{
        LASSERT(key->payload.data == NULL);
        CDEBUG(D_SEC, "destroy key %p\n", key);
}

static
void gss_kt_describe(const struct key *key, struct seq_file *s)
{
        if (key->description == NULL)
                seq_puts(s, "[null]");
        else
                seq_puts(s, key->description);
}
static struct key_type gss_key_type =
{
        .name           = "lgssc",
        .def_datalen    = 0,
        .instantiate    = gss_kt_instantiate,
        .update         = gss_kt_update,
        .match          = gss_kt_match,
        .destroy        = gss_kt_destroy,
        .describe       = gss_kt_describe,
};
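/*
 * note on usage: the request_key() calls above trigger the kernel's
 * /sbin/request-key helper, which resolves keys of this type through its
 * configuration (conventionally an entry mapping this key type to the
 * lustre gss userspace daemon, e.g. in /etc/request-key.conf). the exact
 * configuration path and daemon name depend on the distribution and lustre
 * version, so treat this as a pointer rather than a specification.
 */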
/****************************************
 * lustre gss keyring policy            *
 ****************************************/

static struct ptlrpc_ctx_ops gss_keyring_ctxops = {
        .match                  = gss_cli_ctx_match,
        .refresh                = gss_cli_ctx_refresh_kr,
        .validate               = gss_cli_ctx_validate_kr,
        .die                    = gss_cli_ctx_die_kr,
        .sign                   = gss_cli_ctx_sign,
        .verify                 = gss_cli_ctx_verify,
        .seal                   = gss_cli_ctx_seal,
        .unseal                 = gss_cli_ctx_unseal,
        .wrap_bulk              = gss_cli_ctx_wrap_bulk,
        .unwrap_bulk            = gss_cli_ctx_unwrap_bulk,
};
static struct ptlrpc_sec_cops gss_sec_keyring_cops = {
        .create_sec             = gss_sec_create_kr,
        .destroy_sec            = gss_sec_destroy_kr,
        .kill_sec               = gss_sec_kill,
        .lookup_ctx             = gss_sec_lookup_ctx_kr,
        .release_ctx            = gss_sec_release_ctx_kr,
        .flush_ctx_cache        = gss_sec_flush_ctx_cache_kr,
        .gc_ctx                 = gss_sec_gc_ctx_kr,
        .install_rctx           = gss_sec_install_rctx,
        .alloc_reqbuf           = gss_alloc_reqbuf,
        .free_reqbuf            = gss_free_reqbuf,
        .alloc_repbuf           = gss_alloc_repbuf,
        .free_repbuf            = gss_free_repbuf,
        .enlarge_reqbuf         = gss_enlarge_reqbuf,
        .display                = gss_sec_display_kr,
};
static struct ptlrpc_sec_sops gss_sec_keyring_sops = {
        .accept                 = gss_svc_accept_kr,
        .invalidate_ctx         = gss_svc_invalidate_ctx,
        .alloc_rs               = gss_svc_alloc_rs,
        .authorize              = gss_svc_authorize,
        .free_rs                = gss_svc_free_rs,
        .free_ctx               = gss_svc_free_ctx,
        .unwrap_bulk            = gss_svc_unwrap_bulk,
        .wrap_bulk              = gss_svc_wrap_bulk,
        .install_rctx           = gss_svc_install_rctx_kr,
};
static struct ptlrpc_sec_policy gss_policy_keyring = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "gss.keyring",
        .sp_policy              = SPTLRPC_POLICY_GSS,
        .sp_cops                = &gss_sec_keyring_cops,
        .sp_sops                = &gss_sec_keyring_sops,
};
int __init gss_init_keyring(void)
{
        int rc;

        rc = register_key_type(&gss_key_type);
        if (rc) {
                CERROR("failed to register keyring type: %d\n", rc);
                return rc;
        }

        rc = sptlrpc_register_policy(&gss_policy_keyring);
        if (rc) {
                unregister_key_type(&gss_key_type);
                return rc;
        }

        return 0;
}

void __exit gss_exit_keyring(void)
{
        unregister_key_type(&gss_key_type);
        sptlrpc_unregister_policy(&gss_policy_keyring);
}