1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Modifications for Lustre
5 * Copyright 2004 - 2006, Cluster File Systems, Inc.
7 * Author: Eric Mei <ericm@clusterfs.com>
11 * linux/net/sunrpc/auth_gss.c
13 * RPCSEC_GSS client authentication.
15 * Copyright (c) 2000 The Regents of the University of Michigan.
16 * All rights reserved.
18 * Dug Song <dugsong@monkey.org>
19 * Andy Adamson <andros@umich.edu>
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * 1. Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * 2. Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in the
29 * documentation and/or other materials provided with the distribution.
30 * 3. Neither the name of the University nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
35 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
36 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
37 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
39 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
40 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
41 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
42 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
43 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
44 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 # define EXPORT_SYMTAB
51 #define DEBUG_SUBSYSTEM S_SEC
53 #include <linux/init.h>
54 #include <linux/module.h>
55 #include <linux/slab.h>
56 #include <linux/dcache.h>
58 #include <linux/random.h>
59 #include <linux/mutex.h>
60 #include <linux/crypto.h>
61 #include <asm/atomic.h>
62 struct rpc_clnt; /* for rpc_pipefs */
63 #include <linux/sunrpc/rpc_pipe_fs.h>
65 #include <liblustre.h>
69 #include <obd_class.h>
70 #include <obd_support.h>
71 #include <lustre/lustre_idl.h>
72 #include <lustre_sec.h>
73 #include <lustre_net.h>
74 #include <lustre_import.h>
77 #include "gss_internal.h"
80 static struct ptlrpc_sec_policy gss_policy_pipefs;
81 static struct ptlrpc_ctx_ops gss_pipefs_ctxops;
83 static int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx);
85 static int gss_sec_pipe_upcall_init(struct gss_sec *gsec)
90 static void gss_sec_pipe_upcall_fini(struct gss_sec *gsec)
94 /****************************************
95 * internel context helpers *
96 ****************************************/
99 struct ptlrpc_cli_ctx *ctx_create_pf(struct ptlrpc_sec *sec,
100 struct vfs_cred *vcred)
102 struct gss_cli_ctx *gctx;
109 rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
110 &gss_pipefs_ctxops, vcred);
116 return &gctx->gc_base;
120 void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
122 struct gss_cli_ctx *gctx = ctx2gctx(ctx);
125 rc = gss_cli_ctx_fini_common(sec, ctx);
132 CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
133 sec->ps_policy->sp_name, sec);
134 sptlrpc_sec_destroy(sec);
139 void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
141 set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
142 atomic_inc(&ctx->cc_refcount);
143 hlist_add_head(&ctx->cc_cache, hash);
147 * caller must hold spinlock
150 void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
152 LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
153 LASSERT(atomic_read(&ctx->cc_refcount) > 0);
154 LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
155 LASSERT(!hlist_unhashed(&ctx->cc_cache));
157 clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
159 if (atomic_dec_and_test(&ctx->cc_refcount)) {
160 __hlist_del(&ctx->cc_cache);
161 hlist_add_head(&ctx->cc_cache, freelist);
163 hlist_del_init(&ctx->cc_cache);
168 * return 1 if the context is dead.
171 int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
173 if (cli_ctx_check_death(ctx)) {
175 ctx_unhash_pf(ctx, freelist);
183 int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
184 struct hlist_head *freelist)
186 LASSERT(ctx->cc_sec);
187 LASSERT(atomic_read(&ctx->cc_refcount) > 0);
188 LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
190 return ctx_check_death_pf(ctx, freelist);
194 int ctx_match_pf(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
196 /* a little bit optimization for null policy */
197 if (!ctx->cc_ops->match)
200 return ctx->cc_ops->match(ctx, vcred);
204 void ctx_list_destroy_pf(struct hlist_head *head)
206 struct ptlrpc_cli_ctx *ctx;
208 while (!hlist_empty(head)) {
209 ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_cache);
211 LASSERT(atomic_read(&ctx->cc_refcount) == 0);
212 LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
214 hlist_del_init(&ctx->cc_cache);
215 ctx_destroy_pf(ctx->cc_sec, ctx);
219 /****************************************
221 ****************************************/
224 int gss_cli_ctx_validate_pf(struct ptlrpc_cli_ctx *ctx)
226 if (ctx_check_death_pf(ctx, NULL))
228 if (cli_ctx_is_ready(ctx))
234 void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
236 LASSERT(ctx->cc_sec);
237 LASSERT(atomic_read(&ctx->cc_refcount) > 0);
241 spin_lock(&ctx->cc_sec->ps_lock);
243 if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
244 LASSERT(!hlist_unhashed(&ctx->cc_cache));
245 LASSERT(atomic_read(&ctx->cc_refcount) > 1);
247 hlist_del_init(&ctx->cc_cache);
248 if (atomic_dec_and_test(&ctx->cc_refcount))
252 spin_unlock(&ctx->cc_sec->ps_lock);
255 /****************************************
256 * reverse context installation *
257 ****************************************/
260 unsigned int ctx_hash_index(int hashsize, __u64 key)
262 return (unsigned int) (key & ((__u64) hashsize - 1));
266 void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
267 struct ptlrpc_cli_ctx *new)
269 struct gss_sec_pipefs *gsec_pf;
270 struct ptlrpc_cli_ctx *ctx;
271 struct hlist_node *pos, *next;
272 CFS_HLIST_HEAD(freelist);
276 gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
278 hash = ctx_hash_index(gsec_pf->gsp_chash_size,
279 (__u64) new->cc_vcred.vc_uid);
280 LASSERT(hash < gsec_pf->gsp_chash_size);
282 spin_lock(&gsec->gs_base.ps_lock);
284 hlist_for_each_entry_safe(ctx, pos, next,
285 &gsec_pf->gsp_chash[hash], cc_cache) {
286 if (!ctx_match_pf(ctx, &new->cc_vcred))
290 ctx_unhash_pf(ctx, &freelist);
294 ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
295 atomic_inc(&gsec->gs_base.ps_busy);
297 spin_unlock(&gsec->gs_base.ps_lock);
299 ctx_list_destroy_pf(&freelist);
304 int gss_install_rvs_cli_ctx_pf(struct gss_sec *gsec,
305 struct ptlrpc_svc_ctx *svc_ctx)
307 struct vfs_cred vcred;
308 struct ptlrpc_cli_ctx *cli_ctx;
315 cli_ctx = ctx_create_pf(&gsec->gs_base, &vcred);
319 rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
321 ctx_destroy_pf(cli_ctx->cc_sec, cli_ctx);
325 gss_sec_ctx_replace_pf(gsec, cli_ctx);
330 void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
331 struct hlist_head *freelist)
333 struct ptlrpc_sec *sec;
334 struct ptlrpc_cli_ctx *ctx;
335 struct hlist_node *pos, *next;
339 sec = &gsec_pf->gsp_base.gs_base;
341 CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);
343 for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
344 hlist_for_each_entry_safe(ctx, pos, next,
345 &gsec_pf->gsp_chash[i], cc_cache)
346 ctx_check_death_locked_pf(ctx, freelist);
349 sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
354 struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
355 struct ptlrpc_svc_ctx *ctx,
359 struct gss_sec_pipefs *gsec_pf;
360 int alloc_size, hash_size, i;
363 #define GSS_SEC_PIPEFS_CTX_HASH_SIZE (32)
365 if (ctx || flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
368 hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
370 alloc_size = sizeof(*gsec_pf) +
371 sizeof(struct hlist_head) * hash_size;
373 OBD_ALLOC(gsec_pf, alloc_size);
377 gsec_pf->gsp_chash_size = hash_size;
378 for (i = 0; i < hash_size; i++)
379 CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
381 if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
382 imp, ctx, flavor, flags))
386 if (gss_sec_pipe_upcall_init(&gsec_pf->gsp_base))
389 if (gss_install_rvs_cli_ctx_pf(&gsec_pf->gsp_base, ctx))
393 RETURN(&gsec_pf->gsp_base.gs_base);
396 gss_sec_destroy_common(&gsec_pf->gsp_base);
398 OBD_FREE(gsec_pf, alloc_size);
403 void gss_sec_destroy_pf(struct ptlrpc_sec *sec)
405 struct gss_sec_pipefs *gsec_pf;
406 struct gss_sec *gsec;
408 CWARN("destroy %s@%p\n", sec->ps_policy->sp_name, sec);
410 gsec = container_of(sec, struct gss_sec, gs_base);
411 gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
413 LASSERT(gsec_pf->gsp_chash);
414 LASSERT(gsec_pf->gsp_chash_size);
416 gss_sec_pipe_upcall_fini(gsec);
418 gss_sec_destroy_common(gsec);
420 OBD_FREE(gsec, sizeof(*gsec_pf) +
421 sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
425 struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
426 struct vfs_cred *vcred,
427 int create, int remove_dead)
429 struct gss_sec *gsec;
430 struct gss_sec_pipefs *gsec_pf;
431 struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
432 struct hlist_head *hash_head;
433 struct hlist_node *pos, *next;
434 CFS_HLIST_HEAD(freelist);
435 unsigned int hash, gc = 0, found = 0;
440 gsec = container_of(sec, struct gss_sec, gs_base);
441 gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
443 hash = ctx_hash_index(gsec_pf->gsp_chash_size,
444 (__u64) vcred->vc_uid);
445 hash_head = &gsec_pf->gsp_chash[hash];
446 LASSERT(hash < gsec_pf->gsp_chash_size);
449 spin_lock(&sec->ps_lock);
451 /* gc_next == 0 means never do gc */
452 if (remove_dead && sec->ps_gc_next &&
453 cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
454 gss_ctx_cache_gc_pf(gsec_pf, &freelist);
458 hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
460 ctx_check_death_locked_pf(ctx,
461 remove_dead ? &freelist : NULL))
464 if (ctx_match_pf(ctx, vcred)) {
471 if (new && new != ctx) {
472 /* lost the race, just free it */
473 hlist_add_head(&new->cc_cache, &freelist);
477 /* hot node, move to head */
478 if (hash_head->first != &ctx->cc_cache) {
479 __hlist_del(&ctx->cc_cache);
480 hlist_add_head(&ctx->cc_cache, hash_head);
483 /* don't allocate for reverse sec */
484 if (sec_is_reverse(sec)) {
485 spin_unlock(&sec->ps_lock);
490 ctx_enhash_pf(new, hash_head);
493 spin_unlock(&sec->ps_lock);
494 new = ctx_create_pf(sec, vcred);
496 clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
505 atomic_inc(&ctx->cc_refcount);
507 spin_unlock(&sec->ps_lock);
509 /* the allocator of the context must give the first push to refresh */
512 gss_cli_ctx_refresh_pf(new);
515 ctx_list_destroy_pf(&freelist);
520 void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec,
521 struct ptlrpc_cli_ctx *ctx,
524 LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
525 LASSERT(hlist_unhashed(&ctx->cc_cache));
527 /* if required async, we must clear the UPTODATE bit to prevent extra
528 * rpcs during destroy procedure. */
530 clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
532 /* destroy this context */
533 ctx_destroy_pf(sec, ctx);
537 * @uid: which user. "-1" means flush all.
538 * @grace: mark context DEAD, allow graceful destroy like notify
540 * @force: also flush busy entries.
542 * return the number of busy context encountered.
544 * In any cases, never touch "eternal" contexts.
547 int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
549 int grace, int force)
551 struct gss_sec *gsec;
552 struct gss_sec_pipefs *gsec_pf;
553 struct ptlrpc_cli_ctx *ctx;
554 struct hlist_node *pos, *next;
555 CFS_HLIST_HEAD(freelist);
559 might_sleep_if(grace);
561 gsec = container_of(sec, struct gss_sec, gs_base);
562 gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
564 spin_lock(&sec->ps_lock);
565 for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
566 hlist_for_each_entry_safe(ctx, pos, next,
567 &gsec_pf->gsp_chash[i], cc_cache) {
568 LASSERT(atomic_read(&ctx->cc_refcount) > 0);
570 if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
573 if (atomic_read(&ctx->cc_refcount) > 1) {
578 CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
580 atomic_read(&ctx->cc_refcount),
581 ctx, ctx->cc_vcred.vc_uid,
582 sec2target_str(ctx->cc_sec), grace);
584 ctx_unhash_pf(ctx, &freelist);
586 set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
588 clear_bit(PTLRPC_CTX_UPTODATE_BIT,
592 spin_unlock(&sec->ps_lock);
594 ctx_list_destroy_pf(&freelist);
598 /****************************************
600 ****************************************/
603 int gss_svc_accept_pf(struct ptlrpc_request *req)
605 return gss_svc_accept(&gss_policy_pipefs, req);
609 int gss_svc_install_rctx_pf(struct obd_import *imp,
610 struct ptlrpc_svc_ctx *ctx)
612 struct gss_sec *gsec;
614 LASSERT(imp->imp_sec);
617 gsec = container_of(imp->imp_sec, struct gss_sec, gs_base);
618 return gss_install_rvs_cli_ctx_pf(gsec, ctx);
621 /****************************************
622 * rpc_pipefs definitions *
623 ****************************************/
625 #define LUSTRE_PIPE_ROOT "/lustre"
626 #define LUSTRE_PIPE_KRB5 LUSTRE_PIPE_ROOT"/krb5"
628 struct gss_upcall_msg_data {
632 __u32 gum_svc; /* MDS/OSS... */
633 __u64 gum_nid; /* peer NID */
634 __u8 gum_obd[64]; /* client obd name */
637 struct gss_upcall_msg {
638 struct rpc_pipe_msg gum_base;
639 atomic_t gum_refcount;
640 struct list_head gum_list;
642 struct gss_sec *gum_gsec;
643 struct gss_cli_ctx *gum_gctx;
644 struct gss_upcall_msg_data gum_data;
647 static atomic_t upcall_seq = ATOMIC_INIT(0);
650 __u32 upcall_get_sequence(void)
652 return (__u32) atomic_inc_return(&upcall_seq);
661 __u32 mech_name2idx(const char *name)
663 LASSERT(!strcmp(name, "krb5"));
667 /* pipefs dentries for each mechanisms */
668 static struct dentry *de_pipes[MECH_MAX] = { NULL, };
669 /* all upcall messgaes linked here */
670 static struct list_head upcall_lists[MECH_MAX];
671 /* and protected by this */
672 static spinlock_t upcall_locks[MECH_MAX];
675 void upcall_list_lock(int idx)
677 spin_lock(&upcall_locks[idx]);
681 void upcall_list_unlock(int idx)
683 spin_unlock(&upcall_locks[idx]);
687 void upcall_msg_enlist(struct gss_upcall_msg *msg)
689 __u32 idx = msg->gum_mechidx;
691 upcall_list_lock(idx);
692 list_add(&msg->gum_list, &upcall_lists[idx]);
693 upcall_list_unlock(idx);
697 void upcall_msg_delist(struct gss_upcall_msg *msg)
699 __u32 idx = msg->gum_mechidx;
701 upcall_list_lock(idx);
702 list_del_init(&msg->gum_list);
703 upcall_list_unlock(idx);
706 /****************************************
707 * rpc_pipefs upcall helpers *
708 ****************************************/
711 void gss_release_msg(struct gss_upcall_msg *gmsg)
714 LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
716 if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
721 if (gmsg->gum_gctx) {
722 sptlrpc_cli_ctx_wakeup(&gmsg->gum_gctx->gc_base);
723 sptlrpc_cli_ctx_put(&gmsg->gum_gctx->gc_base, 1);
724 gmsg->gum_gctx = NULL;
727 LASSERT(list_empty(&gmsg->gum_list));
728 LASSERT(list_empty(&gmsg->gum_base.list));
734 void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
736 __u32 idx = gmsg->gum_mechidx;
738 LASSERT(idx < MECH_MAX);
739 LASSERT_SPIN_LOCKED(&upcall_locks[idx]);
741 if (list_empty(&gmsg->gum_list))
744 list_del_init(&gmsg->gum_list);
745 LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
746 atomic_dec(&gmsg->gum_refcount);
750 void gss_unhash_msg(struct gss_upcall_msg *gmsg)
752 __u32 idx = gmsg->gum_mechidx;
754 LASSERT(idx < MECH_MAX);
755 upcall_list_lock(idx);
756 gss_unhash_msg_nolock(gmsg);
757 upcall_list_unlock(idx);
761 void gss_msg_fail_ctx(struct gss_upcall_msg *gmsg)
763 if (gmsg->gum_gctx) {
764 struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
766 LASSERT(atomic_read(&ctx->cc_refcount) > 0);
767 sptlrpc_cli_ctx_expire(ctx);
768 set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
773 struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq)
775 struct gss_upcall_msg *gmsg;
777 upcall_list_lock(mechidx);
778 list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
779 if (gmsg->gum_data.gum_seq != seq)
782 LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
783 LASSERT(gmsg->gum_mechidx == mechidx);
785 atomic_inc(&gmsg->gum_refcount);
786 upcall_list_unlock(mechidx);
789 upcall_list_unlock(mechidx);
794 int simple_get_bytes(char **buf, __u32 *buflen, void *res, __u32 reslen)
796 if (*buflen < reslen) {
797 CERROR("buflen %u < %u\n", *buflen, reslen);
801 memcpy(res, *buf, reslen);
807 /****************************************
809 ****************************************/
812 ssize_t gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
813 char *dst, size_t buflen)
815 char *data = (char *)msg->data + msg->copied;
816 ssize_t mlen = msg->len;
822 left = copy_to_user(dst, data, mlen);
834 ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
836 struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
837 struct gss_upcall_msg *gss_msg;
838 struct ptlrpc_cli_ctx *ctx;
839 struct gss_cli_ctx *gctx = NULL;
843 __u32 mechidx, seq, gss_err;
846 mechidx = (__u32) (long) rpci->private;
847 LASSERT(mechidx < MECH_MAX);
849 OBD_ALLOC(buf, mlen);
853 if (copy_from_user(buf, src, mlen)) {
854 CERROR("failed copy user space data\n");
855 GOTO(out_free, rc = -EFAULT);
860 /* data passed down format:
864 * - wire_ctx (rawobj)
865 * - mech_ctx (rawobj)
867 if (simple_get_bytes(&data, &datalen, &seq, sizeof(seq))) {
868 CERROR("fail to get seq\n");
869 GOTO(out_free, rc = -EFAULT);
872 gss_msg = gss_find_upcall(mechidx, seq);
874 CERROR("upcall %u has aborted earlier\n", seq);
875 GOTO(out_free, rc = -EINVAL);
878 gss_unhash_msg(gss_msg);
879 gctx = gss_msg->gum_gctx;
881 LASSERT(atomic_read(&gctx->gc_base.cc_refcount) > 0);
883 /* timeout is not in use for now */
884 if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
885 GOTO(out_msg, rc = -EFAULT);
887 /* lgssd signal an error by gc_win == 0 */
888 if (simple_get_bytes(&data, &datalen, &gctx->gc_win,
889 sizeof(gctx->gc_win)))
890 GOTO(out_msg, rc = -EFAULT);
892 if (gctx->gc_win == 0) {
897 if (simple_get_bytes(&data, &datalen, &rc, sizeof(rc)))
898 GOTO(out_msg, rc = -EFAULT);
899 if (simple_get_bytes(&data, &datalen, &gss_err,sizeof(gss_err)))
900 GOTO(out_msg, rc = -EFAULT);
902 if (rc == 0 && gss_err == GSS_S_COMPLETE) {
903 CWARN("both rpc & gss error code not set\n");
910 if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen))
911 GOTO(out_msg, rc = -EFAULT);
912 if (rawobj_dup(&gctx->gc_handle, &tmpobj))
913 GOTO(out_msg, rc = -ENOMEM);
916 if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen))
917 GOTO(out_msg, rc = -EFAULT);
918 gss_err = lgss_import_sec_context(&tmpobj,
919 gss_msg->gum_gsec->gs_mech,
924 if (likely(rc == 0 && gss_err == GSS_S_COMPLETE)) {
925 gss_cli_ctx_uptodate(gctx);
927 ctx = &gctx->gc_base;
928 sptlrpc_cli_ctx_expire(ctx);
929 if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
930 set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
932 CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
933 ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
934 test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
935 "fatal error" : "non-fatal");
941 gss_release_msg(gss_msg);
946 * hack pipefs: always return asked length unless all following
947 * downcalls might be messed up. */
953 void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
955 struct gss_upcall_msg *gmsg;
956 struct gss_upcall_msg_data *gumd;
957 static cfs_time_t ratelimit = 0;
960 LASSERT(list_empty(&msg->list));
962 /* normally errno is >= 0 */
963 if (msg->errno >= 0) {
968 gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
969 gumd = &gmsg->gum_data;
970 LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
972 CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
973 "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
974 gumd->gum_nid, (int) sizeof(gumd->gum_obd),
975 gumd->gum_obd, msg->errno);
977 atomic_inc(&gmsg->gum_refcount);
978 gss_unhash_msg(gmsg);
979 if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
980 cfs_time_t now = cfs_time_current_sec();
982 if (cfs_time_after(now, ratelimit)) {
983 CWARN("upcall timed out, is lgssd running?\n");
984 ratelimit = now + 15;
987 gss_msg_fail_ctx(gmsg);
988 gss_release_msg(gmsg);
993 void gss_pipe_release(struct inode *inode)
995 struct rpc_inode *rpci = RPC_I(inode);
999 idx = (__u32) (long) rpci->private;
1000 LASSERT(idx < MECH_MAX);
1002 upcall_list_lock(idx);
1003 while (!list_empty(&upcall_lists[idx])) {
1004 struct gss_upcall_msg *gmsg;
1005 struct gss_upcall_msg_data *gumd;
1007 gmsg = list_entry(upcall_lists[idx].next,
1008 struct gss_upcall_msg, gum_list);
1009 gumd = &gmsg->gum_data;
1010 LASSERT(list_empty(&gmsg->gum_base.list));
1012 CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
1013 "nid "LPX64", obd %.*s\n", gmsg,
1014 gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
1015 gumd->gum_nid, (int) sizeof(gumd->gum_obd),
1018 gmsg->gum_base.errno = -EPIPE;
1019 atomic_inc(&gmsg->gum_refcount);
1020 gss_unhash_msg_nolock(gmsg);
1022 gss_msg_fail_ctx(gmsg);
1024 upcall_list_unlock(idx);
1025 gss_release_msg(gmsg);
1026 upcall_list_lock(idx);
1028 upcall_list_unlock(idx);
1032 static struct rpc_pipe_ops gss_upcall_ops = {
1033 .upcall = gss_pipe_upcall,
1034 .downcall = gss_pipe_downcall,
1035 .destroy_msg = gss_pipe_destroy_msg,
1036 .release_pipe = gss_pipe_release,
1039 /****************************************
1040 * upcall helper functions *
1041 ****************************************/
1044 int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
1046 struct obd_import *imp;
1047 struct gss_sec *gsec;
1048 struct gss_upcall_msg *gmsg;
1054 LASSERT(ctx->cc_sec);
1055 LASSERT(ctx->cc_sec->ps_import);
1056 LASSERT(ctx->cc_sec->ps_import->imp_obd);
1058 imp = ctx->cc_sec->ps_import;
1059 if (!imp->imp_connection) {
1060 CERROR("import has no connection set\n");
1064 gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
1066 OBD_ALLOC_PTR(gmsg);
1070 /* initialize pipefs base msg */
1071 CFS_INIT_LIST_HEAD(&gmsg->gum_base.list);
1072 gmsg->gum_base.data = &gmsg->gum_data;
1073 gmsg->gum_base.len = sizeof(gmsg->gum_data);
1074 gmsg->gum_base.copied = 0;
1075 gmsg->gum_base.errno = 0;
1077 /* init upcall msg */
1078 atomic_set(&gmsg->gum_refcount, 1);
1079 gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
1080 gmsg->gum_gsec = gsec;
1081 gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
1082 struct gss_cli_ctx, gc_base);
1083 gmsg->gum_data.gum_seq = upcall_get_sequence();
1084 gmsg->gum_data.gum_uid = ctx->cc_vcred.vc_uid;
1085 gmsg->gum_data.gum_gid = 0; /* not used for now */
1086 gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
1087 gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
1088 strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
1089 sizeof(gmsg->gum_data.gum_obd));
1091 /* This only could happen when sysadmin set it dead/expired
1092 * using lctl by force. */
1093 if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
1094 CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
1095 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
1098 LASSERT(!(ctx->cc_flags & PTLRPC_CTX_UPTODATE));
1099 ctx->cc_flags |= PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR;
1105 upcall_msg_enlist(gmsg);
1107 rc = rpc_queue_upcall(de_pipes[gmsg->gum_mechidx]->d_inode,
1110 CERROR("rpc_queue_upcall failed: %d\n", rc);
1112 upcall_msg_delist(gmsg);
1123 int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
1125 /* if we are refreshing for root, also update the reverse
1126 * handle index, do not confuse reverse contexts. */
1127 if (ctx->cc_vcred.vc_uid == 0) {
1128 struct gss_sec *gsec;
1130 gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
1131 gsec->gs_rvs_hdl = gss_get_next_ctx_index();
1134 return gss_ctx_refresh_pf(ctx);
1137 /****************************************
1138 * lustre gss pipefs policy *
1139 ****************************************/
1141 static struct ptlrpc_ctx_ops gss_pipefs_ctxops = {
1142 .match = gss_cli_ctx_match,
1143 .refresh = gss_cli_ctx_refresh_pf,
1144 .validate = gss_cli_ctx_validate_pf,
1145 .die = gss_cli_ctx_die_pf,
1146 .sign = gss_cli_ctx_sign,
1147 .verify = gss_cli_ctx_verify,
1148 .seal = gss_cli_ctx_seal,
1149 .unseal = gss_cli_ctx_unseal,
1150 .wrap_bulk = gss_cli_ctx_wrap_bulk,
1151 .unwrap_bulk = gss_cli_ctx_unwrap_bulk,
1154 static struct ptlrpc_sec_cops gss_sec_pipefs_cops = {
1155 .create_sec = gss_sec_create_pf,
1156 .destroy_sec = gss_sec_destroy_pf,
1157 .lookup_ctx = gss_sec_lookup_ctx_pf,
1158 .release_ctx = gss_sec_release_ctx_pf,
1159 .flush_ctx_cache = gss_sec_flush_ctx_cache_pf,
1160 .install_rctx = gss_sec_install_rctx,
1161 .alloc_reqbuf = gss_alloc_reqbuf,
1162 .free_reqbuf = gss_free_reqbuf,
1163 .alloc_repbuf = gss_alloc_repbuf,
1164 .free_repbuf = gss_free_repbuf,
1165 .enlarge_reqbuf = gss_enlarge_reqbuf,
1168 static struct ptlrpc_sec_sops gss_sec_pipefs_sops = {
1169 .accept = gss_svc_accept_pf,
1170 .invalidate_ctx = gss_svc_invalidate_ctx,
1171 .alloc_rs = gss_svc_alloc_rs,
1172 .authorize = gss_svc_authorize,
1173 .free_rs = gss_svc_free_rs,
1174 .free_ctx = gss_svc_free_ctx,
1175 .unwrap_bulk = gss_svc_unwrap_bulk,
1176 .wrap_bulk = gss_svc_wrap_bulk,
1177 .install_rctx = gss_svc_install_rctx_pf,
1180 static struct ptlrpc_sec_policy gss_policy_pipefs = {
1181 .sp_owner = THIS_MODULE,
1182 .sp_name = "gss.pipefs",
1183 .sp_policy = SPTLRPC_POLICY_GSS_PIPEFS,
1184 .sp_cops = &gss_sec_pipefs_cops,
1185 .sp_sops = &gss_sec_pipefs_sops,
1189 int __init gss_init_pipefs_upcall(void)
1194 de = rpc_mkdir(LUSTRE_PIPE_ROOT, NULL);
1195 if (IS_ERR(de) && PTR_ERR(de) != -EEXIST) {
1196 CERROR("Failed to create gss pipe dir: %ld\n", PTR_ERR(de));
1200 /* FIXME hack pipefs: dput will sometimes cause oops during module
1201 * unload and lgssd close the pipe fds. */
1203 /* krb5 mechanism */
1204 de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,
1205 RPC_PIPE_WAIT_FOR_OPEN);
1206 if (!de || IS_ERR(de)) {
1207 CERROR("failed to make rpc_pipe %s: %ld\n",
1208 LUSTRE_PIPE_KRB5, PTR_ERR(de));
1209 rpc_rmdir(LUSTRE_PIPE_ROOT);
1213 de_pipes[MECH_KRB5] = de;
1214 CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
1215 upcall_locks[MECH_KRB5] = SPIN_LOCK_UNLOCKED;
1221 void __exit gss_exit_pipefs_upcall(void)
1225 for (i = 0; i < MECH_MAX; i++) {
1226 LASSERT(list_empty(&upcall_lists[i]));
1228 /* dput pipe dentry here might cause lgssd oops. */
1232 rpc_unlink(LUSTRE_PIPE_KRB5);
1233 rpc_rmdir(LUSTRE_PIPE_ROOT);
1236 int __init gss_init_pipefs(void)
1240 rc = gss_init_pipefs_upcall();
1244 rc = sptlrpc_register_policy(&gss_policy_pipefs);
1246 gss_exit_pipefs_upcall();
1253 void __exit gss_exit_pipefs(void)
1255 gss_exit_pipefs_upcall();
1256 sptlrpc_unregister_policy(&gss_policy_pipefs);