2 * Modifications for Lustre
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
6 * Copyright (c) 2012, Intel Corporation.
8 * Author: Eric Mei <ericm@clusterfs.com>
12 * linux/net/sunrpc/auth_gss.c
14 * RPCSEC_GSS client authentication.
16 * Copyright (c) 2000 The Regents of the University of Michigan.
17 * All rights reserved.
19 * Dug Song <dugsong@monkey.org>
20 * Andy Adamson <andros@umich.edu>
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 3. Neither the name of the University nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
36 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
37 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
38 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
40 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
41 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
42 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
43 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
44 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
45 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 #define DEBUG_SUBSYSTEM S_SEC
51 #include <linux/init.h>
52 #include <linux/module.h>
53 #include <linux/slab.h>
54 #include <linux/dcache.h>
56 #include <linux/mutex.h>
57 #include <linux/crypto.h>
58 #include <asm/atomic.h>
59 struct rpc_clnt; /* for rpc_pipefs */
60 #include <linux/sunrpc/rpc_pipe_fs.h>
62 #include <liblustre.h>
66 #include <obd_class.h>
67 #include <obd_support.h>
68 #include <lustre/lustre_idl.h>
69 #include <lustre_sec.h>
70 #include <lustre_net.h>
71 #include <lustre_import.h>
74 #include "gss_internal.h"
/* Forward declarations: the policy and ctx-ops tables are defined at the
 * bottom of this file; gss_cli_ctx_refresh_pf is referenced by the ctx-ops
 * table before its definition. */
77 static struct ptlrpc_sec_policy gss_policy_pipefs;
78 static struct ptlrpc_ctx_ops gss_pipefs_ctxops;
80 static int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx);
/* Per-sec pipefs upcall setup hook; body not visible in this extraction. */
82 static int gss_sec_pipe_upcall_init(struct gss_sec *gsec)
/* Per-sec pipefs upcall teardown hook; body not visible in this extraction. */
87 static void gss_sec_pipe_upcall_fini(struct gss_sec *gsec)
91 /****************************************
92 * internal context helpers *
93 ****************************************/
/*
 * Allocate a pipefs client context for @vcred under @sec and run the common
 * gss context initialization with the pipefs-specific ctx ops.  Returns the
 * embedded ptlrpc_cli_ctx (error paths elided in this extraction).
 */
96 struct ptlrpc_cli_ctx *ctx_create_pf(struct ptlrpc_sec *sec,
97 struct vfs_cred *vcred)
99 struct gss_cli_ctx *gctx;
106 rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
107 &gss_pipefs_ctxops, vcred);
113 return &gctx->gc_base;
/*
 * Tear down a pipefs client context: run the common fini, then drop this
 * context's count against the sec and release the sec reference it held.
 */
117 void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
119 struct gss_cli_ctx *gctx = ctx2gctx(ctx);
121 if (gss_cli_ctx_fini_common(sec, ctx))
126 cfs_atomic_dec(&sec->ps_nctx);
127 sptlrpc_sec_put(sec);
/*
 * Insert @ctx into cache bucket @hash: mark it CACHED and take a reference
 * owned by the hash chain.
 */
131 void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
133 set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
134 cfs_atomic_inc(&ctx->cc_refcount);
135 cfs_hlist_add_head(&ctx->cc_cache, hash);
/*
 * Remove @ctx from its cache bucket, dropping the hash chain's reference.
 * If that was the last reference the context is moved onto @freelist for
 * deferred destruction; otherwise it is simply unlinked.
 *
 * caller must hold spinlock
 */
142 void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
144 LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
145 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
146 LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
147 LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
149 clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
/* last ref: re-link onto freelist so the caller destroys it outside the lock */
151 if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
152 __cfs_hlist_del(&ctx->cc_cache);
153 cfs_hlist_add_head(&ctx->cc_cache, freelist);
155 cfs_hlist_del_init(&ctx->cc_cache);
/*
 * Check whether @ctx has died; a dead context is unhashed (onto @freelist
 * when supplied).
 *
 * return 1 if the context is dead.
 */
163 int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
164 cfs_hlist_head_t *freelist)
166 if (cli_ctx_check_death(ctx)) {
168 ctx_unhash_pf(ctx, freelist);
/*
 * Locked variant of ctx_check_death_pf(): asserts the context is live,
 * referenced and cached before delegating the actual death check.
 */
176 int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
177 cfs_hlist_head_t *freelist)
179 LASSERT(ctx->cc_sec);
180 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
181 LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
183 return ctx_check_death_pf(ctx, freelist);
/*
 * Match @ctx against @vcred via the per-policy match hook; a missing hook
 * (null policy) is treated as an unconditional match shortcut.
 */
187 int ctx_match_pf(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
189 /* a little bit optimization for null policy */
190 if (!ctx->cc_ops->match)
193 return ctx->cc_ops->match(ctx, vcred);
/*
 * Destroy every context on @head.  Each entry must already be fully
 * released (refcount 0) and removed from the cache.
 */
197 void ctx_list_destroy_pf(cfs_hlist_head_t *head)
199 struct ptlrpc_cli_ctx *ctx;
201 while (!cfs_hlist_empty(head)) {
202 ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
205 LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
206 LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
207 &ctx->cc_flags) == 0);
209 cfs_hlist_del_init(&ctx->cc_cache);
210 ctx_destroy_pf(ctx->cc_sec, ctx);
214 /****************************************
216 ****************************************/
/*
 * ctx-ops "validate" hook: reject a dead context (unhashing it in place),
 * then report whether the context is ready for use.
 */
219 int gss_cli_ctx_validate_pf(struct ptlrpc_cli_ctx *ctx)
221 if (ctx_check_death_pf(ctx, NULL))
223 if (cli_ctx_is_ready(ctx))
/*
 * ctx-ops "die" hook: under the sec lock, pull the context out of the
 * cache (if cached) and drop the cache's reference.
 */
229 void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
231 LASSERT(ctx->cc_sec);
232 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
236 spin_lock(&ctx->cc_sec->ps_lock);
238 if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
239 LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
/* caller must still hold its own reference, so the cache ref is not last */
240 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
242 cfs_hlist_del_init(&ctx->cc_cache);
243 if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
247 spin_unlock(&ctx->cc_sec->ps_lock);
250 /****************************************
251 * reverse context installation *
252 ****************************************/
/* Map @key into a bucket index; assumes @hashsize is a power of two so the
 * mask (hashsize - 1) selects the low bits. */
255 unsigned int ctx_hash_index(int hashsize, __u64 key)
257 return (unsigned int) (key & ((__u64) hashsize - 1));
/*
 * Install @new into the context cache, evicting any existing context that
 * matches the same credentials.  Evicted contexts are collected on a local
 * freelist and destroyed after the sec lock is dropped.
 */
261 void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
262 struct ptlrpc_cli_ctx *new)
264 struct gss_sec_pipefs *gsec_pf;
265 struct ptlrpc_cli_ctx *ctx;
266 cfs_hlist_node_t *pos, *next;
267 CFS_HLIST_HEAD(freelist);
271 gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
/* bucket is selected by the new context's uid */
273 hash = ctx_hash_index(gsec_pf->gsp_chash_size,
274 (__u64) new->cc_vcred.vc_uid);
275 LASSERT(hash < gsec_pf->gsp_chash_size);
277 spin_lock(&gsec->gs_base.ps_lock);
279 cfs_hlist_for_each_entry_safe(ctx, pos, next,
280 &gsec_pf->gsp_chash[hash], cc_cache) {
281 if (!ctx_match_pf(ctx, &new->cc_vcred))
285 ctx_unhash_pf(ctx, &freelist);
289 ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
291 spin_unlock(&gsec->gs_base.ps_lock);
/* destroy evicted contexts outside the spinlock */
293 ctx_list_destroy_pf(&freelist);
/*
 * Build a reverse (server-initiated) client context from @svc_ctx: create
 * a fresh pipefs context, copy the reverse-context state into it, then
 * install it in the cache, replacing any matching entry.
 */
298 int gss_install_rvs_cli_ctx_pf(struct gss_sec *gsec,
299 struct ptlrpc_svc_ctx *svc_ctx)
301 struct vfs_cred vcred;
302 struct ptlrpc_cli_ctx *cli_ctx;
309 cli_ctx = ctx_create_pf(&gsec->gs_base, &vcred);
313 rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
/* on copy failure the half-built context is destroyed */
315 ctx_destroy_pf(cli_ctx->cc_sec, cli_ctx);
319 gss_sec_ctx_replace_pf(gsec, cli_ctx);
/*
 * Garbage-collect the whole context cache: walk every bucket, moving dead
 * contexts onto @freelist, then schedule the next gc time.
 * Caller is expected to hold the sec lock (the walk uses the locked
 * death-check helper).
 */
324 void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
325 cfs_hlist_head_t *freelist)
327 struct ptlrpc_sec *sec;
328 struct ptlrpc_cli_ctx *ctx;
329 cfs_hlist_node_t *pos, *next;
333 sec = &gsec_pf->gsp_base.gs_base;
335 CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);
337 for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
338 cfs_hlist_for_each_entry_safe(ctx, pos, next,
339 &gsec_pf->gsp_chash[i], cc_cache)
340 ctx_check_death_locked_pf(ctx, freelist);
/* arm next gc deadline */
343 sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
/*
 * sec-ops "create_sec" hook: allocate a gss_sec_pipefs with its context
 * hash table appended, run common gss sec setup, initialize the pipefs
 * upcall, and (for a reverse sec, when @ctx is given) install the reverse
 * client context.  Returns the embedded ptlrpc_sec, or the error-path
 * cleanup frees everything (labels elided in this extraction).
 */
348 struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
349 struct ptlrpc_svc_ctx *ctx,
350 struct sptlrpc_flavor *sf)
352 struct gss_sec_pipefs *gsec_pf;
353 int alloc_size, hash_size, i;
356 #define GSS_SEC_PIPEFS_CTX_HASH_SIZE (32)
359 sf->sf_flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
362 hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
/* hash table is allocated inline after the struct */
364 alloc_size = sizeof(*gsec_pf) +
365 sizeof(cfs_hlist_head_t) * hash_size;
367 OBD_ALLOC(gsec_pf, alloc_size);
371 gsec_pf->gsp_chash_size = hash_size;
372 for (i = 0; i < hash_size; i++)
373 CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
375 if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
380 if (gss_sec_pipe_upcall_init(&gsec_pf->gsp_base))
383 if (gss_install_rvs_cli_ctx_pf(&gsec_pf->gsp_base, ctx))
387 RETURN(&gsec_pf->gsp_base.gs_base);
390 gss_sec_destroy_common(&gsec_pf->gsp_base);
392 OBD_FREE(gsec_pf, alloc_size);
/*
 * sec-ops "destroy_sec" hook: undo gss_sec_create_pf() — tear down the
 * pipefs upcall, run common gss sec destruction, and free the combined
 * struct + hash-table allocation.
 */
397 void gss_sec_destroy_pf(struct ptlrpc_sec *sec)
399 struct gss_sec_pipefs *gsec_pf;
400 struct gss_sec *gsec;
402 CWARN("destroy %s@%p\n", sec->ps_policy->sp_name, sec);
404 gsec = container_of(sec, struct gss_sec, gs_base);
405 gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
407 LASSERT(gsec_pf->gsp_chash);
408 LASSERT(gsec_pf->gsp_chash_size);
410 gss_sec_pipe_upcall_fini(gsec);
412 gss_sec_destroy_common(gsec);
/* free size must match the create-time alloc_size (struct + hash table) */
414 OBD_FREE(gsec, sizeof(*gsec_pf) +
415 sizeof(cfs_hlist_head_t) * gsec_pf->gsp_chash_size);
/*
 * sec-ops "lookup_ctx" hook: find (or, when @create, build) the client
 * context for @vcred.
 *
 * Under the sec lock: optionally garbage-collect dead contexts, scan the
 * uid's hash bucket for a live match, and handle the allocate-then-retry
 * race (a concurrently inserted match wins; the loser's new context goes
 * onto the freelist).  On a miss the lock is dropped, a new context is
 * created, and the scan restarts (restart label elided in this extraction).
 * Reverse secs never allocate here.  The winning context gets an extra
 * reference for the caller, and a freshly created context receives its
 * first refresh push before the freelist is destroyed.
 */
419 struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
420 struct vfs_cred *vcred,
421 int create, int remove_dead)
423 struct gss_sec *gsec;
424 struct gss_sec_pipefs *gsec_pf;
425 struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
426 cfs_hlist_head_t *hash_head;
427 cfs_hlist_node_t *pos, *next;
428 CFS_HLIST_HEAD(freelist);
429 unsigned int hash, gc = 0, found = 0;
434 gsec = container_of(sec, struct gss_sec, gs_base);
435 gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
437 hash = ctx_hash_index(gsec_pf->gsp_chash_size,
438 (__u64) vcred->vc_uid);
439 hash_head = &gsec_pf->gsp_chash[hash];
440 LASSERT(hash < gsec_pf->gsp_chash_size);
443 spin_lock(&sec->ps_lock);
445 /* gc_next == 0 means never do gc */
446 if (remove_dead && sec->ps_gc_next &&
447 cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
448 gss_ctx_cache_gc_pf(gsec_pf, &freelist);
452 cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
454 ctx_check_death_locked_pf(ctx,
455 remove_dead ? &freelist : NULL))
458 if (ctx_match_pf(ctx, vcred)) {
465 if (new && new != ctx) {
466 /* lost the race, just free it */
467 cfs_hlist_add_head(&new->cc_cache, &freelist);
471 /* hot node, move to head */
472 if (hash_head->first != &ctx->cc_cache) {
473 __cfs_hlist_del(&ctx->cc_cache);
474 cfs_hlist_add_head(&ctx->cc_cache, hash_head);
477 /* don't allocate for reverse sec */
478 if (sec_is_reverse(sec)) {
479 spin_unlock(&sec->ps_lock);
484 ctx_enhash_pf(new, hash_head);
487 spin_unlock(&sec->ps_lock);
/* allocation happens outside the lock; NEW bit cleared before re-scan */
488 new = ctx_create_pf(sec, vcred);
490 clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
500 cfs_atomic_inc(&ctx->cc_refcount);
502 spin_unlock(&sec->ps_lock);
504 /* the allocator of the context must give the first push to refresh */
507 gss_cli_ctx_refresh_pf(new);
510 ctx_list_destroy_pf(&freelist);
/*
 * sec-ops "release_ctx" hook: final destruction of an already-uncached
 * context.  When the (elided) sync flag requests async behaviour the
 * UPTODATE bit is cleared first so destruction issues no extra RPCs.
 */
515 void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec,
516 struct ptlrpc_cli_ctx *ctx,
519 LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
520 LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
522 /* if required async, we must clear the UPTODATE bit to prevent extra
523 * rpcs during destroy procedure. */
525 clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
527 /* destroy this context */
528 ctx_destroy_pf(sec, ctx);
/*
 * sec-ops "flush_ctx_cache" hook: flush cached contexts for one uid (or
 * all), marking each DEAD (and, without @grace, clearing UPTODATE so no
 * destroy RPC goes out).  Busy contexts (refcount > 1) are only flushed
 * when @force, with a warning.
 *
 * @uid: which user. "-1" means flush all.
 * @grace: mark context DEAD, allow graceful destroy like notify
 * @force: also flush busy entries.
 *
 * return the number of busy context encountered.
 *
 * In any cases, never touch "eternal" contexts.
 */
542 int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
544 int grace, int force)
546 struct gss_sec *gsec;
547 struct gss_sec_pipefs *gsec_pf;
548 struct ptlrpc_cli_ctx *ctx;
549 cfs_hlist_node_t *pos, *next;
550 CFS_HLIST_HEAD(freelist);
554 might_sleep_if(grace);
556 gsec = container_of(sec, struct gss_sec, gs_base);
557 gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
559 spin_lock(&sec->ps_lock);
560 for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
561 cfs_hlist_for_each_entry_safe(ctx, pos, next,
562 &gsec_pf->gsp_chash[i],
564 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
566 if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
/* busy entry: skipped unless force, then flushed with a warning */
569 if (cfs_atomic_read(&ctx->cc_refcount) > 1) {
574 CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
576 cfs_atomic_read(&ctx->cc_refcount),
577 ctx, ctx->cc_vcred.vc_uid,
578 sec2target_str(ctx->cc_sec), grace);
580 ctx_unhash_pf(ctx, &freelist);
582 set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
584 clear_bit(PTLRPC_CTX_UPTODATE_BIT,
588 spin_unlock(&sec->ps_lock);
590 ctx_list_destroy_pf(&freelist);
594 /****************************************
596 ****************************************/
/* Server-side accept hook: delegate to the generic gss acceptor, tagging
 * the request with the pipefs policy. */
599 int gss_svc_accept_pf(struct ptlrpc_request *req)
601 return gss_svc_accept(&gss_policy_pipefs, req);
/*
 * Server-side install of a reverse client context: take a reference on the
 * import's sec, install the reverse context, then drop the reference.
 */
605 int gss_svc_install_rctx_pf(struct obd_import *imp,
606 struct ptlrpc_svc_ctx *ctx)
608 struct ptlrpc_sec *sec;
611 sec = sptlrpc_import_sec_ref(imp);
613 rc = gss_install_rvs_cli_ctx_pf(sec2gsec(sec), ctx);
615 sptlrpc_sec_put(sec);
619 /****************************************
620 * rpc_pipefs definitions *
621 ****************************************/
/* rpc_pipefs paths: a /lustre directory with one pipe per mechanism. */
623 #define LUSTRE_PIPE_ROOT "/lustre"
624 #define LUSTRE_PIPE_KRB5 LUSTRE_PIPE_ROOT"/krb5"
/* Wire payload handed to lgssd through the pipe (sequence number, uid/gid
 * and remaining fields elided in this extraction). */
626 struct gss_upcall_msg_data {
630 __u32 gum_svc; /* MDS/OSS... */
631 __u64 gum_nid; /* peer NID */
632 __u8 gum_obd[64]; /* client obd name */
/* In-kernel bookkeeping wrapped around each rpc_pipe upcall message:
 * refcounted, linked on the per-mechanism upcall list, and pinning the
 * sec/context being refreshed. */
635 struct gss_upcall_msg {
636 struct rpc_pipe_msg gum_base;
637 cfs_atomic_t gum_refcount;
640 struct gss_sec *gum_gsec;
641 struct gss_cli_ctx *gum_gctx;
642 struct gss_upcall_msg_data gum_data;
/* Monotonic upcall sequence numbers, used to pair downcalls with their
 * pending upcall messages. */
645 static cfs_atomic_t upcall_seq = CFS_ATOMIC_INIT(0);
648 __u32 upcall_get_sequence(void)
650 return (__u32) cfs_atomic_inc_return(&upcall_seq);
/* Map a mechanism name to its index; only "krb5" is supported here
 * (return of the index elided in this extraction). */
659 __u32 mech_name2idx(const char *name)
661 LASSERT(!strcmp(name, "krb5"));
665 /* pipefs dentries for each mechanisms */
666 static struct dentry *de_pipes[MECH_MAX] = { NULL, };
667 /* all upcall messages linked here */
668 static cfs_list_t upcall_lists[MECH_MAX];
669 /* and protected by this */
670 static spinlock_t upcall_locks[MECH_MAX];
/* Lock the per-mechanism upcall list. */
673 void upcall_list_lock(int idx)
675 spin_lock(&upcall_locks[idx]);
/* Unlock the per-mechanism upcall list. */
679 void upcall_list_unlock(int idx)
681 spin_unlock(&upcall_locks[idx]);
/* Add @msg to its mechanism's pending-upcall list under the list lock. */
685 void upcall_msg_enlist(struct gss_upcall_msg *msg)
687 __u32 idx = msg->gum_mechidx;
689 upcall_list_lock(idx);
690 cfs_list_add(&msg->gum_list, &upcall_lists[idx]);
691 upcall_list_unlock(idx);
/* Remove @msg from its mechanism's pending-upcall list under the list lock. */
695 void upcall_msg_delist(struct gss_upcall_msg *msg)
697 __u32 idx = msg->gum_mechidx;
699 upcall_list_lock(idx);
700 cfs_list_del_init(&msg->gum_list);
701 upcall_list_unlock(idx);
704 /****************************************
705 * rpc_pipefs upcall helpers *
706 ****************************************/
/*
 * Drop one reference on @gmsg; on the last reference, wake up and release
 * the pinned client context, then free the message (free elided in this
 * extraction).  Must already be off both the upcall and pipe lists.
 */
709 void gss_release_msg(struct gss_upcall_msg *gmsg)
712 LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
714 if (!cfs_atomic_dec_and_test(&gmsg->gum_refcount)) {
719 if (gmsg->gum_gctx) {
720 sptlrpc_cli_ctx_wakeup(&gmsg->gum_gctx->gc_base);
721 sptlrpc_cli_ctx_put(&gmsg->gum_gctx->gc_base, 1);
722 gmsg->gum_gctx = NULL;
725 LASSERT(cfs_list_empty(&gmsg->gum_list));
726 LASSERT(cfs_list_empty(&gmsg->gum_base.list));
/*
 * Unlink @gmsg from its upcall list and drop the list's reference.
 * Caller holds the matching upcall lock; a message already off the list
 * is left alone.
 */
732 void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
734 __u32 idx = gmsg->gum_mechidx;
736 LASSERT(idx < MECH_MAX);
737 LASSERT_SPIN_LOCKED(&upcall_locks[idx]);
739 if (cfs_list_empty(&gmsg->gum_list))
742 cfs_list_del_init(&gmsg->gum_list);
/* the list ref must not be the last one here */
743 LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
744 cfs_atomic_dec(&gmsg->gum_refcount);
/* Locked wrapper around gss_unhash_msg_nolock(). */
748 void gss_unhash_msg(struct gss_upcall_msg *gmsg)
750 __u32 idx = gmsg->gum_mechidx;
752 LASSERT(idx < MECH_MAX);
753 upcall_list_lock(idx);
754 gss_unhash_msg_nolock(gmsg);
755 upcall_list_unlock(idx);
/* Mark the context pinned by a failed upcall as expired and in error. */
759 void gss_msg_fail_ctx(struct gss_upcall_msg *gmsg)
761 if (gmsg->gum_gctx) {
762 struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
764 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
765 sptlrpc_cli_ctx_expire(ctx);
766 set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
/*
 * Look up the pending upcall for (@mechidx, @seq).  On a hit, return it
 * with an extra reference taken under the list lock; a miss returns after
 * unlocking (NULL return elided in this extraction).
 */
771 struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq)
773 struct gss_upcall_msg *gmsg;
775 upcall_list_lock(mechidx);
776 cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
777 if (gmsg->gum_data.gum_seq != seq)
780 LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
781 LASSERT(gmsg->gum_mechidx == mechidx);
783 cfs_atomic_inc(&gmsg->gum_refcount);
784 upcall_list_unlock(mechidx);
787 upcall_list_unlock(mechidx);
/*
 * Consume @reslen bytes from (*buf, *buflen) into @res, failing when the
 * buffer is short (pointer/length advance elided in this extraction).
 */
792 int simple_get_bytes(char **buf, __u32 *buflen, void *res, __u32 reslen)
794 if (*buflen < reslen) {
795 CERROR("buflen %u < %u\n", *buflen, reslen);
799 memcpy(res, *buf, reslen);
805 /****************************************
807 ****************************************/
/*
 * rpc_pipe "upcall" op: copy the pending message payload (from the current
 * copied offset) out to userspace (lgssd reading the pipe).
 */
810 ssize_t gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
811 char *dst, size_t buflen)
813 char *data = (char *)msg->data + msg->copied;
814 ssize_t mlen = msg->len;
820 left = cfs_copy_to_user(dst, data, mlen);
/*
 * rpc_pipe "downcall" op: parse lgssd's reply and finish the context
 * negotiation.
 *
 * The userspace buffer is copied in, the sequence number is used to find
 * the matching pending upcall, and then the fields (timeout, gc_win,
 * rpc/gss error codes on failure, wire handle and serialized mech context
 * on success) are unpacked.  A successfully imported context is marked
 * uptodate; otherwise the context is expired, with ERROR set for fatal
 * failures.  Cleanup labels and the final return are elided in this
 * extraction.
 */
832 ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
834 struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
835 struct gss_upcall_msg *gss_msg;
836 struct ptlrpc_cli_ctx *ctx;
837 struct gss_cli_ctx *gctx = NULL;
841 __u32 mechidx, seq, gss_err;
/* mechanism index was stashed in the pipe inode's private pointer */
844 mechidx = (__u32) (long) rpci->private;
845 LASSERT(mechidx < MECH_MAX);
847 OBD_ALLOC(buf, mlen);
851 if (cfs_copy_from_user(buf, src, mlen)) {
852 CERROR("failed copy user space data\n");
853 GOTO(out_free, rc = -EFAULT);
858 /* data passed down format:
862 * - wire_ctx (rawobj)
863 * - mech_ctx (rawobj)
865 if (simple_get_bytes(&data, &datalen, &seq, sizeof(seq))) {
866 CERROR("fail to get seq\n");
867 GOTO(out_free, rc = -EFAULT);
870 gss_msg = gss_find_upcall(mechidx, seq);
872 CERROR("upcall %u has aborted earlier\n", seq);
873 GOTO(out_free, rc = -EINVAL);
876 gss_unhash_msg(gss_msg);
877 gctx = gss_msg->gum_gctx;
879 LASSERT(cfs_atomic_read(&gctx->gc_base.cc_refcount) > 0);
881 /* timeout is not in use for now */
882 if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
883 GOTO(out_msg, rc = -EFAULT);
885 /* lgssd signal an error by gc_win == 0 */
886 if (simple_get_bytes(&data, &datalen, &gctx->gc_win,
887 sizeof(gctx->gc_win)))
888 GOTO(out_msg, rc = -EFAULT);
890 if (gctx->gc_win == 0) {
/* error path: rpc error then gss error follow in the stream */
895 if (simple_get_bytes(&data, &datalen, &rc, sizeof(rc)))
896 GOTO(out_msg, rc = -EFAULT);
897 if (simple_get_bytes(&data, &datalen, &gss_err,sizeof(gss_err)))
898 GOTO(out_msg, rc = -EFAULT);
900 if (rc == 0 && gss_err == GSS_S_COMPLETE) {
901 CWARN("both rpc & gss error code not set\n");
/* success path: wire handle, then serialized mech context */
908 if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen))
909 GOTO(out_msg, rc = -EFAULT);
910 if (rawobj_dup(&gctx->gc_handle, &tmpobj))
911 GOTO(out_msg, rc = -ENOMEM);
914 if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen))
915 GOTO(out_msg, rc = -EFAULT);
916 gss_err = lgss_import_sec_context(&tmpobj,
917 gss_msg->gum_gsec->gs_mech,
922 if (likely(rc == 0 && gss_err == GSS_S_COMPLETE)) {
923 gss_cli_ctx_uptodate(gctx);
925 ctx = &gctx->gc_base;
926 sptlrpc_cli_ctx_expire(ctx);
/* -ERESTART with GSS_S_COMPLETE is treated as retryable, not fatal */
927 if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
928 set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
930 CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
931 ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
932 test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
933 "fatal error" : "non-fatal");
939 gss_release_msg(gss_msg);
944 * hack pipefs: always return asked length unless all following
945 * downcalls might be messed up. */
/*
 * rpc_pipe "destroy_msg" op: invoked when a queued message is discarded.
 * A non-negative errno means normal completion; otherwise log the failed
 * message, unhash it, rate-limit a "is lgssd running?" warning on
 * timeout/EPIPE, fail its context, and drop the temporary reference.
 */
951 void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
953 struct gss_upcall_msg *gmsg;
954 struct gss_upcall_msg_data *gumd;
955 static cfs_time_t ratelimit = 0;
958 LASSERT(cfs_list_empty(&msg->list));
960 /* normally errno is >= 0 */
961 if (msg->errno >= 0) {
966 gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
967 gumd = &gmsg->gum_data;
968 LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 0);
970 CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
971 "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
972 gumd->gum_nid, (int) sizeof(gumd->gum_obd),
973 gumd->gum_obd, msg->errno);
/* pin the message across unhash + fail, released below */
975 cfs_atomic_inc(&gmsg->gum_refcount);
976 gss_unhash_msg(gmsg);
977 if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
978 cfs_time_t now = cfs_time_current_sec();
980 if (cfs_time_after(now, ratelimit)) {
981 CWARN("upcall timed out, is lgssd running?\n");
/* warn at most once every 15 seconds */
982 ratelimit = now + 15;
985 gss_msg_fail_ctx(gmsg);
986 gss_release_msg(gmsg);
/*
 * rpc_pipe "release_pipe" op: the pipe is being closed, so every message
 * still pending on this mechanism's upcall list is failed with -EPIPE.
 * The list lock is dropped around gss_release_msg() since that may be the
 * final put, then the walk restarts from the head.
 */
991 void gss_pipe_release(struct inode *inode)
993 struct rpc_inode *rpci = RPC_I(inode);
997 idx = (__u32) (long) rpci->private;
998 LASSERT(idx < MECH_MAX);
1000 upcall_list_lock(idx);
1001 while (!cfs_list_empty(&upcall_lists[idx])) {
1002 struct gss_upcall_msg *gmsg;
1003 struct gss_upcall_msg_data *gumd;
1005 gmsg = cfs_list_entry(upcall_lists[idx].next,
1006 struct gss_upcall_msg, gum_list);
1007 gumd = &gmsg->gum_data;
1008 LASSERT(cfs_list_empty(&gmsg->gum_base.list));
1010 CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
1011 "nid "LPX64", obd %.*s\n", gmsg,
1012 gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
1013 gumd->gum_nid, (int) sizeof(gumd->gum_obd),
1016 gmsg->gum_base.errno = -EPIPE;
/* hold a ref so the message survives until the put below */
1017 cfs_atomic_inc(&gmsg->gum_refcount);
1018 gss_unhash_msg_nolock(gmsg);
1020 gss_msg_fail_ctx(gmsg);
1022 upcall_list_unlock(idx);
1023 gss_release_msg(gmsg);
1024 upcall_list_lock(idx);
1026 upcall_list_unlock(idx);
/* rpc_pipefs operations table wired into the /lustre/krb5 pipe. */
1030 static struct rpc_pipe_ops gss_upcall_ops = {
1031 .upcall = gss_pipe_upcall,
1032 .downcall = gss_pipe_downcall,
1033 .destroy_msg = gss_pipe_destroy_msg,
1034 .release_pipe = gss_pipe_release,
1037 /****************************************
1038 * upcall helper functions *
1039 ****************************************/
/*
 * Kick off a context refresh: build a gss_upcall_msg describing @ctx
 * (sequence, uid, service, peer NID, obd name), enlist it, and queue it
 * on the mechanism's pipe for lgssd to pick up.  Failure paths (delist,
 * context fail, free) are elided in this extraction.
 */
1042 int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
1044 struct obd_import *imp;
1045 struct gss_sec *gsec;
1046 struct gss_upcall_msg *gmsg;
1052 LASSERT(ctx->cc_sec);
1053 LASSERT(ctx->cc_sec->ps_import);
1054 LASSERT(ctx->cc_sec->ps_import->imp_obd);
1056 imp = ctx->cc_sec->ps_import;
1057 if (!imp->imp_connection) {
1058 CERROR("import has no connection set\n");
1062 gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
1064 OBD_ALLOC_PTR(gmsg);
1068 /* initialize pipefs base msg */
1069 CFS_INIT_LIST_HEAD(&gmsg->gum_base.list);
1070 gmsg->gum_base.data = &gmsg->gum_data;
1071 gmsg->gum_base.len = sizeof(gmsg->gum_data);
1072 gmsg->gum_base.copied = 0;
1073 gmsg->gum_base.errno = 0;
1075 /* init upcall msg */
1076 cfs_atomic_set(&gmsg->gum_refcount, 1);
1077 gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
1078 gmsg->gum_gsec = gsec;
/* pin the context for the lifetime of the upcall */
1079 gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
1080 struct gss_cli_ctx, gc_base);
1081 gmsg->gum_data.gum_seq = upcall_get_sequence();
1082 gmsg->gum_data.gum_uid = ctx->cc_vcred.vc_uid;
1083 gmsg->gum_data.gum_gid = 0; /* not used for now */
1084 gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
1085 gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
/* NOTE(review): strncpy with the full buffer size may leave gum_obd
 * unterminated when obd_name fills it exactly — confirm lgssd tolerates
 * a non-NUL-terminated name (readers here use %.*s with sizeof). */
1086 strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
1087 sizeof(gmsg->gum_data.gum_obd));
1089 /* This only could happen when sysadmin set it dead/expired
1090 * using lctl by force. */
1091 if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
1092 CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
1093 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
1096 LASSERT(!(ctx->cc_flags & PTLRPC_CTX_UPTODATE));
1097 ctx->cc_flags |= PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR;
1103 upcall_msg_enlist(gmsg);
1105 rc = rpc_queue_upcall(de_pipes[gmsg->gum_mechidx]->d_inode,
1108 CERROR("rpc_queue_upcall failed: %d\n", rc);
1110 upcall_msg_delist(gmsg);
/*
 * ctx-ops "refresh" hook: for root contexts, first bump the reverse handle
 * index so fresh reverse contexts are distinguishable, then run the actual
 * pipefs refresh upcall.
 */
1121 int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
1123 /* if we are refreshing for root, also update the reverse
1124 * handle index, do not confuse reverse contexts. */
1125 if (ctx->cc_vcred.vc_uid == 0) {
1126 struct gss_sec *gsec;
1128 gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
1129 gsec->gs_rvs_hdl = gss_get_next_ctx_index();
1132 return gss_ctx_refresh_pf(ctx);
1135 /****************************************
1136 * lustre gss pipefs policy *
1137 ****************************************/
/* Client context operations: pipefs-specific refresh/validate/die, shared
 * gss implementations for everything else. */
1139 static struct ptlrpc_ctx_ops gss_pipefs_ctxops = {
1140 .match = gss_cli_ctx_match,
1141 .refresh = gss_cli_ctx_refresh_pf,
1142 .validate = gss_cli_ctx_validate_pf,
1143 .die = gss_cli_ctx_die_pf,
1144 .sign = gss_cli_ctx_sign,
1145 .verify = gss_cli_ctx_verify,
1146 .seal = gss_cli_ctx_seal,
1147 .unseal = gss_cli_ctx_unseal,
1148 .wrap_bulk = gss_cli_ctx_wrap_bulk,
1149 .unwrap_bulk = gss_cli_ctx_unwrap_bulk,
/* Client-side sec operations: pipefs-specific lifecycle and context cache
 * handling, shared gss buffer management. */
1152 static struct ptlrpc_sec_cops gss_sec_pipefs_cops = {
1153 .create_sec = gss_sec_create_pf,
1154 .destroy_sec = gss_sec_destroy_pf,
1155 .kill_sec = gss_sec_kill,
1156 .lookup_ctx = gss_sec_lookup_ctx_pf,
1157 .release_ctx = gss_sec_release_ctx_pf,
1158 .flush_ctx_cache = gss_sec_flush_ctx_cache_pf,
1159 .install_rctx = gss_sec_install_rctx,
1160 .alloc_reqbuf = gss_alloc_reqbuf,
1161 .free_reqbuf = gss_free_reqbuf,
1162 .alloc_repbuf = gss_alloc_repbuf,
1163 .free_repbuf = gss_free_repbuf,
1164 .enlarge_reqbuf = gss_enlarge_reqbuf,
/* Server-side sec operations: pipefs-specific accept/install_rctx, shared
 * gss implementations for the rest. */
1167 static struct ptlrpc_sec_sops gss_sec_pipefs_sops = {
1168 .accept = gss_svc_accept_pf,
1169 .invalidate_ctx = gss_svc_invalidate_ctx,
1170 .alloc_rs = gss_svc_alloc_rs,
1171 .authorize = gss_svc_authorize,
1172 .free_rs = gss_svc_free_rs,
1173 .free_ctx = gss_svc_free_ctx,
1174 .unwrap_bulk = gss_svc_unwrap_bulk,
1175 .wrap_bulk = gss_svc_wrap_bulk,
1176 .install_rctx = gss_svc_install_rctx_pf,
/* The "gss.pipefs" sptlrpc policy registered by gss_init_pipefs(). */
1179 static struct ptlrpc_sec_policy gss_policy_pipefs = {
1180 .sp_owner = THIS_MODULE,
1181 .sp_name = "gss.pipefs",
1182 .sp_policy = SPTLRPC_POLICY_GSS_PIPEFS,
1183 .sp_cops = &gss_sec_pipefs_cops,
1184 .sp_sops = &gss_sec_pipefs_sops,
/*
 * Create the rpc_pipefs plumbing: the /lustre directory (EEXIST is
 * tolerated) and the krb5 pipe, then initialize the per-mechanism list
 * and lock.  On pipe-creation failure the directory is removed again.
 */
1188 int __init gss_init_pipefs_upcall(void)
1193 de = rpc_mkdir(LUSTRE_PIPE_ROOT, NULL);
1194 if (IS_ERR(de) && PTR_ERR(de) != -EEXIST) {
1195 CERROR("Failed to create gss pipe dir: %ld\n", PTR_ERR(de));
1199 /* FIXME hack pipefs: dput will sometimes cause oops during module
1200 * unload and lgssd close the pipe fds. */
1202 /* krb5 mechanism */
1203 de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,
1204 RPC_PIPE_WAIT_FOR_OPEN);
1205 if (!de || IS_ERR(de)) {
1206 CERROR("failed to make rpc_pipe %s: %ld\n",
1207 LUSTRE_PIPE_KRB5, PTR_ERR(de));
1208 rpc_rmdir(LUSTRE_PIPE_ROOT);
1212 de_pipes[MECH_KRB5] = de;
1213 CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
1214 spin_lock_init(&upcall_locks[MECH_KRB5]);
/*
 * Tear down the rpc_pipefs plumbing.  All upcall lists must already be
 * drained; pipe dentries are deliberately not dput (see FIXME in init).
 */
1220 void __exit gss_exit_pipefs_upcall(void)
1224 for (i = 0; i < MECH_MAX; i++) {
1225 LASSERT(cfs_list_empty(&upcall_lists[i]));
1227 /* dput pipe dentry here might cause lgssd oops. */
1231 rpc_unlink(LUSTRE_PIPE_KRB5);
1232 rpc_rmdir(LUSTRE_PIPE_ROOT);
/*
 * Module entry for this policy: set up the pipefs upcall, then register
 * the "gss.pipefs" policy; undo the upcall setup if registration fails.
 */
1235 int __init gss_init_pipefs(void)
1239 rc = gss_init_pipefs_upcall();
1243 rc = sptlrpc_register_policy(&gss_policy_pipefs);
1245 gss_exit_pipefs_upcall();
1252 void __exit gss_exit_pipefs(void)
1254 gss_exit_pipefs_upcall();
1255 sptlrpc_unregister_policy(&gss_policy_pipefs);