1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Modifications for Lustre
6 * Copyright 2008, Sun Microsystems, Inc.
7 * Author: Eric Mei <eric.mei@sun.com>
9 * Copyright 2004 - 2006, Cluster File Systems, Inc.
11 * Author: Eric Mei <ericm@clusterfs.com>
15 * Neil Brown <neilb@cse.unsw.edu.au>
16 * J. Bruce Fields <bfields@umich.edu>
17 * Andy Adamson <andros@umich.edu>
18 * Dug Song <dugsong@monkey.org>
20 * RPCSEC_GSS server authentication.
21 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
24 * The RPCSEC_GSS involves three stages:
27 * 3/ context destruction
29 * Context creation is handled largely by upcalls to user-space.
30 * In particular, GSS_Accept_sec_context is handled by an upcall
31 * Data exchange is handled entirely within the kernel
32 * In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
33 * Context destruction is handled in-kernel
34 * GSS_Delete_sec_context is in-kernel
36 * Context creation is initiated by a RPCSEC_GSS_INIT request arriving.
37 * The context handle and gss_token are used as a key into the rpcsec_init cache.
38 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
39 * being major_status, minor_status, context_handle, reply_token.
40 * These are sent back to the client.
 * Sequence window management is handled by the kernel. The window size is
 * currently a compile time constant.
44 * When user-space is happy that a context is established, it places an entry
45 * in the rpcsec_context cache. The key for this cache is the context_handle.
46 * The content includes:
47 * uid/gidlist - for determining access rights
49 * mechanism specific information, such as a key
53 #define DEBUG_SUBSYSTEM S_SEC
55 #include <linux/types.h>
56 #include <linux/init.h>
57 #include <linux/module.h>
58 #include <linux/slab.h>
59 #include <linux/hash.h>
60 #include <linux/mutex.h>
61 #include <linux/sunrpc/cache.h>
63 #include <liblustre.h>
67 #include <obd_class.h>
68 #include <obd_support.h>
69 #include <lustre/lustre_idl.h>
70 #include <lustre_net.h>
71 #include <lustre_import.h>
72 #include <lustre_sec.h>
75 #include "gss_internal.h"
/* seconds a server thread waits for lsvcgssd to answer an init upcall */
#define GSS_SVC_UPCALL_TIMEOUT (20)

/* __ctx_index seeds the context handles suggested to user space;
 * __ctx_index_lock serializes access to it */
static spinlock_t __ctx_index_lock = SPIN_LOCK_UNLOCKED;
static __u64 __ctx_index;
/*
 * Return the next value of the global context index counter.
 * NOTE(review): the increment/return of __ctx_index appears elided in this
 * excerpt; only the locking around the critical section is visible.
 */
__u64 gss_get_next_ctx_index(void)
        spin_lock(&__ctx_index_lock);
        spin_unlock(&__ctx_index_lock);
/*
 * Hash @length bytes of @buf down to @bits bits.
 * NOTE(review): the byte-accumulation loop appears elided in this excerpt;
 * the visible remainder folds each completed machine word into the
 * accumulator with hash_long() and returns its top @bits bits.
 */
static inline unsigned long hash_mem(char *buf, int length, int bits)
        unsigned long hash = 0;
        /* fold once per full word accumulated */
        if ((len & (BITS_PER_LONG/8-1)) == 0)
                hash = hash_long(hash^l, BITS_PER_LONG);
        return hash >> (BITS_PER_LONG - bits);
/****************************************
 * rsi cache (RPCSEC_GSS init requests) *
 ****************************************/
#define RSI_HASHBITS (6)
#define RSI_HASHMAX (1 << RSI_HASHBITS)
#define RSI_HASHMASK (RSI_HASHMAX - 1)

/* rsi: one in-flight RPCSEC_GSS_INIT request, keyed by (in_handle, in_token).
 * NOTE(review): the 'struct rsi {' header and its cache_head member appear
 * elided in this excerpt; only the fields below are visible. */
        wait_queue_head_t waitq;                /* woken when the upcall reply arrives */
        rawobj_t in_handle, in_token;           /* client's request: the cache key */
        rawobj_t out_handle, out_token;         /* reply filled in by lsvcgssd */
        int major_status, minor_status;         /* GSS_Accept_sec_context results */
/* hash table backing the rsi cache */
static struct cache_head *rsi_table[RSI_HASHMAX];
static struct cache_detail rsi_cache;
#ifdef HAVE_SUNRPC_CACHE_V2
static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct rsi *item);
/* NOTE(review): the '#else' separating the v2 and v1 prototypes appears
 * elided in this excerpt */
static struct rsi *rsi_lookup(struct rsi *item, int set);
/* Hash an rsi entry by its (in_handle, in_token) key.
 * NOTE(review): the expression combining the two hash_mem() results and the
 * closing of the function appear elided in this excerpt. */
static inline int rsi_hash(struct rsi *item)
        return hash_mem((char *)item->in_handle.data, item->in_handle.len,
        hash_mem((char *)item->in_token.data, item->in_token.len,
153 static inline int __rsi_match(struct rsi *item, struct rsi *tmp)
155 return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
156 rawobj_equal(&item->in_token, &tmp->in_token));
159 static void rsi_free(struct rsi *rsi)
161 rawobj_free(&rsi->in_handle);
162 rawobj_free(&rsi->in_token);
163 rawobj_free(&rsi->out_handle);
164 rawobj_free(&rsi->out_token);
/*
 * Format an init upcall request for user space: lustre service id, peer
 * NID, a suggested context index, then the client's handle and token, all
 * emitted as hex words for lsvcgssd to consume.
 * NOTE(review): the declaration/initialization of 'index' appears elided in
 * this excerpt.
 */
static void rsi_request(struct cache_detail *cd,
                        struct cache_head *h,
                        char **bpp, int *blen)
        struct rsi *rsi = container_of(h, struct rsi, h);

        /* if in_handle is null, provide kernel suggestion */
        if (rsi->in_handle.len == 0)
                index = gss_get_next_ctx_index();

        qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
                     sizeof(rsi->lustre_svc));
        qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
        qword_addhex(bpp, blen, (char *) &index, sizeof(index));
        qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
        qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
187 static inline void __rsi_init(struct rsi *new, struct rsi *item)
189 new->out_handle = RAWOBJ_EMPTY;
190 new->out_token = RAWOBJ_EMPTY;
192 new->in_handle = item->in_handle;
193 item->in_handle = RAWOBJ_EMPTY;
194 new->in_token = item->in_token;
195 item->in_token = RAWOBJ_EMPTY;
197 new->lustre_svc = item->lustre_svc;
198 new->nid = item->nid;
199 init_waitqueue_head(&new->waitq);
202 static inline void __rsi_update(struct rsi *new, struct rsi *item)
204 LASSERT(new->out_handle.len == 0);
205 LASSERT(new->out_token.len == 0);
207 new->out_handle = item->out_handle;
208 item->out_handle = RAWOBJ_EMPTY;
209 new->out_token = item->out_token;
210 item->out_token = RAWOBJ_EMPTY;
212 new->major_status = item->major_status;
213 new->minor_status = item->minor_status;
#ifdef HAVE_SUNRPC_CACHE_V2

/* kref release callback for an rsi entry.
 * NOTE(review): the actual freeing (rsi_free + kfree) appears elided in
 * this excerpt. */
static void rsi_put(struct kref *ref)
        struct rsi *rsi = container_of(ref, struct rsi, h.ref);
        LASSERT(rsi->h.next == NULL);

/* cache_detail.match: compare two entries by the shared rsi key */
static int rsi_match(struct cache_head *a, struct cache_head *b)
        struct rsi *item = container_of(a, struct rsi, h);
        struct rsi *tmp = container_of(b, struct rsi, h);
        return __rsi_match(item, tmp);

/* cache_detail.init: move the lookup key from @citem into the new entry */
static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);
        __rsi_init(new, item);

/* cache_detail.update: move the upcall reply into the cached entry */
static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);
        __rsi_update(new, item);

/* cache_detail.alloc: allocate a blank rsi entry.
 * NOTE(review): the body appears elided in this excerpt. */
static struct cache_head *rsi_alloc(void)
/*
 * cache_detail.parse (v2): consume the reply lsvcgssd writes back for an
 * init upcall. Expected words: in_handle, in_token (the cache key),
 * expiry, major_status, minor_status, out_handle, out_token. The reply is
 * installed via rsi_update() and any thread waiting in
 * gss_svc_upcall_handle_init() is woken.
 * NOTE(review): buffer allocation, length checks and error branches appear
 * elided in this excerpt.
 */
static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
        struct rsi rsii, *rsip = NULL;
        int status = -EINVAL;
        memset(&rsii, 0, sizeof(rsii));
        /* in_handle (key, part 1) */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.in_handle, buf, len)) {
        /* in_token (key, part 2) */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.in_token, buf, len)) {
        rsip = rsi_lookup(&rsii);
        expiry = get_expiry(&mesg);
        /* major status of GSS_Accept_sec_context */
        len = qword_get(&mesg, buf, mlen);
        rsii.major_status = simple_strtol(buf, &ep, 10);
        /* minor status */
        len = qword_get(&mesg, buf, mlen);
        rsii.minor_status = simple_strtol(buf, &ep, 10);
        /* out_handle */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.out_handle, buf, len)) {
        /* out_token */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.out_token, buf, len)) {
        rsii.h.expiry_time = expiry;
        rsip = rsi_update(&rsii, rsip);
        /* wake any thread blocked on this reply, then drop our ref */
        wake_up_all(&rsip->waitq);
        cache_put(&rsip->h, &rsi_cache);
        CERROR("rsi parse error %d\n", status);
#else /* !HAVE_SUNRPC_CACHE_V2 */

/* Drop a reference on an rsi entry; free it when the last ref goes.
 * NOTE(review): the rsi_free() call before kfree appears elided in this
 * excerpt. */
static void rsi_put(struct cache_head *item, struct cache_detail *cd)
        struct rsi *rsi = container_of(item, struct rsi, h);
        LASSERT(atomic_read(&item->refcnt) > 0);
        if (cache_put(item, cd)) {
                LASSERT(item->next == NULL);
                kfree(rsi); /* created by cache mgmt using kmalloc */

/* v1 cache callbacks: thin wrappers over the shared __rsi_* helpers */
static inline int rsi_match(struct rsi *item, struct rsi *tmp)
        return __rsi_match(item, tmp);

static inline void rsi_init(struct rsi *new, struct rsi *item)
        __rsi_init(new, item);

static inline void rsi_update(struct rsi *new, struct rsi *item)
        __rsi_update(new, item);
/*
 * cache_detail.parse (v1): same wire format as the v2 variant above —
 * in_handle, in_token, expiry, major/minor status, out_handle, out_token —
 * but installs the reply through the template-generated rsi_lookup(.., 1)
 * and drops the reference with rsi_put().
 * NOTE(review): buffer allocation, length checks and error branches appear
 * elided in this excerpt.
 */
static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
        struct rsi rsii, *rsip = NULL;
        int status = -EINVAL;
        memset(&rsii, 0, sizeof(rsii));
        /* in_handle (key, part 1) */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.in_handle, buf, len)) {
        /* in_token (key, part 2) */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.in_token, buf, len)) {
        expiry = get_expiry(&mesg);
        /* major status */
        len = qword_get(&mesg, buf, mlen);
        rsii.major_status = simple_strtol(buf, &ep, 10);
        /* minor status */
        len = qword_get(&mesg, buf, mlen);
        rsii.minor_status = simple_strtol(buf, &ep, 10);
        /* out_handle */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.out_handle, buf, len)) {
        /* out_token */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.out_token, buf, len)) {
        rsii.h.expiry_time = expiry;
        rsip = rsi_lookup(&rsii, 1);
        /* wake any thread blocked on this reply, then drop our ref */
        wake_up_all(&rsip->waitq);
        rsi_put(&rsip->h, &rsi_cache);
        CERROR("rsi parse error %d\n", status);
#endif /* HAVE_SUNRPC_CACHE_V2 */

/* The rpcsec_init cache, exposed to user space as "auth.sptlrpc.init".
 * NOTE(review): the remaining v2-only members (match/init/alloc) and the
 * closing brace appear elided in this excerpt. */
static struct cache_detail rsi_cache = {
        .hash_size = RSI_HASHMAX,
        .hash_table = rsi_table,
        .name = "auth.sptlrpc.init",
        .cache_put = rsi_put,
        .cache_request = rsi_request,
        .cache_parse = rsi_parse,
#ifdef HAVE_SUNRPC_CACHE_V2
        .update = update_rsi,
#ifdef HAVE_SUNRPC_CACHE_V2

/* Look up an rsi entry by its key, returning a referenced entry.
 * NOTE(review): the NULL check on the sunrpc_cache_lookup() result appears
 * elided in this excerpt. */
static struct rsi *rsi_lookup(struct rsi *item)
        struct cache_head *ch;
        int hash = rsi_hash(item);
        ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
        return container_of(ch, struct rsi, h);

/* Install/replace the reply fields of the cached entry @old with @new's. */
static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
        struct cache_head *ch;
        int hash = rsi_hash(new);
        ch = sunrpc_cache_update(&rsi_cache, &new->h, &old->h, hash);
        return container_of(ch, struct rsi, h);

/* v1: rsi_lookup() is generated by the nfs cache template.
 * NOTE(review): the '#else'/'#endif' around this appear elided. */
static DefineSimpleCacheLookup(rsi, 0)
/****************************************
 * rsc cache (established contexts)     *
 ****************************************/
#define RSC_HASHBITS (10)
#define RSC_HASHMAX (1 << RSC_HASHBITS)
#define RSC_HASHMASK (RSC_HASHMAX - 1)

/* rsc: one established security context, keyed by its context handle.
 * NOTE(review): the 'struct rsc {' header, its cache_head and handle
 * members appear elided in this excerpt. */
        struct obd_device *target;      /* target device this ctx talks to */
        struct gss_svc_ctx ctx;         /* the GSS service context proper */

/* hash table backing the rsc cache */
static struct cache_head *rsc_table[RSC_HASHMAX];
static struct cache_detail rsc_cache;
#ifdef HAVE_SUNRPC_CACHE_V2
static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct rsc *item);
/* NOTE(review): the '#else' separating the v2 and v1 prototypes appears
 * elided in this excerpt */
static struct rsc *rsc_lookup(struct rsc *item, int set);
544 static void rsc_free(struct rsc *rsci)
546 rawobj_free(&rsci->handle);
547 rawobj_free(&rsci->ctx.gsc_rvs_hdl);
548 lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
551 static inline int rsc_hash(struct rsc *rsci)
553 return hash_mem((char *)rsci->handle.data,
554 rsci->handle.len, RSC_HASHBITS);
557 static inline int __rsc_match(struct rsc *new, struct rsc *tmp)
559 return rawobj_equal(&new->handle, &tmp->handle);
/* Initialize a freshly allocated rsc entry from the lookup key in @tmp:
 * ownership of the handle buffer moves into @new and the context starts
 * zeroed with an empty reverse handle.
 * NOTE(review): one or two lines between the handle transfer and the
 * memset (possibly 'new->target = NULL;') appear elided in this excerpt. */
static inline void __rsc_init(struct rsc *new, struct rsc *tmp)
        new->handle = tmp->handle;
        tmp->handle = RAWOBJ_EMPTY;
        memset(&new->ctx, 0, sizeof(new->ctx));
        new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
/* Transfer the parsed context data from @tmp into the cached entry @new,
 * stealing the reverse handle and mech context so @tmp's cleanup won't free
 * them, and reset the sequence-check state for the new context.
 * NOTE(review): the line copying the context itself (before the ownership
 * transfers) appears elided in this excerpt. */
static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
        tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
        tmp->ctx.gsc_mechctx = NULL;
        memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
        spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
#ifdef HAVE_SUNRPC_CACHE_V2

/* kref release callback for an rsc entry.
 * NOTE(review): the actual freeing (rsc_free + kfree) appears elided in
 * this excerpt. */
static void rsc_put(struct kref *ref)
        struct rsc *rsci = container_of(ref, struct rsc, h.ref);
        LASSERT(rsci->h.next == NULL);

/* cache_detail.match: compare two entries by context handle */
static int rsc_match(struct cache_head *a, struct cache_head *b)
        struct rsc *new = container_of(a, struct rsc, h);
        struct rsc *tmp = container_of(b, struct rsc, h);
        return __rsc_match(new, tmp);

/* cache_detail.init: move the handle from @ctmp into the new entry */
static void rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);
        __rsc_init(new, tmp);

/* cache_detail.update: move the parsed context into the cached entry */
static void update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);
        __rsc_update(new, tmp);

/* cache_detail.alloc: allocate a blank rsc entry.
 * NOTE(review): the body appears elided in this excerpt. */
static struct cache_head * rsc_alloc(void)
/*
 * cache_detail.parse (v2): install an established context written down by
 * lsvcgssd. Expected words: handle, expiry, remote flag, root-user flag,
 * mds-user flag, mapped uid, then either NEGATIVE (entry is negated) or
 * uid, gid, mech name and the mech-specific context blob which is imported
 * via lgss_import_sec_context(). The expiry sent from user space is ignored
 * in favor of the one queried from the mech.
 * NOTE(review): many error-checking, cleanup and brace lines appear elided
 * in this excerpt.
 */
static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
        int len, rv, tmp_int;
        struct rsc rsci, *rscp = NULL;
        int status = -EINVAL;
        struct gss_api_mech *gm = NULL;

        memset(&rsci, 0, sizeof(rsci));

        /* context handle (cache key) */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        if (rawobj_alloc(&rsci.handle, buf, len))

        expiry = get_expiry(&mesg);

        /* remote (cross-realm) flag */
        rv = get_int(&mesg, &tmp_int);
                CERROR("fail to get remote flag\n");
        rsci.ctx.gsc_remote = (tmp_int != 0);

        /* root-user flag */
        rv = get_int(&mesg, &tmp_int);
                CERROR("fail to get oss user flag\n");
        rsci.ctx.gsc_usr_root = (tmp_int != 0);

        /* mds-user flag */
        rv = get_int(&mesg, &tmp_int);
                CERROR("fail to get mds user flag\n");
        rsci.ctx.gsc_usr_mds = (tmp_int != 0);

        /* mapped uid */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
                CERROR("fail to get mapped uid\n");

        rscp = rsc_lookup(&rsci);

        /* uid, or NEGATIVE */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
                CERROR("NOENT? set rsc entry negative\n");
                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
                unsigned long ctx_expiry;

                /* gid */
                if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))

                /* mech name */
                len = qword_get(&mesg, buf, mlen);
                gm = lgss_name_to_mech(buf);
                status = -EOPNOTSUPP;

                /* mech-specific data: */
                len = qword_get(&mesg, buf, mlen);
                tmp_buf.data = (unsigned char *)buf;
                if (lgss_import_sec_context(&tmp_buf, gm,
                                            &rsci.ctx.gsc_mechctx))

                /* currently the expiry time passed down from user-space
                 * is invalid, here we retrieve it from mech. */
                if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                        CERROR("unable to get expire time, drop it\n");
                expiry = (time_t) ctx_expiry;

        rsci.h.expiry_time = expiry;
        rscp = rsc_update(&rsci, rscp);
        cache_put(&rscp->h, &rsc_cache);
        CERROR("parse rsc error %d\n", status);
#else /* !HAVE_SUNRPC_CACHE_V2 */

/* Drop a reference on an rsc entry; free it when the last ref goes.
 * NOTE(review): the rsc_free() call before kfree appears elided in this
 * excerpt. */
static void rsc_put(struct cache_head *item, struct cache_detail *cd)
        struct rsc *rsci = container_of(item, struct rsc, h);
        LASSERT(atomic_read(&item->refcnt) > 0);
        if (cache_put(item, cd)) {
                LASSERT(item->next == NULL);
                kfree(rsci); /* created by cache mgmt using kmalloc */

/* v1 cache callbacks: thin wrappers over the shared __rsc_* helpers */
static inline int rsc_match(struct rsc *new, struct rsc *tmp)
        return __rsc_match(new, tmp);

static inline void rsc_init(struct rsc *new, struct rsc *tmp)
        __rsc_init(new, tmp);

static inline void rsc_update(struct rsc *new, struct rsc *tmp)
        __rsc_update(new, tmp);
/*
 * cache_detail.parse (v1): same wire format as the v2 variant above, but
 * installs the entry through the template-generated rsc_lookup(.., 1) and
 * drops the reference with rsc_put().
 * NOTE(review): many error-checking, cleanup and brace lines appear elided
 * in this excerpt.
 */
static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
        int len, rv, tmp_int;
        struct rsc rsci, *rscp = NULL;
        int status = -EINVAL;

        memset(&rsci, 0, sizeof(rsci));

        /* context handle (cache key) */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        if (rawobj_alloc(&rsci.handle, buf, len))

        expiry = get_expiry(&mesg);

        /* remote (cross-realm) flag */
        rv = get_int(&mesg, &tmp_int);
                CERROR("fail to get remote flag\n");
        rsci.ctx.gsc_remote = (tmp_int != 0);

        /* root-user flag */
        rv = get_int(&mesg, &tmp_int);
                CERROR("fail to get oss user flag\n");
        rsci.ctx.gsc_usr_root = (tmp_int != 0);

        /* mds-user flag */
        rv = get_int(&mesg, &tmp_int);
                CERROR("fail to get mds user flag\n");
        rsci.ctx.gsc_usr_mds = (tmp_int != 0);

        /* mapped uid */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
                CERROR("fail to get mapped uid\n");

        /* uid, or NEGATIVE */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
                CERROR("NOENT? set rsc entry negative\n");
                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
                struct gss_api_mech *gm;
                unsigned long ctx_expiry;

                /* gid */
                if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))

                /* mech name */
                len = qword_get(&mesg, buf, mlen);
                gm = lgss_name_to_mech(buf);
                status = -EOPNOTSUPP;

                /* mech-specific data: */
                len = qword_get(&mesg, buf, mlen);
                tmp_buf.data = (unsigned char *)buf;
                if (lgss_import_sec_context(&tmp_buf, gm,
                                            &rsci.ctx.gsc_mechctx)) {

                /* currently the expiry time passed down from user-space
                 * is invalid, here we retrieve it from mech. */
                if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                        CERROR("unable to get expire time, drop it\n");
                expiry = (time_t) ctx_expiry;

        rsci.h.expiry_time = expiry;
        rscp = rsc_lookup(&rsci, 1);
        rsc_put(&rscp->h, &rsc_cache);
        CERROR("parse rsc error %d\n", status);
#endif /* HAVE_SUNRPC_CACHE_V2 */

/* The established-context cache, exposed as "auth.sptlrpc.context".
 * Unlike rsi_cache it has no cache_request: entries are only pushed down
 * from user space, never requested via upcall.
 * NOTE(review): the remaining v2-only members and the closing brace appear
 * elided in this excerpt. */
static struct cache_detail rsc_cache = {
        .hash_size = RSC_HASHMAX,
        .hash_table = rsc_table,
        .name = "auth.sptlrpc.context",
        .cache_put = rsc_put,
        .cache_parse = rsc_parse,
#ifdef HAVE_SUNRPC_CACHE_V2
        .update = update_rsc,
#ifdef HAVE_SUNRPC_CACHE_V2

/* Look up an rsc entry by handle, returning a referenced entry.
 * NOTE(review): the NULL check on the sunrpc_cache_lookup() result appears
 * elided in this excerpt. */
static struct rsc *rsc_lookup(struct rsc *item)
        struct cache_head *ch;
        int hash = rsc_hash(item);
        ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
        return container_of(ch, struct rsc, h);

/* Install/replace the cached entry @old with the context data in @new. */
static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
        struct cache_head *ch;
        int hash = rsc_hash(new);
        ch = sunrpc_cache_update(&rsc_cache, &new->h, &old->h, hash);
        return container_of(ch, struct rsc, h);

/* v2 drops refs with the generic cache_put */
#define COMPAT_RSC_PUT(item, cd) cache_put((item), (cd))

/* v1: rsc_lookup() is generated by the nfs cache template; refs are dropped
 * with rsc_put(). NOTE(review): the '#else' between the variants appears
 * elided in this excerpt. */
static DefineSimpleCacheLookup(rsc, 0);
#define COMPAT_RSC_PUT(item, cd) rsc_put((item), (cd))
/****************************************
 * rsc cache flushing                   *
 ****************************************/
/* predicate type used by rsc_flush() to select entries */
typedef int rsc_entry_match(struct rsc *rscp, long data);

/*
 * Walk the whole rsc hash table under the write lock and invalidate (mark
 * CACHE_NEGATIVE and drop) every entry for which @match returns true.
 * NOTE(review): local declarations, the unhashing of matched entries and
 * the loop-advance lines appear elided in this excerpt.
 */
static void rsc_flush(rsc_entry_match *match, long data)
        struct cache_head **ch;
        write_lock(&rsc_cache.hash_lock);
        for (n = 0; n < RSC_HASHMAX; n++) {
                for (ch = &rsc_cache.hash_table[n]; *ch;) {
                        rscp = container_of(*ch, struct rsc, h);
                        if (!match(rscp, data)) {
                        /* it seems simply set NEGATIVE doesn't work */
                        set_bit(CACHE_NEGATIVE, &rscp->h.flags);
                        COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
        write_unlock(&rsc_cache.hash_lock);
/* rsc_flush() predicate: select contexts owned by @uid.
 * NOTE(review): the wildcard check (uid == -1 matching everything) appears
 * elided in this excerpt. */
static int match_uid(struct rsc *rscp, long uid)
        return ((int) rscp->ctx.gsc_uid == (int) uid);
996 static int match_target(struct rsc *rscp, long target)
998 return (rscp->target == (struct obd_device *) target);
/* Invalidate every context owned by @uid.
 * NOTE(review): the opening brace and any guard before the warning appear
 * elided in this excerpt. */
static inline void rsc_flush_uid(int uid)
        CWARN("flush all gss contexts...\n");
        rsc_flush(match_uid, (long) uid);
1009 static inline void rsc_flush_target(struct obd_device *target)
1011 rsc_flush(match_target, (long) target);
/* Public entry point: drop all cached GSS contexts for @target (used when
 * a target is being cleaned up). */
void gss_secsvc_flush(struct obd_device *target)
{
        rsc_flush_target(target);
}
EXPORT_SYMBOL(gss_secsvc_flush);
/*
 * Find the established context for @handle; on success returns a referenced
 * rsc entry validated by cache_check(), otherwise NULL.
 * NOTE(review): local declarations, the '#else'/'#endif' around the two
 * lookup variants, and the out/return paths appear elided in this excerpt.
 */
static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
        memset(&rsci, 0, sizeof(rsci));
        if (rawobj_dup(&rsci.handle, handle))
#ifdef HAVE_SUNRPC_CACHE_V2
        found = rsc_lookup(&rsci);
        /* v1 variant of the same lookup */
        found = rsc_lookup(&rsci, 0);
        /* validate the entry (and drop it if stale/negative) */
        if (cache_check(&rsc_cache, &found->h, NULL))
1044 int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
1045 struct gss_sec *gsec,
1046 struct gss_cli_ctx *gctx)
1048 struct rsc rsci, *rscp = NULL;
1049 unsigned long ctx_expiry;
1054 memset(&rsci, 0, sizeof(rsci));
1056 if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
1057 sizeof(gsec->gs_rvs_hdl)))
1058 GOTO(out, rc = -ENOMEM);
1060 rscp = rsc_lookup(&rsci);
1062 GOTO(out, rc = -ENOMEM);
1064 major = lgss_copy_reverse_context(gctx->gc_mechctx,
1065 &rsci.ctx.gsc_mechctx);
1066 if (major != GSS_S_COMPLETE)
1067 GOTO(out, rc = -ENOMEM);
1069 if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
1070 CERROR("unable to get expire time, drop it\n");
1071 GOTO(out, rc = -EINVAL);
1073 rsci.h.expiry_time = (time_t) ctx_expiry;
1076 rsci.ctx.gsc_usr_root = 1;
1077 rsci.ctx.gsc_usr_mds= 1;
1078 rsci.ctx.gsc_reverse = 1;
1080 rscp = rsc_update(&rsci, rscp);
1082 GOTO(out, rc = -ENOMEM);
1084 rscp->target = imp->imp_obd;
1085 rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
1087 CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
1088 &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
1092 cache_put(&rscp->h, &rsc_cache);
1096 CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
1097 gsec->gs_rvs_hdl, rc);
#else /* !HAVE_SUNRPC_CACHE_V2 */

/*
 * v1 variant of reverse-context installation: same logic as above but the
 * entry is created directly through the template-generated
 * rsc_lookup(.., 1) and released with rsc_put().
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): several declarations, braces and error-path lines appear
 * elided in this excerpt.
 */
int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
                                   struct gss_sec *gsec,
                                   struct gss_cli_ctx *gctx)
        struct rsc rsci, *rscp;
        unsigned long ctx_expiry;

        memset(&rsci, 0, sizeof(rsci));

        /* the reverse handle itself is the cache key */
        if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
                         sizeof(gsec->gs_rvs_hdl)))
                GOTO(out, rc = -ENOMEM);
        major = lgss_copy_reverse_context(gctx->gc_mechctx,
                                          &rsci.ctx.gsc_mechctx);
        if (major != GSS_S_COMPLETE)
                GOTO(out, rc = -ENOMEM);
        if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                CERROR("unable to get expire time, drop it\n");
                GOTO(out, rc = -ENOMEM);
        rsci.h.expiry_time = (time_t) ctx_expiry;
        /* reverse contexts act on behalf of root/mds */
        rsci.ctx.gsc_usr_root = 1;
        rsci.ctx.gsc_usr_mds= 1;
        rsci.ctx.gsc_reverse = 1;
        rscp = rsc_lookup(&rsci, 1);
                CERROR("rsc lookup failed\n");
                GOTO(out, rc = -ENOMEM);
        rscp->target = imp->imp_obd;
        rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
        CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
              &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
        rsc_put(&rscp->h, &rsc_cache);
        CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
               gsec->gs_rvs_hdl, rc);
/*
 * Force the reverse context identified by @handle to expire about 20
 * seconds from now (used when the peer's context is going away).
 * NOTE(review): local declarations, the NULL check on the lookup result and
 * the return appear elided in this excerpt.
 */
int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
        const cfs_time_t expire = 20;
        rscp = gss_svc_searchbyctx(handle);
                CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
                rscp->h.expiry_time = cfs_time_current_sec() + expire;
                COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
1174 int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
1176 struct rsc *rscp = container_of(ctx, struct rsc, ctx);
1178 return rawobj_dup(handle, &rscp->handle);
/*
 * Record @seq + 1 as the next reverse-direction sequence number on the
 * context identified by @handle.
 * NOTE(review): local declarations, the NULL check on the lookup result and
 * the return appear elided in this excerpt.
 */
int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
        rscp = gss_svc_searchbyctx(handle);
                CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
                       &rscp->ctx, rscp, seq + 1);
                rscp->ctx.gsc_rvs_seq = seq + 1;
                COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
/* Defer callback for cache_check(): gss upcall requests are never deferred.
 * NOTE(review): the body (presumably 'return NULL;') appears elided in this
 * excerpt. */
static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
/*
 * Handle an RPCSEC_GSS_INIT request from a client: key (handle, token) into
 * the rsi cache, wait (up to GSS_SVC_UPCALL_TIMEOUT seconds per check) for
 * lsvcgssd to answer the upcall, locate the context it established, attach
 * it to @grctx, and pack the reply (gss header + out_handle + out_token)
 * for the client.
 * Returns SECSVC_OK, SECSVC_COMPLETE (error already packed in reply) or
 * SECSVC_DROP.
 * NOTE(review): many lines (trailing parameters, braces, the wait loop
 * structure and several error branches) appear elided in this excerpt; the
 * comments below describe only the visible code.
 */
int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
                               struct gss_svc_reqctx *grctx,
                               struct gss_wire_ctx *gw,
                               struct obd_device *target,
        struct ptlrpc_reply_state *rs;
        struct rsc *rsci = NULL;
        struct rsi *rsip = NULL, rsikey;
        int replen = sizeof(struct ptlrpc_body);
        struct gss_rep_header *rephdr;
        int first_check = 1;
        int rc = SECSVC_DROP;

        /* build the rsi lookup key from the request */
        memset(&rsikey, 0, sizeof(rsikey));
        rsikey.lustre_svc = lustre_svc;
        rsikey.nid = (__u64) req->rq_peer.nid;

        /* duplicate context handle. for INIT it always 0 */
        if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
                CERROR("fail to dup context handle\n");
        if (rawobj_dup(&rsikey.in_token, in_token)) {
                CERROR("can't duplicate token\n");
                rawobj_free(&rsikey.in_handle);

#ifdef HAVE_SUNRPC_CACHE_V2
        rsip = rsi_lookup(&rsikey);
        /* NOTE(review): '#else' elided — v1 lookup variant follows */
        rsip = rsi_lookup(&rsikey, 0);
                CERROR("error in rsi_lookup.\n");
                /* on lookup failure, notify the client if possible */
                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        rc = SECSVC_COMPLETE;

        cache_get(&rsip->h); /* take an extra ref */
        init_waitqueue_head(&rsip->waitq);
        init_waitqueue_entry(&wait, current);
        add_wait_queue(&rsip->waitq, &wait);

        /* Note each time cache_check() will drop a reference if return
         * non-zero. We hold an extra reference on initial rsip, but must
         * take care of following calls. */
        rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
        /* sample validity under the hash lock before deciding to sleep */
        read_lock(&rsi_cache.hash_lock);
        valid = test_bit(CACHE_VALID, &rsip->h.flags);
        set_current_state(TASK_INTERRUPTIBLE);
        read_unlock(&rsi_cache.hash_lock);
        /* wait for lsvcgssd to fill in the reply */
        schedule_timeout(GSS_SVC_UPCALL_TIMEOUT * HZ);
        cache_get(&rsip->h);
        CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
        CWARN("cache_check return ENOENT, drop\n");
        /* if not the first check, we have to release the extra
         * reference we just added on it. */
        cache_put(&rsip->h, &rsi_cache);
        CDEBUG(D_SEC, "cache_check is good\n");
        remove_wait_queue(&rsip->waitq, &wait);
        cache_put(&rsip->h, &rsi_cache);
        GOTO(out, rc = SECSVC_DROP);

        /* the upcall reply names the established context via out_handle */
        rsci = gss_svc_searchbyctx(&rsip->out_handle);
                CERROR("authentication failed\n");
                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        rc = SECSVC_COMPLETE;
        cache_get(&rsci->h);
        grctx->src_ctx = &rsci->ctx;

        if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
                CERROR("failed duplicate reverse handle\n");
        rsci->target = target;

        CDEBUG(D_SEC, "server create rsc %p(%u->%s)\n",
               rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
                CERROR("handle size %u too large\n", rsip->out_handle.len);
                GOTO(out, rc = SECSVC_DROP);

        grctx->src_init = 1;
        grctx->src_reserve_len = size_round4(rsip->out_token.len);

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
                CERROR("failed to pack reply: %d\n", rc);
                GOTO(out, rc = SECSVC_DROP);

        /* fill the gss reply header, out_handle and out_token */
        rs = req->rq_reply_state;
        LASSERT(rs->rs_repbuf->lm_bufcount == 3);
        LASSERT(rs->rs_repbuf->lm_buflens[0] >=
                sizeof(*rephdr) + rsip->out_handle.len);
        LASSERT(rs->rs_repbuf->lm_buflens[2] >= rsip->out_token.len);

        rephdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        rephdr->gh_version = PTLRPC_GSS_VERSION;
        rephdr->gh_flags = 0;
        rephdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        rephdr->gh_major = rsip->major_status;
        rephdr->gh_minor = rsip->minor_status;
        rephdr->gh_seqwin = GSS_SEQ_WIN;
        rephdr->gh_handle.len = rsip->out_handle.len;
        memcpy(rephdr->gh_handle.data, rsip->out_handle.data,
               rsip->out_handle.len);

        memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0), rsip->out_token.data,
               rsip->out_token.len);

        rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
                                               rsip->out_token.len, 0);

        /* it looks like here we should put rsip also, but this mess up
         * with NFS cache mgmt code... FIXME */
        rsi_put(&rsip->h, &rsi_cache);

        /* if anything went wrong, we don't keep the context too */
        if (rc != SECSVC_OK)
                set_bit(CACHE_NEGATIVE, &rsci->h.flags);
        CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
               gss_handle_to_u64(&rsci->handle));
        COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
/*
 * Look up the established context named by @gw->gw_handle and return its
 * embedded gss_svc_ctx (NULL if not found).
 * NOTE(review): the local declaration, the NULL check on the lookup result
 * and the return path appear elided in this excerpt.
 */
struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
                                           struct gss_wire_ctx *gw)
        rsc = gss_svc_searchbyctx(&gw->gw_handle);
                CWARN("Invalid gss ctx idx "LPX64" from %s\n",
                      gss_handle_to_u64(&gw->gw_handle),
                      libcfs_nid2str(req->rq_peer.nid));
1401 void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx)
1403 struct rsc *rsc = container_of(ctx, struct rsc, ctx);
1405 COMPAT_RSC_PUT(&rsc->h, &rsc_cache);
1408 void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
1410 struct rsc *rsc = container_of(ctx, struct rsc, ctx);
1412 /* can't be found */
1413 set_bit(CACHE_NEGATIVE, &rsc->h.flags);
1414 /* to be removed at next scan */
1415 rsc->h.expiry_time = 1;
/*
 * Module init: register the rsi and rsc caches, then poll briefly so that
 * lsvcgssd has a chance to open the init channel before the first request,
 * and finally randomize the context index seed.
 * NOTE(review): local declarations, loop braces and the return appear
 * elided in this excerpt.
 */
int __init gss_init_svc_upcall(void)
        cache_register(&rsi_cache);
        cache_register(&rsc_cache);

        /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
         * the init upcall channel, otherwise there's big chance that the first
         * upcall issued before the channel be opened thus nfsv4 cache code will
         * drop the request directly, thus lead to unnecessary recovery time.
         * here we wait at maximum 1.5 seconds. */
        for (i = 0; i < 6; i++) {
                if (atomic_read(&rsi_cache.readers) > 0)
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ / 4);

        if (atomic_read(&rsi_cache.readers) == 0)
                CWARN("Init channel is not opened by lsvcgssd, following "
                      "request might be dropped until lsvcgssd is active\n");

        /* this helps reduce context index collisions. after server reboot,
         * conflicting requests from clients might be filtered out by initial
         * sequence number checking, thus no chance to send error notification
         * back to clients. */
        get_random_bytes(&__ctx_index, sizeof(__ctx_index));
/*
 * Module exit: purge and unregister both caches, logging (but not failing
 * on) any unregister error.
 * NOTE(review): local declarations and the closing brace appear elided in
 * this excerpt.
 */
void __exit gss_exit_svc_upcall(void)
        cache_purge(&rsi_cache);
        if ((rc = cache_unregister(&rsi_cache)))
                CERROR("unregister rsi cache: %d\n", rc);

        cache_purge(&rsc_cache);
        if ((rc = cache_unregister(&rsc_cache)))
                CERROR("unregister rsc cache: %d\n", rc);