/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * Neil Brown <neilb@cse.unsw.edu.au>
 * J. Bruce Fields <bfields@umich.edu>
 * Andy Adamson <andros@umich.edu>
 * Dug Song <dugsong@monkey.org>
 *
 * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and
 * rfc2078 (the gss-api).
 *
 * RPCSEC_GSS involves three stages:
 *  1/ context creation
 *  2/ data exchange
 *  3/ context destruction
 *
 * Context creation is handled largely by upcalls to user-space.
 *  In particular, GSS_Accept_sec_context is handled by an upcall.
 * Data exchange is handled entirely within the kernel.
 *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal and GSS_Unseal
 *  are in-kernel.
 * Context destruction is handled in-kernel.
 *  GSS_Delete_sec_context is in-kernel.
 *
 * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
 * The context handle and gss_token are used as a key into the
 * rpcsec_init cache.
 * The content of this cache includes some of the outputs of
 * GSS_Accept_sec_context, namely major_status, minor_status,
 * context_handle and reply_token.
 * These are sent back to the client.
 * Sequence window management is handled by the kernel. The window size
 * is currently a compile-time constant.
 *
 * When user-space is happy that a context is established, it places an
 * entry in the rpcsec_context cache. The key for this cache is the
 * context_handle. The content includes:
 *  uid/gidlist - for determining access rights
 *  mechanism type
 *  mechanism specific information, such as a key
 */
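
/*
 * Rough sketch of the resulting handshake (the authoritative flow is
 * gss_svc_upcall_handle_init() below):
 *
 *   client RPCSEC_GSS_INIT request
 *     -> rsi cache miss triggers rsi_request() upcall to lsvcgssd
 *     -> lsvcgssd runs GSS_Accept_sec_context and writes the result
 *        back through rsi_parse(); the established context is
 *        installed via a separate downcall handled by rsc_parse()
 *     -> the kernel replies to the client with major/minor status,
 *        out_handle and out_token taken from the rsi entry
 */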
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/mutex.h>
#include <linux/sunrpc/cache.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"
#define GSS_SVC_UPCALL_TIMEOUT  (20)

static spinlock_t __ctx_index_lock;
static __u64 __ctx_index;

__u64 gss_get_next_ctx_index(void)
{
        __u64 idx;

        spin_lock(&__ctx_index_lock);
        idx = __ctx_index++;
        spin_unlock(&__ctx_index_lock);

        return idx;
}
static inline unsigned long hash_mem(char *buf, int length, int bits)
{
        unsigned long hash = 0;
        unsigned long l = 0;
        int len = 0;
        unsigned char c;

        do {
                if (len == length) {
                        c = (char) len;
                        len = -1;
                } else
                        c = *buf++;

                l = (l << 8) | c;
                len++;

                if ((len & (BITS_PER_LONG/8-1)) == 0)
                        hash = hash_long(hash^l, BITS_PER_LONG);
        } while (len);

        return hash >> (BITS_PER_LONG - bits);
}
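
/*
 * hash_mem() packs the input a byte at a time into a native long and
 * mixes each full long into the running hash with hash_long(); a final
 * length byte is folded in as a terminator, so equal prefixes of
 * different lengths hash differently. Returning the top `bits` bits
 * gives an index directly usable for a table of size 1 << bits
 * (RSI_HASHBITS and RSC_HASHBITS below).
 */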
/* This compatibility code can be removed once kernel 3.3 is the minimum,
 * since cache_register_net/cache_unregister_net are exported there.
 * Note that since kernel 3.4 cache_register and cache_unregister
 * are removed. */
static inline int _cache_register_net(struct cache_detail *cd, struct net *net)
{
#ifdef HAVE_CACHE_REGISTER
        return cache_register(cd);
#else
        return cache_register_net(cd, net);
#endif
}

static inline void _cache_unregister_net(struct cache_detail *cd,
                                         struct net *net)
{
#ifdef HAVE_CACHE_REGISTER
        cache_unregister(cd);
#else
        cache_unregister_net(cd, net);
#endif
}
/****************************************
 * rsi cache                            *
 ****************************************/

#define RSI_HASHBITS    (6)
#define RSI_HASHMAX     (1 << RSI_HASHBITS)
#define RSI_HASHMASK    (RSI_HASHMAX - 1)
struct rsi {
        struct cache_head h;
        __u64 lustre_svc;
        __u64 nid;
        wait_queue_head_t waitq;
        rawobj_t in_handle, in_token;
        rawobj_t out_handle, out_token;
        int major_status, minor_status;
};

static struct cache_head *rsi_table[RSI_HASHMAX];
static struct cache_detail rsi_cache;
static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct rsi *item);
static inline int rsi_hash(struct rsi *item)
{
        return hash_mem((char *)item->in_handle.data, item->in_handle.len,
                        RSI_HASHBITS) ^
               hash_mem((char *)item->in_token.data, item->in_token.len,
                        RSI_HASHBITS);
}

static inline int __rsi_match(struct rsi *item, struct rsi *tmp)
{
        return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
                rawobj_equal(&item->in_token, &tmp->in_token));
}
static void rsi_free(struct rsi *rsi)
{
        rawobj_free(&rsi->in_handle);
        rawobj_free(&rsi->in_token);
        rawobj_free(&rsi->out_handle);
        rawobj_free(&rsi->out_token);
}
static void rsi_request(struct cache_detail *cd,
                        struct cache_head *h,
                        char **bpp, int *blen)
{
        struct rsi *rsi = container_of(h, struct rsi, h);
        __u64 index = 0;

        /* if in_handle is null, provide kernel suggestion */
        if (rsi->in_handle.len == 0)
                index = gss_get_next_ctx_index();

        qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
                     sizeof(rsi->lustre_svc));
        qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
        qword_addhex(bpp, blen, (char *) &index, sizeof(index));
        qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
        qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
        (*bpp)[-1] = '\n';
}
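
/*
 * The upcall message built above is one newline-terminated line;
 * qword_addhex() encodes each binary field as "\x<hexdigits> ", so
 * lsvcgssd roughly sees (field widths illustrative only):
 *
 *   \x<lustre_svc> \x<nid> \x<index> \x<in_handle> \x<in_token>\n
 *
 * in_handle is empty for a fresh RPCSEC_GSS_INIT, in which case the
 * kernel-suggested context index is supplied instead.
 */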
#ifdef HAVE_SUNRPC_UPCALL_HAS_3ARGS
static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
}
#else
static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall(cd, h);
}
#endif
static inline void __rsi_init(struct rsi *new, struct rsi *item)
{
        new->out_handle = RAWOBJ_EMPTY;
        new->out_token = RAWOBJ_EMPTY;

        new->in_handle = item->in_handle;
        item->in_handle = RAWOBJ_EMPTY;
        new->in_token = item->in_token;
        item->in_token = RAWOBJ_EMPTY;

        new->lustre_svc = item->lustre_svc;
        new->nid = item->nid;
        init_waitqueue_head(&new->waitq);
}
static inline void __rsi_update(struct rsi *new, struct rsi *item)
{
        LASSERT(new->out_handle.len == 0);
        LASSERT(new->out_token.len == 0);

        new->out_handle = item->out_handle;
        item->out_handle = RAWOBJ_EMPTY;
        new->out_token = item->out_token;
        item->out_token = RAWOBJ_EMPTY;

        new->major_status = item->major_status;
        new->minor_status = item->minor_status;
}

static void rsi_put(struct kref *ref)
{
        struct rsi *rsi = container_of(ref, struct rsi, h.ref);

        LASSERT(rsi->h.next == NULL);
        rsi_free(rsi);
        OBD_FREE_PTR(rsi);
}
static int rsi_match(struct cache_head *a, struct cache_head *b)
{
        struct rsi *item = container_of(a, struct rsi, h);
        struct rsi *tmp = container_of(b, struct rsi, h);

        return __rsi_match(item, tmp);
}

static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);

        __rsi_init(new, item);
}

static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
{
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);

        __rsi_update(new, item);
}

static struct cache_head *rsi_alloc(void)
{
        struct rsi *rsi;

        OBD_ALLOC_PTR(rsi);
        if (rsi)
                return &rsi->h;
        else
                return NULL;
}
static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char *buf = mesg;
        char *ep;
        int len;
        struct rsi rsii, *rsip = NULL;
        time_t expiry;
        int status = -EINVAL;
        ENTRY;

        memset(&rsii, 0, sizeof(rsii));

        /* handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        if (rawobj_alloc(&rsii.in_handle, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        /* token */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        if (rawobj_alloc(&rsii.in_token, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        rsip = rsi_lookup(&rsii);
        if (!rsip) goto out;

        rsii.h.flags = 0;
        /* expiry */
        expiry = get_expiry(&mesg);
        if (expiry == 0) goto out;

        /* major status */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0) goto out;
        rsii.major_status = simple_strtol(buf, &ep, 10);
        if (*ep) goto out;

        /* minor status */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0) goto out;
        rsii.minor_status = simple_strtol(buf, &ep, 10);
        if (*ep) goto out;

        /* out_handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        if (rawobj_alloc(&rsii.out_handle, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        /* out_token */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        if (rawobj_alloc(&rsii.out_token, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        rsii.h.expiry_time = expiry;
        rsip = rsi_update(&rsii, rsip);
        status = 0;
out:
        rsi_free(&rsii);
        if (rsip) {
                /* wake any thread parked in gss_svc_upcall_handle_init() */
                wake_up_all(&rsip->waitq);
                cache_put(&rsip->h, &rsi_cache);
        } else {
                status = -ENOMEM;
        }

        if (status)
                CERROR("rsi parse error %d\n", status);
        RETURN(status);
}
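
/*
 * For reference, the downcall lsvcgssd writes back into
 * "auth.sptlrpc.init" is a single line of qword-encoded fields in the
 * order parsed above (a sketch, not an authoritative grammar):
 *
 *   <in_handle> <in_token> <expiry> <major_status> <minor_status> \
 *       <out_handle> <out_token>\n
 */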
static struct cache_detail rsi_cache = {
        .hash_size      = RSI_HASHMAX,
        .hash_table     = rsi_table,
        .name           = "auth.sptlrpc.init",
        .cache_put      = rsi_put,
#ifndef HAVE_SUNRPC_UPCALL_HAS_3ARGS
        .cache_request  = rsi_request,
#endif
        .cache_upcall   = rsi_upcall,
        .cache_parse    = rsi_parse,
        .match          = rsi_match,
        .init           = rsi_init,
        .update         = update_rsi,
        .alloc          = rsi_alloc,
};
static struct rsi *rsi_lookup(struct rsi *item)
{
        struct cache_head *ch;
        int hash = rsi_hash(item);

        ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
        if (ch)
                return container_of(ch, struct rsi, h);
        else
                return NULL;
}

static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
{
        struct cache_head *ch;
        int hash = rsi_hash(new);

        ch = sunrpc_cache_update(&rsi_cache, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct rsi, h);
        else
                return NULL;
}
/****************************************
 * rsc cache                            *
 ****************************************/

#define RSC_HASHBITS    (10)
#define RSC_HASHMAX     (1 << RSC_HASHBITS)
#define RSC_HASHMASK    (RSC_HASHMAX - 1)

struct rsc {
        struct cache_head h;
        struct obd_device *target;
        rawobj_t handle;
        struct gss_svc_ctx ctx;
};

static struct cache_head *rsc_table[RSC_HASHMAX];
static struct cache_detail rsc_cache;
static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct rsc *item);
static void rsc_free(struct rsc *rsci)
{
        rawobj_free(&rsci->handle);
        rawobj_free(&rsci->ctx.gsc_rvs_hdl);
        lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
}

static inline int rsc_hash(struct rsc *rsci)
{
        return hash_mem((char *)rsci->handle.data,
                        rsci->handle.len, RSC_HASHBITS);
}

static inline int __rsc_match(struct rsc *new, struct rsc *tmp)
{
        return rawobj_equal(&new->handle, &tmp->handle);
}

static inline void __rsc_init(struct rsc *new, struct rsc *tmp)
{
        new->handle = tmp->handle;
        tmp->handle = RAWOBJ_EMPTY;

        new->target = NULL;
        memset(&new->ctx, 0, sizeof(new->ctx));
        new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
}

static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
{
        new->ctx = tmp->ctx;
        tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
        tmp->ctx.gsc_mechctx = NULL;

        memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
        spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
static void rsc_put(struct kref *ref)
{
        struct rsc *rsci = container_of(ref, struct rsc, h.ref);

        LASSERT(rsci->h.next == NULL);
        rsc_free(rsci);
        OBD_FREE_PTR(rsci);
}

static int rsc_match(struct cache_head *a, struct cache_head *b)
{
        struct rsc *new = container_of(a, struct rsc, h);
        struct rsc *tmp = container_of(b, struct rsc, h);

        return __rsc_match(new, tmp);
}

static void rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
{
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);

        __rsc_init(new, tmp);
}

static void update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
{
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);

        __rsc_update(new, tmp);
}

static struct cache_head *rsc_alloc(void)
{
        struct rsc *rsc;

        OBD_ALLOC_PTR(rsc);
        if (rsc)
                return &rsc->h;
        else
                return NULL;
}
static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char *buf = mesg;
        int len, rv, tmp_int;
        struct rsc rsci, *rscp = NULL;
        time_t expiry;
        int status = -EINVAL;
        struct gss_api_mech *gm = NULL;
        ENTRY;

        memset(&rsci, 0, sizeof(rsci));

        /* context handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        status = -ENOMEM;
        if (rawobj_alloc(&rsci.handle, buf, len))
                goto out;

        rsci.h.flags = 0;
        /* expiry */
        expiry = get_expiry(&mesg);
        status = -EINVAL;
        if (expiry == 0)
                goto out;

        /* remote flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("fail to get remote flag\n");
                goto out;
        }
        rsci.ctx.gsc_remote = (tmp_int != 0);

        /* root user flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("fail to get root user flag\n");
                goto out;
        }
        rsci.ctx.gsc_usr_root = (tmp_int != 0);

        /* mds user flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("fail to get mds user flag\n");
                goto out;
        }
        rsci.ctx.gsc_usr_mds = (tmp_int != 0);

        /* oss user flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("fail to get oss user flag\n");
                goto out;
        }
        rsci.ctx.gsc_usr_oss = (tmp_int != 0);

        /* mapped uid */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
        if (rv) {
                CERROR("fail to get mapped uid\n");
                goto out;
        }

        rscp = rsc_lookup(&rsci);
        if (!rscp)
                goto out;

        /* uid, or NEGATIVE */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
        if (rv == -EINVAL)
                goto out;
        if (rv == -ENOENT) {
                CERROR("NOENT? set rsc entry negative\n");
                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
        } else {
                rawobj_t tmp_buf;
                unsigned long ctx_expiry;

                /* gid */
                if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
                        goto out;

                /* mech name */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;
                gm = lgss_name_to_mech(buf);
                status = -EOPNOTSUPP;
                if (!gm)
                        goto out;

                status = -EINVAL;
                /* mech-specific data: */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;

                tmp_buf.len = len;
                tmp_buf.data = (unsigned char *)buf;
                if (lgss_import_sec_context(&tmp_buf, gm,
                                            &rsci.ctx.gsc_mechctx))
                        goto out;

                /* currently the expiry time passed down from user-space
                 * is invalid, so retrieve it from the mech instead. */
                if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                        CERROR("unable to get expire time, drop it\n");
                        goto out;
                }
                expiry = (time_t) ctx_expiry;
        }

        rsci.h.expiry_time = expiry;
        rscp = rsc_update(&rsci, rscp);
        status = 0;
out:
        if (gm)
                lgss_mech_put(gm);
        rsc_free(&rsci);
        if (rscp)
                cache_put(&rscp->h, &rsc_cache);
        else
                status = -ENOMEM;

        if (status)
                CERROR("parse rsc error %d\n", status);
        RETURN(status);
}
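
/*
 * The matching context downcall into "auth.sptlrpc.context" thus
 * carries, in the order parsed above (sketch):
 *
 *   <handle> <expiry> <remote> <root> <mds> <oss> <mapped_uid> \
 *       <uid|NEGATIVE> [<gid> <mech name> <mech-specific data>]\n
 *
 * where the bracketed fields are present only for a positive entry;
 * an ENOENT uid marks the cache entry negative instead.
 */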
static struct cache_detail rsc_cache = {
        .hash_size      = RSC_HASHMAX,
        .hash_table     = rsc_table,
        .name           = "auth.sptlrpc.context",
        .cache_put      = rsc_put,
        .cache_parse    = rsc_parse,
        .match          = rsc_match,
        .init           = rsc_init,
        .update         = update_rsc,
        .alloc          = rsc_alloc,
};

static struct rsc *rsc_lookup(struct rsc *item)
{
        struct cache_head *ch;
        int hash = rsc_hash(item);

        ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
        if (ch)
                return container_of(ch, struct rsc, h);
        else
                return NULL;
}

static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
{
        struct cache_head *ch;
        int hash = rsc_hash(new);

        ch = sunrpc_cache_update(&rsc_cache, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct rsc, h);
        else
                return NULL;
}
#define COMPAT_RSC_PUT(item, cd)        cache_put((item), (cd))

/****************************************
 * rsc cache flush                      *
 ****************************************/

typedef int rsc_entry_match(struct rsc *rscp, long data);

static void rsc_flush(rsc_entry_match *match, long data)
{
        struct cache_head **ch;
        struct rsc *rscp;
        int n;
        ENTRY;

        write_lock(&rsc_cache.hash_lock);
        for (n = 0; n < RSC_HASHMAX; n++) {
                for (ch = &rsc_cache.hash_table[n]; *ch;) {
                        rscp = container_of(*ch, struct rsc, h);

                        if (!match(rscp, data)) {
                                ch = &((*ch)->next);
                                continue;
                        }

                        /* it seems simply setting NEGATIVE doesn't work */
                        *ch = (*ch)->next;
                        rscp->h.next = NULL;
                        cache_get(&rscp->h);
                        set_bit(CACHE_NEGATIVE, &rscp->h.flags);
                        COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
                        rsc_cache.entries--;
                }
        }
        write_unlock(&rsc_cache.hash_lock);
        EXIT;
}

static int match_uid(struct rsc *rscp, long uid)
{
        if ((int) uid == -1)
                return 1;
        return ((int) rscp->ctx.gsc_uid == (int) uid);
}

static int match_target(struct rsc *rscp, long target)
{
        return (rscp->target == (struct obd_device *) target);
}

static inline void rsc_flush_uid(int uid)
{
        if (uid == -1)
                CWARN("flush all gss contexts...\n");

        rsc_flush(match_uid, (long) uid);
}

static inline void rsc_flush_target(struct obd_device *target)
{
        rsc_flush(match_target, (long) target);
}

void gss_secsvc_flush(struct obd_device *target)
{
        rsc_flush_target(target);
}
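
/*
 * gss_secsvc_flush() is the exported entry point; it is expected to be
 * called when a target obd_device shuts down, so that every cached
 * context still referencing it is unhashed and released before the
 * device disappears.
 */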
static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
        struct rsc rsci;
        struct rsc *found;

        memset(&rsci, 0, sizeof(rsci));
        if (rawobj_dup(&rsci.handle, handle))
                return NULL;

        found = rsc_lookup(&rsci);
        rsc_free(&rsci);
        if (!found)
                return NULL;
        if (cache_check(&rsc_cache, &found->h, NULL))
                return NULL;
        return found;
}
int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
                                   struct gss_sec *gsec,
                                   struct gss_cli_ctx *gctx)
{
        struct rsc rsci, *rscp = NULL;
        unsigned long ctx_expiry;
        __u32 major;
        int rc;
        ENTRY;

        memset(&rsci, 0, sizeof(rsci));

        if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
                         sizeof(gsec->gs_rvs_hdl)))
                GOTO(out, rc = -ENOMEM);

        rscp = rsc_lookup(&rsci);
        if (rscp == NULL)
                GOTO(out, rc = -ENOMEM);

        major = lgss_copy_reverse_context(gctx->gc_mechctx,
                                          &rsci.ctx.gsc_mechctx);
        if (major != GSS_S_COMPLETE)
                GOTO(out, rc = -ENOMEM);

        if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                CERROR("unable to get expire time, drop it\n");
                GOTO(out, rc = -EINVAL);
        }
        rsci.h.expiry_time = (time_t) ctx_expiry;

        if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0)
                rsci.ctx.gsc_usr_mds = 1;
        else if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0)
                rsci.ctx.gsc_usr_oss = 1;
        else
                rsci.ctx.gsc_usr_root = 1;

        rscp = rsc_update(&rsci, rscp);
        if (rscp == NULL)
                GOTO(out, rc = -ENOMEM);

        rscp->target = imp->imp_obd;
        rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);

        CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
              &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
        rc = 0;
out:
        if (rscp)
                cache_put(&rscp->h, &rsc_cache);
        rsc_free(&rsci);

        if (rc)
                CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
                       gsec->gs_rvs_hdl, rc);
        RETURN(rc);
}
int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
{
        const cfs_time_t expire = 20;
        struct rsc *rscp;

        rscp = gss_svc_searchbyctx(handle);
        if (rscp) {
                CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
                       &rscp->ctx, rscp);

                rscp->h.expiry_time = cfs_time_current_sec() + expire;
                COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
        }
        return 0;
}
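
/*
 * Note the expiring reverse context is kept around for ~20 more seconds
 * (the `expire` constant above) rather than being dropped immediately,
 * presumably so that replies already in flight can still be verified.
 */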
int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
{
        struct rsc *rscp = container_of(ctx, struct rsc, ctx);

        return rawobj_dup(handle, &rscp->handle);
}

int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
{
        struct rsc *rscp;

        rscp = gss_svc_searchbyctx(handle);
        if (rscp) {
                CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
                       &rscp->ctx, rscp, seq + 1);

                rscp->ctx.gsc_rvs_seq = seq + 1;
                COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
        }
        return 0;
}
static struct cache_deferred_req *cache_upcall_defer(struct cache_req *req)
{
        return NULL;
}
static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
                               struct gss_svc_reqctx *grctx,
                               struct gss_wire_ctx *gw,
                               struct obd_device *target,
                               __u32 lustre_svc,
                               rawobj_t *rvs_hdl,
                               rawobj_t *in_token)
{
        struct ptlrpc_reply_state *rs;
        struct rsc *rsci = NULL;
        struct rsi *rsip = NULL, rsikey;
        wait_queue_t wait;
        int replen = sizeof(struct ptlrpc_body);
        struct gss_rep_header *rephdr;
        int first_check = 1;
        int rc = SECSVC_DROP;
        ENTRY;

        memset(&rsikey, 0, sizeof(rsikey));
        rsikey.lustre_svc = lustre_svc;
        rsikey.nid = (__u64) req->rq_peer.nid;

        /* duplicate context handle. for INIT it's always 0 */
        if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
                CERROR("fail to dup context handle\n");
                GOTO(out, rc);
        }

        if (rawobj_dup(&rsikey.in_token, in_token)) {
                CERROR("can't duplicate token\n");
                rawobj_free(&rsikey.in_handle);
                GOTO(out, rc);
        }

        rsip = rsi_lookup(&rsikey);
        rsi_free(&rsikey);
        if (!rsip) {
                CERROR("error in rsi_lookup.\n");

                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        rc = SECSVC_COMPLETE;

                GOTO(out, rc);
        }

        cache_get(&rsip->h); /* take an extra ref */
        init_waitqueue_head(&rsip->waitq);
        init_waitqueue_entry_current(&wait);
        add_wait_queue(&rsip->waitq, &wait);

cache_check:
        /* Note each time cache_check() will drop a reference if return
         * non-zero. We hold an extra reference on initial rsip, but must
         * take care of following calls. */
        rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
        switch (rc) {
        case -ETIMEDOUT:
        case -EAGAIN: {
                int valid;

                if (first_check) {
                        first_check = 0;

                        read_lock(&rsi_cache.hash_lock);
                        valid = test_bit(CACHE_VALID, &rsip->h.flags);
                        if (valid == 0)
                                set_current_state(TASK_INTERRUPTIBLE);
                        read_unlock(&rsi_cache.hash_lock);

                        if (valid == 0) {
                                unsigned long jiffies;
                                jiffies = msecs_to_jiffies(MSEC_PER_SEC *
                                          GSS_SVC_UPCALL_TIMEOUT);
                                schedule_timeout(jiffies);
                        }
                        cache_get(&rsip->h);
                        goto cache_check;
                }
                CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
                break;
        }
        case -ENOENT:
                CWARN("cache_check return ENOENT, drop\n");
                break;
        case 0:
                /* if not the first check, we have to release the extra
                 * reference we just added on it. */
                if (!first_check)
                        cache_put(&rsip->h, &rsi_cache);
                CDEBUG(D_SEC, "cache_check is good\n");
                break;
        }

        remove_wait_queue(&rsip->waitq, &wait);
        cache_put(&rsip->h, &rsi_cache);

        if (rc)
                GOTO(out, rc = SECSVC_DROP);

        rc = SECSVC_DROP;
        rsci = gss_svc_searchbyctx(&rsip->out_handle);
        if (!rsci) {
                CERROR("authentication failed\n");

                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        rc = SECSVC_COMPLETE;

                GOTO(out, rc);
        }

        grctx->src_ctx = &rsci->ctx;

        if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
                CERROR("failed duplicate reverse handle\n");
                GOTO(out, rc);
        }

        rsci->target = target;

        CDEBUG(D_SEC, "server create rsc %p(%u->%s)\n",
               rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
                CERROR("handle size %u too large\n", rsip->out_handle.len);
                GOTO(out, rc = SECSVC_DROP);
        }

        grctx->src_init = 1;
        grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
        if (rc) {
                CERROR("failed to pack reply: %d\n", rc);
                GOTO(out, rc = SECSVC_DROP);
        }

        rs = req->rq_reply_state;
        LASSERT(rs->rs_repbuf->lm_bufcount == 3);
        LASSERT(rs->rs_repbuf->lm_buflens[0] >=
                sizeof(*rephdr) + rsip->out_handle.len);
        LASSERT(rs->rs_repbuf->lm_buflens[2] >= rsip->out_token.len);

        rephdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        rephdr->gh_version = PTLRPC_GSS_VERSION;
        rephdr->gh_flags = 0;
        rephdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        rephdr->gh_major = rsip->major_status;
        rephdr->gh_minor = rsip->minor_status;
        rephdr->gh_seqwin = GSS_SEQ_WIN;
        rephdr->gh_handle.len = rsip->out_handle.len;
        memcpy(rephdr->gh_handle.data, rsip->out_handle.data,
               rsip->out_handle.len);

        memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0), rsip->out_token.data,
               rsip->out_token.len);

        rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
                                               rsip->out_token.len, 0);

        rc = SECSVC_OK;
out:
        /* it looks like here we should put rsip also, but this messes up
         * with NFS cache mgmt code... FIXME */
#if 0
        if (rsip)
                rsi_put(&rsip->h, &rsi_cache);
#endif

        if (rsci) {
                /* if anything went wrong, we don't keep the context too */
                if (rc != SECSVC_OK)
                        set_bit(CACHE_NEGATIVE, &rsci->h.flags);
                else
                        CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
                               gss_handle_to_u64(&rsci->handle));

                COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
        }
        RETURN(rc);
}
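
/*
 * As the LASSERTs above suggest, the INIT reply buffer is laid out as
 * three segments: [0] the gss_rep_header (including out_handle),
 * [1] the embedded lustre message holding the ptlrpc_body, and [2] the
 * raw out_token, which is shrunk to its real length before sending.
 */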
struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
                                           struct gss_wire_ctx *gw)
{
        struct rsc *rsc;

        rsc = gss_svc_searchbyctx(&gw->gw_handle);
        if (!rsc) {
                CWARN("Invalid gss ctx idx "LPX64" from %s\n",
                      gss_handle_to_u64(&gw->gw_handle),
                      libcfs_nid2str(req->rq_peer.nid));
                return NULL;
        }

        return &rsc->ctx;
}

void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx)
{
        struct rsc *rsc = container_of(ctx, struct rsc, ctx);

        COMPAT_RSC_PUT(&rsc->h, &rsc_cache);
}

void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
{
        struct rsc *rsc = container_of(ctx, struct rsc, ctx);

        /* can't be found */
        set_bit(CACHE_NEGATIVE, &rsc->h.flags);
        /* to be removed at next scan */
        rsc->h.expiry_time = 1;
}
int __init gss_init_svc_upcall(void)
{
        int i, rc;

        spin_lock_init(&__ctx_index_lock);
        /*
         * this helps reduce context index conflicts. after a server reboot,
         * conflicting requests from clients might be filtered out by the
         * initial sequence number checking, leaving no chance to send an
         * error notification back to those clients.
         */
        cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));

        rc = _cache_register_net(&rsi_cache, &init_net);
        if (rc != 0)
                return rc;

        rc = _cache_register_net(&rsc_cache, &init_net);
        if (rc != 0) {
                _cache_unregister_net(&rsi_cache, &init_net);
                return rc;
        }

        /* FIXME this looks stupid. we intend to give lsvcgssd a chance to
         * open the init upcall channel, otherwise there's a big chance that
         * the first upcall is issued before the channel is opened, in which
         * case the nfsv4 cache code will drop the request directly, leading
         * to unnecessary recovery time. here we wait at most 1.5 seconds. */
        for (i = 0; i < 6; i++) {
                if (atomic_read(&rsi_cache.readers) > 0)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                LASSERT(msecs_to_jiffies(MSEC_PER_SEC) >= 4);
                schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
        }

        if (atomic_read(&rsi_cache.readers) == 0)
                CWARN("Init channel is not opened by lsvcgssd, following "
                      "requests might be dropped until lsvcgssd is active\n");

        return 0;
}
void gss_exit_svc_upcall(void)
{
        cache_purge(&rsi_cache);
        _cache_unregister_net(&rsi_cache, &init_net);

        cache_purge(&rsc_cache);
        _cache_unregister_net(&rsc_cache, &init_net);
}