2 * Modifications for Lustre
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
6 * Copyright (c) 2012, 2014, Intel Corporation.
8 * Author: Eric Mei <ericm@clusterfs.com>
12 * Neil Brown <neilb@cse.unsw.edu.au>
13 * J. Bruce Fields <bfields@umich.edu>
14 * Andy Adamson <andros@umich.edu>
15 * Dug Song <dugsong@monkey.org>
17 * RPCSEC_GSS server authentication.
18 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
19 * (krb5-gss-api).
21 * The RPCSEC_GSS involves three stages:
22 *  1/ context creation
23 *  2/ data exchange
24 *  3/ context destruction
26 * Context creation is handled largely by upcalls to user-space.
27 * In particular, GSS_Accept_sec_context is handled by an upcall
28 * Data exchange is handled entirely within the kernel
29 * In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
30 * Context destruction is handled in-kernel
31 * GSS_Delete_sec_context is in-kernel
33 * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
34 * The context handle and gss_token are used as a key into the rpcsec_init cache.
35 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
36 * being major_status, minor_status, context_handle, reply_token.
37 * These are sent back to the client.
38 * Sequence window management is handled by the kernel. The window size is currently
39 * a compile-time constant.
41 * When user-space is happy that a context is established, it places an entry
42 * in the rpcsec_context cache. The key for this cache is the context_handle.
43 * The content includes:
44 * uid/gidlist - for determining access rights
45 * mechanism type
46 * mechanism specific information, such as a key
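/*
 * Editorial sketch (not part of the original header), showing how per-request
 * code would consume an established entry from the context cache described
 * above: it is fetched by the wire context handle and its uid/gid, flags and
 * mech-specific data are then consulted. do_authorization() is a hypothetical
 * helper; gss_svc_upcall_get_ctx()/gss_svc_upcall_put_ctx() are defined later
 * in this file.
 *
 *	struct gss_svc_ctx *ctx = gss_svc_upcall_get_ctx(req, gw);
 *
 *	if (ctx) {
 *		do_authorization(ctx->gsc_uid, ctx->gsc_gid); // hypothetical
 *		gss_svc_upcall_put_ctx(ctx);
 *	}
 */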
50 #define DEBUG_SUBSYSTEM S_SEC
51 #include <linux/types.h>
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/slab.h>
56 #include <linux/hash.h>
57 #include <linux/mutex.h>
58 #include <linux/sunrpc/cache.h>
62 #include <obd_class.h>
63 #include <obd_support.h>
64 #include <lustre_import.h>
65 #include <lustre_net.h>
66 #include <lustre_nodemap.h>
67 #include <lustre_sec.h>
70 #include "gss_internal.h"
72 #include "gss_crypto.h"
74 #define GSS_SVC_UPCALL_TIMEOUT (20)
76 static DEFINE_SPINLOCK(__ctx_index_lock);
77 static __u64 __ctx_index;
79 unsigned int krb5_allow_old_client_csum;
81 __u64 gss_get_next_ctx_index(void)
85 spin_lock(&__ctx_index_lock);
87 spin_unlock(&__ctx_index_lock);
92 static inline unsigned long hash_mem(char *buf, int length, int bits)
94 unsigned long hash = 0;
109 if ((len & (BITS_PER_LONG/8-1)) == 0)
110 hash = hash_long(hash^l, BITS_PER_LONG);
113 return hash >> (BITS_PER_LONG - bits);
116 /****************************************
117 * rpc sec init (rsi) cache *
118 ****************************************/
120 #define RSI_HASHBITS (6)
121 #define RSI_HASHMAX (1 << RSI_HASHBITS)
122 #define RSI_HASHMASK (RSI_HASHMAX - 1)
128 char nm_name[LUSTRE_NODEMAP_NAME_LENGTH + 1];
129 wait_queue_head_t waitq;
130 rawobj_t in_handle, in_token;
131 rawobj_t out_handle, out_token;
132 int major_status, minor_status;
135 #ifdef HAVE_CACHE_HEAD_HLIST
136 static struct hlist_head rsi_table[RSI_HASHMAX];
138 static struct cache_head *rsi_table[RSI_HASHMAX];
140 static struct cache_detail rsi_cache;
141 static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
142 static struct rsi *rsi_lookup(struct rsi *item);
144 #ifdef HAVE_CACHE_DETAIL_WRITERS
145 static inline int channel_users(struct cache_detail *cd)
147 return atomic_read(&cd->writers);
150 static inline int channel_users(struct cache_detail *cd)
152 return atomic_read(&cd->readers);
156 static inline int rsi_hash(struct rsi *item)
158 return hash_mem((char *)item->in_handle.data, item->in_handle.len,
160 hash_mem((char *)item->in_token.data, item->in_token.len,
164 static inline int __rsi_match(struct rsi *item, struct rsi *tmp)
166 return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
167 rawobj_equal(&item->in_token, &tmp->in_token));
170 static void rsi_free(struct rsi *rsi)
172 rawobj_free(&rsi->in_handle);
173 rawobj_free(&rsi->in_token);
174 rawobj_free(&rsi->out_handle);
175 rawobj_free(&rsi->out_token);
178 /* See handle_channel_req() in userspace for where the upcall data is read */
179 static void rsi_request(struct cache_detail *cd,
180 struct cache_head *h,
181 char **bpp, int *blen)
183 struct rsi *rsi = container_of(h, struct rsi, h);
186 /* if in_handle is null, provide kernel suggestion */
187 if (rsi->in_handle.len == 0)
188 index = gss_get_next_ctx_index();
190 qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
191 sizeof(rsi->lustre_svc));
192 qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
193 qword_addhex(bpp, blen, (char *) &index, sizeof(index));
194 qword_addhex(bpp, blen, (char *) rsi->nm_name,
195 strlen(rsi->nm_name) + 1);
196 qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
197 qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
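/*
 * Editorial note (assumes the standard sunrpc qword_addhex() encoding): the
 * upcall line that lsvcgssd's handle_channel_req() reads is, roughly,
 *
 *	\x<lustre_svc> \x<nid> \x<ctx index> \x<nm_name\0> \x<in_handle> \x<in_token>\n
 *
 * i.e. hex-encoded, space-separated fields, where <ctx index> carries the
 * kernel-suggested value only when in_handle is empty (a fresh context).
 */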
201 static inline void __rsi_init(struct rsi *new, struct rsi *item)
203 new->out_handle = RAWOBJ_EMPTY;
204 new->out_token = RAWOBJ_EMPTY;
206 new->in_handle = item->in_handle;
207 item->in_handle = RAWOBJ_EMPTY;
208 new->in_token = item->in_token;
209 item->in_token = RAWOBJ_EMPTY;
211 new->lustre_svc = item->lustre_svc;
212 new->nid = item->nid;
213 memcpy(new->nm_name, item->nm_name, sizeof(item->nm_name));
214 init_waitqueue_head(&new->waitq);
217 static inline void __rsi_update(struct rsi *new, struct rsi *item)
219 LASSERT(new->out_handle.len == 0);
220 LASSERT(new->out_token.len == 0);
222 new->out_handle = item->out_handle;
223 item->out_handle = RAWOBJ_EMPTY;
224 new->out_token = item->out_token;
225 item->out_token = RAWOBJ_EMPTY;
227 new->major_status = item->major_status;
228 new->minor_status = item->minor_status;
231 static void rsi_put(struct kref *ref)
233 struct rsi *rsi = container_of(ref, struct rsi, h.ref);
235 #ifdef HAVE_CACHE_HEAD_HLIST
236 LASSERT(rsi->h.cache_list.next == NULL);
238 LASSERT(rsi->h.next == NULL);
244 static int rsi_match(struct cache_head *a, struct cache_head *b)
246 struct rsi *item = container_of(a, struct rsi, h);
247 struct rsi *tmp = container_of(b, struct rsi, h);
249 return __rsi_match(item, tmp);
252 static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
254 struct rsi *new = container_of(cnew, struct rsi, h);
255 struct rsi *item = container_of(citem, struct rsi, h);
257 __rsi_init(new, item);
260 static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
262 struct rsi *new = container_of(cnew, struct rsi, h);
263 struct rsi *item = container_of(citem, struct rsi, h);
265 __rsi_update(new, item);
268 static struct cache_head *rsi_alloc(void)
279 static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
283 struct rsi rsii, *rsip = NULL;
285 int status = -EINVAL;
289 memset(&rsii, 0, sizeof(rsii));
292 len = qword_get(&mesg, buf, mlen);
295 if (rawobj_alloc(&rsii.in_handle, buf, len)) {
301 len = qword_get(&mesg, buf, mlen);
304 if (rawobj_alloc(&rsii.in_token, buf, len)) {
309 rsip = rsi_lookup(&rsii);
315 expiry = get_expiry(&mesg);
319 len = qword_get(&mesg, buf, mlen);
324 status = kstrtoint(buf, 10, &rsii.major_status);
329 len = qword_get(&mesg, buf, mlen);
335 status = kstrtoint(buf, 10, &rsii.minor_status);
340 len = qword_get(&mesg, buf, mlen);
343 if (rawobj_alloc(&rsii.out_handle, buf, len)) {
349 len = qword_get(&mesg, buf, mlen);
352 if (rawobj_alloc(&rsii.out_token, buf, len)) {
357 rsii.h.expiry_time = expiry;
358 rsip = rsi_update(&rsii, rsip);
363 wake_up_all(&rsip->waitq);
364 cache_put(&rsip->h, &rsi_cache);
370 CERROR("rsi parse error %d\n", status);
374 static struct cache_detail rsi_cache = {
375 .hash_size = RSI_HASHMAX,
376 .hash_table = rsi_table,
377 .name = "auth.sptlrpc.init",
378 .cache_put = rsi_put,
379 .cache_request = rsi_request,
380 .cache_upcall = sunrpc_cache_pipe_upcall,
381 .cache_parse = rsi_parse,
384 .update = update_rsi,
388 static struct rsi *rsi_lookup(struct rsi *item)
390 struct cache_head *ch;
391 int hash = rsi_hash(item);
393 ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
395 return container_of(ch, struct rsi, h);
400 static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
402 struct cache_head *ch;
403 int hash = rsi_hash(new);
405 ch = sunrpc_cache_update(&rsi_cache, &new->h, &old->h, hash);
407 return container_of(ch, struct rsi, h);
412 /****************************************
413 * rpc sec context (rsc) cache *
414 ****************************************/
416 #define RSC_HASHBITS (10)
417 #define RSC_HASHMAX (1 << RSC_HASHBITS)
418 #define RSC_HASHMASK (RSC_HASHMAX - 1)
422 struct obd_device *target;
424 struct gss_svc_ctx ctx;
427 #ifdef HAVE_CACHE_HEAD_HLIST
428 static struct hlist_head rsc_table[RSC_HASHMAX];
430 static struct cache_head *rsc_table[RSC_HASHMAX];
432 static struct cache_detail rsc_cache;
433 static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
434 static struct rsc *rsc_lookup(struct rsc *item);
436 static void rsc_free(struct rsc *rsci)
438 rawobj_free(&rsci->handle);
439 rawobj_free(&rsci->ctx.gsc_rvs_hdl);
440 lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
443 static inline int rsc_hash(struct rsc *rsci)
445 return hash_mem((char *)rsci->handle.data,
446 rsci->handle.len, RSC_HASHBITS);
449 static inline int __rsc_match(struct rsc *new, struct rsc *tmp)
451 return rawobj_equal(&new->handle, &tmp->handle);
454 static inline void __rsc_init(struct rsc *new, struct rsc *tmp)
456 new->handle = tmp->handle;
457 tmp->handle = RAWOBJ_EMPTY;
460 memset(&new->ctx, 0, sizeof(new->ctx));
461 new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
464 static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
467 tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
468 tmp->ctx.gsc_mechctx = NULL;
470 memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
471 spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
474 static void rsc_put(struct kref *ref)
476 struct rsc *rsci = container_of(ref, struct rsc, h.ref);
478 #ifdef HAVE_CACHE_HEAD_HLIST
479 LASSERT(rsci->h.cache_list.next == NULL);
481 LASSERT(rsci->h.next == NULL);
487 static int rsc_match(struct cache_head *a, struct cache_head *b)
489 struct rsc *new = container_of(a, struct rsc, h);
490 struct rsc *tmp = container_of(b, struct rsc, h);
492 return __rsc_match(new, tmp);
495 static void rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
497 struct rsc *new = container_of(cnew, struct rsc, h);
498 struct rsc *tmp = container_of(ctmp, struct rsc, h);
500 __rsc_init(new, tmp);
503 static void update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
505 struct rsc *new = container_of(cnew, struct rsc, h);
506 struct rsc *tmp = container_of(ctmp, struct rsc, h);
508 __rsc_update(new, tmp);
511 static struct cache_head * rsc_alloc(void)
522 static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
525 int len, rv, tmp_int;
526 struct rsc rsci, *rscp = NULL;
528 int status = -EINVAL;
529 struct gss_api_mech *gm = NULL;
531 memset(&rsci, 0, sizeof(rsci));
534 len = qword_get(&mesg, buf, mlen);
535 if (len < 0) goto out;
537 if (rawobj_alloc(&rsci.handle, buf, len))
542 expiry = get_expiry(&mesg);
548 rv = get_int(&mesg, &tmp_int);
550 CERROR("fail to get remote flag\n");
553 rsci.ctx.gsc_remote = (tmp_int != 0);
556 rv = get_int(&mesg, &tmp_int);
558 CERROR("fail to get root user flag\n");
561 rsci.ctx.gsc_usr_root = (tmp_int != 0);
564 rv = get_int(&mesg, &tmp_int);
566 CERROR("fail to get mds user flag\n");
569 rsci.ctx.gsc_usr_mds = (tmp_int != 0);
572 rv = get_int(&mesg, &tmp_int);
574 CERROR("fail to get oss user flag\n");
577 rsci.ctx.gsc_usr_oss = (tmp_int != 0);
580 rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
582 CERROR("fail to get mapped uid\n");
586 rscp = rsc_lookup(&rsci);
590 /* uid, or NEGATIVE */
591 rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
595 CERROR("NOENT? set rsc entry negative\n");
596 set_bit(CACHE_NEGATIVE, &rsci.h.flags);
602 if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
606 len = qword_get(&mesg, buf, mlen);
609 gm = lgss_name_to_mech(buf);
610 status = -EOPNOTSUPP;
615 /* mech-specific data: */
616 len = qword_get(&mesg, buf, mlen);
621 tmp_buf.data = (unsigned char *)buf;
622 if (lgss_import_sec_context(&tmp_buf, gm,
623 &rsci.ctx.gsc_mechctx))
626 /* set to seconds since machine booted */
627 expiry = ktime_get_seconds();
629 /* currently the expiry time passed down from user-space
630 * is invalid, here we retrieve it from the mech instead.
632 if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
633 CERROR("unable to get expire time, drop it\n");
637 /* ctx_expiry is the number of seconds since Jan 1 1970.
638 * We want just the number of seconds into the future.
640 expiry += ctx_expiry - ktime_get_real_seconds();
643 rsci.h.expiry_time = expiry;
644 rscp = rsc_update(&rsci, rscp);
651 cache_put(&rscp->h, &rsc_cache);
656 CERROR("parse rsc error %d\n", status);
660 static struct cache_detail rsc_cache = {
661 .hash_size = RSC_HASHMAX,
662 .hash_table = rsc_table,
663 .name = "auth.sptlrpc.context",
664 .cache_put = rsc_put,
665 .cache_parse = rsc_parse,
668 .update = update_rsc,
672 static struct rsc *rsc_lookup(struct rsc *item)
674 struct cache_head *ch;
675 int hash = rsc_hash(item);
677 ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
679 return container_of(ch, struct rsc, h);
684 static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
686 struct cache_head *ch;
687 int hash = rsc_hash(new);
689 ch = sunrpc_cache_update(&rsc_cache, &new->h, &old->h, hash);
691 return container_of(ch, struct rsc, h);
696 #define COMPAT_RSC_PUT(item, cd) cache_put((item), (cd))
698 /****************************************
700 ****************************************/
702 static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
707 memset(&rsci, 0, sizeof(rsci));
708 if (rawobj_dup(&rsci.handle, handle))
711 found = rsc_lookup(&rsci);
715 if (cache_check(&rsc_cache, &found->h, NULL))
720 int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
721 struct gss_sec *gsec,
722 struct gss_cli_ctx *gctx)
724 struct rsc rsci, *rscp = NULL;
730 memset(&rsci, 0, sizeof(rsci));
732 if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
733 sizeof(gsec->gs_rvs_hdl)))
734 GOTO(out, rc = -ENOMEM);
736 rscp = rsc_lookup(&rsci);
738 GOTO(out, rc = -ENOMEM);
740 major = lgss_copy_reverse_context(gctx->gc_mechctx,
741 &rsci.ctx.gsc_mechctx);
742 if (major != GSS_S_COMPLETE)
743 GOTO(out, rc = -ENOMEM);
745 if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
746 CERROR("unable to get expire time, drop it\n");
747 GOTO(out, rc = -EINVAL);
749 rsci.h.expiry_time = (time_t) ctx_expiry;
751 switch (imp->imp_obd->u.cli.cl_sp_to) {
753 rsci.ctx.gsc_usr_mds = 1;
756 rsci.ctx.gsc_usr_oss = 1;
759 rsci.ctx.gsc_usr_root = 1;
762 /* by convention, all 3 set to 1 means MGS */
763 rsci.ctx.gsc_usr_mds = 1;
764 rsci.ctx.gsc_usr_oss = 1;
765 rsci.ctx.gsc_usr_root = 1;
771 rscp = rsc_update(&rsci, rscp);
773 GOTO(out, rc = -ENOMEM);
775 rscp->target = imp->imp_obd;
776 rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
778 CWARN("create reverse svc ctx %p to %s: idx %#llx\n",
779 &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
783 cache_put(&rscp->h, &rsc_cache);
787 CERROR("create reverse svc ctx: idx %#llx, rc %d\n",
788 gsec->gs_rvs_hdl, rc);
792 int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
794 const time64_t expire = 20;
797 rscp = gss_svc_searchbyctx(handle);
799 CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
802 rscp->h.expiry_time = ktime_get_real_seconds() + expire;
803 COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
808 int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
810 struct rsc *rscp = container_of(ctx, struct rsc, ctx);
812 return rawobj_dup(handle, &rscp->handle);
815 int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
819 rscp = gss_svc_searchbyctx(handle);
821 CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
822 &rscp->ctx, rscp, seq + 1);
824 rscp->ctx.gsc_rvs_seq = seq + 1;
825 COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
830 static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
834 static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
836 int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
837 struct gss_svc_reqctx *grctx,
838 struct gss_wire_ctx *gw,
839 struct obd_device *target,
844 struct ptlrpc_reply_state *rs;
845 struct rsc *rsci = NULL;
846 struct rsi *rsip = NULL, rsikey;
847 wait_queue_entry_t wait;
848 int replen = sizeof(struct ptlrpc_body);
849 struct gss_rep_header *rephdr;
851 int rc = SECSVC_DROP;
854 memset(&rsikey, 0, sizeof(rsikey));
855 rsikey.lustre_svc = lustre_svc;
856 /* In case of MR, rq_peer is not the NID from which the request was received,
857 * but the primary NID of the peer.
858 * So we need rq_source, which contains the NID actually in use.
860 rsikey.nid = (__u64) req->rq_source.nid;
861 nodemap_test_nid(req->rq_peer.nid, rsikey.nm_name,
862 sizeof(rsikey.nm_name));
864 /* duplicate the context handle; for INIT it is always 0 */
865 if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
866 CERROR("fail to dup context handle\n");
870 if (rawobj_dup(&rsikey.in_token, in_token)) {
871 CERROR("can't duplicate token\n");
872 rawobj_free(&rsikey.in_handle);
876 rsip = rsi_lookup(&rsikey);
879 CERROR("error in rsi_lookup.\n");
881 if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
882 rc = SECSVC_COMPLETE;
887 cache_get(&rsip->h); /* take an extra ref */
888 init_waitqueue_head(&rsip->waitq);
889 init_waitqueue_entry(&wait, current);
890 add_wait_queue(&rsip->waitq, &wait);
893 /* Note that each call to cache_check() drops a reference if it returns
894 * non-zero. We hold an extra reference on the initial rsip, but must
895 * take care of subsequent calls. */
896 rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
905 cache_read_lock(&rsi_cache);
906 valid = test_bit(CACHE_VALID, &rsip->h.flags);
908 set_current_state(TASK_INTERRUPTIBLE);
909 cache_read_unlock(&rsi_cache);
912 unsigned long timeout;
914 timeout = cfs_time_seconds(GSS_SVC_UPCALL_TIMEOUT);
915 schedule_timeout(timeout);
920 CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
924 CDEBUG(D_SEC, "cache_check return ENOENT, drop\n");
927 /* if not the first check, we have to release the extra
928 * reference we just added on it. */
930 cache_put(&rsip->h, &rsi_cache);
931 CDEBUG(D_SEC, "cache_check is good\n");
935 remove_wait_queue(&rsip->waitq, &wait);
936 cache_put(&rsip->h, &rsi_cache);
939 GOTO(out, rc = SECSVC_DROP);
942 rsci = gss_svc_searchbyctx(&rsip->out_handle);
944 CERROR("authentication failed\n");
946 /* the gss mechanism returned major and minor codes, so we return
947 * those in the error message */
948 if (!gss_pack_err_notify(req, rsip->major_status,
950 rc = SECSVC_COMPLETE;
955 grctx->src_ctx = &rsci->ctx;
958 if (gw->gw_flags & LUSTRE_GSS_PACK_KCSUM) {
959 grctx->src_ctx->gsc_mechctx->hash_func = gss_digest_hash;
960 } else if (!strcmp(grctx->src_ctx->gsc_mechctx->mech_type->gm_name,
962 !krb5_allow_old_client_csum) {
963 CWARN("%s: deny connection from '%s' due to missing 'krb_csum' feature, set 'sptlrpc.gss.krb5_allow_old_client_csum=1' to allow, but recommend client upgrade: rc = %d\n",
964 target->obd_name, libcfs_nid2str(req->rq_peer.nid),
966 GOTO(out, rc = SECSVC_DROP);
968 grctx->src_ctx->gsc_mechctx->hash_func =
969 gss_digest_hash_compat;
972 if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
973 CERROR("failed duplicate reverse handle\n");
977 rsci->target = target;
979 CDEBUG(D_SEC, "server create rsc %p(%u->%s)\n",
980 rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));
982 if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
983 CERROR("handle size %u too large\n", rsip->out_handle.len);
984 GOTO(out, rc = SECSVC_DROP);
988 grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);
990 rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
992 CERROR("failed to pack reply: %d\n", rc);
993 GOTO(out, rc = SECSVC_DROP);
996 rs = req->rq_reply_state;
997 LASSERT(rs->rs_repbuf->lm_bufcount == 3);
998 LASSERT(rs->rs_repbuf->lm_buflens[0] >=
999 sizeof(*rephdr) + rsip->out_handle.len);
1000 LASSERT(rs->rs_repbuf->lm_buflens[2] >= rsip->out_token.len);
1002 rephdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
1003 rephdr->gh_version = PTLRPC_GSS_VERSION;
1004 rephdr->gh_flags = 0;
1005 rephdr->gh_proc = PTLRPC_GSS_PROC_ERR;
1006 rephdr->gh_major = rsip->major_status;
1007 rephdr->gh_minor = rsip->minor_status;
1008 rephdr->gh_seqwin = GSS_SEQ_WIN;
1009 rephdr->gh_handle.len = rsip->out_handle.len;
1010 memcpy(rephdr->gh_handle.data, rsip->out_handle.data,
1011 rsip->out_handle.len);
1013 memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0), rsip->out_token.data,
1014 rsip->out_token.len);
1016 rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
1017 rsip->out_token.len, 0);
1022 /* it looks like we should put rsip here as well, but this messes up
1023 * the NFS cache mgmt code... FIXME
1026 * rsi_put(&rsip->h, &rsi_cache); */
1029 /* if anything went wrong, we don't keep the context either */
1030 if (rc != SECSVC_OK)
1031 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
1033 CDEBUG(D_SEC, "create rsc with idx %#llx\n",
1034 gss_handle_to_u64(&rsci->handle));
1036 COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
1041 struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
1042 struct gss_wire_ctx *gw)
1046 rsc = gss_svc_searchbyctx(&gw->gw_handle);
1048 CWARN("Invalid gss ctx idx %#llx from %s\n",
1049 gss_handle_to_u64(&gw->gw_handle),
1050 libcfs_nid2str(req->rq_peer.nid));
1057 void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx)
1059 struct rsc *rsc = container_of(ctx, struct rsc, ctx);
1061 COMPAT_RSC_PUT(&rsc->h, &rsc_cache);
1064 void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
1066 struct rsc *rsc = container_of(ctx, struct rsc, ctx);
1068 /* can't be found */
1069 set_bit(CACHE_NEGATIVE, &rsc->h.flags);
1070 /* to be removed at next scan */
1071 rsc->h.expiry_time = 1;
1074 int __init gss_init_svc_upcall(void)
1079 * this helps reduce context index collisions. after a server reboot,
1080 * conflicting requests from clients might be filtered out by the initial
1081 * sequence number check, leaving no chance to send an error notification
1084 get_random_bytes(&__ctx_index, sizeof(__ctx_index));
1086 rc = cache_register_net(&rsi_cache, &init_net);
1090 rc = cache_register_net(&rsc_cache, &init_net);
1092 cache_unregister_net(&rsi_cache, &init_net);
1096 /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
1097 * the init upcall channel, otherwise there's a big chance that the first
1098 * upcall is issued before the channel is opened, thus the nfsv4 cache code
1099 * will drop the request directly, leading to unnecessary recovery time.
1100 * Here we wait at most 1.5 seconds.
1102 for (i = 0; i < 6; i++) {
1103 if (channel_users(&rsi_cache) > 0)
1105 schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
1108 if (channel_users(&rsi_cache) == 0)
1109 CWARN("Init channel is not opened by lsvcgssd, following "
1110 "request might be dropped until lsvcgssd is active\n");
1115 void gss_exit_svc_upcall(void)
1117 cache_purge(&rsi_cache);
1118 cache_unregister_net(&rsi_cache, &init_net);
1120 cache_purge(&rsc_cache);
1121 cache_unregister_net(&rsc_cache, &init_net);