2 * Modifications for Lustre
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
6 * Copyright (c) 2012, 2014, Intel Corporation.
8 * Author: Eric Mei <ericm@clusterfs.com>
12 * Neil Brown <neilb@cse.unsw.edu.au>
13 * J. Bruce Fields <bfields@umich.edu>
14 * Andy Adamson <andros@umich.edu>
15 * Dug Song <dugsong@monkey.org>
17 * RPCSEC_GSS server authentication.
18 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
21 * The RPCSEC_GSS involves three stages:
24 * 3/ context destruction
26 * Context creation is handled largely by upcalls to user-space.
27 * In particular, GSS_Accept_sec_context is handled by an upcall
28 * Data exchange is handled entirely within the kernel
29 * In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
30 * Context destruction is handled in-kernel
31 * GSS_Delete_sec_context is in-kernel
33 * Context creation is initiated by a RPCSEC_GSS_INIT request arriving.
34 * The context handle and gss_token are used as a key into the rpcsec_init cache.
35 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
36 * being major_status, minor_status, context_handle, reply_token.
37 * These are sent back to the client.
38 * Sequence window management is handled by the kernel. The window size is currently
39 * a compile time constant.
41 * When user-space is happy that a context is established, it places an entry
42 * in the rpcsec_context cache. The key for this cache is the context_handle.
43 * The content includes:
44 * uid/gidlist - for determining access rights
46 * mechanism specific information, such as a key
50 #define DEBUG_SUBSYSTEM S_SEC
51 #include <linux/types.h>
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/slab.h>
56 #include <linux/mutex.h>
57 #include <linux/binfmts.h>
62 #include <obd_class.h>
63 #include <obd_support.h>
64 #include <lustre_import.h>
65 #include <lustre_net.h>
66 #include <lustre_nodemap.h>
67 #include <lustre_sec.h>
68 #include <libcfs/linux/linux-hash.h>
71 #include "gss_internal.h"
73 #include "gss_crypto.h"
75 static DEFINE_SPINLOCK(__ctx_index_lock);
76 static __u64 __ctx_index;
78 unsigned int krb5_allow_old_client_csum;
80 __u64 gss_get_next_ctx_index(void)
84 spin_lock(&__ctx_index_lock);
86 spin_unlock(&__ctx_index_lock);
91 static inline unsigned long hash_mem(char *buf, int length, int bits)
93 unsigned long hash = 0;
108 if ((len & (BITS_PER_LONG/8-1)) == 0)
109 hash = cfs_hash_long(hash^l, BITS_PER_LONG);
112 return hash >> (BITS_PER_LONG - bits);
115 /* This is a little bit of a concern but we need to make our own hash64 function
116 * as the one from the kernel seems to be buggy by returning a u32:
117 * static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
119 #if BITS_PER_LONG == 64
120 static __always_inline __u64 gss_hash_64(__u64 val, unsigned int bits)
123 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
139 /* High bits are more random, so use them. */
140 return hash >> (64 - bits);
143 static inline unsigned long hash_mem_64(char *buf, int length, int bits)
145 unsigned long hash = 0;
160 if ((len & (BITS_PER_LONG/8-1)) == 0)
161 hash = gss_hash_64(hash^l, BITS_PER_LONG);
164 return hash >> (BITS_PER_LONG - bits);
166 #endif /* BITS_PER_LONG == 64 */
168 /****************************************
169 * rpc sec init (rsi) cache *
170 ****************************************/
172 #define RSI_HASHBITS (6)
174 static void rsi_entry_init(struct upcall_cache_entry *entry,
177 struct gss_rsi *rsi = &entry->u.rsi;
178 struct gss_rsi *tmp = args;
180 rsi->si_uc_entry = entry;
181 rawobj_dup(&rsi->si_in_handle, &tmp->si_in_handle);
182 rawobj_dup(&rsi->si_in_token, &tmp->si_in_token);
183 rsi->si_out_handle = RAWOBJ_EMPTY;
184 rsi->si_out_token = RAWOBJ_EMPTY;
186 rsi->si_lustre_svc = tmp->si_lustre_svc;
187 rsi->si_nid4 = tmp->si_nid4;
188 memcpy(rsi->si_nm_name, tmp->si_nm_name, sizeof(tmp->si_nm_name));
191 static void __rsi_free(struct gss_rsi *rsi)
193 rawobj_free(&rsi->si_in_handle);
194 rawobj_free(&rsi->si_in_token);
195 rawobj_free(&rsi->si_out_handle);
196 rawobj_free(&rsi->si_out_token);
199 static void rsi_entry_free(struct upcall_cache *cache,
200 struct upcall_cache_entry *entry)
202 struct gss_rsi *rsi = &entry->u.rsi;
207 static inline int rsi_entry_hash(struct gss_rsi *rsi)
209 #if BITS_PER_LONG == 64
210 return hash_mem_64((char *)rsi->si_in_handle.data,
211 rsi->si_in_handle.len, RSI_HASHBITS) ^
212 hash_mem_64((char *)rsi->si_in_token.data,
213 rsi->si_in_token.len, RSI_HASHBITS);
215 return hash_mem((char *)rsi->si_in_handle.data, rsi->si_in_handle.len,
217 hash_mem((char *)rsi->si_in_token.data, rsi->si_in_token.len,
222 static inline int __rsi_entry_match(rawobj_t *h1, rawobj_t *h2,
223 rawobj_t *t1, rawobj_t *t2)
225 return !(rawobj_equal(h1, h2) && rawobj_equal(t1, t2));
228 static inline int rsi_entry_match(struct gss_rsi *rsi, struct gss_rsi *tmp)
230 return __rsi_entry_match(&rsi->si_in_handle, &tmp->si_in_handle,
231 &rsi->si_in_token, &tmp->si_in_token);
234 /* Returns 0 to tell this is a match */
235 static inline int rsi_upcall_compare(struct upcall_cache *cache,
236 struct upcall_cache_entry *entry,
237 __u64 key, void *args)
239 struct gss_rsi *rsi1 = &entry->u.rsi;
240 struct gss_rsi *rsi2 = args;
242 return rsi_entry_match(rsi1, rsi2);
245 /* See handle_channel_request() userspace for where the upcall data is read */
246 static int rsi_do_upcall(struct upcall_cache *cache,
247 struct upcall_cache_entry *entry)
249 int size, len, *blen;
250 char *buffer, *bp, **bpp;
252 [0] = cache->uc_upcall,
254 [2] = cache->uc_name,
261 [1] = "PATH=/sbin:/usr/sbin",
265 struct gss_rsi *rsi = &entry->u.rsi;
270 CDEBUG(D_SEC, "rsi upcall '%s' on '%s'\n",
271 cache->uc_upcall, cache->uc_name);
273 size = 24 + 1 + /* ue_key is uint64_t */
274 12 + 1 + /* si_lustre_svc is __u32*/
275 18 + 1 + /* si_nid4 is lnet_nid_t, hex with leading 0x */
276 18 + 1 + /* index is __u64, hex with leading 0x */
277 strlen(rsi->si_nm_name) + 1 +
278 BASE64URL_CHARS(rsi->si_in_handle.len) + 1 +
279 BASE64URL_CHARS(rsi->si_in_token.len) + 1 +
281 if (size > MAX_ARG_STRLEN)
283 OBD_ALLOC_LARGE(buffer, size);
292 /* if in_handle is null, provide kernel suggestion */
293 if (rsi->si_in_handle.len == 0)
294 index = gss_get_next_ctx_index();
296 /* entry->ue_key is put into args sent via upcall, so that it can be
297 * returned by userspace. This will help find cache entry at downcall,
298 * without unnecessary recomputation of the hash.
300 gss_u64_write_string(bpp, blen, entry->ue_key);
301 gss_u64_write_string(bpp, blen, rsi->si_lustre_svc);
302 gss_u64_write_hex_string(bpp, blen, rsi->si_nid4);
303 gss_u64_write_hex_string(bpp, blen, index);
304 gss_string_write(bpp, blen, (char *) rsi->si_nm_name);
305 gss_base64url_encode(bpp, blen, rsi->si_in_handle.data,
306 rsi->si_in_handle.len);
307 gss_base64url_encode(bpp, blen, rsi->si_in_token.data,
308 rsi->si_in_token.len);
313 down_read(&cache->uc_upcall_rwsem);
315 rc = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
317 up_read(&cache->uc_upcall_rwsem);
319 CERROR("%s: error invoking upcall %s %s (time %ldus): rc = %d\n",
320 cache->uc_name, argv[0], argv[2],
321 (long)ktime_us_delta(end, start), rc);
323 CDEBUG(D_SEC, "%s: invoked upcall %s %s (time %ldus)\n",
324 cache->uc_name, argv[0], argv[2],
325 (long)ktime_us_delta(end, start));
329 OBD_FREE_LARGE(buffer, size);
333 static inline int rsi_downcall_compare(struct upcall_cache *cache,
334 struct upcall_cache_entry *entry,
335 __u64 key, void *args)
337 struct gss_rsi *rsi = &entry->u.rsi;
338 struct rsi_downcall_data *sid = args;
339 char *mesg = sid->sid_val;
340 rawobj_t handle, token;
344 /* sid_val starts with handle and token */
347 len = gss_buffer_get(&mesg, &handle.len, &handle.data);
348 sid->sid_offset = mesg - p;
352 len = gss_buffer_get(&mesg, &token.len, &token.data);
353 sid->sid_offset += mesg - p;
355 return __rsi_entry_match(&rsi->si_in_handle, &handle,
356 &rsi->si_in_token, &token);
359 static int rsi_parse_downcall(struct upcall_cache *cache,
360 struct upcall_cache_entry *entry,
363 struct gss_rsi *rsi = &entry->u.rsi;
364 struct rsi_downcall_data *sid = args;
365 int mlen = sid->sid_len;
366 char *mesg = sid->sid_val + sid->sid_offset;
367 char *buf = sid->sid_val;
368 int status = -EINVAL;
376 rsi->si_major_status = sid->sid_maj_stat;
377 rsi->si_minor_status = sid->sid_min_stat;
379 /* in_handle and in_token have already been consumed in
380 * rsi_downcall_compare(). sid_offset gives next field.
384 len = gss_buffer_read(&mesg, buf, mlen);
387 if (rawobj_alloc(&rsi->si_out_handle, buf, len)) {
393 len = gss_buffer_read(&mesg, buf, mlen);
396 if (rawobj_alloc(&rsi->si_out_token, buf, len)) {
401 entry->ue_expire = 0;
405 CDEBUG(D_OTHER, "rsi parse %p: %d\n", rsi, status);
409 struct gss_rsi *rsi_entry_get(struct upcall_cache *cache, struct gss_rsi *rsi)
411 struct upcall_cache_entry *entry;
412 int hash = rsi_entry_hash(rsi);
415 return ERR_PTR(-ENOENT);
417 entry = upcall_cache_get_entry(cache, (__u64)hash, rsi);
418 if (unlikely(!entry))
419 return ERR_PTR(-ENOENT);
421 return ERR_CAST(entry);
423 return &entry->u.rsi;
426 void rsi_entry_put(struct upcall_cache *cache, struct gss_rsi *rsi)
431 upcall_cache_put_entry(cache, rsi->si_uc_entry);
434 void rsi_flush(struct upcall_cache *cache, int hash)
437 upcall_cache_flush_idle(cache);
439 upcall_cache_flush_one(cache, (__u64)hash, NULL);
442 struct upcall_cache_ops rsi_upcall_cache_ops = {
443 .init_entry = rsi_entry_init,
444 .free_entry = rsi_entry_free,
445 .upcall_compare = rsi_upcall_compare,
446 .downcall_compare = rsi_downcall_compare,
447 .do_upcall = rsi_do_upcall,
448 .parse_downcall = rsi_parse_downcall,
451 struct upcall_cache *rsicache;
453 /****************************************
454 * rpc sec context (rsc) cache *
455 ****************************************/
457 #define RSC_HASHBITS (10)
459 static void rsc_entry_init(struct upcall_cache_entry *entry,
462 struct gss_rsc *rsc = &entry->u.rsc;
463 struct gss_rsc *tmp = args;
465 rsc->sc_uc_entry = entry;
466 rawobj_dup(&rsc->sc_handle, &tmp->sc_handle);
468 rsc->sc_target = NULL;
469 memset(&rsc->sc_ctx, 0, sizeof(rsc->sc_ctx));
470 rsc->sc_ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
472 memset(&rsc->sc_ctx.gsc_seqdata, 0, sizeof(rsc->sc_ctx.gsc_seqdata));
473 spin_lock_init(&rsc->sc_ctx.gsc_seqdata.ssd_lock);
476 void __rsc_free(struct gss_rsc *rsc)
478 rawobj_free(&rsc->sc_handle);
479 rawobj_free(&rsc->sc_ctx.gsc_rvs_hdl);
480 lgss_delete_sec_context(&rsc->sc_ctx.gsc_mechctx);
483 static void rsc_entry_free(struct upcall_cache *cache,
484 struct upcall_cache_entry *entry)
486 struct gss_rsc *rsc = &entry->u.rsc;
491 static inline int rsc_entry_hash(struct gss_rsc *rsc)
493 #if BITS_PER_LONG == 64
494 return hash_mem_64((char *)rsc->sc_handle.data,
495 rsc->sc_handle.len, RSC_HASHBITS);
497 return hash_mem((char *)rsc->sc_handle.data,
498 rsc->sc_handle.len, RSC_HASHBITS);
502 static inline int __rsc_entry_match(rawobj_t *h1, rawobj_t *h2)
504 return !(rawobj_equal(h1, h2));
507 static inline int rsc_entry_match(struct gss_rsc *rsc, struct gss_rsc *tmp)
509 return __rsc_entry_match(&rsc->sc_handle, &tmp->sc_handle);
512 /* Returns 0 to tell this is a match */
513 static inline int rsc_upcall_compare(struct upcall_cache *cache,
514 struct upcall_cache_entry *entry,
515 __u64 key, void *args)
517 struct gss_rsc *rsc1 = &entry->u.rsc;
518 struct gss_rsc *rsc2 = args;
520 return rsc_entry_match(rsc1, rsc2);
523 /* rsc upcall is a no-op, we just need a valid entry */
524 static inline int rsc_do_upcall(struct upcall_cache *cache,
525 struct upcall_cache_entry *entry)
527 upcall_cache_update_entry(cache, entry,
528 ktime_get_seconds() + cache->uc_entry_expire,
533 static inline int rsc_downcall_compare(struct upcall_cache *cache,
534 struct upcall_cache_entry *entry,
535 __u64 key, void *args)
537 struct gss_rsc *rsc = &entry->u.rsc;
538 struct rsc_downcall_data *scd = args;
539 char *mesg = scd->scd_val;
543 /* scd_val starts with handle */
544 len = gss_buffer_get(&mesg, &handle.len, &handle.data);
545 scd->scd_offset = mesg - scd->scd_val;
547 return __rsc_entry_match(&rsc->sc_handle, &handle);
550 static int rsc_parse_downcall(struct upcall_cache *cache,
551 struct upcall_cache_entry *entry,
554 struct gss_api_mech *gm = NULL;
555 struct gss_rsc *rsc = &entry->u.rsc;
556 struct rsc_downcall_data *scd = args;
557 int mlen = scd->scd_len;
558 char *mesg = scd->scd_val + scd->scd_offset;
559 char *buf = scd->scd_val;
560 int status = -EINVAL;
570 rsc->sc_ctx.gsc_remote = !!(scd->scd_flags & RSC_DATA_FLAG_REMOTE);
571 rsc->sc_ctx.gsc_usr_root = !!(scd->scd_flags & RSC_DATA_FLAG_ROOT);
572 rsc->sc_ctx.gsc_usr_mds = !!(scd->scd_flags & RSC_DATA_FLAG_MDS);
573 rsc->sc_ctx.gsc_usr_oss = !!(scd->scd_flags & RSC_DATA_FLAG_OSS);
574 rsc->sc_ctx.gsc_mapped_uid = scd->scd_mapped_uid;
575 rsc->sc_ctx.gsc_uid = scd->scd_uid;
577 rsc->sc_ctx.gsc_gid = scd->scd_gid;
578 gm = lgss_name_to_mech(scd->scd_mechname);
580 status = -EOPNOTSUPP;
584 /* handle has already been consumed in rsc_downcall_compare().
585 * scd_offset gives next field.
589 len = gss_buffer_read(&mesg, buf, mlen);
593 tmp_buf.data = (unsigned char *)buf;
594 if (lgss_import_sec_context(&tmp_buf, gm,
595 &rsc->sc_ctx.gsc_mechctx))
598 if (lgss_inquire_context(rsc->sc_ctx.gsc_mechctx, &ctx_expiry))
601 /* ctx_expiry is the number of seconds since Jan 1 1970.
602 * We just want the number of seconds into the future.
604 entry->ue_expire = ktime_get_seconds() +
605 (ctx_expiry - ktime_get_real_seconds());
611 CDEBUG(D_OTHER, "rsc parse %p: %d\n", rsc, status);
615 struct gss_rsc *rsc_entry_get(struct upcall_cache *cache, struct gss_rsc *rsc)
617 struct upcall_cache_entry *entry;
618 int hash = rsc_entry_hash(rsc);
621 return ERR_PTR(-ENOENT);
623 entry = upcall_cache_get_entry(cache, (__u64)hash, rsc);
624 if (unlikely(!entry))
625 return ERR_PTR(-ENOENT);
627 return ERR_CAST(entry);
629 return &entry->u.rsc;
632 void rsc_entry_put(struct upcall_cache *cache, struct gss_rsc *rsc)
637 upcall_cache_put_entry(cache, rsc->sc_uc_entry);
640 void rsc_flush(struct upcall_cache *cache, int hash)
643 upcall_cache_flush_idle(cache);
645 upcall_cache_flush_one(cache, (__u64)hash, NULL);
648 struct upcall_cache_ops rsc_upcall_cache_ops = {
649 .init_entry = rsc_entry_init,
650 .free_entry = rsc_entry_free,
651 .upcall_compare = rsc_upcall_compare,
652 .downcall_compare = rsc_downcall_compare,
653 .do_upcall = rsc_do_upcall,
654 .parse_downcall = rsc_parse_downcall,
657 struct upcall_cache *rsccache;
659 /****************************************
661 ****************************************/
663 static struct gss_rsc *gss_svc_searchbyctx(rawobj_t *handle)
666 struct gss_rsc *found;
668 memset(&rsc, 0, sizeof(rsc));
669 if (rawobj_dup(&rsc.sc_handle, handle))
672 found = rsc_entry_get(rsccache, &rsc);
674 if (IS_ERR_OR_NULL(found))
676 if (!found->sc_ctx.gsc_mechctx) {
677 rsc_entry_put(rsccache, found);
678 return ERR_PTR(-ENOENT);
683 int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
684 struct gss_sec *gsec,
685 struct gss_cli_ctx *gctx)
687 struct gss_rsc rsc, *rscp = NULL;
693 memset(&rsc, 0, sizeof(rsc));
695 if (!imp || !imp->imp_obd) {
696 CERROR("invalid imp, drop\n");
700 if (rawobj_alloc(&rsc.sc_handle, (char *)&gsec->gs_rvs_hdl,
701 sizeof(gsec->gs_rvs_hdl)))
702 GOTO(out, rc = -ENOMEM);
704 rscp = rsc_entry_get(rsccache, &rsc);
706 if (IS_ERR_OR_NULL(rscp))
707 GOTO(out, rc = -ENOMEM);
709 major = lgss_copy_reverse_context(gctx->gc_mechctx,
710 &rscp->sc_ctx.gsc_mechctx);
711 if (major != GSS_S_COMPLETE)
712 GOTO(out, rc = -ENOMEM);
714 if (lgss_inquire_context(rscp->sc_ctx.gsc_mechctx, &ctx_expiry)) {
715 CERROR("%s: unable to get expire time, drop\n",
716 imp->imp_obd->obd_name);
717 GOTO(out, rc = -EINVAL);
719 rscp->sc_uc_entry->ue_expire = ktime_get_seconds() +
720 (ctx_expiry - ktime_get_real_seconds());
722 switch (imp->imp_obd->u.cli.cl_sp_to) {
724 rscp->sc_ctx.gsc_usr_mds = 1;
727 rscp->sc_ctx.gsc_usr_oss = 1;
730 rscp->sc_ctx.gsc_usr_root = 1;
733 /* by convention, all 3 set to 1 means MGS */
734 rscp->sc_ctx.gsc_usr_mds = 1;
735 rscp->sc_ctx.gsc_usr_oss = 1;
736 rscp->sc_ctx.gsc_usr_root = 1;
742 rscp->sc_target = imp->imp_obd;
743 rawobj_dup(&gctx->gc_svc_handle, &rscp->sc_handle);
745 CDEBUG(D_SEC, "%s: create reverse svc ctx %p to %s: idx %#llx\n",
746 imp->imp_obd->obd_name, &rscp->sc_ctx, obd2cli_tgt(imp->imp_obd),
750 if (!IS_ERR_OR_NULL(rscp))
751 rsc_entry_put(rsccache, rscp);
753 CERROR("%s: can't create reverse svc ctx idx %#llx: rc = %d\n",
754 imp->imp_obd->obd_name, gsec->gs_rvs_hdl, rc);
758 int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
760 const time64_t expire = 20;
761 struct gss_rsc *rscp;
763 rscp = gss_svc_searchbyctx(handle);
764 if (!IS_ERR_OR_NULL(rscp)) {
766 "reverse svcctx %p (rsc %p) expire in %lld seconds\n",
767 &rscp->sc_ctx, rscp, expire);
769 rscp->sc_uc_entry->ue_expire = ktime_get_seconds() + expire;
770 rsc_entry_put(rsccache, rscp);
775 int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
777 struct gss_rsc *rscp = container_of(ctx, struct gss_rsc, sc_ctx);
779 return rawobj_dup(handle, &rscp->sc_handle);
782 int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
784 struct gss_rsc *rscp;
786 rscp = gss_svc_searchbyctx(handle);
787 if (!IS_ERR_OR_NULL(rscp)) {
788 CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
789 &rscp->sc_ctx, rscp, seq + 1);
791 rscp->sc_ctx.gsc_rvs_seq = seq + 1;
792 rsc_entry_put(rsccache, rscp);
797 int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
798 struct gss_svc_reqctx *grctx,
799 struct gss_wire_ctx *gw,
800 struct obd_device *target,
805 struct gss_rsi rsi = { 0 }, *rsip = NULL;
806 struct ptlrpc_reply_state *rs;
807 struct gss_rsc *rscp = NULL;
808 int replen = sizeof(struct ptlrpc_body);
809 struct gss_rep_header *rephdr;
814 rsi.si_lustre_svc = lustre_svc;
815 /* In case of MR, rq_peer is not the NID from which request is received,
816 * but primary NID of peer.
817 * So we need LNetPrimaryNID(rq_source) to match what the clients uses.
819 LNetPrimaryNID(&req->rq_source.nid);
820 rsi.si_nid4 = lnet_nid_to_nid4(&req->rq_source.nid);
821 nodemap_test_nid(lnet_nid_to_nid4(&req->rq_peer.nid), rsi.si_nm_name,
822 sizeof(rsi.si_nm_name));
824 /* Note that context handle is always 0 for for INIT. */
825 rc2 = rawobj_dup(&rsi.si_in_handle, &gw->gw_handle);
827 CERROR("%s: failed to duplicate context handle: rc = %d\n",
828 target->obd_name, rc2);
829 GOTO(out, rc = SECSVC_DROP);
832 rc2 = rawobj_dup(&rsi.si_in_token, in_token);
834 CERROR("%s: failed to duplicate token: rc = %d\n",
835 target->obd_name, rc2);
836 rawobj_free(&rsi.si_in_handle);
837 GOTO(out, rc = SECSVC_DROP);
840 rsip = rsi_entry_get(rsicache, &rsi);
842 if (IS_ERR_OR_NULL(rsip)) {
847 CERROR("%s: failed to get entry from rsi cache (nid %s): rc = %d\n",
849 libcfs_nid2str(lnet_nid_to_nid4(&req->rq_source.nid)),
852 if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
853 rc = SECSVC_COMPLETE;
860 rscp = gss_svc_searchbyctx(&rsip->si_out_handle);
861 if (IS_ERR_OR_NULL(rscp)) {
862 /* gss mechanism returned major and minor code so we return
863 * those in error message */
865 if (!gss_pack_err_notify(req, rsip->si_major_status,
866 rsip->si_minor_status))
867 rc = SECSVC_COMPLETE;
871 CERROR("%s: authentication failed: rc = %d\n",
872 target->obd_name, rc);
875 /* we need to take an extra ref on the cache entry,
876 * as a pointer to sc_ctx is stored in grctx
878 upcall_cache_get_entry_raw(rscp->sc_uc_entry);
879 grctx->src_ctx = &rscp->sc_ctx;
882 if (gw->gw_flags & LUSTRE_GSS_PACK_KCSUM) {
883 grctx->src_ctx->gsc_mechctx->hash_func = gss_digest_hash;
884 } else if (!strcmp(grctx->src_ctx->gsc_mechctx->mech_type->gm_name,
886 !krb5_allow_old_client_csum) {
887 CWARN("%s: deny connection from '%s' due to missing 'krb_csum' feature, set 'sptlrpc.gss.krb5_allow_old_client_csum=1' to allow, but recommend client upgrade: rc = %d\n",
888 target->obd_name, libcfs_nidstr(&req->rq_peer.nid),
890 GOTO(out, rc = SECSVC_DROP);
892 grctx->src_ctx->gsc_mechctx->hash_func =
893 gss_digest_hash_compat;
896 if (rawobj_dup(&rscp->sc_ctx.gsc_rvs_hdl, rvs_hdl)) {
897 CERROR("%s: failed duplicate reverse handle\n",
899 GOTO(out, rc = SECSVC_DROP);
902 rscp->sc_target = target;
904 CDEBUG(D_SEC, "%s: server create rsc %p(%u->%s)\n",
905 target->obd_name, rscp, rscp->sc_ctx.gsc_uid,
906 libcfs_nidstr(&req->rq_peer.nid));
908 if (rsip->si_out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
909 CERROR("%s: handle size %u too large\n",
910 target->obd_name, rsip->si_out_handle.len);
911 GOTO(out, rc = SECSVC_DROP);
915 grctx->src_reserve_len = round_up(rsip->si_out_token.len, 4);
917 rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
919 CERROR("%s: failed to pack reply: rc = %d\n",
920 target->obd_name, rc);
921 GOTO(out, rc = SECSVC_DROP);
924 rs = req->rq_reply_state;
925 LASSERT(rs->rs_repbuf->lm_bufcount == 3);
926 LASSERT(rs->rs_repbuf->lm_buflens[0] >=
927 sizeof(*rephdr) + rsip->si_out_handle.len);
928 LASSERT(rs->rs_repbuf->lm_buflens[2] >= rsip->si_out_token.len);
930 rephdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
931 rephdr->gh_version = PTLRPC_GSS_VERSION;
932 rephdr->gh_flags = 0;
933 rephdr->gh_proc = PTLRPC_GSS_PROC_ERR;
934 rephdr->gh_major = rsip->si_major_status;
935 rephdr->gh_minor = rsip->si_minor_status;
936 rephdr->gh_seqwin = GSS_SEQ_WIN;
937 rephdr->gh_handle.len = rsip->si_out_handle.len;
938 memcpy(rephdr->gh_handle.data, rsip->si_out_handle.data,
939 rsip->si_out_handle.len);
941 memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0), rsip->si_out_token.data,
942 rsip->si_out_token.len);
944 rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
945 rsip->si_out_token.len, 0);
950 if (!IS_ERR_OR_NULL(rsip))
951 rsi_entry_put(rsicache, rsip);
952 if (!IS_ERR_OR_NULL(rscp)) {
953 /* if anything went wrong, we don't keep the context too */
955 UC_CACHE_SET_INVALID(rscp->sc_uc_entry);
957 CDEBUG(D_SEC, "%s: create rsc with idx %#llx\n",
959 gss_handle_to_u64(&rscp->sc_handle));
961 rsc_entry_put(rsccache, rscp);
966 struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
967 struct gss_wire_ctx *gw)
969 struct gss_rsc *rscp;
971 rscp = gss_svc_searchbyctx(&gw->gw_handle);
972 if (IS_ERR_OR_NULL(rscp)) {
973 CWARN("Invalid gss ctx idx %#llx from %s\n",
974 gss_handle_to_u64(&gw->gw_handle),
975 libcfs_nidstr(&req->rq_peer.nid));
979 return &rscp->sc_ctx;
982 void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx)
984 struct gss_rsc *rscp = container_of(ctx, struct gss_rsc, sc_ctx);
986 rsc_entry_put(rsccache, rscp);
989 void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
991 struct gss_rsc *rscp = container_of(ctx, struct gss_rsc, sc_ctx);
993 UC_CACHE_SET_INVALID(rscp->sc_uc_entry);
994 rscp->sc_uc_entry->ue_expire = 1;
997 /* Wait for userspace daemon to open socket, approx 1.5 s.
998 * If socket is not open, upcall requests might fail.
1000 static int check_gssd_socket(void)
1002 struct sockaddr_un *sun;
1003 struct socket *sock;
1007 #ifdef HAVE_SOCK_CREATE_KERN_USE_NET
1008 err = sock_create_kern(current->nsproxy->net_ns,
1009 AF_UNIX, SOCK_STREAM, 0, &sock);
1011 err = sock_create_kern(AF_UNIX, SOCK_STREAM, 0, &sock);
1014 CDEBUG(D_SEC, "Failed to create socket: %d\n", err);
1018 OBD_ALLOC(sun, sizeof(*sun));
1023 memset(sun, 0, sizeof(*sun));
1024 sun->sun_family = AF_UNIX;
1025 strncpy(sun->sun_path, GSS_SOCKET_PATH, sizeof(sun->sun_path));
1027 /* Try to connect to the socket */
1028 while (tries++ < 6) {
1029 err = kernel_connect(sock, (struct sockaddr *)sun,
1033 schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
1036 CDEBUG(D_SEC, "Failed to connect to socket: %d\n", err);
1038 kernel_sock_shutdown(sock, SHUT_RDWR);
1041 OBD_FREE(sun, sizeof(*sun));
1045 int __init gss_init_svc_upcall(void)
1050 * this helps reducing context index confliction. after server reboot,
1051 * conflicting request from clients might be filtered out by initial
1052 * sequence number checking, thus no chance to sent error notification
1055 get_random_bytes(&__ctx_index, sizeof(__ctx_index));
1057 rsicache = upcall_cache_init(RSI_CACHE_NAME, RSI_UPCALL_PATH,
1058 UC_RSICACHE_HASH_SIZE,
1059 3600, /* entry expire: 1 h */
1060 30, /* acquire expire: 30 s */
1061 false, /* can't replay acquire */
1062 &rsi_upcall_cache_ops);
1063 if (IS_ERR(rsicache)) {
1064 rc = PTR_ERR(rsicache);
1068 rsccache = upcall_cache_init(RSC_CACHE_NAME, RSC_UPCALL_PATH,
1069 UC_RSCCACHE_HASH_SIZE,
1070 3600, /* replaced with one from mech */
1071 100, /* arbitrary, not used */
1072 false, /* can't replay acquire */
1073 &rsc_upcall_cache_ops);
1074 if (IS_ERR(rsccache)) {
1075 upcall_cache_cleanup(rsicache);
1077 rc = PTR_ERR(rsccache);
1082 if (check_gssd_socket())
1084 "Init channel not opened by lsvcgssd, GSS might not work on server side until daemon is active\n");
1089 void gss_exit_svc_upcall(void)
1091 upcall_cache_cleanup(rsicache);
1092 upcall_cache_cleanup(rsccache);