/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Modifications for Lustre
 * Copyright 2004, Cluster File Systems, Inc.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 *
 * Neil Brown <neilb@cse.unsw.edu.au>
 * J. Bruce Fields <bfields@umich.edu>
 * Andy Adamson <andros@umich.edu>
 * Dug Song <dugsong@monkey.org>
 *
 * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and
 * rfc2078 (the GSS API).
 *
 * RPCSEC_GSS involves three stages:
 *  1/ context creation
 *  2/ data exchange
 *  3/ context destruction
 *
 * Context creation is handled largely by upcalls to user-space.
 * In particular, GSS_Accept_sec_context is handled by an upcall.
 * Data exchange is handled entirely within the kernel.
 * In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal and GSS_Unseal are
 * in-kernel.
 * Context destruction is handled in-kernel:
 * GSS_Delete_sec_context is in-kernel.
 *
 * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
 * The context handle and gss_token are used as a key into the rpcsec_init
 * cache.
 * The content of this cache includes some of the outputs of
 * GSS_Accept_sec_context, namely major_status, minor_status, context_handle
 * and reply_token. These are sent back to the client.
 * Sequence window management is handled by the kernel. The window size is
 * currently a compile-time constant.
 *
 * When user-space is happy that a context is established, it places an entry
 * in the rpcsec_context cache. The key for this cache is the context_handle.
 * The content includes:
 *      uid/gidlist - for determining access rights
 *      mechanism specific information, such as a key
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <liblustre.h>

#include <linux/sunrpc/cache.h>

#include <libcfs/kp30.h>
#include <linux/obd.h>
#include <linux/obd_class.h>
#include <linux/obd_support.h>
#include <linux/lustre_idl.h>
#include <linux/lustre_net.h>
#include <linux/lustre_import.h>
#include <linux/lustre_sec.h>

#include "gss_internal.h"

static inline unsigned long hash_mem(char *buf, int length, int bits)
{
        unsigned long hash = 0;
        unsigned long l = 0;
        int len = 0;
        unsigned char c;

        do {
                if (len == length) {
                        c = (char) len;
                        len = -1;
                } else
                        c = *buf++;
                l = (l << 8) | c;
                len++;
                if ((len & (BITS_PER_LONG/8 - 1)) == 0)
                        hash = hash_long(hash ^ l, BITS_PER_LONG);
        } while (len);

        return hash >> (BITS_PER_LONG - bits);
}
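
/*
 * A minimal usage sketch of hash_mem() (illustrative only, not compiled;
 * "handle" is a hypothetical rawobj_t): picking the hash bucket for a
 * context handle, using the RSC_HASHBITS constant defined below.
 */
#if 0
        unsigned long bucket;

        /* yields a bucket index in [0, RSC_HASHMAX) */
        bucket = hash_mem((char *)handle->data, handle->len, RSC_HASHBITS);
#endif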

/*
 * The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
 * into replies.
 *
 * Key is context handle (\x if empty) and gss_token.
 * Content is major_status, minor_status (integers), context_handle,
 * reply_token.
 */
#define RSI_HASHBITS    6
#define RSI_HASHMAX     (1 << RSI_HASHBITS)
#define RSI_HASHMASK    (RSI_HASHMAX - 1)

struct rsi {
        struct cache_head       h;
        __u32                   naltype;
        __u32                   netid;
        __u64                   nid;
        rawobj_t                in_handle, in_token;
        rawobj_t                out_handle, out_token;
        int                     major_status, minor_status;
};

static struct cache_head *rsi_table[RSI_HASHMAX];
static struct cache_detail rsi_cache;

static void rsi_free(struct rsi *rsii)
{
        rawobj_free(&rsii->in_handle);
        rawobj_free(&rsii->in_token);
        rawobj_free(&rsii->out_handle);
        rawobj_free(&rsii->out_token);
}

static void rsi_put(struct cache_head *item, struct cache_detail *cd)
{
        struct rsi *rsii = container_of(item, struct rsi, h);

        if (cache_put(item, cd)) {
                rsi_free(rsii);
                OBD_FREE(rsii, sizeof(*rsii));
        }
}

static inline int rsi_hash(struct rsi *item)
{
        return hash_mem((char *)item->in_handle.data, item->in_handle.len,
                        RSI_HASHBITS) ^
               hash_mem((char *)item->in_token.data, item->in_token.len,
                        RSI_HASHBITS);
}

static inline int rsi_match(struct rsi *item, struct rsi *tmp)
{
        return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
                rawobj_equal(&item->in_token, &tmp->in_token));
}

static void rsi_request(struct cache_detail *cd,
                        struct cache_head *h,
                        char **bpp, int *blen)
{
        struct rsi *rsii = container_of(h, struct rsi, h);

        qword_addhex(bpp, blen, (char *)&rsii->naltype, sizeof(rsii->naltype));
        qword_addhex(bpp, blen, (char *)&rsii->netid, sizeof(rsii->netid));
        qword_addhex(bpp, blen, (char *)&rsii->nid, sizeof(rsii->nid));
        qword_addhex(bpp, blen, (char *)rsii->in_handle.data,
                     rsii->in_handle.len);
        qword_addhex(bpp, blen, (char *)rsii->in_token.data,
                     rsii->in_token.len);
        (*bpp)[-1] = '\n';
}
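
/*
 * For reference, the upcall line handed to the user-space daemon is simply
 * the five fields above, each hex-quoted by qword_addhex() and separated by
 * spaces (a schematic, hypothetical example):
 *
 *   <naltype> <netid> <nid> <in_handle> <in_token>\n
 */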

static int
gssd_reply(struct rsi *item)
{
        struct rsi *tmp;
        struct cache_head **hp, **head;

        head = &rsi_cache.hash_table[rsi_hash(item)];
        write_lock(&rsi_cache.hash_lock);
        for (hp = head; *hp != NULL; hp = &tmp->h.next) {
                tmp = container_of(*hp, struct rsi, h);
                if (rsi_match(tmp, item)) {
                        clear_bit(CACHE_HASHED, &tmp->h.flags);
                        if (test_bit(CACHE_VALID, &tmp->h.flags)) {
                                write_unlock(&rsi_cache.hash_lock);
                                rsi_put(&tmp->h, &rsi_cache);

        set_bit(CACHE_HASHED, &item->h.flags);
        set_bit(CACHE_VALID, &item->h.flags);
        item->h.last_refresh = get_seconds();
        write_unlock(&rsi_cache.hash_lock);
        cache_fresh(&rsi_cache, &tmp->h, 0);
        rsi_put(&tmp->h, &rsi_cache);

        write_unlock(&rsi_cache.hash_lock);

/*
 * the upcall is completed asynchronously; here we just wait for its
 * completion or a timeout. it's a hack, but it works, and we'll come up
 * with a real fix if we decide to stick with the NFSv4 cache code
 */
static struct rsi *
gssd_upcall(struct rsi *item, struct cache_req *chandle)
{
        struct rsi *tmp;
        struct cache_head **hp, **head;
        unsigned long starttime;

        head = &rsi_cache.hash_table[rsi_hash(item)];
        read_lock(&rsi_cache.hash_lock);
        for (hp = head; *hp != NULL; hp = &tmp->h.next) {
                tmp = container_of(*hp, struct rsi, h);
                if (rsi_match(tmp, item)) {
                        if (!test_bit(CACHE_VALID, &tmp->h.flags)) {
                                CERROR("found rsi without VALID\n");
                                read_unlock(&rsi_cache.hash_lock);
        read_unlock(&rsi_cache.hash_lock);

        /* cache_get(&item->h); */
        set_bit(CACHE_HASHED, &item->h.flags);
        item->h.next = *head;
        *head = &item->h;
        read_unlock(&rsi_cache.hash_lock);

        cache_check(&rsi_cache, &item->h, chandle);
        starttime = get_seconds();
        do {
                read_lock(&rsi_cache.hash_lock);
                for (hp = head; *hp != NULL; hp = &tmp->h.next) {
                        tmp = container_of(*hp, struct rsi, h);
                        if (rsi_match(tmp, item)) {
                                if (!test_bit(CACHE_VALID, &tmp->h.flags)) {
                                        read_unlock(&rsi_cache.hash_lock);
                                clear_bit(CACHE_HASHED, &tmp->h.flags);
                                read_unlock(&rsi_cache.hash_lock);
                read_unlock(&rsi_cache.hash_lock);
        } while ((get_seconds() - starttime) <= 5);
        CERROR("5s timeout while waiting for cache refill\n");

static int rsi_parse(struct cache_detail *cd,
                     char *mesg, int mlen)
{
        /* context token expiry major minor context token */
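
        /*
         * i.e. in_handle, in_token, expiry, major_status, minor_status,
         * out_handle, out_token. A hypothetical downcall line (field values
         * invented for illustration) would look roughly like:
         *
         *   \x30303030 \x60819c... 1200000000 0 0 \x31323334 \x60735a...\n
         */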
        char *buf = mesg;
        char *ep;
        int len;
        struct rsi *rsii;
        time_t expiry;
        int status = -EINVAL;

        OBD_ALLOC(rsii, sizeof(*rsii));
        if (!rsii) {
                CERROR("failed to alloc rsii\n");
                RETURN(-ENOMEM);
        }
        cache_init(&rsii->h);

        /* in_handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        if (rawobj_alloc(&rsii->in_handle, buf, len))
                goto out;

        /* in_token */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        if (rawobj_alloc(&rsii->in_token, buf, len))
                goto out;

        /* expiry */
        expiry = get_expiry(&mesg);

        /* major status */
        len = qword_get(&mesg, buf, mlen);
        rsii->major_status = simple_strtoul(buf, &ep, 10);

        /* minor status */
        len = qword_get(&mesg, buf, mlen);
        rsii->minor_status = simple_strtoul(buf, &ep, 10);

        /* out_handle */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii->out_handle, buf, len))
                goto out;

        /* out_token */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii->out_token, buf, len))
                goto out;

        rsii->h.expiry_time = expiry;
        status = gssd_reply(rsii);
out:
        rsi_put(&rsii->h, &rsi_cache);
        return status;
}

static struct cache_detail rsi_cache = {
        .hash_size      = RSI_HASHMAX,
        .hash_table     = rsi_table,
        .name           = "auth.ptlrpcs.init",
        .cache_put      = rsi_put,
        .cache_request  = rsi_request,
        .cache_parse    = rsi_parse,
};

/*
 * The rpcsec_context cache is used to store a context that is
 * used in data exchange.
 * The key is a context handle. The content is:
 *      uid, gidlist, mechanism, service-set, mech-specific-data
 */
#define RSC_HASHBITS    10
#define RSC_HASHMAX     (1 << RSC_HASHBITS)
#define RSC_HASHMASK    (RSC_HASHMAX - 1)

#define GSS_SEQ_WIN     512

struct gss_svc_seq_data {
        /* highest seq number seen so far: */
        __u32                   sd_max;
        /* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
         * sd_win is nonzero iff sequence number i has been seen already: */
        unsigned long           sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
        spinlock_t              sd_lock;
};

struct rsc {
        struct cache_head       h;
        rawobj_t                handle;
        __u32                   remote_realm;
        __u32                   mapped_uid;
        struct vfs_cred         cred;
        struct gss_svc_seq_data seqdata;
        struct gss_ctx         *mechctx;
};

static struct cache_head *rsc_table[RSC_HASHMAX];
static struct cache_detail rsc_cache;

static void rsc_free(struct rsc *rsci)
{
        rawobj_free(&rsci->handle);
        if (rsci->mechctx)
                kgss_delete_sec_context(&rsci->mechctx);
        if (rsci->cred.vc_ginfo)
                put_group_info(rsci->cred.vc_ginfo);
}

static void rsc_put(struct cache_head *item, struct cache_detail *cd)
{
        struct rsc *rsci = container_of(item, struct rsc, h);

        if (cache_put(item, cd)) {
                rsc_free(rsci);
                OBD_FREE(rsci, sizeof(*rsci));
        }
}

static inline int
rsc_hash(struct rsc *rsci)
{
        return hash_mem((char *)rsci->handle.data,
                        rsci->handle.len, RSC_HASHBITS);
}

static inline int
rsc_match(struct rsc *new, struct rsc *tmp)
{
        return rawobj_equal(&new->handle, &tmp->handle);
}

static struct rsc *rsc_lookup(struct rsc *item, int set)
{
        struct rsc *tmp = NULL;
        struct cache_head **hp, **head;

        head = &rsc_cache.hash_table[rsc_hash(item)];
        if (set)
                write_lock(&rsc_cache.hash_lock);
        else
                read_lock(&rsc_cache.hash_lock);
        for (hp = head; *hp != NULL; hp = &tmp->h.next) {
                tmp = container_of(*hp, struct rsc, h);
                if (!rsc_match(tmp, item))
                        continue;
                clear_bit(CACHE_HASHED, &tmp->h.flags);
                rsc_put(&tmp->h, &rsc_cache);

        /* Didn't find anything */
        set_bit(CACHE_HASHED, &item->h.flags);
        item->h.next = *head;
        write_unlock(&rsc_cache.hash_lock);
        cache_fresh(&rsc_cache, &item->h, item->h.expiry_time);

        read_unlock(&rsc_cache.hash_lock);

static int rsc_parse(struct cache_detail *cd,
                     char *mesg, int mlen)
{
        /* contexthandle expiry remote_flag mapped_uid
         * [ uid gid N <n gids> mechname ...mechdata... ] */
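
        /*
         * A hypothetical positive downcall line for uid 500 using the krb5
         * mech would arrive roughly as (values invented, mechdata elided):
         *
         *   \x31323334 1200000000 0 500 500 500 1 500 krb5 <mechdata>\n
         */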
        char *buf = mesg;
        int len, rv;
        struct rsc *rsci, *res = NULL;
        time_t expiry;
        int status = -EINVAL;

        OBD_ALLOC(rsci, sizeof(*rsci));
        if (!rsci) {
                CERROR("failed to alloc rsci\n");
                RETURN(-ENOMEM);
        }
        cache_init(&rsci->h);

        /* context handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        if (rawobj_alloc(&rsci->handle, buf, len))
                goto out;

        /* expiry */
        expiry = get_expiry(&mesg);

        /* remote flag */
        rv = get_int(&mesg, (int *)&rsci->remote_realm);
        if (rv) {
                CERROR("failed to get remote flag\n");
                goto out;
        }

        /* mapped uid */
        rv = get_int(&mesg, (int *)&rsci->mapped_uid);
        if (rv) {
                CERROR("failed to get mapped uid\n");
                goto out;
        }

        /* uid, or NEGATIVE */
        rv = get_int(&mesg, (int *)&rsci->cred.vc_uid);
        if (rv == -ENOENT) {
                CERROR("NOENT? set rsc entry negative\n");
                set_bit(CACHE_NEGATIVE, &rsci->h.flags);
        } else {
                struct gss_api_mech *gm;
                rawobj_t tmp_buf;
                __u64 ctx_expiry;

                /* gid */
                if (get_int(&mesg, (int *)&rsci->cred.vc_gid))
                        goto out;

                /* mech name */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;
                gm = kgss_name_to_mech(buf);
                status = -EOPNOTSUPP;
                if (!gm)
                        goto out;
                status = -EINVAL;

                /* mech-specific data: */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;

                tmp_buf.len = len;
                tmp_buf.data = (unsigned char *)buf;
                if (kgss_import_sec_context(&tmp_buf, gm, &rsci->mechctx))
                        goto out;

                /* currently the expiry time passed down from user-space
                 * is invalid; here we retrieve it from the mech instead.
                 */
                if (kgss_inquire_context(rsci->mechctx, &ctx_expiry)) {
                        CERROR("unable to get expire time, drop it\n");
                        set_bit(CACHE_NEGATIVE, &rsci->h.flags);
                } else
                        expiry = (time_t) ctx_expiry;
        }

        rsci->h.expiry_time = expiry;
        spin_lock_init(&rsci->seqdata.sd_lock);
        res = rsc_lookup(rsci, 1);
        rsc_put(&res->h, &rsc_cache);
        status = 0;
out:
        rsc_put(&rsci->h, &rsc_cache);
        return status;
}

/*
 * flush all entries with @uid. @uid == -1 will match all.
 * we only know the uid, maybe netid/nid in the future, but in all cases
 * we must search the whole cache
 */
static void rsc_flush(uid_t uid)
{
        struct cache_head **ch;
        struct rsc *rscp;
        int n;

        write_lock(&rsc_cache.hash_lock);
        for (n = 0; n < RSC_HASHMAX; n++) {
                for (ch = &rsc_cache.hash_table[n]; *ch;) {
                        rscp = container_of(*ch, struct rsc, h);
                        if (uid == -1 || rscp->cred.vc_uid == uid) {
                                /* it seems simply setting NEGATIVE doesn't work */
                                set_bit(CACHE_NEGATIVE, &rscp->h.flags);
                                clear_bit(CACHE_HASHED, &rscp->h.flags);
                                CWARN("flush rsc %p for uid %u\n",
                                      rscp, rscp->cred.vc_uid);
                                rsc_put(&rscp->h, &rsc_cache);

        write_unlock(&rsc_cache.hash_lock);

static struct cache_detail rsc_cache = {
        .hash_size      = RSC_HASHMAX,
        .hash_table     = rsc_table,
        .name           = "auth.ptlrpcs.context",
        .cache_put      = rsc_put,
        .cache_parse    = rsc_parse,
};

static struct rsc *
gss_svc_searchbyctx(rawobj_t *handle)
{
        struct rsc rsci;
        struct rsc *found;

        rsci.handle = *handle;
        found = rsc_lookup(&rsci, 0);
        if (!found)
                return NULL;

        if (cache_check(&rsc_cache, &found->h, NULL))
                return NULL;

        return found;
}

struct gss_svc_data {
        /* decoded gss client cred: */
        struct rpc_gss_wire_cred        clcred;
        /* internally used status flags */
        unsigned int                    is_init:1,
                                        is_init_continue:1,
                                        is_err_notify:1,
                                        is_fini:1;
        int                             reserve_len;
};

/*
 * again hacking: only try to give the svcgssd a chance to handle
 * the upcall; we never actually defer the request.
 */
static struct cache_deferred_req *my_defer(struct cache_req *req)
{
        return NULL;
}

static struct cache_req my_chandle = {my_defer};

/* Implements the sequence number algorithm as specified in RFC 2203. */
static int
gss_check_seq_num(struct gss_svc_seq_data *sd, __u32 seq_num)
{
        int rc = 0;

        spin_lock(&sd->sd_lock);
        if (seq_num > sd->sd_max) {
                if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
                        memset(sd->sd_win, 0, sizeof(sd->sd_win));
                        sd->sd_max = seq_num;
                } else {
                        while (sd->sd_max < seq_num) {
                                sd->sd_max++;
                                __clear_bit(sd->sd_max % GSS_SEQ_WIN,
                                            sd->sd_win);
                        }
                }
                __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
                goto exit;
        } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
                CERROR("seq %u too low: max %u, win %d\n",
                       seq_num, sd->sd_max, GSS_SEQ_WIN);
                rc = 1;
                goto exit;
        }

        if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win)) {
                CERROR("seq %u is replay: max %u, win %d\n",
                       seq_num, sd->sd_max, GSS_SEQ_WIN);
                rc = 1;
        }
exit:
        spin_unlock(&sd->sd_lock);
        return rc;
}
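
/*
 * A worked example of the window logic above (hedged; assumes
 * GSS_SEQ_WIN == 512 and a context that has already accepted seq 700, so
 * sd_max == 700): seq 650 lies inside the window (700 - 512 < 650) and is
 * accepted once; a second seq 650 hits __test_and_set_bit() and is treated
 * as a replay; seq 100 satisfies 100 + 512 <= 700 and is rejected as too
 * old; seq 701 advances sd_max and clears the bit that used to track
 * seq 189 (701 - 512).
 */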

static __u32
gss_svc_verify_request(struct ptlrpc_request *req,
                       struct rsc *rsci,
                       struct rpc_gss_wire_cred *gc,
                       __u32 *vp, __u32 vlen)
{
        struct ptlrpcs_wire_hdr *sec_hdr;
        struct gss_ctx *ctx = rsci->mechctx;
        rawobj_t msg, mic;
        __u32 maj_stat;

        sec_hdr = (struct ptlrpcs_wire_hdr *) req->rq_reqbuf;

        req->rq_reqmsg = (struct lustre_msg *) (req->rq_reqbuf + sizeof(*sec_hdr));
        req->rq_reqlen = sec_hdr->msg_len;

        msg.len = sec_hdr->msg_len;
        msg.data = (__u8 *)req->rq_reqmsg;

        mic.len = le32_to_cpu(*vp++);
        mic.data = (unsigned char *)vp;
        vlen -= 4;

        if (mic.len > vlen) {
                CERROR("checksum len %d, while buffer len %d\n",
                       mic.len, vlen);
                RETURN(GSS_S_CALL_BAD_STRUCTURE);
        }

        if (mic.len == 0) {
                CERROR("invalid mic len %d\n", mic.len);
                RETURN(GSS_S_CALL_BAD_STRUCTURE);
        }

        maj_stat = kgss_verify_mic(ctx, &msg, &mic, NULL);
        if (maj_stat != GSS_S_COMPLETE) {
                CERROR("MIC verification error: major %x\n", maj_stat);
                RETURN(maj_stat);
        }

        if (gss_check_seq_num(&rsci->seqdata, gc->gc_seq)) {
                CERROR("discard request %p with old seq_num %u\n",
                       req, gc->gc_seq);
                RETURN(GSS_S_DUPLICATE_TOKEN);
        }

        RETURN(GSS_S_COMPLETE);
}

static __u32
gss_svc_unseal_request(struct ptlrpc_request *req,
                       struct rsc *rsci,
                       struct rpc_gss_wire_cred *gc,
                       __u32 *vp, __u32 vlen)
{
        struct ptlrpcs_wire_hdr *sec_hdr;
        struct gss_ctx *ctx = rsci->mechctx;
        rawobj_t cipher_text, plain_text;
        __u32 major;

        sec_hdr = (struct ptlrpcs_wire_hdr *) req->rq_reqbuf;

        if (vlen < 4) {
                CERROR("vlen only %u\n", vlen);
                RETURN(GSS_S_CALL_BAD_STRUCTURE);
        }

        cipher_text.len = le32_to_cpu(*vp++);
        cipher_text.data = (__u8 *) vp;
        vlen -= 4;

        if (cipher_text.len > vlen) {
                CERROR("cipher claimed %u while buf only %u\n",
                       cipher_text.len, vlen);
                RETURN(GSS_S_CALL_BAD_STRUCTURE);
        }

        plain_text = cipher_text;

        major = kgss_unwrap(ctx, GSS_C_QOP_DEFAULT, &cipher_text, &plain_text);
        if (major) {
                CERROR("unwrap error 0x%x\n", major);
                RETURN(major);
        }

        if (gss_check_seq_num(&rsci->seqdata, gc->gc_seq)) {
                CERROR("discard request %p with old seq_num %u\n",
                       req, gc->gc_seq);
                RETURN(GSS_S_DUPLICATE_TOKEN);
        }

        req->rq_reqmsg = (struct lustre_msg *) vp;
        req->rq_reqlen = plain_text.len;

        CDEBUG(D_SEC, "msg len %d\n", req->rq_reqlen);

        RETURN(GSS_S_COMPLETE);
}
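
/*
 * Schematically, the privacy-protected request buffer parsed above looks
 * like this (a sketch; the fields before the ciphertext are the wire header
 * and the gss header already consumed by the caller):
 *
 *   [ptlrpcs_wire_hdr][gss header ... ctx handle][cipher_len][cipher_text]
 *
 * kgss_unwrap() decrypts cipher_text in place, and the plaintext lustre_msg
 * becomes rq_reqmsg.
 */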

static int
gss_pack_err_notify(struct ptlrpc_request *req,
                    __u32 major, __u32 minor)
{
        struct gss_svc_data *svcdata = req->rq_sec_svcdata;
        __u32 reslen, *resp, *reslenp;
        char nidstr[PTL_NALFMT_SIZE];
        const __u32 secdata_len = 7 * 4;
        int rc;

        OBD_FAIL_RETURN(OBD_FAIL_SVCGSS_ERR_NOTIFY|OBD_FAIL_ONCE, -EINVAL);

        LASSERT(svcdata);
        svcdata->is_err_notify = 1;
        svcdata->reserve_len = 7 * 4;

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("could not pack reply, err %d\n", rc);
                RETURN(rc);
        }

        LASSERT(req->rq_reply_state);
        LASSERT(req->rq_reply_state->rs_repbuf);
        LASSERT(req->rq_reply_state->rs_repbuf_len >= secdata_len);
        resp = (__u32 *) req->rq_reply_state->rs_repbuf;

        /* sec header */
        *resp++ = cpu_to_le32(PTLRPC_SEC_GSS);
        *resp++ = cpu_to_le32(PTLRPC_SEC_TYPE_NONE);
        *resp++ = cpu_to_le32(req->rq_replen);
        reslenp = resp++;

        /* skip lustre msg */
        resp += req->rq_replen / 4;
        reslen = svcdata->reserve_len;

        /* gss hdr: version, subflavor, notify, major, minor,
         * obj1(fake), obj2(fake) */
        *resp++ = cpu_to_le32(PTLRPC_SEC_GSS_VERSION);
        *resp++ = cpu_to_le32(PTLRPC_SEC_GSS_KRB5I);
        *resp++ = cpu_to_le32(PTLRPC_GSS_PROC_ERR);
        *resp++ = cpu_to_le32(major);
        *resp++ = cpu_to_le32(minor);

        /* the actual sec data length */
        *reslenp = cpu_to_le32(secdata_len);

        req->rq_reply_state->rs_repdata_len += secdata_len;
        CWARN("prepare gss error notify(0x%x/0x%x) to %s\n", major, minor,
              portals_nid2str(req->rq_peer.peer_ni->pni_number,
                              req->rq_peer.peer_id.nid, nidstr));

        RETURN(0);
}
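
/*
 * The 7 * 4 byte error section reserved above is, on the wire (a sketch;
 * the two trailing words stand for the two zero-length fake objects
 * mentioned in the comment above):
 *
 *   version | subflavor | PTLRPC_GSS_PROC_ERR | major | minor | 0 | 0
 */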

static int
gss_svcsec_handle_init(struct ptlrpc_request *req,
                       struct rpc_gss_wire_cred *gc,
                       __u32 *secdata, __u32 seclen,
                       enum ptlrpcs_error *res)
{
        struct gss_svc_data *svcdata = req->rq_sec_svcdata;
        struct rsc *rsci;
        struct rsi *rsikey, *rsip;
        rawobj_t tmpobj;
        __u32 reslen, *resp, *reslenp;
        char nidstr[PTL_NALFMT_SIZE];
        int rc;

        CWARN("processing gss init(%d) request from %s\n", gc->gc_proc,
              portals_nid2str(req->rq_peer.peer_ni->pni_number,
                              req->rq_peer.peer_id.nid, nidstr));

        *res = PTLRPCS_BADCRED;
        OBD_FAIL_RETURN(OBD_FAIL_SVCGSS_INIT_REQ|OBD_FAIL_ONCE, SVC_DROP);

        if (gc->gc_proc == RPC_GSS_PROC_INIT &&
            gc->gc_ctx.len != 0) {
                CERROR("proc %d, ctx_len %d: not really init?\n",
                       gc->gc_proc, gc->gc_ctx.len);
                RETURN(SVC_DROP);
        }

        OBD_ALLOC(rsikey, sizeof(*rsikey));
        if (!rsikey) {
                CERROR("out of memory\n");
                RETURN(SVC_DROP);
        }
        cache_init(&rsikey->h);

        if (rawobj_dup(&rsikey->in_handle, &gc->gc_ctx)) {
                CERROR("failed to dup context handle\n");
                GOTO(out_rsikey, rc = SVC_DROP);
        }

        *res = PTLRPCS_BADVERF;
        if (rawobj_extract(&tmpobj, &secdata, &seclen)) {
                CERROR("can't extract token\n");
                GOTO(out_rsikey, rc = SVC_DROP);
        }
        if (rawobj_dup(&rsikey->in_token, &tmpobj)) {
                CERROR("can't duplicate token\n");
                GOTO(out_rsikey, rc = SVC_DROP);
        }

        rsikey->naltype = (__u32) req->rq_peer.peer_ni->pni_number;
        rsikey->nid = (__u64) req->rq_peer.peer_id.nid;

        rsip = gssd_upcall(rsikey, &my_chandle);
        if (!rsip) {
                CERROR("error in gssd_upcall.\n");
                GOTO(out_rsikey, rc = SVC_DROP);
        }

        rsci = gss_svc_searchbyctx(&rsip->out_handle);
        if (!rsci) {
                CERROR("rsci not mature yet?\n");

                if (gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        GOTO(out_rsip, rc = SVC_DROP);
        }

        CWARN("svcsec create gss context %p(%u@%s)\n",
              rsci, rsci->cred.vc_uid,
              portals_nid2str(req->rq_peer.peer_ni->pni_number,
                              req->rq_peer.peer_id.nid, nidstr));

        svcdata->is_init = 1;
        svcdata->reserve_len = 6 * 4 +
                size_round4(rsip->out_handle.len) +
                size_round4(rsip->out_token.len);

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("failed to pack reply, rc = %d\n", rc);
                set_bit(CACHE_NEGATIVE, &rsci->h.flags);
                GOTO(out, rc = SVC_DROP);
        }

        /* sec header */
        resp = (__u32 *) req->rq_reply_state->rs_repbuf;
        *resp++ = cpu_to_le32(PTLRPC_SEC_GSS);
        *resp++ = cpu_to_le32(PTLRPC_SEC_TYPE_NONE);
        *resp++ = cpu_to_le32(req->rq_replen);
        reslenp = resp++;

        /* skip lustre msg */
        resp += req->rq_replen / 4;
        reslen = svcdata->reserve_len;

        /* gss reply: status, major, minor, seq, out_handle, out_token */
        *resp++ = cpu_to_le32(PTLRPCS_OK);
        *resp++ = cpu_to_le32(rsip->major_status);
        *resp++ = cpu_to_le32(rsip->minor_status);
        *resp++ = cpu_to_le32(GSS_SEQ_WIN);
        reslen -= 4 * 4;
        if (rawobj_serialize(&rsip->out_handle,
                             &resp, &reslen))
                LBUG();
        if (rawobj_serialize(&rsip->out_token,
                             &resp, &reslen))
                LBUG();

        /* the actual sec data length */
        *reslenp = cpu_to_le32(svcdata->reserve_len - reslen);

        req->rq_reply_state->rs_repdata_len += le32_to_cpu(*reslenp);
        CDEBUG(D_SEC, "req %p: msgsize %d, authsize %d, "
               "total size %d\n", req, req->rq_replen,
               le32_to_cpu(*reslenp),
               req->rq_reply_state->rs_repdata_len);

        req->rq_auth_uid = rsci->cred.vc_uid;
        req->rq_remote_realm = rsci->remote_realm;
        req->rq_mapped_uid = rsci->mapped_uid;

        /* This is simplified since right now we don't support
         * CONTINUE_INIT yet */
        if (gc->gc_proc == RPC_GSS_PROC_INIT) {
                struct ptlrpcs_wire_hdr *hdr;

                hdr = buf_to_sec_hdr(req->rq_reqbuf);
                req->rq_reqmsg = buf_to_lustre_msg(req->rq_reqbuf);
                req->rq_reqlen = hdr->msg_len;
        }

        rc = SVC_LOGIN;
out:
        rsc_put(&rsci->h, &rsc_cache);
out_rsip:
        rsi_put(&rsip->h, &rsi_cache);
out_rsikey:
        rsi_put(&rsikey->h, &rsi_cache);

        RETURN(rc);
}
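
/*
 * For reference, the sec section of the init reply assembled above is,
 * roughly (a sketch; both objects are serialized as a length word followed
 * by 4-byte padded data, matching the size_round4() reservations):
 *
 *   PTLRPCS_OK | major | minor | GSS_SEQ_WIN | out_handle | out_token
 */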

static int
gss_svcsec_handle_data(struct ptlrpc_request *req,
                       struct rpc_gss_wire_cred *gc,
                       __u32 *secdata, __u32 seclen,
                       enum ptlrpcs_error *res)
{
        struct rsc *rsci;
        char nidstr[PTL_NALFMT_SIZE];
        __u32 major;
        int rc;

        *res = PTLRPCS_GSS_CREDPROBLEM;

        rsci = gss_svc_searchbyctx(&gc->gc_ctx);
        if (!rsci) {
                CWARN("Invalid gss context handle from %s\n",
                      portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                      req->rq_peer.peer_id.nid, nidstr));
                major = GSS_S_NO_CONTEXT;
                goto notify;
        }

        switch (gc->gc_svc) {
        case PTLRPC_GSS_SVC_INTEGRITY:
                major = gss_svc_verify_request(req, rsci, gc, secdata, seclen);
                if (major == GSS_S_COMPLETE)
                        break;

                CWARN("verify failed: 0x%x, ctx %p@%s\n", major, rsci,
                      portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                      req->rq_peer.peer_id.nid, nidstr));
                goto notify;
        case PTLRPC_GSS_SVC_PRIVACY:
                major = gss_svc_unseal_request(req, rsci, gc, secdata, seclen);
                if (major == GSS_S_COMPLETE)
                        break;

                CWARN("decrypt failed: 0x%x, ctx %p@%s\n", major, rsci,
                      portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                      req->rq_peer.peer_id.nid, nidstr));
                goto notify;
        default:
                CERROR("unsupported gss service %d\n", gc->gc_svc);
                GOTO(out, rc = SVC_DROP);
        }

        req->rq_auth_uid = rsci->cred.vc_uid;
        req->rq_remote_realm = rsci->remote_realm;
        req->rq_mapped_uid = rsci->mapped_uid;

        GOTO(out, rc = SVC_OK);

notify:
        if (gss_pack_err_notify(req, major, 0))
                rc = SVC_DROP;
out:
        if (rsci)
                rsc_put(&rsci->h, &rsc_cache);

        RETURN(rc);
}

static int
gss_svcsec_handle_destroy(struct ptlrpc_request *req,
                          struct rpc_gss_wire_cred *gc,
                          __u32 *secdata, __u32 seclen,
                          enum ptlrpcs_error *res)
{
        struct gss_svc_data *svcdata = req->rq_sec_svcdata;
        struct rsc *rsci;
        char nidstr[PTL_NALFMT_SIZE];
        int rc;

        *res = PTLRPCS_GSS_CREDPROBLEM;

        rsci = gss_svc_searchbyctx(&gc->gc_ctx);
        if (!rsci) {
                CWARN("invalid gss context handle for destroy.\n");
                RETURN(SVC_DROP);
        }

        if (gc->gc_svc != PTLRPC_GSS_SVC_INTEGRITY) {
                CERROR("service %d is not supported in destroy.\n",
                       gc->gc_svc);
                GOTO(out, rc = SVC_DROP);
        }

        *res = gss_svc_verify_request(req, rsci, gc, secdata, seclen);
        if (*res)
                GOTO(out, rc = SVC_DROP);

        /* compose reply, which is actually nothing */
        svcdata->is_fini = 1;
        if (lustre_pack_reply(req, 0, NULL, NULL))
                GOTO(out, rc = SVC_DROP);

        CWARN("svcsec destroy gss context %p(%u@%s)\n",
              rsci, rsci->cred.vc_uid,
              portals_nid2str(req->rq_peer.peer_ni->pni_number,
                              req->rq_peer.peer_id.nid, nidstr));

        set_bit(CACHE_NEGATIVE, &rsci->h.flags);
        rc = SVC_LOGOUT;
out:
        rsc_put(&rsci->h, &rsc_cache);

        RETURN(rc);
}

/*
 * let the incoming request go through security checking:
 *  o context establishment: invoke the user-space helper
 *  o data exchange: verify/decrypt
 *  o context destruction: mark the context invalid
 *
 * in most cases an error will result in the packet being dropped silently.
 */
static int
gss_svcsec_accept(struct ptlrpc_request *req, enum ptlrpcs_error *res)
{
        struct gss_svc_data *svcdata;
        struct rpc_gss_wire_cred *gc;
        struct ptlrpcs_wire_hdr *sec_hdr;
        __u32 seclen, *secdata, version, subflavor;
        int rc;

        CDEBUG(D_SEC, "request %p\n", req);
        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len);

        *res = PTLRPCS_BADCRED;

        sec_hdr = buf_to_sec_hdr(req->rq_reqbuf);
        LASSERT(sec_hdr->flavor == PTLRPC_SEC_GSS);

        seclen = req->rq_reqbuf_len - sizeof(*sec_hdr) - sec_hdr->msg_len;
        secdata = (__u32 *) buf_to_sec_data(req->rq_reqbuf);

        if (sec_hdr->sec_len > seclen) {
                CERROR("seclen %d, while max buf %d\n",
                       sec_hdr->sec_len, seclen);
                RETURN(SVC_DROP);
        }

        if (seclen < 6 * 4) {
                CERROR("sec size %d too small\n", seclen);
                RETURN(SVC_DROP);
        }

        LASSERT(!req->rq_sec_svcdata);
        OBD_ALLOC(svcdata, sizeof(*svcdata));
        if (!svcdata) {
                CERROR("failed to alloc svcdata\n");
                RETURN(SVC_DROP);
        }
        req->rq_sec_svcdata = svcdata;
        gc = &svcdata->clcred;

        /* now secdata/seclen is what we want to parse */
        version = le32_to_cpu(*secdata++);      /* version */
        subflavor = le32_to_cpu(*secdata++);    /* subflavor */
        gc->gc_proc = le32_to_cpu(*secdata++);  /* proc */
        gc->gc_seq = le32_to_cpu(*secdata++);   /* seq */
        gc->gc_svc = le32_to_cpu(*secdata++);   /* service */
        seclen -= 5 * 4;

        CDEBUG(D_SEC, "wire gss_hdr: %u/%u/%u/%u/%u\n",
               version, subflavor, gc->gc_proc, gc->gc_seq, gc->gc_svc);

        if (version != PTLRPC_SEC_GSS_VERSION) {
                CERROR("gss version mismatch: %d - %d\n",
                       version, PTLRPC_SEC_GSS_VERSION);
                GOTO(err_free, rc = SVC_DROP);
        }

        if (rawobj_extract(&gc->gc_ctx, &secdata, &seclen)) {
                CERROR("failed to obtain gss context handle\n");
                GOTO(err_free, rc = SVC_DROP);
        }

        *res = PTLRPCS_BADVERF;
        switch (gc->gc_proc) {
        case RPC_GSS_PROC_INIT:
        case RPC_GSS_PROC_CONTINUE_INIT:
                rc = gss_svcsec_handle_init(req, gc, secdata, seclen, res);
                break;
        case RPC_GSS_PROC_DATA:
                rc = gss_svcsec_handle_data(req, gc, secdata, seclen, res);
                break;
        case RPC_GSS_PROC_DESTROY:
                rc = gss_svcsec_handle_destroy(req, gc, secdata, seclen, res);
                break;
        default:
                rc = SVC_DROP;
                break;
        }

err_free:
        if (rc == SVC_DROP && req->rq_sec_svcdata) {
                OBD_FREE(req->rq_sec_svcdata, sizeof(struct gss_svc_data));
                req->rq_sec_svcdata = NULL;
        }

        RETURN(rc);
}

static int
gss_svcsec_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct gss_svc_data *gsd = (struct gss_svc_data *)req->rq_sec_svcdata;
        struct rpc_gss_wire_cred *gc = &gsd->clcred;
        struct rsc *rscp = NULL;
        struct ptlrpcs_wire_hdr *sec_hdr;
        rawobj_buf_t msg_buf;
        rawobj_t cipher_buf;
        rawobj_t lmsg, mic;
        __u32 *vp, *vpsave, major, vlen, seclen;
        int ret = 0;

        LASSERT(rs);
        LASSERT(rs->rs_repbuf);

        if (gsd->is_init || gsd->is_init_continue ||
            gsd->is_err_notify || gsd->is_fini) {
                /* nothing to do in these cases */
                CDEBUG(D_SEC, "req %p: init/fini/err\n", req);
                RETURN(0);
        }

        if (gc->gc_proc != RPC_GSS_PROC_DATA) {
                CERROR("proc %d not supported\n", gc->gc_proc);
                RETURN(-EINVAL);
        }

        rscp = gss_svc_searchbyctx(&gc->gc_ctx);
        if (!rscp) {
                CERROR("ctx disappeared under us?\n");
                RETURN(-EINVAL);
        }

        sec_hdr = (struct ptlrpcs_wire_hdr *) rs->rs_repbuf;
        switch (gc->gc_svc) {
        case PTLRPC_GSS_SVC_INTEGRITY:
                /* prepare various pointers */
                lmsg.len = req->rq_replen;
                lmsg.data = (__u8 *) (rs->rs_repbuf + sizeof(*sec_hdr));
                vp = (__u32 *) (lmsg.data + lmsg.len);
                vlen = rs->rs_repbuf_len - sizeof(*sec_hdr) - lmsg.len;
                seclen = vlen;

                /* wire header */
                sec_hdr->flavor = cpu_to_le32(PTLRPC_SEC_GSS);
                sec_hdr->sectype = cpu_to_le32(PTLRPC_SEC_TYPE_AUTH);
                sec_hdr->msg_len = cpu_to_le32(req->rq_replen);

                /* standard gss hdr */
                LASSERT(vlen >= 7 * 4);
                *vp++ = cpu_to_le32(PTLRPC_SEC_GSS_VERSION);
                *vp++ = cpu_to_le32(PTLRPC_SEC_GSS_KRB5I);
                *vp++ = cpu_to_le32(RPC_GSS_PROC_DATA);
                *vp++ = cpu_to_le32(gc->gc_seq);
                *vp++ = cpu_to_le32(PTLRPC_GSS_SVC_INTEGRITY);
                *vp++ = 0;                      /* fake ctx handle */
                vpsave = vp++;                  /* reserve size */
                vlen -= 7 * 4;

                mic.len = vlen;
                mic.data = (unsigned char *)vp;

                major = kgss_get_mic(rscp->mechctx, 0, &lmsg, &mic);
                if (major != GSS_S_COMPLETE) {
                        CERROR("failed to get MIC: 0x%x\n", major);
                        GOTO(out, ret = -EINVAL);
                }

                *vpsave = cpu_to_le32(mic.len);

                seclen = seclen - vlen + mic.len;
                sec_hdr->sec_len = cpu_to_le32(seclen);
                rs->rs_repdata_len += size_round(seclen);
                break;
        case PTLRPC_GSS_SVC_PRIVACY:
                vp = (__u32 *) (rs->rs_repbuf + sizeof(*sec_hdr));
                vlen = rs->rs_repbuf_len - sizeof(*sec_hdr);
                seclen = vlen;

                /* wire header */
                sec_hdr->flavor = cpu_to_le32(PTLRPC_SEC_GSS);
                sec_hdr->sectype = cpu_to_le32(PTLRPC_SEC_TYPE_PRIV);
                sec_hdr->msg_len = cpu_to_le32(0);

                /* standard gss hdr */
                LASSERT(vlen >= 7 * 4);
                *vp++ = cpu_to_le32(PTLRPC_SEC_GSS_VERSION);
                *vp++ = cpu_to_le32(PTLRPC_SEC_GSS_KRB5I);
                *vp++ = cpu_to_le32(RPC_GSS_PROC_DATA);
                *vp++ = cpu_to_le32(gc->gc_seq);
                *vp++ = cpu_to_le32(PTLRPC_GSS_SVC_PRIVACY);
                *vp++ = 0;                      /* fake ctx handle */
                vpsave = vp++;                  /* reserve size */
                vlen -= 7 * 4;

                msg_buf.buf = (__u8 *) rs->rs_msg - GSS_PRIVBUF_PREFIX_LEN;
                msg_buf.buflen = req->rq_replen + GSS_PRIVBUF_PREFIX_LEN +
                                 GSS_PRIVBUF_SUFFIX_LEN;
                msg_buf.dataoff = GSS_PRIVBUF_PREFIX_LEN;
                msg_buf.datalen = req->rq_replen;

                cipher_buf.data = (__u8 *) vp;
                cipher_buf.len = vlen;

                major = kgss_wrap(rscp->mechctx, GSS_C_QOP_DEFAULT,
                                  &msg_buf, &cipher_buf);
                if (major != GSS_S_COMPLETE) {
                        CERROR("failed to wrap: 0x%x\n", major);
                        GOTO(out, ret = -EINVAL);
                }

                *vpsave = cpu_to_le32(cipher_buf.len);

                seclen = seclen - vlen + cipher_buf.len;
                sec_hdr->sec_len = cpu_to_le32(seclen);
                rs->rs_repdata_len += size_round(seclen);
                break;
        default:
                CERROR("Unknown service %d\n", gc->gc_svc);
                GOTO(out, ret = -EINVAL);
        }

out:
        rsc_put(&rscp->h, &rsc_cache);

        RETURN(ret);
}
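
/*
 * A sketch of the integrity-protected reply assembled above (the privacy
 * case is analogous, with the wrapped lustre_msg living in the sec section
 * instead of in the clear):
 *
 *   [ptlrpcs_wire_hdr][lustre_msg][version|subflavor|proc|seq|svc|
 *    handle(0)|mic_len][mic]
 *
 * sec_len covers the gss header words plus the MIC itself.
 */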

static
void gss_svcsec_cleanup_req(struct ptlrpc_svcsec *svcsec,
                            struct ptlrpc_request *req)
{
        struct gss_svc_data *gsd = (struct gss_svc_data *) req->rq_sec_svcdata;

        if (!gsd) {
                CDEBUG(D_SEC, "no svc_data present. do nothing\n");
                return;
        }

        /* gsd->clcred.gc_ctx is NOT allocated, it just points into the
         * incoming packet buffer, so no need to free it */
        OBD_FREE(gsd, sizeof(*gsd));
        req->rq_sec_svcdata = NULL;
}

static
int gss_svcsec_est_payload(struct ptlrpc_svcsec *svcsec,
                           struct ptlrpc_request *req,
                           int msgsize)
{
        struct gss_svc_data *svcdata = req->rq_sec_svcdata;

        /* just return the pre-set reserve_len for init/fini/err cases */
        if (svcdata->is_init) {
                CDEBUG(D_SEC, "is_init, reserve size %d(%d)\n",
                       size_round(svcdata->reserve_len),
                       svcdata->reserve_len);
                LASSERT(svcdata->reserve_len);
                LASSERT(svcdata->reserve_len % 4 == 0);
                RETURN(size_round(svcdata->reserve_len));
        } else if (svcdata->is_err_notify) {
                CDEBUG(D_SEC, "is_err_notify, reserve size %d(%d)\n",
                       size_round(svcdata->reserve_len),
                       svcdata->reserve_len);
                RETURN(size_round(svcdata->reserve_len));
        } else if (svcdata->is_fini) {
                CDEBUG(D_SEC, "is_fini, reserve size 0\n");
                RETURN(0);
        }

        if (svcdata->clcred.gc_svc == PTLRPC_GSS_SVC_NONE ||
            svcdata->clcred.gc_svc == PTLRPC_GSS_SVC_INTEGRITY)
                RETURN(size_round(GSS_MAX_AUTH_PAYLOAD));
        else if (svcdata->clcred.gc_svc == PTLRPC_GSS_SVC_PRIVACY)
                RETURN(size_round16(GSS_MAX_AUTH_PAYLOAD + msgsize +
                                    GSS_PRIVBUF_PREFIX_LEN +
                                    GSS_PRIVBUF_SUFFIX_LEN));

        CERROR("unknown gss svc %u\n", svcdata->clcred.gc_svc);
        RETURN(0);
}

static
int gss_svcsec_alloc_repbuf(struct ptlrpc_svcsec *svcsec,
                            struct ptlrpc_request *req,
                            int msgsize)
{
        struct gss_svc_data *gsd = (struct gss_svc_data *) req->rq_sec_svcdata;
        struct ptlrpc_reply_state *rs;
        int msg_payload, sec_payload;
        int privacy = 0, rc;

        /* determine the security type: none/auth or priv; we have
         * different packing schemes for them. init/fini/err will always
         * be treated as none/auth. */
        if (!gsd->is_init && !gsd->is_init_continue &&
            !gsd->is_fini && !gsd->is_err_notify &&
            gsd->clcred.gc_svc == PTLRPC_GSS_SVC_PRIVACY)
                privacy = 1;

        msg_payload = privacy ? 0 : msgsize;
        sec_payload = gss_svcsec_est_payload(svcsec, req, msgsize);

        rc = svcsec_alloc_reply_state(req, msg_payload, sec_payload);
        if (rc)
                RETURN(rc);

        rs = req->rq_reply_state;
        LASSERT(rs);
        rs->rs_msg_len = msgsize;

        if (privacy) {
                char *msgbuf;

                /* we could let msg simply point to the rear of the buffer,
                 * but that leads to buffer overlap when doing encryption.
                 * usually it's ok and it indeed passed all existing tests,
                 * but we are not sure whether there would be subtle problems
                 * in the future, so right now we choose to alloc another
                 * new buffer. */
#if 0
                rs->rs_msg = (struct lustre_msg *)
                             (rs->rs_repbuf + rs->rs_repbuf_len -
                              msgsize - GSS_PRIVBUF_SUFFIX_LEN);
#endif
                msgsize += GSS_PRIVBUF_PREFIX_LEN + GSS_PRIVBUF_SUFFIX_LEN;
                OBD_ALLOC(msgbuf, msgsize);
                if (!msgbuf) {
                        CERROR("can't alloc %d\n", msgsize);
                        svcsec_free_reply_state(rs);
                        req->rq_reply_state = NULL;
                        RETURN(-ENOMEM);
                }
                rs->rs_msg = (struct lustre_msg *)
                             (msgbuf + GSS_PRIVBUF_PREFIX_LEN);
        }

        req->rq_repmsg = rs->rs_msg;

        RETURN(0);
}

static
void gss_svcsec_free_repbuf(struct ptlrpc_svcsec *svcsec,
                            struct ptlrpc_reply_state *rs)
{
        unsigned long p1 = (unsigned long) rs->rs_msg;
        unsigned long p2 = (unsigned long) rs->rs_buf;

        LASSERT(rs->rs_buf);
        LASSERT(rs->rs_msg);
        LASSERT(rs->rs_msg_len);

        if (p1 < p2 || p1 >= p2 + rs->rs_buf_len) {
                char *start = (char *) rs->rs_msg - GSS_PRIVBUF_PREFIX_LEN;
                int size = rs->rs_msg_len + GSS_PRIVBUF_PREFIX_LEN +
                           GSS_PRIVBUF_SUFFIX_LEN;

                OBD_FREE(start, size);
        }

        svcsec_free_reply_state(rs);
}

struct ptlrpc_svcsec svcsec_gss = {
        .pss_owner      = THIS_MODULE,
        .pss_name       = "GSS_SVCSEC",
        .pss_flavor     = {PTLRPC_SEC_GSS, 0},
        .accept         = gss_svcsec_accept,
        .authorize      = gss_svcsec_authorize,
        .alloc_repbuf   = gss_svcsec_alloc_repbuf,
        .free_repbuf    = gss_svcsec_free_repbuf,
        .cleanup_req    = gss_svcsec_cleanup_req,
};

void lgss_svc_cache_purge_all(void)
{
        cache_purge(&rsi_cache);
        cache_purge(&rsc_cache);
}
EXPORT_SYMBOL(lgss_svc_cache_purge_all);

void lgss_svc_cache_flush(__u32 uid)
{
        rsc_flush(uid);
}
EXPORT_SYMBOL(lgss_svc_cache_flush);

int gss_svc_init(void)
{
        int rc;

        rc = svcsec_register(&svcsec_gss);
        if (!rc) {
                cache_register(&rsc_cache);
                cache_register(&rsi_cache);
        }
        return rc;
}

void gss_svc_exit(void)
{
        int rc;

        if ((rc = cache_unregister(&rsi_cache)))
                CERROR("unregister rsi cache: %d\n", rc);
        if ((rc = cache_unregister(&rsc_cache)))
                CERROR("unregister rsc cache: %d\n", rc);
        if ((rc = svcsec_unregister(&svcsec_gss)))
                CERROR("unregister svcsec_gss: %d\n", rc);
}