1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Modifications for Lustre
5 * Copyright 2004 - 2006, Cluster File Systems, Inc.
7 * Author: Eric Mei <ericm@clusterfs.com>
11 * linux/net/sunrpc/auth_gss.c
13 * RPCSEC_GSS client authentication.
15 * Copyright (c) 2000 The Regents of the University of Michigan.
16 * All rights reserved.
18 * Dug Song <dugsong@monkey.org>
19 * Andy Adamson <andros@umich.edu>
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * 1. Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * 2. Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in the
29 * documentation and/or other materials provided with the distribution.
30 * 3. Neither the name of the University nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
35 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
36 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
37 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
39 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
40 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
41 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
42 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
43 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
44 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 # define EXPORT_SYMTAB
51 #define DEBUG_SUBSYSTEM S_SEC
53 #include <linux/init.h>
54 #include <linux/module.h>
55 #include <linux/slab.h>
56 #include <linux/dcache.h>
58 #include <linux/random.h>
59 #include <asm/atomic.h>
61 #include <liblustre.h>
65 #include <obd_class.h>
66 #include <obd_support.h>
67 #include <lustre/lustre_idl.h>
68 #include <lustre_net.h>
69 #include <lustre_import.h>
70 #include <lustre_sec.h>
73 #include "gss_internal.h"
76 #include <linux/crypto.h>
79 static struct ptlrpc_sec_policy gss_policy;
80 static struct ptlrpc_cli_ctx * gss_sec_create_ctx(struct ptlrpc_sec *sec,
81 struct vfs_cred *vcred);
82 static void gss_sec_destroy_ctx(struct ptlrpc_sec *sec,
83 struct ptlrpc_cli_ctx *ctx);
/********************************************
 * wire data swabbers                       *
 ********************************************/
89 void gss_header_swabber(struct gss_header *ghdr)
91 __swab32s(&ghdr->gh_version);
92 __swab32s(&ghdr->gh_flags);
93 __swab32s(&ghdr->gh_proc);
94 __swab32s(&ghdr->gh_seq);
95 __swab32s(&ghdr->gh_svc);
96 __swab32s(&ghdr->gh_pad1);
97 __swab32s(&ghdr->gh_pad2);
98 __swab32s(&ghdr->gh_pad3);
99 __swab32s(&ghdr->gh_handle.len);
102 struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment)
104 struct gss_header *ghdr;
106 ghdr = lustre_swab_buf(msg, segment, sizeof(*ghdr),
110 sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
CERROR("gss header requires length %u, but only %u received\n",
112 (unsigned int) sizeof(*ghdr) + ghdr->gh_handle.len,
113 msg->lm_buflens[segment]);
121 void gss_netobj_swabber(netobj_t *obj)
123 __swab32s(&obj->len);
126 netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
130 obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
131 if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
CERROR("netobj requires length %u but only %u received\n",
133 (unsigned int) sizeof(*obj) + obj->len,
134 msg->lm_buflens[segment]);
* the payload size should be obtained from the mechanism, but since we
* currently only support kerberos we can simply use a fixed value.
147 #define GSS_KRB5_INTEG_MAX_PAYLOAD (40)
150 int gss_estimate_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
/* we assume the max cipher block size is 16 bytes; here we
 * add 16 for the confounder and 16 for padding.
156 return GSS_KRB5_INTEG_MAX_PAYLOAD + msgsize + 16 + 16 + 16;
158 return GSS_KRB5_INTEG_MAX_PAYLOAD;
* return the signature size on success, or < 0 to indicate an error
166 int gss_sign_msg(struct lustre_msg *msg,
167 struct gss_ctx *mechctx,
168 __u32 proc, __u32 seq,
171 struct gss_header *ghdr;
172 rawobj_t text[3], mic;
173 int textcnt, mic_idx = msg->lm_bufcount - 1;
176 LASSERT(msg->lm_bufcount >= 3);
179 LASSERT(msg->lm_buflens[0] >=
180 sizeof(*ghdr) + (handle ? handle->len : 0));
181 ghdr = lustre_msg_buf(msg, 0, 0);
183 ghdr->gh_version = PTLRPC_GSS_VERSION;
185 ghdr->gh_proc = proc;
187 ghdr->gh_svc = PTLRPC_GSS_SVC_INTEGRITY;
189 /* fill in a fake one */
190 ghdr->gh_handle.len = 0;
192 ghdr->gh_handle.len = handle->len;
193 memcpy(ghdr->gh_handle.data, handle->data, handle->len);
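/* the MIC covers every preceding segment; the last segment is
 * reserved to receive the MIC itself */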
197 for (textcnt = 0; textcnt < mic_idx; textcnt++) {
198 text[textcnt].len = msg->lm_buflens[textcnt];
199 text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
202 mic.len = msg->lm_buflens[mic_idx];
203 mic.data = lustre_msg_buf(msg, mic_idx, 0);
205 major = lgss_get_mic(mechctx, textcnt, text, &mic);
206 if (major != GSS_S_COMPLETE) {
CERROR("failed to generate MIC: %08x\n", major);
210 LASSERT(mic.len <= msg->lm_buflens[mic_idx]);
212 return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
219 __u32 gss_verify_msg(struct lustre_msg *msg,
220 struct gss_ctx *mechctx)
224 int textcnt, mic_idx = msg->lm_bufcount - 1;
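/* as in gss_sign_msg(), the MIC stored in the last segment covers
 * all of the preceding segments */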
227 for (textcnt = 0; textcnt < mic_idx; textcnt++) {
228 text[textcnt].len = msg->lm_buflens[textcnt];
229 text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
232 mic.len = msg->lm_buflens[mic_idx];
233 mic.data = lustre_msg_buf(msg, mic_idx, 0);
235 major = lgss_verify_mic(mechctx, textcnt, text, &mic);
236 if (major != GSS_S_COMPLETE)
237 CERROR("mic verify error: %08x\n", major);
243 * return gss error code
246 __u32 gss_unseal_msg(struct gss_ctx *mechctx,
247 struct lustre_msg *msgbuf,
248 int *msg_len, int msgbuf_len)
250 rawobj_t clear_obj, micobj, msgobj, token;
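/* expected wire layout for the privacy service:
 *   segment 0 - gss header
 *   segment 1 - MIC of the gss header
 *   segment 2 - encrypted lustre message
 */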
256 if (msgbuf->lm_bufcount != 3) {
257 CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
258 RETURN(GSS_S_FAILURE);
261 /* verify gss header */
262 msgobj.len = msgbuf->lm_buflens[0];
263 msgobj.data = lustre_msg_buf(msgbuf, 0, 0);
264 micobj.len = msgbuf->lm_buflens[1];
265 micobj.data = lustre_msg_buf(msgbuf, 1, 0);
267 major = lgss_verify_mic(mechctx, 1, &msgobj, &micobj);
268 if (major != GSS_S_COMPLETE) {
269 CERROR("priv: mic verify error: %08x\n", major);
273 /* temporary clear text buffer */
274 clear_buflen = msgbuf->lm_buflens[2];
275 OBD_ALLOC(clear_buf, clear_buflen);
277 RETURN(GSS_S_FAILURE);
279 token.len = msgbuf->lm_buflens[2];
280 token.data = lustre_msg_buf(msgbuf, 2, 0);
282 clear_obj.len = clear_buflen;
283 clear_obj.data = clear_buf;
285 major = lgss_unwrap(mechctx, &token, &clear_obj);
286 if (major != GSS_S_COMPLETE) {
287 CERROR("priv: unwrap message error: %08x\n", major);
288 GOTO(out_free, major = GSS_S_FAILURE);
290 LASSERT(clear_obj.len <= clear_buflen);
/* copy the decrypted message back over the wire buffer */
293 memcpy(msgbuf, clear_obj.data, clear_obj.len);
294 *msg_len = clear_obj.len;
296 major = GSS_S_COMPLETE;
298 OBD_FREE(clear_buf, clear_buflen);
302 /********************************************
303 * gss client context manipulation helpers *
304 ********************************************/
306 void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
308 struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
309 unsigned long ctx_expiry;
311 if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
312 CERROR("ctx %p(%u): unable to inquire, expire it now\n",
313 gctx, ctx->cc_vcred.vc_uid);
314 ctx_expiry = 1; /* make it expired now */
317 ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
318 ctx->cc_sec->ps_flags);
/* At this point this ctx might have been marked as dead by
 * someone else, in which case nobody will make further use
 * of it. we don't care: marking it UPTODATE still helps
 * destroy the server-side context when this ctx is destroyed.
325 set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
327 CWARN("%s ctx %p(%u->%s), will expire at %lu(%lds lifetime)\n",
328 (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE ?
329 "server installed reverse" : "client refreshed"),
330 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
331 ctx->cc_expire, (long) (ctx->cc_expire - get_seconds()));
335 void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
337 if (gctx->gc_mechctx)
338 lgss_delete_sec_context(&gctx->gc_mechctx);
340 rawobj_free(&gctx->gc_handle);
* Based on the sequence number algorithm specified in RFC 2203,
* modified for our own problem: an arriving request has a valid sequence
* number, but unwrapping the request might take a long time, after which
* its sequence number is no longer valid (it falls behind the window).
* This rarely happens, mostly under extreme load.
*
* Note we must not check the sequence number before verifying the
* integrity of the incoming request, because a single attacking request
* with a high sequence number could otherwise cause all following
* requests to be dropped.
*
* So here we use a multi-phase approach: prepare two sequence windows,
* a "main window" for normal sequence numbers and a "back window" for
* ones that have fallen behind, with a 3-phase checking mechanism:
* 0 - before integrity verification, perform an initial sequence check
*     in the main window, which only tests and does not actually set
*     any bits. if the sequence is high above the window, or fits in
*     the window and the bit is 0, accept it and proceed to integrity
*     verification; otherwise reject it.
* 1 - after integrity verification, check the main window again. if the
*     sequence is high above the window, or fits in the window and the
*     bit is 0, set the bit and accept; if it fits in the window but
*     the bit is already set, reject; if it falls behind the window,
*     proceed to phase 2.
* 2 - check the back window. if it is high above the window, or fits in
*     the window and the bit is 0, set the bit and accept; otherwise
*     reject.
*
* return value:
*   1: looks like a replay
*   0: OK
*  -1: is a replay
*
* Note phase 0 is necessary, because otherwise a replayed request whose
* sequence number lies between the two windows could not be detected.
*
* This mechanism can't totally solve the problem, but it helps far fewer
* valid requests get dropped.
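/*
 * Illustrative example (not from the original code): with win_size == 8
 * and *max_seq == 10, the main window covers sequences 3..10.
 *   - seq 15 is "high above": the window slides forward to cover 8..15
 *     and bit (15 % 8) is set.
 *   - seq 7 "fits": it is accepted only if bit (7 % 8) is still clear;
 *     phase 0 merely tests the bit, phase 1 also sets it.
 *   - seq 1 "falls behind" the main window: phase 0 rejects it, while
 *     phase 1 defers the decision to the back window (phase 2).
 */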
383 int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
384 __u32 seq_num, int phase)
386 LASSERT(phase >= 0 && phase <= 2);
388 if (seq_num > *max_seq) {
390 * 1. high above the window
395 if (seq_num >= *max_seq + win_size) {
396 memset(window, 0, win_size / 8);
399 while(*max_seq < seq_num) {
401 __clear_bit((*max_seq) % win_size, window);
404 __set_bit(seq_num % win_size, window);
405 } else if (seq_num + win_size <= *max_seq) {
407 * 2. low behind the window
409 if (phase == 0 || phase == 2)
412 CWARN("seq %u is %u behind (size %d), check backup window\n",
413 seq_num, *max_seq - win_size - seq_num, win_size);
417 * 3. fit into the window
421 if (test_bit(seq_num % win_size, window))
426 if (__test_and_set_bit(seq_num % win_size, window))
435 CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
437 seq_num + win_size > *max_seq ? "in" : "behind",
438 phase == 2 ? "backup " : "main",
444 * Based on sequence number algorithm as specified in RFC 2203.
446 * if @set == 0: initial check, don't set any bit in window
* if @set == 1: final check, set the bit in the window
449 int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
453 spin_lock(&ssd->ssd_lock);
459 rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
460 &ssd->ssd_max_main, seq_num, 0);
462 gss_stat_oos_record_svc(0, 1);
465 * phase 1 checking main window
467 rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
468 &ssd->ssd_max_main, seq_num, 1);
471 gss_stat_oos_record_svc(1, 1);
477 * phase 2 checking back window
479 rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
480 &ssd->ssd_max_back, seq_num, 2);
482 gss_stat_oos_record_svc(2, 1);
484 gss_stat_oos_record_svc(2, 0);
487 spin_unlock(&ssd->ssd_lock);
491 /***************************************
493 ***************************************/
496 int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
497 int msgsize, int privacy)
499 return gss_estimate_payload(NULL, msgsize, privacy);
503 int gss_cli_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
/* if we are refreshing for root, also update the reverse
 * handle index, so that reverse contexts are not confused
 * with each other.
508 if (ctx->cc_vcred.vc_uid == 0) {
509 struct gss_sec *gsec;
511 gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
512 gsec->gs_rvs_hdl = gss_get_next_ctx_index();
515 return gss_ctx_refresh_pipefs(ctx);
519 int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
521 return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
525 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
529 if (flags & PTLRPC_CTX_UPTODATE)
530 strncat(buf, "uptodate,", bufsize);
531 if (flags & PTLRPC_CTX_DEAD)
532 strncat(buf, "dead,", bufsize);
533 if (flags & PTLRPC_CTX_ERROR)
534 strncat(buf, "error,", bufsize);
535 if (flags & PTLRPC_CTX_HASHED)
536 strncat(buf, "hashed,", bufsize);
537 if (flags & PTLRPC_CTX_ETERNAL)
538 strncat(buf, "eternal,", bufsize);
540 strncat(buf, "-,", bufsize);
542 buf[strlen(buf) - 1] = '\0';
546 int gss_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
548 struct gss_cli_ctx *gctx;
552 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
554 gss_cli_ctx_flags2str(ctx->cc_flags, flags_str, sizeof(flags_str));
556 written = snprintf(buf, bufsize,
561 ctx->cc_vcred.vc_uid,
564 atomic_read(&gctx->gc_seq));
566 if (gctx->gc_mechctx) {
567 written += lgss_display(gctx->gc_mechctx,
568 buf + written, bufsize - written);
575 int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
576 struct ptlrpc_request *req)
578 struct gss_cli_ctx *gctx;
583 LASSERT(req->rq_reqbuf);
584 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
585 LASSERT(req->rq_cli_ctx == ctx);
587 /* nothing to do for context negotiation RPCs */
588 if (req->rq_ctx_init)
591 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
593 seq = atomic_inc_return(&gctx->gc_seq);
595 rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
596 gctx->gc_proc, seq, &gctx->gc_handle);
/* gss_sign_msg() might take a long time to finish, during which more
 * rpcs could be wrapped up and sent out. if we find too many of them
 * we should repack this rpc, because sending it too late might let its
 * sequence number fall behind the window on the server and get the
 * request dropped. the same applies to gss_cli_ctx_seal().
606 if (atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
607 int behind = atomic_read(&gctx->gc_seq) - seq;
609 gss_stat_oos_record_cli(behind);
610 CWARN("req %p: %u behind, retry signing\n", req, behind);
614 req->rq_reqdata_len = rc;
619 int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
620 struct ptlrpc_request *req,
621 struct gss_header *ghdr)
623 struct gss_err_header *errhdr;
626 LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);
628 errhdr = (struct gss_err_header *) ghdr;
/* a NO_CONTEXT error from the server might be caused by context expiry
 * or by a server reboot/failover; we refresh the cred transparently to
 * the upper layer.
 *
 * in some cases our gss handle may happen to be identical to another
 * handle, since the handle itself is not fully random. for krb5 the
 * server will return GSS_S_BAD_SIG in that case; other mechanisms may
 * return other gss errors.
 *
 * if we add a new mechanism, make sure the correct error is returned
 * in this case.
 *
 * in any case, don't resend a ctx-destroying rpc and don't resend
 * requests on a reverse context.
644 if (req->rq_ctx_fini) {
645 CWARN("server respond error (%08x/%08x) for ctx fini\n",
646 errhdr->gh_major, errhdr->gh_minor);
648 } else if (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
649 CWARN("reverse server respond error (%08x/%08x)\n",
650 errhdr->gh_major, errhdr->gh_minor);
652 } else if (errhdr->gh_major == GSS_S_NO_CONTEXT ||
653 errhdr->gh_major == GSS_S_BAD_SIG) {
CWARN("req x"LPU64"/t"LPU64": server responded ctx %p(%u->%s) "
"%s, server might have lost the context\n",
656 req->rq_xid, req->rq_transno, ctx, ctx->cc_vcred.vc_uid,
657 sec2target_str(ctx->cc_sec),
658 errhdr->gh_major == GSS_S_NO_CONTEXT ?
659 "NO_CONTEXT" : "BAD_SIG");
661 sptlrpc_ctx_expire(ctx);
* we need to replace the ctx right here, otherwise during
* resend we'll hit the logic in sptlrpc_req_refresh_ctx()
* which keeps the ctx with the RESEND flag, and thus we'll
* never get rid of this ctx.
668 rc = sptlrpc_req_replace_dead_ctx(req);
CERROR("req %p: server reported gss error (%x/%x)\n",
673 req, errhdr->gh_major, errhdr->gh_minor);
681 int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
682 struct ptlrpc_request *req)
684 struct gss_cli_ctx *gctx;
685 struct gss_header *ghdr, *reqhdr;
686 struct lustre_msg *msg = req->rq_repbuf;
691 LASSERT(req->rq_cli_ctx == ctx);
694 req->rq_repdata_len = req->rq_nob_received;
695 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
697 /* special case for context negotiation, rq_repmsg/rq_replen actually
698 * are not used currently.
700 if (req->rq_ctx_init) {
701 req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
702 req->rq_replen = msg->lm_buflens[1];
706 if (msg->lm_bufcount < 3 || msg->lm_bufcount > 4) {
707 CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
711 ghdr = gss_swab_header(msg, 0);
713 CERROR("can't decode gss header\n");
718 reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));
721 if (ghdr->gh_version != reqhdr->gh_version) {
722 CERROR("gss version %u mismatch, expect %u\n",
723 ghdr->gh_version, reqhdr->gh_version);
727 switch (ghdr->gh_proc) {
728 case PTLRPC_GSS_PROC_DATA:
729 if (ghdr->gh_seq != reqhdr->gh_seq) {
730 CERROR("seqnum %u mismatch, expect %u\n",
731 ghdr->gh_seq, reqhdr->gh_seq);
735 if (ghdr->gh_svc != PTLRPC_GSS_SVC_INTEGRITY) {
736 CERROR("unexpected svc %d\n", ghdr->gh_svc);
740 if (lustre_msg_swabbed(msg))
741 gss_header_swabber(ghdr);
743 major = gss_verify_msg(msg, gctx->gc_mechctx);
744 if (major != GSS_S_COMPLETE)
747 req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
748 req->rq_replen = msg->lm_buflens[1];
750 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
751 if (msg->lm_bufcount < 4) {
752 CERROR("Invalid reply bufcount %u\n",
/* bulk checksum is the second-to-last segment */
758 rc = bulk_sec_desc_unpack(msg, msg->lm_bufcount - 2);
761 case PTLRPC_GSS_PROC_ERR:
762 rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
765 CERROR("unknown gss proc %d\n", ghdr->gh_proc);
773 int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
774 struct ptlrpc_request *req)
776 struct gss_cli_ctx *gctx;
777 rawobj_t msgobj, cipher_obj, micobj;
778 struct gss_header *ghdr;
779 int buflens[3], wiresize, rc;
783 LASSERT(req->rq_clrbuf);
784 LASSERT(req->rq_cli_ctx == ctx);
785 LASSERT(req->rq_reqlen);
787 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
/* finalize the clear data length */
790 req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
791 req->rq_clrbuf->lm_buflens);
793 /* calculate wire data length */
794 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
795 buflens[1] = gss_cli_payload(&gctx->gc_base, buflens[0], 0);
796 buflens[2] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
797 wiresize = lustre_msg_size_v2(3, buflens);
799 /* allocate wire buffer */
802 LASSERT(req->rq_reqbuf);
803 LASSERT(req->rq_reqbuf != req->rq_clrbuf);
804 LASSERT(req->rq_reqbuf_len >= wiresize);
806 OBD_ALLOC(req->rq_reqbuf, wiresize);
809 req->rq_reqbuf_len = wiresize;
812 lustre_init_msg_v2(req->rq_reqbuf, 3, buflens, NULL);
813 req->rq_reqbuf->lm_secflvr = req->rq_sec_flavor;
816 ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
817 ghdr->gh_version = PTLRPC_GSS_VERSION;
819 ghdr->gh_proc = gctx->gc_proc;
820 ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
821 ghdr->gh_svc = PTLRPC_GSS_SVC_PRIVACY;
822 ghdr->gh_handle.len = gctx->gc_handle.len;
823 memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
826 /* header signature */
827 msgobj.len = req->rq_reqbuf->lm_buflens[0];
828 msgobj.data = lustre_msg_buf(req->rq_reqbuf, 0, 0);
829 micobj.len = req->rq_reqbuf->lm_buflens[1];
830 micobj.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);
832 major = lgss_get_mic(gctx->gc_mechctx, 1, &msgobj, &micobj);
833 if (major != GSS_S_COMPLETE) {
834 CERROR("priv: sign message error: %08x\n", major);
835 GOTO(err_free, rc = -EPERM);
/* perhaps shrinking the msg has a potential problem with re-packing???
 * shipping a little bit more data is fine.
839 lustre_shrink_msg(req->rq_reqbuf, 1, micobj.len, 0);
843 msgobj.len = req->rq_clrdata_len;
844 msgobj.data = (__u8 *) req->rq_clrbuf;
847 cipher_obj.len = req->rq_reqbuf->lm_buflens[2];
848 cipher_obj.data = lustre_msg_buf(req->rq_reqbuf, 2, 0);
850 major = lgss_wrap(gctx->gc_mechctx, &msgobj, req->rq_clrbuf_len,
852 if (major != GSS_S_COMPLETE) {
853 CERROR("priv: wrap message error: %08x\n", major);
854 GOTO(err_free, rc = -EPERM);
856 LASSERT(cipher_obj.len <= buflens[2]);
/* see the explanation in gss_cli_ctx_sign() */
859 if (atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
860 GSS_SEQ_REPACK_THRESHOLD) {
861 int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
863 gss_stat_oos_record_cli(behind);
864 CWARN("req %p: %u behind, retry sealing\n", req, behind);
866 ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
870 /* now set the final wire data length */
871 req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 2,
878 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
879 req->rq_reqbuf = NULL;
880 req->rq_reqbuf_len = 0;
886 int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
887 struct ptlrpc_request *req)
889 struct gss_cli_ctx *gctx;
890 struct gss_header *ghdr;
895 LASSERT(req->rq_repbuf);
896 LASSERT(req->rq_cli_ctx == ctx);
898 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
900 ghdr = gss_swab_header(req->rq_repbuf, 0);
902 CERROR("can't decode gss header\n");
907 if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
908 CERROR("gss version %u mismatch, expect %u\n",
909 ghdr->gh_version, PTLRPC_GSS_VERSION);
913 switch (ghdr->gh_proc) {
914 case PTLRPC_GSS_PROC_DATA:
915 if (lustre_msg_swabbed(req->rq_repbuf))
916 gss_header_swabber(ghdr);
918 major = gss_unseal_msg(gctx->gc_mechctx, req->rq_repbuf,
919 &msglen, req->rq_repbuf_len);
920 if (major != GSS_S_COMPLETE) {
925 if (lustre_unpack_msg(req->rq_repbuf, msglen)) {
926 CERROR("Failed to unpack after decryption\n");
929 req->rq_repdata_len = msglen;
931 if (req->rq_repbuf->lm_bufcount < 1) {
932 CERROR("Invalid reply buffer: empty\n");
936 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
937 if (req->rq_repbuf->lm_bufcount < 2) {
CERROR("Too few reply buffer segments %d\n",
939 req->rq_repbuf->lm_bufcount);
943 /* bulk checksum is the last segment */
944 if (bulk_sec_desc_unpack(req->rq_repbuf,
945 req->rq_repbuf->lm_bufcount-1))
949 req->rq_repmsg = lustre_msg_buf(req->rq_repbuf, 0, 0);
950 req->rq_replen = req->rq_repbuf->lm_buflens[0];
954 case PTLRPC_GSS_PROC_ERR:
955 rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
958 CERROR("unexpected proc %d\n", ghdr->gh_proc);
965 static struct ptlrpc_ctx_ops gss_ctxops = {
966 .refresh = gss_cli_ctx_refresh,
967 .match = gss_cli_ctx_match,
968 .display = gss_cli_ctx_display,
969 .sign = gss_cli_ctx_sign,
970 .verify = gss_cli_ctx_verify,
971 .seal = gss_cli_ctx_seal,
972 .unseal = gss_cli_ctx_unseal,
973 .wrap_bulk = gss_cli_ctx_wrap_bulk,
974 .unwrap_bulk = gss_cli_ctx_unwrap_bulk,
977 /*********************************************
978 * reverse context installation *
979 *********************************************/
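/*
 * A reverse context is a client-style context installed for the opposite
 * direction of an already-established context, so that the peer can send
 * RPCs back (e.g. server-originated callbacks) without a separate
 * negotiation. It is built by copying the mechanism state of the
 * existing context.
 */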
981 int gss_install_rvs_cli_ctx(struct gss_sec *gsec,
982 struct ptlrpc_svc_ctx *svc_ctx)
984 struct vfs_cred vcred;
985 struct gss_svc_reqctx *grctx;
986 struct ptlrpc_cli_ctx *cli_ctx;
987 struct gss_cli_ctx *cli_gctx;
988 struct gss_ctx *mechctx = NULL;
996 cli_ctx = gss_sec_create_ctx(&gsec->gs_base, &vcred);
1000 grctx = container_of(svc_ctx, struct gss_svc_reqctx, src_base);
1002 LASSERT(grctx->src_ctx);
1003 LASSERT(grctx->src_ctx->gsc_mechctx);
1005 major = lgss_copy_reverse_context(grctx->src_ctx->gsc_mechctx, &mechctx);
1006 if (major != GSS_S_COMPLETE)
1007 GOTO(err_ctx, rc = -ENOMEM);
1009 cli_gctx = container_of(cli_ctx, struct gss_cli_ctx, gc_base);
1011 cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
1012 cli_gctx->gc_win = GSS_SEQ_WIN;
1013 atomic_set(&cli_gctx->gc_seq, 0);
1015 if (rawobj_dup(&cli_gctx->gc_handle, &grctx->src_ctx->gsc_rvs_hdl))
1016 GOTO(err_mechctx, rc = -ENOMEM);
1018 cli_gctx->gc_mechctx = mechctx;
1019 gss_cli_ctx_uptodate(cli_gctx);
1021 sptlrpc_ctx_replace(&gsec->gs_base, cli_ctx);
1025 lgss_delete_sec_context(&mechctx);
1027 gss_sec_destroy_ctx(cli_ctx->cc_sec, cli_ctx);
1033 int gss_install_rvs_svc_ctx(struct obd_import *imp,
1034 struct gss_sec *gsec,
1035 struct gss_cli_ctx *gctx)
1037 return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
1040 /*********************************************
1041 * GSS security APIs *
1042 *********************************************/
1045 struct ptlrpc_cli_ctx * gss_sec_create_ctx(struct ptlrpc_sec *sec,
1046 struct vfs_cred *vcred)
1048 struct gss_cli_ctx *gctx;
1049 struct ptlrpc_cli_ctx *ctx;
1052 OBD_ALLOC_PTR(gctx);
1057 atomic_set(&gctx->gc_seq, 0);
1059 ctx = &gctx->gc_base;
1060 INIT_HLIST_NODE(&ctx->cc_hash);
1061 atomic_set(&ctx->cc_refcount, 0);
1063 ctx->cc_ops = &gss_ctxops;
1066 ctx->cc_vcred = *vcred;
1067 spin_lock_init(&ctx->cc_lock);
1068 INIT_LIST_HEAD(&ctx->cc_req_list);
1070 CDEBUG(D_SEC, "create a gss cred at %p(uid %u)\n", ctx, vcred->vc_uid);
1075 void gss_sec_destroy_ctx(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
1077 struct gss_cli_ctx *gctx;
1081 LASSERT(atomic_read(&ctx->cc_refcount) == 0);
1083 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
1084 if (gctx->gc_mechctx) {
1085 gss_do_ctx_fini_rpc(gctx);
1086 gss_cli_ctx_finalize(gctx);
1089 CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
1090 ctx->cc_sec->ps_policy->sp_name, ctx->cc_sec,
1091 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
1097 #define GSS_CCACHE_SIZE (32)
1100 struct ptlrpc_sec* gss_sec_create(struct obd_import *imp,
1101 struct ptlrpc_svc_ctx *ctx,
1103 unsigned long flags)
1105 struct gss_sec *gsec;
1106 struct ptlrpc_sec *sec;
1107 int alloc_size, cache_size, i;
1111 LASSERT(SEC_FLAVOR_POLICY(flavor) == SPTLRPC_POLICY_GSS);
1113 if (ctx || flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
1116 cache_size = GSS_CCACHE_SIZE;
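/* the context cache hash table is carved out of the same allocation,
 * immediately after the gss_sec structure itself */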
1118 alloc_size = sizeof(*gsec) + sizeof(struct list_head) * cache_size;
1120 OBD_ALLOC(gsec, alloc_size);
1124 gsec->gs_mech = lgss_subflavor_to_mech(SEC_FLAVOR_SUB(flavor));
1125 if (!gsec->gs_mech) {
1126 CERROR("gss backend 0x%x not found\n", SEC_FLAVOR_SUB(flavor));
1130 spin_lock_init(&gsec->gs_lock);
1131 gsec->gs_rvs_hdl = 0ULL; /* will be updated later */
1133 sec = &gsec->gs_base;
1134 sec->ps_policy = &gss_policy;
1135 sec->ps_flavor = flavor;
1136 sec->ps_flags = flags;
1137 sec->ps_import = class_import_get(imp);
1138 sec->ps_lock = SPIN_LOCK_UNLOCKED;
1139 sec->ps_ccache_size = cache_size;
1140 sec->ps_ccache = (struct hlist_head *) (gsec + 1);
1141 atomic_set(&sec->ps_busy, 0);
1143 for (i = 0; i < cache_size; i++)
1144 INIT_HLIST_HEAD(&sec->ps_ccache[i]);
1147 if (gss_sec_upcall_init(gsec))
1150 sec->ps_gc_interval = 30 * 60; /* 30 minutes */
1151 sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
1153 LASSERT(sec->ps_flags & PTLRPC_SEC_FL_REVERSE);
1155 if (gss_install_rvs_cli_ctx(gsec, ctx))
1158 /* never do gc on reverse sec */
1159 sec->ps_gc_interval = 0;
1160 sec->ps_gc_next = 0;
1163 if (SEC_FLAVOR_SVC(flavor) == SPTLRPC_SVC_PRIV &&
1164 flags & PTLRPC_SEC_FL_BULK)
1165 sptlrpc_enc_pool_add_user();
1167 CWARN("create %s%s@%p\n", (ctx ? "reverse " : ""),
1168 gss_policy.sp_name, gsec);
1172 lgss_mech_put(gsec->gs_mech);
1174 OBD_FREE(gsec, alloc_size);
1179 void gss_sec_destroy(struct ptlrpc_sec *sec)
1181 struct gss_sec *gsec;
1184 gsec = container_of(sec, struct gss_sec, gs_base);
1185 CWARN("destroy %s@%p\n", gss_policy.sp_name, gsec);
1187 LASSERT(gsec->gs_mech);
1188 LASSERT(sec->ps_import);
1189 LASSERT(sec->ps_ccache);
1190 LASSERT(sec->ps_ccache_size);
1191 LASSERT(atomic_read(&sec->ps_refcount) == 0);
1192 LASSERT(atomic_read(&sec->ps_busy) == 0);
1194 gss_sec_upcall_cleanup(gsec);
1195 lgss_mech_put(gsec->gs_mech);
1197 class_import_put(sec->ps_import);
1199 if (SEC_FLAVOR_SVC(sec->ps_flavor) == SPTLRPC_SVC_PRIV &&
1200 sec->ps_flags & PTLRPC_SEC_FL_BULK)
1201 sptlrpc_enc_pool_del_user();
1203 OBD_FREE(gsec, sizeof(*gsec) +
1204 sizeof(struct list_head) * sec->ps_ccache_size);
1209 int gss_alloc_reqbuf_auth(struct ptlrpc_sec *sec,
1210 struct ptlrpc_request *req,
1213 struct sec_flavor_config *conf;
1214 int bufsize, txtsize;
1215 int buflens[5], bufcnt = 2;
/* request (wire) buffer layout:
 *  - gss header
 *  - lustre message
 *  - user descriptor (optional)
 *  - bulk sec descriptor (optional)
 *  - signature
 */
1225 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1226 buflens[1] = msgsize;
1227 txtsize = buflens[0] + buflens[1];
1229 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
1230 buflens[bufcnt] = sptlrpc_current_user_desc_size();
1231 txtsize += buflens[bufcnt];
1235 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
1236 conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
1237 buflens[bufcnt] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
1239 txtsize += buflens[bufcnt];
1243 buflens[bufcnt++] = req->rq_ctx_init ? GSS_CTX_INIT_MAX_LEN :
1244 gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
1246 bufsize = lustre_msg_size_v2(bufcnt, buflens);
1248 if (!req->rq_reqbuf) {
1249 bufsize = size_roundup_power2(bufsize);
1251 OBD_ALLOC(req->rq_reqbuf, bufsize);
1252 if (!req->rq_reqbuf)
1255 req->rq_reqbuf_len = bufsize;
1257 LASSERT(req->rq_pool);
1258 LASSERT(req->rq_reqbuf_len >= bufsize);
1259 memset(req->rq_reqbuf, 0, bufsize);
1262 lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
1263 req->rq_reqbuf->lm_secflvr = req->rq_sec_flavor;
1265 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
1266 LASSERT(req->rq_reqmsg);
/* pack the user descriptor here, since later we might no longer be
 * running in the current user's process context */
1269 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
1270 sptlrpc_pack_user_desc(req->rq_reqbuf, 2);
1276 int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
1277 struct ptlrpc_request *req,
1280 struct sec_flavor_config *conf;
1281 int ibuflens[3], ibufcnt;
1283 int clearsize, wiresize;
1286 LASSERT(req->rq_clrbuf == NULL);
1287 LASSERT(req->rq_clrbuf_len == 0);
/* Inner (clear) buffer layout:
 *  - lustre message
 *  - user descriptor (optional)
 *  - bulk checksum (optional)
 */
1295 ibuflens[0] = msgsize;
1297 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
1298 ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
1299 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
1300 conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
1301 ibuflens[ibufcnt++] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
1304 clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
/* leave room for padding appended during encryption */
1306 clearsize += GSS_MAX_CIPHER_BLOCK;
/* Wrapper (wire) buffer layout:
 *  - gss header
 *  - signature of gss header
 *  - cipher text
 */
1313 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1314 buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
1315 buflens[2] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
1316 wiresize = lustre_msg_size_v2(3, buflens);
1319 /* rq_reqbuf is preallocated */
1320 LASSERT(req->rq_reqbuf);
1321 LASSERT(req->rq_reqbuf_len >= wiresize);
1323 memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);
1325 /* if the pre-allocated buffer is big enough, we just pack
1326 * both clear buf & request buf in it, to avoid more alloc.
1328 if (clearsize + wiresize <= req->rq_reqbuf_len) {
1330 (void *) (((char *) req->rq_reqbuf) + wiresize);
1332 CWARN("pre-allocated buf size %d is not enough for "
1333 "both clear (%d) and cipher (%d) text, proceed "
1334 "with extra allocation\n", req->rq_reqbuf_len,
1335 clearsize, wiresize);
1339 if (!req->rq_clrbuf) {
1340 clearsize = size_roundup_power2(clearsize);
1342 OBD_ALLOC(req->rq_clrbuf, clearsize);
1343 if (!req->rq_clrbuf)
1346 req->rq_clrbuf_len = clearsize;
1348 lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
1349 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);
1351 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
1352 sptlrpc_pack_user_desc(req->rq_clrbuf, 1);
* NOTE: any change to the request buffer allocation should also be
* reflected in the enlarge_reqbuf() family of functions.
1362 int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
1363 struct ptlrpc_request *req,
1366 LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
1367 (req->rq_bulk_read || req->rq_bulk_write));
1369 switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
1370 case SPTLRPC_SVC_NONE:
1371 case SPTLRPC_SVC_AUTH:
1372 return gss_alloc_reqbuf_auth(sec, req, msgsize);
1373 case SPTLRPC_SVC_PRIV:
1374 return gss_alloc_reqbuf_priv(sec, req, msgsize);
1382 void gss_free_reqbuf(struct ptlrpc_sec *sec,
1383 struct ptlrpc_request *req)
1388 LASSERT(!req->rq_pool || req->rq_reqbuf);
1389 privacy = SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV;
1391 if (!req->rq_clrbuf)
1392 goto release_reqbuf;
1394 /* release clear buffer */
1396 LASSERT(req->rq_clrbuf_len);
1399 req->rq_clrbuf >= req->rq_reqbuf &&
1400 (char *) req->rq_clrbuf <
1401 (char *) req->rq_reqbuf + req->rq_reqbuf_len)
1402 goto release_reqbuf;
1404 OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
1405 req->rq_clrbuf = NULL;
1406 req->rq_clrbuf_len = 0;
1409 if (!req->rq_pool && req->rq_reqbuf) {
1410 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
1411 req->rq_reqbuf = NULL;
1412 req->rq_reqbuf_len = 0;
1419 int gss_alloc_repbuf(struct ptlrpc_sec *sec,
1420 struct ptlrpc_request *req,
1423 struct sec_flavor_config *conf;
1424 int privacy = (SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV);
1425 int bufsize, txtsize;
1426 int buflens[4], bufcnt;
1429 LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
1430 (req->rq_bulk_read || req->rq_bulk_write));
1434 buflens[0] = msgsize;
1435 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
1436 conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
1437 buflens[bufcnt++] = bulk_sec_desc_size(
1438 conf->sfc_bulk_csum, 0,
1441 txtsize = lustre_msg_size_v2(bufcnt, buflens);
1442 txtsize += GSS_MAX_CIPHER_BLOCK;
1445 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1446 buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
1447 buflens[2] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
1450 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1451 buflens[1] = msgsize;
1452 txtsize = buflens[0] + buflens[1];
1454 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
1455 conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
1456 buflens[bufcnt] = bulk_sec_desc_size(
1457 conf->sfc_bulk_csum, 0,
1459 txtsize += buflens[bufcnt];
1462 buflens[bufcnt++] = req->rq_ctx_init ? GSS_CTX_INIT_MAX_LEN :
1463 gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
1466 bufsize = lustre_msg_size_v2(bufcnt, buflens);
1467 bufsize = size_roundup_power2(bufsize);
1469 OBD_ALLOC(req->rq_repbuf, bufsize);
1470 if (!req->rq_repbuf)
1473 req->rq_repbuf_len = bufsize;
1478 void gss_free_repbuf(struct ptlrpc_sec *sec,
1479 struct ptlrpc_request *req)
1481 OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
1482 req->rq_repbuf = NULL;
1483 req->rq_repbuf_len = 0;
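/* compute the total message size if @segment were enlarged to @newsize,
 * without actually modifying the message */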
1486 static int get_enlarged_msgsize(struct lustre_msg *msg,
1487 int segment, int newsize)
1489 int save, newmsg_size;
1491 LASSERT(newsize >= msg->lm_buflens[segment]);
1493 save = msg->lm_buflens[segment];
1494 msg->lm_buflens[segment] = newsize;
1495 newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1496 msg->lm_buflens[segment] = save;
1501 static int get_enlarged_msgsize2(struct lustre_msg *msg,
1502 int segment1, int newsize1,
1503 int segment2, int newsize2)
1505 int save1, save2, newmsg_size;
1507 LASSERT(newsize1 >= msg->lm_buflens[segment1]);
1508 LASSERT(newsize2 >= msg->lm_buflens[segment2]);
1510 save1 = msg->lm_buflens[segment1];
1511 save2 = msg->lm_buflens[segment2];
1512 msg->lm_buflens[segment1] = newsize1;
1513 msg->lm_buflens[segment2] = newsize2;
1514 newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1515 msg->lm_buflens[segment1] = save1;
1516 msg->lm_buflens[segment2] = save2;
1521 static inline int msg_last_seglen(struct lustre_msg *msg)
1523 return msg->lm_buflens[msg->lm_bufcount - 1];
1527 int gss_enlarge_reqbuf_auth(struct ptlrpc_sec *sec,
1528 struct ptlrpc_request *req,
1529 int segment, int newsize)
1531 struct lustre_msg *newbuf;
1532 int txtsize, sigsize, i;
1533 int newmsg_size, newbuf_size;
1536 * embedded msg is at seg 1; signature is at the last seg
1538 LASSERT(req->rq_reqbuf);
1539 LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
1540 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
1541 LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);
1543 /* compute new embedded msg size */
1544 newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
1545 LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);
1547 /* compute new wrapper msg size */
1548 for (txtsize = 0, i = 0; i < req->rq_reqbuf->lm_bufcount; i++)
1549 txtsize += req->rq_reqbuf->lm_buflens[i];
1550 txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
1552 sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
1553 LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
1554 newbuf_size = get_enlarged_msgsize2(req->rq_reqbuf, 1, newmsg_size,
1555 req->rq_reqbuf->lm_bufcount - 1,
1558 /* request from pool should always have enough buffer */
1559 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
1561 if (req->rq_reqbuf_len < newbuf_size) {
1562 newbuf_size = size_roundup_power2(newbuf_size);
1564 OBD_ALLOC(newbuf, newbuf_size);
1568 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
1570 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
1571 req->rq_reqbuf = newbuf;
1572 req->rq_reqbuf_len = newbuf_size;
1573 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
1576 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
1577 req->rq_reqbuf->lm_bufcount - 1, sigsize);
1578 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
1579 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
1581 req->rq_reqlen = newmsg_size;
1586 int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
1587 struct ptlrpc_request *req,
1588 int segment, int newsize)
1590 struct lustre_msg *newclrbuf;
1591 int newmsg_size, newclrbuf_size, newcipbuf_size;
1595 * embedded msg is at seg 0 of clear buffer;
1596 * cipher text is at seg 2 of cipher buffer;
1598 LASSERT(req->rq_pool ||
1599 (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
1600 LASSERT(req->rq_reqbuf == NULL ||
1601 (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
1602 LASSERT(req->rq_clrbuf);
1603 LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
1604 LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);
1606 /* compute new embedded msg size */
1607 newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
1609 /* compute new clear buffer size */
1610 newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
1611 newclrbuf_size += GSS_MAX_CIPHER_BLOCK;
1613 /* compute new cipher buffer size */
1614 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1615 buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
1616 buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
1617 newcipbuf_size = lustre_msg_size_v2(3, buflens);
1620 * handle the case that we put both clear buf and cipher buf into
1621 * pre-allocated single buffer.
1623 if (unlikely(req->rq_pool) &&
1624 req->rq_clrbuf >= req->rq_reqbuf &&
1625 (char *) req->rq_clrbuf <
1626 (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
* best case: we still fit into the pre-allocated buffer.
1631 if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
/* move the clear text out of the way of the enlarged cipher buffer */
1635 src = req->rq_clrbuf;
1636 dst = (char *) req->rq_reqbuf + newcipbuf_size;
1638 memmove(dst, src, req->rq_clrbuf_len);
1640 req->rq_clrbuf = (struct lustre_msg *) dst;
1641 req->rq_clrbuf_len = newclrbuf_size;
1642 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
1645 * sadly we have to split out the clear buffer
1647 LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
1648 LASSERT(req->rq_clrbuf_len < newclrbuf_size);
1652 if (req->rq_clrbuf_len < newclrbuf_size) {
1653 newclrbuf_size = size_roundup_power2(newclrbuf_size);
1655 OBD_ALLOC(newclrbuf, newclrbuf_size);
1656 if (newclrbuf == NULL)
1659 memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
1661 if (req->rq_reqbuf == NULL ||
1662 req->rq_clrbuf < req->rq_reqbuf ||
1663 (char *) req->rq_clrbuf >=
1664 (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
1665 OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
1668 req->rq_clrbuf = newclrbuf;
1669 req->rq_clrbuf_len = newclrbuf_size;
1670 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
1673 _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
1674 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
1675 req->rq_reqlen = newmsg_size;
1681 int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
1682 struct ptlrpc_request *req,
1683 int segment, int newsize)
1685 LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
1687 switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
1688 case SPTLRPC_SVC_AUTH:
1689 return gss_enlarge_reqbuf_auth(sec, req, segment, newsize);
1690 case SPTLRPC_SVC_PRIV:
1691 return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
1693 LASSERTF(0, "bad flavor %x\n", req->rq_sec_flavor);
1699 int gss_sec_install_rctx(struct obd_import *imp,
1700 struct ptlrpc_sec *sec,
1701 struct ptlrpc_cli_ctx *ctx)
1703 struct gss_sec *gsec;
1704 struct gss_cli_ctx *gctx;
1707 gsec = container_of(sec, struct gss_sec, gs_base);
1708 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
1710 rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
1714 static struct ptlrpc_sec_cops gss_sec_cops = {
1715 .create_sec = gss_sec_create,
1716 .destroy_sec = gss_sec_destroy,
1717 .create_ctx = gss_sec_create_ctx,
1718 .destroy_ctx = gss_sec_destroy_ctx,
1719 .install_rctx = gss_sec_install_rctx,
1720 .alloc_reqbuf = gss_alloc_reqbuf,
1721 .free_reqbuf = gss_free_reqbuf,
1722 .alloc_repbuf = gss_alloc_repbuf,
1723 .free_repbuf = gss_free_repbuf,
1724 .enlarge_reqbuf = gss_enlarge_reqbuf,
1727 /********************************************
1729 ********************************************/
1732 int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
1735 return (grctx->src_init || grctx->src_init_continue ||
1736 grctx->src_err_notify);
1740 void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
1743 gss_svc_upcall_put_ctx(grctx->src_ctx);
1745 sptlrpc_policy_put(grctx->src_base.sc_policy);
1746 OBD_FREE_PTR(grctx);
1750 void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
1752 LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
1753 atomic_inc(&grctx->src_base.sc_refcount);
1757 void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
1759 LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
1761 if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
1762 gss_svc_reqctx_free(grctx);
1766 int gss_svc_sign(struct ptlrpc_request *req,
1767 struct ptlrpc_reply_state *rs,
1768 struct gss_svc_reqctx *grctx)
1773 LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
/* the embedded lustre_msg might have been shrunk */
1776 if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
1777 lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
1779 rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
1780 PTLRPC_GSS_PROC_DATA, grctx->src_wirectx.gw_seq,
1785 rs->rs_repdata_len = rc;
1789 int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
1791 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
1792 struct ptlrpc_reply_state *rs;
1793 struct gss_err_header *ghdr;
1794 int replen = sizeof(struct ptlrpc_body);
1798 //OBD_FAIL_RETURN(OBD_FAIL_SVCGSS_ERR_NOTIFY|OBD_FAIL_ONCE, -EINVAL);
1800 grctx->src_err_notify = 1;
1801 grctx->src_reserve_len = 0;
1803 rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
1805 CERROR("could not pack reply, err %d\n", rc);
1810 rs = req->rq_reply_state;
1811 LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
1812 ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
1813 ghdr->gh_version = PTLRPC_GSS_VERSION;
1815 ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
1816 ghdr->gh_major = major;
1817 ghdr->gh_minor = minor;
1818 ghdr->gh_handle.len = 0; /* fake context handle */
1820 rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
1821 rs->rs_repbuf->lm_buflens);
1823 CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
1824 major, minor, libcfs_nid2str(req->rq_peer.nid));
1829 int gss_svc_handle_init(struct ptlrpc_request *req,
1830 struct gss_wire_ctx *gw)
1832 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
1833 struct lustre_msg *reqbuf = req->rq_reqbuf;
1834 struct obd_uuid *uuid;
1835 struct obd_device *target;
1836 rawobj_t uuid_obj, rvs_hdl, in_token;
1838 __u32 *secdata, seclen;
1842 CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
1843 libcfs_nid2str(req->rq_peer.nid));
1845 if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
1846 CERROR("proc %u: invalid handle length %u\n",
1847 gw->gw_proc, gw->gw_handle.len);
1848 RETURN(SECSVC_DROP);
1851 if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4){
1852 CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
1853 RETURN(SECSVC_DROP);
1856 /* ctx initiate payload is in last segment */
1857 secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
1858 seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
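/* the init payload is laid out as: a 32-bit lustre service type
 * followed by three length-prefixed objects: the target uuid, the
 * reverse context handle and the initial gss token */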
1860 if (seclen < 4 + 4) {
1861 CERROR("sec size %d too small\n", seclen);
1862 RETURN(SECSVC_DROP);
1865 /* lustre svc type */
1866 lustre_svc = le32_to_cpu(*secdata++);
/* extract the target uuid; note this code is somewhat fragile
 * because it touches the internal structure of obd_uuid
1872 if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
1873 CERROR("failed to extract target uuid\n");
1874 RETURN(SECSVC_DROP);
1876 uuid_obj.data[uuid_obj.len - 1] = '\0';
1878 uuid = (struct obd_uuid *) uuid_obj.data;
1879 target = class_uuid2obd(uuid);
1880 if (!target || target->obd_stopping || !target->obd_set_up) {
CERROR("target '%s' is not available for context init (%s)\n",
1882 uuid->uuid, target == NULL ? "no target" :
1883 (target->obd_stopping ? "stopping" : "not set up"));
1884 RETURN(SECSVC_DROP);
1887 /* extract reverse handle */
1888 if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
CERROR("failed to extract reverse handle\n");
1890 RETURN(SECSVC_DROP);
1894 if (rawobj_extract(&in_token, &secdata, &seclen)) {
1895 CERROR("can't extract token\n");
1896 RETURN(SECSVC_DROP);
1899 rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
1900 &rvs_hdl, &in_token);
1901 if (rc != SECSVC_OK)
1904 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
1905 if (reqbuf->lm_bufcount < 4) {
1906 CERROR("missing user descriptor\n");
1907 RETURN(SECSVC_DROP);
1909 if (sptlrpc_unpack_user_desc(reqbuf, 2)) {
1910 CERROR("Mal-formed user descriptor\n");
1911 RETURN(SECSVC_DROP);
1913 req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
1916 req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
1917 req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);
1923 * last segment must be the gss signature.
1926 int gss_svc_verify_request(struct ptlrpc_request *req,
1927 struct gss_svc_ctx *gctx,
1928 struct gss_wire_ctx *gw,
1931 struct lustre_msg *msg = req->rq_reqbuf;
1935 *major = GSS_S_COMPLETE;
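/* replay protection runs in two passes: a tentative (phase 0) check
 * before the MIC is verified and a committing (phase 1) check after;
 * see gss_do_check_seq() */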
1937 if (msg->lm_bufcount < 3) {
1938 CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
1942 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
1943 CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
1944 *major = GSS_S_DUPLICATE_TOKEN;
1948 *major = gss_verify_msg(msg, gctx->gsc_mechctx);
1949 if (*major != GSS_S_COMPLETE)
1952 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
1953 CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
1954 *major = GSS_S_DUPLICATE_TOKEN;
1958 /* user descriptor */
1959 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
1960 if (msg->lm_bufcount < (offset + 1 + 1)) {
1961 CERROR("no user desc included\n");
1965 if (sptlrpc_unpack_user_desc(msg, offset)) {
1966 CERROR("Mal-formed user descriptor\n");
1970 req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
1974 /* check bulk cksum data */
1975 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
1976 if (msg->lm_bufcount < (offset + 1 + 1)) {
1977 CERROR("no bulk checksum included\n");
1981 if (bulk_sec_desc_unpack(msg, offset))
1985 req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
1986 req->rq_reqlen = msg->lm_buflens[1];
1991 int gss_svc_unseal_request(struct ptlrpc_request *req,
1992 struct gss_svc_ctx *gctx,
1993 struct gss_wire_ctx *gw,
1996 struct lustre_msg *msg = req->rq_reqbuf;
1997 int msglen, offset = 1;
2000 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
2001 CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
2002 *major = GSS_S_DUPLICATE_TOKEN;
2006 *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
2007 &msglen, req->rq_reqdata_len);
2008 if (*major != GSS_S_COMPLETE)
2011 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
2012 CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
2013 *major = GSS_S_DUPLICATE_TOKEN;
2017 if (lustre_unpack_msg(msg, msglen)) {
2018 CERROR("Failed to unpack after decryption\n");
2021 req->rq_reqdata_len = msglen;
2023 if (msg->lm_bufcount < 1) {
2024 CERROR("Invalid buffer: is empty\n");
2028 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
2029 if (msg->lm_bufcount < offset + 1) {
2030 CERROR("no user descriptor included\n");
2034 if (sptlrpc_unpack_user_desc(msg, offset)) {
2035 CERROR("Mal-formed user descriptor\n");
2039 req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
2043 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
2044 if (msg->lm_bufcount < offset + 1) {
2045 CERROR("no bulk checksum included\n");
2049 if (bulk_sec_desc_unpack(msg, offset))
2053 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
2054 req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
2059 int gss_svc_handle_data(struct ptlrpc_request *req,
2060 struct gss_wire_ctx *gw)
2062 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2067 grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
2068 if (!grctx->src_ctx) {
2069 major = GSS_S_NO_CONTEXT;
2073 switch (gw->gw_svc) {
2074 case PTLRPC_GSS_SVC_INTEGRITY:
2075 rc = gss_svc_verify_request(req, grctx->src_ctx, gw, &major);
2077 case PTLRPC_GSS_SVC_PRIVACY:
2078 rc = gss_svc_unseal_request(req, grctx->src_ctx, gw, &major);
2081 CERROR("unsupported gss service %d\n", gw->gw_svc);
2088 CERROR("svc %u failed: major 0x%08x: ctx %p(%u->%s)\n",
2089 gw->gw_svc, major, grctx->src_ctx, grctx->src_ctx->gsc_uid,
2090 libcfs_nid2str(req->rq_peer.nid));
* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
* might happen after a server reboot, to allow recovery.
2096 if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
2097 gss_pack_err_notify(req, major, 0) == 0)
2098 RETURN(SECSVC_COMPLETE);
2100 RETURN(SECSVC_DROP);
2104 int gss_svc_handle_destroy(struct ptlrpc_request *req,
2105 struct gss_wire_ctx *gw)
2107 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2108 int replen = sizeof(struct ptlrpc_body);
2112 grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
2113 if (!grctx->src_ctx) {
2114 CWARN("invalid gss context handle for destroy.\n");
2115 RETURN(SECSVC_DROP);
2118 if (gw->gw_svc != PTLRPC_GSS_SVC_INTEGRITY) {
2119 CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
2120 RETURN(SECSVC_DROP);
2123 if (gss_svc_verify_request(req, grctx->src_ctx, gw, &major))
2124 RETURN(SECSVC_DROP);
2126 if (lustre_pack_reply_v2(req, 1, &replen, NULL))
2127 RETURN(SECSVC_DROP);
2129 CWARN("gss svc destroy ctx %p(%u->%s)\n", grctx->src_ctx,
2130 grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
2132 gss_svc_upcall_destroy_ctx(grctx->src_ctx);
2134 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
2135 if (req->rq_reqbuf->lm_bufcount < 4) {
2136 CERROR("missing user descriptor, ignore it\n");
2139 if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2)) {
2140 CERROR("Mal-formed user descriptor, ignore it\n");
2143 req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
2150 int gss_svc_accept(struct ptlrpc_request *req)
2152 struct gss_header *ghdr;
2153 struct gss_svc_reqctx *grctx;
2154 struct gss_wire_ctx *gw;
2158 LASSERT(req->rq_reqbuf);
2159 LASSERT(req->rq_svc_ctx == NULL);
2161 if (req->rq_reqbuf->lm_bufcount < 2) {
2162 CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
2163 RETURN(SECSVC_DROP);
2166 ghdr = gss_swab_header(req->rq_reqbuf, 0);
2168 CERROR("can't decode gss header\n");
2169 RETURN(SECSVC_DROP);
2173 if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
2174 CERROR("gss version %u, expect %u\n", ghdr->gh_version,
2175 PTLRPC_GSS_VERSION);
2176 RETURN(SECSVC_DROP);
2179 /* alloc grctx data */
2180 OBD_ALLOC_PTR(grctx);
2182 CERROR("fail to alloc svc reqctx\n");
2183 RETURN(SECSVC_DROP);
2185 grctx->src_base.sc_policy = sptlrpc_policy_get(&gss_policy);
2186 atomic_set(&grctx->src_base.sc_refcount, 1);
2187 req->rq_svc_ctx = &grctx->src_base;
2188 gw = &grctx->src_wirectx;
2190 /* save wire context */
2191 gw->gw_proc = ghdr->gh_proc;
2192 gw->gw_seq = ghdr->gh_seq;
2193 gw->gw_svc = ghdr->gh_svc;
2194 rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);
/* keep the original wire header, which is subject to checksum verification */
2197 if (lustre_msg_swabbed(req->rq_reqbuf))
2198 gss_header_swabber(ghdr);
2200 switch(ghdr->gh_proc) {
2201 case PTLRPC_GSS_PROC_INIT:
2202 case PTLRPC_GSS_PROC_CONTINUE_INIT:
2203 rc = gss_svc_handle_init(req, gw);
2205 case PTLRPC_GSS_PROC_DATA:
2206 rc = gss_svc_handle_data(req, gw);
2208 case PTLRPC_GSS_PROC_DESTROY:
2209 rc = gss_svc_handle_destroy(req, gw);
2212 CERROR("unknown proc %u\n", gw->gw_proc);
2219 LASSERT (grctx->src_ctx);
2221 req->rq_auth_gss = 1;
2222 req->rq_auth_remote = grctx->src_ctx->gsc_remote;
2223 req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
2224 req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
2225 req->rq_auth_uid = grctx->src_ctx->gsc_uid;
2226 req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
2228 case SECSVC_COMPLETE:
2231 gss_svc_reqctx_free(grctx);
2232 req->rq_svc_ctx = NULL;
2240 void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
2242 struct gss_svc_reqctx *grctx;
2245 if (svc_ctx == NULL) {
2250 grctx = gss_svc_ctx2reqctx(svc_ctx);
2252 CWARN("gss svc invalidate ctx %p(%u)\n",
2253 grctx->src_ctx, grctx->src_ctx->gsc_uid);
2254 gss_svc_upcall_destroy_ctx(grctx->src_ctx);
2260 int gss_svc_payload(struct gss_svc_reqctx *grctx, int msgsize, int privacy)
2262 if (gss_svc_reqctx_is_special(grctx))
2263 return grctx->src_reserve_len;
2265 return gss_estimate_payload(NULL, msgsize, privacy);
2269 int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2271 struct gss_svc_reqctx *grctx;
2272 struct ptlrpc_reply_state *rs;
2273 struct ptlrpc_bulk_sec_desc *bsd;
2275 int ibuflens[2], ibufcnt = 0;
2276 int buflens[4], bufcnt;
2277 int txtsize, wmsg_size, rs_size;
2280 LASSERT(msglen % 8 == 0);
2282 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) &&
2283 !req->rq_bulk_read && !req->rq_bulk_write) {
CERROR("client requested bulk sec on a non-bulk rpc\n");
2288 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2289 if (gss_svc_reqctx_is_special(grctx))
2292 privacy = (SEC_FLAVOR_SVC(req->rq_sec_flavor) ==
2298 ibuflens[0] = msglen;
2300 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
2301 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
2302 bsd = lustre_msg_buf(req->rq_reqbuf,
2303 req->rq_reqbuf->lm_bufcount - 1,
2306 ibuflens[ibufcnt++] = bulk_sec_desc_size(
2307 bsd->bsd_csum_alg, 0,
2311 txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
2312 txtsize += GSS_MAX_CIPHER_BLOCK;
2314 /* wrapper buffer */
2316 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
2317 buflens[1] = gss_svc_payload(grctx, buflens[0], 0);
2318 buflens[2] = gss_svc_payload(grctx, txtsize, 1);
2321 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
2322 buflens[1] = msglen;
2323 txtsize = buflens[0] + buflens[1];
2325 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
2326 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
2327 bsd = lustre_msg_buf(req->rq_reqbuf,
2328 req->rq_reqbuf->lm_bufcount - 2,
2331 buflens[bufcnt] = bulk_sec_desc_size(
2332 bsd->bsd_csum_alg, 0,
2334 txtsize += buflens[bufcnt];
2337 buflens[bufcnt++] = gss_svc_payload(grctx, txtsize, 0);
2340 wmsg_size = lustre_msg_size_v2(bufcnt, buflens);
2342 rs_size = sizeof(*rs) + wmsg_size;
2343 rs = req->rq_reply_state;
2347 LASSERT(rs->rs_size >= rs_size);
2349 OBD_ALLOC(rs, rs_size);
2353 rs->rs_size = rs_size;
2356 rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
2357 rs->rs_repbuf_len = wmsg_size;
2360 lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
2361 rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
2363 lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
2364 rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;
2366 rs->rs_msg = (struct lustre_msg *)
2367 lustre_msg_buf(rs->rs_repbuf, 1, 0);
2370 gss_svc_reqctx_addref(grctx);
2371 rs->rs_svc_ctx = req->rq_svc_ctx;
2373 LASSERT(rs->rs_msg);
2374 req->rq_reply_state = rs;
2379 int gss_svc_seal(struct ptlrpc_request *req,
2380 struct ptlrpc_reply_state *rs,
2381 struct gss_svc_reqctx *grctx)
2383 struct gss_svc_ctx *gctx = grctx->src_ctx;
2384 rawobj_t msgobj, cipher_obj, micobj;
2385 struct gss_header *ghdr;
2387 int cipher_buflen, buflens[3];
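/* sealing a reply: shrink the embedded message if needed, encrypt the
 * whole clear reply buffer into a temporary cipher buffer, then rebuild
 * rs_repbuf as [ gss header | MIC of header | cipher text ] */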
/* the embedded lustre_msg might have been shrunk */
2393 if (req->rq_replen != rs->rs_repbuf->lm_buflens[0])
2394 lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
2396 /* clear data length */
2397 msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
2398 rs->rs_repbuf->lm_buflens);
2401 msgobj.len = msglen;
2402 msgobj.data = (__u8 *) rs->rs_repbuf;
2404 /* allocate temporary cipher buffer */
2405 cipher_buflen = gss_estimate_payload(gctx->gsc_mechctx, msglen, 1);
2406 OBD_ALLOC(cipher_buf, cipher_buflen);
2410 cipher_obj.len = cipher_buflen;
2411 cipher_obj.data = cipher_buf;
2413 major = lgss_wrap(gctx->gsc_mechctx, &msgobj, rs->rs_repbuf_len,
2415 if (major != GSS_S_COMPLETE) {
2416 CERROR("priv: wrap message error: %08x\n", major);
2417 GOTO(out_free, rc = -EPERM);
2419 LASSERT(cipher_obj.len <= cipher_buflen);
2421 /* now the real wire data */
2422 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
2423 buflens[1] = gss_estimate_payload(gctx->gsc_mechctx, buflens[0], 0);
2424 buflens[2] = cipher_obj.len;
2426 LASSERT(lustre_msg_size_v2(3, buflens) <= rs->rs_repbuf_len);
2427 lustre_init_msg_v2(rs->rs_repbuf, 3, buflens, NULL);
2428 rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;
2431 ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
2432 ghdr->gh_version = PTLRPC_GSS_VERSION;
2434 ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
2435 ghdr->gh_seq = grctx->src_wirectx.gw_seq;
2436 ghdr->gh_svc = PTLRPC_GSS_SVC_PRIVACY;
2437 ghdr->gh_handle.len = 0;
2439 /* header signature */
2440 msgobj.len = rs->rs_repbuf->lm_buflens[0];
2441 msgobj.data = lustre_msg_buf(rs->rs_repbuf, 0, 0);
2442 micobj.len = rs->rs_repbuf->lm_buflens[1];
2443 micobj.data = lustre_msg_buf(rs->rs_repbuf, 1, 0);
2445 major = lgss_get_mic(gctx->gsc_mechctx, 1, &msgobj, &micobj);
2446 if (major != GSS_S_COMPLETE) {
2447 CERROR("priv: sign message error: %08x\n", major);
2448 GOTO(out_free, rc = -EPERM);
2450 lustre_shrink_msg(rs->rs_repbuf, 1, micobj.len, 0);
2453 memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0),
2454 cipher_obj.data, cipher_obj.len);
2456 rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
/* catch any further access from the upper layer */
2461 req->rq_repmsg = NULL;
2466 OBD_FREE(cipher_buf, cipher_buflen);
2470 int gss_svc_authorize(struct ptlrpc_request *req)
2472 struct ptlrpc_reply_state *rs = req->rq_reply_state;
2473 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2474 struct gss_wire_ctx *gw;
2478 if (gss_svc_reqctx_is_special(grctx))
2481 gw = &grctx->src_wirectx;
2482 if (gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
2483 gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
CERROR("proc %d not supported\n", gw->gw_proc);
2488 LASSERT(grctx->src_ctx);
2490 switch (gw->gw_svc) {
2491 case PTLRPC_GSS_SVC_INTEGRITY:
2492 rc = gss_svc_sign(req, rs, grctx);
2494 case PTLRPC_GSS_SVC_PRIVACY:
2495 rc = gss_svc_seal(req, rs, grctx);
2498 CERROR("Unknown service %d\n", gw->gw_svc);
2499 GOTO(out, rc = -EINVAL);
2508 void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
2510 struct gss_svc_reqctx *grctx;
2512 LASSERT(rs->rs_svc_ctx);
2513 grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);
2515 gss_svc_reqctx_decref(grctx);
2516 rs->rs_svc_ctx = NULL;
2518 if (!rs->rs_prealloc)
2519 OBD_FREE(rs, rs->rs_size);
2523 void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
2525 LASSERT(atomic_read(&ctx->sc_refcount) == 0);
2526 gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
2530 int gss_svc_install_rctx(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx)
2532 struct gss_sec *gsec;
2534 LASSERT(imp->imp_sec);
2537 gsec = container_of(imp->imp_sec, struct gss_sec, gs_base);
2538 return gss_install_rvs_cli_ctx(gsec, ctx);
2541 static struct ptlrpc_sec_sops gss_sec_sops = {
2542 .accept = gss_svc_accept,
2543 .invalidate_ctx = gss_svc_invalidate_ctx,
2544 .alloc_rs = gss_svc_alloc_rs,
2545 .authorize = gss_svc_authorize,
2546 .free_rs = gss_svc_free_rs,
2547 .free_ctx = gss_svc_free_ctx,
2548 .unwrap_bulk = gss_svc_unwrap_bulk,
2549 .wrap_bulk = gss_svc_wrap_bulk,
2550 .install_rctx = gss_svc_install_rctx,
2553 static struct ptlrpc_sec_policy gss_policy = {
2554 .sp_owner = THIS_MODULE,
2555 .sp_name = "sec.gss",
2556 .sp_policy = SPTLRPC_POLICY_GSS,
2557 .sp_cops = &gss_sec_cops,
2558 .sp_sops = &gss_sec_sops,
2561 int __init sptlrpc_gss_init(void)
2565 rc = gss_init_lproc();
2569 rc = gss_init_upcall();
2573 rc = init_kerberos_module();
* register the policy after everything else has been initialized,
* because it might be used immediately after registration.
2581 rc = sptlrpc_register_policy(&gss_policy);
2587 cleanup_kerberos_module();
2595 static void __exit sptlrpc_gss_exit(void)
2597 sptlrpc_unregister_policy(&gss_policy);
2598 cleanup_kerberos_module();
2603 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2604 MODULE_DESCRIPTION("GSS security policy for Lustre");
2605 MODULE_LICENSE("GPL");
2607 module_init(sptlrpc_gss_init);
2608 module_exit(sptlrpc_gss_exit);