1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Modifications for Lustre
5 * Copyright 2004 - 2007, Cluster File Systems, Inc.
7 * Author: Eric Mei <ericm@clusterfs.com>
11 * linux/net/sunrpc/auth_gss.c
13 * RPCSEC_GSS client authentication.
15 * Copyright (c) 2000 The Regents of the University of Michigan.
16 * All rights reserved.
18 * Dug Song <dugsong@monkey.org>
19 * Andy Adamson <andros@umich.edu>
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * 1. Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * 2. Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in the
29 * documentation and/or other materials provided with the distribution.
30 * 3. Neither the name of the University nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
35 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
36 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
37 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
39 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
40 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
41 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
42 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
43 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
44 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
51 #define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/random.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
#include <liblustre.h>
#endif
66 #include <obd_class.h>
67 #include <obd_support.h>
68 #include <lustre/lustre_idl.h>
69 #include <lustre_net.h>
70 #include <lustre_import.h>
71 #include <lustre_sec.h>
74 #include "gss_internal.h"
77 #include <linux/crypto.h>
static inline int msg_last_segidx(struct lustre_msg *msg)
{
        LASSERT(msg->lm_bufcount > 0);
        return msg->lm_bufcount - 1;
}

static inline int msg_last_seglen(struct lustre_msg *msg)
{
        return msg->lm_buflens[msg_last_segidx(msg)];
}
/********************************************
 * wire data swabber                        *
 ********************************************/
void gss_header_swabber(struct gss_header *ghdr)
{
        __swab32s(&ghdr->gh_flags);
        __swab32s(&ghdr->gh_proc);
        __swab32s(&ghdr->gh_seq);
        __swab32s(&ghdr->gh_svc);
        __swab32s(&ghdr->gh_pad1);
        __swab32s(&ghdr->gh_handle.len);
}
struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment)
{
        struct gss_header *ghdr;

        ghdr = lustre_swab_buf(msg, segment, sizeof(*ghdr),
                               gss_header_swabber);
        if (ghdr &&
            sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
                CERROR("gss header requires length %u, but only %u received\n",
                       (unsigned int) sizeof(*ghdr) + ghdr->gh_handle.len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return ghdr;
}
void gss_netobj_swabber(netobj_t *obj)
{
        __swab32s(&obj->len);
}
netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
{
        netobj_t *obj;

        obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
        if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
                CERROR("netobj requires length %u but only %u received\n",
                       (unsigned int) sizeof(*obj) + obj->len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return obj;
}
/*
 * The payload size should be obtained from the mechanism, but since we
 * currently only support kerberos we can simply use a fixed value.
 */
150 #define GSS_KRB5_INTEG_MAX_PAYLOAD (40)
int gss_estimate_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
        if (privacy) {
                /* we assume the max cipher block size is 16 bytes, and add
                 * another 16 bytes each for the confounder and for padding */
                return GSS_KRB5_INTEG_MAX_PAYLOAD + msgsize + 16 + 16 + 16;
        }

        return GSS_KRB5_INTEG_MAX_PAYLOAD;
}
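/*
 * Illustrative example (not from the original source): sealing a
 * 1024-byte message with the assumptions above reserves
 * 40 + 1024 + 16 + 16 + 16 = 1112 bytes of wire payload, while an
 * integrity-only (AUTH/INTG) request reserves just the fixed 40-byte
 * MIC allowance regardless of message size.
 */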
/*
 * Returns the signature size on success, or < 0 on error.
 */
167 static int gss_sign_msg(struct lustre_msg *msg,
168 struct gss_ctx *mechctx,
169 enum lustre_sec_part sp,
                        __u32 flags, __u32 proc, __u32 seq, __u32 svc,
                        rawobj_t *handle)
{
173 struct gss_header *ghdr;
174 rawobj_t text[3], mic;
        int textcnt, max_textcnt, mic_idx;
        __u32 major;
178 LASSERT(msg->lm_bufcount >= 2);
181 LASSERT(msg->lm_buflens[0] >=
182 sizeof(*ghdr) + (handle ? handle->len : 0));
183 ghdr = lustre_msg_buf(msg, 0, 0);
185 ghdr->gh_version = PTLRPC_GSS_VERSION;
186 ghdr->gh_sp = (__u8) sp;
187 ghdr->gh_flags = flags;
188 ghdr->gh_proc = proc;
        ghdr->gh_seq = seq;
        ghdr->gh_svc = svc;
        if (!handle) {
                /* fill in a fake one */
                ghdr->gh_handle.len = 0;
        } else {
                ghdr->gh_handle.len = handle->len;
                memcpy(ghdr->gh_handle.data, handle->data, handle->len);
        }
199 /* no actual signature for null mode */
200 if (svc == SPTLRPC_SVC_NULL)
201 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
204 mic_idx = msg_last_segidx(msg);
205 max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
207 for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
208 text[textcnt].len = msg->lm_buflens[textcnt];
209 text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
212 mic.len = msg->lm_buflens[mic_idx];
213 mic.data = lustre_msg_buf(msg, mic_idx, 0);
        major = lgss_get_mic(mechctx, textcnt, text, &mic);
        if (major != GSS_S_COMPLETE) {
                CERROR("failed to generate MIC: %08x\n", major);
                return -EPERM;
        }
        LASSERT(mic.len <= msg->lm_buflens[mic_idx]);

        return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
}
__u32 gss_verify_msg(struct lustre_msg *msg,
                     struct gss_ctx *mechctx,
                     __u32 svc)
{
        rawobj_t text[3], mic;
        int textcnt, max_textcnt, mic_idx;
        __u32 major;
238 LASSERT(msg->lm_bufcount >= 2);
240 if (svc == SPTLRPC_SVC_NULL)
241 return GSS_S_COMPLETE;
243 mic_idx = msg_last_segidx(msg);
244 max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
246 for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
247 text[textcnt].len = msg->lm_buflens[textcnt];
248 text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
251 mic.len = msg->lm_buflens[mic_idx];
252 mic.data = lustre_msg_buf(msg, mic_idx, 0);
        major = lgss_verify_mic(mechctx, textcnt, text, &mic);
        if (major != GSS_S_COMPLETE)
                CERROR("mic verify error: %08x\n", major);

        return major;
}
262 * return gss error code
265 __u32 gss_unseal_msg(struct gss_ctx *mechctx,
266 struct lustre_msg *msgbuf,
267 int *msg_len, int msgbuf_len)
{
        rawobj_t clear_obj, micobj, msgobj, token;
        __u8 *clear_buf;
        int clear_buflen;
        __u32 major;
275 if (msgbuf->lm_bufcount != 3) {
276 CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
277 RETURN(GSS_S_FAILURE);
280 /* verify gss header */
281 msgobj.len = msgbuf->lm_buflens[0];
282 msgobj.data = lustre_msg_buf(msgbuf, 0, 0);
283 micobj.len = msgbuf->lm_buflens[1];
284 micobj.data = lustre_msg_buf(msgbuf, 1, 0);
        major = lgss_verify_mic(mechctx, 1, &msgobj, &micobj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: mic verify error: %08x\n", major);
                RETURN(major);
        }
292 /* temporary clear text buffer */
293 clear_buflen = msgbuf->lm_buflens[2];
        OBD_ALLOC(clear_buf, clear_buflen);
        if (!clear_buf)
                RETURN(GSS_S_FAILURE);
298 token.len = msgbuf->lm_buflens[2];
299 token.data = lustre_msg_buf(msgbuf, 2, 0);
301 clear_obj.len = clear_buflen;
302 clear_obj.data = clear_buf;
304 major = lgss_unwrap(mechctx, &token, &clear_obj);
305 if (major != GSS_S_COMPLETE) {
306 CERROR("priv: unwrap message error: %08x\n", major);
307 GOTO(out_free, major = GSS_S_FAILURE);
309 LASSERT(clear_obj.len <= clear_buflen);
        /* copy the decrypted message back into the front of msgbuf */
        memcpy(msgbuf, clear_obj.data, clear_obj.len);
        *msg_len = clear_obj.len;
        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE(clear_buf, clear_buflen);
        RETURN(major);
}
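/*
 * Wire layout handled by gss_unseal_msg() above (a sketch derived from
 * the segment checks in the function, for orientation only):
 *
 *   [0] gss header      - integrity-protected but sent in the clear
 *   [1] MIC of [0]
 *   [2] wrap token      - the encrypted lustre_msg, decrypted back into
 *                         the front of msgbuf
 */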
321 /********************************************
322 * gss client context manipulation helpers *
323 ********************************************/
325 int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
327 LASSERT(atomic_read(&ctx->cc_refcount));
329 if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
330 if (!ctx->cc_early_expire)
331 clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
                CWARN("ctx %p(%u->%s) expired: %lu(%+lds)\n",
                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
                      ctx->cc_expire,
                      ctx->cc_expire == 0 ? 0 :
                      cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
346 * return 1 if the context is dead.
348 int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
350 if (unlikely(cli_ctx_is_dead(ctx)))
        /* cc_expire == 0 means it never expires. A newly created gss
         * context may have expire == 0 while the upcall is in progress. */
355 if (ctx->cc_expire == 0)
358 /* check real expiration */
359 if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
366 void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
368 struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
369 unsigned long ctx_expiry;
371 if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
372 CERROR("ctx %p(%u): unable to inquire, expire it now\n",
373 gctx, ctx->cc_vcred.vc_uid);
374 ctx_expiry = 1; /* make it expired now */
377 ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
378 ctx->cc_sec->ps_flvr.sf_flags);
        /* At this point this ctx might have been marked as dead by
         * someone else, in which case nobody will make further use of it.
         * We don't care; marking it UPTODATE still helps to destroy the
         * server-side context when it is destroyed. */
384 set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
386 if (sec_is_reverse(ctx->cc_sec)) {
387 CWARN("server installed reverse ctx %p idx "LPX64", "
388 "expiry %lu(%+lds)\n", ctx,
389 gss_handle_to_u64(&gctx->gc_handle),
390 ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
392 CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
393 "expiry %lu(%+lds)\n", ctx,
394 gss_handle_to_u64(&gctx->gc_handle),
395 ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
396 ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
398 /* install reverse svc ctx for root context */
399 if (ctx->cc_vcred.vc_uid == 0)
400 gss_sec_install_rctx(ctx->cc_sec->ps_import,
405 static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
407 LASSERT(gctx->gc_base.cc_sec);
409 if (gctx->gc_mechctx) {
410 lgss_delete_sec_context(&gctx->gc_mechctx);
411 gctx->gc_mechctx = NULL;
414 if (!rawobj_empty(&gctx->gc_svc_handle)) {
                /* forward ctx: mark the buddy reverse svcctx to expire soon.
                 * reverse ctx: update the current seq to the buddy svcctx. */
417 if (!sec_is_reverse(gctx->gc_base.cc_sec))
418 gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);
420 gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
421 (__u32) atomic_read(&gctx->gc_seq));
423 rawobj_free(&gctx->gc_svc_handle);
426 rawobj_free(&gctx->gc_handle);
/*
 * Based on the sequence number algorithm as specified in RFC 2203,
 * modified for our own problem: an arriving request carries a valid
 * sequence number, but unwrapping the request might take a long time,
 * after which its sequence number is no longer valid (it has fallen
 * behind the window). This rarely happens, mostly under extreme load.
 *
 * Note we should not check the sequence number before verifying the
 * integrity of the incoming request, because otherwise a single forged
 * request with a high sequence number could cause all following
 * requests to be dropped.
 *
 * So here we use a multi-phase approach: prepare 2 sequence windows,
 * a "main window" for normal sequence numbers and a "back window" for
 * fallen-behind sequence numbers, and a 3-phase checking mechanism:
 *  0 - before integrity verification, perform an initial sequence check
 *      in the main window, which only probes and doesn't actually set
 *      any bits. If the sequence is high above the window, or fits in
 *      the window and the bit is 0, accept it and proceed to integrity
 *      verification; otherwise reject this sequence.
 *  1 - after integrity verification, check the main window again. If the
 *      sequence is high above the window, or fits in the window and the
 *      bit is 0, set the bit and accept; if it fits in the window but
 *      the bit is already set, reject; if it falls behind the window,
 *      proceed to phase 2.
 *  2 - check the back window. If it is high above the window, or fits in
 *      the window and the bit is 0, set the bit and accept; otherwise
 *      reject.
 *
 * Return value:
 *   1: looks like a replay
 *   0: accepted
 *  -1: is a replay
 *
 * Note phase 0 is necessary, because otherwise a replayed request whose
 * sequence number falls between the 2 windows couldn't be detected.
 *
 * This mechanism can't totally solve the problem, but it helps far fewer
 * valid requests to be dropped. A worked example follows
 * gss_do_check_seq() below.
 */
469 int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
470 __u32 seq_num, int phase)
{
        LASSERT(phase >= 0 && phase <= 2);

        if (seq_num > *max_seq) {
                /*
                 * 1. high above the window
                 */
                if (phase == 0)
                        return 0;
                if (seq_num >= *max_seq + win_size) {
                        memset(window, 0, win_size / 8);
                        *max_seq = seq_num;
                } else {
                        while (*max_seq < seq_num) {
                                (*max_seq)++;
                                __clear_bit((*max_seq) % win_size, window);
                        }
                }
                __set_bit(seq_num % win_size, window);
        } else if (seq_num + win_size <= *max_seq) {
                /*
                 * 2. low behind the window
                 */
                if (phase == 0 || phase == 2)
                        goto replay;

                CWARN("seq %u is %u behind (size %d), check backup window\n",
                      seq_num, *max_seq - win_size - seq_num, win_size);
                return 1;
        } else {
                /*
                 * 3. fit into the window
                 */
                switch (phase) {
                case 0:
                        if (test_bit(seq_num % win_size, window))
                                goto replay;
                        break;
                case 1:
                case 2:
                        if (__test_and_set_bit(seq_num % win_size, window))
                                goto replay;
                        break;
                }
        }
        return 0;

replay:
        CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
               seq_num,
               seq_num + win_size > *max_seq ? "in" : "behind",
               phase == 2 ? "backup " : "main",
               *max_seq, win_size);
        return -1;
}
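/*
 * Worked example (illustrative numbers, not from the original source):
 * suppose win_size = 8, *max_seq = 10, and bits are set for seqs
 * {5, 7, 9, 10}. Then in phase 1:
 *
 *   seq 11 -> above the window: slide forward, set bit 11 % 8, accept
 *   seq  9 -> fits in the window, bit already set: replay, return -1
 *   seq  6 -> fits in the window, bit clear: set it and accept
 *   seq  2 -> 2 + 8 <= 10, fell behind: return 1, and the caller goes
 *             on to try the back window (phase 2)
 */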
/*
 * Based on the sequence number algorithm as specified in RFC 2203.
 *
 * If @set == 0: initial check, don't set any bit in the window.
 * If @set == 1: final check, set the bit in the window.
 */
535 int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
539 spin_lock(&ssd->ssd_lock);
545 rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
546 &ssd->ssd_max_main, seq_num, 0);
548 gss_stat_oos_record_svc(0, 1);
551 * phase 1 checking main window
553 rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
554 &ssd->ssd_max_main, seq_num, 1);
557 gss_stat_oos_record_svc(1, 1);
563 * phase 2 checking back window
565 rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
566 &ssd->ssd_max_back, seq_num, 2);
568 gss_stat_oos_record_svc(2, 1);
570 gss_stat_oos_record_svc(2, 0);
573 spin_unlock(&ssd->ssd_lock);
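/*
 * Typical call pattern (a sketch of how gss_svc_verify_request() below
 * uses this, shown schematically):
 *
 *      if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0))
 *              reject;                         (phase 0: probe only)
 *      ... verify the MIC / unseal the request ...
 *      if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1))
 *              reject;                         (phase 1/2: set the bit)
 */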
/***************************************
 * cred APIs                           *
 ***************************************/
582 int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
583 int msgsize, int privacy)
585 return gss_estimate_payload(NULL, msgsize, privacy);
588 int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
590 return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
593 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
597 if (flags & PTLRPC_CTX_NEW)
598 strncat(buf, "new,", bufsize);
599 if (flags & PTLRPC_CTX_UPTODATE)
600 strncat(buf, "uptodate,", bufsize);
601 if (flags & PTLRPC_CTX_DEAD)
602 strncat(buf, "dead,", bufsize);
603 if (flags & PTLRPC_CTX_ERROR)
604 strncat(buf, "error,", bufsize);
605 if (flags & PTLRPC_CTX_CACHED)
606 strncat(buf, "cached,", bufsize);
607 if (flags & PTLRPC_CTX_ETERNAL)
608 strncat(buf, "eternal,", bufsize);
        if (buf[0] == '\0')
                strncat(buf, "-,", bufsize);
612 buf[strlen(buf) - 1] = '\0';
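/*
 * Example (illustrative): for a ctx with only PTLRPC_CTX_NEW and
 * PTLRPC_CTX_CACHED set, this produces the string "new,cached"; with no
 * flags set it produces "-".
 */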
615 int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
616 struct ptlrpc_request *req)
618 struct gss_cli_ctx *gctx = ctx2gctx(ctx);
619 __u32 flags = 0, seq, svc;
623 LASSERT(req->rq_reqbuf);
624 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
625 LASSERT(req->rq_cli_ctx == ctx);
627 /* nothing to do for context negotiation RPCs */
628 if (req->rq_ctx_init)
631 svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
632 if (req->rq_pack_bulk)
633 flags |= LUSTRE_GSS_PACK_BULK;
634 if (req->rq_pack_udesc)
635 flags |= LUSTRE_GSS_PACK_USER;
638 seq = atomic_inc_return(&gctx->gc_seq);
640 rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
641 ctx->cc_sec->ps_part,
642 flags, gctx->gc_proc, seq, svc,
        /* gss_sign_msg() might take a long time to finish, during which
         * more rpcs could be wrapped up and sent out. If we find too many
         * of them we should repack this rpc, because sending it too late
         * might cause its sequence number to fall behind the window on the
         * server and get dropped. The same applies to gss_cli_ctx_seal().
         *
         * Note: null mode doesn't check the sequence number. */
654 if (svc != SPTLRPC_SVC_NULL &&
655 atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
656 int behind = atomic_read(&gctx->gc_seq) - seq;
658 gss_stat_oos_record_cli(behind);
659 CWARN("req %p: %u behind, retry signing\n", req, behind);
663 req->rq_reqdata_len = rc;
668 int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
669 struct ptlrpc_request *req,
670 struct gss_header *ghdr)
672 struct gss_err_header *errhdr;
675 LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);
677 errhdr = (struct gss_err_header *) ghdr;
        /* The server returning NO_CONTEXT may be caused by context expiry
         * or server reboot/failover; we refresh the cred transparently in
         * this case.
         *
         * In some cases our gss handle may happen to be identical to
         * another handle, since the handle itself is not fully random; in
         * the krb5 case GSS_S_BAD_SIG will be returned, and other gss
         * errors may be returned by other mechanisms. If we add a new
         * mechanism, make sure the correct error is returned in this case.
         *
         * In any case, don't resend a ctx-destroying rpc. */
692 if (req->rq_ctx_fini) {
693 CWARN("server respond error (%08x/%08x) for ctx fini\n",
694 errhdr->gh_major, errhdr->gh_minor);
696 } else if (sec_is_reverse(ctx->cc_sec)) {
697 CWARN("reverse server respond error (%08x/%08x)\n",
698 errhdr->gh_major, errhdr->gh_minor);
699 sptlrpc_cli_ctx_expire(ctx);
701 } else if (errhdr->gh_major == GSS_S_NO_CONTEXT ||
702 errhdr->gh_major == GSS_S_BAD_SIG) {
                CWARN("req x"LPU64"/t"LPU64": server responded ctx %p(%u->%s) "
                      "%s, server might have lost the context.\n",
705 req->rq_xid, req->rq_transno, ctx, ctx->cc_vcred.vc_uid,
706 sec2target_str(ctx->cc_sec),
707 errhdr->gh_major == GSS_S_NO_CONTEXT ?
708 "NO_CONTEXT" : "BAD_SIG");
710 sptlrpc_cli_ctx_expire(ctx);
                /* We need to replace the ctx right here, otherwise during
                 * a resend we'll hit the logic in sptlrpc_req_refresh_ctx()
                 * which keeps the ctx with the RESEND flag, and we'd never
                 * get rid of this ctx. */
716 rc = sptlrpc_req_replace_dead_ctx(req);
720 CERROR("req %p: server report gss error (%x/%x)\n",
721 req, errhdr->gh_major, errhdr->gh_minor);
728 int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
729 struct ptlrpc_request *req)
731 struct gss_cli_ctx *gctx;
732 struct gss_header *ghdr, *reqhdr;
733 struct lustre_msg *msg = req->rq_repbuf;
738 LASSERT(req->rq_cli_ctx == ctx);
741 req->rq_repdata_len = req->rq_nob_received;
742 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        /* special case for context negotiation: rq_repmsg/rq_replen are
         * not actually used currently */
746 if (req->rq_ctx_init) {
747 req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
748 req->rq_replen = msg->lm_buflens[1];
752 if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
753 CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
        ghdr = gss_swab_header(msg, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }
764 reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));
767 if (ghdr->gh_version != reqhdr->gh_version) {
768 CERROR("gss version %u mismatch, expect %u\n",
769 ghdr->gh_version, reqhdr->gh_version);
773 switch (ghdr->gh_proc) {
774 case PTLRPC_GSS_PROC_DATA:
775 if (!equi(req->rq_pack_bulk == 1,
776 ghdr->gh_flags & LUSTRE_GSS_PACK_BULK)) {
777 CERROR("%s bulk flag in reply\n",
778 req->rq_pack_bulk ? "missing" : "unexpected");
782 if (ghdr->gh_seq != reqhdr->gh_seq) {
783 CERROR("seqnum %u mismatch, expect %u\n",
784 ghdr->gh_seq, reqhdr->gh_seq);
788 if (ghdr->gh_svc != reqhdr->gh_svc) {
789 CERROR("svc %u mismatch, expect %u\n",
790 ghdr->gh_svc, reqhdr->gh_svc);
794 if (lustre_msg_swabbed(msg))
795 gss_header_swabber(ghdr);
797 major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
798 if (major != GSS_S_COMPLETE)
801 req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
802 req->rq_replen = msg->lm_buflens[1];
804 if (req->rq_pack_bulk) {
805 if (msg->lm_bufcount < 4) {
806 CERROR("Invalid reply bufcount %u\n",
                /* the bulk checksum is the second-to-last segment */
812 rc = bulk_sec_desc_unpack(msg, msg->lm_bufcount - 2);
815 case PTLRPC_GSS_PROC_ERR:
816 rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
819 CERROR("unknown gss proc %d\n", ghdr->gh_proc);
826 int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
827 struct ptlrpc_request *req)
829 struct gss_cli_ctx *gctx;
830 rawobj_t msgobj, cipher_obj, micobj;
831 struct gss_header *ghdr;
832 int buflens[3], wiresize, rc;
836 LASSERT(req->rq_clrbuf);
837 LASSERT(req->rq_cli_ctx == ctx);
838 LASSERT(req->rq_reqlen);
840 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        /* compute the final clear data length */
843 req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
844 req->rq_clrbuf->lm_buflens);
846 /* calculate wire data length */
847 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
848 buflens[1] = gss_cli_payload(&gctx->gc_base, buflens[0], 0);
849 buflens[2] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
850 wiresize = lustre_msg_size_v2(3, buflens);
852 /* allocate wire buffer */
        if (req->rq_pool) {
                /* pre-allocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf != req->rq_clrbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);
        } else {
                OBD_ALLOC(req->rq_reqbuf, wiresize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);
                req->rq_reqbuf_len = wiresize;
        }
865 lustre_init_msg_v2(req->rq_reqbuf, 3, buflens, NULL);
866 req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
869 ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
870 ghdr->gh_version = PTLRPC_GSS_VERSION;
871 ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
873 ghdr->gh_proc = gctx->gc_proc;
874 ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
875 ghdr->gh_svc = SPTLRPC_SVC_PRIV;
876 ghdr->gh_handle.len = gctx->gc_handle.len;
877 memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
878 if (req->rq_pack_bulk)
879 ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
880 if (req->rq_pack_udesc)
881 ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
884 /* header signature */
885 msgobj.len = req->rq_reqbuf->lm_buflens[0];
886 msgobj.data = lustre_msg_buf(req->rq_reqbuf, 0, 0);
887 micobj.len = req->rq_reqbuf->lm_buflens[1];
888 micobj.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);
890 major = lgss_get_mic(gctx->gc_mechctx, 1, &msgobj, &micobj);
891 if (major != GSS_S_COMPLETE) {
892 CERROR("priv: sign message error: %08x\n", major);
893 GOTO(err_free, rc = -EPERM);
        /* Shrinking the msg might cause problems when re-packing; shipping
         * a little extra data is fine.
        lustre_shrink_msg(req->rq_reqbuf, 1, micobj.len, 0);
         */
901 msgobj.len = req->rq_clrdata_len;
902 msgobj.data = (__u8 *) req->rq_clrbuf;
905 cipher_obj.len = req->rq_reqbuf->lm_buflens[2];
906 cipher_obj.data = lustre_msg_buf(req->rq_reqbuf, 2, 0);
908 major = lgss_wrap(gctx->gc_mechctx, &msgobj, req->rq_clrbuf_len,
910 if (major != GSS_S_COMPLETE) {
911 CERROR("priv: wrap message error: %08x\n", major);
912 GOTO(err_free, rc = -EPERM);
914 LASSERT(cipher_obj.len <= buflens[2]);
        /* see the explanation in gss_cli_ctx_sign() */
917 if (atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
918 GSS_SEQ_REPACK_THRESHOLD) {
919 int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
921 gss_stat_oos_record_cli(behind);
922 CWARN("req %p: %u behind, retry sealing\n", req, behind);
924 ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
928 /* now set the final wire data length */
929 req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 2,
936 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
937 req->rq_reqbuf = NULL;
938 req->rq_reqbuf_len = 0;
943 int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
944 struct ptlrpc_request *req)
946 struct gss_cli_ctx *gctx;
947 struct gss_header *ghdr;
952 LASSERT(req->rq_repbuf);
953 LASSERT(req->rq_cli_ctx == ctx);
955 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        ghdr = gss_swab_header(req->rq_repbuf, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }
964 if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
965 CERROR("gss version %u mismatch, expect %u\n",
966 ghdr->gh_version, PTLRPC_GSS_VERSION);
970 switch (ghdr->gh_proc) {
971 case PTLRPC_GSS_PROC_DATA:
972 if (!equi(req->rq_pack_bulk == 1,
973 ghdr->gh_flags & LUSTRE_GSS_PACK_BULK)) {
974 CERROR("%s bulk flag in reply\n",
975 req->rq_pack_bulk ? "missing" : "unexpected");
979 if (lustre_msg_swabbed(req->rq_repbuf))
980 gss_header_swabber(ghdr);
982 major = gss_unseal_msg(gctx->gc_mechctx, req->rq_repbuf,
983 &msglen, req->rq_repbuf_len);
984 if (major != GSS_S_COMPLETE) {
989 if (lustre_unpack_msg(req->rq_repbuf, msglen)) {
990 CERROR("Failed to unpack after decryption\n");
993 req->rq_repdata_len = msglen;
995 if (req->rq_repbuf->lm_bufcount < 1) {
996 CERROR("Invalid reply buffer: empty\n");
1000 if (req->rq_pack_bulk) {
1001 if (req->rq_repbuf->lm_bufcount < 2) {
                        CERROR("too few reply buffer segments %d\n",
                               req->rq_repbuf->lm_bufcount);
1007 /* bulk checksum is the last segment */
1008 if (bulk_sec_desc_unpack(req->rq_repbuf,
1009 req->rq_repbuf->lm_bufcount-1))
1013 req->rq_repmsg = lustre_msg_buf(req->rq_repbuf, 0, 0);
1014 req->rq_replen = req->rq_repbuf->lm_buflens[0];
1018 case PTLRPC_GSS_PROC_ERR:
1019 rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
1022 CERROR("unexpected proc %d\n", ghdr->gh_proc);
1029 /*********************************************
1030 * reverse context installation *
1031 *********************************************/
1034 int gss_install_rvs_svc_ctx(struct obd_import *imp,
1035 struct gss_sec *gsec,
1036 struct gss_cli_ctx *gctx)
1038 return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
1041 /*********************************************
1042 * GSS security APIs *
1043 *********************************************/
1044 int gss_sec_create_common(struct gss_sec *gsec,
1045 struct ptlrpc_sec_policy *policy,
1046 struct obd_import *imp,
1047 struct ptlrpc_svc_ctx *svcctx,
1048 struct sptlrpc_flavor *sf)
1050 struct ptlrpc_sec *sec;
1053 LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
1055 gsec->gs_mech = lgss_subflavor_to_mech(RPC_FLVR_SUB(sf->sf_rpc));
1056 if (!gsec->gs_mech) {
1057 CERROR("gss backend 0x%x not found\n",
1058 RPC_FLVR_SUB(sf->sf_rpc));
1062 spin_lock_init(&gsec->gs_lock);
1063 gsec->gs_rvs_hdl = 0ULL;
1065 /* initialize upper ptlrpc_sec */
1066 sec = &gsec->gs_base;
1067 sec->ps_policy = policy;
1068 atomic_set(&sec->ps_refcount, 0);
1069 atomic_set(&sec->ps_nctx, 0);
1070 sec->ps_id = sptlrpc_get_next_secid();
1072 sec->ps_import = class_import_get(imp);
1073 sec->ps_lock = SPIN_LOCK_UNLOCKED;
1074 CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
1077 sec->ps_gc_interval = GSS_GC_INTERVAL;
1079 LASSERT(sec_is_reverse(sec));
1081 /* never do gc on reverse sec */
1082 sec->ps_gc_interval = 0;
1085 if (sec->ps_flvr.sf_bulk_priv != BULK_PRIV_ALG_NULL &&
1086 sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
1087 sptlrpc_enc_pool_add_user();
1089 CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
1090 policy->sp_name, gsec);
1094 void gss_sec_destroy_common(struct gss_sec *gsec)
1096 struct ptlrpc_sec *sec = &gsec->gs_base;
1099 LASSERT(sec->ps_import);
1100 LASSERT(atomic_read(&sec->ps_refcount) == 0);
1101 LASSERT(atomic_read(&sec->ps_nctx) == 0);
1103 if (gsec->gs_mech) {
1104 lgss_mech_put(gsec->gs_mech);
1105 gsec->gs_mech = NULL;
1108 class_import_put(sec->ps_import);
1110 if (sec->ps_flvr.sf_bulk_priv != BULK_PRIV_ALG_NULL &&
1111 sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
1112 sptlrpc_enc_pool_del_user();
1117 void gss_sec_kill(struct ptlrpc_sec *sec)
1122 int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
1123 struct ptlrpc_cli_ctx *ctx,
1124 struct ptlrpc_ctx_ops *ctxops,
1125 struct vfs_cred *vcred)
1127 struct gss_cli_ctx *gctx = ctx2gctx(ctx);
1130 atomic_set(&gctx->gc_seq, 0);
1132 CFS_INIT_HLIST_NODE(&ctx->cc_cache);
1133 atomic_set(&ctx->cc_refcount, 0);
1135 ctx->cc_ops = ctxops;
1137 ctx->cc_flags = PTLRPC_CTX_NEW;
1138 ctx->cc_vcred = *vcred;
1139 spin_lock_init(&ctx->cc_lock);
1140 CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
1141 CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
        /* take a ref on the owning sec, balanced when the ctx is destroyed */
        atomic_inc(&sec->ps_refcount);
        /* statistics only */
        atomic_inc(&sec->ps_nctx);
1148 CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
1149 sec->ps_policy->sp_name, ctx->cc_sec,
1150 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
1156 * 1: the context has been taken care of by someone else
1157 * 0: proceed to really destroy the context locally
1159 int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
1160 struct ptlrpc_cli_ctx *ctx)
1162 struct gss_cli_ctx *gctx = ctx2gctx(ctx);
1164 LASSERT(atomic_read(&sec->ps_nctx) > 0);
1165 LASSERT(atomic_read(&ctx->cc_refcount) == 0);
1166 LASSERT(ctx->cc_sec == sec);
1168 if (gctx->gc_mechctx) {
                /* the final context-fini rpc will use this ctx too, and it
                 * is asynchronous, completed by request_out_callback(). So
                 * we take a refcount here; whoever finally drops it to 0
                 * is responsible for the rest of the destruction. */
1173 atomic_inc(&ctx->cc_refcount);
1175 gss_do_ctx_fini_rpc(gctx);
1176 gss_cli_ctx_finalize(gctx);
1178 if (!atomic_dec_and_test(&ctx->cc_refcount))
1182 if (sec_is_reverse(sec))
1183 CWARN("reverse sec %p: destroy ctx %p\n",
1186 CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
1187 sec->ps_policy->sp_name, ctx->cc_sec,
1188 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
1194 int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
1195 struct ptlrpc_request *req,
1196 int svc, int msgsize)
1198 int bufsize, txtsize;
1199 int buflens[5], bufcnt = 2;
        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - user descriptor (optional)
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode
         */
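        /*
         * Concrete example (illustrative): an INTG request carrying both
         * a user descriptor and a bulk descriptor ends up with 5 segments:
         *
         *   [0] gss header | [1] lustre msg | [2] user desc
         *   [3] bulk sec desc | [4] MIC over segments 0-3
         */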
1217 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1218 txtsize = buflens[0];
1220 buflens[1] = msgsize;
1221 if (svc == SPTLRPC_SVC_INTG)
1222 txtsize += buflens[1];
1224 if (req->rq_pack_udesc) {
1225 buflens[bufcnt] = sptlrpc_current_user_desc_size();
1226 if (svc == SPTLRPC_SVC_INTG)
1227 txtsize += buflens[bufcnt];
1231 if (req->rq_pack_bulk) {
1232 buflens[bufcnt] = bulk_sec_desc_size(
1233 req->rq_flvr.sf_bulk_csum, 1,
1235 if (svc == SPTLRPC_SVC_INTG)
1236 txtsize += buflens[bufcnt];
1240 if (req->rq_ctx_init)
1241 buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
1242 else if (svc != SPTLRPC_SVC_NULL)
1243 buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
1245 bufsize = lustre_msg_size_v2(bufcnt, buflens);
1247 if (!req->rq_reqbuf) {
1248 bufsize = size_roundup_power2(bufsize);
1250 OBD_ALLOC(req->rq_reqbuf, bufsize);
1251 if (!req->rq_reqbuf)
1254 req->rq_reqbuf_len = bufsize;
1256 LASSERT(req->rq_pool);
1257 LASSERT(req->rq_reqbuf_len >= bufsize);
1258 memset(req->rq_reqbuf, 0, bufsize);
1261 lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
1262 req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
1264 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
1265 LASSERT(req->rq_reqmsg);
        /* pack the user descriptor here, because later we might no longer
         * be running in the context of the current user's process */
1268 if (req->rq_pack_udesc)
1269 sptlrpc_pack_user_desc(req->rq_reqbuf, 2);
1275 int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
1276 struct ptlrpc_request *req,
1279 int ibuflens[3], ibufcnt;
1281 int clearsize, wiresize;
1284 LASSERT(req->rq_clrbuf == NULL);
1285 LASSERT(req->rq_clrbuf_len == 0);
        /* Inner (clear) buffers
         *  - lustre message
         *  - user descriptor (optional)
         *  - bulk checksum (optional)
         */
        ibufcnt = 1;
        ibuflens[0] = msgsize;
1296 if (req->rq_pack_udesc)
1297 ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
1298 if (req->rq_pack_bulk)
1299 ibuflens[ibufcnt++] = bulk_sec_desc_size(
1300 req->rq_flvr.sf_bulk_csum, 1,
1303 clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
        /* to allow appending padding during encryption */
1305 clearsize += GSS_MAX_CIPHER_BLOCK;
        /* Wrapper (wire) buffers
         *  - gss header
         *  - signature of gss header
         *  - cipher text
         */
1313 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1314 buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
1315 buflens[2] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
1316 wiresize = lustre_msg_size_v2(3, buflens);
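        /*
         * Resulting nesting (sketch): the entire inner lustre_msg
         * (message plus optional udesc/bulk checksum) is encrypted into
         * wire segment 2, so what is shipped is
         *
         *   [0] gss header | [1] MIC of the header | [2] ciphertext
         */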
1319 /* rq_reqbuf is preallocated */
1320 LASSERT(req->rq_reqbuf);
1321 LASSERT(req->rq_reqbuf_len >= wiresize);
1323 memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);
        /* if the pre-allocated buffer is big enough, we just pack both the
         * clear buf and the request buf into it, to avoid extra allocation */
1327 if (clearsize + wiresize <= req->rq_reqbuf_len) {
1329 (void *) (((char *) req->rq_reqbuf) + wiresize);
1331 CWARN("pre-allocated buf size %d is not enough for "
1332 "both clear (%d) and cipher (%d) text, proceed "
1333 "with extra allocation\n", req->rq_reqbuf_len,
1334 clearsize, wiresize);
1338 if (!req->rq_clrbuf) {
1339 clearsize = size_roundup_power2(clearsize);
1341 OBD_ALLOC(req->rq_clrbuf, clearsize);
1342 if (!req->rq_clrbuf)
1345 req->rq_clrbuf_len = clearsize;
1347 lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
1348 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);
1350 if (req->rq_pack_udesc)
1351 sptlrpc_pack_user_desc(req->rq_clrbuf, 1);
/*
 * NOTE: any change to the request buffer allocation should also be
 * reflected in the enlarge_reqbuf() family of functions.
 */
1360 int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
1361 struct ptlrpc_request *req,
1364 int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
1366 LASSERT(!req->rq_pack_bulk ||
1367 (req->rq_bulk_read || req->rq_bulk_write));
1370 case SPTLRPC_SVC_NULL:
1371 case SPTLRPC_SVC_AUTH:
1372 case SPTLRPC_SVC_INTG:
1373 return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
1374 case SPTLRPC_SVC_PRIV:
1375 return gss_alloc_reqbuf_priv(sec, req, msgsize);
1377 LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
1382 void gss_free_reqbuf(struct ptlrpc_sec *sec,
1383 struct ptlrpc_request *req)
1388 LASSERT(!req->rq_pool || req->rq_reqbuf);
1389 privacy = RPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
1391 if (!req->rq_clrbuf)
1392 goto release_reqbuf;
1394 /* release clear buffer */
1396 LASSERT(req->rq_clrbuf_len);
1399 req->rq_clrbuf >= req->rq_reqbuf &&
1400 (char *) req->rq_clrbuf <
1401 (char *) req->rq_reqbuf + req->rq_reqbuf_len)
1402 goto release_reqbuf;
1404 OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
1405 req->rq_clrbuf = NULL;
1406 req->rq_clrbuf_len = 0;
1409 if (!req->rq_pool && req->rq_reqbuf) {
1410 LASSERT(req->rq_reqbuf_len);
1412 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
1413 req->rq_reqbuf = NULL;
1414 req->rq_reqbuf_len = 0;
1417 req->rq_reqmsg = NULL;
1422 static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
1424 bufsize = size_roundup_power2(bufsize);
1426 OBD_ALLOC(req->rq_repbuf, bufsize);
1427 if (!req->rq_repbuf)
1430 req->rq_repbuf_len = bufsize;
1435 int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
1436 struct ptlrpc_request *req,
1437 int svc, int msgsize)
1440 int buflens[4], bufcnt = 2;
        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode
         */
1456 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1457 txtsize = buflens[0];
1459 buflens[1] = msgsize;
1460 if (svc == SPTLRPC_SVC_INTG)
1461 txtsize += buflens[1];
1463 if (req->rq_pack_bulk) {
1464 buflens[bufcnt] = bulk_sec_desc_size(
1465 req->rq_flvr.sf_bulk_csum, 0,
1467 if (svc == SPTLRPC_SVC_INTG)
1468 txtsize += buflens[bufcnt];
1472 if (req->rq_ctx_init)
1473 buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
1474 else if (svc != SPTLRPC_SVC_NULL)
1475 buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
1477 return do_alloc_repbuf(req, lustre_msg_size_v2(bufcnt, buflens));
1481 int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
1482 struct ptlrpc_request *req,
1486 int buflens[3], bufcnt;
        /* Inner (clear) buffers
         *  - lustre message
         *  - bulk checksum (optional)
         */
        bufcnt = 1;
        buflens[0] = msgsize;
1496 if (req->rq_pack_bulk) {
1497 buflens[bufcnt++] = bulk_sec_desc_size(
1498 req->rq_flvr.sf_bulk_csum, 0,
1501 txtsize = lustre_msg_size_v2(bufcnt, buflens);
1502 txtsize += GSS_MAX_CIPHER_BLOCK;
        /* Wrapper (wire) buffers
         *  - gss header
         *  - signature of gss header
         *  - cipher text
         */
        bufcnt = 3;
1511 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1512 buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
1513 buflens[2] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
1515 return do_alloc_repbuf(req, lustre_msg_size_v2(bufcnt, buflens));
1518 int gss_alloc_repbuf(struct ptlrpc_sec *sec,
1519 struct ptlrpc_request *req,
1522 int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
1525 LASSERT(!req->rq_pack_bulk ||
1526 (req->rq_bulk_read || req->rq_bulk_write));
1529 case SPTLRPC_SVC_NULL:
1530 case SPTLRPC_SVC_AUTH:
1531 case SPTLRPC_SVC_INTG:
1532 return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
1533 case SPTLRPC_SVC_PRIV:
1534 return gss_alloc_repbuf_priv(sec, req, msgsize);
1536 LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
1541 void gss_free_repbuf(struct ptlrpc_sec *sec,
1542 struct ptlrpc_request *req)
1544 OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
1545 req->rq_repbuf = NULL;
1546 req->rq_repbuf_len = 0;
1548 req->rq_repmsg = NULL;
1551 static int get_enlarged_msgsize(struct lustre_msg *msg,
1552 int segment, int newsize)
1554 int save, newmsg_size;
1556 LASSERT(newsize >= msg->lm_buflens[segment]);
1558 save = msg->lm_buflens[segment];
1559 msg->lm_buflens[segment] = newsize;
1560 newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1561 msg->lm_buflens[segment] = save;
1566 static int get_enlarged_msgsize2(struct lustre_msg *msg,
1567 int segment1, int newsize1,
1568 int segment2, int newsize2)
1570 int save1, save2, newmsg_size;
1572 LASSERT(newsize1 >= msg->lm_buflens[segment1]);
1573 LASSERT(newsize2 >= msg->lm_buflens[segment2]);
1575 save1 = msg->lm_buflens[segment1];
1576 save2 = msg->lm_buflens[segment2];
1577 msg->lm_buflens[segment1] = newsize1;
1578 msg->lm_buflens[segment2] = newsize2;
1579 newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1580 msg->lm_buflens[segment1] = save1;
1581 msg->lm_buflens[segment2] = save2;
1587 int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
1588 struct ptlrpc_request *req,
1590 int segment, int newsize)
1592 struct lustre_msg *newbuf;
1593 int txtsize, sigsize = 0, i;
1594 int newmsg_size, newbuf_size;
        /*
         * gss header is at seg 0;
         * embedded msg is at seg 1;
         * signature (if any) is at the last seg
         */
1601 LASSERT(req->rq_reqbuf);
1602 LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
1603 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
1604 LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);
1606 /* 1. compute new embedded msg size */
1607 newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
1608 LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);
1610 /* 2. compute new wrapper msg size */
1611 if (svc == SPTLRPC_SVC_NULL) {
1612 /* no signature, get size directly */
1613 newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
1616 txtsize = req->rq_reqbuf->lm_buflens[0];
1618 if (svc == SPTLRPC_SVC_INTG) {
1619 for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
1620 txtsize += req->rq_reqbuf->lm_buflens[i];
1621 txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
1624 sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
1625 LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
1627 newbuf_size = get_enlarged_msgsize2(
1630 msg_last_segidx(req->rq_reqbuf),
1634 /* request from pool should always have enough buffer */
1635 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
1637 if (req->rq_reqbuf_len < newbuf_size) {
1638 newbuf_size = size_roundup_power2(newbuf_size);
1640 OBD_ALLOC(newbuf, newbuf_size);
1644 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
1646 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
1647 req->rq_reqbuf = newbuf;
1648 req->rq_reqbuf_len = newbuf_size;
1649 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
        /* do the enlargement, from wrapper to embedded, from end to beginning */
1653 if (svc != SPTLRPC_SVC_NULL)
1654 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
1655 msg_last_segidx(req->rq_reqbuf),
1658 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
1659 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
1661 req->rq_reqlen = newmsg_size;
1666 int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
1667 struct ptlrpc_request *req,
1668 int segment, int newsize)
1670 struct lustre_msg *newclrbuf;
1671 int newmsg_size, newclrbuf_size, newcipbuf_size;
        /*
         * embedded msg is at seg 0 of clear buffer;
         * cipher text is at seg 2 of cipher buffer;
         */
1678 LASSERT(req->rq_pool ||
1679 (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
1680 LASSERT(req->rq_reqbuf == NULL ||
1681 (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
1682 LASSERT(req->rq_clrbuf);
1683 LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
1684 LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);
1686 /* compute new embedded msg size */
1687 newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
1689 /* compute new clear buffer size */
1690 newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
1691 newclrbuf_size += GSS_MAX_CIPHER_BLOCK;
1693 /* compute new cipher buffer size */
1694 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1695 buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
1696 buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
1697 newcipbuf_size = lustre_msg_size_v2(3, buflens);
        /* handle the case where we put both the clear buf and the cipher
         * buf into a single pre-allocated buffer */
1701 if (unlikely(req->rq_pool) &&
1702 req->rq_clrbuf >= req->rq_reqbuf &&
1703 (char *) req->rq_clrbuf <
1704 (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                /* best case: we still fit into the pre-allocated buffer */
1707 if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
1710 /* move clear text backward. */
1711 src = req->rq_clrbuf;
1712 dst = (char *) req->rq_reqbuf + newcipbuf_size;
1714 memmove(dst, src, req->rq_clrbuf_len);
1716 req->rq_clrbuf = (struct lustre_msg *) dst;
1717 req->rq_clrbuf_len = newclrbuf_size;
1718 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
1720 /* sadly we have to split out the clear buffer */
1721 LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
1722 LASSERT(req->rq_clrbuf_len < newclrbuf_size);
1726 if (req->rq_clrbuf_len < newclrbuf_size) {
1727 newclrbuf_size = size_roundup_power2(newclrbuf_size);
1729 OBD_ALLOC(newclrbuf, newclrbuf_size);
1730 if (newclrbuf == NULL)
1733 memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
1735 if (req->rq_reqbuf == NULL ||
1736 req->rq_clrbuf < req->rq_reqbuf ||
1737 (char *) req->rq_clrbuf >=
1738 (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
1739 OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
1742 req->rq_clrbuf = newclrbuf;
1743 req->rq_clrbuf_len = newclrbuf_size;
1744 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
1747 _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
1748 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
1749 req->rq_reqlen = newmsg_size;
1754 int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
1755 struct ptlrpc_request *req,
1756 int segment, int newsize)
1758 int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
1760 LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
1763 case SPTLRPC_SVC_NULL:
1764 case SPTLRPC_SVC_AUTH:
1765 case SPTLRPC_SVC_INTG:
1766 return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
1767 case SPTLRPC_SVC_PRIV:
1768 return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
1770 LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
1775 int gss_sec_install_rctx(struct obd_import *imp,
1776 struct ptlrpc_sec *sec,
1777 struct ptlrpc_cli_ctx *ctx)
1779 struct gss_sec *gsec;
1780 struct gss_cli_ctx *gctx;
1783 gsec = container_of(sec, struct gss_sec, gs_base);
1784 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
1786 rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
/********************************************
 * server side API                          *
 ********************************************/
1795 int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
1798 return (grctx->src_init || grctx->src_init_continue ||
1799 grctx->src_err_notify);
1803 void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
1806 gss_svc_upcall_put_ctx(grctx->src_ctx);
1808 sptlrpc_policy_put(grctx->src_base.sc_policy);
1809 OBD_FREE_PTR(grctx);
1813 void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
1815 LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
1816 atomic_inc(&grctx->src_base.sc_refcount);
1820 void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
1822 LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
1824 if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
1825 gss_svc_reqctx_free(grctx);
1829 int gss_svc_sign(struct ptlrpc_request *req,
1830 struct ptlrpc_reply_state *rs,
1831 struct gss_svc_reqctx *grctx,
1838 LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
        /* the embedded lustre_msg might have been shrunk */
1841 if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
1842 lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
1844 if (req->rq_pack_bulk)
1845 flags |= LUSTRE_GSS_PACK_BULK;
1847 rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
1848 LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
1849 grctx->src_wirectx.gw_seq, svc, NULL);
1853 rs->rs_repdata_len = rc;
1857 int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
1859 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
1860 struct ptlrpc_reply_state *rs;
1861 struct gss_err_header *ghdr;
1862 int replen = sizeof(struct ptlrpc_body);
1866 //if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
1869 grctx->src_err_notify = 1;
1870 grctx->src_reserve_len = 0;
1872 rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
1874 CERROR("could not pack reply, err %d\n", rc);
1879 rs = req->rq_reply_state;
1880 LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
1881 ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
1882 ghdr->gh_version = PTLRPC_GSS_VERSION;
1884 ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
1885 ghdr->gh_major = major;
1886 ghdr->gh_minor = minor;
1887 ghdr->gh_handle.len = 0; /* fake context handle */
1889 rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
1890 rs->rs_repbuf->lm_buflens);
1892 CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
1893 major, minor, libcfs_nid2str(req->rq_peer.nid));
1898 int gss_svc_handle_init(struct ptlrpc_request *req,
1899 struct gss_wire_ctx *gw)
1901 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
1902 struct lustre_msg *reqbuf = req->rq_reqbuf;
1903 struct obd_uuid *uuid;
1904 struct obd_device *target;
1905 rawobj_t uuid_obj, rvs_hdl, in_token;
1907 __u32 *secdata, seclen;
1911 CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
1912 libcfs_nid2str(req->rq_peer.nid));
1914 req->rq_ctx_init = 1;
1916 if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
1917 CERROR("unexpected bulk flag\n");
1918 RETURN(SECSVC_DROP);
1921 if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
1922 CERROR("proc %u: invalid handle length %u\n",
1923 gw->gw_proc, gw->gw_handle.len);
1924 RETURN(SECSVC_DROP);
1927 if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4){
1928 CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
1929 RETURN(SECSVC_DROP);
1932 /* ctx initiate payload is in last segment */
1933 secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
1934 seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
1936 if (seclen < 4 + 4) {
1937 CERROR("sec size %d too small\n", seclen);
1938 RETURN(SECSVC_DROP);
1941 /* lustre svc type */
1942 lustre_svc = le32_to_cpu(*secdata++);
        /* extract the target uuid; note this code is somewhat fragile
         * because it touches the internal structure of obd_uuid */
1947 if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
1948 CERROR("failed to extract target uuid\n");
1949 RETURN(SECSVC_DROP);
1951 uuid_obj.data[uuid_obj.len - 1] = '\0';
1953 uuid = (struct obd_uuid *) uuid_obj.data;
1954 target = class_uuid2obd(uuid);
1955 if (!target || target->obd_stopping || !target->obd_set_up) {
1956 CERROR("target '%s' is not available for context init (%s)\n",
1957 uuid->uuid, target == NULL ? "no target" :
1958 (target->obd_stopping ? "stopping" : "not set up"));
1959 RETURN(SECSVC_DROP);
1962 /* extract reverse handle */
1963 if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
                CERROR("failed to extract reverse handle\n");
1965 RETURN(SECSVC_DROP);
1969 if (rawobj_extract(&in_token, &secdata, &seclen)) {
1970 CERROR("can't extract token\n");
1971 RETURN(SECSVC_DROP);
1974 rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
1975 &rvs_hdl, &in_token);
1976 if (rc != SECSVC_OK)
1979 if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_root)
1980 CWARN("create svc ctx %p: user from %s authenticated as %s\n",
1981 grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
1982 grctx->src_ctx->gsc_usr_mds ? "mds" : "root");
1984 CWARN("create svc ctx %p: accept user %u from %s\n",
1985 grctx->src_ctx, grctx->src_ctx->gsc_uid,
1986 libcfs_nid2str(req->rq_peer.nid));
1988 if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
1989 if (reqbuf->lm_bufcount < 4) {
1990 CERROR("missing user descriptor\n");
1991 RETURN(SECSVC_DROP);
1993 if (sptlrpc_unpack_user_desc(reqbuf, 2)) {
1994 CERROR("Mal-formed user descriptor\n");
1995 RETURN(SECSVC_DROP);
1998 req->rq_pack_udesc = 1;
1999 req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
2002 req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
2003 req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);
/*
 * The last segment must be the gss signature.
 */
2012 int gss_svc_verify_request(struct ptlrpc_request *req,
2013 struct gss_svc_reqctx *grctx,
2014 struct gss_wire_ctx *gw,
2017 struct gss_svc_ctx *gctx = grctx->src_ctx;
2018 struct lustre_msg *msg = req->rq_reqbuf;
2022 *major = GSS_S_COMPLETE;
2024 if (msg->lm_bufcount < 2) {
2025 CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
2029 if (gw->gw_svc == SPTLRPC_SVC_NULL)
2032 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
2033 CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
2034 *major = GSS_S_DUPLICATE_TOKEN;
2038 *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
2039 if (*major != GSS_S_COMPLETE)
2042 if (gctx->gsc_reverse == 0 &&
2043 gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
2044 CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
2045 *major = GSS_S_DUPLICATE_TOKEN;
2050 /* user descriptor */
2051 if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
2052 if (msg->lm_bufcount < (offset + 1)) {
2053 CERROR("no user desc included\n");
2057 if (sptlrpc_unpack_user_desc(msg, offset)) {
2058 CERROR("Mal-formed user descriptor\n");
2062 req->rq_pack_udesc = 1;
2063 req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
2067 /* check bulk cksum data */
2068 if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
2069 if (msg->lm_bufcount < (offset + 1)) {
2070 CERROR("no bulk checksum included\n");
2074 if (bulk_sec_desc_unpack(msg, offset))
2077 req->rq_pack_bulk = 1;
2078 grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
2079 grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
2082 req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
2083 req->rq_reqlen = msg->lm_buflens[1];
2088 int gss_svc_unseal_request(struct ptlrpc_request *req,
2089 struct gss_svc_reqctx *grctx,
2090 struct gss_wire_ctx *gw,
2093 struct gss_svc_ctx *gctx = grctx->src_ctx;
2094 struct lustre_msg *msg = req->rq_reqbuf;
2095 int msglen, offset = 1;
2098 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
2099 CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
2100 *major = GSS_S_DUPLICATE_TOKEN;
2104 *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
2105 &msglen, req->rq_reqdata_len);
2106 if (*major != GSS_S_COMPLETE)
2109 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
2110 CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
2111 *major = GSS_S_DUPLICATE_TOKEN;
2115 if (lustre_unpack_msg(msg, msglen)) {
2116 CERROR("Failed to unpack after decryption\n");
2119 req->rq_reqdata_len = msglen;
2121 if (msg->lm_bufcount < 1) {
2122 CERROR("Invalid buffer: is empty\n");
2126 if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
2127 if (msg->lm_bufcount < offset + 1) {
2128 CERROR("no user descriptor included\n");
2132 if (sptlrpc_unpack_user_desc(msg, offset)) {
2133 CERROR("Mal-formed user descriptor\n");
2137 req->rq_pack_udesc = 1;
2138 req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
2142 if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
2143 if (msg->lm_bufcount < offset + 1) {
2144 CERROR("no bulk checksum included\n");
2148 if (bulk_sec_desc_unpack(msg, offset))
2151 req->rq_pack_bulk = 1;
2152 grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
2153 grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
2156 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
2157 req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
2162 int gss_svc_handle_data(struct ptlrpc_request *req,
2163 struct gss_wire_ctx *gw)
2165 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2170 grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
2171 if (!grctx->src_ctx) {
2172 major = GSS_S_NO_CONTEXT;
2176 switch (gw->gw_svc) {
2177 case SPTLRPC_SVC_NULL:
2178 case SPTLRPC_SVC_AUTH:
2179 case SPTLRPC_SVC_INTG:
2180 rc = gss_svc_verify_request(req, grctx, gw, &major);
2182 case SPTLRPC_SVC_PRIV:
2183 rc = gss_svc_unseal_request(req, grctx, gw, &major);
2186 CERROR("unsupported gss service %d\n", gw->gw_svc);
2193 CERROR("svc %u failed: major 0x%08x: ctx %p(%u->%s)\n",
2194 gw->gw_svc, major, grctx->src_ctx, grctx->src_ctx->gsc_uid,
2195 libcfs_nid2str(req->rq_peer.nid));
        /* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
         * might happen after a server reboot, to allow recovery */
2199 if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
2200 gss_pack_err_notify(req, major, 0) == 0)
2201 RETURN(SECSVC_COMPLETE);
2203 RETURN(SECSVC_DROP);
2207 int gss_svc_handle_destroy(struct ptlrpc_request *req,
2208 struct gss_wire_ctx *gw)
2210 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2214 req->rq_ctx_fini = 1;
2215 req->rq_no_reply = 1;
2217 grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
2218 if (!grctx->src_ctx) {
2219 CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
2220 RETURN(SECSVC_DROP);
2223 if (gw->gw_svc != SPTLRPC_SVC_INTG) {
2224 CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
2225 RETURN(SECSVC_DROP);
2228 if (gss_svc_verify_request(req, grctx, gw, &major))
2229 RETURN(SECSVC_DROP);
2231 CWARN("destroy svc ctx %p idx "LPX64" (%u->%s)\n",
2232 grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
2233 grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
2235 gss_svc_upcall_destroy_ctx(grctx->src_ctx);
2237 if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
2238 if (req->rq_reqbuf->lm_bufcount < 4) {
2239 CERROR("missing user descriptor, ignore it\n");
2242 if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2)) {
2243 CERROR("Mal-formed user descriptor, ignore it\n");
2247 req->rq_pack_udesc = 1;
2248 req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
2254 int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
2256 struct gss_header *ghdr;
2257 struct gss_svc_reqctx *grctx;
2258 struct gss_wire_ctx *gw;
2262 LASSERT(req->rq_reqbuf);
2263 LASSERT(req->rq_svc_ctx == NULL);
2265 if (req->rq_reqbuf->lm_bufcount < 2) {
2266 CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
2267 RETURN(SECSVC_DROP);
        ghdr = gss_swab_header(req->rq_reqbuf, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(SECSVC_DROP);
        }
2277 if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
2278 CERROR("gss version %u, expect %u\n", ghdr->gh_version,
2279 PTLRPC_GSS_VERSION);
2280 RETURN(SECSVC_DROP);
2283 req->rq_sp_from = ghdr->gh_sp;
2285 /* alloc grctx data */
2286 OBD_ALLOC_PTR(grctx);
2288 CERROR("fail to alloc svc reqctx\n");
2289 RETURN(SECSVC_DROP);
2291 grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
2292 atomic_set(&grctx->src_base.sc_refcount, 1);
2293 req->rq_svc_ctx = &grctx->src_base;
2294 gw = &grctx->src_wirectx;
2296 /* save wire context */
2297 gw->gw_flags = ghdr->gh_flags;
2298 gw->gw_proc = ghdr->gh_proc;
2299 gw->gw_seq = ghdr->gh_seq;
2300 gw->gw_svc = ghdr->gh_svc;
2301 rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);
2303 /* restore the header to its original on-wire byte order: the checksum is computed over the original wire bytes */
2304 if (lustre_msg_swabbed(req->rq_reqbuf))
2305 gss_header_swabber(ghdr);
2307 switch (ghdr->gh_proc) {
2308 case PTLRPC_GSS_PROC_INIT:
2309 case PTLRPC_GSS_PROC_CONTINUE_INIT:
2310 rc = gss_svc_handle_init(req, gw);
2312 case PTLRPC_GSS_PROC_DATA:
2313 rc = gss_svc_handle_data(req, gw);
2315 case PTLRPC_GSS_PROC_DESTROY:
2316 rc = gss_svc_handle_destroy(req, gw);
2319 CERROR("unknown proc %u\n", gw->gw_proc);
2326 LASSERT(grctx->src_ctx);
2328 req->rq_auth_gss = 1;
2329 req->rq_auth_remote = grctx->src_ctx->gsc_remote;
2330 req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
2331 req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
2332 req->rq_auth_uid = grctx->src_ctx->gsc_uid;
2333 req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
2335 case SECSVC_COMPLETE:
2338 gss_svc_reqctx_free(grctx);
2339 req->rq_svc_ctx = NULL;
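/*
 * A short summary of gss_svc_accept()'s return convention, as inferred
 * from the handlers above:
 *
 *   SECSVC_OK       - context verified; process the request, and the
 *                     rq_auth_* fields set above are valid.
 *   SECSVC_COMPLETE - a reply (e.g. an init reply or error notify) has
 *                     already been prepared; skip normal processing.
 *   SECSVC_DROP     - the request is invalid or unverifiable; drop it
 *                     silently and free the request context.
 */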
2346 void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
2348 struct gss_svc_reqctx *grctx;
2351 if (svc_ctx == NULL) {
2356 grctx = gss_svc_ctx2reqctx(svc_ctx);
2358 CWARN("gss svc invalidate ctx %p(%u)\n",
2359 grctx->src_ctx, grctx->src_ctx->gsc_uid);
2360 gss_svc_upcall_destroy_ctx(grctx->src_ctx);
2366 int gss_svc_payload(struct gss_svc_reqctx *grctx, int msgsize, int privacy)
2368 if (gss_svc_reqctx_is_special(grctx))
2369 return grctx->src_reserve_len;
2371 return gss_estimate_payload(NULL, msgsize, privacy);
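/*
 * Interpretation (inferred from the callers in gss_svc_alloc_rs() below):
 * "special" request contexts, e.g. error notifies handled without a full
 * GSS context, simply get the space reserved earlier (src_reserve_len);
 * everything else asks the generic estimator for a worst-case payload.
 */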
2374 int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2376 struct gss_svc_reqctx *grctx;
2377 struct ptlrpc_reply_state *rs;
2378 int privacy, svc, bsd_off = 0;
2379 int ibuflens[2], ibufcnt = 0;
2380 int buflens[4], bufcnt;
2381 int txtsize, wmsg_size, rs_size;
2384 LASSERT(msglen % 8 == 0);
2386 if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
2387 CERROR("client request bulk sec on non-bulk rpc\n");
2391 svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
2393 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2394 if (gss_svc_reqctx_is_special(grctx))
2397 privacy = (svc == SPTLRPC_SVC_PRIV);
2402 ibuflens[0] = msglen;
2404 if (req->rq_pack_bulk) {
2405 LASSERT(grctx->src_reqbsd);
2408 ibuflens[ibufcnt++] = bulk_sec_desc_size(
2409 grctx->src_reqbsd->bsd_csum_alg,
2410 0, req->rq_bulk_read);
2413 txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
2414 txtsize += GSS_MAX_CIPHER_BLOCK;
2416 /* wrapper buffer */
2418 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
2419 buflens[1] = gss_svc_payload(grctx, buflens[0], 0);
2420 buflens[2] = gss_svc_payload(grctx, txtsize, 1);
2423 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
2424 buflens[1] = msglen;
2426 txtsize = buflens[0];
2427 if (svc == SPTLRPC_SVC_INTG)
2428 txtsize += buflens[1];
2430 if (req->rq_pack_bulk) {
2431 LASSERT(grctx->src_reqbsd);
2434 buflens[bufcnt] = bulk_sec_desc_size(
2435 grctx->src_reqbsd->bsd_csum_alg,
2436 0, req->rq_bulk_read);
2437 if (svc == SPTLRPC_SVC_INTG)
2438 txtsize += buflens[bufcnt];
2442 if (gss_svc_reqctx_is_special(grctx) ||
2443 svc != SPTLRPC_SVC_NULL)
2444 buflens[bufcnt++] = gss_svc_payload(grctx, txtsize, 0);
2447 wmsg_size = lustre_msg_size_v2(bufcnt, buflens);
2449 rs_size = sizeof(*rs) + wmsg_size;
2450 rs = req->rq_reply_state;
2454 LASSERT(rs->rs_size >= rs_size);
2456 OBD_ALLOC(rs, rs_size);
2460 rs->rs_size = rs_size;
2463 rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
2464 rs->rs_repbuf_len = wmsg_size;
2466 /* initialize the buffer */
2468 lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
2469 rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
2471 lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
2472 rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;
2474 rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
2478 grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
2479 grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
2483 gss_svc_reqctx_addref(grctx);
2484 rs->rs_svc_ctx = req->rq_svc_ctx;
2486 LASSERT(rs->rs_msg);
2487 req->rq_reply_state = rs;
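/*
 * A sketch of the two reply-buffer layouts prepared above, inferred from
 * the ibuflens/buflens setup (parenthesized segments are conditional):
 *
 *   privacy (SPTLRPC_SVC_PRIV):
 *     inner msg: [ reply msg | (bulk sec desc) ]      <- sealed later
 *     wrapper:   [ gss_header | sig payload | ciphertext ]
 *
 *   other services:
 *     wrapper:   [ gss_header | reply msg | (bulk sec desc) | (signature) ]
 */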
2492 int gss_svc_seal(struct ptlrpc_request *req,
2493 struct ptlrpc_reply_state *rs,
2494 struct gss_svc_reqctx *grctx)
2496 struct gss_svc_ctx *gctx = grctx->src_ctx;
2497 rawobj_t msgobj, cipher_obj, micobj;
2498 struct gss_header *ghdr;
2500 int cipher_buflen, buflens[3];
2505 /* the embedded lustre_msg might have been shrunk */
2506 if (req->rq_replen != rs->rs_repbuf->lm_buflens[0])
2507 lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
2509 /* length of the clear-text data to be sealed */
2510 msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
2511 rs->rs_repbuf->lm_buflens);
2514 msgobj.len = msglen;
2515 msgobj.data = (__u8 *) rs->rs_repbuf;
2517 /* allocate temporary cipher buffer */
2518 cipher_buflen = gss_estimate_payload(gctx->gsc_mechctx, msglen, 1);
2519 OBD_ALLOC(cipher_buf, cipher_buflen);
2523 cipher_obj.len = cipher_buflen;
2524 cipher_obj.data = cipher_buf;
2526 major = lgss_wrap(gctx->gsc_mechctx, &msgobj, rs->rs_repbuf_len,
2528 if (major != GSS_S_COMPLETE) {
2529 CERROR("priv: wrap message error: %08x\n", major);
2530 GOTO(out_free, rc = -EPERM);
2532 LASSERT(cipher_obj.len <= cipher_buflen);
2534 /* we are about to overwrite data at rs->rs_repbuf; nullify pointers
2535 * into it to catch further illegal usage. */
2536 grctx->src_repbsd = NULL;
2537 grctx->src_repbsd_size = 0;
2539 /* now the real wire data */
2540 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
2541 buflens[1] = gss_estimate_payload(gctx->gsc_mechctx, buflens[0], 0);
2542 buflens[2] = cipher_obj.len;
2544 LASSERT(lustre_msg_size_v2(3, buflens) <= rs->rs_repbuf_len);
2545 lustre_init_msg_v2(rs->rs_repbuf, 3, buflens, NULL);
2546 rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;
2549 ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
2550 ghdr->gh_version = PTLRPC_GSS_VERSION;
2552 ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
2553 ghdr->gh_seq = grctx->src_wirectx.gw_seq;
2554 ghdr->gh_svc = SPTLRPC_SVC_PRIV;
2555 ghdr->gh_handle.len = 0;
2556 if (req->rq_pack_bulk)
2557 ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
2559 /* header signature */
2560 msgobj.len = rs->rs_repbuf->lm_buflens[0];
2561 msgobj.data = lustre_msg_buf(rs->rs_repbuf, 0, 0);
2562 micobj.len = rs->rs_repbuf->lm_buflens[1];
2563 micobj.data = lustre_msg_buf(rs->rs_repbuf, 1, 0);
2565 major = lgss_get_mic(gctx->gsc_mechctx, 1, &msgobj, &micobj);
2566 if (major != GSS_S_COMPLETE) {
2567 CERROR("priv: sign message error: %08x\n", major);
2568 GOTO(out_free, rc = -EPERM);
2570 lustre_shrink_msg(rs->rs_repbuf, 1, micobj.len, 0);
2573 memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0),
2574 cipher_obj.data, cipher_obj.len);
2576 rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
2579 /* to catch further access from upper layers */
2581 req->rq_repmsg = NULL;
2586 OBD_FREE(cipher_buf, cipher_buflen);
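/*
 * For orientation, the sealed reply built above goes on the wire with
 * the 3-segment layout:
 *
 *   seg 0: gss_header (PTLRPC_GSS_PROC_DATA, SPTLRPC_SVC_PRIV,
 *          sequence copied from the request)
 *   seg 1: MIC over the header, shrunk to its actual length
 *   seg 2: ciphertext of the entire embedded reply message
 */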
2590 int gss_svc_authorize(struct ptlrpc_request *req)
2592 struct ptlrpc_reply_state *rs = req->rq_reply_state;
2593 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2594 struct gss_wire_ctx *gw;
2598 if (gss_svc_reqctx_is_special(grctx))
2601 gw = &grctx->src_wirectx;
2602 if (gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
2603 gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
2604 CERROR("proc %d not support\n", gw->gw_proc);
2608 LASSERT(grctx->src_ctx);
2610 switch (gw->gw_svc) {
2611 case SPTLRPC_SVC_NULL:
2612 case SPTLRPC_SVC_AUTH:
2613 case SPTLRPC_SVC_INTG:
2614 rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
2616 case SPTLRPC_SVC_PRIV:
2617 rc = gss_svc_seal(req, rs, grctx);
2620 CERROR("Unknown service %d\n", gw->gw_svc);
2621 GOTO(out, rc = -EINVAL);
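/*
 * Reply protection mirrors the request's service level: NULL/AUTH/INTG
 * replies go through gss_svc_sign(), which signs as much as the level
 * requires, while PRIV replies are sealed wholesale by gss_svc_seal().
 */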
2629 void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
2631 struct gss_svc_reqctx *grctx;
2633 LASSERT(rs->rs_svc_ctx);
2634 grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);
2636 /* paranoid, maybe not necessary */
2637 grctx->src_reqbsd = NULL;
2638 grctx->src_repbsd = NULL;
2640 gss_svc_reqctx_decref(grctx);
2641 rs->rs_svc_ctx = NULL;
2643 if (!rs->rs_prealloc)
2644 OBD_FREE(rs, rs->rs_size);
2647 void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
2649 LASSERT(atomic_read(&ctx->sc_refcount) == 0);
2650 gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
2653 int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
2654 struct ptlrpc_svc_ctx *svc_ctx)
2656 struct gss_cli_ctx *cli_gctx = ctx2gctx(cli_ctx);
2657 struct gss_svc_ctx *svc_gctx = gss_svc_ctx2gssctx(svc_ctx);
2658 struct gss_ctx *mechctx = NULL;
2661 LASSERT(svc_gctx && svc_gctx->gsc_mechctx);
2663 cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
2664 cli_gctx->gc_win = GSS_SEQ_WIN;
2666 /* The problem is that the reverse ctx might get lost in some recovery
2667 * situations, and the same svc_ctx will be used to re-create it.
2668 * If a callback was sent out before that, a new reverse ctx starting
2669 * with sequence 0 would cause future callback rpcs to be treated as replays.
2672 * Each reverse root ctx records its latest sequence number on its
2673 * buddy svcctx before being destroyed, so here we continue to use it.
2675 atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
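        /*
         * A sketch of the sequence hand-off described above; the recording
         * side lives outside this section, so the first line is
         * illustrative only:
         *
         *   teardown of old ctx: svc_gctx->gsc_rvs_seq = <last seq used>;
         *   rebuild (here):      atomic_set(&cli_gctx->gc_seq,
         *                                   svc_gctx->gsc_rvs_seq);
         */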
2677 if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
2678 CERROR("failed to dup svc handle\n");
2682 if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
2684 CERROR("failed to copy mech context\n");
2685 goto err_svc_handle;
2688 if (rawobj_dup(&cli_gctx->gc_handle, &svc_gctx->gsc_rvs_hdl)) {
2689 CERROR("failed to dup reverse handle\n");
2693 cli_gctx->gc_mechctx = mechctx;
2694 gss_cli_ctx_uptodate(cli_gctx);
2699 lgss_delete_sec_context(&mechctx);
2701 rawobj_free(&cli_gctx->gc_svc_handle);
2706 int __init sptlrpc_gss_init(void)
2710 rc = gss_init_lproc();
2714 rc = gss_init_cli_upcall();
2718 rc = gss_init_svc_upcall();
2720 goto out_cli_upcall;
2722 rc = init_kerberos_module();
2724 goto out_svc_upcall;
2726 /* register the policy after everything else is initialized, because it
2727 * might be in use immediately after registration. */
2729 rc = gss_init_keyring();
2733 #ifdef HAVE_GSS_PIPEFS
2734 rc = gss_init_pipefs();
2741 #ifdef HAVE_GSS_PIPEFS
2747 cleanup_kerberos_module();
2749 gss_exit_svc_upcall();
2751 gss_exit_cli_upcall();
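/*
 * Note the error unwind above runs in exact reverse of the setup order
 * (lproc -> cli upcall -> svc upcall -> kerberos -> keyring/pipefs), so
 * a failure at any step releases only what was already initialized; the
 * module exit path below follows the same reverse order.
 */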
2757 static void __exit sptlrpc_gss_exit(void)
2760 #ifdef HAVE_GSS_PIPEFS
2763 cleanup_kerberos_module();
2764 gss_exit_svc_upcall();
2765 gss_exit_cli_upcall();
2769 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2770 MODULE_DESCRIPTION("GSS security policy for Lustre");
2771 MODULE_LICENSE("GPL");
2773 module_init(sptlrpc_gss_init);
2774 module_exit(sptlrpc_gss_exit);