/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Modifications for Lustre
 * Copyright 2004 - 2007, Cluster File Systems, Inc.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 *
 * linux/net/sunrpc/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song <dugsong@monkey.org>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/random.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"

#include <linux/crypto.h>
static inline int msg_last_segidx(struct lustre_msg *msg)
{
        LASSERT(msg->lm_bufcount > 0);
        return msg->lm_bufcount - 1;
}

static inline int msg_last_seglen(struct lustre_msg *msg)
{
        return msg->lm_buflens[msg_last_segidx(msg)];
}
/********************************************
 * wire data swabber                        *
 ********************************************/
void gss_header_swabber(struct gss_header *ghdr)
{
        __swab32s(&ghdr->gh_version);
        __swab32s(&ghdr->gh_flags);
        __swab32s(&ghdr->gh_proc);
        __swab32s(&ghdr->gh_seq);
        __swab32s(&ghdr->gh_svc);
        __swab32s(&ghdr->gh_pad1);
        __swab32s(&ghdr->gh_pad2);
        __swab32s(&ghdr->gh_pad3);
        __swab32s(&ghdr->gh_handle.len);
}
struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment)
{
        struct gss_header *ghdr;

        ghdr = lustre_swab_buf(msg, segment, sizeof(*ghdr),
                               gss_header_swabber);

        if (ghdr &&
            sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
                CERROR("gss header requires length %u, only %u received\n",
                       (unsigned int) sizeof(*ghdr) + ghdr->gh_handle.len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return ghdr;
}
void gss_netobj_swabber(netobj_t *obj)
{
        __swab32s(&obj->len);
}

netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
{
        netobj_t *obj;

        obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
        if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
                CERROR("netobj requires length %u but only %u received\n",
                       (unsigned int) sizeof(*obj) + obj->len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return obj;
}
/*
 * The payload size should be obtained from the mechanism, but since we
 * currently only support kerberos we can simply use a fixed value.
 */
#define GSS_KRB5_INTEG_MAX_PAYLOAD      (40)
int gss_estimate_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
        if (privacy) {
                /* we assume the max cipher block size is 16 bytes. here we
                 * add 16 for the confounder and 16 for padding. */
                return GSS_KRB5_INTEG_MAX_PAYLOAD + msgsize + 16 + 16 + 16;
        } else {
                return GSS_KRB5_INTEG_MAX_PAYLOAD;
        }
}
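
/*
 * Worked example (for illustration, not from the original code comments):
 * with privacy and msgsize == 1024 the estimate above comes to
 * 40 + 1024 + 16 + 16 + 16 = 1112 bytes; without privacy it is just the
 * fixed 40-byte krb5 integrity payload.
 */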
/*
 * return signature size on success, otherwise < 0 to indicate error
 */
int gss_sign_msg(struct lustre_msg *msg,
                 struct gss_ctx *mechctx,
                 __u32 proc, __u32 seq, __u32 svc,
                 rawobj_t *handle)
{
        struct gss_header *ghdr;
        rawobj_t text[3], mic;
        int textcnt, max_textcnt, mic_idx;
        __u32 major;

        LASSERT(msg->lm_bufcount >= 2);

        /* gss hdr */
        LASSERT(msg->lm_buflens[0] >=
                sizeof(*ghdr) + (handle ? handle->len : 0));
        ghdr = lustre_msg_buf(msg, 0, 0);

        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = proc;
        ghdr->gh_seq = seq;
        ghdr->gh_svc = svc;
        if (!handle) {
                /* fill in a fake one */
                ghdr->gh_handle.len = 0;
        } else {
                ghdr->gh_handle.len = handle->len;
                memcpy(ghdr->gh_handle.data, handle->data, handle->len);
        }

        /* no actual signature for null mode */
        if (svc == SPTLRPC_SVC_NULL)
                return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        /* MIC */
        mic_idx = msg_last_segidx(msg);
        max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

        for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_get_mic(mechctx, textcnt, text, &mic);
        if (major != GSS_S_COMPLETE) {
                CERROR("failed to generate MIC: %08x\n", major);
                return -EPERM;
        }
        LASSERT(mic.len <= msg->lm_buflens[mic_idx]);

        return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
}
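
/*
 * Coverage note (illustrative, derived from the code above): with
 * svc == SPTLRPC_SVC_AUTH only text[0] (the gss header segment) is fed to
 * lgss_get_mic(), while with svc == SPTLRPC_SVC_INTG every segment before
 * the final MIC segment is covered, e.g. for a 4-segment message the MIC
 * is computed over segments 0..2 and stored in segment 3.
 */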
__u32 gss_verify_msg(struct lustre_msg *msg,
                     struct gss_ctx *mechctx,
                     __u32 svc)
{
        rawobj_t text[3], mic;
        int textcnt, max_textcnt;
        int mic_idx;
        __u32 major;

        LASSERT(msg->lm_bufcount >= 2);

        if (svc == SPTLRPC_SVC_NULL)
                return GSS_S_COMPLETE;

        mic_idx = msg_last_segidx(msg);
        max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

        for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_verify_mic(mechctx, textcnt, text, &mic);
        if (major != GSS_S_COMPLETE)
                CERROR("mic verify error: %08x\n", major);

        return major;
}
/*
 * return gss error code
 */
__u32 gss_unseal_msg(struct gss_ctx *mechctx,
                     struct lustre_msg *msgbuf,
                     int *msg_len, int msgbuf_len)
{
        rawobj_t clear_obj, micobj, msgobj, token;
        __u8 *clear_buf;
        int clear_buflen;
        __u32 major;
        ENTRY;

        if (msgbuf->lm_bufcount != 3) {
                CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
                RETURN(GSS_S_FAILURE);
        }

        /* verify gss header */
        msgobj.len = msgbuf->lm_buflens[0];
        msgobj.data = lustre_msg_buf(msgbuf, 0, 0);
        micobj.len = msgbuf->lm_buflens[1];
        micobj.data = lustre_msg_buf(msgbuf, 1, 0);

        major = lgss_verify_mic(mechctx, 1, &msgobj, &micobj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: mic verify error: %08x\n", major);
                RETURN(major);
        }

        /* temporary clear text buffer */
        clear_buflen = msgbuf->lm_buflens[2];
        OBD_ALLOC(clear_buf, clear_buflen);
        if (!clear_buf)
                RETURN(GSS_S_FAILURE);

        token.len = msgbuf->lm_buflens[2];
        token.data = lustre_msg_buf(msgbuf, 2, 0);

        clear_obj.len = clear_buflen;
        clear_obj.data = clear_buf;

        major = lgss_unwrap(mechctx, &token, &clear_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: unwrap message error: %08x\n", major);
                GOTO(out_free, major = GSS_S_FAILURE);
        }
        LASSERT(clear_obj.len <= clear_buflen);

        /* now the decrypted message */
        memcpy(msgbuf, clear_obj.data, clear_obj.len);
        *msg_len = clear_obj.len;

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE(clear_buf, clear_buflen);
        RETURN(major);
}
/********************************************
 * gss client context manipulation helpers  *
 ********************************************/
int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount));

        if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
                unsigned long now;

                if (!ctx->cc_early_expire)
                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                now = cfs_time_current_sec();
                if (ctx->cc_expire && cfs_time_aftereq(now, ctx->cc_expire))
                        CWARN("ctx %p(%u->%s): expired (%lds past deadline)\n",
                              ctx, ctx->cc_vcred.vc_uid,
                              sec2target_str(ctx->cc_sec),
                              cfs_time_sub(now, ctx->cc_expire));
                else
                        CWARN("ctx %p(%u->%s): forced to die (%lds remaining)\n",
                              ctx, ctx->cc_vcred.vc_uid,
                              sec2target_str(ctx->cc_sec),
                              ctx->cc_expire == 0 ? 0 :
                              cfs_time_sub(ctx->cc_expire, now));

                return 1;
        }

        return 0;
}
/*
 * return 1 if the context is dead.
 */
int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
{
        if (unlikely(cli_ctx_is_dead(ctx)))
                return 1;

        /* expire == 0 means it never expires. a newly created gss context
         * may have expire == 0 while the upcall is still in progress */
        if (ctx->cc_expire == 0)
                return 0;

        /* check real expiration */
        if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
                return 0;

        cli_ctx_expire(ctx);
        return 1;
}
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
        struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
        unsigned long ctx_expiry;

        if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
                CERROR("ctx %p(%u): unable to inquire, expire it now\n",
                       gctx, ctx->cc_vcred.vc_uid);
                ctx_expiry = 1; /* make it expired now */
        }

        ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
                                              ctx->cc_sec->ps_flags);

        /* At this point this ctx might have been marked as dead by someone
         * else, in which case nobody will make further use of it. we don't
         * care, and marking it UPTODATE will help destroying the server-side
         * context when it is destroyed. */
        set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

        if (sec_is_reverse(ctx->cc_sec))
                CDEBUG(D_SEC, "server installed reverse ctx %p, "
                       "will expire at %lu(%lds lifetime)\n",
                       ctx, ctx->cc_expire,
                       ctx->cc_expire - cfs_time_current_sec());
        else
                CWARN("client refreshed ctx %p(%u->%s), will expire at "
                      "%lu(%lds lifetime)\n", ctx, ctx->cc_vcred.vc_uid,
                      sec2target_str(ctx->cc_sec), ctx->cc_expire,
                      ctx->cc_expire - cfs_time_current_sec());

        /* install reverse svc ctx, but only for forward connections
         * and root context */
        if (!sec_is_reverse(ctx->cc_sec) && ctx->cc_vcred.vc_uid == 0) {
                gss_sec_install_rctx(ctx->cc_sec->ps_import,
                                     ctx->cc_sec, ctx);
        }
}
void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
{
        if (gctx->gc_mechctx) {
                lgss_delete_sec_context(&gctx->gc_mechctx);
                gctx->gc_mechctx = NULL;
        }

        rawobj_free(&gctx->gc_handle);
}
/*
 * Based on the sequence number algorithm specified in RFC 2203.
 *
 * Modified for our own problem: an arriving request has a valid sequence
 * number, but unwrapping the request might take a long time, after which
 * its sequence number is no longer valid (it has fallen behind the window).
 * This rarely happens, mostly under extreme load.
 *
 * Note we should not check the sequence number before verifying the
 * integrity of the incoming request, because a single attacking request
 * with a high sequence number could otherwise cause all following requests
 * to be dropped.
 *
 * So here we use a multi-phase approach: prepare 2 sequence windows,
 * a "main window" for normal sequence numbers and a "back window" for
 * fallen-behind sequence numbers, and a 3-phase checking mechanism:
 * 0 - before integrity verification, perform an initial sequence check in
 *     the main window, which only tests and doesn't actually set any bits.
 *     if the sequence is high above the window, or fits in the window and
 *     its bit is 0, accept it and proceed to integrity verification;
 *     otherwise reject this sequence.
 * 1 - after integrity verification, check the main window again. if this
 *     sequence is high above the window, or fits in the window and its bit
 *     is 0, set the bit and accept; if it fits in the window but the bit
 *     is already set, reject; if it falls behind the window, proceed to
 *     phase 2.
 * 2 - check the back window. if it is high above the window, or fits in
 *     the window and its bit is 0, set the bit and accept; otherwise
 *     reject.
 *
 * return value:
 *   1: looks like a replay
 *   0: is ok
 *  -1: fell behind the main window (phase 1 only), check the back window
 *
 * Note phase 0 is necessary, because otherwise a replayed request whose
 * sequence number falls between the 2 windows can't be detected.
 *
 * This mechanism can't totally solve the problem, but it helps reduce the
 * number of valid requests that get dropped.
 */
int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
                     __u32 seq_num, int phase)
{
        LASSERT(phase >= 0 && phase <= 2);

        if (seq_num > *max_seq) {
                /*
                 * 1. high above the window
                 */
                if (phase == 0)
                        return 0;

                if (seq_num >= *max_seq + win_size) {
                        memset(window, 0, win_size / 8);
                        *max_seq = seq_num;
                } else {
                        while (*max_seq < seq_num) {
                                (*max_seq)++;
                                __clear_bit((*max_seq) % win_size, window);
                        }
                }
                __set_bit(seq_num % win_size, window);
        } else if (seq_num + win_size <= *max_seq) {
                /*
                 * 2. low behind the window
                 */
                if (phase == 0 || phase == 2)
                        goto replay;

                CWARN("seq %u is %u behind (size %d), check backup window\n",
                      seq_num, *max_seq - win_size - seq_num, win_size);
                return -1;
        } else {
                /*
                 * 3. fit into the window
                 */
                if (phase == 0) {
                        if (test_bit(seq_num % win_size, window))
                                goto replay;
                } else {
                        if (__test_and_set_bit(seq_num % win_size, window))
                                goto replay;
                }
        }

        return 0;

replay:
        CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
               seq_num,
               seq_num + win_size > *max_seq ? "in" : "behind",
               phase == 2 ? "backup " : "main",
               *max_seq, win_size);
        return 1;
}
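
/*
 * Illustration only (not built, all names below are hypothetical): a
 * standalone userspace sketch of the single-window bitmap check above,
 * mirroring phase 1 on the main window: slide the window forward for a new
 * highest sequence, reject a sequence whose bit is already set, otherwise
 * set the bit and accept.
 */
#if 0
#include <string.h>

#define DEMO_WIN 64

struct demo_win {
        unsigned char bits[DEMO_WIN / 8];
        unsigned int  max;
};

static void demo_clear(struct demo_win *w, unsigned int seq)
{
        unsigned int i = seq % DEMO_WIN;

        w->bits[i / 8] &= ~(1u << (i % 8));
}

static int demo_test_and_set(struct demo_win *w, unsigned int seq)
{
        unsigned int i = seq % DEMO_WIN;
        int old = (w->bits[i / 8] >> (i % 8)) & 1;

        w->bits[i / 8] |= 1u << (i % 8);
        return old;
}

/* return 0 = accept, 1 = replay, -1 = fell behind the window */
static int demo_check(struct demo_win *w, unsigned int seq)
{
        if (seq > w->max) {
                if (seq >= w->max + DEMO_WIN) {
                        /* far ahead: restart the window */
                        memset(w->bits, 0, sizeof(w->bits));
                        w->max = seq;
                } else {
                        /* slide forward, clearing the bits passed over */
                        while (w->max < seq)
                                demo_clear(w, ++w->max);
                }
                demo_test_and_set(w, seq);
                return 0;
        }
        if (seq + DEMO_WIN <= w->max)
                return -1;
        return demo_test_and_set(w, seq) ? 1 : 0;
}
#endif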
/*
 * Based on the sequence number algorithm specified in RFC 2203.
 *
 * if @set == 0: initial check, don't set any bit in the window
 * if @set == 1: final check, set the bit in the window
 */
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
        int rc = 0;

        spin_lock(&ssd->ssd_lock);

        if (set == 0) {
                /*
                 * phase 0 testing
                 */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 0);
                if (unlikely(rc))
                        gss_stat_oos_record_svc(0, 1);
        } else {
                /*
                 * phase 1 checking main window
                 */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 1);
                if (rc < 0) {
                        gss_stat_oos_record_svc(1, 1);

                        /*
                         * phase 2 checking back window
                         */
                        rc = gss_do_check_seq(ssd->ssd_win_back,
                                              GSS_SEQ_WIN_BACK,
                                              &ssd->ssd_max_back, seq_num, 2);
                        if (rc)
                                gss_stat_oos_record_svc(2, 1);
                        else
                                gss_stat_oos_record_svc(2, 0);
                }
        }

        spin_unlock(&ssd->ssd_lock);
        return rc;
}
/***************************************
 * cred APIs                           *
 ***************************************/
int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
                    int msgsize, int privacy)
{
        return gss_estimate_payload(NULL, msgsize, privacy);
}

int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
{
        return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
}
void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
{
        buf[0] = '\0';

        if (flags & PTLRPC_CTX_NEW)
                strncat(buf, "new,", bufsize);
        if (flags & PTLRPC_CTX_UPTODATE)
                strncat(buf, "uptodate,", bufsize);
        if (flags & PTLRPC_CTX_DEAD)
                strncat(buf, "dead,", bufsize);
        if (flags & PTLRPC_CTX_ERROR)
                strncat(buf, "error,", bufsize);
        if (flags & PTLRPC_CTX_CACHED)
                strncat(buf, "cached,", bufsize);
        if (flags & PTLRPC_CTX_ETERNAL)
                strncat(buf, "eternal,", bufsize);
        if (buf[0] == '\0')
                strncat(buf, "-,", bufsize);

        buf[strlen(buf) - 1] = '\0';
}
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx *gctx;
        __u32 svc, seq;
        int rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(req->rq_cli_ctx == ctx);

        /* nothing to do for context negotiation RPCs */
        if (req->rq_ctx_init)
                RETURN(0);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);

redo:
        seq = atomic_inc_return(&gctx->gc_seq);

        rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
                          gctx->gc_proc, seq, svc,
                          &gctx->gc_handle);
        if (rc < 0)
                RETURN(rc);

        /* gss_sign_msg() might take a long time to finish, during which
         * more rpcs could be wrapped up and sent out. if we find too many
         * of them we should repack this rpc, because sending it too late
         * might lead to the sequence number falling behind the window on
         * the server and the rpc being dropped. this also applies to
         * gss_cli_ctx_seal().
         *
         * Note: null mode doesn't check sequence numbers. */
        if (svc != SPTLRPC_SVC_NULL &&
            atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
                int behind = atomic_read(&gctx->gc_seq) - seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry signing\n", req, behind);
                goto redo;
        }

        req->rq_reqdata_len = rc;
        RETURN(0);
}
int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
                                  struct ptlrpc_request *req,
                                  struct gss_header *ghdr)
{
        struct gss_err_header *errhdr;
        int rc;

        LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);

        errhdr = (struct gss_err_header *) ghdr;

        /* a server returning NO_CONTEXT might be caused by context expiry
         * or server reboot/failover. we refresh the cred transparently.
         *
         * In some cases, our gss handle might incidentally be identical to
         * another handle, since the handle itself is not fully random. In
         * the krb5 case GSS_S_BAD_SIG will be returned; other mechanisms
         * may return other gss errors.
         *
         * if we add a new mechanism, make sure the correct error is
         * returned in this case.
         *
         * but in any case, don't resend a ctx-destroying rpc, and don't
         * resend on behalf of a reverse import. */
        if (req->rq_ctx_fini) {
                CWARN("server respond error (%08x/%08x) for ctx fini\n",
                      errhdr->gh_major, errhdr->gh_minor);
                rc = -EINVAL;
        } else if (sec_is_reverse(ctx->cc_sec)) {
                CWARN("reverse server respond error (%08x/%08x)\n",
                      errhdr->gh_major, errhdr->gh_minor);
                rc = -EINVAL;
        } else if (errhdr->gh_major == GSS_S_NO_CONTEXT ||
                   errhdr->gh_major == GSS_S_BAD_SIG) {
                CWARN("req x"LPU64"/t"LPU64": server respond ctx %p(%u->%s) "
                      "%s, server might have lost the context.\n",
                      req->rq_xid, req->rq_transno, ctx, ctx->cc_vcred.vc_uid,
                      sec2target_str(ctx->cc_sec),
                      errhdr->gh_major == GSS_S_NO_CONTEXT ?
                      "NO_CONTEXT" : "BAD_SIG");

                sptlrpc_cli_ctx_expire(ctx);

                /* we need to replace the ctx right here, otherwise during
                 * resend we'd hit the logic in sptlrpc_req_refresh_ctx()
                 * which keeps the ctx with the RESEND flag, thus we'd never
                 * get rid of this ctx. */
                rc = sptlrpc_req_replace_dead_ctx(req);
                if (rc == 0)
                        req->rq_resend = 1;
        } else {
                CERROR("req %p: server report gss error (%x/%x)\n",
                       req, errhdr->gh_major, errhdr->gh_minor);
                rc = -EACCES;
        }

        return rc;
}
int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx *gctx;
        struct gss_header *ghdr, *reqhdr;
        struct lustre_msg *msg = req->rq_repbuf;
        __u32 major;
        int rc = 0;
        ENTRY;

        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(msg);

        req->rq_repdata_len = req->rq_nob_received;
        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* special case for context negotiation: rq_repmsg/rq_replen are
         * actually not used currently. */
        if (req->rq_ctx_init) {
                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];
                RETURN(0);
        }

        if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
                CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        ghdr = gss_swab_header(msg, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks */
        reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));

        if (ghdr->gh_version != reqhdr->gh_version) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, reqhdr->gh_version);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                if (ghdr->gh_seq != reqhdr->gh_seq) {
                        CERROR("seqnum %u mismatch, expect %u\n",
                               ghdr->gh_seq, reqhdr->gh_seq);
                        RETURN(-EPROTO);
                }

                if (ghdr->gh_svc != reqhdr->gh_svc) {
                        CERROR("svc %u mismatch, expect %u\n",
                               ghdr->gh_svc, reqhdr->gh_svc);
                        RETURN(-EPROTO);
                }

                if (lustre_msg_swabbed(msg))
                        gss_header_swabber(ghdr);

                major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
                if (major != GSS_S_COMPLETE)
                        RETURN(-EPERM);

                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        if (msg->lm_bufcount < 4) {
                                CERROR("Invalid reply bufcount %u\n",
                                       msg->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        /* bulk checksum is the second-to-last segment */
                        rc = bulk_sec_desc_unpack(msg, msg->lm_bufcount - 2);
                }
                break;
        case PTLRPC_GSS_PROC_ERR:
                rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                break;
        default:
                CERROR("unknown gss proc %d\n", ghdr->gh_proc);
                rc = -EPROTO;
        }

        RETURN(rc);
}
int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx *gctx;
        rawobj_t msgobj, cipher_obj, micobj;
        struct gss_header *ghdr;
        int buflens[3], wiresize, rc;
        __u32 major;
        ENTRY;

        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(req->rq_reqlen);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* final clear data length */
        req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
                                                 req->rq_clrbuf->lm_buflens);

        /* calculate wire data length */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(&gctx->gc_base, buflens[0], 0);
        buflens[2] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
        wiresize = lustre_msg_size_v2(3, buflens);

        /* allocate wire buffer */
        if (req->rq_pool) {
                /* pre-allocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf != req->rq_clrbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);
        } else {
                OBD_ALLOC(req->rq_reqbuf, wiresize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);
                req->rq_reqbuf_len = wiresize;
        }

        lustre_init_msg_v2(req->rq_reqbuf, 3, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_sec_flavor;

        /* gss header */
        ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = gctx->gc_proc;
        ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = gctx->gc_handle.len;
        memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);

redo:
        /* header signature */
        msgobj.len = req->rq_reqbuf->lm_buflens[0];
        msgobj.data = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        micobj.len = req->rq_reqbuf->lm_buflens[1];
        micobj.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);

        major = lgss_get_mic(gctx->gc_mechctx, 1, &msgobj, &micobj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: sign message error: %08x\n", major);
                GOTO(err_free, rc = -EPERM);
        }
        /* perhaps lustre_shrink_msg() has potential problems with
         * re-packing??? shipping a little bit more data is fine.
        lustre_shrink_msg(req->rq_reqbuf, 1, micobj.len, 0);
         */

        /* clear text */
        msgobj.len = req->rq_clrdata_len;
        msgobj.data = (__u8 *) req->rq_clrbuf;

        /* cipher text */
        cipher_obj.len = req->rq_reqbuf->lm_buflens[2];
        cipher_obj.data = lustre_msg_buf(req->rq_reqbuf, 2, 0);

        major = lgss_wrap(gctx->gc_mechctx, &msgobj, req->rq_clrbuf_len,
                          &cipher_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: wrap message error: %08x\n", major);
                GOTO(err_free, rc = -EPERM);
        }
        LASSERT(cipher_obj.len <= buflens[2]);

        /* see the explanation in gss_cli_ctx_sign() */
        if (atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
            GSS_SEQ_REPACK_THRESHOLD) {
                int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry sealing\n", req, behind);

                ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
                goto redo;
        }

        /* now set the final wire data length */
        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 2,
                                                cipher_obj.len, 0);
        RETURN(0);

err_free:
        if (!req->rq_pool) {
                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
        RETURN(rc);
}
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx *gctx;
        struct gss_header *ghdr;
        int msglen, rc;
        __u32 major;
        ENTRY;

        LASSERT(req->rq_repbuf);
        LASSERT(req->rq_cli_ctx == ctx);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        ghdr = gss_swab_header(req->rq_repbuf, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity check */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, PTLRPC_GSS_VERSION);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                if (lustre_msg_swabbed(req->rq_repbuf))
                        gss_header_swabber(ghdr);

                major = gss_unseal_msg(gctx->gc_mechctx, req->rq_repbuf,
                                       &msglen, req->rq_repbuf_len);
                if (major != GSS_S_COMPLETE) {
                        rc = -EPERM;
                        break;
                }

                if (lustre_unpack_msg(req->rq_repbuf, msglen)) {
                        CERROR("Failed to unpack after decryption\n");
                        RETURN(-EPROTO);
                }
                req->rq_repdata_len = msglen;

                if (req->rq_repbuf->lm_bufcount < 1) {
                        CERROR("Invalid reply buffer: empty\n");
                        RETURN(-EPROTO);
                }

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        if (req->rq_repbuf->lm_bufcount < 2) {
                                CERROR("Too few reply buffer segments %d\n",
                                       req->rq_repbuf->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        /* bulk checksum is the last segment */
                        if (bulk_sec_desc_unpack(req->rq_repbuf,
                                                 req->rq_repbuf->lm_bufcount - 1))
                                RETURN(-EPROTO);
                }

                req->rq_repmsg = lustre_msg_buf(req->rq_repbuf, 0, 0);
                req->rq_replen = req->rq_repbuf->lm_buflens[0];

                rc = 0;
                break;
        case PTLRPC_GSS_PROC_ERR:
                rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                break;
        default:
                CERROR("unexpected proc %d\n", ghdr->gh_proc);
                rc = -EPERM;
        }

        RETURN(rc);
}
/*********************************************
 * reverse context installation              *
 *********************************************/

int gss_install_rvs_svc_ctx(struct obd_import *imp,
                            struct gss_sec *gsec,
                            struct gss_cli_ctx *gctx)
{
        return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
}
/*********************************************
 * GSS security APIs                         *
 *********************************************/

int gss_sec_create_common(struct gss_sec *gsec,
                          struct ptlrpc_sec_policy *policy,
                          struct obd_import *imp,
                          struct ptlrpc_svc_ctx *ctx,
                          __u32 flavor,
                          unsigned long flags)
{
        struct ptlrpc_sec *sec;
        ENTRY;

        LASSERT(SEC_FLAVOR_POLICY(flavor) == SPTLRPC_POLICY_GSS);

        gsec->gs_mech = lgss_subflavor_to_mech(SEC_FLAVOR_SUB(flavor));
        if (!gsec->gs_mech) {
                CERROR("gss backend 0x%x not found\n", SEC_FLAVOR_SUB(flavor));
                RETURN(-EOPNOTSUPP);
        }

        spin_lock_init(&gsec->gs_lock);
        gsec->gs_rvs_hdl = 0ULL;

        /* initialize upper ptlrpc_sec */
        sec = &gsec->gs_base;
        sec->ps_policy = policy;
        sec->ps_flavor = flavor;
        sec->ps_flags = flags;
        sec->ps_import = class_import_get(imp);
        sec->ps_lock = SPIN_LOCK_UNLOCKED;
        atomic_set(&sec->ps_busy, 0);
        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);

        if (!ctx) {
                sec->ps_gc_interval = GSS_GC_INTERVAL;
                sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
        } else {
                LASSERT(sec_is_reverse(sec));

                /* never do gc on reverse sec */
                sec->ps_gc_interval = 0;
                sec->ps_gc_next = 0;
        }

        if (SEC_FLAVOR_SVC(flavor) == SPTLRPC_SVC_PRIV &&
            flags & PTLRPC_SEC_FL_BULK)
                sptlrpc_enc_pool_add_user();

        CDEBUG(D_SEC, "create %s%s@%p\n", (ctx ? "reverse " : ""),
               policy->sp_name, gsec);

        RETURN(0);
}
void gss_sec_destroy_common(struct gss_sec *gsec)
{
        struct ptlrpc_sec *sec = &gsec->gs_base;
        ENTRY;

        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_busy) == 0);

        if (gsec->gs_mech) {
                lgss_mech_put(gsec->gs_mech);
                gsec->gs_mech = NULL;
        }

        class_import_put(sec->ps_import);

        if (SEC_FLAVOR_SVC(sec->ps_flavor) == SPTLRPC_SVC_PRIV &&
            sec->ps_flags & PTLRPC_SEC_FL_BULK)
                sptlrpc_enc_pool_del_user();

        EXIT;
}
int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_ctx_ops *ctxops,
                            struct vfs_cred *vcred)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        atomic_set(&gctx->gc_seq, 0);

        CFS_INIT_HLIST_NODE(&ctx->cc_cache);
        atomic_set(&ctx->cc_refcount, 0);
        ctx->cc_sec = sec;
        ctx->cc_ops = ctxops;
        ctx->cc_flags = PTLRPC_CTX_NEW;
        ctx->cc_vcred = *vcred;
        spin_lock_init(&ctx->cc_lock);
        CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
        CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);

        /* take a ref on the owning sec */
        atomic_inc(&sec->ps_busy);

        CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
               sec->ps_policy->sp_name, ctx->cc_sec,
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        return 0;
}
/*
 * return value:
 *  -1: the destroy has been taken care of by someone else
 *   0: proceed to destroy the ctx
 *   1: busy count dropped to 0, proceed to destroy ctx and sec
 */
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        LASSERT(ctx->cc_sec == sec);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_busy) > 0);

        if (gctx->gc_mechctx) {
                /* the final context-fini rpc will use this ctx too, and it's
                 * asynchronous, finished by request_out_callback(). so we
                 * take a refcount here: whoever finally drops the refcount
                 * to 0 is responsible for the rest of the destroy. */
                atomic_inc(&ctx->cc_refcount);

                gss_do_ctx_fini_rpc(gctx);
                gss_cli_ctx_finalize(gctx);

                if (!atomic_dec_and_test(&ctx->cc_refcount))
                        return -1;
        }

        if (sec_is_reverse(sec))
                CDEBUG(D_SEC, "reverse sec %p: destroy ctx %p\n",
                       ctx->cc_sec, ctx);
        else
                CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
                      sec->ps_policy->sp_name, ctx->cc_sec,
                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

        if (atomic_dec_and_test(&sec->ps_busy)) {
                LASSERT(atomic_read(&sec->ps_refcount) == 0);
                return 1;
        }

        return 0;
}
int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int svc, int msgsize)
{
        struct sec_flavor_config *conf;
        int bufsize, txtsize;
        int buflens[5], bufcnt = 2;
        ENTRY;

        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - user descriptor (optional)
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode.
         */

        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        txtsize = buflens[0];

        buflens[1] = msgsize;
        if (svc == SPTLRPC_SVC_INTG)
                txtsize += buflens[1];

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                buflens[bufcnt] = sptlrpc_current_user_desc_size();
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
                buflens[bufcnt] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
                                                     req->rq_bulk_read);
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_ctx_init)
                buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
        else if (svc != SPTLRPC_SVC_NULL)
                buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);

        bufsize = lustre_msg_size_v2(bufcnt, buflens);

        if (!req->rq_reqbuf) {
                bufsize = size_roundup_power2(bufsize);

                OBD_ALLOC(req->rq_reqbuf, bufsize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = bufsize;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= bufsize);
                memset(req->rq_reqbuf, 0, bufsize);
        }

        lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_sec_flavor;

        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
        LASSERT(req->rq_reqmsg);

        /* pack user desc here, later we might leave current user's process */
        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
                sptlrpc_pack_user_desc(req->rq_reqbuf, 2);

        RETURN(0);
}
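
/*
 * Sizing example (illustrative numbers, not from the original source): an
 * AUTH-mode request with msgsize == 512 and neither user nor bulk
 * descriptors ends up with bufcnt == 3 and
 * buflens == { PTLRPC_GSS_HEADER_SIZE, 512, sig }, where
 * sig == gss_cli_payload(ctx, PTLRPC_GSS_HEADER_SIZE, 0), since in AUTH
 * mode only the gss header contributes to txtsize; under INTG the embedded
 * message (and any optional segments) are added to txtsize before the
 * signature segment is sized.
 */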
int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        struct sec_flavor_config *conf;
        int ibuflens[3], ibufcnt;
        int buflens[3];
        int clearsize, wiresize;
        ENTRY;

        LASSERT(req->rq_clrbuf == NULL);
        LASSERT(req->rq_clrbuf_len == 0);

        /* Inner (clear) buffers
         *  - lustre message
         *  - user descriptor (optional)
         *  - bulk checksum (optional)
         */
        ibufcnt = 1;
        ibuflens[0] = msgsize;

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
                ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
                ibuflens[ibufcnt++] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
                                                         req->rq_bulk_read);
        }

        clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
        /* to allow appending padding during encryption */
        clearsize += GSS_MAX_CIPHER_BLOCK;

        /* Wrapper (wire) buffers
         *  - gss header
         *  - signature of gss header
         *  - cipher text
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
        buflens[2] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
        wiresize = lustre_msg_size_v2(3, buflens);

        if (req->rq_pool) {
                /* rq_reqbuf is preallocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);

                memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);

                /* if the pre-allocated buffer is big enough, we just pack
                 * both clear buf & request buf in it, to avoid more alloc. */
                if (clearsize + wiresize <= req->rq_reqbuf_len) {
                        req->rq_clrbuf = (void *)
                                (((char *) req->rq_reqbuf) + wiresize);
                } else {
                        CWARN("pre-allocated buf size %d is not enough for "
                              "both clear (%d) and cipher (%d) text, proceed "
                              "with extra allocation\n", req->rq_reqbuf_len,
                              clearsize, wiresize);
                }
        }

        if (!req->rq_clrbuf) {
                clearsize = size_roundup_power2(clearsize);

                OBD_ALLOC(req->rq_clrbuf, clearsize);
                if (!req->rq_clrbuf)
                        RETURN(-ENOMEM);

                req->rq_clrbuf_len = clearsize;
        }

        lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
                sptlrpc_pack_user_desc(req->rq_clrbuf, 1);

        RETURN(0);
}
/*
 * NOTE: any change to request buffer allocation should also consider
 * changing the enlarge_reqbuf() series of functions.
 */
int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        int svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);

        LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
                (req->rq_bulk_read || req->rq_bulk_write));

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
        case SPTLRPC_SVC_PRIV:
                return gss_alloc_reqbuf_priv(sec, req, msgsize);
        default:
                LASSERTF(0, "bad flavor %x\n", req->rq_sec_flavor);
                return 0;
        }
}
void gss_free_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        int privacy;
        ENTRY;

        LASSERT(!req->rq_pool || req->rq_reqbuf);
        privacy = SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV;

        if (!req->rq_clrbuf)
                goto release_reqbuf;

        /* release clear buffer */
        LASSERT(privacy);
        LASSERT(req->rq_clrbuf_len);

        if (req->rq_pool &&
            req->rq_clrbuf >= req->rq_reqbuf &&
            (char *) req->rq_clrbuf <
            (char *) req->rq_reqbuf + req->rq_reqbuf_len)
                goto release_reqbuf;

        OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
        req->rq_clrbuf = NULL;
        req->rq_clrbuf_len = 0;

release_reqbuf:
        if (!req->rq_pool && req->rq_reqbuf) {
                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }

        EXIT;
}
static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
{
        bufsize = size_roundup_power2(bufsize);

        OBD_ALLOC(req->rq_repbuf, bufsize);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = bufsize;
        return 0;
}
int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int svc, int msgsize)
{
        struct sec_flavor_config *conf;
        int txtsize;
        int buflens[4], bufcnt = 2;

        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode.
         */

        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        txtsize = buflens[0];

        buflens[1] = msgsize;
        if (svc == SPTLRPC_SVC_INTG)
                txtsize += buflens[1];

        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
                buflens[bufcnt] = bulk_sec_desc_size(conf->sfc_bulk_csum, 0,
                                                     req->rq_bulk_read);
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_ctx_init)
                buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
        else if (svc != SPTLRPC_SVC_NULL)
                buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);

        return do_alloc_repbuf(req, lustre_msg_size_v2(bufcnt, buflens));
}
int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        struct sec_flavor_config *conf;
        int txtsize;
        int buflens[3], bufcnt;

        /* Inner (clear) buffers
         *  - lustre message
         *  - bulk checksum (optional)
         */
        bufcnt = 1;
        buflens[0] = msgsize;

        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
                buflens[bufcnt++] = bulk_sec_desc_size(
                                                conf->sfc_bulk_csum, 0,
                                                req->rq_bulk_read);
        }

        txtsize = lustre_msg_size_v2(bufcnt, buflens);
        txtsize += GSS_MAX_CIPHER_BLOCK;

        /* Wrapper (wire) buffers
         *  - gss header
         *  - signature of gss header
         *  - cipher text
         */
        bufcnt = 3;
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
        buflens[2] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);

        return do_alloc_repbuf(req, lustre_msg_size_v2(bufcnt, buflens));
}
int gss_alloc_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        int svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);

        LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
                (req->rq_bulk_read || req->rq_bulk_write));

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
        case SPTLRPC_SVC_PRIV:
                return gss_alloc_repbuf_priv(sec, req, msgsize);
        default:
                LASSERTF(0, "bad flavor %x\n", req->rq_sec_flavor);
                return 0;
        }
}
void gss_free_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
}
static int get_enlarged_msgsize(struct lustre_msg *msg,
                                int segment, int newsize)
{
        int save, newmsg_size;

        LASSERT(newsize >= msg->lm_buflens[segment]);

        save = msg->lm_buflens[segment];
        msg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment] = save;

        return newmsg_size;
}

static int get_enlarged_msgsize2(struct lustre_msg *msg,
                                 int segment1, int newsize1,
                                 int segment2, int newsize2)
{
        int save1, save2, newmsg_size;

        LASSERT(newsize1 >= msg->lm_buflens[segment1]);
        LASSERT(newsize2 >= msg->lm_buflens[segment2]);

        save1 = msg->lm_buflens[segment1];
        save2 = msg->lm_buflens[segment2];
        msg->lm_buflens[segment1] = newsize1;
        msg->lm_buflens[segment2] = newsize2;
        newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment1] = save1;
        msg->lm_buflens[segment2] = save2;

        return newmsg_size;
}
int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int svc,
                            int segment, int newsize)
{
        struct lustre_msg *newbuf;
        int txtsize, sigsize = 0, i;
        int newmsg_size, newbuf_size;
        ENTRY;

        /*
         * gss header is at seg 0;
         * embedded msg is at seg 1;
         * signature (if any) is at the last seg
         */
        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);

        /* 1. compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
        LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);

        /* 2. compute new wrapper msg size */
        if (svc == SPTLRPC_SVC_NULL) {
                /* no signature, get size directly */
                newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
                                                   1, newmsg_size);
        } else {
                txtsize = req->rq_reqbuf->lm_buflens[0];

                if (svc == SPTLRPC_SVC_INTG) {
                        for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
                                txtsize += req->rq_reqbuf->lm_buflens[i];
                        txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
                }

                sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
                LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));

                newbuf_size = get_enlarged_msgsize2(
                                        req->rq_reqbuf,
                                        1, newmsg_size,
                                        msg_last_segidx(req->rq_reqbuf),
                                        sigsize);
        }

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
        }

        /* do enlargement, from wrapper to embedded, from end to begin */
        if (svc != SPTLRPC_SVC_NULL)
                _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
                                             msg_last_segidx(req->rq_reqbuf),
                                             sigsize);

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}
int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int segment, int newsize)
{
        struct lustre_msg *newclrbuf;
        int newmsg_size, newclrbuf_size, newcipbuf_size;
        int buflens[3];
        ENTRY;

        /*
         * embedded msg is at seg 0 of clear buffer;
         * cipher text is at seg 2 of cipher buffer;
         */
        LASSERT(req->rq_pool ||
                (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
        LASSERT(req->rq_reqbuf == NULL ||
                (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);

        /* compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);

        /* compute new clear buffer size */
        newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
        newclrbuf_size += GSS_MAX_CIPHER_BLOCK;

        /* compute new cipher buffer size */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
        buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
        newcipbuf_size = lustre_msg_size_v2(3, buflens);

        /* handle the case that we put both clear buf and cipher buf into
         * the pre-allocated single buffer. */
        if (unlikely(req->rq_pool) &&
            req->rq_clrbuf >= req->rq_reqbuf &&
            (char *) req->rq_clrbuf <
            (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                /* it couldn't be better: we still fit into the
                 * pre-allocated buffer. */
                if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
                        void *src, *dst;

                        /* move clear text backward. */
                        src = req->rq_clrbuf;
                        dst = (char *) req->rq_reqbuf + newcipbuf_size;

                        memmove(dst, src, req->rq_clrbuf_len);

                        req->rq_clrbuf = (struct lustre_msg *) dst;
                        req->rq_clrbuf_len = newclrbuf_size;
                        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
                } else {
                        /* sadly we have to split out the clear buffer */
                        LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
                        LASSERT(req->rq_clrbuf_len < newclrbuf_size);
                }
        }

        if (req->rq_clrbuf_len < newclrbuf_size) {
                newclrbuf_size = size_roundup_power2(newclrbuf_size);

                OBD_ALLOC(newclrbuf, newclrbuf_size);
                if (newclrbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);

                if (req->rq_reqbuf == NULL ||
                    req->rq_clrbuf < req->rq_reqbuf ||
                    (char *) req->rq_clrbuf >=
                    (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                        OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
                }

                req->rq_clrbuf = newclrbuf;
                req->rq_clrbuf_len = newclrbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
        req->rq_reqlen = newmsg_size;

        RETURN(0);
}
int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int segment, int newsize)
{
        int svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);

        LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
        case SPTLRPC_SVC_PRIV:
                return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
        default:
                LASSERTF(0, "bad flavor %x\n", req->rq_sec_flavor);
                return 0;
        }
}
int gss_sec_install_rctx(struct obd_import *imp,
                         struct ptlrpc_sec *sec,
                         struct ptlrpc_cli_ctx *ctx)
{
        struct gss_sec *gsec;
        struct gss_cli_ctx *gctx;
        int rc;

        gsec = container_of(sec, struct gss_sec, gs_base);
        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
        return rc;
}
/********************************************
 * server side API                          *
 ********************************************/
int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
{
        LASSERT(grctx);
        return (grctx->src_init || grctx->src_init_continue ||
                grctx->src_err_notify);
}

void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
{
        if (grctx->src_ctx)
                gss_svc_upcall_put_ctx(grctx->src_ctx);

        sptlrpc_policy_put(grctx->src_base.sc_policy);
        OBD_FREE_PTR(grctx);
}

void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
        atomic_inc(&grctx->src_base.sc_refcount);
}

void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);

        if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
                gss_svc_reqctx_free(grctx);
}
int gss_svc_sign(struct ptlrpc_request *req,
                 struct ptlrpc_reply_state *rs,
                 struct gss_svc_reqctx *grctx,
                 __u32 svc)
{
        int rc;
        ENTRY;

        LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));

        /* embedded lustre_msg might have been shrunk */
        if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
                lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);

        rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
                          PTLRPC_GSS_PROC_DATA, grctx->src_wirectx.gw_seq,
                          svc, NULL);
        if (rc < 0)
                RETURN(rc);

        rs->rs_repdata_len = rc;
        RETURN(0);
}
int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct ptlrpc_reply_state *rs;
        struct gss_err_header *ghdr;
        int replen = sizeof(struct ptlrpc_body);
        int rc;
        ENTRY;

        //OBD_FAIL_RETURN(OBD_FAIL_SVCGSS_ERR_NOTIFY|OBD_FAIL_ONCE, -EINVAL);

        grctx->src_err_notify = 1;
        grctx->src_reserve_len = 0;

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
        if (rc) {
                CERROR("could not pack reply, err %d\n", rc);
                RETURN(rc);
        }

        /* gss hdr */
        rs = req->rq_reply_state;
        LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
        ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        ghdr->gh_major = major;
        ghdr->gh_minor = minor;
        ghdr->gh_handle.len = 0; /* fake context handle */

        rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                                rs->rs_repbuf->lm_buflens);

        CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
               major, minor, libcfs_nid2str(req->rq_peer.nid));

        RETURN(0);
}
int gss_svc_handle_init(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct lustre_msg *reqbuf = req->rq_reqbuf;
        struct obd_uuid *uuid;
        struct obd_device *target;
        rawobj_t uuid_obj, rvs_hdl, in_token;
        __u32 lustre_svc;
        __u32 *secdata, seclen;
        int rc;
        ENTRY;

        CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
               libcfs_nid2str(req->rq_peer.nid));

        req->rq_ctx_init = 1;

        if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
                CERROR("proc %u: invalid handle length %u\n",
                       gw->gw_proc, gw->gw_handle.len);
                RETURN(SECSVC_DROP);
        }

        if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4) {
                CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        /* ctx initiate payload is in the last segment */
        secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
        seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];

        if (seclen < 4 + 4) {
                CERROR("sec size %d too small\n", seclen);
                RETURN(SECSVC_DROP);
        }

        /* lustre svc type */
        lustre_svc = le32_to_cpu(*secdata++);
        seclen -= 4;

        /* extract target uuid. note this code is somewhat fragile because
         * it touches the internal structure of obd_uuid */
        if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
                CERROR("failed to extract target uuid\n");
                RETURN(SECSVC_DROP);
        }
        uuid_obj.data[uuid_obj.len - 1] = '\0';

        uuid = (struct obd_uuid *) uuid_obj.data;
        target = class_uuid2obd(uuid);
        if (!target || target->obd_stopping || !target->obd_set_up) {
                CERROR("target '%s' is not available for context init (%s)\n",
                       uuid->uuid, target == NULL ? "no target" :
                       (target->obd_stopping ? "stopping" : "not set up"));
                RETURN(SECSVC_DROP);
        }

        /* extract reverse handle */
        if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
                CERROR("failed to extract reverse handle\n");
                RETURN(SECSVC_DROP);
        }

        /* extract token */
        if (rawobj_extract(&in_token, &secdata, &seclen)) {
                CERROR("can't extract token\n");
                RETURN(SECSVC_DROP);
        }

        rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
                                        &rvs_hdl, &in_token);
        if (rc != SECSVC_OK)
                RETURN(rc);

        if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_root)
                CWARN("user from %s authenticated as %s\n",
                      libcfs_nid2str(req->rq_peer.nid),
                      grctx->src_ctx->gsc_usr_mds ? "mds" : "root");
        else
                CWARN("accept user %u from %s\n", grctx->src_ctx->gsc_uid,
                      libcfs_nid2str(req->rq_peer.nid));

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                if (reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor\n");
                        RETURN(SECSVC_DROP);
                }
                if (sptlrpc_unpack_user_desc(reqbuf, 2)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }
                req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
        }

        req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
        req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);

        RETURN(rc);
}
/*
 * the last segment must be the gss signature.
 */
int gss_svc_verify_request(struct ptlrpc_request *req,
                           struct gss_svc_reqctx *grctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        struct lustre_msg *msg = req->rq_reqbuf;
        int offset = 2;
        ENTRY;

        *major = GSS_S_COMPLETE;

        if (msg->lm_bufcount < 2) {
                CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
                RETURN(-EINVAL);
        }

        if (gw->gw_svc == SPTLRPC_SVC_NULL)
                goto verified;

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
        if (*major != GSS_S_COMPLETE)
                RETURN(-EACCES);

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

verified:
        /* user descriptor */
        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                if (msg->lm_bufcount < (offset + 1)) {
                        CERROR("no user desc included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        /* check bulk cksum data */
        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                if (msg->lm_bufcount < (offset + 1)) {
                        CERROR("no bulk checksum included\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset))
                        RETURN(-EINVAL);

                grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
                grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
        }

        req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
        req->rq_reqlen = msg->lm_buflens[1];
        RETURN(0);
}
int gss_svc_unseal_request(struct ptlrpc_request *req,
                           struct gss_svc_reqctx *grctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        struct lustre_msg *msg = req->rq_reqbuf;
        int msglen, offset = 1;
        ENTRY;

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
                                &msglen, req->rq_reqdata_len);
        if (*major != GSS_S_COMPLETE)
                RETURN(-EACCES);

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        if (lustre_unpack_msg(msg, msglen)) {
                CERROR("Failed to unpack after decryption\n");
                RETURN(-EINVAL);
        }
        req->rq_reqdata_len = msglen;

        if (msg->lm_bufcount < 1) {
                CERROR("Invalid buffer: is empty\n");
                RETURN(-EINVAL);
        }

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no user descriptor included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no bulk checksum included\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset))
                        RETURN(-EINVAL);

                grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
                grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
        }

        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
        RETURN(0);
}
int gss_svc_handle_data(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32 major = 0;
        int rc = 0;
        ENTRY;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                major = GSS_S_NO_CONTEXT;
                goto error;
        }

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_verify_request(req, grctx, gw, &major);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_unseal_request(req, grctx, gw, &major);
                break;
        default:
                CERROR("unsupported gss service %d\n", gw->gw_svc);
                rc = -EINVAL;
        }

        if (rc == 0)
                RETURN(SECSVC_OK);

        CERROR("svc %u failed: major 0x%08x: ctx %p(%u->%s)\n",
               gw->gw_svc, major, grctx->src_ctx, grctx->src_ctx->gsc_uid,
               libcfs_nid2str(req->rq_peer.nid));
error:
        /* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
         * might happen after a server reboot, to allow recovery. */
        if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
            gss_pack_err_notify(req, major, 0) == 0)
                RETURN(SECSVC_COMPLETE);

        RETURN(SECSVC_DROP);
}
int gss_svc_handle_destroy(struct ptlrpc_request *req,
                           struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32 major;
        ENTRY;

        req->rq_ctx_fini = 1;
        req->rq_no_reply = 1;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                CWARN("invalid gss context handle for destroy.\n");
                RETURN(SECSVC_DROP);
        }

        if (gw->gw_svc != SPTLRPC_SVC_INTG) {
                CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
                RETURN(SECSVC_DROP);
        }

        if (gss_svc_verify_request(req, grctx, gw, &major))
                RETURN(SECSVC_DROP);

        CWARN("destroy svc ctx %p(%u->%s)\n", grctx->src_ctx,
              grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        gss_svc_upcall_destroy_ctx(grctx->src_ctx);

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                if (req->rq_reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }
                if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2)) {
                        CERROR("Mal-formed user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }
                req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
        }

        RETURN(SECSVC_OK);
}
int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
{
        struct gss_header      *ghdr;
        struct gss_svc_reqctx  *grctx;
        struct gss_wire_ctx    *gw;
        int                     rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_svc_ctx == NULL);

        if (req->rq_reqbuf->lm_bufcount < 2) {
                CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        ghdr = gss_swab_header(req->rq_reqbuf, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(SECSVC_DROP);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u, expect %u\n", ghdr->gh_version,
                       PTLRPC_GSS_VERSION);
                RETURN(SECSVC_DROP);
        }

        /* alloc grctx data */
        OBD_ALLOC_PTR(grctx);
        if (!grctx) {
                CERROR("failed to alloc svc reqctx\n");
                RETURN(SECSVC_DROP);
        }
        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
        atomic_set(&grctx->src_base.sc_refcount, 1);
        req->rq_svc_ctx = &grctx->src_base;
        gw = &grctx->src_wirectx;

        /* save wire context */
        gw->gw_proc = ghdr->gh_proc;
        gw->gw_seq = ghdr->gh_seq;
        gw->gw_svc = ghdr->gh_svc;
        rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);

        /* keep the original wire header, which is subject to checksum
         * verification: swab it back if the request arrived swabbed. */
        if (lustre_msg_swabbed(req->rq_reqbuf))
                gss_header_swabber(ghdr);

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_INIT:
        case PTLRPC_GSS_PROC_CONTINUE_INIT:
                rc = gss_svc_handle_init(req, gw);
                break;
        case PTLRPC_GSS_PROC_DATA:
                rc = gss_svc_handle_data(req, gw);
                break;
        case PTLRPC_GSS_PROC_DESTROY:
                rc = gss_svc_handle_destroy(req, gw);
                break;
        default:
                CERROR("unknown proc %u\n", gw->gw_proc);
                rc = SECSVC_DROP;
                break;
        }

        switch (rc) {
        case SECSVC_OK:
                LASSERT(grctx->src_ctx);

                req->rq_auth_gss = 1;
                req->rq_auth_remote = grctx->src_ctx->gsc_remote;
                req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
                req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
                req->rq_auth_uid = grctx->src_ctx->gsc_uid;
                req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
                break;
        case SECSVC_COMPLETE:
                break;
        case SECSVC_DROP:
                gss_svc_reqctx_free(grctx);
                req->rq_svc_ctx = NULL;
                break;
        }

        RETURN(rc);
}
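/*
 * Forcibly invalidate a server-side context: destroy the cached upcall
 * entry, so the client has to negotiate a new context.
 */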
void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_svc_reqctx *grctx;

        if (svc_ctx == NULL)
                return;

        grctx = gss_svc_ctx2reqctx(svc_ctx);
        CWARN("gss svc invalidate ctx %p(%u)\n",
              grctx->src_ctx, grctx->src_ctx->gsc_uid);
        gss_svc_upcall_destroy_ctx(grctx->src_ctx);
}
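/*
 * Estimate the security overhead for a reply segment of @msgsize bytes.
 * "Special" request contexts (e.g. replies that are not gss-protected)
 * simply use the length reserved when the request was handled.
 */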
static inline
int gss_svc_payload(struct gss_svc_reqctx *grctx, int msgsize, int privacy)
{
        if (gss_svc_reqctx_is_special(grctx))
                return grctx->src_reserve_len;

        return gss_estimate_payload(NULL, msgsize, privacy);
}
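/*
 * A sketch of the reply buffer layout built below (bracketed segments
 * are optional):
 *
 *   null/auth/integrity: | gss header | clear msg | [bulk sec desc] | [mic] |
 *   privacy, inner buf:  | clear msg | [bulk sec desc] |
 *   privacy, wire buf:   | gss header | header mic | ciphertext |
 *
 * With privacy only the inner buffer is initialized here; gss_svc_seal()
 * later rebuilds the wire message in place around the ciphertext.
 */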
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
        struct gss_svc_reqctx      *grctx;
        struct ptlrpc_reply_state  *rs;
        int                         privacy, svc, bsd_off = 0;
        int                         ibuflens[2], ibufcnt = 0;
        int                         buflens[4], bufcnt;
        int                         txtsize, wmsg_size, rs_size;
        ENTRY;

        LASSERT(msglen % 8 == 0);

        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) &&
            !req->rq_bulk_read && !req->rq_bulk_write) {
                CERROR("client requested bulk security on a non-bulk rpc\n");
                RETURN(-EPROTO);
        }

        svc = SEC_FLAVOR_SVC(req->rq_sec_flavor);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        if (gss_svc_reqctx_is_special(grctx))
                privacy = 0;
        else
                privacy = (svc == SPTLRPC_SVC_PRIV);

        if (privacy) {
                /* inner clear buffers */
                ibufcnt = 1;
                ibuflens[0] = msglen;

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = ibufcnt;
                        ibuflens[ibufcnt++] = bulk_sec_desc_size(
                                                grctx->src_reqbsd->bsd_csum_alg,
                                                0, req->rq_bulk_read);
                }

                txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
                txtsize += GSS_MAX_CIPHER_BLOCK;

                /* wrapper buffer */
                bufcnt = 3;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = gss_svc_payload(grctx, buflens[0], 0);
                buflens[2] = gss_svc_payload(grctx, txtsize, 1);
        } else {
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = msglen;

                txtsize = buflens[0];
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[1];

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = bufcnt;
                        buflens[bufcnt] = bulk_sec_desc_size(
                                                grctx->src_reqbsd->bsd_csum_alg,
                                                0, req->rq_bulk_read);
                        if (svc == SPTLRPC_SVC_INTG)
                                txtsize += buflens[bufcnt];
                        bufcnt++;
                }

                if (gss_svc_reqctx_is_special(grctx) ||
                    svc != SPTLRPC_SVC_NULL)
                        buflens[bufcnt++] = gss_svc_payload(grctx, txtsize, 0);
        }

        wmsg_size = lustre_msg_size_v2(bufcnt, buflens);

        rs_size = sizeof(*rs) + wmsg_size;
        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = wmsg_size;

        /* initialize the buffer */
        if (privacy) {
                lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
        } else {
                lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
                rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;

                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
        }

        if (bsd_off) {
                grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
                grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
                                                           bsd_off);
        }

        gss_svc_reqctx_addref(grctx);
        rs->rs_svc_ctx = req->rq_svc_ctx;

        LASSERT(rs->rs_msg);
        req->rq_reply_state = rs;
        RETURN(0);
}
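/*
 * Privacy protection of a reply: wrap the whole embedded lustre_msg into
 * a single ciphertext segment, then rebuild the wire message in place at
 * rs_repbuf as | gss header | header mic | ciphertext |.
 */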
static
int gss_svc_seal(struct ptlrpc_request *req,
                 struct ptlrpc_reply_state *rs,
                 struct gss_svc_reqctx *grctx)
{
        struct gss_svc_ctx      *gctx = grctx->src_ctx;
        rawobj_t                 msgobj, cipher_obj, micobj;
        struct gss_header       *ghdr;
        __u8                    *cipher_buf;
        int                      cipher_buflen, buflens[3];
        int                      msglen, rc;
        __u32                    major;
        ENTRY;

        /* the embedded lustre_msg might have been shrunk */
        if (req->rq_replen != rs->rs_repbuf->lm_buflens[0])
                lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);

        /* clear-text data length */
        msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                    rs->rs_repbuf->lm_buflens);

        msgobj.len = msglen;
        msgobj.data = (__u8 *) rs->rs_repbuf;

        /* allocate temporary cipher buffer */
        cipher_buflen = gss_estimate_payload(gctx->gsc_mechctx, msglen, 1);
        OBD_ALLOC(cipher_buf, cipher_buflen);
        if (!cipher_buf)
                RETURN(-ENOMEM);

        cipher_obj.len = cipher_buflen;
        cipher_obj.data = cipher_buf;

        major = lgss_wrap(gctx->gsc_mechctx, &msgobj, rs->rs_repbuf_len,
                          &cipher_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: wrap message error: %08x\n", major);
                GOTO(out_free, rc = -EPERM);
        }
        LASSERT(cipher_obj.len <= cipher_buflen);

        /* we are about to overwrite the data at rs->rs_repbuf; nullify
         * pointers into it to catch further illegal usage. */
        grctx->src_repbsd = NULL;
        grctx->src_repbsd_size = 0;

        /* now the real wire data */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_estimate_payload(gctx->gsc_mechctx, buflens[0], 0);
        buflens[2] = cipher_obj.len;

        LASSERT(lustre_msg_size_v2(3, buflens) <= rs->rs_repbuf_len);
        lustre_init_msg_v2(rs->rs_repbuf, 3, buflens, NULL);
        rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;

        /* gss header */
        ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
        ghdr->gh_seq = grctx->src_wirectx.gw_seq;
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = 0;

        /* header signature */
        msgobj.len = rs->rs_repbuf->lm_buflens[0];
        msgobj.data = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        micobj.len = rs->rs_repbuf->lm_buflens[1];
        micobj.data = lustre_msg_buf(rs->rs_repbuf, 1, 0);

        major = lgss_get_mic(gctx->gsc_mechctx, 1, &msgobj, &micobj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: sign message error: %08x\n", major);
                GOTO(out_free, rc = -EPERM);
        }
        lustre_shrink_msg(rs->rs_repbuf, 1, micobj.len, 0);

        /* cipher text */
        memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0),
               cipher_obj.data, cipher_obj.len);

        rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
                                               cipher_obj.len, 0);

        /* to catch the upper layer's further access */
        rs->rs_msg = NULL;
        req->rq_repmsg = NULL;
        req->rq_replen = 0;

        rc = 0;
out_free:
        OBD_FREE(cipher_buf, cipher_buflen);
        RETURN(rc);
}
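/*
 * Finalize a reply according to the negotiated service level: sign it
 * for null/auth/integrity, seal it for privacy. Special replies need no
 * further processing here.
 */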
int gss_svc_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct gss_wire_ctx       *gw;
        int                        rc;
        ENTRY;

        if (gss_svc_reqctx_is_special(grctx))
                RETURN(0);

        gw = &grctx->src_wirectx;
        if (gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
            gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
                CERROR("proc %d not supported\n", gw->gw_proc);
                RETURN(-EINVAL);
        }

        LASSERT(grctx->src_ctx);

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_seal(req, rs, grctx);
                break;
        default:
                CERROR("Unknown service %d\n", gw->gw_svc);
                GOTO(out, rc = -EINVAL);
        }
out:
        RETURN(rc);
}
void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
{
        struct gss_svc_reqctx *grctx;

        LASSERT(rs->rs_svc_ctx);
        grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);

        /* paranoid, maybe not necessary */
        grctx->src_reqbsd = NULL;
        grctx->src_repbsd = NULL;

        gss_svc_reqctx_decref(grctx);
        rs->rs_svc_ctx = NULL;

        if (!rs->rs_prealloc)
                OBD_FREE(rs, rs->rs_size);
}
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->sc_refcount) == 0);
        gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
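/*
 * Populate a reverse (server-to-client) client context from an established
 * server context, so the server can send callback requests under the same
 * credentials: copy the mech context and duplicate the reverse handle.
 */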
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
                         struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_cli_ctx     *cli_gctx = ctx2gctx(cli_ctx);
        struct gss_svc_reqctx  *grctx;
        struct gss_ctx         *mechctx = NULL;

        cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
        cli_gctx->gc_win = GSS_SEQ_WIN;
        atomic_set(&cli_gctx->gc_seq, 0);

        grctx = container_of(svc_ctx, struct gss_svc_reqctx, src_base);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        if (lgss_copy_reverse_context(grctx->src_ctx->gsc_mechctx, &mechctx) !=
            GSS_S_COMPLETE) {
                CERROR("failed to copy mech context\n");
                return -ENOMEM;
        }

        if (rawobj_dup(&cli_gctx->gc_handle, &grctx->src_ctx->gsc_rvs_hdl)) {
                CERROR("failed to dup reverse handle\n");
                lgss_delete_sec_context(&mechctx);
                return -ENOMEM;
        }

        cli_gctx->gc_mechctx = mechctx;
        gss_cli_ctx_uptodate(cli_gctx);

        return 0;
}
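/*
 * Module setup: bring up lproc, the client/server upcall channels and the
 * kerberos mechanism first, then register the user-visible policies, and
 * unwind in reverse order on failure.
 */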
int __init sptlrpc_gss_init(void)
{
        int rc;

        rc = gss_init_lproc();
        if (rc)
                return rc;
        rc = gss_init_cli_upcall();
        if (rc)
                goto out_lproc;
        rc = gss_init_svc_upcall();
        if (rc)
                goto out_cli_upcall;
        rc = init_kerberos_module();
        if (rc)
                goto out_svc_upcall;

        /* register the policies after everything else is initialized,
         * because they might be in use immediately after registration. */
        rc = gss_init_keyring();
        if (rc)
                goto out_kerberos;
#ifdef HAVE_GSS_PIPEFS
        rc = gss_init_pipefs();
        if (rc)
                goto out_keyring;
#endif
        return 0;

#ifdef HAVE_GSS_PIPEFS
out_keyring:
        gss_exit_keyring();
#endif
out_kerberos:
        cleanup_kerberos_module();
out_svc_upcall:
        gss_exit_svc_upcall();
out_cli_upcall:
        gss_exit_cli_upcall();
out_lproc:
        gss_exit_lproc();
        return rc;
}
static void __exit sptlrpc_gss_exit(void)
{
        gss_exit_keyring();
#ifdef HAVE_GSS_PIPEFS
        gss_exit_pipefs();
#endif
        cleanup_kerberos_module();
        gss_exit_svc_upcall();
        gss_exit_cli_upcall();
        gss_exit_lproc();
}
MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("GSS security policy for Lustre");
MODULE_LICENSE("GPL");

module_init(sptlrpc_gss_init);
module_exit(sptlrpc_gss_exit);