/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Modifications for Lustre
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * linux/net/sunrpc/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <obd_cksum.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"

#include <linux/crypto.h>

/*
 * Early replies have a fixed size in privacy and in integrity mode
 * respectively, so we calculate them only once.
 */
static int gss_at_reply_off_integ;
static int gss_at_reply_off_priv;

static inline int msg_last_segidx(struct lustre_msg *msg)
{
        LASSERT(msg->lm_bufcount > 0);
        return msg->lm_bufcount - 1;
}

static inline int msg_last_seglen(struct lustre_msg *msg)
{
        return msg->lm_buflens[msg_last_segidx(msg)];
}

/********************************************
 * wire data swabber                        *
 ********************************************/

static
void gss_header_swabber(struct gss_header *ghdr)
{
        __swab32s(&ghdr->gh_flags);
        __swab32s(&ghdr->gh_proc);
        __swab32s(&ghdr->gh_seq);
        __swab32s(&ghdr->gh_svc);
        __swab32s(&ghdr->gh_pad1);
        __swab32s(&ghdr->gh_handle.len);
}

struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment)
{
        struct gss_header *ghdr;

        ghdr = lustre_swab_buf(msg, segment, sizeof(*ghdr),
                               gss_header_swabber);

        if (ghdr &&
            sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
                CERROR("gss header requires length %u, but only %u received\n",
                       (unsigned int) sizeof(*ghdr) + ghdr->gh_handle.len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return ghdr;
}

static
void gss_netobj_swabber(netobj_t *obj)
{
        __swab32s(&obj->len);
}

netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
{
        netobj_t *obj;

        obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
        if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
                CERROR("netobj requires length %u but only %u received\n",
                       (unsigned int) sizeof(*obj) + obj->len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return obj;
}

/*
 * The payload size should be obtained from the mechanism, but since we
 * currently only support Kerberos we can simply use fixed values:
 *
 *  - krb5 checksum: 20
 *
 * For privacy mode the payload also includes the cipher text, which has the
 * same size as the plain text, plus a possible confounder and padding, both
 * at most one cipher block.
 */
#define GSS_KRB5_INTEG_MAX_PAYLOAD      (40)

static inline
int gss_mech_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
        if (privacy)
                return GSS_KRB5_INTEG_MAX_PAYLOAD + 16 + 16 + 16 + msgsize;
        else
                return GSS_KRB5_INTEG_MAX_PAYLOAD;
}

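/*
 * Worked example (illustrative): for a 1024-byte message in privacy
 * mode the reserved payload is 40 + 16 + 16 + 16 + 1024 = 1112 bytes;
 * in integrity mode it is the fixed 40 bytes regardless of message
 * size.
 */
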
/*
 * perform the signing. return signature size on success, otherwise < 0
 * to indicate error
 */
static int gss_sign_msg(struct lustre_msg *msg,
                        struct gss_ctx *mechctx,
                        enum lustre_sec_part sp,
                        __u32 flags, __u32 proc, __u32 seq, __u32 svc,
                        rawobj_t *handle)
{
        struct gss_header      *ghdr;
        rawobj_t                text[3], mic;
        int                     textcnt, max_textcnt, mic_idx;
        __u32                   major;

        LASSERT(msg->lm_bufcount >= 2);

        /* gss hdr */
        LASSERT(msg->lm_buflens[0] >=
                sizeof(*ghdr) + (handle ? handle->len : 0));
        ghdr = lustre_msg_buf(msg, 0, 0);

        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = (__u8) sp;
        ghdr->gh_flags = flags;
        ghdr->gh_proc = proc;
        ghdr->gh_seq = seq;
        ghdr->gh_svc = svc;
        if (!handle) {
                /* fill in a fake one */
                ghdr->gh_handle.len = 0;
        } else {
                ghdr->gh_handle.len = handle->len;
                memcpy(ghdr->gh_handle.data, handle->data, handle->len);
        }

        /* no actual signature for null mode */
        if (svc == SPTLRPC_SVC_NULL)
                return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        mic_idx = msg_last_segidx(msg);
        max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

        for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_get_mic(mechctx, textcnt, text, &mic);
        if (major != GSS_S_COMPLETE) {
                CERROR("failed to generate MIC: %08x\n", major);
                return -EPERM;
        }
        LASSERT(mic.len <= msg->lm_buflens[mic_idx]);

        return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
}

static
__u32 gss_verify_msg(struct lustre_msg *msg,
                     struct gss_ctx *mechctx,
                     __u32 svc)
{
        rawobj_t        text[3], mic;
        int             textcnt, max_textcnt;
        int             mic_idx;
        __u32           major;

        LASSERT(msg->lm_bufcount >= 2);

        if (svc == SPTLRPC_SVC_NULL)
                return GSS_S_COMPLETE;

        mic_idx = msg_last_segidx(msg);
        max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

        for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_verify_mic(mechctx, textcnt, text, &mic);
        if (major != GSS_S_COMPLETE)
                CERROR("mic verify error: %08x\n", major);

        return major;
}

/*
 * return gss error code
 */
static
__u32 gss_unseal_msg(struct gss_ctx *mechctx,
                     struct lustre_msg *msgbuf,
                     int *msg_len, int msgbuf_len)
{
        rawobj_t        clear_obj, hdrobj, token;
        __u8           *clear_buf;
        int             clear_buflen;
        __u32           major;
        ENTRY;

        if (msgbuf->lm_bufcount != 2) {
                CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
                RETURN(GSS_S_FAILURE);
        }

        /* allocate a temporary clear-text buffer, same size as the token;
         * we assume the final clear text size <= token size */
        clear_buflen = lustre_msg_buflen(msgbuf, 1);
        OBD_ALLOC(clear_buf, clear_buflen);
        if (!clear_buf)
                RETURN(GSS_S_FAILURE);

        /* buffer objects */
        hdrobj.len = lustre_msg_buflen(msgbuf, 0);
        hdrobj.data = lustre_msg_buf(msgbuf, 0, 0);
        token.len = lustre_msg_buflen(msgbuf, 1);
        token.data = lustre_msg_buf(msgbuf, 1, 0);
        clear_obj.len = clear_buflen;
        clear_obj.data = clear_buf;

        major = lgss_unwrap(mechctx, &hdrobj, &token, &clear_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("unwrap message error: %08x\n", major);
                GOTO(out_free, major = GSS_S_FAILURE);
        }
        LASSERT(clear_obj.len <= clear_buflen);
        LASSERT(clear_obj.len <= msgbuf_len);

        /* now replace the message buffer with the decrypted message */
        memcpy(msgbuf, clear_obj.data, clear_obj.len);
        *msg_len = clear_obj.len;

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE(clear_buf, clear_buflen);
        RETURN(major);
}

/********************************************
 * gss client context manipulation helpers  *
 ********************************************/

int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount));

        if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
                if (!ctx->cc_early_expire)
                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                CWARN("ctx %p(%u->%s) expired: %lu(%+lds)\n",
                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
                      ctx->cc_expire,
                      ctx->cc_expire == 0 ? 0 :
                      cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));

                return 1;
        }

        return 0;
}

/*
 * return 1 if the context is dead.
 */
int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
{
        if (unlikely(cli_ctx_is_dead(ctx)))
                return 1;

        /* expire == 0 means never expire. a newly created gss context
         * may have 0 expiration during the upcall. */
        if (ctx->cc_expire == 0)
                return 0;

        /* check real expiration */
        if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
                return 0;

        cli_ctx_expire(ctx);
        return 1;
}

void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
        struct ptlrpc_cli_ctx  *ctx = &gctx->gc_base;
        unsigned long           ctx_expiry;

        if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
                CERROR("ctx %p(%u): unable to inquire, expire it now\n",
                       gctx, ctx->cc_vcred.vc_uid);
                ctx_expiry = 1; /* make it expired now */
        }

        ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
                                              ctx->cc_sec->ps_flvr.sf_flags);

        /* At this point this ctx might have been marked as dead by
         * someone else, in which case nobody will make further use
         * of it. We don't care: marking it UPTODATE anyway helps
         * destroying the server-side context when this one is
         * destroyed. */
        set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

        if (sec_is_reverse(ctx->cc_sec)) {
                CWARN("server installed reverse ctx %p idx "LPX64", "
                      "expiry %lu(%+lds)\n", ctx,
                      gss_handle_to_u64(&gctx->gc_handle),
                      ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
        } else {
                CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
                      "expiry %lu(%+lds)\n", ctx,
                      gss_handle_to_u64(&gctx->gc_handle),
                      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
                      ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());

                /* install reverse svc ctx for root context */
                if (ctx->cc_vcred.vc_uid == 0)
                        gss_sec_install_rctx(ctx->cc_sec->ps_import,
                                             ctx->cc_sec, ctx);
        }
}

static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
{
        LASSERT(gctx->gc_base.cc_sec);

        if (gctx->gc_mechctx) {
                lgss_delete_sec_context(&gctx->gc_mechctx);
                gctx->gc_mechctx = NULL;
        }

        if (!rawobj_empty(&gctx->gc_svc_handle)) {
                /* forward ctx: mark the buddy reverse svcctx soon-expire. */
                if (!sec_is_reverse(gctx->gc_base.cc_sec) &&
                    !rawobj_empty(&gctx->gc_svc_handle))
                        gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);

                rawobj_free(&gctx->gc_svc_handle);
        }

        rawobj_free(&gctx->gc_handle);
}

/*
 * Based on the sequence number algorithm specified in RFC 2203,
 * modified for our own problem: an arriving request carries a valid
 * sequence number, but unwrapping the request might take a long time,
 * after which its sequence number is no longer valid (it has fallen
 * behind the window). This rarely happens, mostly under extreme load.
 *
 * Note we should not check the sequence number before verifying the
 * integrity of the incoming request, because a single attacking request
 * with a high sequence number could cause all following requests to be
 * dropped.
 *
 * So here we use a multi-phase approach: prepare 2 sequence windows, a
 * "main window" for normal sequence numbers and a "back window" for
 * fallen-behind sequence numbers, with a 3-phase checking mechanism:
 *  0 - before integrity verification, perform an initial check in the
 *      main window, which only probes and doesn't actually set any
 *      bits. If the sequence is high above the window, or fits in the
 *      window with its bit unset, accept it and proceed to integrity
 *      verification; otherwise reject this sequence.
 *  1 - after integrity verification, check the main window again. If
 *      the sequence is high above the window, or fits in the window
 *      with its bit unset, set the bit and accept; if it fits in the
 *      window but the bit is already set, reject; if it falls behind
 *      the window, proceed to phase 2.
 *  2 - check the back window. If the sequence is high above the window,
 *      or fits in the window with its bit unset, set the bit and
 *      accept; otherwise reject.
 *
 * return value:
 *   1: looks like a replay
 *   0: is ok
 *  -1: is a replay
 *
 * Note phase 0 is necessary, because otherwise a replayed request whose
 * sequence number lies between the 2 windows couldn't be detected.
 *
 * This mechanism can't totally solve the problem, but it helps drop far
 * fewer valid requests.
 */
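/*
 * Worked example (illustrative, not from the original source): with
 * win_size == 8 and *max_seq == 10, an arriving seq 14 is "high above":
 * the window is advanced bit by bit (clearing bits 11..14 mod 8),
 * bit 14 % 8 is set and *max_seq becomes 14. seq 2 (2 + 8 <= 14) falls
 * behind the window and, in phase 1, is deferred to the back window.
 * seq 9 fits inside the window and is accepted only if bit 9 % 8 is
 * still clear.
 */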
int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
                     __u32 seq_num, int phase)
{
        LASSERT(phase >= 0 && phase <= 2);

        if (seq_num > *max_seq) {
                /*
                 * 1. high above the window
                 */
                if (phase == 0)
                        return 0;

                if (seq_num >= *max_seq + win_size) {
                        memset(window, 0, win_size / 8);
                        *max_seq = seq_num;
                } else {
                        while (*max_seq < seq_num) {
                                (*max_seq)++;
                                __clear_bit((*max_seq) % win_size, window);
                        }
                }
                __set_bit(seq_num % win_size, window);
        } else if (seq_num + win_size <= *max_seq) {
                /*
                 * 2. low behind the window
                 */
                if (phase == 0 || phase == 2)
                        goto replay;

                CWARN("seq %u is %u behind (size %d), check backup window\n",
                      seq_num, *max_seq - win_size - seq_num, win_size);
                return 1;
        } else {
                /*
                 * 3. fit into the window
                 */
                switch (phase) {
                case 0:
                        if (test_bit(seq_num % win_size, window))
                                goto replay;
                        break;
                case 1:
                case 2:
                        if (__test_and_set_bit(seq_num % win_size, window))
                                goto replay;
                        break;
                }
        }

        return 0;

replay:
        CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
               seq_num,
               seq_num + win_size > *max_seq ? "in" : "behind",
               phase == 2 ? "backup " : "main",
               *max_seq, win_size);
        return -1;
}

/*
 * Based on the sequence number algorithm specified in RFC 2203.
 *
 * if @set == 0: initial check, don't set any bit in the window
 * if @set == 1: final check, set bit(s) in the window
 */
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
        int rc = 0;

        spin_lock(&ssd->ssd_lock);

        if (set == 0) {
                /*
                 * phase 0 testing
                 */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 0);
                if (unlikely(rc))
                        gss_stat_oos_record_svc(0, 1);
        } else {
                /*
                 * phase 1 checking main window
                 */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 1);
                switch (rc) {
                case -1:
                        gss_stat_oos_record_svc(1, 1);
                        /* fall through */
                case 0:
                        goto exit;
                }

                /*
                 * phase 2 checking back window
                 */
                rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
                                      &ssd->ssd_max_back, seq_num, 2);
                if (rc != 0)
                        gss_stat_oos_record_svc(2, 1);
                else
                        gss_stat_oos_record_svc(2, 0);
        }
exit:
        spin_unlock(&ssd->ssd_lock);
        return rc;
}

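/*
 * Typical calling pattern (sketch; see gss_svc_verify_request() and
 * gss_svc_unseal_request() below): call gss_check_seq_num(ssd, seq, 0)
 * before integrity verification to cheaply reject obvious replays,
 * verify the MIC, then call gss_check_seq_num(ssd, seq, 1) to actually
 * record the sequence number in the window.
 */
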
/***************************************
 * cred APIs                           *
 ***************************************/

static inline int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
                                  int msgsize, int privacy)
{
        return gss_mech_payload(NULL, msgsize, privacy);
}

int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
{
        return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
}

void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
{
        buf[0] = '\0';

        if (flags & PTLRPC_CTX_NEW)
                strncat(buf, "new,", bufsize);
        if (flags & PTLRPC_CTX_UPTODATE)
                strncat(buf, "uptodate,", bufsize);
        if (flags & PTLRPC_CTX_DEAD)
                strncat(buf, "dead,", bufsize);
        if (flags & PTLRPC_CTX_ERROR)
                strncat(buf, "error,", bufsize);
        if (flags & PTLRPC_CTX_CACHED)
                strncat(buf, "cached,", bufsize);
        if (flags & PTLRPC_CTX_ETERNAL)
                strncat(buf, "eternal,", bufsize);
        if (buf[0] == '\0')
                strncat(buf, "-,", bufsize);

        buf[strlen(buf) - 1] = '\0';
}

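/*
 * Example (illustrative): flags == PTLRPC_CTX_UPTODATE | PTLRPC_CTX_CACHED
 * yields "uptodate,cached", while flags == 0 yields "-"; the trailing
 * comma is always stripped.
 */
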
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx     *gctx = ctx2gctx(ctx);
        __u32                   flags = 0, seq, svc;
        int                     rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(req->rq_cli_ctx == ctx);

        /* nothing to do for context negotiation RPCs */
        if (req->rq_ctx_init)
                RETURN(0);

        svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
        if (req->rq_pack_bulk)
                flags |= LUSTRE_GSS_PACK_BULK;
        if (req->rq_pack_udesc)
                flags |= LUSTRE_GSS_PACK_USER;

redo:
        seq = atomic_inc_return(&gctx->gc_seq);

        rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
                          ctx->cc_sec->ps_part,
                          flags, gctx->gc_proc, seq, svc,
                          &gctx->gc_handle);
        if (rc < 0)
                RETURN(rc);

        /* gss_sign_msg() might take a long time to finish; during that
         * period more RPCs could be wrapped up and sent out. If we find
         * too many of them we should repack this RPC, because sending it
         * too late might cause its sequence number to fall behind the
         * window on the server and be dropped. The same applies to
         * gss_cli_ctx_seal().
         *
         * Note: null mode doesn't check sequence numbers. */
        if (svc != SPTLRPC_SVC_NULL &&
            atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
                int behind = atomic_read(&gctx->gc_seq) - seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry signing\n", req, behind);
                goto redo;
        }

        req->rq_reqdata_len = rc;
        RETURN(0);
}

int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
                                  struct ptlrpc_request *req,
                                  struct gss_header *ghdr)
{
        struct gss_err_header *errhdr;
        int rc;

        LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);

        errhdr = (struct gss_err_header *) ghdr;

        CWARN("req x"LPU64"/t"LPU64", ctx %p idx "LPX64"(%u->%s): "
              "%sserver respond (%08x/%08x)\n",
              req->rq_xid, req->rq_transno, ctx,
              gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
              ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
              sec_is_reverse(ctx->cc_sec) ? "reverse" : "",
              errhdr->gh_major, errhdr->gh_minor);

        /* context fini rpc: just let it fail */
        if (req->rq_ctx_fini) {
                CWARN("context fini rpc failed\n");
                return -EINVAL;
        }

        /* reverse sec: just return the error; don't expire this ctx
         * because it's crucial for callback rpcs. Note that if a callback
         * rpc fails because of a bit flip during network transfer, the
         * client will be evicted directly, so more gracefully we probably
         * want to let it retry a number of times. */
        if (sec_is_reverse(ctx->cc_sec))
                return -EINVAL;

        if (errhdr->gh_major != GSS_S_NO_CONTEXT &&
            errhdr->gh_major != GSS_S_BAD_SIG)
                return -EACCES;

        /* a server returning NO_CONTEXT might be caused by context expiry
         * or server reboot/failover. We try to refresh a new ctx, which
         * is transparent to the upper layer.
         *
         * In some cases our gss handle may happen to be identical to
         * another handle, since the handle itself is not fully random.
         * In the krb5 case GSS_S_BAD_SIG will be returned; other
         * mechanisms may return other gss errors.
         *
         * If we add a new mechanism, make sure the correct error is
         * returned in this case. */
        CWARN("%s: server might have lost the context, retrying\n",
              errhdr->gh_major == GSS_S_NO_CONTEXT ? "NO_CONTEXT" : "BAD_SIG");

        sptlrpc_cli_ctx_expire(ctx);

        /* we need to replace the ctx right here, otherwise during a
         * resend we'll hit the logic in sptlrpc_req_refresh_ctx() which
         * keeps the ctx with the RESEND flag, and we would never get rid
         * of this ctx. */
        rc = sptlrpc_req_replace_dead_ctx(req);
        if (rc == 0)
                req->rq_resend = 1;

        return rc;
}

int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx     *gctx;
        struct gss_header      *ghdr, *reqhdr;
        struct lustre_msg      *msg = req->rq_repdata;
        __u32                   major;
        int                     pack_bulk, rc = 0;
        ENTRY;

        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(msg);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* special case for context negotiation: rq_repmsg/rq_replen are
         * actually not used currently, but an early reply is always
         * treated normally */
        if (req->rq_ctx_init && !req->rq_early) {
                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];
                RETURN(0);
        }

        if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
                CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        ghdr = gss_swab_header(msg, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks */
        reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));
        LASSERT(reqhdr);

        if (ghdr->gh_version != reqhdr->gh_version) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, reqhdr->gh_version);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;

                if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)) {
                        CERROR("%s bulk flag in reply\n",
                               req->rq_pack_bulk ? "missing" : "unexpected");
                        RETURN(-EPROTO);
                }

                if (ghdr->gh_seq != reqhdr->gh_seq) {
                        CERROR("seqnum %u mismatch, expect %u\n",
                               ghdr->gh_seq, reqhdr->gh_seq);
                        RETURN(-EPROTO);
                }

                if (ghdr->gh_svc != reqhdr->gh_svc) {
                        CERROR("svc %u mismatch, expect %u\n",
                               ghdr->gh_svc, reqhdr->gh_svc);
                        RETURN(-EPROTO);
                }

                if (lustre_msg_swabbed(msg))
                        gss_header_swabber(ghdr);

                major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
                if (major != GSS_S_COMPLETE)
                        RETURN(-EPERM);

                if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
                        __u32 cksum;

                        cksum = crc32_le(~(__u32) 0,
                                         lustre_msg_buf(msg, 1, 0),
                                         lustre_msg_buflen(msg, 1));
                        if (cksum != msg->lm_cksum) {
                                CWARN("early reply checksum mismatch: "
                                      "%08x != %08x\n", cksum, msg->lm_cksum);
                                RETURN(-EPROTO);
                        }
                }

                if (pack_bulk) {
                        /* bulk checksum is right after the lustre msg */
                        if (msg->lm_bufcount < 3) {
                                CERROR("Invalid reply bufcount %u\n",
                                       msg->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        rc = bulk_sec_desc_unpack(msg, 2);
                        if (rc) {
                                CERROR("unpack bulk desc: %d\n", rc);
                                RETURN(rc);
                        }
                }

                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];
                break;
        case PTLRPC_GSS_PROC_ERR:
                if (req->rq_early) {
                        CERROR("server return error with early reply\n");
                        rc = -EPROTO;
                } else {
                        rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                }
                break;
        default:
                CERROR("unknown gss proc %d\n", ghdr->gh_proc);
                rc = -EPROTO;
        }

        RETURN(rc);
}

int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx     *gctx;
        rawobj_t                hdrobj, msgobj, token;
        struct gss_header      *ghdr;
        __u32                   buflens[2], major;
        int                     wiresize, rc;
        ENTRY;

        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(req->rq_reqlen);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* final clear data length */
        req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
                                                 req->rq_clrbuf->lm_buflens);

        /* calculate wire data length */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
        wiresize = lustre_msg_size_v2(2, buflens);

        /* allocate wire buffer */
        if (req->rq_pool) {
                /* pre-allocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf != req->rq_clrbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);
        } else {
                OBD_ALLOC(req->rq_reqbuf, wiresize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);
                req->rq_reqbuf_len = wiresize;
        }

        lustre_init_msg_v2(req->rq_reqbuf, 2, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        /* gss header */
        ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = gctx->gc_proc;
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = gctx->gc_handle.len;
        memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
        if (req->rq_pack_bulk)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
        if (req->rq_pack_udesc)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;

        ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);

redo:
        /* buffer objects */
        hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
        hdrobj.data = (__u8 *) ghdr;
        msgobj.len = req->rq_clrdata_len;
        msgobj.data = (__u8 *) req->rq_clrbuf;
        token.len = lustre_msg_buflen(req->rq_reqbuf, 1);
        token.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);

        major = lgss_wrap(gctx->gc_mechctx, &hdrobj, &msgobj,
                          req->rq_clrbuf_len, &token);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: wrap message error: %08x\n", major);
                GOTO(err_free, rc = -EPERM);
        }
        LASSERT(token.len <= buflens[1]);

        /* see explanation in gss_cli_ctx_sign() */
        if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
                     GSS_SEQ_REPACK_THRESHOLD)) {
                int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry sealing\n", req, behind);

                ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
                goto redo;
        }

        /* now set the final wire data length */
        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len, 0);
        RETURN(0);

err_free:
        if (!req->rq_pool) {
                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
        RETURN(rc);
}

int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx     *gctx;
        struct gss_header      *ghdr;
        struct lustre_msg      *msg = req->rq_repdata;
        int                     msglen, pack_bulk, rc;
        __u32                   major;
        ENTRY;

        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(req->rq_ctx_init == 0);
        LASSERT(msg);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        ghdr = gss_swab_header(msg, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, PTLRPC_GSS_VERSION);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;

                if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)) {
                        CERROR("%s bulk flag in reply\n",
                               req->rq_pack_bulk ? "missing" : "unexpected");
                        RETURN(-EPROTO);
                }

                if (lustre_msg_swabbed(msg))
                        gss_header_swabber(ghdr);

                /* use rq_repdata_len as the buffer size, which assumes
                 * unsealing doesn't need extra memory space. for precise
                 * control we'd better calculate the actual buffer size as
                 * (repbuf_len - offset - repdata_len) */
                major = gss_unseal_msg(gctx->gc_mechctx, msg,
                                       &msglen, req->rq_repdata_len);
                if (major != GSS_S_COMPLETE) {
                        rc = -EPERM;
                        break;
                }

                if (lustre_unpack_msg(msg, msglen)) {
                        CERROR("Failed to unpack after decryption\n");
                        RETURN(-EPROTO);
                }

                if (msg->lm_bufcount < 1) {
                        CERROR("Invalid reply buffer: empty\n");
                        RETURN(-EPROTO);
                }

                if (pack_bulk) {
                        if (msg->lm_bufcount < 2) {
                                CERROR("bufcount %u: missing bulk sec desc\n",
                                       msg->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        /* bulk checksum is the last segment */
                        if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1))
                                RETURN(-EPROTO);
                }

                req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
                req->rq_replen = msg->lm_buflens[0];

                rc = 0;
                break;
        case PTLRPC_GSS_PROC_ERR:
                if (req->rq_early) {
                        CERROR("server return error with early reply\n");
                        rc = -EPROTO;
                } else {
                        rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                }
                break;
        default:
                CERROR("unexpected proc %d\n", ghdr->gh_proc);
                rc = -EPERM;
        }

        RETURN(rc);
}

/*********************************************
 * reverse context installation              *
 *********************************************/

static inline
int gss_install_rvs_svc_ctx(struct obd_import *imp,
                            struct gss_sec *gsec,
                            struct gss_cli_ctx *gctx)
{
        return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
}

/*********************************************
 * GSS security APIs                         *
 *********************************************/
int gss_sec_create_common(struct gss_sec *gsec,
                          struct ptlrpc_sec_policy *policy,
                          struct obd_import *imp,
                          struct ptlrpc_svc_ctx *svcctx,
                          struct sptlrpc_flavor *sf)
{
        struct ptlrpc_sec *sec;

        LASSERT(imp);
        LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);

        gsec->gs_mech = lgss_subflavor_to_mech(RPC_FLVR_SUB(sf->sf_rpc));
        if (!gsec->gs_mech) {
                CERROR("gss backend 0x%x not found\n",
                       RPC_FLVR_SUB(sf->sf_rpc));
                return -EOPNOTSUPP;
        }

        spin_lock_init(&gsec->gs_lock);
        gsec->gs_rvs_hdl = 0ULL;

        /* initialize upper ptlrpc_sec */
        sec = &gsec->gs_base;
        sec->ps_policy = policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_flvr = *sf;
        sec->ps_import = class_import_get(imp);
        sec->ps_lock = SPIN_LOCK_UNLOCKED;
        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);

        if (!svcctx) {
                sec->ps_gc_interval = GSS_GC_INTERVAL;
        } else {
                LASSERT(sec_is_reverse(sec));

                /* never do gc on reverse sec */
                sec->ps_gc_interval = 0;
        }

        if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
            sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
                sptlrpc_enc_pool_add_user();

        CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
               policy->sp_name, gsec);
        return 0;
}

void gss_sec_destroy_common(struct gss_sec *gsec)
{
        struct ptlrpc_sec *sec = &gsec->gs_base;
        ENTRY;

        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);

        if (gsec->gs_mech) {
                lgss_mech_put(gsec->gs_mech);
                gsec->gs_mech = NULL;
        }

        class_import_put(sec->ps_import);

        if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
            sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
                sptlrpc_enc_pool_del_user();

        EXIT;
}

void gss_sec_kill(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}

int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_ctx_ops *ctxops,
                            struct vfs_cred *vcred)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        atomic_set(&gctx->gc_seq, 0);

        CFS_INIT_HLIST_NODE(&ctx->cc_cache);
        atomic_set(&ctx->cc_refcount, 0);
        ctx->cc_sec = sec;
        ctx->cc_ops = ctxops;
        ctx->cc_expire = 0;
        ctx->cc_flags = PTLRPC_CTX_NEW;
        ctx->cc_vcred = *vcred;
        spin_lock_init(&ctx->cc_lock);
        CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
        CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);

        /* take a ref on the owning sec, balanced in ctx destroying */
        atomic_inc(&sec->ps_refcount);
        /* statistic only */
        atomic_inc(&sec->ps_nctx);

        CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
               sec->ps_policy->sp_name, ctx->cc_sec,
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        return 0;
}

/*
 * return value:
 *   1: the context has been taken care of by someone else
 *   0: proceed to really destroy the context locally
 */
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        if (gctx->gc_mechctx) {
                /* the final context fini rpc will use this ctx too, and it's
                 * asynchronous and finished by request_out_callback(). so we
                 * add a refcount; whoever drops it to 0 is responsible for
                 * the rest of the destroy. */
                atomic_inc(&ctx->cc_refcount);

                gss_do_ctx_fini_rpc(gctx);
                gss_cli_ctx_finalize(gctx);

                if (!atomic_dec_and_test(&ctx->cc_refcount))
                        return 1;
        }

        if (sec_is_reverse(sec))
                CWARN("reverse sec %p: destroy ctx %p\n",
                      ctx->cc_sec, ctx);
        else
                CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
                      sec->ps_policy->sp_name, ctx->cc_sec,
                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

        return 0;
}

int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int svc, int msgsize)
{
        int bufsize, txtsize;
        int bufcnt = 2;
        __u32 buflens[5];
        ENTRY;

        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - user descriptor (optional)
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode.
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        txtsize = buflens[0];

        buflens[1] = msgsize;
        if (svc == SPTLRPC_SVC_INTG)
                txtsize += buflens[1];

        if (req->rq_pack_udesc) {
                buflens[bufcnt] = sptlrpc_current_user_desc_size();
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_pack_bulk) {
                buflens[bufcnt] = bulk_sec_desc_size(
                                        req->rq_flvr.sf_bulk_hash, 1,
                                        req->rq_bulk_read);
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_ctx_init)
                buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
        else if (svc != SPTLRPC_SVC_NULL)
                buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);

        bufsize = lustre_msg_size_v2(bufcnt, buflens);

        if (!req->rq_reqbuf) {
                bufsize = size_roundup_power2(bufsize);

                OBD_ALLOC(req->rq_reqbuf, bufsize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = bufsize;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= bufsize);
                memset(req->rq_reqbuf, 0, bufsize);
        }

        lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
        LASSERT(req->rq_reqmsg);

        /* pack user desc here, later we might leave current user's process */
        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, 2);

        RETURN(0);
}

int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        __u32 ibuflens[3], wbuflens[2];
        int ibufcnt;
        int clearsize, wiresize;
        ENTRY;

        LASSERT(req->rq_clrbuf == NULL);
        LASSERT(req->rq_clrbuf_len == 0);

        /* Inner (clear) buffers
         *  - lustre message
         *  - user descriptor (optional)
         *  - bulk checksum (optional)
         */
        ibufcnt = 1;
        ibuflens[0] = msgsize;

        if (req->rq_pack_udesc)
                ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
        if (req->rq_pack_bulk)
                ibuflens[ibufcnt++] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 1,
                                                req->rq_bulk_read);

        clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
        /* to allow appending padding during encryption */
        clearsize += GSS_MAX_CIPHER_BLOCK;

        /* Wrapper (wire) buffers
         *  - gss header
         *  - cipher text
         */
        wbuflens[0] = PTLRPC_GSS_HEADER_SIZE;
        wbuflens[1] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
        wiresize = lustre_msg_size_v2(2, wbuflens);

        if (req->rq_pool) {
                /* rq_reqbuf is preallocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);

                memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);

                /* if the pre-allocated buffer is big enough, we just pack
                 * both clear buf & request buf in it, to avoid more alloc. */
                if (clearsize + wiresize <= req->rq_reqbuf_len) {
                        req->rq_clrbuf =
                                (void *) (((char *) req->rq_reqbuf) + wiresize);
                } else {
                        CWARN("pre-allocated buf size %d is not enough for "
                              "both clear (%d) and cipher (%d) text, proceed "
                              "with extra allocation\n", req->rq_reqbuf_len,
                              clearsize, wiresize);
                }
        }

        if (!req->rq_clrbuf) {
                clearsize = size_roundup_power2(clearsize);

                OBD_ALLOC(req->rq_clrbuf, clearsize);
                if (!req->rq_clrbuf)
                        RETURN(-ENOMEM);
        }
        req->rq_clrbuf_len = clearsize;

        lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_clrbuf, 1);

        RETURN(0);
}

/*
 * NOTE: any change to request buffer allocation should also be reflected
 * in the enlarge_reqbuf() series of functions.
 */
int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_pack_bulk ||
                (req->rq_bulk_read || req->rq_bulk_write));

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
        case SPTLRPC_SVC_PRIV:
                return gss_alloc_reqbuf_priv(sec, req, msgsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}

void gss_free_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        int privacy;
        ENTRY;

        LASSERT(!req->rq_pool || req->rq_reqbuf);
        privacy = RPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;

        if (!req->rq_clrbuf)
                goto release_reqbuf;

        /* release clear buffer */
        LASSERT(privacy);
        LASSERT(req->rq_clrbuf_len);

        if (req->rq_pool &&
            req->rq_clrbuf >= req->rq_reqbuf &&
            (char *) req->rq_clrbuf <
            (char *) req->rq_reqbuf + req->rq_reqbuf_len)
                goto release_reqbuf;

        OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
        req->rq_clrbuf = NULL;
        req->rq_clrbuf_len = 0;

release_reqbuf:
        if (!req->rq_pool && req->rq_reqbuf) {
                LASSERT(req->rq_reqbuf_len);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }

        req->rq_reqmsg = NULL;

        EXIT;
}

static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
{
        bufsize = size_roundup_power2(bufsize);

        OBD_ALLOC(req->rq_repbuf, bufsize);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = bufsize;
        return 0;
}

int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int svc, int msgsize)
{
        int             txtsize;
        __u32           buflens[4];
        int             bufcnt = 2;
        int             alloc_size;

        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode.
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        txtsize = buflens[0];

        buflens[1] = msgsize;
        if (svc == SPTLRPC_SVC_INTG)
                txtsize += buflens[1];

        if (req->rq_pack_bulk) {
                buflens[bufcnt] = bulk_sec_desc_size(
                                        req->rq_flvr.sf_bulk_hash, 0,
                                        req->rq_bulk_read);
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_ctx_init)
                buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
        else if (svc != SPTLRPC_SVC_NULL)
                buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);

        alloc_size = lustre_msg_size_v2(bufcnt, buflens);

        /* add space for early reply */
        alloc_size += gss_at_reply_off_integ;

        return do_alloc_repbuf(req, alloc_size);
}

int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        int             txtsize;
        __u32           buflens[2];
        int             bufcnt;
        int             alloc_size;

        /* inner buffers */
        bufcnt = 1;
        buflens[0] = msgsize;

        if (req->rq_pack_bulk)
                buflens[bufcnt++] = bulk_sec_desc_size(
                                        req->rq_flvr.sf_bulk_hash, 0,
                                        req->rq_bulk_read);
        txtsize = lustre_msg_size_v2(bufcnt, buflens);
        txtsize += GSS_MAX_CIPHER_BLOCK;

        /* wrapper buffers */
        bufcnt = 2;
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);

        alloc_size = lustre_msg_size_v2(bufcnt, buflens);
        /* add space for early reply */
        alloc_size += gss_at_reply_off_priv;

        return do_alloc_repbuf(req, alloc_size);
}

int gss_alloc_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_pack_bulk ||
                (req->rq_bulk_read || req->rq_bulk_write));

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
        case SPTLRPC_SVC_PRIV:
                return gss_alloc_repbuf_priv(sec, req, msgsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}

void gss_free_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;

        req->rq_repmsg = NULL;
}

static int get_enlarged_msgsize(struct lustre_msg *msg,
                                int segment, int newsize)
{
        int save, newmsg_size;

        LASSERT(newsize >= msg->lm_buflens[segment]);

        save = msg->lm_buflens[segment];
        msg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment] = save;

        return newmsg_size;
}

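/*
 * Worked example (illustrative): for a msg with lm_bufcount == 2 and
 * buflens {128, 64}, get_enlarged_msgsize(msg, 1, 100) temporarily sets
 * buflens to {128, 100}, recomputes the overall lustre_msg_v2 size,
 * then restores the original buflens, leaving the message untouched.
 */
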
static int get_enlarged_msgsize2(struct lustre_msg *msg,
                                 int segment1, int newsize1,
                                 int segment2, int newsize2)
{
        int save1, save2, newmsg_size;

        LASSERT(newsize1 >= msg->lm_buflens[segment1]);
        LASSERT(newsize2 >= msg->lm_buflens[segment2]);

        save1 = msg->lm_buflens[segment1];
        save2 = msg->lm_buflens[segment2];
        msg->lm_buflens[segment1] = newsize1;
        msg->lm_buflens[segment2] = newsize2;
        newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment1] = save1;
        msg->lm_buflens[segment2] = save2;

        return newmsg_size;
}

int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int svc,
                            int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     txtsize, sigsize = 0, i;
        int                     newmsg_size, newbuf_size;

        /*
         * gss header is at seg 0;
         * embedded msg is at seg 1;
         * signature (if any) is at the last seg
         */
        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);

        /* 1. compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
        LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);

        /* 2. compute new wrapper msg size */
        if (svc == SPTLRPC_SVC_NULL) {
                /* no signature, get size directly */
                newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
                                                   1, newmsg_size);
        } else {
                txtsize = req->rq_reqbuf->lm_buflens[0];

                if (svc == SPTLRPC_SVC_INTG) {
                        for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
                                txtsize += req->rq_reqbuf->lm_buflens[i];
                        txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
                }

                sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
                LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));

                newbuf_size = get_enlarged_msgsize2(
                                        req->rq_reqbuf,
                                        1, newmsg_size,
                                        msg_last_segidx(req->rq_reqbuf),
                                        sigsize);
        }

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC(newbuf, newbuf_size);
                if (newbuf == NULL)
                        return -ENOMEM;

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
        }

        /* do enlargement, from wrapper to embedded, from end to begin */
        if (svc != SPTLRPC_SVC_NULL)
                _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
                                             msg_last_segidx(req->rq_reqbuf),
                                             sigsize);

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        return 0;
}

int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int segment, int newsize)
{
        struct lustre_msg      *newclrbuf;
        int                     newmsg_size, newclrbuf_size, newcipbuf_size;
        __u32                   buflens[3];

        /*
         * embedded msg is at seg 0 of clear buffer;
         * cipher text is at seg 2 of cipher buffer;
         */
        LASSERT(req->rq_pool ||
                (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
        LASSERT(req->rq_reqbuf == NULL ||
                (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);

        /* compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);

        /* compute new clear buffer size */
        newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
        newclrbuf_size += GSS_MAX_CIPHER_BLOCK;

        /* compute new cipher buffer size */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
        buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
        newcipbuf_size = lustre_msg_size_v2(3, buflens);

        /* handle the case that we put both clear buf and cipher buf into
         * a pre-allocated single buffer. */
        if (unlikely(req->rq_pool) &&
            req->rq_clrbuf >= req->rq_reqbuf &&
            (char *) req->rq_clrbuf <
            (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                /* best case: we still fit into the pre-allocated buffer. */
                if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
                        void *src, *dst;

                        /* move the clear text backward. */
                        src = req->rq_clrbuf;
                        dst = (char *) req->rq_reqbuf + newcipbuf_size;

                        memmove(dst, src, req->rq_clrbuf_len);

                        req->rq_clrbuf = (struct lustre_msg *) dst;
                        req->rq_clrbuf_len = newclrbuf_size;
                        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
                } else {
                        /* sadly we have to split out the clear buffer */
                        LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
                        LASSERT(req->rq_clrbuf_len < newclrbuf_size);
                }
        }

        if (req->rq_clrbuf_len < newclrbuf_size) {
                newclrbuf_size = size_roundup_power2(newclrbuf_size);

                OBD_ALLOC(newclrbuf, newclrbuf_size);
                if (newclrbuf == NULL)
                        return -ENOMEM;

                memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);

                if (req->rq_reqbuf == NULL ||
                    req->rq_clrbuf < req->rq_reqbuf ||
                    (char *) req->rq_clrbuf >=
                    (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                        OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
                }

                req->rq_clrbuf = newclrbuf;
                req->rq_clrbuf_len = newclrbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
        req->rq_reqlen = newmsg_size;

        return 0;
}

int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int segment, int newsize)
{
        int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
        case SPTLRPC_SVC_PRIV:
                return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}

int gss_sec_install_rctx(struct obd_import *imp,
                         struct ptlrpc_sec *sec,
                         struct ptlrpc_cli_ctx *ctx)
{
        struct gss_sec     *gsec;
        struct gss_cli_ctx *gctx;
        int                 rc;

        gsec = container_of(sec, struct gss_sec, gs_base);
        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
        return rc;
}

/********************************************
 * server side API                          *
 ********************************************/

static inline
int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
{
        LASSERT(grctx);
        return (grctx->src_init || grctx->src_init_continue ||
                grctx->src_err_notify);
}

static
void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
{
        if (grctx->src_ctx)
                gss_svc_upcall_put_ctx(grctx->src_ctx);

        sptlrpc_policy_put(grctx->src_base.sc_policy);
        OBD_FREE_PTR(grctx);
}

static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
        atomic_inc(&grctx->src_base.sc_refcount);
}

static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);

        if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
                gss_svc_reqctx_free(grctx);
}

static
int gss_svc_sign(struct ptlrpc_request *req,
                 struct ptlrpc_reply_state *rs,
                 struct gss_svc_reqctx *grctx,
                 __u32 svc)
{
        __u32   flags = 0;
        int     rc;
        ENTRY;

        LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));

        /* embedded lustre_msg might have been shrunk */
        if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
                lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);

        if (req->rq_pack_bulk)
                flags |= LUSTRE_GSS_PACK_BULK;

        rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
                          LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
                          grctx->src_wirectx.gw_seq, svc, NULL);
        if (rc < 0)
                RETURN(rc);

        rs->rs_repdata_len = rc;

        if (likely(req->rq_packed_final)) {
                req->rq_reply_off = gss_at_reply_off_integ;
        } else {
                if (svc == SPTLRPC_SVC_NULL)
                        rs->rs_repbuf->lm_cksum = crc32_le(~(__u32) 0,
                                        lustre_msg_buf(rs->rs_repbuf, 1, 0),
                                        lustre_msg_buflen(rs->rs_repbuf, 1));
                req->rq_reply_off = 0;
        }

        RETURN(0);
}

int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
{
        struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct ptlrpc_reply_state *rs;
        struct gss_err_header     *ghdr;
        int                        replen = sizeof(struct ptlrpc_body);
        int                        rc;
        ENTRY;

        //if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
        //        RETURN(-EINVAL);

        grctx->src_err_notify = 1;
        grctx->src_reserve_len = 0;

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
        if (rc) {
                CERROR("could not pack reply, err %d\n", rc);
                RETURN(rc);
        }

        /* gss hdr */
        rs = req->rq_reply_state;
        LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
        ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        ghdr->gh_major = major;
        ghdr->gh_minor = minor;
        ghdr->gh_handle.len = 0; /* fake context handle */

        rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                                rs->rs_repbuf->lm_buflens);

        CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
               major, minor, libcfs_nid2str(req->rq_peer.nid));
        RETURN(0);
}

static
int gss_svc_handle_init(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx  *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct lustre_msg      *reqbuf = req->rq_reqbuf;
        struct obd_uuid        *uuid;
        struct obd_device      *target;
        rawobj_t                uuid_obj, rvs_hdl, in_token;
        __u32                   lustre_svc;
        __u32                  *secdata, seclen;
        int                     rc;
        ENTRY;

        CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
               libcfs_nid2str(req->rq_peer.nid));

        req->rq_ctx_init = 1;

        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                CERROR("unexpected bulk flag\n");
                RETURN(SECSVC_DROP);
        }

        if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
                CERROR("proc %u: invalid handle length %u\n",
                       gw->gw_proc, gw->gw_handle.len);
                RETURN(SECSVC_DROP);
        }

        if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4) {
                CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        /* ctx initiate payload is in last segment */
        secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
        seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];

        if (seclen < 4 + 4) {
                CERROR("sec size %d too small\n", seclen);
                RETURN(SECSVC_DROP);
        }

        /* lustre svc type */
        lustre_svc = le32_to_cpu(*secdata++);
        seclen -= 4;

        /* extract target uuid; note this code is somewhat fragile because
         * it touches the internal structure of obd_uuid */
        if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
                CERROR("failed to extract target uuid\n");
                RETURN(SECSVC_DROP);
        }
        uuid_obj.data[uuid_obj.len - 1] = '\0';

        uuid = (struct obd_uuid *) uuid_obj.data;
        target = class_uuid2obd(uuid);
        if (!target || target->obd_stopping || !target->obd_set_up) {
                CERROR("target '%s' is not available for context init (%s)\n",
                       uuid->uuid, target == NULL ? "no target" :
                       (target->obd_stopping ? "stopping" : "not set up"));
                RETURN(SECSVC_DROP);
        }

        /* extract reverse handle */
        if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
                CERROR("failed to extract reverse handle\n");
                RETURN(SECSVC_DROP);
        }

        /* extract token */
        if (rawobj_extract(&in_token, &secdata, &seclen)) {
                CERROR("can't extract token\n");
                RETURN(SECSVC_DROP);
        }

        rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
                                        &rvs_hdl, &in_token);
        if (rc != SECSVC_OK)
                RETURN(rc);

        if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_root)
                CWARN("create svc ctx %p: user from %s authenticated as %s\n",
                      grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
                      grctx->src_ctx->gsc_usr_mds ? "mds" : "root");
        else
                CWARN("create svc ctx %p: accept user %u from %s\n",
                      grctx->src_ctx, grctx->src_ctx->gsc_uid,
                      libcfs_nid2str(req->rq_peer.nid));

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor\n");
                        RETURN(SECSVC_DROP);
                }
                if (sptlrpc_unpack_user_desc(reqbuf, 2)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
        }

        req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
        req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);

        RETURN(rc);
}

/*
 * last segment must be the gss signature.
 */
static
int gss_svc_verify_request(struct ptlrpc_request *req,
                           struct gss_svc_reqctx *grctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        struct lustre_msg  *msg = req->rq_reqbuf;
        int                 offset = 2;
        ENTRY;

        *major = GSS_S_COMPLETE;

        if (msg->lm_bufcount < 2) {
                CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
                RETURN(-EINVAL);
        }

        if (gw->gw_svc == SPTLRPC_SVC_NULL)
                goto verified;

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
        if (*major != GSS_S_COMPLETE)
                RETURN(-EACCES);

        if (gctx->gsc_reverse == 0 &&
            gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

verified:
        /* user descriptor */
        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (msg->lm_bufcount < (offset + 1)) {
                        CERROR("no user desc included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        /* check bulk cksum data */
        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                if (msg->lm_bufcount < (offset + 1)) {
                        CERROR("no bulk checksum included\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset))
                        RETURN(-EINVAL);

                req->rq_pack_bulk = 1;
                grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
                grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
        }

        req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
        req->rq_reqlen = msg->lm_buflens[1];
        RETURN(0);
}

static
int gss_svc_unseal_request(struct ptlrpc_request *req,
                           struct gss_svc_reqctx *grctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        struct lustre_msg  *msg = req->rq_reqbuf;
        int                 msglen, offset = 1;
        ENTRY;

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
                                &msglen, req->rq_reqdata_len);
        if (*major != GSS_S_COMPLETE)
                RETURN(-EACCES);

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        if (lustre_unpack_msg(msg, msglen)) {
                CERROR("Failed to unpack after decryption\n");
                RETURN(-EINVAL);
        }
        req->rq_reqdata_len = msglen;

        if (msg->lm_bufcount < 1) {
                CERROR("Invalid buffer: is empty\n");
                RETURN(-EINVAL);
        }

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no user descriptor included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no bulk checksum included\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset))
                        RETURN(-EINVAL);

                req->rq_pack_bulk = 1;
                grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
                grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
        }

        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
        RETURN(0);
}

static
int gss_svc_handle_data(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major = 0;
        int                    rc = 0;
        ENTRY;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                major = GSS_S_NO_CONTEXT;
                goto error;
        }

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_verify_request(req, grctx, gw, &major);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_unseal_request(req, grctx, gw, &major);
                break;
        default:
                CERROR("unsupported gss service %d\n", gw->gw_svc);
                rc = -EINVAL;
        }

        if (rc == 0)
                RETURN(SECSVC_OK);

        CERROR("svc %u failed: major 0x%08x: req xid "LPU64" ctx %p idx "
               LPX64"(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
               grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
               grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
error:
        /* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
         * might happen after a server reboot, to allow recovery. */
        if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
            gss_pack_err_notify(req, major, 0) == 0)
                RETURN(SECSVC_COMPLETE);

        RETURN(SECSVC_DROP);
}

static
int gss_svc_handle_destroy(struct ptlrpc_request *req,
                           struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major;
        ENTRY;

        req->rq_ctx_fini = 1;
        req->rq_no_reply = 1;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
                RETURN(SECSVC_DROP);
        }

        if (gw->gw_svc != SPTLRPC_SVC_INTG) {
                CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
                RETURN(SECSVC_DROP);
        }

        if (gss_svc_verify_request(req, grctx, gw, &major))
                RETURN(SECSVC_DROP);

        CWARN("destroy svc ctx %p idx "LPX64" (%u->%s)\n",
              grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
              grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        gss_svc_upcall_destroy_ctx(grctx->src_ctx);

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (req->rq_reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }
                if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2)) {
                        CERROR("Mal-formed user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
        }

        RETURN(SECSVC_OK);
}

int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
{
        struct gss_header     *ghdr;
        struct gss_svc_reqctx *grctx;
        struct gss_wire_ctx   *gw;
        int                    rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_svc_ctx == NULL);

        if (req->rq_reqbuf->lm_bufcount < 2) {
                CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        ghdr = gss_swab_header(req->rq_reqbuf, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(SECSVC_DROP);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u, expect %u\n", ghdr->gh_version,
                       PTLRPC_GSS_VERSION);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = ghdr->gh_sp;

        /* alloc grctx data */
        OBD_ALLOC_PTR(grctx);
        if (!grctx)
                RETURN(SECSVC_DROP);

        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
        atomic_set(&grctx->src_base.sc_refcount, 1);
        req->rq_svc_ctx = &grctx->src_base;
        gw = &grctx->src_wirectx;

        /* save wire context */
        gw->gw_flags = ghdr->gh_flags;
        gw->gw_proc = ghdr->gh_proc;
        gw->gw_seq = ghdr->gh_seq;
        gw->gw_svc = ghdr->gh_svc;
        rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);

        /* keep the original wire header, which is subject to checksum
         * verification */
        if (lustre_msg_swabbed(req->rq_reqbuf))
                gss_header_swabber(ghdr);

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_INIT:
        case PTLRPC_GSS_PROC_CONTINUE_INIT:
                rc = gss_svc_handle_init(req, gw);
                break;
        case PTLRPC_GSS_PROC_DATA:
                rc = gss_svc_handle_data(req, gw);
                break;
        case PTLRPC_GSS_PROC_DESTROY:
                rc = gss_svc_handle_destroy(req, gw);
                break;
        default:
                CERROR("unknown proc %u\n", gw->gw_proc);
                rc = SECSVC_DROP;
                break;
        }

        switch (rc) {
        case SECSVC_OK:
                LASSERT(grctx->src_ctx);

                req->rq_auth_gss = 1;
                req->rq_auth_remote = grctx->src_ctx->gsc_remote;
                req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
                req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
                req->rq_auth_uid = grctx->src_ctx->gsc_uid;
                req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
                break;
        case SECSVC_COMPLETE:
                break;
        case SECSVC_DROP:
                gss_svc_reqctx_free(grctx);
                req->rq_svc_ctx = NULL;
                break;
        }

        RETURN(rc);
}
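
/* forcibly destroy the cached server-side context attached to svc_ctx */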
void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_svc_reqctx *grctx;
        ENTRY;
        if (svc_ctx == NULL) {
                EXIT;
                return;
        }
        grctx = gss_svc_ctx2reqctx(svc_ctx);
        CWARN("gss svc invalidate ctx %p(%u)\n",
              grctx->src_ctx, grctx->src_ctx->gsc_uid);
        gss_svc_upcall_destroy_ctx(grctx->src_ctx);
        EXIT;
}
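
/*
 * Space to reserve in a reply buffer for the gss signature or cipher
 * token: a special context carries a pre-reserved length; otherwise ask
 * the mechanism for an estimate.
 */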
static
int gss_svc_payload(struct gss_svc_reqctx *grctx, int early,
                    int msgsize, int privacy)
{
        /* treat early replies normally: although an early reply shares the
         * same ctx as the original request, the special ctx flags must be
         * ignored for it */
        if (early == 0 && gss_svc_reqctx_is_special(grctx))
                return grctx->src_reserve_len;

        return gss_mech_payload(NULL, msgsize, privacy);
}
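
/*
 * Allocate the reply state and reply buffer. The buffer layout is,
 * roughly:
 *
 *   privacy:   | clear reply msg [ | bulk sec desc ] |   (sealed later into
 *              | gss header | gss token | by gss_svc_seal())
 *   otherwise: | gss header | reply msg [ | bulk sec desc ] [ | signature ] |
 */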
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
        struct gss_svc_reqctx     *grctx;
        struct ptlrpc_reply_state *rs;
        int                        early, privacy, svc, bsd_off = 0;
        __u32                      ibuflens[2], buflens[4];
        int                        ibufcnt = 0, bufcnt;
        int                        txtsize, wmsg_size, rs_size;
        ENTRY;

        LASSERT(msglen % 8 == 0);

        if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
                CERROR("client request bulk sec on non-bulk rpc\n");
                RETURN(-EPROTO);
        }

        svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
        early = (req->rq_packed_final == 0);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        if (!early && gss_svc_reqctx_is_special(grctx))
                privacy = 0;
        else
                privacy = (svc == SPTLRPC_SVC_PRIV);

        if (privacy) {
                /* inner clear buffers */
                ibufcnt = 1;
                ibuflens[0] = msglen;

                if (req->rq_pack_bulk) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = ibufcnt;
                        ibuflens[ibufcnt++] = bulk_sec_desc_size(
                                                grctx->src_reqbsd->bsd_hash_alg,
                                                0, req->rq_bulk_read);
                }

                txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
                txtsize += GSS_MAX_CIPHER_BLOCK;

                /* wrapper buffer */
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = gss_svc_payload(grctx, early, txtsize, 1);
        } else {
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = msglen;

                txtsize = buflens[0];
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[1];

                if (req->rq_pack_bulk) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = bufcnt;
                        buflens[bufcnt] = bulk_sec_desc_size(
                                                grctx->src_reqbsd->bsd_hash_alg,
                                                0, req->rq_bulk_read);
                        if (svc == SPTLRPC_SVC_INTG)
                                txtsize += buflens[bufcnt];
                        bufcnt++;
                }

                if ((!early && gss_svc_reqctx_is_special(grctx)) ||
                    svc != SPTLRPC_SVC_NULL)
                        buflens[bufcnt++] = gss_svc_payload(grctx, early,
                                                            txtsize, 0);
        }

        wmsg_size = lustre_msg_size_v2(bufcnt, buflens);

        rs_size = sizeof(*rs) + wmsg_size;
        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = wmsg_size;

        /* initialize the buffer */
        if (privacy) {
                lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
        } else {
                lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
                rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
        }

        if (bsd_off) {
                grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
                grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
                                                           bsd_off);
        }

        gss_svc_reqctx_addref(grctx);
        rs->rs_svc_ctx = req->rq_svc_ctx;

        LASSERT(rs->rs_msg);
        req->rq_reply_state = rs;
        RETURN(0);
}
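
/*
 * Privacy-mode reply path: wrap (encrypt) the clear reply message into a
 * gss token, then rebuild rs_repbuf as | gss header | gss token |.
 */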
static int gss_svc_seal(struct ptlrpc_request *req,
                        struct ptlrpc_reply_state *rs,
                        struct gss_svc_reqctx *grctx)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        rawobj_t            hdrobj, msgobj, token;
        struct gss_header  *ghdr;
        __u8               *token_buf;
        int                 token_buflen;
        __u32               buflens[2], major;
        int                 msglen, rc;
        ENTRY;

        /* get clear data length. note the embedded lustre_msg might
         * have been shrunk */
        if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
                msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
        else
                msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                            rs->rs_repbuf->lm_buflens);

        /* temporarily use the tail of the buffer to hold gss header data */
        LASSERT(msglen + PTLRPC_GSS_HEADER_SIZE <= rs->rs_repbuf_len);
        ghdr = (struct gss_header *) ((char *) rs->rs_repbuf +
                               rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = LUSTRE_SP_ANY;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
        ghdr->gh_seq = grctx->src_wirectx.gw_seq;
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = 0;
        if (req->rq_pack_bulk)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;

        /* allocate temporary cipher buffer */
        token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
        OBD_ALLOC(token_buf, token_buflen);
        if (token_buf == NULL)
                RETURN(-ENOMEM);

        hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
        hdrobj.data = (__u8 *) ghdr;
        msgobj.len = msglen;
        msgobj.data = (__u8 *) rs->rs_repbuf;
        token.len = token_buflen;
        token.data = token_buf;

        major = lgss_wrap(gctx->gsc_mechctx, &hdrobj, &msgobj,
                          rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE, &token);
        if (major != GSS_S_COMPLETE) {
                CERROR("wrap message error: %08x\n", major);
                GOTO(out_free, rc = -EPERM);
        }
        LASSERT(token.len <= token_buflen);

        /* we are about to overwrite data at rs->rs_repbuf, nullify pointers
         * into it to catch further illegal usage. */
        if (req->rq_pack_bulk) {
                grctx->src_repbsd = NULL;
                grctx->src_repbsd_size = 0;
        }

        /* now fill the actual wire data
         * - gss header
         * - gss token */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = token.len;

        rs->rs_repdata_len = lustre_msg_size_v2(2, buflens);
        LASSERT(rs->rs_repdata_len <= rs->rs_repbuf_len);

        lustre_init_msg_v2(rs->rs_repbuf, 2, buflens, NULL);
        rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        memcpy(lustre_msg_buf(rs->rs_repbuf, 0, 0), ghdr,
               PTLRPC_GSS_HEADER_SIZE);
        memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);

        /* reply offset */
        if (likely(req->rq_packed_final))
                req->rq_reply_off = gss_at_reply_off_priv;
        else
                req->rq_reply_off = 0;

        /* to catch the upper layer's further access */
        rs->rs_msg = NULL;
        req->rq_repmsg = NULL;
        req->rq_replen = 0;

        rc = 0;
out_free:
        OBD_FREE(token_buf, token_buflen);
        RETURN(rc);
}
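
/*
 * Transform the prepared reply according to the request's service type:
 * sign for NULL/AUTH/INTG, seal for PRIV. Replies on special contexts
 * are already packed and only need the reply offset set.
 */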
int gss_svc_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct gss_wire_ctx       *gw = &grctx->src_wirectx;
        int                        early, rc;
        ENTRY;

        early = (req->rq_packed_final == 0);

        if (!early && gss_svc_reqctx_is_special(grctx)) {
                LASSERT(rs->rs_repdata_len != 0);

                req->rq_reply_off = gss_at_reply_off_integ;
                RETURN(0);
        }

        /* early replies can happen in many cases */
        if (!early &&
            gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
            gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
                CERROR("proc %d not supported\n", gw->gw_proc);
                RETURN(-EINVAL);
        }

        LASSERT(grctx->src_ctx);

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_seal(req, rs, grctx);
                break;
        default:
                CERROR("Unknown service %d\n", gw->gw_svc);
                GOTO(out, rc = -EINVAL);
        }

out:
        RETURN(rc);
}
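
/* drop the reply state's reference on the gss request context, then free
 * the reply state itself unless it was preallocated */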
void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
{
        struct gss_svc_reqctx *grctx;

        LASSERT(rs->rs_svc_ctx);
        grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);

        gss_svc_reqctx_decref(grctx);
        rs->rs_svc_ctx = NULL;

        if (!rs->rs_prealloc)
                OBD_FREE(rs, rs->rs_size);
}

void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->sc_refcount) == 0);
        gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
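
/*
 * Duplicate an established server-side context into a reverse client
 * context, so that callback rpcs from the server can reuse the
 * negotiated gss state instead of doing a fresh handshake.
 */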
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
                         struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_cli_ctx *cli_gctx = ctx2gctx(cli_ctx);
        struct gss_svc_ctx *svc_gctx = gss_svc_ctx2gssctx(svc_ctx);
        struct gss_ctx     *mechctx = NULL;

        LASSERT(svc_gctx && svc_gctx->gsc_mechctx);

        cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
        cli_gctx->gc_win = GSS_SEQ_WIN;

        /* a reverse ctx might get lost in some recovery situations, and the
         * same svc_ctx is then used to re-create it. if a callback had been
         * sent out before that, a new reverse ctx starting with sequence 0
         * would cause future callback rpcs to be treated as replays.
         *
         * each reverse root ctx records its latest sequence number on its
         * buddy svcctx before being destroyed, so continue from it here. */
        atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);

        if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
                CERROR("failed to dup svc handle\n");
                goto err_out;
        }

        if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
            GSS_S_COMPLETE) {
                CERROR("failed to copy mech context\n");
                goto err_svc_handle;
        }

        if (rawobj_dup(&cli_gctx->gc_handle, &svc_gctx->gsc_rvs_hdl)) {
                CERROR("failed to dup reverse handle\n");
                goto err_ctx;
        }

        cli_gctx->gc_mechctx = mechctx;
        gss_cli_ctx_uptodate(cli_gctx);

        return 0;

err_ctx:
        lgss_delete_sec_context(&mechctx);
err_svc_handle:
        rawobj_free(&cli_gctx->gc_svc_handle);
err_out:
        return -ENOMEM;
}
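
/* compute, once at module load, the fixed reply offsets used when
 * packing early replies in integrity and privacy modes */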
static void gss_init_at_reply_offset(void)
{
        __u32 buflens[3];
        int clearsize;

        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = lustre_msg_early_size();
        buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
        gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);

        buflens[0] = lustre_msg_early_size();
        clearsize = lustre_msg_size_v2(1, buflens);
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(NULL, clearsize, 0);
        buflens[2] = gss_cli_payload(NULL, clearsize, 1);
        gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
}
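
/*
 * Module init: bring up lproc, the cli/svc upcall channels and the
 * kerberos mechanism before registering the keyring (and optionally
 * pipefs) policy, since the policy might be used immediately after
 * registration.
 */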
int __init sptlrpc_gss_init(void)
{
        int rc;

        rc = gss_init_lproc();
        if (rc)
                return rc;

        rc = gss_init_cli_upcall();
        if (rc)
                goto out_lproc;

        rc = gss_init_svc_upcall();
        if (rc)
                goto out_cli_upcall;

        rc = init_kerberos_module();
        if (rc)
                goto out_svc_upcall;

        /* register the policy after everything else is initialized, because
         * it might be in use immediately after the registration. */
        rc = gss_init_keyring();
        if (rc)
                goto out_kerberos;

#ifdef HAVE_GSS_PIPEFS
        rc = gss_init_pipefs();
        if (rc)
                goto out_keyring;
#endif

        gss_init_at_reply_offset();

        return 0;

#ifdef HAVE_GSS_PIPEFS
out_keyring:
        gss_exit_keyring();
#endif
out_kerberos:
        cleanup_kerberos_module();
out_svc_upcall:
        gss_exit_svc_upcall();
out_cli_upcall:
        gss_exit_cli_upcall();
out_lproc:
        gss_exit_lproc();
        return rc;
}
static void __exit sptlrpc_gss_exit(void)
{
        gss_exit_keyring();
#ifdef HAVE_GSS_PIPEFS
        gss_exit_pipefs();
#endif
        cleanup_kerberos_module();
        gss_exit_svc_upcall();
        gss_exit_cli_upcall();
        gss_exit_lproc();
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("GSS security policy for Lustre");
MODULE_LICENSE("GPL");

module_init(sptlrpc_gss_init);
module_exit(sptlrpc_gss_exit);