/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * linux/net/sunrpc/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <obd_cksum.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

#include <linux/crypto.h>
#include <linux/crc32.h>
/*
 * early replies have a fixed size in both privacy and integrity modes,
 * so we calculate the two offsets only once.
 */
static int gss_at_reply_off_integ;
static int gss_at_reply_off_priv;

static inline int msg_last_segidx(struct lustre_msg *msg)
{
	LASSERT(msg->lm_bufcount > 0);
	return msg->lm_bufcount - 1;
}

static inline int msg_last_seglen(struct lustre_msg *msg)
{
	return msg->lm_buflens[msg_last_segidx(msg)];
}

/* wire data swabber */
static void gss_header_swabber(struct gss_header *ghdr)
{
	__swab32s(&ghdr->gh_flags);
	__swab32s(&ghdr->gh_proc);
	__swab32s(&ghdr->gh_seq);
	__swab32s(&ghdr->gh_svc);
	__swab32s(&ghdr->gh_pad1);
	__swab32s(&ghdr->gh_handle.len);
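	/* note: only the handle length is byte-swapped here; the handle
	 * bytes themselves are an opaque blob and are left as-is */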
}

static
struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
				   int swabbed)
{
	struct gss_header *ghdr;

	ghdr = lustre_msg_buf(msg, segment, sizeof(*ghdr));
	if (ghdr == NULL)
		return NULL;

	if (swabbed)
		gss_header_swabber(ghdr);

	if (sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
		CERROR("gss header has length %d, now %u received\n",
		       (int) sizeof(*ghdr) + ghdr->gh_handle.len,
		       msg->lm_buflens[segment]);
		return NULL;
	}

	return ghdr;
}

/*
 * the payload size should be obtained from the mechanism. but since we
 * currently only support kerberos, we can simply use fixed values.
 *
 *  - krb5 checksum: 20
 *
 * for privacy mode, the payload also includes the cipher text, which has
 * the same size as the plain text, plus a possible confounder and padding,
 * each at most one maximum-size cipher block.
 */
#define GSS_KRB5_INTEG_MAX_PAYLOAD      (40)
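/*
 * Note on the privacy-mode arithmetic below: to our understanding, the
 * three extra 16-byte terms are headroom for the krb5 confounder, the
 * padding, and cipher-block rounding, each bounded by the maximum cipher
 * block size of the supported krb5 enctypes.
 */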
int gss_mech_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
	if (privacy)
		return GSS_KRB5_INTEG_MAX_PAYLOAD + 16 + 16 + 16 + msgsize;

	return GSS_KRB5_INTEG_MAX_PAYLOAD;
}

/*
 * on success, return the signature size; otherwise < 0 to indicate an error
 */
static int gss_sign_msg(struct lustre_msg *msg, struct gss_ctx *mechctx,
			enum lustre_sec_part sp, __u32 flags, __u32 proc,
			__u32 seq, __u32 svc, rawobj_t *handle)
{
	struct gss_header *ghdr;
	rawobj_t text[4], mic;
	int textcnt, max_textcnt, mic_idx;
	__u32 major;

	LASSERT(msg->lm_bufcount >= 2);

	/* gss hdr */
	LASSERT(msg->lm_buflens[0] >=
		sizeof(*ghdr) + (handle ? handle->len : 0));
	ghdr = lustre_msg_buf(msg, 0, 0);

	ghdr->gh_version = PTLRPC_GSS_VERSION;
	ghdr->gh_sp = (__u8) sp;
	ghdr->gh_flags = flags;
	ghdr->gh_proc = proc;
	ghdr->gh_seq = seq;
	ghdr->gh_svc = svc;
	if (!handle) {
		/* fill in a fake one */
		ghdr->gh_handle.len = 0;
	} else {
		ghdr->gh_handle.len = handle->len;
		memcpy(ghdr->gh_handle.data, handle->data, handle->len);
	}

	/* no actual signature for null mode */
	if (svc == SPTLRPC_SVC_NULL)
		return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

	mic_idx = msg_last_segidx(msg);
	max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

	for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
		text[textcnt].len = msg->lm_buflens[textcnt];
		text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
	}

	mic.len = msg->lm_buflens[mic_idx];
	mic.data = lustre_msg_buf(msg, mic_idx, 0);

	major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
	if (major != GSS_S_COMPLETE) {
		CERROR("failed to generate MIC: %08x\n", major);
		return -EPERM;
	}
	LASSERT(mic.len <= msg->lm_buflens[mic_idx]);
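
	/* the MIC may be shorter than the space reserved for it; shrink
	 * the last segment down to the actual MIC length */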
	return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
}

static
__u32 gss_verify_msg(struct lustre_msg *msg, struct gss_ctx *mechctx,
		     __u32 svc)
{
	rawobj_t text[4], mic;
	int textcnt, max_textcnt;
	int mic_idx;
	__u32 major;

	LASSERT(msg->lm_bufcount >= 2);

	if (svc == SPTLRPC_SVC_NULL)
		return GSS_S_COMPLETE;

	mic_idx = msg_last_segidx(msg);
	max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

	for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
		text[textcnt].len = msg->lm_buflens[textcnt];
		text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
	}

	mic.len = msg->lm_buflens[mic_idx];
	mic.data = lustre_msg_buf(msg, mic_idx, 0);

	major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
	if (major != GSS_S_COMPLETE)
		CERROR("mic verify error: %08x\n", major);

	return major;
}
/*
 * return gss error code
 */
static
__u32 gss_unseal_msg(struct gss_ctx *mechctx, struct lustre_msg *msgbuf,
		     int *msg_len, int msgbuf_len)
{
	rawobj_t clear_obj, hdrobj, token;
	__u8 *clear_buf;
	int clear_buflen;
	__u32 major;

	ENTRY;

	if (msgbuf->lm_bufcount != 2) {
		CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
		RETURN(GSS_S_FAILURE);
	}

	/* allocate a temporary clear-text buffer, the same size as the
	 * token; we assume the final clear text size <= token size
	 */
	clear_buflen = lustre_msg_buflen(msgbuf, 1);
	OBD_ALLOC_LARGE(clear_buf, clear_buflen);
	if (!clear_buf)
		RETURN(GSS_S_FAILURE);

	hdrobj.len = lustre_msg_buflen(msgbuf, 0);
	hdrobj.data = lustre_msg_buf(msgbuf, 0, 0);
	token.len = lustre_msg_buflen(msgbuf, 1);
	token.data = lustre_msg_buf(msgbuf, 1, 0);
	clear_obj.len = clear_buflen;
	clear_obj.data = clear_buf;

	major = lgss_unwrap(mechctx, &hdrobj, &token, &clear_obj);
	if (major != GSS_S_COMPLETE) {
		CERROR("unwrap message error: %08x\n", major);
		GOTO(out_free, major = GSS_S_FAILURE);
	}
	LASSERT(clear_obj.len <= clear_buflen);
	LASSERT(clear_obj.len <= msgbuf_len);
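
	/* lgss_unwrap() put the clear text into the temporary buffer; it is
	 * safe to copy it back over msgbuf because clear_obj.len is bounded
	 * by both the token size and msgbuf_len */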
	/* now the decrypted message */
	memcpy(msgbuf, clear_obj.data, clear_obj.len);
	*msg_len = clear_obj.len;

	major = GSS_S_COMPLETE;
out_free:
	OBD_FREE_LARGE(clear_buf, clear_buflen);
	RETURN(major);
}

/* gss client context manipulation helpers */

int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(atomic_read(&ctx->cc_refcount));

	if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
		if (!ctx->cc_early_expire)
			clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

		CDEBUG(D_SEC, "ctx %p(%u->%s) get expired: %lld(%+llds)\n",
		       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
		       ctx->cc_expire,
		       ctx->cc_expire == 0 ? 0 :
		       ctx->cc_expire - ktime_get_real_seconds());

		sptlrpc_cli_ctx_wakeup(ctx);
		return 1;
	}

	return 0;
}

/*
 * return 1 if the context is dead.
 */
int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
{
	if (unlikely(cli_ctx_is_dead(ctx)))
		return 1;

	/* cc_expire == 0 means never expire. a newly created gss context
	 * which is still in the middle of its upcall may have 0 expiration
	 */
	if (ctx->cc_expire == 0)
		return 0;

	/* check real expiration */
	if (ctx->cc_expire > ktime_get_real_seconds())
		return 0;

	cli_ctx_expire(ctx);
	return 1;
}
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
	struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
	time64_t ctx_expiry;

	if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
		CERROR("ctx %p(%u): unable to inquire, expire it now\n",
		       gctx, ctx->cc_vcred.vc_uid);
		ctx_expiry = 1; /* make it expired now */
	}

	ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
					      ctx->cc_sec->ps_flvr.sf_flags);

	/* At this point this ctx might have been marked as dead by
	 * someone else, in which case nobody will make further use
	 * of it. we don't care; marking it UPTODATE helps destroy
	 * the server-side context when this ctx is destroyed.
	 */
	set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

	if (sec_is_reverse(ctx->cc_sec)) {
		CDEBUG(D_SEC, "server installed reverse ctx %p idx %#llx, expiry %lld(%+llds)\n",
		       ctx, gss_handle_to_u64(&gctx->gc_handle),
		       ctx->cc_expire,
		       ctx->cc_expire - ktime_get_real_seconds());
	} else {
		CDEBUG(D_SEC, "client refreshed ctx %p idx %#llx (%u->%s), expiry %lld(%+llds)\n",
		       ctx, gss_handle_to_u64(&gctx->gc_handle),
		       ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
		       ctx->cc_expire,
		       ctx->cc_expire - ktime_get_real_seconds());

		/* install reverse svc ctx for root context */
		if (ctx->cc_vcred.vc_uid == 0)
			gss_sec_install_rctx(ctx->cc_sec->ps_import,
					     ctx->cc_sec, ctx);
	}

	sptlrpc_cli_ctx_wakeup(ctx);
}

static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
{
	LASSERT(gctx->gc_base.cc_sec);

	if (gctx->gc_mechctx) {
		lgss_delete_sec_context(&gctx->gc_mechctx);
		gctx->gc_mechctx = NULL;
	}

	if (!rawobj_empty(&gctx->gc_svc_handle)) {
		/* forward ctx: mark the buddy reverse svcctx soon-expire. */
		if (!sec_is_reverse(gctx->gc_base.cc_sec) &&
		    !rawobj_empty(&gctx->gc_svc_handle))
			gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);

		rawobj_free(&gctx->gc_svc_handle);
	}

	rawobj_free(&gctx->gc_handle);
}
/*
 * Based on the sequence number algorithm as specified in RFC 2203.
 *
 * Modified for our own problem: an arriving request has a valid sequence
 * number, but unwrapping the request might take a long time, after which
 * its sequence number is no longer valid (it has fallen behind the window).
 * This rarely happens, mostly under extreme load.
 *
 * Note we should not check the sequence before verifying the integrity of
 * the incoming request, because a single attacking request with a high
 * sequence number might cause all following requests to be dropped.
 *
 * So here we use a multi-phase approach: prepare 2 sequence windows,
 * a "main window" for normal sequences and a "back window" for sequences
 * that fall behind, and a 3-phase checking mechanism:
 * 0 - before integrity verification, perform an initial sequence check in
 *     the main window, which only tests and doesn't actually set any bits.
 *     if the sequence is high above the window, or fits in the window and
 *     the bit is 0, then accept and proceed to integrity verification.
 *     otherwise reject this sequence.
 * 1 - after integrity verification, check in the main window again. if this
 *     sequence is high above the window, or fits in the window and the bit
 *     is 0, then set the bit and accept; if it fits in the window but the
 *     bit is already set, then reject; if it falls behind the window, then
 *     proceed to phase 2.
 * 2 - check in the back window. if it is high above the window, or fits in
 *     the window and the bit is 0, then set the bit and accept. otherwise
 *     reject.
 *
 * \return 1:  looks like a replay
 * \return 0:  is ok
 * \return -1: is a replay
 *
 * Note phase 0 is necessary, because otherwise a replay attack using a
 * sequence number between the 2 windows couldn't be detected.
 *
 * This mechanism can't totally solve the problem, but it does help reduce
 * the number of valid requests that get dropped.
 */
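/*
 * Worked example (illustrative numbers only): with win_size == 32 and
 * *max_seq == 100, sequences 69..100 map into the main window, anything
 * above 100 slides the window forward, and anything at or below 68 has
 * fallen behind and is left to the back window in phase 2.
 */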
int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
		     __u32 seq_num, int phase)
{
	LASSERT(phase >= 0 && phase <= 2);

	if (seq_num > *max_seq) {
		/*
		 * 1. high above the window
		 */
		if (phase == 0)
			return 0;

		if (seq_num >= *max_seq + win_size) {
			memset(window, 0, win_size / 8);
			*max_seq = seq_num;
		} else {
			while (*max_seq < seq_num) {
				(*max_seq)++;
				__clear_bit((*max_seq) % win_size, window);
			}
		}
		__set_bit(seq_num % win_size, window);
	} else if (seq_num + win_size <= *max_seq) {
		/*
		 * 2. low behind the window
		 */
		if (phase == 0 || phase == 2)
			goto replay;

		CWARN("seq %u is %u behind (size %d), check backup window\n",
		      seq_num, *max_seq - win_size - seq_num, win_size);
		return 1;
	} else {
		/*
		 * 3. fit into the window
		 */
		switch (phase) {
		case 0:
			if (test_bit(seq_num % win_size, window))
				goto replay;
			break;
		case 1:
		case 2:
			if (__test_and_set_bit(seq_num % win_size, window))
				goto replay;
			break;
		}
	}

	return 0;

replay:
	CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
	       seq_num,
	       seq_num + win_size > *max_seq ? "in" : "behind",
	       phase == 2 ? "backup " : "main",
	       *max_seq, win_size);
	return -1;
}
/*
 * Based on the sequence number algorithm as specified in RFC 2203.
 *
 * if @set == 0: initial check, don't set any bit in the window
 * if @set == 1: final check, set the bit in the window
 */
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
	int rc = 0;

	spin_lock(&ssd->ssd_lock);

	if (set == 0) {
		/*
		 * phase 0 testing
		 */
		rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
				      &ssd->ssd_max_main, seq_num, 0);
		if (unlikely(rc))
			gss_stat_oos_record_svc(0, 1);
	} else {
		/*
		 * phase 1 checking main window
		 */
		rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
				      &ssd->ssd_max_main, seq_num, 1);
		switch (rc) {
		case -1:
			gss_stat_oos_record_svc(1, 1);
			/* fall through */
		case 0:
			goto exit;
		}
		/*
		 * phase 2 checking back window
		 */
		rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
				      &ssd->ssd_max_back, seq_num, 2);
		if (rc != 0)
			gss_stat_oos_record_svc(2, 1);
		else
			gss_stat_oos_record_svc(2, 0);
	}
exit:
	spin_unlock(&ssd->ssd_lock);
	return rc;
}
static inline int gss_cli_payload(struct ptlrpc_cli_ctx *ctx, int msgsize,
				  int privacy)
{
	return gss_mech_payload(NULL, msgsize, privacy);
}

static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
				struct sptlrpc_flavor *flvr,
				int reply, int read)
{
	int payload = sizeof(struct ptlrpc_bulk_sec_desc);

	LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);

	if ((!reply && !read) || (reply && read)) {
		switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
		case SPTLRPC_BULK_SVC_NULL:
			break;
		case SPTLRPC_BULK_SVC_INTG:
			payload += gss_cli_payload(ctx, 0, 0);
			break;
		case SPTLRPC_BULK_SVC_PRIV:
			payload += gss_cli_payload(ctx, 0, 1);
			break;
		case SPTLRPC_BULK_SVC_AUTH:
			break;
		default:
			LBUG();
		}
	}

	return payload;
}

int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
{
	return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
}
void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
{
	buf[0] = '\0';

	if (flags & PTLRPC_CTX_NEW)
		strlcat(buf, "new, ", bufsize);
	if (flags & PTLRPC_CTX_UPTODATE)
		strlcat(buf, "uptodate, ", bufsize);
	if (flags & PTLRPC_CTX_DEAD)
		strlcat(buf, "dead, ", bufsize);
	if (flags & PTLRPC_CTX_ERROR)
		strlcat(buf, "error, ", bufsize);
	if (flags & PTLRPC_CTX_CACHED)
		strlcat(buf, "cached, ", bufsize);
	if (flags & PTLRPC_CTX_ETERNAL)
		strlcat(buf, "eternal, ", bufsize);
	if (buf[0] != '\0' && buf[strlen(buf) - 2] == ',')
		buf[strlen(buf) - 2] = '\0';
}
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct gss_cli_ctx *gctx = ctx2gctx(ctx);
	__u32 flags = 0, seq, svc;
	int rc;

	ENTRY;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
	LASSERT(req->rq_cli_ctx == ctx);

	/* nothing to do for context negotiation RPCs */
	if (req->rq_ctx_init)
		RETURN(0);

	svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
	if (req->rq_pack_bulk)
		flags |= LUSTRE_GSS_PACK_BULK;
	if (req->rq_pack_udesc)
		flags |= LUSTRE_GSS_PACK_USER;

redo:
	seq = atomic_inc_return(&gctx->gc_seq);

	rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
			  ctx->cc_sec->ps_part,
			  flags, gctx->gc_proc, seq, svc,
			  &gctx->gc_handle);
	if (rc < 0)
		RETURN(rc);

	/* gss_sign_msg() might take a long time to finish, during which
	 * more rpcs could be wrapped up and sent out. if we find too many
	 * of them, we should repack this rpc, because sending it too late
	 * might cause its sequence number to fall behind the window on the
	 * server and get it dropped. the same applies to gss_cli_ctx_seal().
	 *
	 * Note: null mode doesn't check sequence numbers.
	 */
	if (svc != SPTLRPC_SVC_NULL &&
	    atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
		int behind = atomic_read(&gctx->gc_seq) - seq;

		gss_stat_oos_record_cli(behind);
		CWARN("req %p: %u behind, retry signing\n", req, behind);
		goto redo;
	}

	req->rq_reqdata_len = rc;
	RETURN(0);
}
static
int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
				  struct ptlrpc_request *req,
				  struct gss_header *ghdr)
{
	struct gss_err_header *errhdr;
	int rc;

	LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);

	errhdr = (struct gss_err_header *) ghdr;

	CWARN("%s: req x%llu/t%llu, ctx %p idx %#llx(%u->%s): %sserver respond (%08x/%08x)\n",
	      ctx->cc_sec->ps_import->imp_obd->obd_name,
	      req->rq_xid, req->rq_transno, ctx,
	      gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
	      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
	      sec_is_reverse(ctx->cc_sec) ? "reverse " : "",
	      errhdr->gh_major, errhdr->gh_minor);

	/* context fini rpc, let it fail */
	if (req->rq_ctx_fini) {
		CWARN("%s: context fini rpc failed: rc = %d\n",
		      ctx->cc_sec->ps_import->imp_obd->obd_name, -EINVAL);
		return -EINVAL;
	}

	/* reverse sec, just return an error; don't expire this ctx because
	 * it's crucial for callback rpcs. note that if a callback rpc fails
	 * because of a bit flip during network transfer, the client will be
	 * evicted directly. so, more gracefully, we probably want to let it
	 * retry a limited number of times.
	 */
	if (sec_is_reverse(ctx->cc_sec) &&
	    errhdr->gh_major != GSS_S_NO_CONTEXT)
		return -EINVAL;

	if (errhdr->gh_major != GSS_S_NO_CONTEXT &&
	    errhdr->gh_major != GSS_S_BAD_SIG)
		return -EACCES;

	/* a server returning NO_CONTEXT might be caused by context expiry
	 * or server reboot/failover. we try to refresh a new ctx, which
	 * should be transparent to the upper layer.
	 *
	 * In some cases, our gss handle may happen to be identical to
	 * another handle, since the handle itself is not fully random. In
	 * the krb5 case GSS_S_BAD_SIG will be returned; other mechanisms
	 * may return other gss errors.
	 *
	 * if we add a new mechanism, make sure the correct error is
	 * returned in this case.
	 */
	CWARN("%s: %s might have lost the context (%s), retrying\n",
	      ctx->cc_sec->ps_import->imp_obd->obd_name,
	      sec_is_reverse(ctx->cc_sec) ? "client" : "server",
	      errhdr->gh_major == GSS_S_NO_CONTEXT ? "NO_CONTEXT" : "BAD_SIG");

	sptlrpc_cli_ctx_expire(ctx);

	/* we need to replace the ctx right here, otherwise during
	 * resend we'll hit the logic in sptlrpc_req_refresh_ctx()
	 * which keeps the ctx with the RESEND flag, and we'd never
	 * get rid of this ctx.
	 */
	rc = sptlrpc_req_replace_dead_ctx(req);
	if (rc == 0)
		req->rq_resend = 1;

	return rc;
}
int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct gss_cli_ctx *gctx;
	struct gss_header *ghdr, *reqhdr;
	struct lustre_msg *msg = req->rq_repdata;
	__u32 major;
	int pack_bulk, swabbed, rc = 0;

	ENTRY;

	LASSERT(req->rq_cli_ctx == ctx);
	LASSERT(msg);

	gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

	/* special case for context negotiation: rq_repmsg/rq_replen are
	 * not actually used currently, but early replies are always
	 * treated normally
	 */
	if (req->rq_ctx_init && !req->rq_early) {
		req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
		req->rq_replen = msg->lm_buflens[1];
		RETURN(0);
	}

	if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
		CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
		RETURN(-EPROTO);
	}

	swabbed = req_capsule_rep_need_swab(&req->rq_pill);

	ghdr = gss_swab_header(msg, 0, swabbed);
	if (ghdr == NULL) {
		CERROR("can't decode gss header\n");
		RETURN(-EPROTO);
	}

	/* sanity checks against the gss header of the request */
	reqhdr = lustre_msg_buf(req->rq_reqbuf, 0, sizeof(*reqhdr));
	LASSERT(reqhdr);

	if (ghdr->gh_version != reqhdr->gh_version) {
		CERROR("gss version %u mismatch, expect %u\n",
		       ghdr->gh_version, reqhdr->gh_version);
		RETURN(-EPROTO);
	}

	switch (ghdr->gh_proc) {
	case PTLRPC_GSS_PROC_DATA:
		pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
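
		/* equi(a, b) is true iff both or neither of a and b hold:
		 * the reply's bulk flag must match what the request packed */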
		if (!req->rq_early &&
		    !equi(req->rq_pack_bulk == 1, pack_bulk)) {
			CERROR("%s bulk flag in reply\n",
			       req->rq_pack_bulk ? "missing" : "unexpected");
			RETURN(-EPROTO);
		}

		if (ghdr->gh_seq != reqhdr->gh_seq) {
			CERROR("seqnum %u mismatch, expect %u\n",
			       ghdr->gh_seq, reqhdr->gh_seq);
			RETURN(-EPROTO);
		}

		if (ghdr->gh_svc != reqhdr->gh_svc) {
			CERROR("svc %u mismatch, expect %u\n",
			       ghdr->gh_svc, reqhdr->gh_svc);
			RETURN(-EPROTO);
		}

		if (swabbed)
			gss_header_swabber(ghdr);

		major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
		if (major != GSS_S_COMPLETE) {
			CERROR("failed to verify reply: %x\n", major);
			RETURN(-EPERM);
		}

		if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
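			/* early replies under SVC_NULL carry no MIC; an
			 * inline crc32 of the message is checked instead */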
			__u32 cksum;

			cksum = crc32_le(~(__u32) 0,
					 lustre_msg_buf(msg, 1, 0),
					 lustre_msg_buflen(msg, 1));
			if (cksum != msg->lm_cksum) {
				CWARN("early reply checksum mismatch: %08x != %08x\n",
				      cksum, msg->lm_cksum);
				RETURN(-EPROTO);
			}
		}

		if (pack_bulk) {
			/* bulk checksum is right after the lustre msg */
			if (msg->lm_bufcount < 3) {
				CERROR("Invalid reply bufcount %u\n",
				       msg->lm_bufcount);
				RETURN(-EPROTO);
			}

			rc = bulk_sec_desc_unpack(msg, 2, swabbed);
			if (rc) {
				CERROR("unpack bulk desc: %d\n", rc);
				RETURN(rc);
			}
		}

		req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
		req->rq_replen = msg->lm_buflens[1];
		break;
	case PTLRPC_GSS_PROC_ERR:
		if (req->rq_early) {
			CERROR("server return error with early reply\n");
			rc = -EPROTO;
		} else {
			rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
		}
		break;
	default:
		CERROR("unknown gss proc %d\n", ghdr->gh_proc);
		rc = -EPROTO;
	}

	RETURN(rc);
}
int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct gss_cli_ctx *gctx;
	rawobj_t hdrobj, msgobj, token;
	struct gss_header *ghdr;
	__u32 buflens[2], major;
	int wiresize, rc;

	ENTRY;

	LASSERT(req->rq_clrbuf);
	LASSERT(req->rq_cli_ctx == ctx);
	LASSERT(req->rq_reqlen);

	gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

	/* final clear data length */
	req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
						 req->rq_clrbuf->lm_buflens);

	/* calculate wire data length */
	buflens[0] = PTLRPC_GSS_HEADER_SIZE;
	buflens[1] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
	wiresize = lustre_msg_size_v2(2, buflens);

	/* allocate wire buffer */
	if (req->rq_pool) {
		/* pre-allocated */
		LASSERT(req->rq_reqbuf);
		LASSERT(req->rq_reqbuf != req->rq_clrbuf);
		LASSERT(req->rq_reqbuf_len >= wiresize);
	} else {
		OBD_ALLOC_LARGE(req->rq_reqbuf, wiresize);
		if (!req->rq_reqbuf)
			RETURN(-ENOMEM);

		req->rq_reqbuf_len = wiresize;
	}

	lustre_init_msg_v2(req->rq_reqbuf, 2, buflens, NULL);
	req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;

	ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
	ghdr->gh_version = PTLRPC_GSS_VERSION;
	ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
	ghdr->gh_flags = 0;
	ghdr->gh_proc = gctx->gc_proc;
	ghdr->gh_svc = SPTLRPC_SVC_PRIV;
	ghdr->gh_handle.len = gctx->gc_handle.len;
	memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
	if (req->rq_pack_bulk)
		ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
	if (req->rq_pack_udesc)
		ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;

redo:
	ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);

	hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
	hdrobj.data = (__u8 *) ghdr;
	msgobj.len = req->rq_clrdata_len;
	msgobj.data = (__u8 *) req->rq_clrbuf;
	token.len = lustre_msg_buflen(req->rq_reqbuf, 1);
	token.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);

	major = lgss_wrap(gctx->gc_mechctx, &hdrobj, &msgobj,
			  req->rq_clrbuf_len, &token);
	if (major != GSS_S_COMPLETE) {
		CERROR("priv: wrap message error: %08x\n", major);
		GOTO(err_free, rc = -EPERM);
	}
	LASSERT(token.len <= buflens[1]);

	/* see the explanation in gss_cli_ctx_sign() */
	if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
		     GSS_SEQ_REPACK_THRESHOLD)) {
		int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;

		gss_stat_oos_record_cli(behind);
		CWARN("req %p: %u behind, retry sealing\n", req, behind);

		ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
		goto redo;
	}

	/* now set the final wire data length */
	req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,
						0);
	RETURN(0);

err_free:
	OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
	req->rq_reqbuf = NULL;
	req->rq_reqbuf_len = 0;

	RETURN(rc);
}
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct gss_cli_ctx *gctx;
	struct gss_header *ghdr;
	struct lustre_msg *msg = req->rq_repdata;
	int msglen, pack_bulk, swabbed, rc;
	__u32 major;

	ENTRY;

	LASSERT(req->rq_cli_ctx == ctx);
	LASSERT(req->rq_ctx_init == 0);
	LASSERT(msg);

	gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
	swabbed = req_capsule_rep_need_swab(&req->rq_pill);

	ghdr = gss_swab_header(msg, 0, swabbed);
	if (ghdr == NULL) {
		CERROR("can't decode gss header\n");
		RETURN(-EPROTO);
	}

	/* sanity checks */
	if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
		CERROR("gss version %u mismatch, expect %u\n",
		       ghdr->gh_version, PTLRPC_GSS_VERSION);
		RETURN(-EPROTO);
	}

	switch (ghdr->gh_proc) {
	case PTLRPC_GSS_PROC_DATA:
		pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;

		if (!req->rq_early && !equi(req->rq_pack_bulk == 1,
					    pack_bulk)) {
			CERROR("%s bulk flag in reply\n",
			       req->rq_pack_bulk ? "missing" : "unexpected");
			RETURN(-EPROTO);
		}

		if (swabbed)
			gss_header_swabber(ghdr);

		/* use rq_repdata_len as the buffer size, which assumes
		 * unsealing doesn't need extra memory space. for precise
		 * control, we'd better calculate the actual buffer size as
		 * (repbuf_len - offset - repdata_len)
		 */
		major = gss_unseal_msg(gctx->gc_mechctx, msg, &msglen,
				       req->rq_repdata_len);
		if (major != GSS_S_COMPLETE) {
			CERROR("failed to unwrap reply: %x\n", major);
			rc = -EPERM;
			break;
		}

		swabbed = __lustre_unpack_msg(msg, msglen);
		if (swabbed < 0) {
			CERROR("Failed to unpack after decryption\n");
			RETURN(-EPROTO);
		}

		if (msg->lm_bufcount < 1) {
			CERROR("Invalid reply buffer: empty\n");
			RETURN(-EPROTO);
		}

		if (pack_bulk) {
			if (msg->lm_bufcount < 2) {
				CERROR("bufcount %u: missing bulk sec desc\n",
				       msg->lm_bufcount);
				RETURN(-EPROTO);
			}

			/* bulk checksum is the last segment */
			if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
						 swabbed))
				RETURN(-EPROTO);
		}

		req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
		req->rq_replen = msg->lm_buflens[0];

		rc = 0;
		break;
	case PTLRPC_GSS_PROC_ERR:
		if (req->rq_early) {
			CERROR("server return error with early reply\n");
			rc = -EPROTO;
		} else {
			rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
		}
		break;
	default:
		CERROR("unexpected proc %d\n", ghdr->gh_proc);
		rc = -EPERM;
	}

	RETURN(rc);
}
/* reverse context installation */

static inline
int gss_install_rvs_svc_ctx(struct obd_import *imp, struct gss_sec *gsec,
			    struct gss_cli_ctx *gctx)
{
	return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
}

/* GSS security APIs */
int gss_sec_create_common(struct gss_sec *gsec,
			  struct ptlrpc_sec_policy *policy,
			  struct obd_import *imp,
			  struct ptlrpc_svc_ctx *svcctx,
			  struct sptlrpc_flavor *sf)
{
	struct ptlrpc_sec *sec;

	LASSERT(imp);
	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);

	gsec->gs_mech =
		lgss_subflavor_to_mech(SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
	if (!gsec->gs_mech) {
		CERROR("gss backend 0x%x not found\n",
		       SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
		return -EOPNOTSUPP;
	}

	spin_lock_init(&gsec->gs_lock);
	gsec->gs_rvs_hdl = 0ULL;

	/* initialize upper ptlrpc_sec */
	sec = &gsec->gs_base;
	sec->ps_policy = policy;
	atomic_set(&sec->ps_refcount, 0);
	atomic_set(&sec->ps_nctx, 0);
	sec->ps_id = sptlrpc_get_next_secid();
	sec->ps_flvr = *sf;
	sec->ps_import = class_import_get(imp);
	spin_lock_init(&sec->ps_lock);
	INIT_LIST_HEAD(&sec->ps_gc_list);

	if (!svcctx) {
		sec->ps_gc_interval = GSS_GC_INTERVAL;
	} else {
		LASSERT(sec_is_reverse(sec));

		/* never do gc on reverse sec */
		sec->ps_gc_interval = 0;
	}

	if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
		obd_pool_add_user();

	CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
	       policy->sp_name, gsec);
	return 0;
}
void gss_sec_destroy_common(struct gss_sec *gsec)
{
	struct ptlrpc_sec *sec = &gsec->gs_base;

	ENTRY;

	LASSERT(sec->ps_import);
	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(atomic_read(&sec->ps_nctx) == 0);

	if (gsec->gs_mech) {
		lgss_mech_put(gsec->gs_mech);
		gsec->gs_mech = NULL;
	}

	class_import_put(sec->ps_import);
	EXIT;
}

void gss_sec_kill(struct ptlrpc_sec *sec)
{
	sec->ps_dying = 1;
}

int gss_cli_ctx_init_common(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx,
			    struct ptlrpc_ctx_ops *ctxops,
			    struct vfs_cred *vcred)
{
	struct gss_cli_ctx *gctx = ctx2gctx(ctx);

	gctx->gc_win = 0;
	atomic_set(&gctx->gc_seq, 0);

	INIT_HLIST_NODE(&ctx->cc_cache);
	atomic_set(&ctx->cc_refcount, 0);
	ctx->cc_sec = sec;
	ctx->cc_ops = ctxops;
	ctx->cc_expire = 0;
	ctx->cc_flags = PTLRPC_CTX_NEW;
	ctx->cc_vcred = *vcred;
	spin_lock_init(&ctx->cc_lock);
	INIT_LIST_HEAD(&ctx->cc_req_list);
	INIT_LIST_HEAD(&ctx->cc_gc_chain);

	/* take a ref on the owning sec, balanced when the ctx is destroyed */
	atomic_inc(&sec->ps_refcount);
	/* statistics only */
	atomic_inc(&sec->ps_nctx);

	CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
	       sec->ps_policy->sp_name, ctx->cc_sec,
	       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
	return 0;
}
/*
 * return value:
 * 1: the context has been taken care of by someone else
 * 0: proceed to really destroy the context locally
 */
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
{
	struct gss_cli_ctx *gctx = ctx2gctx(ctx);

	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	LASSERT(ctx->cc_sec == sec);

	/*
	 * remove the UPTODATE flag of a reverse ctx so we won't send a fini
	 * rpc. this is to avoid potential problems of the client-side reverse
	 * svc ctx being mis-destroyed in various recovery scenarios. anyway,
	 * the client can manage its reverse ctx well by associating it with
	 * its buddy ctx.
	 */
	if (sec_is_reverse(sec))
		ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;

	if (gctx->gc_mechctx) {
		/* the final context fini rpc will use this ctx too, and it's
		 * asynchronous and is finished by request_out_callback(), so
		 * we take a refcount; whoever finally drops the refcount to
		 * 0 is responsible for the rest of the destruction.
		 */
		atomic_inc(&ctx->cc_refcount);

		gss_do_ctx_fini_rpc(gctx);
		gss_cli_ctx_finalize(gctx);

		if (!atomic_dec_and_test(&ctx->cc_refcount))
			return 1;
	}

	if (sec_is_reverse(sec))
		CDEBUG(D_SEC, "reverse sec %p: destroy ctx %p\n",
		       ctx->cc_sec, ctx);
	else
		CDEBUG(D_SEC, "%s@%p: destroy ctx %p(%u->%s)\n",
		       sec->ps_policy->sp_name, ctx->cc_sec,
		       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

	return 0;
}
static
int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
			  int svc, int msgsize)
{
	int bufsize, txtsize;
	int bufcnt = 2;
	__u32 buflens[5];

	ENTRY;

	/*
	 * on-wire data layout:
	 * - gss header
	 * - lustre message
	 * - user descriptor (optional)
	 * - bulk sec descriptor (optional)
	 * - signature (optional)
	 *   - svc == NULL: NULL
	 *   - svc == AUTH: signature of gss header
	 *   - svc == INTG: signature of all above
	 *
	 * if this is context negotiation, reserve fixed space
	 * at the last (signature) segment regardless of svc mode.
	 */

	buflens[0] = PTLRPC_GSS_HEADER_SIZE;
	txtsize = buflens[0];

	buflens[1] = msgsize;
	if (svc == SPTLRPC_SVC_INTG)
		txtsize += buflens[1];

	if (req->rq_pack_udesc) {
		buflens[bufcnt] = sptlrpc_current_user_desc_size();
		if (svc == SPTLRPC_SVC_INTG)
			txtsize += buflens[bufcnt];
		bufcnt++;
	}

	if (req->rq_pack_bulk) {
		buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
						       &req->rq_flvr,
						       0, req->rq_bulk_read);
		if (svc == SPTLRPC_SVC_INTG)
			txtsize += buflens[bufcnt];
		bufcnt++;
	}

	if (req->rq_ctx_init)
		buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
	else if (svc != SPTLRPC_SVC_NULL)
		buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,
						    0);

	bufsize = lustre_msg_size_v2(bufcnt, buflens);

	if (!req->rq_reqbuf) {
		bufsize = size_roundup_power2(bufsize);

		OBD_ALLOC_LARGE(req->rq_reqbuf, bufsize);
		if (!req->rq_reqbuf)
			RETURN(-ENOMEM);

		req->rq_reqbuf_len = bufsize;
	} else {
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= bufsize);
		memset(req->rq_reqbuf, 0, bufsize);
	}

	lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
	req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;

	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
	LASSERT(req->rq_reqmsg);

	/* pack user desc here, later we might leave current user's process */
	if (req->rq_pack_udesc)
		sptlrpc_pack_user_desc(req->rq_reqbuf, 2);

	RETURN(0);
}
static
int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
			  int msgsize)
{
	__u32 ibuflens[3], wbuflens[2];
	int ibufcnt;
	int clearsize, wiresize;

	ENTRY;

	LASSERT(req->rq_clrbuf == NULL);
	LASSERT(req->rq_clrbuf_len == 0);

	/* Inner (clear) buffers
	 *  - lustre message
	 *  - user descriptor (optional)
	 *  - bulk checksum (optional)
	 */
	ibufcnt = 1;
	ibuflens[0] = msgsize;

	if (req->rq_pack_udesc)
		ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
	if (req->rq_pack_bulk)
		ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
							   &req->rq_flvr, 0,
							   req->rq_bulk_read);

	clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
	/* to allow appending padding during encryption */
	clearsize += GSS_MAX_CIPHER_BLOCK;

	/* Wrapper (wire) buffers
	 *  - gss header
	 *  - cipher text
	 */
	wbuflens[0] = PTLRPC_GSS_HEADER_SIZE;
	wbuflens[1] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
	wiresize = lustre_msg_size_v2(2, wbuflens);

	if (req->rq_pool) {
		/* rq_reqbuf is preallocated */
		LASSERT(req->rq_reqbuf);
		LASSERT(req->rq_reqbuf_len >= wiresize);

		memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);

		/* if the pre-allocated buffer is big enough, we just pack
		 * both the clear buf & request buf in it, to avoid more alloc.
		 */
		if (clearsize + wiresize <= req->rq_reqbuf_len) {
			req->rq_clrbuf =
				(void *) (((char *) req->rq_reqbuf) + wiresize);
		} else {
			CWARN("pre-allocated buf size %d is not enough for both clear (%d) and cipher (%d) text, proceed with extra allocation\n",
			      req->rq_reqbuf_len, clearsize, wiresize);
		}
	}

	if (!req->rq_clrbuf) {
		clearsize = size_roundup_power2(clearsize);

		OBD_ALLOC_LARGE(req->rq_clrbuf, clearsize);
		if (!req->rq_clrbuf)
			RETURN(-ENOMEM);
	}
	req->rq_clrbuf_len = clearsize;

	lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
	req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);

	if (req->rq_pack_udesc)
		sptlrpc_pack_user_desc(req->rq_clrbuf, 1);

	RETURN(0);
}
/*
 * NOTE: any change of the request buffer allocation should also be
 * reflected in the enlarge_reqbuf() series of functions.
 */
int gss_alloc_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
		     int msgsize)
{
	int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

	LASSERT(!req->rq_pack_bulk ||
		(req->rq_bulk_read || req->rq_bulk_write));

	switch (svc) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
	case SPTLRPC_SVC_PRIV:
		return gss_alloc_reqbuf_priv(sec, req, msgsize);
	default:
		LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
		return 0;
	}
}

void gss_free_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req)
{
	int privacy;

	ENTRY;

	LASSERT(!req->rq_pool || req->rq_reqbuf);
	privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;

	if (!req->rq_clrbuf)
		goto release_reqbuf;

	/* release clear buffer */
	LASSERT(privacy);
	LASSERT(req->rq_clrbuf_len);

	if (req->rq_pool == NULL || req->rq_clrbuf < req->rq_reqbuf ||
	    (char *) req->rq_clrbuf >= (char *) req->rq_reqbuf +
					req->rq_reqbuf_len)
		OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);

	req->rq_clrbuf = NULL;
	req->rq_clrbuf_len = 0;

release_reqbuf:
	if (!req->rq_pool && req->rq_reqbuf) {
		LASSERT(req->rq_reqbuf_len);

		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = NULL;
		req->rq_reqbuf_len = 0;
	}

	EXIT;
}
static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
{
	bufsize = size_roundup_power2(bufsize);

	OBD_ALLOC_LARGE(req->rq_repbuf, bufsize);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = bufsize;
	return 0;
}

static
int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
			  int svc, int msgsize)
{
	int txtsize;
	__u32 buflens[4];
	int bufcnt = 2;
	int alloc_size;

	/*
	 * on-wire data layout:
	 * - gss header
	 * - lustre message
	 * - bulk sec descriptor (optional)
	 * - signature (optional)
	 *   - svc == NULL: NULL
	 *   - svc == AUTH: signature of gss header
	 *   - svc == INTG: signature of all above
	 *
	 * if this is context negotiation, reserve fixed space
	 * at the last (signature) segment regardless of svc mode.
	 */

	buflens[0] = PTLRPC_GSS_HEADER_SIZE;
	txtsize = buflens[0];

	buflens[1] = msgsize;
	if (svc == SPTLRPC_SVC_INTG)
		txtsize += buflens[1];

	if (req->rq_pack_bulk) {
		buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
						       &req->rq_flvr, 1,
						       req->rq_bulk_read);
		if (svc == SPTLRPC_SVC_INTG)
			txtsize += buflens[bufcnt];
		bufcnt++;
	}

	if (req->rq_ctx_init)
		buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
	else if (svc != SPTLRPC_SVC_NULL)
		buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,
						    0);

	alloc_size = lustre_msg_size_v2(bufcnt, buflens);

	/* add space for early reply */
	alloc_size += gss_at_reply_off_integ;

	return do_alloc_repbuf(req, alloc_size);
}
static
int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
			  int msgsize)
{
	int txtsize;
	__u32 buflens[2];
	int bufcnt;
	int alloc_size;

	/* inner buffers */
	bufcnt = 1;
	buflens[0] = msgsize;

	if (req->rq_pack_bulk)
		buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
							 &req->rq_flvr, 1,
							 req->rq_bulk_read);
	txtsize = lustre_msg_size_v2(bufcnt, buflens);
	txtsize += GSS_MAX_CIPHER_BLOCK;

	/* wrapper buffers */
	bufcnt = 2;
	buflens[0] = PTLRPC_GSS_HEADER_SIZE;
	buflens[1] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);

	alloc_size = lustre_msg_size_v2(bufcnt, buflens);
	/* add space for early reply */
	alloc_size += gss_at_reply_off_priv;

	return do_alloc_repbuf(req, alloc_size);
}

int gss_alloc_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
		     int msgsize)
{
	int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

	LASSERT(!req->rq_pack_bulk ||
		(req->rq_bulk_read || req->rq_bulk_write));

	switch (svc) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
	case SPTLRPC_SVC_PRIV:
		return gss_alloc_repbuf_priv(sec, req, msgsize);
	default:
		LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
		return 0;
	}
}

void gss_free_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req)
{
	OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
	req->rq_repbuf = NULL;
	req->rq_repbuf_len = 0;
	req->rq_repdata = NULL;
	req->rq_repdata_len = 0;
}
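/*
 * Compute what the overall message size would be if @segment were enlarged
 * to @newsize: temporarily swap in the new length, recompute, then restore.
 * The message itself is left unmodified. get_enlarged_msgsize2() below does
 * the same for two segments at once.
 */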
static int get_enlarged_msgsize(struct lustre_msg *msg, int segment,
				int newsize)
{
	int save, newmsg_size;

	LASSERT(newsize >= msg->lm_buflens[segment]);

	save = msg->lm_buflens[segment];
	msg->lm_buflens[segment] = newsize;
	newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
	msg->lm_buflens[segment] = save;

	return newmsg_size;
}

static int get_enlarged_msgsize2(struct lustre_msg *msg, int segment1,
				 int newsize1, int segment2, int newsize2)
{
	int save1, save2, newmsg_size;

	LASSERT(newsize1 >= msg->lm_buflens[segment1]);
	LASSERT(newsize2 >= msg->lm_buflens[segment2]);

	save1 = msg->lm_buflens[segment1];
	save2 = msg->lm_buflens[segment2];
	msg->lm_buflens[segment1] = newsize1;
	msg->lm_buflens[segment2] = newsize2;
	newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
	msg->lm_buflens[segment1] = save1;
	msg->lm_buflens[segment2] = save2;

	return newmsg_size;
}
static
int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
			    int svc, int segment, int newsize)
{
	struct lustre_msg *newbuf;
	int txtsize, sigsize = 0, i;
	int newmsg_size, newbuf_size;

	/*
	 * gss header is at seg 0;
	 * embedded msg is at seg 1;
	 * signature (if any) is at the last seg
	 */
	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
	LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
	LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);

	/* 1. compute new embedded msg size */
	newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
	LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);

	/* 2. compute new wrapper msg size */
	if (svc == SPTLRPC_SVC_NULL) {
		/* no signature, get size directly */
		newbuf_size = get_enlarged_msgsize(req->rq_reqbuf, 1,
						   newmsg_size);
	} else {
		txtsize = req->rq_reqbuf->lm_buflens[0];

		if (svc == SPTLRPC_SVC_INTG) {
			for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
				txtsize += req->rq_reqbuf->lm_buflens[i];
			txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
		}

		sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
		LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));

		newbuf_size = get_enlarged_msgsize2(req->rq_reqbuf, 1,
						    newmsg_size,
						    msg_last_segidx(req->rq_reqbuf),
						    sigsize);
	}

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

	if (req->rq_reqbuf_len < newbuf_size) {
		newbuf_size = size_roundup_power2(newbuf_size);

		OBD_ALLOC_LARGE(newbuf, newbuf_size);
		if (newbuf == NULL)
			RETURN(-ENOMEM);

		/* Must lock this, so that an otherwise unprotected change of
		 * rq_reqmsg is not racing with parallel processing of
		 * imp_replay_list traversing threads. See LU-3333.
		 * This is a bandaid at best; we really need to deal with this
		 * in the request enlarging code before the unpacking that's
		 * already there.
		 */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = newbuf;
		req->rq_reqbuf_len = newbuf_size;
		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	/* do enlargement, from wrapper to embedded, from end to begin */
	if (svc != SPTLRPC_SVC_NULL)
		_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
					     msg_last_segidx(req->rq_reqbuf),
					     sigsize);

	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

	req->rq_reqlen = newmsg_size;
	RETURN(0);
}
static
int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
			    int segment, int newsize)
{
	struct lustre_msg *newclrbuf;
	int newmsg_size, newclrbuf_size, newcipbuf_size;
	__u32 buflens[3];

	/*
	 * embedded msg is at seg 0 of clear buffer;
	 * cipher text is at seg 2 of cipher buffer;
	 */
	LASSERT(req->rq_pool ||
		(req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
	LASSERT(req->rq_reqbuf == NULL ||
		(req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
	LASSERT(req->rq_clrbuf);
	LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
	LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);

	/* compute new embedded msg size */
	newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);

	/* compute new clear buffer size */
	newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
	newclrbuf_size += GSS_MAX_CIPHER_BLOCK;

	/* compute new cipher buffer size */
	buflens[0] = PTLRPC_GSS_HEADER_SIZE;
	buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
	buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
	newcipbuf_size = lustre_msg_size_v2(3, buflens);

	/* handle the case where we put both the clear buf and the cipher
	 * buf into a single pre-allocated buffer.
	 */
	if (unlikely(req->rq_pool) && req->rq_clrbuf >= req->rq_reqbuf &&
	    (char *) req->rq_clrbuf < (char *) req->rq_reqbuf +
				      req->rq_reqbuf_len) {
		/* best case: we still fit into the
		 * pre-allocated buffer.
		 */
		if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
			void *src, *dst;

			if (req->rq_import)
				spin_lock(&req->rq_import->imp_lock);
			/* move clear text backward. */
			src = req->rq_clrbuf;
			dst = (char *) req->rq_reqbuf + newcipbuf_size;

			memmove(dst, src, req->rq_clrbuf_len);

			req->rq_clrbuf = (struct lustre_msg *) dst;
			req->rq_clrbuf_len = newclrbuf_size;
			req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);

			if (req->rq_import)
				spin_unlock(&req->rq_import->imp_lock);
		} else {
			/* sadly we have to split out the clear buffer */
			LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
			LASSERT(req->rq_clrbuf_len < newclrbuf_size);
		}
	}

	if (req->rq_clrbuf_len < newclrbuf_size) {
		newclrbuf_size = size_roundup_power2(newclrbuf_size);

		OBD_ALLOC_LARGE(newclrbuf, newclrbuf_size);
		if (newclrbuf == NULL)
			RETURN(-ENOMEM);

		/* Must lock this, so that an otherwise unprotected change of
		 * rq_reqmsg is not racing with parallel processing of
		 * imp_replay_list traversing threads. See LU-3333.
		 * This is a bandaid at best; we really need to deal with this
		 * in the request enlarging code before the unpacking that's
		 * already there.
		 */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);

		memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);

		if (req->rq_reqbuf == NULL || req->rq_clrbuf < req->rq_reqbuf ||
		    (char *) req->rq_clrbuf >= (char *) req->rq_reqbuf +
					       req->rq_reqbuf_len) {
			OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
		}

		req->rq_clrbuf = newclrbuf;
		req->rq_clrbuf_len = newclrbuf_size;
		req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	_sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
	req->rq_reqlen = newmsg_size;

	RETURN(0);
}
int gss_enlarge_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
		       int segment, int newsize)
{
	int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

	LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);

	switch (svc) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
	case SPTLRPC_SVC_PRIV:
		return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
	default:
		LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
		return 0;
	}
}

int gss_sec_install_rctx(struct obd_import *imp, struct ptlrpc_sec *sec,
			 struct ptlrpc_cli_ctx *ctx)
{
	struct gss_sec *gsec;
	struct gss_cli_ctx *gctx;
	int rc;

	gsec = container_of(sec, struct gss_sec, gs_base);
	gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

	rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
	return rc;
}
/*
 * server side API
 */

static inline
int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
{
	LASSERT(grctx);
	return (grctx->src_init || grctx->src_init_continue ||
		grctx->src_err_notify);
}

static
void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
{
	LASSERT(grctx);

	if (grctx->src_ctx)
		gss_svc_upcall_put_ctx(grctx->src_ctx);

	sptlrpc_policy_put(grctx->src_base.sc_policy);
	OBD_FREE_PTR(grctx);
}

static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
	LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
	atomic_inc(&grctx->src_base.sc_refcount);
}

static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
	LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);

	if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
		gss_svc_reqctx_free(grctx);
}

static
int gss_svc_sign(struct ptlrpc_request *req, struct ptlrpc_reply_state *rs,
		 struct gss_svc_reqctx *grctx, __u32 svc)
{
	__u32 flags = 0;
	int rc;

	ENTRY;

	LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));

	/* embedded lustre_msg might have been shrunk */
	if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
		lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);

	if (req->rq_pack_bulk)
		flags |= LUSTRE_GSS_PACK_BULK;

	rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
			  LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
			  grctx->src_wirectx.gw_seq, svc, NULL);
	if (rc < 0)
		RETURN(rc);

	rs->rs_repdata_len = rc;

	if (likely(req->rq_packed_final)) {
		if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
			req->rq_reply_off = gss_at_reply_off_integ;
		else
			req->rq_reply_off = 0;
	} else {
		if (svc == SPTLRPC_SVC_NULL)
			rs->rs_repbuf->lm_cksum =
				crc32_le(~(__u32) 0,
					 lustre_msg_buf(rs->rs_repbuf, 1, 0),
					 lustre_msg_buflen(rs->rs_repbuf, 1));
		req->rq_reply_off = 0;
	}

	RETURN(0);
}
int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
{
	struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
	struct ptlrpc_reply_state *rs;
	struct gss_err_header *ghdr;
	int replen = sizeof(struct ptlrpc_body);
	int rc;

	ENTRY;

	grctx->src_err_notify = 1;
	grctx->src_reserve_len = 0;

	rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
	if (rc) {
		CERROR("could not pack reply, err %d\n", rc);
		RETURN(rc);
	}

	/* gss hdr */
	rs = req->rq_reply_state;
	LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
	ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
	ghdr->gh_version = PTLRPC_GSS_VERSION;
	ghdr->gh_flags = 0;
	ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
	ghdr->gh_major = major;
	ghdr->gh_minor = minor;
	ghdr->gh_handle.len = 0; /* fake context handle */

	rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
						rs->rs_repbuf->lm_buflens);

	CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n", major,
	       minor, libcfs_nidstr(&req->rq_peer.nid));
	RETURN(0);
}
static
int gss_svc_handle_init(struct ptlrpc_request *req, struct gss_wire_ctx *gw)
{
	struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
	struct lustre_msg *reqbuf = req->rq_reqbuf;
	struct obd_uuid *uuid;
	struct obd_device *target;
	rawobj_t uuid_obj, rvs_hdl, in_token;
	__u32 lustre_svc;
	__u32 *secdata, seclen;
	int swabbed, rc;

	ENTRY;

	CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
	       libcfs_nidstr(&req->rq_peer.nid));

	req->rq_ctx_init = 1;

	if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
		rc = -EPROTO;
		CDEBUG(D_SEC, "unexpected bulk flag: rc = %d\n", rc);
		RETURN(rc);
	}

	if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
		rc = -EPROTO;
		CDEBUG(D_SEC, "proc %u: invalid handle length %u: rc = %d\n",
		       gw->gw_proc, gw->gw_handle.len, rc);
		RETURN(rc);
	}

	if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4) {
		rc = -EPROTO;
		CDEBUG(D_SEC, "Invalid bufcount %d: rc = %d\n",
		       reqbuf->lm_bufcount, rc);
		RETURN(rc);
	}

	swabbed = req_capsule_req_need_swab(&req->rq_pill);

	/* ctx initiate payload is in last segment */
	secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
	seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
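
	/* the payload packed by the client is, in order: a __u32 lustre
	 * service type, then the rawobj-encoded target uuid, reverse handle
	 * and init token; hence the minimum of one __u32 plus at least one
	 * rawobj length word checked below */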
	if (seclen < 4 + 4) {
		rc = -EPROTO;
		CDEBUG(D_SEC, "sec size %d too small: rc = %d\n", seclen, rc);
		RETURN(rc);
	}

	/* lustre svc type */
	lustre_svc = le32_to_cpu(*secdata++);
	seclen -= 4;

	/* extract target uuid; note this code is somewhat fragile
	 * because it touches the internal structure of obd_uuid
	 */
	if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
		rc = -EPROTO;
		CDEBUG(D_SEC, "failed to extract target uuid: rc = %d\n", rc);
		RETURN(rc);
	}
	uuid_obj.data[uuid_obj.len - 1] = '\0';

	uuid = (struct obd_uuid *) uuid_obj.data;
	target = class_uuid2obd(uuid);
	if (!target || target->obd_stopping || !target->obd_set_up) {
		char *target_start;
		int target_len;

		if (gss_pack_err_notify(req, GSS_S_NO_CONTEXT, 0) == 0)
			rc = SECSVC_COMPLETE;
		else
			rc = SECSVC_DROP;

		deuuidify(uuid->uuid, NULL, &target_start, &target_len);
		LCONSOLE_ERROR("%.*s: not available for GSS context init from %s (%s).\n",
			       target_len, target_start,
			       libcfs_nidstr(&req->rq_peer.nid),
			       target == NULL ? "no target" :
			       (target->obd_stopping ?
				"stopping" : "not set up"));
		RETURN(rc);
	}

	/* extract reverse handle */
	if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
		rc = -EPROTO;
		CDEBUG(D_SEC, "%s: failed extract reverse handle: rc = %d\n",
		       target->obd_name, rc);
		RETURN(rc);
	}

	/* extract token */
	if (rawobj_extract(&in_token, &secdata, &seclen)) {
		rc = -EPROTO;
		CDEBUG(D_SEC, "%s: can't extract token: rc = %d\n",
		       target->obd_name, rc);
		RETURN(rc);
	}

	rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
					&rvs_hdl, &in_token);
	if (rc != SECSVC_OK)
		RETURN(rc);

	if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
	    grctx->src_ctx->gsc_usr_root)
		CDEBUG(D_SEC,
		       "%s: create svc ctx %p: user from %s authenticated as %s\n",
		       target->obd_name,
		       grctx->src_ctx, libcfs_nidstr(&req->rq_peer.nid),
		       grctx->src_ctx->gsc_usr_root ? "root" :
		       (grctx->src_ctx->gsc_usr_mds ? "mds" :
			(grctx->src_ctx->gsc_usr_oss ? "oss" : "null")));
	else
		CDEBUG(D_SEC, "%s: create svc ctx %p: accept user %u from %s\n",
		       target->obd_name,
		       grctx->src_ctx, grctx->src_ctx->gsc_uid,
		       libcfs_nidstr(&req->rq_peer.nid));

	if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
		if (reqbuf->lm_bufcount < 4) {
			rc = -EPROTO;
			CDEBUG(D_SEC, "%s: missing user descriptor: rc = %d\n",
			       target->obd_name, rc);
			RETURN(rc);
		}
		if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
			rc = -EINVAL;
			CDEBUG(D_SEC,
			       "%s: Mal-formed user descriptor: rc = %d\n",
			       target->obd_name, rc);
			RETURN(rc);
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
	}

	req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
	req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);

	RETURN(rc);
}
/*
 * the last segment must be the gss signature.
 */
static
int gss_svc_verify_request(struct ptlrpc_request *req,
			   struct gss_svc_reqctx *grctx,
			   struct gss_wire_ctx *gw, __u32 *major)
{
	struct gss_svc_ctx *gctx = grctx->src_ctx;
	struct lustre_msg *msg = req->rq_reqbuf;
	int offset = 2;
	int swabbed;

	ENTRY;

	*major = GSS_S_COMPLETE;

	if (msg->lm_bufcount < 2) {
		CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
		RETURN(-EINVAL);
	}

	if (gw->gw_svc == SPTLRPC_SVC_NULL)
		goto verified;

	if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
		CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
		*major = GSS_S_DUPLICATE_TOKEN;
		RETURN(-EACCES);
	}

	*major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
	if (*major != GSS_S_COMPLETE) {
		CERROR("failed to verify request: %x\n", *major);
		RETURN(-EACCES);
	}

	if (gctx->gsc_reverse == 0 &&
	    gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
		CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
		*major = GSS_S_DUPLICATE_TOKEN;
		RETURN(-EACCES);
	}

verified:
	swabbed = req_capsule_req_need_swab(&req->rq_pill);

	/* user descriptor */
	if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
		if (msg->lm_bufcount < (offset + 1)) {
			CERROR("no user desc included\n");
			RETURN(-EINVAL);
		}

		if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
			CERROR("Mal-formed user descriptor\n");
			RETURN(-EINVAL);
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
		offset++;
	}

	/* check bulk_sec_desc data */
	if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
		if (msg->lm_bufcount < (offset + 1)) {
			CERROR("missing bulk sec descriptor\n");
			RETURN(-EINVAL);
		}

		if (bulk_sec_desc_unpack(msg, offset, swabbed))
			RETURN(-EINVAL);

		req->rq_pack_bulk = 1;
		grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
		grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
	}

	req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
	req->rq_reqlen = msg->lm_buflens[1];
	RETURN(0);
}
static
int gss_svc_unseal_request(struct ptlrpc_request *req,
			   struct gss_svc_reqctx *grctx,
			   struct gss_wire_ctx *gw, __u32 *major)
{
	struct gss_svc_ctx *gctx = grctx->src_ctx;
	struct lustre_msg *msg = req->rq_reqbuf;
	int swabbed, msglen, offset = 1;

	ENTRY;

	if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
		CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
		*major = GSS_S_DUPLICATE_TOKEN;
		RETURN(-EACCES);
	}

	*major = gss_unseal_msg(gctx->gsc_mechctx, msg, &msglen,
				req->rq_reqdata_len);
	if (*major != GSS_S_COMPLETE) {
		CERROR("failed to unwrap request: %x\n", *major);
		RETURN(-EACCES);
	}

	if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
		CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
		*major = GSS_S_DUPLICATE_TOKEN;
		RETURN(-EACCES);
	}

	swabbed = __lustre_unpack_msg(msg, msglen);
	if (swabbed < 0) {
		CERROR("Failed to unpack after decryption\n");
		RETURN(-EINVAL);
	}
	req->rq_reqdata_len = msglen;

	if (msg->lm_bufcount < 1) {
		CERROR("Invalid buffer: is empty\n");
		RETURN(-EINVAL);
	}

	if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
		if (msg->lm_bufcount < offset + 1) {
			CERROR("no user descriptor included\n");
			RETURN(-EINVAL);
		}

		if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
			CERROR("Mal-formed user descriptor\n");
			RETURN(-EINVAL);
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
		offset++;
	}

	if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
		if (msg->lm_bufcount < offset + 1) {
			CERROR("no bulk checksum included\n");
			RETURN(-EINVAL);
		}

		if (bulk_sec_desc_unpack(msg, offset, swabbed))
			RETURN(-EINVAL);

		req->rq_pack_bulk = 1;
		grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
		grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
	}

	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
	req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
	RETURN(0);
}
static int gss_svc_handle_data(struct ptlrpc_request *req,
			       struct gss_wire_ctx *gw)
{
	struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
	__u32 major = 0;
	int rc = 0;
	ENTRY;

	grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
	if (!grctx->src_ctx) {
		major = GSS_S_NO_CONTEXT;
		goto error;
	}

	switch (gw->gw_svc) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		rc = gss_svc_verify_request(req, grctx, gw, &major);
		break;
	case SPTLRPC_SVC_PRIV:
		rc = gss_svc_unseal_request(req, grctx, gw, &major);
		break;
	default:
		CERROR("unsupported gss service %d\n", gw->gw_svc);
		rc = -EINVAL;
	}

	if (rc == 0)
		RETURN(SECSVC_OK);

	CERROR("svc %u failed: major 0x%08x: req xid %llu ctx %p idx %#llx(%u->%s)\n",
	       gw->gw_svc, major, req->rq_xid, grctx->src_ctx,
	       gss_handle_to_u64(&gw->gw_handle), grctx->src_ctx->gsc_uid,
	       libcfs_nidstr(&req->rq_peer.nid));
error:
	/* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
	 * might happen after a server reboot, to allow recovery. */
	if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
	    gss_pack_err_notify(req, major, 0) == 0)
		RETURN(SECSVC_COMPLETE);

	RETURN(SECSVC_DROP);
}

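/*
 * Handle a PTLRPC_GSS_PROC_DESTROY request: only integrity-protected
 * destroy messages are accepted, and no reply is sent back.
 */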
static int gss_svc_handle_destroy(struct ptlrpc_request *req,
				  struct gss_wire_ctx *gw)
{
	struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
	__u32 major;
	ENTRY;

	req->rq_ctx_fini = 1;
	req->rq_no_reply = 1;

	grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
	if (!grctx->src_ctx) {
		CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
		RETURN(SECSVC_DROP);
	}

	if (gw->gw_svc != SPTLRPC_SVC_INTG) {
		CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
		RETURN(SECSVC_DROP);
	}

	if (gss_svc_verify_request(req, grctx, gw, &major))
		RETURN(SECSVC_DROP);

	CDEBUG(D_SEC, "destroy svc ctx %p idx %#llx (%u->%s)\n",
	       grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
	       grctx->src_ctx->gsc_uid, libcfs_nidstr(&req->rq_peer.nid));

	gss_svc_upcall_destroy_ctx(grctx->src_ctx);

	if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
		if (req->rq_reqbuf->lm_bufcount < 4) {
			CERROR("missing user descriptor, ignore it\n");
			RETURN(SECSVC_OK);
		}
		if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
				req_capsule_req_need_swab(&req->rq_pill))) {
			CERROR("Mal-formed user descriptor, ignore it\n");
			RETURN(SECSVC_OK);
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
	}

	RETURN(SECSVC_OK);
}

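/*
 * Entry point for every incoming GSS request on the server: decode and
 * sanity-check the gss header, allocate the per-request context, save the
 * wire context, then dispatch on the gss procedure (init/data/destroy).
 */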
int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
{
	struct gss_header *ghdr;
	struct gss_svc_reqctx *grctx;
	struct gss_wire_ctx *gw;
	int swabbed, rc;
	ENTRY;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_svc_ctx == NULL);

	if (req->rq_reqbuf->lm_bufcount < 2) {
		CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
		RETURN(SECSVC_DROP);
	}

	swabbed = req_capsule_req_need_swab(&req->rq_pill);

	ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
	if (ghdr == NULL) {
		CERROR("can't decode gss header\n");
		RETURN(SECSVC_DROP);
	}

	/* sanity checks */
	if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
		CERROR("gss version %u, expect %u\n", ghdr->gh_version,
		       PTLRPC_GSS_VERSION);
		RETURN(SECSVC_DROP);
	}

	req->rq_sp_from = ghdr->gh_sp;

	/* alloc grctx data */
	OBD_ALLOC_PTR(grctx);
	if (!grctx)
		RETURN(SECSVC_DROP);

	grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
	atomic_set(&grctx->src_base.sc_refcount, 1);
	req->rq_svc_ctx = &grctx->src_base;
	gw = &grctx->src_wirectx;

	/* save wire context */
	gw->gw_flags = ghdr->gh_flags;
	gw->gw_proc = ghdr->gh_proc;
	gw->gw_seq = ghdr->gh_seq;
	gw->gw_svc = ghdr->gh_svc;
	rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);

	/* restore the wire byte order of the header: the original bytes
	 * are subject to checksum verification */
	if (swabbed)
		gss_header_swabber(ghdr);

	switch (ghdr->gh_proc) {
	case PTLRPC_GSS_PROC_INIT:
	case PTLRPC_GSS_PROC_CONTINUE_INIT:
		rc = gss_svc_handle_init(req, gw);
		break;
	case PTLRPC_GSS_PROC_DATA:
		rc = gss_svc_handle_data(req, gw);
		break;
	case PTLRPC_GSS_PROC_DESTROY:
		rc = gss_svc_handle_destroy(req, gw);
		break;
	default:
		CERROR("unknown proc %u\n", gw->gw_proc);
		rc = SECSVC_DROP;
		break;
	}

	switch (rc) {
	case SECSVC_OK:
		LASSERT(grctx->src_ctx);

		req->rq_auth_gss = 1;
		req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
		req->rq_auth_usr_ost = grctx->src_ctx->gsc_usr_oss;
		req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
		req->rq_auth_uid = grctx->src_ctx->gsc_uid;
		req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
		break;
	case SECSVC_COMPLETE:
		break;
	case SECSVC_DROP:
		gss_svc_reqctx_free(grctx);
		req->rq_svc_ctx = NULL;
		break;
	}

	RETURN(rc);
}

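/* Forcibly destroy the gss context attached to an existing svc context. */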
void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
{
	struct gss_svc_reqctx *grctx;

	if (svc_ctx == NULL) {
		LBUG();
		return;
	}

	grctx = gss_svc_ctx2reqctx(svc_ctx);
	CWARN("gss svc invalidate ctx %p(%u)\n", grctx->src_ctx,
	      grctx->src_ctx->gsc_uid);
	gss_svc_upcall_destroy_ctx(grctx->src_ctx);
}

static inline
int gss_svc_payload(struct gss_svc_reqctx *grctx, int early,
		    int msgsize, int privacy)
{
	/* An early reply is treated normally, but it shares the same ctx
	 * as the original request, so in that case ignore the special
	 * ctx's flags and size it via the mechanism. */
	if (early == 0 && gss_svc_reqctx_is_special(grctx))
		return grctx->src_reserve_len;

	return gss_mech_payload(NULL, msgsize, privacy);
}

static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
				struct sptlrpc_flavor *flvr, int read)
{
	int payload = sizeof(struct ptlrpc_bulk_sec_desc);

	switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
	case SPTLRPC_BULK_SVC_NULL:
		break;
	case SPTLRPC_BULK_SVC_INTG:
		payload += gss_mech_payload(NULL, 0, 0);
		break;
	case SPTLRPC_BULK_SVC_PRIV:
		payload += gss_mech_payload(NULL, 0, 1);
		break;
	case SPTLRPC_BULK_SVC_AUTH:
	default:
		LBUG();
	}

	return payload;
}

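/*
 * Allocate and size the reply state for a request. The resulting reply
 * buffer layout is roughly (a sketch, segment order as built below):
 *
 *   privacy:   inner  [ clear reply msg | bulk sec desc? ]
 *              wire   [ gss header | ciphertext ]
 *   otherwise: wire   [ gss header | reply msg | bulk sec desc? | sig ]
 */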
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
	struct gss_svc_reqctx *grctx;
	struct ptlrpc_reply_state *rs;
	int early, privacy, svc, bsd_off = 0;
	__u32 ibuflens[2], buflens[4];
	int ibufcnt = 0, bufcnt;
	int txtsize, wmsg_size, rs_size;
	ENTRY;

	LASSERT(msglen % 8 == 0);

	if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
		CERROR("client request bulk sec on non-bulk rpc\n");
		RETURN(-EPROTO);
	}

	svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
	early = (req->rq_packed_final == 0);

	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
	if (!early && gss_svc_reqctx_is_special(grctx))
		privacy = 0;
	else
		privacy = (svc == SPTLRPC_SVC_PRIV);

	if (privacy) {
		/* inner clear buffers */
		ibufcnt = 1;
		ibuflens[0] = msglen;

		if (req->rq_pack_bulk) {
			LASSERT(grctx->src_reqbsd);

			bsd_off = ibufcnt;
			ibuflens[ibufcnt++] = gss_svc_bulk_payload(
							grctx->src_ctx,
							&req->rq_flvr,
							req->rq_bulk_read);
		}

		txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
		txtsize += GSS_MAX_CIPHER_BLOCK;

		/* wrapper buffer */
		bufcnt = 2;
		buflens[0] = PTLRPC_GSS_HEADER_SIZE;
		buflens[1] = gss_svc_payload(grctx, early, txtsize, 1);
	} else {
		bufcnt = 2;
		buflens[0] = PTLRPC_GSS_HEADER_SIZE;
		buflens[1] = msglen;

		txtsize = buflens[0];
		if (svc == SPTLRPC_SVC_INTG)
			txtsize += buflens[1];

		if (req->rq_pack_bulk) {
			LASSERT(grctx->src_reqbsd);

			bsd_off = bufcnt;
			buflens[bufcnt] = gss_svc_bulk_payload(
							grctx->src_ctx,
							&req->rq_flvr,
							req->rq_bulk_read);
			if (svc == SPTLRPC_SVC_INTG)
				txtsize += buflens[bufcnt];
			bufcnt++;
		}

		if ((!early && gss_svc_reqctx_is_special(grctx)) ||
		    svc != SPTLRPC_SVC_NULL)
			buflens[bufcnt++] = gss_svc_payload(grctx, early,
							    txtsize, 0);
	}

	wmsg_size = lustre_msg_size_v2(bufcnt, buflens);

	rs_size = sizeof(*rs) + wmsg_size;
	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		OBD_ALLOC_LARGE(rs, rs_size);
		if (rs == NULL)
			RETURN(-ENOMEM);

		rs->rs_size = rs_size;
	}

	rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
	rs->rs_repbuf_len = wmsg_size;

	/* initialize the buffer */
	if (privacy) {
		lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
		rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
	} else {
		lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
		rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

		rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
	}

	if (bsd_off) {
		grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
		grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
							   bsd_off);
	}

	gss_svc_reqctx_addref(grctx);
	rs->rs_svc_ctx = req->rq_svc_ctx;

	LASSERT(rs->rs_msg);
	req->rq_reply_state = rs;
	RETURN(0);
}

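/*
 * Seal (encrypt) a reply in place: the gss header is staged at the tail
 * of the reply buffer, lgss_wrap() emits the token into a temporary
 * buffer, and the buffer is re-initialized as [ gss header | gss token ].
 */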
static int gss_svc_seal(struct ptlrpc_request *req,
			struct ptlrpc_reply_state *rs,
			struct gss_svc_reqctx *grctx)
{
	struct gss_svc_ctx *gctx = grctx->src_ctx;
	rawobj_t hdrobj, msgobj, token;
	struct gss_header *ghdr;
	__u8 *token_buf;
	int token_buflen;
	__u32 buflens[2], major;
	int msglen, rc;
	ENTRY;

	/* get clear data length. note the embedded lustre_msg might
	 * have been shrunk */
	if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
		msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
	else
		msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
					    rs->rs_repbuf->lm_buflens);

	/* temporarily use tail of buffer to hold gss header data */
	LASSERT(msglen + PTLRPC_GSS_HEADER_SIZE <= rs->rs_repbuf_len);
	ghdr = (struct gss_header *) ((char *) rs->rs_repbuf +
				rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE);
	ghdr->gh_version = PTLRPC_GSS_VERSION;
	ghdr->gh_sp = LUSTRE_SP_ANY;
	ghdr->gh_flags = 0;
	ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
	ghdr->gh_seq = grctx->src_wirectx.gw_seq;
	ghdr->gh_svc = SPTLRPC_SVC_PRIV;
	ghdr->gh_handle.len = 0;
	if (req->rq_pack_bulk)
		ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;

	/* allocate temporary cipher buffer */
	token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
	OBD_ALLOC_LARGE(token_buf, token_buflen);
	if (token_buf == NULL)
		RETURN(-ENOMEM);

	hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
	hdrobj.data = (__u8 *) ghdr;
	msgobj.len = msglen;
	msgobj.data = (__u8 *) rs->rs_repbuf;
	token.len = token_buflen;
	token.data = token_buf;

	major = lgss_wrap(gctx->gsc_mechctx, &hdrobj, &msgobj,
			  rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE, &token);
	if (major != GSS_S_COMPLETE) {
		CERROR("wrap message error: %08x\n", major);
		GOTO(out_free, rc = -EPERM);
	}
	LASSERT(token.len <= token_buflen);

	/* we are about to overwrite data at rs->rs_repbuf; nullify pointers
	 * into it to catch any further illegal usage. */
	if (req->rq_pack_bulk) {
		grctx->src_repbsd = NULL;
		grctx->src_repbsd_size = 0;
	}

	/* now fill the actual wire data
	 * - gss header
	 * - gss token
	 */
	buflens[0] = PTLRPC_GSS_HEADER_SIZE;
	buflens[1] = token.len;

	rs->rs_repdata_len = lustre_msg_size_v2(2, buflens);
	LASSERT(rs->rs_repdata_len <= rs->rs_repbuf_len);

	lustre_init_msg_v2(rs->rs_repbuf, 2, buflens, NULL);
	rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

	memcpy(lustre_msg_buf(rs->rs_repbuf, 0, 0), ghdr,
	       PTLRPC_GSS_HEADER_SIZE);
	memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);

	/* reply offset */
	if (req->rq_packed_final &&
	    (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))
		req->rq_reply_off = gss_at_reply_off_priv;
	else
		req->rq_reply_off = 0;

	/* to catch upper layer's further access */
	rs->rs_msg = NULL;
	req->rq_repmsg = NULL;
	req->rq_replen = 0;

	rc = 0;
out_free:
	OBD_FREE_LARGE(token_buf, token_buflen);
	RETURN(rc);
}

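/*
 * Finalize a reply according to the service type: sign it for
 * NULL/AUTH/INTG, seal it for PRIV. Replies on special (reboot-recovery)
 * contexts are already packed, so only the reply offset is set.
 */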
int gss_svc_authorize(struct ptlrpc_request *req)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
	struct gss_wire_ctx *gw = &grctx->src_wirectx;
	int early, rc;
	ENTRY;

	early = (req->rq_packed_final == 0);

	if (!early && gss_svc_reqctx_is_special(grctx)) {
		LASSERT(rs->rs_repdata_len != 0);

		req->rq_reply_off = gss_at_reply_off_integ;
		RETURN(0);
	}

	/* early reply could happen in many cases */
	if (!early && gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
	    gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
		CERROR("proc %d not supported\n", gw->gw_proc);
		RETURN(-EINVAL);
	}

	LASSERT(grctx->src_ctx);

	switch (gw->gw_svc) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
		break;
	case SPTLRPC_SVC_PRIV:
		rc = gss_svc_seal(req, rs, grctx);
		break;
	default:
		CERROR("Unknown service %d\n", gw->gw_svc);
		GOTO(out, rc = -EINVAL);
	}
	RETURN(rc);

out:
	RETURN(rc);
}

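/* Drop the reply state's reference on its request context. */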
void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
{
	struct gss_svc_reqctx *grctx;

	LASSERT(rs->rs_svc_ctx);
	grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);

	gss_svc_reqctx_decref(grctx);
	rs->rs_svc_ctx = NULL;

	if (!rs->rs_prealloc)
		OBD_FREE_LARGE(rs, rs->rs_size);
}

void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
	LASSERT(atomic_read(&ctx->sc_refcount) == 0);
	gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}

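/*
 * Build a reverse (server-to-client) client context from an established
 * service context, so the server can send callback RPCs under the same
 * security context.
 */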
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
			 struct ptlrpc_svc_ctx *svc_ctx)
{
	struct gss_cli_ctx *cli_gctx = ctx2gctx(cli_ctx);
	struct gss_svc_ctx *svc_gctx = gss_svc_ctx2gssctx(svc_ctx);
	struct gss_ctx *mechctx = NULL;

	LASSERT(cli_gctx);
	LASSERT(svc_gctx && svc_gctx->gsc_mechctx);

	cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
	cli_gctx->gc_win = GSS_SEQ_WIN;

	/* The reverse ctx might get lost in some recovery situations, and
	 * the same svc_ctx will be used to re-create it. If a callback was
	 * sent out before that, a new reverse ctx starting at sequence 0
	 * would make future callback RPCs be treated as replays.
	 *
	 * Each reverse root ctx records its latest sequence number on its
	 * buddy svcctx before being destroyed, so continue from it here. */
	atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);

	if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
		CERROR("failed to dup svc handle\n");
		goto err_out;
	}

	if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
	    GSS_S_COMPLETE) {
		CERROR("failed to copy mech context\n");
		goto err_svc_handle;
	}

	if (rawobj_dup(&cli_gctx->gc_handle, &svc_gctx->gsc_rvs_hdl)) {
		CERROR("failed to dup reverse handle\n");
		goto err_ctx;
	}

	cli_gctx->gc_mechctx = mechctx;
	gss_cli_ctx_uptodate(cli_gctx);

	return 0;

err_ctx:
	lgss_delete_sec_context(&mechctx);
err_svc_handle:
	rawobj_free(&cli_gctx->gc_svc_handle);
err_out:
	return -ENOMEM;
}

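/*
 * Pre-compute the fixed early-reply offsets used with adaptive timeouts:
 * integrity mode packs [ gss header | early msg | checksum ], privacy
 * mode sizes the wrapped form of a one-segment clear early message.
 */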
static void gss_init_at_reply_offset(void)
{
	__u32 buflens[3];
	int clearsize;

	buflens[0] = PTLRPC_GSS_HEADER_SIZE;
	buflens[1] = lustre_msg_early_size;
	buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
	gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);

	buflens[0] = lustre_msg_early_size;
	clearsize = lustre_msg_size_v2(1, buflens);
	buflens[0] = PTLRPC_GSS_HEADER_SIZE;
	buflens[1] = gss_cli_payload(NULL, clearsize, 0);
	buflens[2] = gss_cli_payload(NULL, clearsize, 1);
	gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
}

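/*
 * Module setup. Each failure path below unwinds exactly the steps that
 * have already succeeded, in reverse order.
 */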
static int __init sptlrpc_gss_init(void)
{
	int rc;

	rc = gss_init_tunables();
	if (rc)
		return rc;
	rc = gss_init_cli_upcall();
	if (rc)
		goto out_tunables;
	rc = gss_init_svc_upcall();
	if (rc)
		goto out_cli_upcall;
	rc = init_null_module();
	if (rc)
		goto out_svc_upcall;
	rc = init_kerberos_module();
	if (rc)
		goto out_null;
	rc = init_sk_module();
	if (rc)
		goto out_kerberos;

	/* Register the policy after everything else is initialized,
	 * because it may be used immediately after registration. */
	rc = gss_init_keyring();
	if (rc)
		goto out_sk;

	gss_init_at_reply_offset();
	return 0;

out_sk:
	cleanup_sk_module();
out_kerberos:
	cleanup_kerberos_module();
out_null:
	cleanup_null_module();
out_svc_upcall:
	gss_exit_svc_upcall();
out_cli_upcall:
	gss_exit_cli_upcall();
out_tunables:
	gss_exit_tunables();
	return rc;
}

static void __exit sptlrpc_gss_exit(void)
{
	gss_exit_keyring();
	cleanup_kerberos_module();
	gss_exit_svc_upcall();
	gss_exit_cli_upcall();
	gss_exit_tunables();
}

MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre GSS security policy");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(sptlrpc_gss_init);
module_exit(sptlrpc_gss_exit);