/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * linux/net/sunrpc/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song <dugsong@monkey.org>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#include <obd_class.h>
#include <obd_support.h>
#include <obd_cksum.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"

#include <linux/crypto.h>
#include <linux/crc32.h>
/*
 * Early replies have fixed sizes, in privacy and integrity mode
 * respectively, so we calculate them only once.
 */
static int gss_at_reply_off_integ;
static int gss_at_reply_off_priv;
static inline int msg_last_segidx(struct lustre_msg *msg)
{
        LASSERT(msg->lm_bufcount > 0);
        return msg->lm_bufcount - 1;
}

static inline int msg_last_seglen(struct lustre_msg *msg)
{
        return msg->lm_buflens[msg_last_segidx(msg)];
}
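/*
 * Illustrative note (not part of the original code): for a lustre_msg
 * with lm_bufcount == 3, msg_last_segidx() returns 2 and
 * msg_last_seglen() returns lm_buflens[2], i.e. the length of the last
 * segment, which for the signed messages below holds the MIC.
 */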
/********************************************
 * wire data swabber                        *
 ********************************************/

static
void gss_header_swabber(struct gss_header *ghdr)
{
        __swab32s(&ghdr->gh_flags);
        __swab32s(&ghdr->gh_proc);
        __swab32s(&ghdr->gh_seq);
        __swab32s(&ghdr->gh_svc);
        __swab32s(&ghdr->gh_pad1);
        __swab32s(&ghdr->gh_handle.len);
}
struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
                                   int swabbed)
{
        struct gss_header *ghdr;

        ghdr = lustre_msg_buf(msg, segment, sizeof(*ghdr));
        if (ghdr == NULL)
                return NULL;

        if (swabbed)
                gss_header_swabber(ghdr);

        if (sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
                CERROR("gss header has length %d, but %u received\n",
                       (int) sizeof(*ghdr) + ghdr->gh_handle.len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return ghdr;
}
/*
 * The payload size should be obtained from the mechanism, but since we
 * currently only support Kerberos we can simply use fixed values:
 *
 *  - krb5 header:   16
 *  - krb5 checksum: 20
 *
 * For privacy mode the payload also includes the cipher text, which has
 * the same size as the plain text, plus a possible confounder and padding,
 * both at most one cipher block each.
 */
#define GSS_KRB5_INTEG_MAX_PAYLOAD      (40)
static
int gss_mech_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
        if (privacy)
                return GSS_KRB5_INTEG_MAX_PAYLOAD + 16 + 16 + 16 + msgsize;
        else
                return GSS_KRB5_INTEG_MAX_PAYLOAD;
}
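/*
 * Illustrative example (not part of the original code): sizing a privacy
 * mode payload for a 1024-byte message with the fixed krb5 values above:
 *
 *      int need = gss_mech_payload(NULL, 1024, 1);
 *      // need == 40 + 16 + 16 + 16 + 1024 == 1112 bytes, i.e. the
 *      // maximum integrity payload plus krb5 header, confounder and
 *      // padding reserves, plus the message itself.
 */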
/*
 * return signature size, otherwise < 0 to indicate error
 */
static int gss_sign_msg(struct lustre_msg *msg,
                        struct gss_ctx *mechctx,
                        enum lustre_sec_part sp,
                        __u32 flags, __u32 proc, __u32 seq, __u32 svc,
                        rawobj_t *handle)
{
        struct gss_header *ghdr;
        rawobj_t text[4], mic;
        int textcnt, max_textcnt, mic_idx;
        __u32 major;

        LASSERT(msg->lm_bufcount >= 2);

        /* gss hdr */
        LASSERT(msg->lm_buflens[0] >=
                sizeof(*ghdr) + (handle ? handle->len : 0));
        ghdr = lustre_msg_buf(msg, 0, 0);

        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = (__u8) sp;
        ghdr->gh_flags = flags;
        ghdr->gh_proc = proc;
        ghdr->gh_seq = seq;
        ghdr->gh_svc = svc;
        if (!handle) {
                /* fill in a fake one */
                ghdr->gh_handle.len = 0;
        } else {
                ghdr->gh_handle.len = handle->len;
                memcpy(ghdr->gh_handle.data, handle->data, handle->len);
        }

        /* no actual signature for null mode */
        if (svc == SPTLRPC_SVC_NULL)
                return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        mic_idx = msg_last_segidx(msg);
        max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

        for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
        if (major != GSS_S_COMPLETE) {
                CERROR("failed to generate MIC: %08x\n", major);
                return -EPERM;
        }
        LASSERT(mic.len <= msg->lm_buflens[mic_idx]);

        return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
}
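/*
 * Illustrative layout note (not part of the original code): after
 * gss_sign_msg() a SVC_INTG request looks like
 *
 *      seg 0:      gss_header   (covered by the MIC)
 *      seg 1..n-2: payload      (covered by the MIC)
 *      seg n-1:    MIC          (shrunk to the actual mic.len)
 *
 * while for SVC_AUTH only seg 0 is covered (max_textcnt == 1), and for
 * SVC_NULL no MIC is written at all.
 */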
static
__u32 gss_verify_msg(struct lustre_msg *msg,
                     struct gss_ctx *mechctx,
                     __u32 svc)
{
        rawobj_t text[4], mic;
        int textcnt, max_textcnt;
        int mic_idx;
        __u32 major;

        LASSERT(msg->lm_bufcount >= 2);

        if (svc == SPTLRPC_SVC_NULL)
                return GSS_S_COMPLETE;

        mic_idx = msg_last_segidx(msg);
        max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

        for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
        if (major != GSS_S_COMPLETE)
                CERROR("mic verify error: %08x\n", major);

        return major;
}
/*
 * return gss error code
 */
static
__u32 gss_unseal_msg(struct gss_ctx *mechctx,
                     struct lustre_msg *msgbuf,
                     int *msg_len, int msgbuf_len)
{
        rawobj_t clear_obj, hdrobj, token;
        __u8 *clear_buf;
        int clear_buflen;
        __u32 major;

        ENTRY;

        if (msgbuf->lm_bufcount != 2) {
                CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
                RETURN(GSS_S_FAILURE);
        }

        /* allocate a temporary clear text buffer, same size as the token;
         * we assume the final clear text size <= token size */
        clear_buflen = lustre_msg_buflen(msgbuf, 1);
        OBD_ALLOC_LARGE(clear_buf, clear_buflen);
        if (!clear_buf)
                RETURN(GSS_S_FAILURE);

        /* buffer objects */
        hdrobj.len = lustre_msg_buflen(msgbuf, 0);
        hdrobj.data = lustre_msg_buf(msgbuf, 0, 0);
        token.len = lustre_msg_buflen(msgbuf, 1);
        token.data = lustre_msg_buf(msgbuf, 1, 0);
        clear_obj.len = clear_buflen;
        clear_obj.data = clear_buf;

        major = lgss_unwrap(mechctx, &hdrobj, &token, &clear_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("unwrap message error: %08x\n", major);
                GOTO(out_free, major = GSS_S_FAILURE);
        }
        LASSERT(clear_obj.len <= clear_buflen);
        LASSERT(clear_obj.len <= msgbuf_len);

        /* now the decrypted message */
        memcpy(msgbuf, clear_obj.data, clear_obj.len);
        *msg_len = clear_obj.len;

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(clear_buf, clear_buflen);
        RETURN(major);
}
/********************************************
 * gss client context manipulation helpers  *
 ********************************************/

int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount));

        if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
                if (!ctx->cc_early_expire)
                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                CWARN("ctx %p(%u->%s) get expired: %lld(%+llds)\n",
                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
                      ctx->cc_expire,
                      ctx->cc_expire == 0 ? 0 :
                      ctx->cc_expire - ktime_get_real_seconds());

                sptlrpc_cli_ctx_wakeup(ctx);
                return 1;
        }

        return 0;
}
/*
 * return 1 if the context is dead.
 */
int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
{
        if (unlikely(cli_ctx_is_dead(ctx)))
                return 1;

        /* cc_expire == 0 means never expire; a newly created gss context
         * may have an expiration of 0 while its upcall is in progress */
        if (ctx->cc_expire == 0)
                return 0;

        /* check real expiration */
        if (ctx->cc_expire > ktime_get_real_seconds())
                return 0;

        cli_ctx_expire(ctx);
        return 1;
}
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
        struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
        time64_t ctx_expiry;

        if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
                CERROR("ctx %p(%u): unable to inquire, expire it now\n",
                       gctx, ctx->cc_vcred.vc_uid);
                ctx_expiry = 1; /* make it expired now */
        }

        ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
                                              ctx->cc_sec->ps_flvr.sf_flags);

        /* At this point this ctx might have been marked as dead by
         * someone else, in which case nobody will make further use
         * of it. We don't care; marking it UPTODATE helps destroy the
         * server-side context when this one is destroyed. */
        set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

        if (sec_is_reverse(ctx->cc_sec)) {
                CWARN("server installed reverse ctx %p idx %#llx, "
                      "expiry %lld(%+llds)\n", ctx,
                      gss_handle_to_u64(&gctx->gc_handle),
                      ctx->cc_expire,
                      ctx->cc_expire - ktime_get_real_seconds());
        } else {
                CWARN("client refreshed ctx %p idx %#llx (%u->%s), "
                      "expiry %lld(%+llds)\n", ctx,
                      gss_handle_to_u64(&gctx->gc_handle),
                      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
                      ctx->cc_expire,
                      ctx->cc_expire - ktime_get_real_seconds());

                /* install reverse svc ctx for root context */
                if (ctx->cc_vcred.vc_uid == 0)
                        gss_sec_install_rctx(ctx->cc_sec->ps_import,
                                             ctx->cc_sec, ctx);
        }

        sptlrpc_cli_ctx_wakeup(ctx);
}
static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
{
        LASSERT(gctx->gc_base.cc_sec);

        if (gctx->gc_mechctx) {
                lgss_delete_sec_context(&gctx->gc_mechctx);
                gctx->gc_mechctx = NULL;
        }

        if (!rawobj_empty(&gctx->gc_svc_handle)) {
                /* forward ctx: mark buddy reverse svcctx soon-expire. */
                if (!sec_is_reverse(gctx->gc_base.cc_sec) &&
                    !rawobj_empty(&gctx->gc_svc_handle))
                        gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);

                rawobj_free(&gctx->gc_svc_handle);
        }

        rawobj_free(&gctx->gc_handle);
}
/*
 * Based on the sequence number algorithm specified in RFC 2203.
 *
 * Modified for our own problem: an arriving request has a valid sequence
 * number, but unwrapping the request may take a long time, after which its
 * sequence number is no longer valid (it has fallen behind the window).
 * This rarely happens, mostly under extreme load.
 *
 * Note we should not check the sequence number before verifying the
 * integrity of the incoming request, because a single attacking request
 * with a high sequence number could cause all following requests to be
 * dropped.
 *
 * So here we use a multi-phase approach: prepare 2 sequence windows, a
 * "main window" for normal sequence numbers and a "back window" for those
 * that fall behind, with a 3-phase checking mechanism:
 *  0 - before integrity verification, perform an initial sequence check in
 *      the main window, which only tests and doesn't actually set any bits.
 *      If the sequence is high above the window, or fits in the window and
 *      the bit is 0, then accept and proceed to integrity verification.
 *      Otherwise reject this sequence.
 *  1 - after integrity verification, check the main window again. If this
 *      sequence is high above the window, or fits in the window and the bit
 *      is 0, then set the bit and accept; if it fits in the window but the
 *      bit is already set, then reject; if it falls behind the window, then
 *      proceed to phase 2.
 *  2 - check the back window. If it is high above the window, or fits in
 *      the window and the bit is 0, then set the bit and accept. Otherwise
 *      reject.
 *
 * \return 1:   looks like a replay
 * \return 0:   is ok
 * \return -1:  is a replay
 *
 * Note phase 0 is necessary, because otherwise a replayed request whose
 * sequence number lies between the 2 windows couldn't be detected.
 *
 * This mechanism can't totally solve the problem, but it helps reduce the
 * number of valid requests that get dropped.
 */
static
int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
                     __u32 seq_num, int phase)
{
        LASSERT(phase >= 0 && phase <= 2);

        if (seq_num > *max_seq) {
                /*
                 * 1. high above the window
                 */
                if (phase == 0)
                        return 0;

                if (seq_num >= *max_seq + win_size) {
                        memset(window, 0, win_size / 8);
                        *max_seq = seq_num;
                } else {
                        while (*max_seq < seq_num) {
                                (*max_seq)++;
                                __clear_bit((*max_seq) % win_size, window);
                        }
                }
                __set_bit(seq_num % win_size, window);
        } else if (seq_num + win_size <= *max_seq) {
                /*
                 * 2. low behind the window
                 */
                if (phase == 0 || phase == 2)
                        goto replay;

                CWARN("seq %u is %u behind (size %d), check backup window\n",
                      seq_num, *max_seq - win_size - seq_num, win_size);
                return 1;
        } else {
                /*
                 * 3. fit into the window
                 */
                switch (phase) {
                case 0:
                        if (test_bit(seq_num % win_size, window))
                                goto replay;
                        break;
                case 1:
                case 2:
                        if (__test_and_set_bit(seq_num % win_size, window))
                                goto replay;
                        break;
                }
        }

        return 0;

replay:
        CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
               seq_num,
               seq_num + win_size > *max_seq ? "in" : "behind",
               phase == 2 ? "backup " : "main",
               *max_seq, win_size);
        return -1;
}
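/*
 * Illustrative walkthrough (not part of the original code), assuming a
 * main window of 2048 bits and *max_seq == 10000:
 *
 *  - seq 12100 arrives: 12100 >= 10000 + 2048, so in phase 1 the window
 *    is reset and *max_seq becomes 12100.
 *  - seq 9000 arrives late: 9000 + 2048 <= 12100, it is behind the main
 *    window; phase 1 returns 1 and the caller retries against the back
 *    window in phase 2.
 *  - seq 12100 arrives again: it fits in the window but its bit is already
 *    set, so phase 0 (or 1) rejects it as a replay.
 */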
/*
 * Based on the sequence number algorithm specified in RFC 2203.
 *
 * if @set == 0: initial check, don't set any bit in the window
 * if @set == 1: final check, set the bit in the window
 */
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
        int rc = 0;

        spin_lock(&ssd->ssd_lock);

        if (set == 0) {
                /*
                 * phase 0 testing
                 */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 0);
                if (unlikely(rc))
                        gss_stat_oos_record_svc(0, 1);
        } else {
                /*
                 * phase 1 checking main window
                 */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 1);
                switch (rc) {
                case -1:
                        gss_stat_oos_record_svc(1, 1);
                        /* fall through */
                case 0:
                        goto exit;
                }
                /*
                 * phase 2 checking back window
                 */
                rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
                                      &ssd->ssd_max_back, seq_num, 2);
                if (rc)
                        gss_stat_oos_record_svc(2, 1);
                else
                        gss_stat_oos_record_svc(2, 0);
        }
exit:
        spin_unlock(&ssd->ssd_lock);
        return rc;
}
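/*
 * Illustrative caller pattern (not part of the original code); this is
 * how the service side below drives the two-pass interface:
 *
 *      if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0))
 *              return -EACCES;         // phase 0: test only (set == 0)
 *      // ... verify the MIC / unseal the request ...
 *      if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1))
 *              return -EACCES;         // phases 1+2: set bit (set == 1)
 */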
/***************************************
 * cred APIs                           *
 ***************************************/

static inline int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
                                  int msgsize, int privacy)
{
        return gss_mech_payload(NULL, msgsize, privacy);
}
static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
                                struct sptlrpc_flavor *flvr,
                                int reply, int read)
{
        int payload = sizeof(struct ptlrpc_bulk_sec_desc);

        LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);

        if ((!reply && !read) || (reply && read)) {
                switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
                case SPTLRPC_BULK_SVC_NULL:
                        break;
                case SPTLRPC_BULK_SVC_INTG:
                        payload += gss_cli_payload(ctx, 0, 0);
                        break;
                case SPTLRPC_BULK_SVC_PRIV:
                        payload += gss_cli_payload(ctx, 0, 1);
                        break;
                case SPTLRPC_BULK_SVC_AUTH:
                default:
                        LBUG();
                }
        }

        return payload;
}
int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
{
        return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
}
void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
{
        buf[0] = '\0';

        if (flags & PTLRPC_CTX_NEW)
                strlcat(buf, "new,", bufsize);
        if (flags & PTLRPC_CTX_UPTODATE)
                strlcat(buf, "uptodate,", bufsize);
        if (flags & PTLRPC_CTX_DEAD)
                strlcat(buf, "dead,", bufsize);
        if (flags & PTLRPC_CTX_ERROR)
                strlcat(buf, "error,", bufsize);
        if (flags & PTLRPC_CTX_CACHED)
                strlcat(buf, "cached,", bufsize);
        if (flags & PTLRPC_CTX_ETERNAL)
                strlcat(buf, "eternal,", bufsize);
        if (buf[0] == '\0')
                strlcat(buf, "-,", bufsize);
        buf[strlen(buf) - 1] = '\0';
}
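/*
 * Illustrative example (not part of the original code): for a context
 * with PTLRPC_CTX_UPTODATE | PTLRPC_CTX_CACHED set, the buffer first
 * becomes "uptodate,cached," and the final statement above trims the
 * trailing comma, yielding "uptodate,cached". A context with no flags
 * set yields just "-".
 */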
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);
        __u32 flags = 0, seq, svc;
        int rc;

        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(req->rq_cli_ctx == ctx);

        /* nothing to do for context negotiation RPCs */
        if (req->rq_ctx_init)
                RETURN(0);

        svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
        if (req->rq_pack_bulk)
                flags |= LUSTRE_GSS_PACK_BULK;
        if (req->rq_pack_udesc)
                flags |= LUSTRE_GSS_PACK_USER;

redo:
        seq = atomic_inc_return(&gctx->gc_seq);

        rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
                          ctx->cc_sec->ps_part,
                          flags, gctx->gc_proc, seq, svc,
                          &gctx->gc_handle);
        if (rc < 0)
                RETURN(rc);

        /* gss_sign_msg() may take a long time to finish, during which more
         * rpcs could be wrapped up and sent out. If we find too many of
         * them we should repack this rpc, because sending it too late may
         * cause its sequence number to fall behind the window on the server
         * and be dropped. The same applies to gss_cli_ctx_seal().
         *
         * Note: null mode doesn't check the sequence number. */
        if (svc != SPTLRPC_SVC_NULL &&
            atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
                int behind = atomic_read(&gctx->gc_seq) - seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry signing\n", req, behind);
                goto redo;
        }

        req->rq_reqdata_len = rc;
        RETURN(0);
}
static
int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
                                  struct ptlrpc_request *req,
                                  struct gss_header *ghdr)
{
        struct gss_err_header *errhdr;
        int rc;

        LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);

        errhdr = (struct gss_err_header *) ghdr;

        CWARN("req x%llu/t%llu, ctx %p idx %#llx(%u->%s): "
              "%sserver respond (%08x/%08x)\n",
              req->rq_xid, req->rq_transno, ctx,
              gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
              ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
              sec_is_reverse(ctx->cc_sec) ? "reverse " : "",
              errhdr->gh_major, errhdr->gh_minor);

        /* context fini rpc, let it fail */
        if (req->rq_ctx_fini) {
                CWARN("context fini rpc failed\n");
                return -EINVAL;
        }

        /* reverse sec, just return the error; don't expire this ctx because
         * it's crucial for callback rpcs. Note if the callback rpc failed
         * because of a bit flip during network transfer, the client will be
         * evicted directly, so more gracefully we probably want to let it
         * retry a number of times. */
        if (sec_is_reverse(ctx->cc_sec))
                return -EINVAL;

        if (errhdr->gh_major != GSS_S_NO_CONTEXT &&
            errhdr->gh_major != GSS_S_BAD_SIG)
                return -EACCES;

        /* A server returning NO_CONTEXT may be caused by context expiry or
         * server reboot/failover. We try to refresh a new ctx, which is
         * transparent to the upper layer.
         *
         * In some cases our gss handle may happen to be identical to another
         * handle, since the handle itself is not fully random. In the krb5
         * case GSS_S_BAD_SIG will then be returned; other mechanisms may
         * return other gss errors.
         *
         * If we add a new mechanism, make sure the correct error is returned
         * in this case. */
        CWARN("%s: server might have lost the context, retrying\n",
              errhdr->gh_major == GSS_S_NO_CONTEXT ? "NO_CONTEXT" : "BAD_SIG");

        sptlrpc_cli_ctx_expire(ctx);

        /* We need to replace the ctx right here; otherwise during resend
         * we'd hit the logic in sptlrpc_req_refresh_ctx() which keeps the
         * ctx with the RESEND flag, and thus we'd never get rid of this
         * ctx. */
        rc = sptlrpc_req_replace_dead_ctx(req);
        if (rc == 0)
                req->rq_resend = 1;

        return rc;
}
int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx *gctx;
        struct gss_header *ghdr, *reqhdr;
        struct lustre_msg *msg = req->rq_repdata;
        __u32 major;
        int pack_bulk, swabbed, rc = 0;

        ENTRY;

        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(msg);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* special case for context negotiation: rq_repmsg/rq_replen are not
         * actually used currently, but an early reply is always treated
         * normally */
        if (req->rq_ctx_init && !req->rq_early) {
                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];
                RETURN(0);
        }

        if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
                CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        swabbed = ptlrpc_rep_need_swab(req);

        ghdr = gss_swab_header(msg, 0, swabbed);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks */
        reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));
        LASSERT(reqhdr);

        if (ghdr->gh_version != reqhdr->gh_version) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, reqhdr->gh_version);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;

                if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)) {
                        CERROR("%s bulk flag in reply\n",
                               req->rq_pack_bulk ? "missing" : "unexpected");
                        RETURN(-EPROTO);
                }

                if (ghdr->gh_seq != reqhdr->gh_seq) {
                        CERROR("seqnum %u mismatch, expect %u\n",
                               ghdr->gh_seq, reqhdr->gh_seq);
                        RETURN(-EPROTO);
                }

                if (ghdr->gh_svc != reqhdr->gh_svc) {
                        CERROR("svc %u mismatch, expect %u\n",
                               ghdr->gh_svc, reqhdr->gh_svc);
                        RETURN(-EPROTO);
                }

                if (swabbed)
                        gss_header_swabber(ghdr);

                major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
                if (major != GSS_S_COMPLETE) {
                        CERROR("failed to verify reply: %x\n", major);
                        RETURN(-EPERM);
                }

                if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
                        __u32 cksum;

                        cksum = crc32_le(!(__u32) 0,
                                         lustre_msg_buf(msg, 1, 0),
                                         lustre_msg_buflen(msg, 1));
                        if (cksum != msg->lm_cksum) {
                                CWARN("early reply checksum mismatch: "
                                      "%08x != %08x\n", cksum, msg->lm_cksum);
                                RETURN(-EPROTO);
                        }
                }

                if (pack_bulk) {
                        /* bulk checksum is right after the lustre msg */
                        if (msg->lm_bufcount < 3) {
                                CERROR("Invalid reply bufcount %u\n",
                                       msg->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        rc = bulk_sec_desc_unpack(msg, 2, swabbed);
                        if (rc) {
                                CERROR("unpack bulk desc: %d\n", rc);
                                RETURN(rc);
                        }
                }

                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];
                break;
        case PTLRPC_GSS_PROC_ERR:
                if (req->rq_early) {
                        CERROR("server return error with early reply\n");
                        rc = -EPROTO;
                } else {
                        rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                }
                break;
        default:
                CERROR("unknown gss proc %d\n", ghdr->gh_proc);
                rc = -EPROTO;
        }

        RETURN(rc);
}
int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx *gctx;
        rawobj_t hdrobj, msgobj, token;
        struct gss_header *ghdr;
        __u32 buflens[2], major;
        int wiresize, rc;

        ENTRY;

        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(req->rq_reqlen);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* final clear data length */
        req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
                                                 req->rq_clrbuf->lm_buflens);

        /* calculate wire data length */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
        wiresize = lustre_msg_size_v2(2, buflens);

        /* allocate wire buffer */
        if (req->rq_pool) {
                /* pre-allocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf != req->rq_clrbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);
        } else {
                OBD_ALLOC_LARGE(req->rq_reqbuf, wiresize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);
                req->rq_reqbuf_len = wiresize;
        }

        lustre_init_msg_v2(req->rq_reqbuf, 2, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        /* gss header */
        ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = gctx->gc_proc;
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = gctx->gc_handle.len;
        memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
        if (req->rq_pack_bulk)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
        if (req->rq_pack_udesc)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;

        ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);

redo:
        /* buffer objects */
        hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
        hdrobj.data = (__u8 *) ghdr;
        msgobj.len = req->rq_clrdata_len;
        msgobj.data = (__u8 *) req->rq_clrbuf;
        token.len = lustre_msg_buflen(req->rq_reqbuf, 1);
        token.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);

        major = lgss_wrap(gctx->gc_mechctx, &hdrobj, &msgobj,
                          req->rq_clrbuf_len, &token);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: wrap message error: %08x\n", major);
                GOTO(err_free, rc = -EPERM);
        }
        LASSERT(token.len <= buflens[1]);

        /* see explanation in gss_cli_ctx_sign() */
        if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
                     GSS_SEQ_REPACK_THRESHOLD)) {
                int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry sealing\n", req, behind);

                ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
                goto redo;
        }

        /* now set the final wire data length */
        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len, 0);
        RETURN(0);

err_free:
        if (!req->rq_pool) {
                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
        RETURN(rc);
}
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx *gctx;
        struct gss_header *ghdr;
        struct lustre_msg *msg = req->rq_repdata;
        int msglen, pack_bulk, swabbed, rc;
        __u32 major;

        ENTRY;

        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(req->rq_ctx_init == 0);
        LASSERT(msg);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        swabbed = ptlrpc_rep_need_swab(req);

        ghdr = gss_swab_header(msg, 0, swabbed);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, PTLRPC_GSS_VERSION);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;

                if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)) {
                        CERROR("%s bulk flag in reply\n",
                               req->rq_pack_bulk ? "missing" : "unexpected");
                        RETURN(-EPROTO);
                }

                if (swabbed)
                        gss_header_swabber(ghdr);

                /* use rq_repdata_len as the buffer size, which assumes
                 * unsealing doesn't need extra memory space. For precise
                 * control we'd better calculate the actual buffer size as
                 * (repbuf_len - offset - repdata_len) */
                major = gss_unseal_msg(gctx->gc_mechctx, msg,
                                       &msglen, req->rq_repdata_len);
                if (major != GSS_S_COMPLETE) {
                        CERROR("failed to unwrap reply: %x\n", major);
                        rc = -EPERM;
                        break;
                }

                swabbed = __lustre_unpack_msg(msg, msglen);
                if (swabbed < 0) {
                        CERROR("Failed to unpack after decryption\n");
                        RETURN(-EPROTO);
                }

                if (msg->lm_bufcount < 1) {
                        CERROR("Invalid reply buffer: empty\n");
                        RETURN(-EPROTO);
                }

                if (pack_bulk) {
                        if (msg->lm_bufcount < 2) {
                                CERROR("bufcount %u: missing bulk sec desc\n",
                                       msg->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        /* bulk checksum is the last segment */
                        if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
                                                 swabbed))
                                RETURN(-EPROTO);
                }

                req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
                req->rq_replen = msg->lm_buflens[0];

                rc = 0;
                break;
        case PTLRPC_GSS_PROC_ERR:
                if (req->rq_early) {
                        CERROR("server return error with early reply\n");
                        rc = -EPROTO;
                } else {
                        rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                }
                break;
        default:
                CERROR("unexpected proc %d\n", ghdr->gh_proc);
                rc = -EPERM;
        }

        RETURN(rc);
}
/*********************************************
 * reverse context installation              *
 *********************************************/

static inline
int gss_install_rvs_svc_ctx(struct obd_import *imp,
                            struct gss_sec *gsec,
                            struct gss_cli_ctx *gctx)
{
        return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
}
/*********************************************
 * GSS security APIs                         *
 *********************************************/
int gss_sec_create_common(struct gss_sec *gsec,
                          struct ptlrpc_sec_policy *policy,
                          struct obd_import *imp,
                          struct ptlrpc_svc_ctx *svcctx,
                          struct sptlrpc_flavor *sf)
{
        struct ptlrpc_sec *sec;

        LASSERT(imp);
        LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);

        gsec->gs_mech = lgss_subflavor_to_mech(
                                SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
        if (!gsec->gs_mech) {
                CERROR("gss backend 0x%x not found\n",
                       SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
                return -EOPNOTSUPP;
        }

        spin_lock_init(&gsec->gs_lock);
        gsec->gs_rvs_hdl = 0ULL;

        /* initialize upper ptlrpc_sec */
        sec = &gsec->gs_base;
        sec->ps_policy = policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_flvr = *sf;
        sec->ps_import = class_import_get(imp);
        spin_lock_init(&sec->ps_lock);
        INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_sepol_mtime = 0;
        sec->ps_sepol_checknext = ktime_set(0, 0);
        sec->ps_sepol[0] = '\0';

        if (!svcctx) {
                sec->ps_gc_interval = GSS_GC_INTERVAL;
        } else {
                LASSERT(sec_is_reverse(sec));

                /* never do gc on reverse sec */
                sec->ps_gc_interval = 0;
        }

        if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
                sptlrpc_enc_pool_add_user();

        CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
               policy->sp_name, gsec);
        return 0;
}
void gss_sec_destroy_common(struct gss_sec *gsec)
{
        struct ptlrpc_sec *sec = &gsec->gs_base;

        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);

        if (gsec->gs_mech) {
                lgss_mech_put(gsec->gs_mech);
                gsec->gs_mech = NULL;
        }

        class_import_put(sec->ps_import);

        if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
                sptlrpc_enc_pool_del_user();
}

void gss_sec_kill(struct ptlrpc_sec *sec)
{
}
int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_ctx_ops *ctxops,
                            struct vfs_cred *vcred)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        gctx->gc_win = 0;
        atomic_set(&gctx->gc_seq, 0);

        INIT_HLIST_NODE(&ctx->cc_cache);
        atomic_set(&ctx->cc_refcount, 0);
        ctx->cc_sec = sec;
        ctx->cc_ops = ctxops;
        ctx->cc_expire = 0;
        ctx->cc_flags = PTLRPC_CTX_NEW;
        ctx->cc_vcred = *vcred;
        spin_lock_init(&ctx->cc_lock);
        INIT_LIST_HEAD(&ctx->cc_req_list);
        INIT_LIST_HEAD(&ctx->cc_gc_chain);

        /* take a ref on the owning sec, balanced in ctx destruction */
        atomic_inc(&sec->ps_refcount);
        /* statistic only */
        atomic_inc(&sec->ps_nctx);

        CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
               sec->ps_policy->sp_name, ctx->cc_sec,
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        return 0;
}
/*
 * return value:
 *   1: the context has been taken care of by someone else
 *   0: proceed to really destroy the context locally
 */
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        /*
         * Remove the UPTODATE flag of a reverse ctx so we won't send a fini
         * rpc; this avoids potential problems of the client-side reverse
         * svc ctx being mis-destroyed in various recovery scenarios. Anyway
         * the client can manage its reverse ctx well by associating it with
         * its buddy ctx.
         */
        if (sec_is_reverse(sec))
                ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;

        if (gctx->gc_mechctx) {
                /* the final context fini rpc will use this ctx too, and it's
                 * asynchronous, finished by request_out_callback(). So we
                 * add a refcount; whoever finally drops the refcount to 0 is
                 * responsible for the rest of the destruction. */
                atomic_inc(&ctx->cc_refcount);

                gss_do_ctx_fini_rpc(gctx);
                gss_cli_ctx_finalize(gctx);

                if (!atomic_dec_and_test(&ctx->cc_refcount))
                        return 1;
        }

        if (sec_is_reverse(sec))
                CWARN("reverse sec %p: destroy ctx %p\n",
                      ctx->cc_sec, ctx);
        else
                CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
                      sec->ps_policy->sp_name, ctx->cc_sec,
                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

        return 0;
}
static
int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int svc, int msgsize)
{
        int bufsize, txtsize;
        int bufcnt = 2;
        __u32 buflens[5];

        ENTRY;

        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - user descriptor (optional)
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode.
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        txtsize = buflens[0];

        buflens[1] = msgsize;
        if (svc == SPTLRPC_SVC_INTG)
                txtsize += buflens[1];

        if (req->rq_pack_udesc) {
                buflens[bufcnt] = sptlrpc_current_user_desc_size();
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_pack_bulk) {
                buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
                                                       &req->rq_flvr,
                                                       0, req->rq_bulk_read);
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_ctx_init)
                buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
        else if (svc != SPTLRPC_SVC_NULL)
                buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);

        bufsize = lustre_msg_size_v2(bufcnt, buflens);

        if (!req->rq_reqbuf) {
                bufsize = size_roundup_power2(bufsize);

                OBD_ALLOC_LARGE(req->rq_reqbuf, bufsize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = bufsize;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= bufsize);
                memset(req->rq_reqbuf, 0, bufsize);
        }

        lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
        LASSERT(req->rq_reqmsg);

        /* pack user desc here; later we might leave current user's process */
        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, 2);

        RETURN(0);
}
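/*
 * Illustrative sizing example (not part of the original code): for
 * svc == SPTLRPC_SVC_INTG with a user descriptor and no bulk, the
 * segments come out as
 *
 *      buflens[0] = PTLRPC_GSS_HEADER_SIZE     (gss header)
 *      buflens[1] = msgsize                    (lustre message)
 *      buflens[2] = sptlrpc_current_user_desc_size()
 *      buflens[3] = gss_cli_payload(ctx, txtsize, 0)
 *
 * with txtsize covering segments 0-2, since INTG signs everything that
 * precedes the signature segment.
 */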
static
int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        __u32 ibuflens[3], wbuflens[2];
        int ibufcnt;
        int clearsize, wiresize;

        ENTRY;

        LASSERT(req->rq_clrbuf == NULL);
        LASSERT(req->rq_clrbuf_len == 0);

        /* Inner (clear) buffers
         * - lustre message
         * - user descriptor (optional)
         * - bulk checksum (optional)
         */
        ibufcnt = 1;
        ibuflens[0] = msgsize;

        if (req->rq_pack_udesc)
                ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
        if (req->rq_pack_bulk)
                ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
                                                           &req->rq_flvr, 0,
                                                           req->rq_bulk_read);

        clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
        /* to allow appending padding during encryption */
        clearsize += GSS_MAX_CIPHER_BLOCK;

        /* Wrapper (wire) buffers
         * - gss header
         * - cipher text
         */
        wbuflens[0] = PTLRPC_GSS_HEADER_SIZE;
        wbuflens[1] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
        wiresize = lustre_msg_size_v2(2, wbuflens);

        if (req->rq_pool) {
                /* rq_reqbuf is preallocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);

                memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);

                /* if the pre-allocated buffer is big enough, we just pack
                 * both the clear buf and the request buf into it, to avoid
                 * a further allocation. */
                if (clearsize + wiresize <= req->rq_reqbuf_len) {
                        req->rq_clrbuf =
                                (void *) (((char *) req->rq_reqbuf) + wiresize);
                } else {
                        CWARN("pre-allocated buf size %d is not enough for "
                              "both clear (%d) and cipher (%d) text, proceed "
                              "with extra allocation\n", req->rq_reqbuf_len,
                              clearsize, wiresize);
                }
        }

        if (!req->rq_clrbuf) {
                clearsize = size_roundup_power2(clearsize);

                OBD_ALLOC_LARGE(req->rq_clrbuf, clearsize);
                if (!req->rq_clrbuf)
                        RETURN(-ENOMEM);
        }
        req->rq_clrbuf_len = clearsize;

        lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_clrbuf, 1);

        RETURN(0);
}
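/*
 * Illustrative layout note (not part of the original code): in privacy
 * mode two lustre_msg buffers are nested:
 *
 *      wire buffer (rq_reqbuf):  [ gss header | cipher text ]
 *      clear buffer (rq_clrbuf): [ lustre msg | udesc? | bulk csum? ]
 *
 * gss_cli_ctx_seal() later wraps the whole clear buffer into the cipher
 * text segment of the wire buffer.
 */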
/*
 * NOTE: any change of request buffer allocation should also consider
 * changing the enlarge_reqbuf() series of functions.
 */
int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_pack_bulk ||
                (req->rq_bulk_read || req->rq_bulk_write));

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
        case SPTLRPC_SVC_PRIV:
                return gss_alloc_reqbuf_priv(sec, req, msgsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}
void gss_free_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        int privacy;

        ENTRY;

        LASSERT(!req->rq_pool || req->rq_reqbuf);
        privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;

        if (!req->rq_clrbuf)
                goto release_reqbuf;

        /* release clear buffer */
        LASSERT(privacy);
        LASSERT(req->rq_clrbuf_len);

        if (req->rq_pool == NULL ||
            req->rq_clrbuf < req->rq_reqbuf ||
            (char *) req->rq_clrbuf >=
            (char *) req->rq_reqbuf + req->rq_reqbuf_len)
                OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);

        req->rq_clrbuf = NULL;
        req->rq_clrbuf_len = 0;

release_reqbuf:
        if (!req->rq_pool && req->rq_reqbuf) {
                LASSERT(req->rq_reqbuf_len);

                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }

        EXIT;
}
static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
{
        bufsize = size_roundup_power2(bufsize);

        OBD_ALLOC_LARGE(req->rq_repbuf, bufsize);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = bufsize;
        return 0;
}
static
int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int svc, int msgsize)
{
        int txtsize;
        __u32 buflens[4];
        int bufcnt = 2;
        int alloc_size;

        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode.
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        txtsize = buflens[0];

        buflens[1] = msgsize;
        if (svc == SPTLRPC_SVC_INTG)
                txtsize += buflens[1];

        if (req->rq_pack_bulk) {
                buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
                                                       &req->rq_flvr,
                                                       1, req->rq_bulk_read);
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_ctx_init)
                buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
        else if (svc != SPTLRPC_SVC_NULL)
                buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);

        alloc_size = lustre_msg_size_v2(bufcnt, buflens);

        /* add space for early reply */
        alloc_size += gss_at_reply_off_integ;

        return do_alloc_repbuf(req, alloc_size);
}
static
int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        int txtsize;
        __u32 buflens[2];
        int bufcnt;
        int alloc_size;

        /* inner buffers */
        bufcnt = 1;
        buflens[0] = msgsize;

        if (req->rq_pack_bulk)
                buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
                                                         &req->rq_flvr,
                                                         1, req->rq_bulk_read);
        txtsize = lustre_msg_size_v2(bufcnt, buflens);
        txtsize += GSS_MAX_CIPHER_BLOCK;

        /* wrapper buffers */
        bufcnt = 2;
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);

        alloc_size = lustre_msg_size_v2(bufcnt, buflens);
        /* add space for early reply */
        alloc_size += gss_at_reply_off_priv;

        return do_alloc_repbuf(req, alloc_size);
}
int gss_alloc_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_pack_bulk ||
                (req->rq_bulk_read || req->rq_bulk_write));

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
        case SPTLRPC_SVC_PRIV:
                return gss_alloc_repbuf_priv(sec, req, msgsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}
void gss_free_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
        req->rq_repdata = NULL;
        req->rq_repdata_len = 0;
}
static int get_enlarged_msgsize(struct lustre_msg *msg,
                                int segment, int newsize)
{
        int save, newmsg_size;

        LASSERT(newsize >= msg->lm_buflens[segment]);

        save = msg->lm_buflens[segment];
        msg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment] = save;

        return newmsg_size;
}

static int get_enlarged_msgsize2(struct lustre_msg *msg,
                                 int segment1, int newsize1,
                                 int segment2, int newsize2)
{
        int save1, save2, newmsg_size;

        LASSERT(newsize1 >= msg->lm_buflens[segment1]);
        LASSERT(newsize2 >= msg->lm_buflens[segment2]);

        save1 = msg->lm_buflens[segment1];
        save2 = msg->lm_buflens[segment2];
        msg->lm_buflens[segment1] = newsize1;
        msg->lm_buflens[segment2] = newsize2;
        newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment1] = save1;
        msg->lm_buflens[segment2] = save2;

        return newmsg_size;
}
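/*
 * Illustrative example (not part of the original code): if segment 1 of a
 * 3-segment message currently holds 100 bytes and the caller wants 300,
 * get_enlarged_msgsize(msg, 1, 300) temporarily patches lm_buflens[1] to
 * 300, recomputes the total with lustre_msg_size_v2(), then restores the
 * old length -- it only answers "how big would the message become"; it
 * does not enlarge anything itself.
 */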
static
int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int svc,
                            int segment, int newsize)
{
        struct lustre_msg *newbuf;
        int txtsize, sigsize = 0, i;
        int newmsg_size, newbuf_size;

        /*
         * gss header is at seg 0;
         * embedded msg is at seg 1;
         * signature (if any) is at the last seg
         */
        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);

        /* 1. compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
        LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);

        /* 2. compute new wrapper msg size */
        if (svc == SPTLRPC_SVC_NULL) {
                /* no signature, get size directly */
                newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
                                                   1, newmsg_size);
        } else {
                txtsize = req->rq_reqbuf->lm_buflens[0];

                if (svc == SPTLRPC_SVC_INTG) {
                        for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
                                txtsize += req->rq_reqbuf->lm_buflens[i];
                        txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
                }

                sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
                LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));

                newbuf_size = get_enlarged_msgsize2(
                                        req->rq_reqbuf,
                                        1, newmsg_size,
                                        msg_last_segidx(req->rq_reqbuf),
                                        sigsize);
        }

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC_LARGE(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                /* Must lock this, so that an otherwise unprotected change of
                 * rq_reqmsg is not racing with parallel processing of
                 * imp_replay_list traversing threads. See LU-3333.
                 * This is a bandaid at best; we really need to deal with this
                 * in the request enlarging code before the unpacking that's
                 * already done. */
                if (req->rq_import)
                        spin_lock(&req->rq_import->imp_lock);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);

                if (req->rq_import)
                        spin_unlock(&req->rq_import->imp_lock);
        }

        /* do the enlargement, from wrapper to embedded, from end to begin */
        if (svc != SPTLRPC_SVC_NULL)
                _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
                                             msg_last_segidx(req->rq_reqbuf),
                                             sigsize);

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}
static
int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int segment, int newsize)
{
        struct lustre_msg *newclrbuf;
        int newmsg_size, newclrbuf_size, newcipbuf_size;
        __u32 buflens[3];

        /*
         * embedded msg is at seg 0 of clear buffer;
         * cipher text is at seg 2 of cipher buffer;
         */
        LASSERT(req->rq_pool ||
                (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
        LASSERT(req->rq_reqbuf == NULL ||
                (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);

        /* compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);

        /* compute new clear buffer size */
        newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
        newclrbuf_size += GSS_MAX_CIPHER_BLOCK;

        /* compute new cipher buffer size */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
        buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
        newcipbuf_size = lustre_msg_size_v2(3, buflens);

        /* handle the case that we put both the clear buf and the cipher buf
         * into a pre-allocated single buffer. */
        if (unlikely(req->rq_pool) &&
            req->rq_clrbuf >= req->rq_reqbuf &&
            (char *) req->rq_clrbuf <
            (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                /* it couldn't be better: we still fit into the
                 * pre-allocated buffer. */
                if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
                        void *src, *dst;

                        if (req->rq_import)
                                spin_lock(&req->rq_import->imp_lock);
                        /* move clear text backward. */
                        src = req->rq_clrbuf;
                        dst = (char *) req->rq_reqbuf + newcipbuf_size;

                        memmove(dst, src, req->rq_clrbuf_len);

                        req->rq_clrbuf = (struct lustre_msg *) dst;
                        req->rq_clrbuf_len = newclrbuf_size;
                        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);

                        if (req->rq_import)
                                spin_unlock(&req->rq_import->imp_lock);
                } else {
                        /* sadly we have to split out the clear buffer */
                        LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
                        LASSERT(req->rq_clrbuf_len < newclrbuf_size);
                }
        }

        if (req->rq_clrbuf_len < newclrbuf_size) {
                newclrbuf_size = size_roundup_power2(newclrbuf_size);

                OBD_ALLOC_LARGE(newclrbuf, newclrbuf_size);
                if (newclrbuf == NULL)
                        RETURN(-ENOMEM);

                /* Must lock this, so that an otherwise unprotected change of
                 * rq_reqmsg is not racing with parallel processing of
                 * imp_replay_list traversing threads. See LU-3333.
                 * This is a bandaid at best; we really need to deal with this
                 * in the request enlarging code before the unpacking that's
                 * already done. */
                if (req->rq_import)
                        spin_lock(&req->rq_import->imp_lock);

                memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);

                if (req->rq_reqbuf == NULL ||
                    req->rq_clrbuf < req->rq_reqbuf ||
                    (char *) req->rq_clrbuf >=
                    (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                        OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
                }

                req->rq_clrbuf = newclrbuf;
                req->rq_clrbuf_len = newclrbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);

                if (req->rq_import)
                        spin_unlock(&req->rq_import->imp_lock);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
        req->rq_reqlen = newmsg_size;

        RETURN(0);
}
int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int segment, int newsize)
{
        int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
        case SPTLRPC_SVC_PRIV:
                return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}
int gss_sec_install_rctx(struct obd_import *imp,
                         struct ptlrpc_sec *sec,
                         struct ptlrpc_cli_ctx *ctx)
{
        struct gss_sec *gsec;
        struct gss_cli_ctx *gctx;
        int rc;

        gsec = container_of(sec, struct gss_sec, gs_base);
        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
        return rc;
}
/********************************************
 * server side API                          *
 ********************************************/

static inline
int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
{
        LASSERT(grctx);
        return (grctx->src_init || grctx->src_init_continue ||
                grctx->src_err_notify);
}

static
void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
{
        if (grctx->src_ctx)
                gss_svc_upcall_put_ctx(grctx->src_ctx);

        sptlrpc_policy_put(grctx->src_base.sc_policy);
        OBD_FREE_PTR(grctx);
}

static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
        atomic_inc(&grctx->src_base.sc_refcount);
}

static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);

        if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
                gss_svc_reqctx_free(grctx);
}
static
int gss_svc_sign(struct ptlrpc_request *req,
                 struct ptlrpc_reply_state *rs,
                 struct gss_svc_reqctx *grctx,
                 __u32 svc)
{
        __u32 flags = 0;
        int rc;

        ENTRY;

        LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));

        /* embedded lustre_msg might have been shrunk */
        if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
                lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);

        if (req->rq_pack_bulk)
                flags |= LUSTRE_GSS_PACK_BULK;

        rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
                          LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
                          grctx->src_wirectx.gw_seq, svc, NULL);
        if (rc < 0)
                RETURN(rc);

        rs->rs_repdata_len = rc;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = gss_at_reply_off_integ;
                else
                        req->rq_reply_off = 0;
        } else {
                if (svc == SPTLRPC_SVC_NULL)
                        rs->rs_repbuf->lm_cksum = crc32_le(!(__u32) 0,
                                        lustre_msg_buf(rs->rs_repbuf, 1, 0),
                                        lustre_msg_buflen(rs->rs_repbuf, 1));
                req->rq_reply_off = 0;
        }

        RETURN(0);
}
int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct ptlrpc_reply_state *rs;
        struct gss_err_header *ghdr;
        int replen = sizeof(struct ptlrpc_body);
        int rc;

        ENTRY;

        //if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
        //        RETURN(-EINVAL);

        grctx->src_err_notify = 1;
        grctx->src_reserve_len = 0;

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
        if (rc) {
                CERROR("could not pack reply, err %d\n", rc);
                RETURN(rc);
        }

        /* gss hdr */
        rs = req->rq_reply_state;
        LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
        ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        ghdr->gh_major = major;
        ghdr->gh_minor = minor;
        ghdr->gh_handle.len = 0; /* fake context handle */

        rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                                rs->rs_repbuf->lm_buflens);

        CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
               major, minor, libcfs_nid2str(req->rq_peer.nid));
        RETURN(0);
}
static
int gss_svc_handle_init(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct lustre_msg *reqbuf = req->rq_reqbuf;
        struct obd_uuid *uuid;
        struct obd_device *target;
        rawobj_t uuid_obj, rvs_hdl, in_token;
        __u32 lustre_svc;
        __u32 *secdata, seclen;
        int swabbed, rc;

        ENTRY;

        CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
               libcfs_nid2str(req->rq_peer.nid));

        req->rq_ctx_init = 1;

        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                CERROR("unexpected bulk flag\n");
                RETURN(SECSVC_DROP);
        }

        if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
                CERROR("proc %u: invalid handle length %u\n",
                       gw->gw_proc, gw->gw_handle.len);
                RETURN(SECSVC_DROP);
        }

        if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4) {
                CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        swabbed = ptlrpc_req_need_swab(req);

        /* ctx initiate payload is in the last segment */
        secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
        seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];

        if (seclen < 4 + 4) {
                CERROR("sec size %d too small\n", seclen);
                RETURN(SECSVC_DROP);
        }

        /* lustre svc type */
        lustre_svc = le32_to_cpu(*secdata++);
        seclen -= 4;

        /* extract target uuid; note this code is somewhat fragile because
         * it touches the internal structure of obd_uuid */
        if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
                CERROR("failed to extract target uuid\n");
                RETURN(SECSVC_DROP);
        }
        uuid_obj.data[uuid_obj.len - 1] = '\0';

        uuid = (struct obd_uuid *) uuid_obj.data;
        target = class_uuid2obd(uuid);
        if (!target || target->obd_stopping || !target->obd_set_up) {
                CERROR("target '%s' is not available for context init (%s)\n",
                       uuid->uuid, target == NULL ? "no target" :
                       (target->obd_stopping ? "stopping" : "not set up"));
                RETURN(SECSVC_DROP);
        }

        /* extract reverse handle */
        if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
                CERROR("failed to extract reverse handle\n");
                RETURN(SECSVC_DROP);
        }

        /* extract token */
        if (rawobj_extract(&in_token, &secdata, &seclen)) {
                CERROR("can't extract token\n");
                RETURN(SECSVC_DROP);
        }

        rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
                                        &rvs_hdl, &in_token);
        if (rc != SECSVC_OK)
                RETURN(rc);

        if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
            grctx->src_ctx->gsc_usr_root)
                CWARN("create svc ctx %p: user from %s authenticated as %s\n",
                      grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
                      grctx->src_ctx->gsc_usr_root ? "root" :
                      (grctx->src_ctx->gsc_usr_mds ? "mds" :
                       (grctx->src_ctx->gsc_usr_oss ? "oss" : "null")));
        else
                CWARN("create svc ctx %p: accept user %u from %s\n",
                      grctx->src_ctx, grctx->src_ctx->gsc_uid,
                      libcfs_nid2str(req->rq_peer.nid));

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor\n");
                        RETURN(SECSVC_DROP);
                }
                if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
        }

        req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
        req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);

        RETURN(rc);
}
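/*
 * Illustrative wire format note (not part of the original code): the
 * security payload parsed above, i.e. the last segment of a context init
 * request, is laid out as
 *
 *      __u32 lustre_svc;      // lustre service type
 *      rawobj_t uuid_obj;     // target uuid (length-prefixed)
 *      rawobj_t rvs_hdl;      // reverse context handle
 *      rawobj_t in_token;     // the gss negotiation token
 *
 * with each rawobj extracted in order by rawobj_extract().
 */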
/*
 * The last segment must be the gss signature.
 */
static
int gss_svc_verify_request(struct ptlrpc_request *req,
                           struct gss_svc_reqctx *grctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        struct lustre_msg *msg = req->rq_reqbuf;
        int offset = 2;
        int swabbed;

        ENTRY;

        *major = GSS_S_COMPLETE;

        if (msg->lm_bufcount < 2) {
                CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
                RETURN(-EINVAL);
        }

        if (gw->gw_svc == SPTLRPC_SVC_NULL)
                goto verified;

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
        if (*major != GSS_S_COMPLETE) {
                CERROR("failed to verify request: %x\n", *major);
                RETURN(-EACCES);
        }

        if (gctx->gsc_reverse == 0 &&
            gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

verified:
        swabbed = ptlrpc_req_need_swab(req);

        /* user descriptor */
        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (msg->lm_bufcount < (offset + 1)) {
                        CERROR("no user desc included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        /* check bulk_sec_desc data */
        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                if (msg->lm_bufcount < (offset + 1)) {
                        CERROR("missing bulk sec descriptor\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset, swabbed))
                        RETURN(-EINVAL);

                req->rq_pack_bulk = 1;
                grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
                grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
        }

        req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
        req->rq_reqlen = msg->lm_buflens[1];
        RETURN(0);
}
static
int gss_svc_unseal_request(struct ptlrpc_request *req,
                           struct gss_svc_reqctx *grctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        struct lustre_msg *msg = req->rq_reqbuf;
        int swabbed, msglen, offset = 1;

        ENTRY;

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
                                &msglen, req->rq_reqdata_len);
        if (*major != GSS_S_COMPLETE) {
                CERROR("failed to unwrap request: %x\n", *major);
                RETURN(-EACCES);
        }

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        swabbed = __lustre_unpack_msg(msg, msglen);
        if (swabbed < 0) {
                CERROR("Failed to unpack after decryption\n");
                RETURN(-EINVAL);
        }
        req->rq_reqdata_len = msglen;

        if (msg->lm_bufcount < 1) {
                CERROR("Invalid buffer: is empty\n");
                RETURN(-EINVAL);
        }

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no user descriptor included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no bulk checksum included\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset, swabbed))
                        RETURN(-EINVAL);

                req->rq_pack_bulk = 1;
                grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
                grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
        }

        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
        RETURN(0);
}
static
int gss_svc_handle_data(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major = 0;
        int                    rc = 0;
        ENTRY;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                major = GSS_S_NO_CONTEXT;
                goto error;
        }

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_verify_request(req, grctx, gw, &major);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_unseal_request(req, grctx, gw, &major);
                break;
        default:
                CERROR("unsupported gss service %d\n", gw->gw_svc);
                rc = -EINVAL;
        }

        if (rc == 0)
                RETURN(SECSVC_OK);

        CERROR("svc %u failed: major 0x%08x: req xid %llu ctx %p idx "
               "%#llx(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
               grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
               grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
error:
        /* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
         * might happen after a server reboot, to allow recovery */
        if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
            gss_pack_err_notify(req, major, 0) == 0)
                RETURN(SECSVC_COMPLETE);

        RETURN(SECSVC_DROP);
}
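/*
 * The SECSVC_* return codes above drive the caller in sptlrpc:
 * SECSVC_OK means the request was authenticated and should be processed;
 * SECSVC_COMPLETE means a reply (here the error notify) has already been
 * prepared and the request itself must not be handled further;
 * SECSVC_DROP means discard silently.  Clients are notified only for
 * NO_CONTEXT/BAD_SIG so that they can re-negotiate a context after a
 * server reboot.
 */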
static
int gss_svc_handle_destroy(struct ptlrpc_request *req,
                           struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major;
        ENTRY;

        req->rq_ctx_fini = 1;
        req->rq_no_reply = 1;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
                RETURN(SECSVC_DROP);
        }

        if (gw->gw_svc != SPTLRPC_SVC_INTG) {
                CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
                RETURN(SECSVC_DROP);
        }

        if (gss_svc_verify_request(req, grctx, gw, &major))
                RETURN(SECSVC_DROP);

        CWARN("destroy svc ctx %p idx %#llx (%u->%s)\n",
              grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
              grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        gss_svc_upcall_destroy_ctx(grctx->src_ctx);

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (req->rq_reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }
                if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
                                             ptlrpc_req_need_swab(req))) {
                        CERROR("Mal-formed user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
        }

        RETURN(SECSVC_OK);
}
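/*
 * Note: a destroy RPC carries no reply (rq_no_reply is set above), so a
 * malformed or missing user descriptor is only logged and the destroy
 * still completes -- hence the "ignore it" error messages.
 */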
int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
{
        struct gss_header      *ghdr;
        struct gss_svc_reqctx  *grctx;
        struct gss_wire_ctx    *gw;
        int                     swabbed, rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_svc_ctx == NULL);

        if (req->rq_reqbuf->lm_bufcount < 2) {
                CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        swabbed = ptlrpc_req_need_swab(req);

        ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(SECSVC_DROP);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u, expect %u\n", ghdr->gh_version,
                       PTLRPC_GSS_VERSION);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = ghdr->gh_sp;

        /* alloc grctx data */
        OBD_ALLOC_PTR(grctx);
        if (!grctx)
                RETURN(SECSVC_DROP);

        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
        atomic_set(&grctx->src_base.sc_refcount, 1);
        req->rq_svc_ctx = &grctx->src_base;
        gw = &grctx->src_wirectx;

        /* save wire context */
        gw->gw_flags = ghdr->gh_flags;
        gw->gw_proc = ghdr->gh_proc;
        gw->gw_seq = ghdr->gh_seq;
        gw->gw_svc = ghdr->gh_svc;
        rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);

        /* keep the original wire header, which is subject to checksum
         * verification */
        if (swabbed)
                gss_header_swabber(ghdr);

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_INIT:
        case PTLRPC_GSS_PROC_CONTINUE_INIT:
                rc = gss_svc_handle_init(req, gw);
                break;
        case PTLRPC_GSS_PROC_DATA:
                rc = gss_svc_handle_data(req, gw);
                break;
        case PTLRPC_GSS_PROC_DESTROY:
                rc = gss_svc_handle_destroy(req, gw);
                break;
        default:
                CERROR("unknown proc %u\n", gw->gw_proc);
                rc = SECSVC_DROP;
                break;
        }

        switch (rc) {
        case SECSVC_OK:
                LASSERT(grctx->src_ctx);

                req->rq_auth_gss = 1;
                req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
                req->rq_auth_usr_ost = grctx->src_ctx->gsc_usr_oss;
                req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
                req->rq_auth_uid = grctx->src_ctx->gsc_uid;
                req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
                break;
        case SECSVC_COMPLETE:
                break;
        case SECSVC_DROP:
                gss_svc_reqctx_free(grctx);
                req->rq_svc_ctx = NULL;
                break;
        }

        RETURN(rc);
}
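/*
 * Summary of the on-wire request layout gss_svc_accept() dispatches on
 * (lustre_msg segments):
 *
 *   null/auth/integrity:  | gss header | body | [user desc] | [bulk desc]
 *                         | signature token |
 *   privacy:              | gss header | ciphertext token |
 *
 * where the privacy token decrypts to an embedded lustre_msg carrying
 * the body and the optional descriptors.  The segment indices are
 * enforced by the verify/unseal helpers above.
 */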
void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_svc_reqctx *grctx;
        ENTRY;

        if (svc_ctx == NULL) {
                EXIT;
                return;
        }

        grctx = gss_svc_ctx2reqctx(svc_ctx);

        CWARN("gss svc invalidate ctx %p(%u)\n",
              grctx->src_ctx, grctx->src_ctx->gsc_uid);
        gss_svc_upcall_destroy_ctx(grctx->src_ctx);

        EXIT;
}
static
int gss_svc_payload(struct gss_svc_reqctx *grctx, int early,
                    int msgsize, int privacy)
{
        /* an early reply is treated normally, but it shares the same ctx
         * with the original request; in that case ignore the special
         * ctx's flags */
        if (early == 0 && gss_svc_reqctx_is_special(grctx))
                return grctx->src_reserve_len;

        return gss_mech_payload(NULL, msgsize, privacy);
}
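/*
 * gss_svc_payload() answers "how many extra bytes will security consume
 * for a message of msgsize bytes": for a special ctx serving a final
 * reply it is the length reserved when the request was accepted,
 * otherwise it is whatever the mechanism needs for a token in integrity
 * (privacy == 0) or privacy (privacy == 1) mode.
 */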
static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
                                struct sptlrpc_flavor *flvr,
                                int read)
{
        int payload = sizeof(struct ptlrpc_bulk_sec_desc);

        if (read) {
                switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
                case SPTLRPC_BULK_SVC_NULL:
                        break;
                case SPTLRPC_BULK_SVC_INTG:
                        payload += gss_mech_payload(NULL, 0, 0);
                        break;
                case SPTLRPC_BULK_SVC_PRIV:
                        payload += gss_mech_payload(NULL, 0, 1);
                        break;
                case SPTLRPC_BULK_SVC_AUTH:
                default:
                        LBUG();
                }
        }

        return payload;
}
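/*
 * As the code above reads: a bulk write reply carries only the
 * fixed-size ptlrpc_bulk_sec_desc, while a bulk read (read != 0) must
 * also reserve room for the mechanism's integrity or privacy token over
 * the bulk data in the reply descriptor.
 */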
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
        struct gss_svc_reqctx     *grctx;
        struct ptlrpc_reply_state *rs;
        int                        early, privacy, svc, bsd_off = 0;
        __u32                      ibuflens[2], buflens[4];
        int                        ibufcnt = 0, bufcnt;
        int                        txtsize, wmsg_size, rs_size;
        ENTRY;

        LASSERT(msglen % 8 == 0);

        if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
                CERROR("client request bulk sec on non-bulk rpc\n");
                RETURN(-EPROTO);
        }

        svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
        early = (req->rq_packed_final == 0);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        if (!early && gss_svc_reqctx_is_special(grctx))
                privacy = 0;
        else
                privacy = (svc == SPTLRPC_SVC_PRIV);

        if (privacy) {
                /* inner clear buffers */
                ibufcnt = 1;
                ibuflens[0] = msglen;

                if (req->rq_pack_bulk) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = ibufcnt;
                        ibuflens[ibufcnt++] = gss_svc_bulk_payload(
                                                        grctx->src_ctx,
                                                        &req->rq_flvr,
                                                        req->rq_bulk_read);
                }

                txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
                txtsize += GSS_MAX_CIPHER_BLOCK;

                /* wrapper buffer */
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = gss_svc_payload(grctx, early, txtsize, 1);
        } else {
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = msglen;

                txtsize = buflens[0];
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[1];

                if (req->rq_pack_bulk) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = bufcnt;
                        buflens[bufcnt] = gss_svc_bulk_payload(
                                                        grctx->src_ctx,
                                                        &req->rq_flvr,
                                                        req->rq_bulk_read);
                        if (svc == SPTLRPC_SVC_INTG)
                                txtsize += buflens[bufcnt];
                        bufcnt++;
                }

                if ((!early && gss_svc_reqctx_is_special(grctx)) ||
                    svc != SPTLRPC_SVC_NULL)
                        buflens[bufcnt++] = gss_svc_payload(grctx, early,
                                                            txtsize, 0);
        }

        wmsg_size = lustre_msg_size_v2(bufcnt, buflens);

        rs_size = sizeof(*rs) + wmsg_size;
        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC_LARGE(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = wmsg_size;

        /* initialize the buffer */
        if (privacy) {
                lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
        } else {
                lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
                rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
        }

        if (bsd_off) {
                grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
                grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
                                                           bsd_off);
        }

        gss_svc_reqctx_addref(grctx);
        rs->rs_svc_ctx = req->rq_svc_ctx;

        LASSERT(rs->rs_msg);
        req->rq_reply_state = rs;
        RETURN(0);
}
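/*
 * All of the sizing above funnels through lustre_msg_size_v2(), which
 * rounds every segment up to an 8-byte boundary -- that is why msglen is
 * asserted to be a multiple of 8.  The userland sketch below models this
 * kind of layout arithmetic; the header size used here is an assumption
 * for illustration, not the real lustre_msg_v2 definition.
 */
#if 0	/* illustrative sketch only, not part of the module */
#include <stdint.h>
#include <stdio.h>

/* assumed v2-style header: fixed part plus one length word per segment */
#define MSG_V2_HDR_SIZE(count)	(32 + (count) * (uint32_t)sizeof(uint32_t))

static uint32_t size_round8(uint32_t len)
{
	return (len + 7) & ~7u;		/* round up to 8-byte boundary */
}

static uint32_t msg_size_v2(int bufcount, const uint32_t *buflens)
{
	uint32_t size = size_round8(MSG_V2_HDR_SIZE(bufcount));
	int i;

	for (i = 0; i < bufcount; i++)
		size += size_round8(buflens[i]);
	return size;
}

int main(void)
{
	/* e.g. a privacy-mode wrapper: gss header segment + token segment */
	uint32_t buflens[2] = { 48, 1000 };

	printf("wrapper size: %u bytes\n", msg_size_v2(2, buflens));
	return 0;
}
#endif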
static int gss_svc_seal(struct ptlrpc_request *req,
                        struct ptlrpc_reply_state *rs,
                        struct gss_svc_reqctx *grctx)
{
        struct gss_svc_ctx      *gctx = grctx->src_ctx;
        rawobj_t                 hdrobj, msgobj, token;
        struct gss_header       *ghdr;
        __u8                    *token_buf;
        int                      token_buflen;
        __u32                    buflens[2], major;
        int                      msglen, rc;
        ENTRY;

        /* get clear data length. note embedded lustre_msg might
         * have been shrunk */
        if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
                msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
        else
                msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                            rs->rs_repbuf->lm_buflens);

        /* temporarily use tail of buffer to hold gss header data */
        LASSERT(msglen + PTLRPC_GSS_HEADER_SIZE <= rs->rs_repbuf_len);
        ghdr = (struct gss_header *) ((char *) rs->rs_repbuf +
                                rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = LUSTRE_SP_ANY;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
        ghdr->gh_seq = grctx->src_wirectx.gw_seq;
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = 0;
        if (req->rq_pack_bulk)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;

        /* allocate temporary cipher buffer */
        token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
        OBD_ALLOC_LARGE(token_buf, token_buflen);
        if (token_buf == NULL)
                RETURN(-ENOMEM);

        hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
        hdrobj.data = (__u8 *) ghdr;
        msgobj.len = msglen;
        msgobj.data = (__u8 *) rs->rs_repbuf;
        token.len = token_buflen;
        token.data = token_buf;

        major = lgss_wrap(gctx->gsc_mechctx, &hdrobj, &msgobj,
                          rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE, &token);
        if (major != GSS_S_COMPLETE) {
                CERROR("wrap message error: %08x\n", major);
                GOTO(out_free, rc = -EPERM);
        }
        LASSERT(token.len <= token_buflen);

        /* we are about to overwrite data at rs->rs_repbuf; nullify
         * pointers into it to catch further illegal usage. */
        if (req->rq_pack_bulk) {
                grctx->src_repbsd = NULL;
                grctx->src_repbsd_size = 0;
        }

        /* now fill the actual wire data
         * - gss header
         * - gss token
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = token.len;

        rs->rs_repdata_len = lustre_msg_size_v2(2, buflens);
        LASSERT(rs->rs_repdata_len <= rs->rs_repbuf_len);

        lustre_init_msg_v2(rs->rs_repbuf, 2, buflens, NULL);
        rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        memcpy(lustre_msg_buf(rs->rs_repbuf, 0, 0), ghdr,
               PTLRPC_GSS_HEADER_SIZE);
        memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);

        /* reply offset */
        if (req->rq_packed_final &&
            (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))
                req->rq_reply_off = gss_at_reply_off_priv;
        else
                req->rq_reply_off = 0;

        /* to catch upper layer's further access */
        rs->rs_msg = NULL;
        req->rq_repmsg = NULL;
        req->rq_replen = 0;

        rc = 0;
out_free:
        OBD_FREE_LARGE(token_buf, token_buflen);
        RETURN(rc);
}
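/*
 * gss_svc_seal() rebuilds rs_repbuf in place: the clear reply (itself a
 * lustre_msg) is wrapped into a single ciphertext token, and the final
 * wire reply becomes a two-segment lustre_msg of | gss header | token |.
 * The gss header is staged at the unused tail of rs_repbuf because
 * lgss_wrap() must see the header and the clear text together, before
 * the buffer is overwritten with the wire layout.
 */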
int gss_svc_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct gss_wire_ctx *gw = &grctx->src_wirectx;
        int early, rc;
        ENTRY;

        early = (req->rq_packed_final == 0);

        if (!early && gss_svc_reqctx_is_special(grctx)) {
                LASSERT(rs->rs_repdata_len != 0);

                req->rq_reply_off = gss_at_reply_off_integ;
                RETURN(0);
        }

        /* early reply can happen in many cases */
        if (!early &&
            gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
            gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
                CERROR("proc %d not supported\n", gw->gw_proc);
                RETURN(-EINVAL);
        }

        LASSERT(grctx->src_ctx);

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_seal(req, rs, grctx);
                break;
        default:
                CERROR("Unknown service %d\n", gw->gw_svc);
                GOTO(out, rc = -EINVAL);
        }
        rc = 0;

out:
        RETURN(rc);
}
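/*
 * For a special ctx (e.g. one built by the init/error-notify path) the
 * final reply has already been packed, so gss_svc_authorize() only
 * publishes the fixed adaptive-timeout reply offset; everything else
 * goes through the normal sign/seal switch above.
 */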
void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
{
        struct gss_svc_reqctx *grctx;

        LASSERT(rs->rs_svc_ctx);
        grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);

        gss_svc_reqctx_decref(grctx);
        rs->rs_svc_ctx = NULL;

        if (!rs->rs_prealloc)
                OBD_FREE_LARGE(rs, rs->rs_size);
}
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->sc_refcount) == 0);
        gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
                         struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_cli_ctx *cli_gctx = ctx2gctx(cli_ctx);
        struct gss_svc_ctx *svc_gctx = gss_svc_ctx2gssctx(svc_ctx);
        struct gss_ctx *mechctx = NULL;

        LASSERT(cli_gctx);
        LASSERT(svc_gctx && svc_gctx->gsc_mechctx);

        cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
        cli_gctx->gc_win = GSS_SEQ_WIN;

        /* The problem is the reverse ctx might get lost in some recovery
         * situations, and the same svc_ctx will be used to re-create it.
         * if a callback was sent out before that, a new reverse ctx
         * starting with sequence 0 would cause future callback rpcs to be
         * treated as replays.
         *
         * each reverse root ctx records its latest sequence number on its
         * buddy svcctx before being destroyed, so here we continue to use
         * it. */
        atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);

        if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
                CERROR("failed to dup svc handle\n");
                goto err_out;
        }

        if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
            GSS_S_COMPLETE) {
                CERROR("failed to copy mech context\n");
                goto err_svc_handle;
        }

        if (rawobj_dup(&cli_gctx->gc_handle, &svc_gctx->gsc_rvs_hdl)) {
                CERROR("failed to dup reverse handle\n");
                goto err_ctx;
        }

        cli_gctx->gc_mechctx = mechctx;
        gss_cli_ctx_uptodate(cli_gctx);

        return 0;

err_ctx:
        lgss_delete_sec_context(&mechctx);
err_svc_handle:
        rawobj_free(&cli_gctx->gc_svc_handle);
err_out:
        return -ENOMEM;
}
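/*
 * In short: a reverse (server-to-client) context is cloned from the
 * server-side context rather than negotiated, and it inherits the
 * sequence number saved in gsc_rvs_seq so that callbacks sent after a
 * reverse-context rebuild are not mistaken for replays by the client's
 * sequence window.
 */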
static void gss_init_at_reply_offset(void)
{
        __u32 buflens[3];
        int clearsize;

        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = lustre_msg_early_size();
        buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
        gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);

        buflens[0] = lustre_msg_early_size();
        clearsize = lustre_msg_size_v2(1, buflens);
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(NULL, clearsize, 0);
        buflens[2] = gss_cli_payload(NULL, clearsize, 1);
        gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
}
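/*
 * Because an early reply always has the same shape -- a fixed-size early
 * lustre_msg plus the mechanism overhead computed with a NULL mech
 * context for integrity and, in the privacy case, for sealing -- both
 * offsets can be computed once at module load and reused for every
 * request (gss_at_reply_off_integ and gss_at_reply_off_priv above).
 */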
static int __init sptlrpc_gss_init(void)
{
        int rc;

        rc = gss_init_tunables();
        if (rc)
                return rc;

        rc = gss_init_cli_upcall();
        if (rc)
                goto out_tunables;

        rc = gss_init_svc_upcall();
        if (rc)
                goto out_cli_upcall;

        rc = init_null_module();
        if (rc)
                goto out_svc_upcall;

        rc = init_kerberos_module();
        if (rc)
                goto out_null;

        rc = init_sk_module();
        if (rc)
                goto out_kerberos;

        /* register the policy after everything else is initialized,
         * because it might be used immediately after registration */

        rc = gss_init_keyring();
        if (rc)
                goto out_sk;

        rc = gss_init_pipefs();
        if (rc)
                goto out_keyring;

        gss_init_at_reply_offset();

        return 0;

out_keyring:
        gss_exit_keyring();
out_sk:
        cleanup_sk_module();
out_kerberos:
        cleanup_kerberos_module();
out_null:
        cleanup_null_module();
out_svc_upcall:
        gss_exit_svc_upcall();
out_cli_upcall:
        gss_exit_cli_upcall();
out_tunables:
        gss_exit_tunables();

        return rc;
}
static void __exit sptlrpc_gss_exit(void)
{
        gss_exit_keyring();
        gss_exit_pipefs();
        cleanup_kerberos_module();
        gss_exit_svc_upcall();
        gss_exit_cli_upcall();
        gss_exit_tunables();
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre GSS security policy");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(sptlrpc_gss_init);
module_exit(sptlrpc_gss_exit);