/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * linux/net/sunrpc/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define DEBUG_SUBSYSTEM S_SEC

#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <obd_cksum.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"

#include <linux/crypto.h>
/*
 * Early replies have a fixed size, respectively in privacy and integrity
 * mode, so we calculate them only once.
 */
static int gss_at_reply_off_integ;
static int gss_at_reply_off_priv;
static inline int msg_last_segidx(struct lustre_msg *msg)
{
        LASSERT(msg->lm_bufcount > 0);
        return msg->lm_bufcount - 1;
}

static inline int msg_last_seglen(struct lustre_msg *msg)
{
        return msg->lm_buflens[msg_last_segidx(msg)];
}
/********************************************
 * wire data swabber                        *
 ********************************************/
static
void gss_header_swabber(struct gss_header *ghdr)
{
        __swab32s(&ghdr->gh_flags);
        __swab32s(&ghdr->gh_proc);
        __swab32s(&ghdr->gh_seq);
        __swab32s(&ghdr->gh_svc);
        __swab32s(&ghdr->gh_pad1);
        __swab32s(&ghdr->gh_handle.len);
}

struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
                                   int swabbed)
{
        struct gss_header *ghdr;

        ghdr = lustre_msg_buf(msg, segment, sizeof(*ghdr));
        if (ghdr == NULL)
                return NULL;

        if (swabbed)
                gss_header_swabber(ghdr);

        if (sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
                CERROR("gss header has length %d, now %u received\n",
                       (int) sizeof(*ghdr) + ghdr->gh_handle.len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return ghdr;
}
static
void gss_netobj_swabber(netobj_t *obj)
{
        __swab32s(&obj->len);
}

netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
{
        netobj_t *obj;

        obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
        if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
                CERROR("netobj requires length %u but only %u received\n",
                       (unsigned int) sizeof(*obj) + obj->len,
                       msg->lm_buflens[segment]);
                return NULL;
        }

        return obj;
}
/*
 * The payload size should be obtained from the mechanism, but since we
 * currently only support Kerberos we can simply use a fixed value:
 *
 *  - krb5 checksum: 20
 *
 * For privacy mode, the payload also includes the cipher text, which has
 * the same size as the plain text, plus a possible confounder and padding,
 * each at most the maximum cipher block size.
 */
#define GSS_KRB5_INTEG_MAX_PAYLOAD      (40)
static
int gss_mech_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
        if (privacy)
                return GSS_KRB5_INTEG_MAX_PAYLOAD + 16 + 16 + 16 + msgsize;
        else
                return GSS_KRB5_INTEG_MAX_PAYLOAD;
}
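
/*
 * Illustrative sketch, not part of the original code: how the payload
 * reservation above works out for a sample 1024-byte message. The "+ 16"
 * terms are the fixed cipher-block-sized reservations mentioned in the
 * comment above (confounder/padding at maximum cipher block size).
 */
#if 0
static void gss_mech_payload_example(void)
{
        /* integrity mode: fixed 40 bytes, message size not counted */
        LASSERT(gss_mech_payload(NULL, 1024, 0) == 40);

        /* privacy mode: 40 + 16 + 16 + 16 + 1024 = 1112 bytes */
        LASSERT(gss_mech_payload(NULL, 1024, 1) == 1112);
}
#endif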
/*
 * return signature size, otherwise < 0 to indicate an error
 */
static int gss_sign_msg(struct lustre_msg *msg,
                        struct gss_ctx *mechctx,
                        enum lustre_sec_part sp,
                        __u32 flags, __u32 proc, __u32 seq, __u32 svc,
                        rawobj_t *handle)
{
        struct gss_header      *ghdr;
        rawobj_t                text[4], mic;
        int                     textcnt, max_textcnt, mic_idx;
        __u32                   major;

        LASSERT(msg->lm_bufcount >= 2);

        /* gss hdr */
        LASSERT(msg->lm_buflens[0] >=
                sizeof(*ghdr) + (handle ? handle->len : 0));
        ghdr = lustre_msg_buf(msg, 0, 0);

        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = (__u8) sp;
        ghdr->gh_flags = flags;
        ghdr->gh_proc = proc;
        ghdr->gh_seq = seq;
        ghdr->gh_svc = svc;
        if (!handle) {
                /* fill in a fake one */
                ghdr->gh_handle.len = 0;
        } else {
                ghdr->gh_handle.len = handle->len;
                memcpy(ghdr->gh_handle.data, handle->data, handle->len);
        }

        /* no actual signature for null mode */
        if (svc == SPTLRPC_SVC_NULL)
                return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        /* MIC */
        mic_idx = msg_last_segidx(msg);
        max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

        for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
        if (major != GSS_S_COMPLETE) {
                CERROR("failed to generate MIC: %08x\n", major);
                return -EPERM;
        }
        LASSERT(mic.len <= msg->lm_buflens[mic_idx]);

        return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
}
static
__u32 gss_verify_msg(struct lustre_msg *msg,
                     struct gss_ctx *mechctx,
                     __u32 svc)
{
        rawobj_t        text[4], mic;
        int             textcnt, max_textcnt;
        int             mic_idx;
        __u32           major;

        LASSERT(msg->lm_bufcount >= 2);

        if (svc == SPTLRPC_SVC_NULL)
                return GSS_S_COMPLETE;

        mic_idx = msg_last_segidx(msg);
        max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;

        for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
        if (major != GSS_S_COMPLETE)
                CERROR("mic verify error: %08x\n", major);

        return major;
}
/*
 * return gss error code
 */
static
__u32 gss_unseal_msg(struct gss_ctx *mechctx,
                     struct lustre_msg *msgbuf,
                     int *msg_len, int msgbuf_len)
{
        rawobj_t        clear_obj, hdrobj, token;
        __u8           *clear_buf;
        int             clear_buflen;
        __u32           major;
        ENTRY;

        if (msgbuf->lm_bufcount != 2) {
                CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
                RETURN(GSS_S_FAILURE);
        }

        /* allocate a temporary clear-text buffer, the same size as the
         * token; we assume the final clear text size <= token size */
        clear_buflen = lustre_msg_buflen(msgbuf, 1);
        OBD_ALLOC_LARGE(clear_buf, clear_buflen);
        if (!clear_buf)
                RETURN(GSS_S_FAILURE);

        /* buffer objects */
        hdrobj.len = lustre_msg_buflen(msgbuf, 0);
        hdrobj.data = lustre_msg_buf(msgbuf, 0, 0);
        token.len = lustre_msg_buflen(msgbuf, 1);
        token.data = lustre_msg_buf(msgbuf, 1, 0);
        clear_obj.len = clear_buflen;
        clear_obj.data = clear_buf;

        major = lgss_unwrap(mechctx, &hdrobj, &token, &clear_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("unwrap message error: %08x\n", major);
                GOTO(out_free, major = GSS_S_FAILURE);
        }
        LASSERT(clear_obj.len <= clear_buflen);
        LASSERT(clear_obj.len <= msgbuf_len);

        /* now the decrypted message */
        memcpy(msgbuf, clear_obj.data, clear_obj.len);
        *msg_len = clear_obj.len;

        major = GSS_S_COMPLETE;
out_free:
        OBD_FREE_LARGE(clear_buf, clear_buflen);
        RETURN(major);
}
/********************************************
 * gss client context manipulation helpers  *
 ********************************************/

int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(cfs_atomic_read(&ctx->cc_refcount));

        if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
                if (!ctx->cc_early_expire)
                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
                      ctx->cc_expire,
                      ctx->cc_expire == 0 ? 0 :
                      cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));

                sptlrpc_cli_ctx_wakeup(ctx);
                return 1;
        }

        return 0;
}
/*
 * return 1 if the context is dead.
 */
int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
{
        if (unlikely(cli_ctx_is_dead(ctx)))
                return 1;

        /* an expire value of 0 means the context never expires; a newly
         * created gss context may have 0 expiration while its upcall is
         * still in progress */
        if (ctx->cc_expire == 0)
                return 0;

        /* check real expiration */
        if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
                return 0;

        cli_ctx_expire(ctx);
        return 1;
}
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
        struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
        unsigned long ctx_expiry;

        if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
                CERROR("ctx %p(%u): unable to inquire, expire it now\n",
                       gctx, ctx->cc_vcred.vc_uid);
                ctx_expiry = 1; /* make it expired now */
        }

        ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
                                              ctx->cc_sec->ps_flvr.sf_flags);

        /* At this point this ctx might have been marked as dead by
         * someone else, in which case nobody will make further use of it.
         * We don't care: marking it UPTODATE helps destroy the server-side
         * context when this ctx is destroyed. */
        set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

        if (sec_is_reverse(ctx->cc_sec)) {
                CWARN("server installed reverse ctx %p idx "LPX64", "
                      "expiry %lu(%+lds)\n", ctx,
                      gss_handle_to_u64(&gctx->gc_handle),
                      ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
        } else {
                CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
                      "expiry %lu(%+lds)\n", ctx,
                      gss_handle_to_u64(&gctx->gc_handle),
                      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
                      ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());

                /* install reverse svc ctx for root context */
                if (ctx->cc_vcred.vc_uid == 0)
                        gss_sec_install_rctx(ctx->cc_sec->ps_import,
                                             ctx->cc_sec, ctx);
        }

        sptlrpc_cli_ctx_wakeup(ctx);
}
static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
{
        LASSERT(gctx->gc_base.cc_sec);

        if (gctx->gc_mechctx) {
                lgss_delete_sec_context(&gctx->gc_mechctx);
                gctx->gc_mechctx = NULL;
        }

        if (!rawobj_empty(&gctx->gc_svc_handle)) {
                /* forward ctx: mark buddy reverse svcctx soon-expire. */
                if (!sec_is_reverse(gctx->gc_base.cc_sec) &&
                    !rawobj_empty(&gctx->gc_svc_handle))
                        gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);

                rawobj_free(&gctx->gc_svc_handle);
        }

        rawobj_free(&gctx->gc_handle);
}
/*
 * Based on the sequence number algorithm specified in RFC 2203,
 * modified for our own problem: an arriving request has a valid sequence
 * number, but unwrapping the request might take a long time, after which
 * its sequence number is no longer valid (it has fallen behind the window).
 * This rarely happens, mostly under extreme load.
 *
 * Note we should not check the sequence number before verifying the
 * integrity of the incoming request, because a single forged request with
 * a high sequence number could cause all following requests to be dropped.
 *
 * So we use a multi-phase approach: prepare 2 sequence windows, a
 * "main window" for normal sequence numbers and a "back window" for those
 * that fell behind, with a 3-phase checking mechanism:
 *  0 - before integrity verification, perform an initial sequence check in
 *      the main window, which only probes and doesn't actually set any bit.
 *      If the sequence is above the window, or fits in the window with the
 *      bit at 0, accept and proceed to integrity verification; otherwise
 *      reject this sequence.
 *  1 - after integrity verification, check the main window again. If the
 *      sequence is above the window, or fits in the window with the bit
 *      at 0, set the bit and accept; if it fits in the window but the bit
 *      is already set, reject; if it fell behind the window, proceed to
 *      phase 2.
 *  2 - check the back window. If the sequence is above the window, or fits
 *      in the window with the bit at 0, set the bit and accept; otherwise
 *      reject.
 *
 * return value:
 *   1: looks like a replay
 *   0: is ok
 *  -1: is a replay
 *
 * Note phase 0 is necessary: without it, a replayed request whose sequence
 * number lies between the two windows couldn't be detected.
 *
 * This mechanism can't totally solve the problem, but it helps drop far
 * fewer valid requests.
 */
static
int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
                     __u32 seq_num, int phase)
{
        LASSERT(phase >= 0 && phase <= 2);

        if (seq_num > *max_seq) {
                /* 1. high above the window */
                if (phase == 0)
                        return 0;

                if (seq_num >= *max_seq + win_size) {
                        memset(window, 0, win_size / 8);
                        *max_seq = seq_num;
                } else {
                        while (*max_seq < seq_num) {
                                (*max_seq)++;
                                __clear_bit((*max_seq) % win_size, window);
                        }
                }
                __set_bit(seq_num % win_size, window);
        } else if (seq_num + win_size <= *max_seq) {
                /* 2. low behind the window */
                if (phase == 0 || phase == 2)
                        goto replay;

                CWARN("seq %u is %u behind (size %d), check backup window\n",
                      seq_num, *max_seq - win_size - seq_num, win_size);
                return 1;
        } else {
                /* 3. fit into the window */
                if (phase == 0) {
                        if (test_bit(seq_num % win_size, window))
                                goto replay;
                } else {
                        if (__test_and_set_bit(seq_num % win_size, window))
                                goto replay;
                }
        }

        return 0;

replay:
        CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
               seq_num,
               seq_num + win_size > *max_seq ? "in" : "behind",
               phase == 2 ? "backup " : "main",
               *max_seq, win_size);
        return -1;
}
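
/*
 * Illustrative sketch, not part of the original code: how a caller is
 * expected to drive the three phases described above for one incoming
 * request; gss_check_seq_num() below is the real implementation.
 */
#if 0
static int gss_seq_phases_example(struct gss_svc_seq_data *ssd, __u32 seq)
{
        int rc;

        /* phase 0: probe the main window only, set no bit yet */
        rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                              &ssd->ssd_max_main, seq, 0);
        if (rc)
                return rc;      /* looks like a replay, don't verify at all */

        /* ... verify the integrity of the request here ... */

        /* phase 1: re-check the main window, this time setting the bit */
        rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                              &ssd->ssd_max_main, seq, 1);
        if (rc <= 0)
                return rc;      /* 0: accepted, -1: replay */

        /* phase 2: it fell behind the main window during unwrapping,
         * give it a last chance in the back window */
        return gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
                                &ssd->ssd_max_back, seq, 2);
}
#endif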
/*
 * Based on the sequence number algorithm specified in RFC 2203.
 *
 * if @set == 0: initial check, don't set any bit in the window
 * if @set == 1: final check, set the bit in the window
 */
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
        int rc = 0;

        spin_lock(&ssd->ssd_lock);

        if (set == 0) {
                /* phase 0 testing */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 0);
                if (unlikely(rc))
                        gss_stat_oos_record_svc(0, 1);
        } else {
                /* phase 1 checking main window */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 1);
                switch (rc) {
                case -1:
                        gss_stat_oos_record_svc(1, 1);
                        /* fall through */
                case 0:
                        goto exit;
                }

                /* phase 2 checking back window */
                rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
                                      &ssd->ssd_max_back, seq_num, 2);
                if (rc)
                        gss_stat_oos_record_svc(2, 1);
                else
                        gss_stat_oos_record_svc(2, 0);
        }
exit:
        spin_unlock(&ssd->ssd_lock);
        return rc;
}
/***************************************
 * cred APIs                           *
 ***************************************/

static inline int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
                                  int msgsize, int privacy)
{
        return gss_mech_payload(NULL, msgsize, privacy);
}
static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
                                struct sptlrpc_flavor *flvr,
                                int reply, int read)
{
        int payload = sizeof(struct ptlrpc_bulk_sec_desc);

        LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);

        if ((!reply && !read) || (reply && read)) {
                switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
                case SPTLRPC_BULK_SVC_NULL:
                        break;
                case SPTLRPC_BULK_SVC_INTG:
                        payload += gss_cli_payload(ctx, 0, 0);
                        break;
                case SPTLRPC_BULK_SVC_PRIV:
                        payload += gss_cli_payload(ctx, 0, 1);
                        break;
                case SPTLRPC_BULK_SVC_AUTH:
                default:
                        LBUG();
                }
        }

        return payload;
}
621 return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
624 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
628 if (flags & PTLRPC_CTX_NEW)
629 strncat(buf, "new,", bufsize);
630 if (flags & PTLRPC_CTX_UPTODATE)
631 strncat(buf, "uptodate,", bufsize);
632 if (flags & PTLRPC_CTX_DEAD)
633 strncat(buf, "dead,", bufsize);
634 if (flags & PTLRPC_CTX_ERROR)
635 strncat(buf, "error,", bufsize);
636 if (flags & PTLRPC_CTX_CACHED)
637 strncat(buf, "cached,", bufsize);
638 if (flags & PTLRPC_CTX_ETERNAL)
639 strncat(buf, "eternal,", bufsize);
641 strncat(buf, "-,", bufsize);
643 buf[strlen(buf) - 1] = '\0';
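
/*
 * Illustrative sketch, not part of the original code: flags2str renders
 * the flag word as a comma-separated list, e.g. a context with
 * PTLRPC_CTX_UPTODATE | PTLRPC_CTX_CACHED set yields "uptodate,cached".
 */
#if 0
static void gss_cli_ctx_flags2str_example(struct ptlrpc_cli_ctx *ctx)
{
        char buf[64];

        gss_cli_ctx_flags2str(ctx->cc_flags, buf, sizeof(buf));
        CDEBUG(D_SEC, "ctx %p flags: %s\n", ctx, buf);
}
#endif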
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx      *gctx = ctx2gctx(ctx);
        __u32                    flags = 0, seq, svc;
        int                      rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(req->rq_cli_ctx == ctx);

        /* nothing to do for context negotiation RPCs */
        if (req->rq_ctx_init)
                RETURN(0);

        svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
        if (req->rq_pack_bulk)
                flags |= LUSTRE_GSS_PACK_BULK;
        if (req->rq_pack_udesc)
                flags |= LUSTRE_GSS_PACK_USER;

redo:
        seq = cfs_atomic_inc_return(&gctx->gc_seq);

        rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
                          ctx->cc_sec->ps_part,
                          flags, gctx->gc_proc, seq, svc,
                          &gctx->gc_handle);
        if (rc < 0)
                RETURN(rc);

        /* gss_sign_msg() might take a long time to finish, during which
         * more RPCs could be wrapped up and sent out. If we find too many
         * of them we should repack this RPC, because sending it too late
         * might cause its sequence number to fall behind the window on the
         * server and be dropped. This also applies to gss_cli_ctx_seal().
         *
         * Note: null mode doesn't check sequence numbers. */
        if (svc != SPTLRPC_SVC_NULL &&
            cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
                int behind = cfs_atomic_read(&gctx->gc_seq) - seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry signing\n", req, behind);
                goto redo;
        }

        req->rq_reqdata_len = rc;
        RETURN(0);
}
static
int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
                                  struct ptlrpc_request *req,
                                  struct gss_header *ghdr)
{
        struct gss_err_header *errhdr;
        int rc;

        LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);

        errhdr = (struct gss_err_header *) ghdr;

        CWARN("req x"LPU64"/t"LPU64", ctx %p idx "LPX64"(%u->%s): "
              "%sserver respond (%08x/%08x)\n",
              req->rq_xid, req->rq_transno, ctx,
              gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
              ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
              sec_is_reverse(ctx->cc_sec) ? "reverse" : "",
              errhdr->gh_major, errhdr->gh_minor);

        /* context fini rpc, let it fail */
        if (req->rq_ctx_fini) {
                CWARN("context fini rpc failed\n");
                return -EINVAL;
        }

        /* reverse sec: just return an error, don't expire this ctx because
         * it's crucial to callback RPCs. Note that if the callback RPC
         * failed because of a bit flip during network transfer, the client
         * will be evicted directly, so more gracefully we probably want to
         * let it retry a number of times. */
        if (sec_is_reverse(ctx->cc_sec))
                return -EINVAL;

        if (errhdr->gh_major != GSS_S_NO_CONTEXT &&
            errhdr->gh_major != GSS_S_BAD_SIG)
                return -EACCES;

        /* A server returning NO_CONTEXT may be caused by context expiry or
         * by server reboot/failover. We try to refresh a new ctx,
         * transparently to the upper layer.
         *
         * In some cases our gss handle may happen to be identical to
         * another handle, since the handle itself is not fully random.
         * In the krb5 case GSS_S_BAD_SIG will be returned; other mechanisms
         * may return other gss errors.
         *
         * If we add a new mechanism, make sure the correct error is
         * returned in this case. */
        CWARN("%s: server might have lost the context, retrying\n",
              errhdr->gh_major == GSS_S_NO_CONTEXT ? "NO_CONTEXT" : "BAD_SIG");

        sptlrpc_cli_ctx_expire(ctx);

        /* We need to replace the ctx right here; otherwise, during resend
         * we'll hit the logic in sptlrpc_req_refresh_ctx() which keeps the
         * ctx with the RESEND flag, and we'll never get rid of this ctx. */
        rc = sptlrpc_req_replace_dead_ctx(req);
        if (rc == 0)
                req->rq_resend = 1;

        return rc;
}
int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx     *gctx;
        struct gss_header      *ghdr, *reqhdr;
        struct lustre_msg      *msg = req->rq_repdata;
        __u32                   major;
        int                     pack_bulk, swabbed, rc = 0;
        ENTRY;

        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(msg);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* special case for context negotiation: rq_repmsg/rq_replen are
         * actually not used currently, but an early reply is always
         * treated normally */
        if (req->rq_ctx_init && !req->rq_early) {
                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];
                RETURN(0);
        }

        if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
                CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        swabbed = ptlrpc_rep_need_swab(req);

        ghdr = gss_swab_header(msg, 0, swabbed);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks */
        reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));
        LASSERT(reqhdr);

        if (ghdr->gh_version != reqhdr->gh_version) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, reqhdr->gh_version);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;

                if (!req->rq_early &&
                    !equi(req->rq_pack_bulk == 1, pack_bulk)) {
                        CERROR("%s bulk flag in reply\n",
                               req->rq_pack_bulk ? "missing" : "unexpected");
                        RETURN(-EPROTO);
                }

                if (ghdr->gh_seq != reqhdr->gh_seq) {
                        CERROR("seqnum %u mismatch, expect %u\n",
                               ghdr->gh_seq, reqhdr->gh_seq);
                        RETURN(-EPROTO);
                }

                if (ghdr->gh_svc != reqhdr->gh_svc) {
                        CERROR("svc %u mismatch, expect %u\n",
                               ghdr->gh_svc, reqhdr->gh_svc);
                        RETURN(-EPROTO);
                }

                if (swabbed)
                        gss_header_swabber(ghdr);

                major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
                if (major != GSS_S_COMPLETE) {
                        CERROR("failed to verify reply: %x\n", major);
                        RETURN(-EPERM);
                }

                if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
                        __u32 cksum;

                        cksum = crc32_le(!(__u32) 0,
                                         lustre_msg_buf(msg, 1, 0),
                                         lustre_msg_buflen(msg, 1));
                        if (cksum != msg->lm_cksum) {
                                CWARN("early reply checksum mismatch: "
                                      "%08x != %08x\n", cksum, msg->lm_cksum);
                                RETURN(-EPROTO);
                        }
                }

                if (pack_bulk) {
                        /* bulk checksum is right after the lustre msg */
                        if (msg->lm_bufcount < 3) {
                                CERROR("Invalid reply bufcount %u\n",
                                       msg->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        rc = bulk_sec_desc_unpack(msg, 2, swabbed);
                        if (rc) {
                                CERROR("unpack bulk desc: %d\n", rc);
                                RETURN(rc);
                        }
                }

                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];
                break;
        case PTLRPC_GSS_PROC_ERR:
                if (req->rq_early) {
                        CERROR("server return error with early reply\n");
                        rc = -EPROTO;
                } else {
                        rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                }
                break;
        default:
                CERROR("unknown gss proc %d\n", ghdr->gh_proc);
                rc = -EPROTO;
        }

        RETURN(rc);
}
int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx      *gctx;
        rawobj_t                 hdrobj, msgobj, token;
        struct gss_header       *ghdr;
        __u32                    buflens[2], major;
        int                      wiresize, rc;
        ENTRY;

        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(req->rq_reqlen);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* final clear data length */
        req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
                                                 req->rq_clrbuf->lm_buflens);

        /* calculate wire data length */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
        wiresize = lustre_msg_size_v2(2, buflens);

        /* allocate wire buffer */
        if (req->rq_pool) {
                /* pre-allocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf != req->rq_clrbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);
        } else {
                OBD_ALLOC_LARGE(req->rq_reqbuf, wiresize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);
                req->rq_reqbuf_len = wiresize;
        }

        lustre_init_msg_v2(req->rq_reqbuf, 2, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        /* gss header */
        ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = gctx->gc_proc;
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = gctx->gc_handle.len;
        memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
        if (req->rq_pack_bulk)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
        if (req->rq_pack_udesc)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;

redo:
        ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);

        /* buffer objects */
        hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
        hdrobj.data = (__u8 *) ghdr;
        msgobj.len = req->rq_clrdata_len;
        msgobj.data = (__u8 *) req->rq_clrbuf;
        token.len = lustre_msg_buflen(req->rq_reqbuf, 1);
        token.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);

        major = lgss_wrap(gctx->gc_mechctx, &hdrobj, &msgobj,
                          req->rq_clrbuf_len, &token);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: wrap message error: %08x\n", major);
                GOTO(err_free, rc = -EPERM);
        }
        LASSERT(token.len <= buflens[1]);

        /* see the explanation in gss_cli_ctx_sign() */
        if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
                     GSS_SEQ_REPACK_THRESHOLD)) {
                int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry sealing\n", req, behind);

                ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
                goto redo;
        }

        /* now set the final wire data length */
        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1,
                                                token.len, 0);
        RETURN(0);

err_free:
        if (!req->rq_pool) {
                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
        RETURN(rc);
}
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx      *gctx;
        struct gss_header       *ghdr;
        struct lustre_msg       *msg = req->rq_repdata;
        int                      msglen, pack_bulk, swabbed, rc;
        __u32                    major;
        ENTRY;

        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(req->rq_ctx_init == 0);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        swabbed = ptlrpc_rep_need_swab(req);

        ghdr = gss_swab_header(msg, 0, swabbed);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, PTLRPC_GSS_VERSION);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;

                if (!req->rq_early &&
                    !equi(req->rq_pack_bulk == 1, pack_bulk)) {
                        CERROR("%s bulk flag in reply\n",
                               req->rq_pack_bulk ? "missing" : "unexpected");
                        RETURN(-EPROTO);
                }

                if (swabbed)
                        gss_header_swabber(ghdr);

                /* use rq_repdata_len as the buffer size, which assumes
                 * unsealing doesn't need extra memory space. for precise
                 * control, we'd better calculate the actual buffer size as
                 * (repbuf_len - offset - repdata_len) */
                major = gss_unseal_msg(gctx->gc_mechctx, msg,
                                       &msglen, req->rq_repdata_len);
                if (major != GSS_S_COMPLETE) {
                        CERROR("failed to unwrap reply: %x\n", major);
                        rc = -EPERM;
                        break;
                }

                swabbed = __lustre_unpack_msg(msg, msglen);
                if (swabbed < 0) {
                        CERROR("Failed to unpack after decryption\n");
                        RETURN(-EPROTO);
                }

                if (msg->lm_bufcount < 1) {
                        CERROR("Invalid reply buffer: empty\n");
                        RETURN(-EPROTO);
                }

                if (pack_bulk) {
                        if (msg->lm_bufcount < 2) {
                                CERROR("bufcount %u: missing bulk sec desc\n",
                                       msg->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        /* bulk checksum is the last segment */
                        if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
                                                 swabbed))
                                RETURN(-EPROTO);
                }

                req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
                req->rq_replen = msg->lm_buflens[0];

                rc = 0;
                break;
        case PTLRPC_GSS_PROC_ERR:
                if (req->rq_early) {
                        CERROR("server return error with early reply\n");
                        rc = -EPROTO;
                } else {
                        rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                }
                break;
        default:
                CERROR("unexpected proc %d\n", ghdr->gh_proc);
                rc = -EPERM;
        }

        RETURN(rc);
}
/*********************************************
 * reverse context installation              *
 *********************************************/

static inline
int gss_install_rvs_svc_ctx(struct obd_import *imp,
                            struct gss_sec *gsec,
                            struct gss_cli_ctx *gctx)
{
        return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
}

/*********************************************
 * GSS security APIs                         *
 *********************************************/
int gss_sec_create_common(struct gss_sec *gsec,
                          struct ptlrpc_sec_policy *policy,
                          struct obd_import *imp,
                          struct ptlrpc_svc_ctx *svcctx,
                          struct sptlrpc_flavor *sf)
{
        struct ptlrpc_sec *sec;

        LASSERT(imp);
        LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);

        gsec->gs_mech = lgss_subflavor_to_mech(
                                SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
        if (!gsec->gs_mech) {
                CERROR("gss backend 0x%x not found\n",
                       SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
                return -EOPNOTSUPP;
        }

        spin_lock_init(&gsec->gs_lock);
        gsec->gs_rvs_hdl = 0ULL;

        /* initialize upper ptlrpc_sec */
        sec = &gsec->gs_base;
        sec->ps_policy = policy;
        cfs_atomic_set(&sec->ps_refcount, 0);
        cfs_atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_flvr = *sf;
        sec->ps_import = class_import_get(imp);
        spin_lock_init(&sec->ps_lock);
        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);

        if (!svcctx) {
                sec->ps_gc_interval = GSS_GC_INTERVAL;
        } else {
                LASSERT(sec_is_reverse(sec));

                /* never do gc on reverse sec */
                sec->ps_gc_interval = 0;
        }

        if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
                sptlrpc_enc_pool_add_user();

        CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
               policy->sp_name, gsec);
        return 0;
}
void gss_sec_destroy_common(struct gss_sec *gsec)
{
        struct ptlrpc_sec *sec = &gsec->gs_base;
        ENTRY;

        LASSERT(sec->ps_import);
        LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
        LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);

        if (gsec->gs_mech) {
                lgss_mech_put(gsec->gs_mech);
                gsec->gs_mech = NULL;
        }

        class_import_put(sec->ps_import);

        if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
                sptlrpc_enc_pool_del_user();

        EXIT;
}
void gss_sec_kill(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}
int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_ctx_ops *ctxops,
                            struct vfs_cred *vcred)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        gctx->gc_win = 0;
        cfs_atomic_set(&gctx->gc_seq, 0);

        CFS_INIT_HLIST_NODE(&ctx->cc_cache);
        cfs_atomic_set(&ctx->cc_refcount, 0);
        ctx->cc_sec = sec;
        ctx->cc_ops = ctxops;
        ctx->cc_expire = 0;
        ctx->cc_flags = PTLRPC_CTX_NEW;
        ctx->cc_vcred = *vcred;
        spin_lock_init(&ctx->cc_lock);
        CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
        CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);

        /* take a ref on the owning sec, balanced in ctx destruction */
        cfs_atomic_inc(&sec->ps_refcount);
        /* statistic only */
        cfs_atomic_inc(&sec->ps_nctx);

        CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
               sec->ps_policy->sp_name, ctx->cc_sec,
               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
        return 0;
}
/*
 * return value:
 *   1: the context has been taken care of by someone else
 *   0: proceed to really destroy the context locally
 */
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        /*
         * Remove the UPTODATE flag of a reverse ctx so we won't send a fini
         * rpc; this avoids the client-side reverse svc ctx being
         * mis-destroyed in various recovery scenarios. Anyway the client
         * can manage its reverse ctx well by associating it with its buddy
         * ctx.
         */
        if (sec_is_reverse(sec))
                ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;

        if (gctx->gc_mechctx) {
                /* the final context fini rpc will use this ctx too, and it's
                 * asynchronous, finished by request_out_callback(). So we
                 * take a refcount; whoever finally drops it to 0 is
                 * responsible for the rest of the destruction. */
                cfs_atomic_inc(&ctx->cc_refcount);

                gss_do_ctx_fini_rpc(gctx);
                gss_cli_ctx_finalize(gctx);

                if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
                        return 1;
        }

        if (sec_is_reverse(sec))
                CWARN("reverse sec %p: destroy ctx %p\n",
                      ctx->cc_sec, ctx);
        else
                CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
                      sec->ps_policy->sp_name, ctx->cc_sec,
                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

        return 0;
}
static
int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int svc, int msgsize)
{
        int bufsize, txtsize;
        int bufcnt = 2;
        __u32 buflens[5];
        ENTRY;

        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - user descriptor (optional)
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode.
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        txtsize = buflens[0];

        buflens[1] = msgsize;
        if (svc == SPTLRPC_SVC_INTG)
                txtsize += buflens[1];

        if (req->rq_pack_udesc) {
                buflens[bufcnt] = sptlrpc_current_user_desc_size();
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_pack_bulk) {
                buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
                                                       &req->rq_flvr,
                                                       0, req->rq_bulk_read);
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_ctx_init)
                buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
        else if (svc != SPTLRPC_SVC_NULL)
                buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx,
                                                    txtsize, 0);

        bufsize = lustre_msg_size_v2(bufcnt, buflens);

        if (!req->rq_reqbuf) {
                bufsize = size_roundup_power2(bufsize);

                OBD_ALLOC_LARGE(req->rq_reqbuf, bufsize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = bufsize;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= bufsize);
                memset(req->rq_reqbuf, 0, bufsize);
        }

        lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
        LASSERT(req->rq_reqmsg);

        /* pack the user desc here, since later we might leave the
         * current user's process */
        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, 2);

        RETURN(0);
}
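
/*
 * Illustrative sketch, not part of the original code: the segment layout
 * produced above for an INTG-mode request carrying a user descriptor but
 * no bulk descriptor. The signature in the last segment covers all the
 * preceding segments:
 *
 *   seg 0: gss header          PTLRPC_GSS_HEADER_SIZE
 *   seg 1: lustre message      msgsize
 *   seg 2: user descriptor     sptlrpc_current_user_desc_size()
 *   seg 3: signature           gss_cli_payload(ctx, txtsize, 0)
 *
 * where txtsize is the sum of the lengths of segments 0-2.
 */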
static
int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        __u32 ibuflens[3], wbuflens[2];
        int ibufcnt;
        int clearsize, wiresize;
        ENTRY;

        LASSERT(req->rq_clrbuf == NULL);
        LASSERT(req->rq_clrbuf_len == 0);

        /* Inner (clear) buffers
         *  - lustre message
         *  - user descriptor (optional)
         *  - bulk checksum (optional)
         */
        ibufcnt = 1;
        ibuflens[0] = msgsize;

        if (req->rq_pack_udesc)
                ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
        if (req->rq_pack_bulk)
                ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
                                                           &req->rq_flvr, 0,
                                                           req->rq_bulk_read);

        clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
        /* to allow appending padding during encryption */
        clearsize += GSS_MAX_CIPHER_BLOCK;

        /* Wrapper (wire) buffers
         *  - gss header
         *  - cipher text
         */
        wbuflens[0] = PTLRPC_GSS_HEADER_SIZE;
        wbuflens[1] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
        wiresize = lustre_msg_size_v2(2, wbuflens);

        if (req->rq_pool) {
                /* rq_reqbuf is preallocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);

                memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);

                /* if the pre-allocated buffer is big enough, we just pack
                 * both clear buf & request buf in it, to avoid more alloc. */
                if (clearsize + wiresize <= req->rq_reqbuf_len) {
                        req->rq_clrbuf =
                                (void *) (((char *) req->rq_reqbuf) + wiresize);
                } else {
                        CWARN("pre-allocated buf size %d is not enough for "
                              "both clear (%d) and cipher (%d) text, proceed "
                              "with extra allocation\n", req->rq_reqbuf_len,
                              clearsize, wiresize);
                }
        }

        if (!req->rq_clrbuf) {
                clearsize = size_roundup_power2(clearsize);

                OBD_ALLOC_LARGE(req->rq_clrbuf, clearsize);
                if (!req->rq_clrbuf)
                        RETURN(-ENOMEM);
        }
        req->rq_clrbuf_len = clearsize;

        lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_clrbuf, 1);

        RETURN(0);
}
/*
 * NOTE: any change to the request buffer allocation should also consider
 * changing the enlarge_reqbuf() series of functions.
 */
int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_pack_bulk ||
                (req->rq_bulk_read || req->rq_bulk_write));

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
        case SPTLRPC_SVC_PRIV:
                return gss_alloc_reqbuf_priv(sec, req, msgsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}
void gss_free_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        int privacy;
        ENTRY;

        LASSERT(!req->rq_pool || req->rq_reqbuf);
        privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;

        if (!req->rq_clrbuf)
                goto release_reqbuf;

        /* release clear buffer */
        LASSERT(privacy);
        LASSERT(req->rq_clrbuf_len);

        if (req->rq_pool == NULL ||
            req->rq_clrbuf < req->rq_reqbuf ||
            (char *) req->rq_clrbuf >=
            (char *) req->rq_reqbuf + req->rq_reqbuf_len)
                OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);

        req->rq_clrbuf = NULL;
        req->rq_clrbuf_len = 0;

release_reqbuf:
        if (!req->rq_pool && req->rq_reqbuf) {
                LASSERT(req->rq_reqbuf_len);

                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }

        EXIT;
}
static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
{
        bufsize = size_roundup_power2(bufsize);

        OBD_ALLOC_LARGE(req->rq_repbuf, bufsize);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = bufsize;
        return 0;
}
static
int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int svc, int msgsize)
{
        int             txtsize;
        __u32           buflens[4];
        int             bufcnt = 2;
        int             alloc_size;

        /*
         * on-wire data layout:
         * - gss header
         * - lustre message
         * - bulk sec descriptor (optional)
         * - signature (optional)
         *   - svc == NULL: NULL
         *   - svc == AUTH: signature of gss header
         *   - svc == INTG: signature of all above
         *
         * if this is context negotiation, reserve fixed space
         * at the last (signature) segment regardless of svc mode.
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        txtsize = buflens[0];

        buflens[1] = msgsize;
        if (svc == SPTLRPC_SVC_INTG)
                txtsize += buflens[1];

        if (req->rq_pack_bulk) {
                buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
                                                       &req->rq_flvr,
                                                       1, req->rq_bulk_read);
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (req->rq_ctx_init)
                buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
        else if (svc != SPTLRPC_SVC_NULL)
                buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx,
                                                    txtsize, 0);

        alloc_size = lustre_msg_size_v2(bufcnt, buflens);

        /* add space for early reply */
        alloc_size += gss_at_reply_off_integ;

        return do_alloc_repbuf(req, alloc_size);
}
static
int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        int             txtsize;
        __u32           buflens[2];
        int             bufcnt;
        int             alloc_size;

        /* inner buffers */
        bufcnt = 1;
        buflens[0] = msgsize;

        if (req->rq_pack_bulk)
                buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
                                                         &req->rq_flvr,
                                                         1, req->rq_bulk_read);
        txtsize = lustre_msg_size_v2(bufcnt, buflens);
        txtsize += GSS_MAX_CIPHER_BLOCK;

        /* wrapper buffers */
        bufcnt = 2;
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);

        alloc_size = lustre_msg_size_v2(bufcnt, buflens);
        /* add space for early reply */
        alloc_size += gss_at_reply_off_priv;

        return do_alloc_repbuf(req, alloc_size);
}
int gss_alloc_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_pack_bulk ||
                (req->rq_bulk_read || req->rq_bulk_write));

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
        case SPTLRPC_SVC_PRIV:
                return gss_alloc_repbuf_priv(sec, req, msgsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}
void gss_free_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
        req->rq_repdata = NULL;
        req->rq_repdata_len = 0;
}
static int get_enlarged_msgsize(struct lustre_msg *msg,
                                int segment, int newsize)
{
        int save, newmsg_size;

        LASSERT(newsize >= msg->lm_buflens[segment]);

        save = msg->lm_buflens[segment];
        msg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment] = save;

        return newmsg_size;
}

static int get_enlarged_msgsize2(struct lustre_msg *msg,
                                 int segment1, int newsize1,
                                 int segment2, int newsize2)
{
        int save1, save2, newmsg_size;

        LASSERT(newsize1 >= msg->lm_buflens[segment1]);
        LASSERT(newsize2 >= msg->lm_buflens[segment2]);

        save1 = msg->lm_buflens[segment1];
        save2 = msg->lm_buflens[segment2];
        msg->lm_buflens[segment1] = newsize1;
        msg->lm_buflens[segment2] = newsize2;
        newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment1] = save1;
        msg->lm_buflens[segment2] = save2;

        return newmsg_size;
}
static
int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int svc,
                            int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     txtsize, sigsize = 0, i;
        int                     newmsg_size, newbuf_size;

        /*
         * gss header is at seg 0;
         * embedded msg is at seg 1;
         * signature (if any) is at the last seg
         */
        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);

        /* 1. compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
        LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);

        /* 2. compute new wrapper msg size */
        if (svc == SPTLRPC_SVC_NULL) {
                /* no signature, get size directly */
                newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
                                                   1, newmsg_size);
        } else {
                txtsize = req->rq_reqbuf->lm_buflens[0];

                if (svc == SPTLRPC_SVC_INTG) {
                        for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
                                txtsize += req->rq_reqbuf->lm_buflens[i];
                        txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
                }

                sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
                LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));

                newbuf_size = get_enlarged_msgsize2(
                                        req->rq_reqbuf,
                                        1, newmsg_size,
                                        msg_last_segidx(req->rq_reqbuf),
                                        sigsize);
        }

        /* a request from the pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC_LARGE(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
        }

        /* do enlargement, from wrapper to embedded, from end to begin */
        if (svc != SPTLRPC_SVC_NULL)
                _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
                                             msg_last_segidx(req->rq_reqbuf),
                                             sigsize);

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}
static
int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int segment, int newsize)
{
        struct lustre_msg      *newclrbuf;
        int                     newmsg_size, newclrbuf_size, newcipbuf_size;
        __u32                   buflens[3];

        /*
         * embedded msg is at seg 0 of the clear buffer;
         * cipher text is at seg 2 of the cipher buffer;
         */
        LASSERT(req->rq_pool ||
                (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
        LASSERT(req->rq_reqbuf == NULL ||
                (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);

        /* compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);

        /* compute new clear buffer size */
        newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
        newclrbuf_size += GSS_MAX_CIPHER_BLOCK;

        /* compute new cipher buffer size */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
        buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
        newcipbuf_size = lustre_msg_size_v2(3, buflens);

        /* handle the case where we put both the clear buf and the cipher
         * buf into a pre-allocated single buffer. */
        if (unlikely(req->rq_pool) &&
            req->rq_clrbuf >= req->rq_reqbuf &&
            (char *) req->rq_clrbuf <
            (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                /* best case: we still fit into the pre-allocated buffer. */
                if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
                        void *src, *dst;

                        /* move clear text backward. */
                        src = req->rq_clrbuf;
                        dst = (char *) req->rq_reqbuf + newcipbuf_size;

                        memmove(dst, src, req->rq_clrbuf_len);

                        req->rq_clrbuf = (struct lustre_msg *) dst;
                        req->rq_clrbuf_len = newclrbuf_size;
                        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
                } else {
                        /* sadly we have to split out the clear buffer */
                        LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
                        LASSERT(req->rq_clrbuf_len < newclrbuf_size);
                }
        }

        if (req->rq_clrbuf_len < newclrbuf_size) {
                newclrbuf_size = size_roundup_power2(newclrbuf_size);

                OBD_ALLOC_LARGE(newclrbuf, newclrbuf_size);
                if (newclrbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);

                if (req->rq_reqbuf == NULL ||
                    req->rq_clrbuf < req->rq_reqbuf ||
                    (char *) req->rq_clrbuf >=
                    (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                        OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
                }

                req->rq_clrbuf = newclrbuf;
                req->rq_clrbuf_len = newclrbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
        req->rq_reqlen = newmsg_size;

        RETURN(0);
}
int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int segment, int newsize)
{
        int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);

        LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);

        switch (svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
        case SPTLRPC_SVC_PRIV:
                return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
        default:
                LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return 0;
        }
}
int gss_sec_install_rctx(struct obd_import *imp,
                         struct ptlrpc_sec *sec,
                         struct ptlrpc_cli_ctx *ctx)
{
        struct gss_sec     *gsec;
        struct gss_cli_ctx *gctx;
        int                 rc;

        gsec = container_of(sec, struct gss_sec, gs_base);
        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
        return rc;
}
/********************************************
 * server side API                          *
 ********************************************/

static inline
int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
{
        LASSERT(grctx);
        return (grctx->src_init || grctx->src_init_continue ||
                grctx->src_err_notify);
}

static
void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
{
        if (grctx->src_ctx)
                gss_svc_upcall_put_ctx(grctx->src_ctx);

        sptlrpc_policy_put(grctx->src_base.sc_policy);
        OBD_FREE_PTR(grctx);
}

static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
        LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
        cfs_atomic_inc(&grctx->src_base.sc_refcount);
}

static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
        LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);

        if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
                gss_svc_reqctx_free(grctx);
}
static
int gss_svc_sign(struct ptlrpc_request *req,
                 struct ptlrpc_reply_state *rs,
                 struct gss_svc_reqctx *grctx,
                 __u32 svc)
{
        __u32   flags = 0;
        int     rc;
        ENTRY;

        LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));

        /* the embedded lustre_msg might have been shrunk */
        if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
                lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);

        if (req->rq_pack_bulk)
                flags |= LUSTRE_GSS_PACK_BULK;

        rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
                          LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
                          grctx->src_wirectx.gw_seq, svc, NULL);
        if (rc < 0)
                RETURN(rc);

        rs->rs_repdata_len = rc;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = gss_at_reply_off_integ;
                else
                        req->rq_reply_off = 0;
        } else {
                if (svc == SPTLRPC_SVC_NULL)
                        rs->rs_repbuf->lm_cksum = crc32_le(!(__u32) 0,
                                        lustre_msg_buf(rs->rs_repbuf, 1, 0),
                                        lustre_msg_buflen(rs->rs_repbuf, 1));
                req->rq_reply_off = 0;
        }

        RETURN(0);
}
int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
{
        struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct ptlrpc_reply_state *rs;
        struct gss_err_header     *ghdr;
        int                        replen = sizeof(struct ptlrpc_body);
        int                        rc;
        ENTRY;

        //if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
        //        RETURN(-EINVAL);

        grctx->src_err_notify = 1;
        grctx->src_reserve_len = 0;

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
        if (rc) {
                CERROR("could not pack reply, err %d\n", rc);
                RETURN(rc);
        }

        /* gss hdr */
        rs = req->rq_reply_state;
        LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
        ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        ghdr->gh_major = major;
        ghdr->gh_minor = minor;
        ghdr->gh_handle.len = 0; /* fake context handle */

        rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                                rs->rs_repbuf->lm_buflens);

        CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
               major, minor, libcfs_nid2str(req->rq_peer.nid));
        RETURN(0);
}
static
int gss_svc_handle_init(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct lustre_msg         *reqbuf = req->rq_reqbuf;
        struct obd_uuid           *uuid;
        struct obd_device         *target;
        rawobj_t                   uuid_obj, rvs_hdl, in_token;
        __u32                      lustre_svc;
        __u32                     *secdata, seclen;
        int                        swabbed, rc;
        ENTRY;

        CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
               libcfs_nid2str(req->rq_peer.nid));

        req->rq_ctx_init = 1;

        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                CERROR("unexpected bulk flag\n");
                RETURN(SECSVC_DROP);
        }

        if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
                CERROR("proc %u: invalid handle length %u\n",
                       gw->gw_proc, gw->gw_handle.len);
                RETURN(SECSVC_DROP);
        }

        if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4) {
                CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        swabbed = ptlrpc_req_need_swab(req);

        /* ctx initiation payload is in the last segment */
        secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
        seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];

        if (seclen < 4 + 4) {
                CERROR("sec size %d too small\n", seclen);
                RETURN(SECSVC_DROP);
        }

        /* lustre svc type */
        lustre_svc = le32_to_cpu(*secdata++);
        seclen -= 4;

        /* extract the target uuid; note this code is somewhat fragile
         * because it touches the internal structure of obd_uuid */
        if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
                CERROR("failed to extract target uuid\n");
                RETURN(SECSVC_DROP);
        }
        uuid_obj.data[uuid_obj.len - 1] = '\0';

        uuid = (struct obd_uuid *) uuid_obj.data;
        target = class_uuid2obd(uuid);
        if (!target || target->obd_stopping || !target->obd_set_up) {
                CERROR("target '%s' is not available for context init (%s)\n",
                       uuid->uuid, target == NULL ? "no target" :
                       (target->obd_stopping ? "stopping" : "not set up"));
                RETURN(SECSVC_DROP);
        }

        /* extract the reverse handle */
        if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
                CERROR("failed to extract reverse handle\n");
                RETURN(SECSVC_DROP);
        }

        /* extract the token */
        if (rawobj_extract(&in_token, &secdata, &seclen)) {
                CERROR("can't extract token\n");
                RETURN(SECSVC_DROP);
        }

        rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
                                        &rvs_hdl, &in_token);
        if (rc != SECSVC_OK)
                RETURN(rc);

        if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
            grctx->src_ctx->gsc_usr_root)
                CWARN("create svc ctx %p: user from %s authenticated as %s\n",
                      grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
                      grctx->src_ctx->gsc_usr_mds ? "mds" :
                        (grctx->src_ctx->gsc_usr_oss ? "oss" : "root"));
        else
                CWARN("create svc ctx %p: accept user %u from %s\n",
                      grctx->src_ctx, grctx->src_ctx->gsc_uid,
                      libcfs_nid2str(req->rq_peer.nid));

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor\n");
                        RETURN(SECSVC_DROP);
                }
                if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
        }

        req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
        req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);

        RETURN(rc);
}
/*
 * The last segment must be the gss signature.
 */
static
int gss_svc_verify_request(struct ptlrpc_request *req,
                           struct gss_svc_reqctx *grctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        struct lustre_msg  *msg = req->rq_reqbuf;
        int                 offset = 2;
        int                 swabbed;
        ENTRY;

        *major = GSS_S_COMPLETE;

        if (msg->lm_bufcount < 2) {
                CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
                RETURN(-EINVAL);
        }

        if (gw->gw_svc == SPTLRPC_SVC_NULL)
                goto verified;

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
        if (*major != GSS_S_COMPLETE) {
                CERROR("failed to verify request: %x\n", *major);
                RETURN(-EACCES);
        }

        if (gctx->gsc_reverse == 0 &&
            gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

verified:
        swabbed = ptlrpc_req_need_swab(req);

        /* user descriptor */
        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (msg->lm_bufcount < (offset + 1)) {
                        CERROR("no user desc included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        /* check bulk_sec_desc data */
        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                if (msg->lm_bufcount < (offset + 1)) {
                        CERROR("missing bulk sec descriptor\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset, swabbed))
                        RETURN(-EINVAL);

                req->rq_pack_bulk = 1;
                grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
                grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
        }

        req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
        req->rq_reqlen = msg->lm_buflens[1];
        RETURN(0);
}
static
int gss_svc_unseal_request(struct ptlrpc_request *req,
                           struct gss_svc_reqctx *grctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        struct lustre_msg  *msg = req->rq_reqbuf;
        int                 swabbed, msglen, offset = 1;
        ENTRY;

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
                                &msglen, req->rq_reqdata_len);
        if (*major != GSS_S_COMPLETE) {
                CERROR("failed to unwrap request: %x\n", *major);
                RETURN(-EACCES);
        }

        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        swabbed = __lustre_unpack_msg(msg, msglen);
        if (swabbed < 0) {
                CERROR("Failed to unpack after decryption\n");
                RETURN(-EINVAL);
        }
        req->rq_reqdata_len = msglen;

        if (msg->lm_bufcount < 1) {
                CERROR("Invalid buffer: is empty\n");
                RETURN(-EINVAL);
        }

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no user descriptor included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no bulk checksum included\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset, swabbed))
                        RETURN(-EINVAL);

                req->rq_pack_bulk = 1;
                grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
                grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
        }

        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
        RETURN(0);
}
static
int gss_svc_handle_data(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major = 0;
        int                    rc = 0;
        ENTRY;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                major = GSS_S_NO_CONTEXT;
                goto error;
        }

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_verify_request(req, grctx, gw, &major);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_unseal_request(req, grctx, gw, &major);
                break;
        default:
                CERROR("unsupported gss service %d\n", gw->gw_svc);
                rc = -EINVAL;
        }

        if (rc == 0)
                RETURN(SECSVC_OK);

        CERROR("svc %u failed: major 0x%08x: req xid "LPU64" ctx %p idx "
               LPX64"(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
               grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
               grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
error:
        /* we only notify the client on NO_CONTEXT/BAD_SIG, which might
         * happen after a server reboot, to allow recovery. */
        if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
            gss_pack_err_notify(req, major, 0) == 0)
                RETURN(SECSVC_COMPLETE);

        RETURN(SECSVC_DROP);
}

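/*
 * PTLRPC_GSS_PROC_DESTROY: the client is tearing down its context.
 * A destroy request must be integrity-protected; verify it like a
 * normal request, then remove the context from the upcall cache.
 * No reply is sent (rq_no_reply is set).
 */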
static
int gss_svc_handle_destroy(struct ptlrpc_request *req,
                           struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major;
        ENTRY;

        req->rq_ctx_fini = 1;
        req->rq_no_reply = 1;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
                RETURN(SECSVC_DROP);
        }

        if (gw->gw_svc != SPTLRPC_SVC_INTG) {
                CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
                RETURN(SECSVC_DROP);
        }

        if (gss_svc_verify_request(req, grctx, gw, &major))
                RETURN(SECSVC_DROP);

        CWARN("destroy svc ctx %p idx "LPX64" (%u->%s)\n",
              grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
              grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        gss_svc_upcall_destroy_ctx(grctx->src_ctx);

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (req->rq_reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }
                if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
                                             ptlrpc_req_need_swab(req))) {
                        CERROR("malformed user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
        }

        RETURN(SECSVC_OK);
}

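/*
 * Main entry of the GSS policy for incoming requests: segment 0 of
 * every GSS request is the wire gss_header. Decode it, stash the wire
 * context in a freshly allocated request context, and dispatch on
 * gh_proc (INIT/CONTINUE_INIT, DATA, DESTROY). On SECSVC_OK the
 * authenticated identity bits are copied into the ptlrpc_request.
 */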
int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
{
        struct gss_header     *ghdr;
        struct gss_svc_reqctx *grctx;
        struct gss_wire_ctx   *gw;
        int                    swabbed, rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_svc_ctx == NULL);

        if (req->rq_reqbuf->lm_bufcount < 2) {
                CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        swabbed = ptlrpc_req_need_swab(req);

        ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(SECSVC_DROP);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u, expect %u\n", ghdr->gh_version,
                       PTLRPC_GSS_VERSION);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = ghdr->gh_sp;

        /* alloc grctx data */
        OBD_ALLOC_PTR(grctx);
        if (!grctx)
                RETURN(SECSVC_DROP);

        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
        cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
        req->rq_svc_ctx = &grctx->src_base;
        gw = &grctx->src_wirectx;

        /* save wire context */
        gw->gw_flags = ghdr->gh_flags;
        gw->gw_proc = ghdr->gh_proc;
        gw->gw_seq = ghdr->gh_seq;
        gw->gw_svc = ghdr->gh_svc;
        rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);

        /* keep the original wire header, which is subject to checksum
         * verification */
        if (swabbed)
                gss_header_swabber(ghdr);

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_INIT:
        case PTLRPC_GSS_PROC_CONTINUE_INIT:
                rc = gss_svc_handle_init(req, gw);
                break;
        case PTLRPC_GSS_PROC_DATA:
                rc = gss_svc_handle_data(req, gw);
                break;
        case PTLRPC_GSS_PROC_DESTROY:
                rc = gss_svc_handle_destroy(req, gw);
                break;
        default:
                CERROR("unknown proc %u\n", gw->gw_proc);
                rc = SECSVC_DROP;
                break;
        }

        switch (rc) {
        case SECSVC_OK:
                LASSERT(grctx->src_ctx);

                req->rq_auth_gss = 1;
                req->rq_auth_remote = grctx->src_ctx->gsc_remote;
                req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
                req->rq_auth_usr_ost = grctx->src_ctx->gsc_usr_oss;
                req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
                req->rq_auth_uid = grctx->src_ctx->gsc_uid;
                req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
                break;
        case SECSVC_COMPLETE:
                break;
        case SECSVC_DROP:
                gss_svc_reqctx_free(grctx);
                req->rq_svc_ctx = NULL;
                break;
        }

        RETURN(rc);
}

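/*
 * Evict the GSS context attached to svc_ctx from the svc upcall cache,
 * so that subsequent requests on it fail the context lookup and the
 * client has to negotiate a new context.
 */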
void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_svc_reqctx *grctx;
        ENTRY;

        if (svc_ctx == NULL) {
                EXIT;
                return;
        }

        grctx = gss_svc_ctx2reqctx(svc_ctx);
        CWARN("gss svc invalidate ctx %p(%u)\n",
              grctx->src_ctx, grctx->src_ctx->gsc_uid);
        gss_svc_upcall_destroy_ctx(grctx->src_ctx);
        EXIT;
}

static inline
int gss_svc_payload(struct gss_svc_reqctx *grctx, int early,
                    int msgsize, int privacy)
{
        /* an early reply is treated normally, since it shares the same ctx
         * with the original request; only a final reply on a special ctx
         * uses the pre-reserved length instead of asking the mech */
        if (early == 0 && gss_svc_reqctx_is_special(grctx))
                return grctx->src_reserve_len;

        return gss_mech_payload(NULL, msgsize, privacy);
}

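/*
 * Space needed in the reply for the bulk security descriptor. For a
 * bulk read, the descriptor also carries the integrity checksum or
 * privacy token covering the data sent back to the client, hence the
 * extra mech payload.
 */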
static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
                                struct sptlrpc_flavor *flvr,
                                int read)
{
        int payload = sizeof(struct ptlrpc_bulk_sec_desc);

        if (read) {
                switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
                case SPTLRPC_BULK_SVC_NULL:
                        break;
                case SPTLRPC_BULK_SVC_INTG:
                        payload += gss_mech_payload(NULL, 0, 0);
                        break;
                case SPTLRPC_BULK_SVC_PRIV:
                        payload += gss_mech_payload(NULL, 0, 1);
                        break;
                case SPTLRPC_BULK_SVC_AUTH:
                default:
                        LBUG();
                }
        }

        return payload;
}

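/*
 * Pre-allocate the reply state and size the reply buffer. The wire
 * layout being prepared is, for the null/auth/integrity services:
 *
 *   | gss header | clear reply msg | [bulk sec desc] | [signature] |
 *
 * and for privacy, an inner clear buffer that is later sealed by
 * gss_svc_seal():
 *
 *   inner: | clear reply msg | [bulk sec desc] |
 *   wire:  | gss header | ciphertext token |
 *
 * so the buffer must be large enough for whichever form is used.
 */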
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
        struct gss_svc_reqctx     *grctx;
        struct ptlrpc_reply_state *rs;
        int                        early, privacy, svc, bsd_off = 0;
        __u32                      ibuflens[2], buflens[4];
        int                        ibufcnt = 0, bufcnt;
        int                        txtsize, wmsg_size, rs_size;
        ENTRY;

        LASSERT(msglen % 8 == 0);

        if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
                CERROR("client request bulk sec on non-bulk rpc\n");
                RETURN(-EPROTO);
        }

        svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
        early = (req->rq_packed_final == 0);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        if (!early && gss_svc_reqctx_is_special(grctx))
                privacy = 0;
        else
                privacy = (svc == SPTLRPC_SVC_PRIV);

        if (privacy) {
                /* inner clear buffers */
                ibufcnt = 1;
                ibuflens[0] = msglen;

                if (req->rq_pack_bulk) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = ibufcnt;
                        ibuflens[ibufcnt++] = gss_svc_bulk_payload(
                                                        grctx->src_ctx,
                                                        &req->rq_flvr,
                                                        req->rq_bulk_read);
                }

                txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
                txtsize += GSS_MAX_CIPHER_BLOCK;

                /* wrapper buffer */
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = gss_svc_payload(grctx, early, txtsize, 1);
        } else {
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = msglen;

                txtsize = buflens[0];
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[1];

                if (req->rq_pack_bulk) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = bufcnt;
                        buflens[bufcnt] = gss_svc_bulk_payload(
                                                        grctx->src_ctx,
                                                        &req->rq_flvr,
                                                        req->rq_bulk_read);
                        if (svc == SPTLRPC_SVC_INTG)
                                txtsize += buflens[bufcnt];
                        bufcnt++;
                }

                if ((!early && gss_svc_reqctx_is_special(grctx)) ||
                    svc != SPTLRPC_SVC_NULL)
                        buflens[bufcnt++] = gss_svc_payload(grctx, early,
                                                            txtsize, 0);
        }

        wmsg_size = lustre_msg_size_v2(bufcnt, buflens);

        rs_size = sizeof(*rs) + wmsg_size;
        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC_LARGE(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = wmsg_size;

        /* initialize the buffer */
        if (privacy) {
                lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
        } else {
                lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
                rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
        }

        if (bsd_off) {
                grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
                grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
                                                           bsd_off);
        }

        gss_svc_reqctx_addref(grctx);
        rs->rs_svc_ctx = req->rq_svc_ctx;

        LASSERT(rs->rs_msg);
        req->rq_reply_state = rs;
        RETURN(0);
}

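/*
 * Privacy-protect the reply: shrink the clear message to its final
 * length, build a gss header at the (currently unused) tail of the
 * reply buffer, wrap header + clear message into a ciphertext token,
 * then rebuild the reply buffer in wire form as gss header + token.
 */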
static int gss_svc_seal(struct ptlrpc_request *req,
                        struct ptlrpc_reply_state *rs,
                        struct gss_svc_reqctx *grctx)
{
        struct gss_svc_ctx *gctx = grctx->src_ctx;
        rawobj_t            hdrobj, msgobj, token;
        struct gss_header  *ghdr;
        __u8               *token_buf;
        int                 token_buflen;
        __u32               buflens[2], major;
        int                 msglen, rc;
        ENTRY;

        /* get clear data length. note the embedded lustre_msg might
         * have been shrunk */
        if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
                msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
        else
                msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                            rs->rs_repbuf->lm_buflens);

        /* temporarily use the tail of the buffer to hold gss header data */
        LASSERT(msglen + PTLRPC_GSS_HEADER_SIZE <= rs->rs_repbuf_len);
        ghdr = (struct gss_header *) ((char *) rs->rs_repbuf +
                                rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = LUSTRE_SP_ANY;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
        ghdr->gh_seq = grctx->src_wirectx.gw_seq;
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = 0;
        if (req->rq_pack_bulk)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;

        /* allocate a temporary cipher buffer */
        token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
        OBD_ALLOC_LARGE(token_buf, token_buflen);
        if (token_buf == NULL)
                RETURN(-ENOMEM);

        hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
        hdrobj.data = (__u8 *) ghdr;
        msgobj.len = msglen;
        msgobj.data = (__u8 *) rs->rs_repbuf;
        token.len = token_buflen;
        token.data = token_buf;

        major = lgss_wrap(gctx->gsc_mechctx, &hdrobj, &msgobj,
                          rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE, &token);
        if (major != GSS_S_COMPLETE) {
                CERROR("wrap message error: %08x\n", major);
                GOTO(out_free, rc = -EPERM);
        }
        LASSERT(token.len <= token_buflen);

        /* we are about to overwrite data at rs->rs_repbuf; nullify pointers
         * into it to catch any further illegal use. */
        if (req->rq_pack_bulk) {
                grctx->src_repbsd = NULL;
                grctx->src_repbsd_size = 0;
        }

        /* now fill the actual wire data
         * - gss header
         * - gss token
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = token.len;

        rs->rs_repdata_len = lustre_msg_size_v2(2, buflens);
        LASSERT(rs->rs_repdata_len <= rs->rs_repbuf_len);

        lustre_init_msg_v2(rs->rs_repbuf, 2, buflens, NULL);
        rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        memcpy(lustre_msg_buf(rs->rs_repbuf, 0, 0), ghdr,
               PTLRPC_GSS_HEADER_SIZE);
        memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);

        /* reply offset */
        if (req->rq_packed_final &&
            (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))
                req->rq_reply_off = gss_at_reply_off_priv;
        else
                req->rq_reply_off = 0;

        /* to catch the upper layer's further access */
        rs->rs_msg = NULL;
        req->rq_repmsg = NULL;
        req->rq_replen = 0;

        rc = 0;
out_free:
        OBD_FREE_LARGE(token_buf, token_buflen);
        RETURN(rc);
}

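/*
 * Put the prepared reply into its final wire form: sign it for the
 * null/auth/integrity services, or seal it for privacy. A reply on a
 * special ctx is already in wire form (rs_repdata_len != 0) and only
 * needs its adaptive-timeout reply offset set.
 */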
int gss_svc_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct gss_wire_ctx       *gw = &grctx->src_wirectx;
        int                        early, rc;
        ENTRY;

        early = (req->rq_packed_final == 0);

        if (!early && gss_svc_reqctx_is_special(grctx)) {
                LASSERT(rs->rs_repdata_len != 0);

                req->rq_reply_off = gss_at_reply_off_integ;
                RETURN(0);
        }

        /* early reply could happen in many cases */
        if (!early &&
            gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
            gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
                CERROR("proc %d not supported\n", gw->gw_proc);
                RETURN(-EINVAL);
        }

        LASSERT(grctx->src_ctx);

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_seal(req, rs, grctx);
                break;
        default:
                CERROR("Unknown service %d\n", gw->gw_svc);
                GOTO(out, rc = -EINVAL);
        }

out:
        RETURN(rc);
}

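/*
 * Release a reply state built by gss_svc_alloc_rs(): drop the request
 * context reference taken there, and free the buffer unless it came
 * from the service's preallocated pool.
 */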
void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
{
        struct gss_svc_reqctx *grctx;

        LASSERT(rs->rs_svc_ctx);
        grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);

        gss_svc_reqctx_decref(grctx);
        rs->rs_svc_ctx = NULL;

        if (!rs->rs_prealloc)
                OBD_FREE_LARGE(rs, rs->rs_size);
}

void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
        LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
        gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}

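/*
 * Build a reverse (server-to-client) client context from an existing
 * service context, so the server can issue callback rpcs. The sequence
 * number is resumed from the buddy svc ctx rather than restarted at 0,
 * otherwise callbacks after a recovery would be dropped as replays.
 */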
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
                         struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_cli_ctx *cli_gctx = ctx2gctx(cli_ctx);
        struct gss_svc_ctx *svc_gctx = gss_svc_ctx2gssctx(svc_ctx);
        struct gss_ctx     *mechctx = NULL;

        LASSERT(cli_gctx);
        LASSERT(svc_gctx && svc_gctx->gsc_mechctx);

        cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
        cli_gctx->gc_win = GSS_SEQ_WIN;

        /* The reverse ctx might get lost in some recovery situations, and
         * the same svc_ctx will then be used to re-create it. If a callback
         * was sent out before that, a new reverse ctx starting at sequence 0
         * would cause future callback rpcs to be treated as replays.
         *
         * Each reverse root ctx records its latest sequence number on its
         * buddy svcctx before being destroyed, so we continue from there. */
        cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);

        if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
                CERROR("failed to dup svc handle\n");
                goto err_out;
        }

        if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
            GSS_S_COMPLETE) {
                CERROR("failed to copy mech context\n");
                goto err_svc_handle;
        }

        if (rawobj_dup(&cli_gctx->gc_handle, &svc_gctx->gsc_rvs_hdl)) {
                CERROR("failed to dup reverse handle\n");
                goto err_ctx;
        }

        cli_gctx->gc_mechctx = mechctx;
        gss_cli_ctx_uptodate(cli_gctx);

        return 0;

err_ctx:
        lgss_delete_sec_context(&mechctx);
err_svc_handle:
        rawobj_free(&cli_gctx->gc_svc_handle);
err_out:
        return -ENOMEM;
}

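/*
 * Precompute the reply offsets used with adaptive timeouts: a signed
 * early reply for integrity mode and a sealed one for privacy mode,
 * both sized from lustre_msg_early_size().
 */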
static void gss_init_at_reply_offset(void)
{
        __u32 buflens[3];
        int clearsize;

        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = lustre_msg_early_size();
        buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
        gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);

        buflens[0] = lustre_msg_early_size();
        clearsize = lustre_msg_size_v2(1, buflens);
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(NULL, clearsize, 0);
        buflens[2] = gss_cli_payload(NULL, clearsize, 1);
        gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
}

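/*
 * Module setup: bring up lprocfs, the client and service upcall
 * channels and the kerberos mech, then register the keyring (and,
 * if configured, pipefs) policies last, since they may be used
 * immediately after registration.
 */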
int __init sptlrpc_gss_init(void)
{
        int rc;

        rc = gss_init_lproc();
        if (rc)
                return rc;

        rc = gss_init_cli_upcall();
        if (rc)
                goto out_lproc;

        rc = gss_init_svc_upcall();
        if (rc)
                goto out_cli_upcall;

        rc = init_kerberos_module();
        if (rc)
                goto out_svc_upcall;

        /* register the policy after everything else is initialized, because
         * it might be used immediately after the registration. */
        rc = gss_init_keyring();
        if (rc)
                goto out_kerberos;

#ifdef HAVE_GSS_PIPEFS
        rc = gss_init_pipefs();
        if (rc)
                goto out_keyring;
#endif

        gss_init_at_reply_offset();

        return 0;

#ifdef HAVE_GSS_PIPEFS
out_keyring:
        gss_exit_keyring();
#endif
out_kerberos:
        cleanup_kerberos_module();
out_svc_upcall:
        gss_exit_svc_upcall();
out_cli_upcall:
        gss_exit_cli_upcall();
out_lproc:
        gss_exit_lproc();
        return rc;
}

static void __exit sptlrpc_gss_exit(void)
{
        gss_exit_keyring();
#ifdef HAVE_GSS_PIPEFS
        gss_exit_pipefs();
#endif
        cleanup_kerberos_module();
        gss_exit_svc_upcall();
        gss_exit_cli_upcall();
        gss_exit_lproc();
}

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("GSS security policy for Lustre");
MODULE_LICENSE("GPL");

module_init(sptlrpc_gss_init);
module_exit(sptlrpc_gss_exit);