1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Modifications for Lustre
6 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
8 * Author: Eric Mei <ericm@clusterfs.com>
12 * linux/net/sunrpc/auth_gss.c
14 * RPCSEC_GSS client authentication.
16 * Copyright (c) 2000 The Regents of the University of Michigan.
17 * All rights reserved.
19 * Dug Song <dugsong@monkey.org>
20 * Andy Adamson <andros@umich.edu>
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 3. Neither the name of the University nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
36 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
37 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
38 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
40 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
41 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
42 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
43 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
44 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
45 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 #ifndef EXPORT_SYMTAB
50 # define EXPORT_SYMTAB
51 #endif
52 #define DEBUG_SUBSYSTEM S_SEC
54 #include <linux/init.h>
55 #include <linux/module.h>
56 #include <linux/slab.h>
57 #include <linux/dcache.h>
59 #include <linux/mutex.h>
60 #include <asm/atomic.h>
62 #include <liblustre.h>
66 #include <obd_class.h>
67 #include <obd_support.h>
68 #include <obd_cksum.h>
69 #include <lustre/lustre_idl.h>
70 #include <lustre_net.h>
71 #include <lustre_import.h>
72 #include <lustre_sec.h>
75 #include "gss_internal.h"
78 #include <linux/crypto.h>
80 /*
81  * early replies have a fixed size in privacy and in integrity mode
82  * respectively, so we calculate them only once.
83  */
84 static int gss_at_reply_off_integ;
85 static int gss_at_reply_off_priv;
88 static inline int msg_last_segidx(struct lustre_msg *msg)
90 LASSERT(msg->lm_bufcount > 0);
91 return msg->lm_bufcount - 1;
93 static inline int msg_last_seglen(struct lustre_msg *msg)
95 return msg->lm_buflens[msg_last_segidx(msg)];
98 /********************************************
100 ********************************************/
103 void gss_header_swabber(struct gss_header *ghdr)
105 __swab32s(&ghdr->gh_flags);
106 __swab32s(&ghdr->gh_proc);
107 __swab32s(&ghdr->gh_seq);
108 __swab32s(&ghdr->gh_svc);
109 __swab32s(&ghdr->gh_pad1);
110 __swab32s(&ghdr->gh_handle.len);
113 struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
116 struct gss_header *ghdr;
118 ghdr = lustre_msg_buf(msg, segment, sizeof(*ghdr));
123 gss_header_swabber(ghdr);
125 if (sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
126 CERROR("gss header has length %d, now %u received\n",
127 (int) sizeof(*ghdr) + ghdr->gh_handle.len,
128 msg->lm_buflens[segment]);
137 void gss_netobj_swabber(netobj_t *obj)
139 __swab32s(&obj->len);
142 netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
146 obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
147 if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
148 CERROR("netobj require length %u but only %u received\n",
149 (unsigned int) sizeof(*obj) + obj->len,
150 msg->lm_buflens[segment]);
158 /*
159  * payload size should be obtained from the mechanism, but since currently we
160  * only support kerberos, we can simply use fixed values.
163  * - krb5 checksum: 20
165  * for privacy mode, the payload also includes the cipher text, which has the
166  * same size as the plain text, plus a possible confounder and padding, each at most one cipher block. */
169 #define GSS_KRB5_INTEG_MAX_PAYLOAD (40)
172 int gss_mech_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
175 return GSS_KRB5_INTEG_MAX_PAYLOAD + 16 + 16 + 16 + msgsize;
177 return GSS_KRB5_INTEG_MAX_PAYLOAD;
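/* a worked example of the fixed sizes above: with privacy and a 4096-byte
 * clear message, the reserved payload is 40 + 16 + 16 + 16 + 4096 = 4184
 * bytes (the MIC plus room for the krb5 header, confounder and padding,
 * plus the cipher text); integrity/auth modes reserve only the fixed
 * 40-byte MIC, independent of the message size. */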
181 * return signature size, otherwise < 0 to indicate error
183 static int gss_sign_msg(struct lustre_msg *msg,
184 struct gss_ctx *mechctx,
185 enum lustre_sec_part sp,
186 __u32 flags, __u32 proc, __u32 seq, __u32 svc,
189 struct gss_header *ghdr;
190 rawobj_t text[4], mic;
191 int textcnt, max_textcnt, mic_idx;
194 LASSERT(msg->lm_bufcount >= 2);
197 LASSERT(msg->lm_buflens[0] >=
198 sizeof(*ghdr) + (handle ? handle->len : 0));
199 ghdr = lustre_msg_buf(msg, 0, 0);
201 ghdr->gh_version = PTLRPC_GSS_VERSION;
202 ghdr->gh_sp = (__u8) sp;
203 ghdr->gh_flags = flags;
204 ghdr->gh_proc = proc;
208 /* fill in a fake one */
209 ghdr->gh_handle.len = 0;
211 ghdr->gh_handle.len = handle->len;
212 memcpy(ghdr->gh_handle.data, handle->data, handle->len);
215 /* no actual signature for null mode */
216 if (svc == SPTLRPC_SVC_NULL)
217 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
220 mic_idx = msg_last_segidx(msg);
221 max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
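        /* for SPTLRPC_SVC_AUTH only segment 0 (the gss header) is covered by
         * the MIC; for SPTLRPC_SVC_INTG every segment before the final MIC
         * segment is covered. */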
223 for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
224 text[textcnt].len = msg->lm_buflens[textcnt];
225 text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
228 mic.len = msg->lm_buflens[mic_idx];
229 mic.data = lustre_msg_buf(msg, mic_idx, 0);
231 major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
232 if (major != GSS_S_COMPLETE) {
233 CERROR("fail to generate MIC: %08x\n", major);
236 LASSERT(mic.len <= msg->lm_buflens[mic_idx]);
238 return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
245 __u32 gss_verify_msg(struct lustre_msg *msg,
246 struct gss_ctx *mechctx,
249 rawobj_t text[4], mic;
250 int textcnt, max_textcnt;
254 LASSERT(msg->lm_bufcount >= 2);
256 if (svc == SPTLRPC_SVC_NULL)
257 return GSS_S_COMPLETE;
259 mic_idx = msg_last_segidx(msg);
260 max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
262 for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
263 text[textcnt].len = msg->lm_buflens[textcnt];
264 text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
267 mic.len = msg->lm_buflens[mic_idx];
268 mic.data = lustre_msg_buf(msg, mic_idx, 0);
270 major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
271 if (major != GSS_S_COMPLETE)
272 CERROR("mic verify error: %08x\n", major);
278 * return gss error code
281 __u32 gss_unseal_msg(struct gss_ctx *mechctx,
282 struct lustre_msg *msgbuf,
283 int *msg_len, int msgbuf_len)
285 rawobj_t clear_obj, hdrobj, token;
291 if (msgbuf->lm_bufcount != 2) {
292 CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
293 RETURN(GSS_S_FAILURE);
296         /* allocate a temporary clear text buffer, the same size as the token;
297          * we assume the final clear text size <= token size */
298 clear_buflen = lustre_msg_buflen(msgbuf, 1);
299 OBD_ALLOC(clear_buf, clear_buflen);
301 RETURN(GSS_S_FAILURE);
304 hdrobj.len = lustre_msg_buflen(msgbuf, 0);
305 hdrobj.data = lustre_msg_buf(msgbuf, 0, 0);
306 token.len = lustre_msg_buflen(msgbuf, 1);
307 token.data = lustre_msg_buf(msgbuf, 1, 0);
308 clear_obj.len = clear_buflen;
309 clear_obj.data = clear_buf;
311 major = lgss_unwrap(mechctx, &hdrobj, &token, &clear_obj);
312 if (major != GSS_S_COMPLETE) {
313 CERROR("unwrap message error: %08x\n", major);
314 GOTO(out_free, major = GSS_S_FAILURE);
316 LASSERT(clear_obj.len <= clear_buflen);
317 LASSERT(clear_obj.len <= msgbuf_len);
319 /* now the decrypted message */
320 memcpy(msgbuf, clear_obj.data, clear_obj.len);
321 *msg_len = clear_obj.len;
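        /* note the unwrapped clear text is copied back over msgbuf itself, so
         * from the caller's point of view decryption happens in place; hence
         * the asserts above that clear_obj.len fits within msgbuf_len. */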
323 major = GSS_S_COMPLETE;
325 OBD_FREE(clear_buf, clear_buflen);
329 /********************************************
330 * gss client context manipulation helpers *
331 ********************************************/
333 int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
335 LASSERT(cfs_atomic_read(&ctx->cc_refcount));
337 if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
338 if (!ctx->cc_early_expire)
339 cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
341 CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
342 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
344 ctx->cc_expire == 0 ? 0 :
345 cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
347 sptlrpc_cli_ctx_wakeup(ctx);
355 * return 1 if the context is dead.
357 int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
359 if (unlikely(cli_ctx_is_dead(ctx)))
362         /* expire == 0 means never expire. a newly created gss context may
363          * still have 0 expiration while its upcall is in progress */
364 if (ctx->cc_expire == 0)
367 /* check real expiration */
368 if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
375 void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
377 struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
378 unsigned long ctx_expiry;
380 if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
381 CERROR("ctx %p(%u): unable to inquire, expire it now\n",
382 gctx, ctx->cc_vcred.vc_uid);
383 ctx_expiry = 1; /* make it expired now */
386 ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
387 ctx->cc_sec->ps_flvr.sf_flags);
389         /* At this point this ctx might have been marked as dead by
390          * someone else, in which case nobody will make further use
391          * of it. we don't care; marking it UPTODATE anyway helps destroy
392          * the server side context when this ctx is destroyed. */
393 cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
395 if (sec_is_reverse(ctx->cc_sec)) {
396 CWARN("server installed reverse ctx %p idx "LPX64", "
397 "expiry %lu(%+lds)\n", ctx,
398 gss_handle_to_u64(&gctx->gc_handle),
399 ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
401 CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
402 "expiry %lu(%+lds)\n", ctx,
403 gss_handle_to_u64(&gctx->gc_handle),
404 ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
405 ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
407 /* install reverse svc ctx for root context */
408 if (ctx->cc_vcred.vc_uid == 0)
409 gss_sec_install_rctx(ctx->cc_sec->ps_import,
413 sptlrpc_cli_ctx_wakeup(ctx);
416 static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
418 LASSERT(gctx->gc_base.cc_sec);
420 if (gctx->gc_mechctx) {
421 lgss_delete_sec_context(&gctx->gc_mechctx);
422 gctx->gc_mechctx = NULL;
425 if (!rawobj_empty(&gctx->gc_svc_handle)) {
426 /* forward ctx: mark buddy reverse svcctx soon-expire. */
427 if (!sec_is_reverse(gctx->gc_base.cc_sec) &&
428 !rawobj_empty(&gctx->gc_svc_handle))
429 gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);
431 rawobj_free(&gctx->gc_svc_handle);
434 rawobj_free(&gctx->gc_handle);
437 /*
438  * Based on the sequence number algorithm as specified in RFC 2203.
440  * modified for our own problem: an arriving request has a valid sequence
441  * number, but unwrapping the request might take a long time, after which its
442  * sequence number is no longer valid (it falls behind the window). this
443  * rarely happens, mostly under extreme load.
445  * note we should not check the sequence number before verifying the integrity
446  * of the incoming request, because a single attacking request with a high
447  * sequence number could cause all following requests to be dropped.
449  * so here we use a multi-phase approach: prepare 2 sequence windows, a
450  * "main window" for normal sequence numbers and a "back window" for
451  * fall-behind sequence numbers, and a 3-phase checking mechanism:
452  * 0 - before integrity verification, perform an initial sequence check in the
453  *     main window, which only tries and doesn't actually set any bits. if the
454  *     sequence number is high above the window, or fits in the window and the
455  *     bit is 0, then accept and proceed to integrity verification. otherwise
456  *     reject this sequence number.
457  * 1 - after integrity verification, check in the main window again. if this
458  *     sequence number is high above the window, or fits in the window and the
459  *     bit is 0, then set the bit and accept; if it fits in the window but the
460  *     bit is already set, then reject; if it falls behind the window, proceed
462  * 2 - check in the back window. if it is high above the window, or fits in the
463  *     window and the bit is 0, then set the bit and accept. otherwise reject.
466  *   1: looks like a replay
470  * note phase 0 is necessary, because otherwise a replayed attacking request
471  * whose sequence number falls between the 2 windows couldn't be detected.
473  * this mechanism can't totally solve the problem, but it helps greatly reduce
474  * the number of valid requests that get dropped.
475  */
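/* a purely illustrative example, assuming a hypothetical window size of 8:
 * with max_seq == 20, a request with seq 25 is "high above" and is accepted,
 * sliding the window forward; seq 18 with its bit clear is accepted and the
 * bit gets set; a second request with seq 18 then finds the bit set and is
 * rejected as a replay; seq 10 has fallen behind the main window (10 + 8 <=
 * 20) and gets retried against the back window in phase 2. */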
477 int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
478 __u32 seq_num, int phase)
480 LASSERT(phase >= 0 && phase <= 2);
482 if (seq_num > *max_seq) {
484 * 1. high above the window
489 if (seq_num >= *max_seq + win_size) {
490 memset(window, 0, win_size / 8);
493 while(*max_seq < seq_num) {
495 __clear_bit((*max_seq) % win_size, window);
498 __set_bit(seq_num % win_size, window);
499 } else if (seq_num + win_size <= *max_seq) {
501 * 2. low behind the window
503 if (phase == 0 || phase == 2)
506 CWARN("seq %u is %u behind (size %d), check backup window\n",
507 seq_num, *max_seq - win_size - seq_num, win_size);
511 * 3. fit into the window
515 if (cfs_test_bit(seq_num % win_size, window))
520 if (__test_and_set_bit(seq_num % win_size, window))
529 CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
531 seq_num + win_size > *max_seq ? "in" : "behind",
532 phase == 2 ? "backup " : "main",
538 * Based on sequence number algorithm as specified in RFC 2203.
540  * if @set == 0: initial check, don't set any bit in the window
541  * if @set == 1: final check, set the bit in the window
543 int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
547 cfs_spin_lock(&ssd->ssd_lock);
553 rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
554 &ssd->ssd_max_main, seq_num, 0);
556 gss_stat_oos_record_svc(0, 1);
559 * phase 1 checking main window
561 rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
562 &ssd->ssd_max_main, seq_num, 1);
565 gss_stat_oos_record_svc(1, 1);
571 * phase 2 checking back window
573 rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
574 &ssd->ssd_max_back, seq_num, 2);
576 gss_stat_oos_record_svc(2, 1);
578 gss_stat_oos_record_svc(2, 0);
581 cfs_spin_unlock(&ssd->ssd_lock);
585 /***************************************
587 ***************************************/
589 static inline int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
590 int msgsize, int privacy)
592 return gss_mech_payload(NULL, msgsize, privacy);
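        /* the mech context is passed as NULL above because gss_mech_payload()
         * uses fixed, kerberos-derived estimates and never dereferences it. */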
595 static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
596 struct sptlrpc_flavor *flvr,
599 int payload = sizeof(struct ptlrpc_bulk_sec_desc);
601 LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);
603 if ((!reply && !read) || (reply && read)) {
604 switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
605 case SPTLRPC_BULK_SVC_NULL:
607 case SPTLRPC_BULK_SVC_INTG:
608 payload += gss_cli_payload(ctx, 0, 0);
610 case SPTLRPC_BULK_SVC_PRIV:
611 payload += gss_cli_payload(ctx, 0, 1);
613 case SPTLRPC_BULK_SVC_AUTH:
622 int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
624 return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
627 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
631 if (flags & PTLRPC_CTX_NEW)
632 strncat(buf, "new,", bufsize);
633 if (flags & PTLRPC_CTX_UPTODATE)
634 strncat(buf, "uptodate,", bufsize);
635 if (flags & PTLRPC_CTX_DEAD)
636 strncat(buf, "dead,", bufsize);
637 if (flags & PTLRPC_CTX_ERROR)
638 strncat(buf, "error,", bufsize);
639 if (flags & PTLRPC_CTX_CACHED)
640 strncat(buf, "cached,", bufsize);
641 if (flags & PTLRPC_CTX_ETERNAL)
642 strncat(buf, "eternal,", bufsize);
644 strncat(buf, "-,", bufsize);
646 buf[strlen(buf) - 1] = '\0';
649 int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
650 struct ptlrpc_request *req)
652 struct gss_cli_ctx *gctx = ctx2gctx(ctx);
653 __u32 flags = 0, seq, svc;
657 LASSERT(req->rq_reqbuf);
658 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
659 LASSERT(req->rq_cli_ctx == ctx);
661 /* nothing to do for context negotiation RPCs */
662 if (req->rq_ctx_init)
665 svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
666 if (req->rq_pack_bulk)
667 flags |= LUSTRE_GSS_PACK_BULK;
668 if (req->rq_pack_udesc)
669 flags |= LUSTRE_GSS_PACK_USER;
672 seq = cfs_atomic_inc_return(&gctx->gc_seq);
674 rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
675 ctx->cc_sec->ps_part,
676 flags, gctx->gc_proc, seq, svc,
681         /* gss_sign_msg() might take a long time to finish, during which more
682          * rpcs could be wrapped up and sent out. if we find too many of them
683          * we should repack this rpc, because sending it too late might let its
684          * sequence number fall behind the window on the server and get it
685          * dropped. the same applies to gss_cli_ctx_seal().
687          * Note: null mode doesn't check the sequence number. */
688 if (svc != SPTLRPC_SVC_NULL &&
689 cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
690 int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
692 gss_stat_oos_record_cli(behind);
693 CWARN("req %p: %u behind, retry signing\n", req, behind);
697 req->rq_reqdata_len = rc;
702 int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
703 struct ptlrpc_request *req,
704 struct gss_header *ghdr)
706 struct gss_err_header *errhdr;
709 LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);
711 errhdr = (struct gss_err_header *) ghdr;
713 CWARN("req x"LPU64"/t"LPU64", ctx %p idx "LPX64"(%u->%s): "
714 "%sserver respond (%08x/%08x)\n",
715 req->rq_xid, req->rq_transno, ctx,
716 gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
717 ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
718 sec_is_reverse(ctx->cc_sec) ? "reverse" : "",
719 errhdr->gh_major, errhdr->gh_minor);
721         /* context fini rpc, let it fail */
722 if (req->rq_ctx_fini) {
723 CWARN("context fini rpc failed\n");
727         /* reverse sec: just return the error, don't expire this ctx because it's
728          * crucial to callback rpcs. note that if the callback rpc failed because
729          * of a bit flip during network transfer, the client will be evicted
730          * directly. so, to be more graceful, we probably want to let it retry a
731          * number of times. */
732 if (sec_is_reverse(ctx->cc_sec))
735 if (errhdr->gh_major != GSS_S_NO_CONTEXT &&
736 errhdr->gh_major != GSS_S_BAD_SIG)
739         /* a NO_CONTEXT error returned by the server might be caused by context
740          * expiry or by a server reboot/failover. we try to refresh a new ctx,
741          * which is transparent to the upper layer.
743          * In some cases our gss handle may happen to be identical to another
744          * handle, since the handle itself is not fully random. In the krb5
745          * case GSS_S_BAD_SIG will be returned then; other mechanisms may
746          * return a different gss error.
748          * if we add a new mechanism, make sure the correct error is
749          * returned in this case. */
750         CWARN("%s: server might have lost the context, retrying\n",
751               errhdr->gh_major == GSS_S_NO_CONTEXT ? "NO_CONTEXT" : "BAD_SIG");
753 sptlrpc_cli_ctx_expire(ctx);
755         /* we need to replace the ctx right here, otherwise during
756          * resend we'll hit the logic in sptlrpc_req_refresh_ctx()
757          * which keeps the ctx with the RESEND flag, and thus we'll
758          * never get rid of this ctx. */
759 rc = sptlrpc_req_replace_dead_ctx(req);
766 int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
767 struct ptlrpc_request *req)
769 struct gss_cli_ctx *gctx;
770 struct gss_header *ghdr, *reqhdr;
771 struct lustre_msg *msg = req->rq_repdata;
773 int pack_bulk, swabbed, rc = 0;
776 LASSERT(req->rq_cli_ctx == ctx);
779 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
781         /* special case for context negotiation: rq_repmsg/rq_replen are not
782          * actually used currently, but an early reply is always treated normally */
783 if (req->rq_ctx_init && !req->rq_early) {
784 req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
785 req->rq_replen = msg->lm_buflens[1];
789 if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
790 CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
794 swabbed = ptlrpc_rep_need_swab(req);
796 ghdr = gss_swab_header(msg, 0, swabbed);
798 CERROR("can't decode gss header\n");
803 reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));
806 if (ghdr->gh_version != reqhdr->gh_version) {
807 CERROR("gss version %u mismatch, expect %u\n",
808 ghdr->gh_version, reqhdr->gh_version);
812 switch (ghdr->gh_proc) {
813 case PTLRPC_GSS_PROC_DATA:
814 pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
816 if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
817 CERROR("%s bulk flag in reply\n",
818 req->rq_pack_bulk ? "missing" : "unexpected");
822 if (ghdr->gh_seq != reqhdr->gh_seq) {
823 CERROR("seqnum %u mismatch, expect %u\n",
824 ghdr->gh_seq, reqhdr->gh_seq);
828 if (ghdr->gh_svc != reqhdr->gh_svc) {
829 CERROR("svc %u mismatch, expect %u\n",
830 ghdr->gh_svc, reqhdr->gh_svc);
835 gss_header_swabber(ghdr);
837 major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
838 if (major != GSS_S_COMPLETE) {
839 CERROR("failed to verify reply: %x\n", major);
843 if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
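                /* for a null-service early reply there is no MIC to check, so
                 * the only protection is this crc32 over the embedded message,
                 * filled in by the server in the SPTLRPC_SVC_NULL branch of
                 * gss_svc_sign() later in this file. */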
846 cksum = crc32_le(!(__u32) 0,
847 lustre_msg_buf(msg, 1, 0),
848 lustre_msg_buflen(msg, 1));
849 if (cksum != msg->lm_cksum) {
850 CWARN("early reply checksum mismatch: "
851 "%08x != %08x\n", cksum, msg->lm_cksum);
857 /* bulk checksum is right after the lustre msg */
858 if (msg->lm_bufcount < 3) {
859 CERROR("Invalid reply bufcount %u\n",
864 rc = bulk_sec_desc_unpack(msg, 2, swabbed);
866 CERROR("unpack bulk desc: %d\n", rc);
871 req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
872 req->rq_replen = msg->lm_buflens[1];
874 case PTLRPC_GSS_PROC_ERR:
876 CERROR("server return error with early reply\n");
879 rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
883 CERROR("unknown gss proc %d\n", ghdr->gh_proc);
890 int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
891 struct ptlrpc_request *req)
893 struct gss_cli_ctx *gctx;
894 rawobj_t hdrobj, msgobj, token;
895 struct gss_header *ghdr;
896 __u32 buflens[2], major;
900 LASSERT(req->rq_clrbuf);
901 LASSERT(req->rq_cli_ctx == ctx);
902 LASSERT(req->rq_reqlen);
904 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
906 /* final clear data length */
907 req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
908 req->rq_clrbuf->lm_buflens);
910 /* calculate wire data length */
911 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
912 buflens[1] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
913 wiresize = lustre_msg_size_v2(2, buflens);
915 /* allocate wire buffer */
918 LASSERT(req->rq_reqbuf);
919 LASSERT(req->rq_reqbuf != req->rq_clrbuf);
920 LASSERT(req->rq_reqbuf_len >= wiresize);
922 OBD_ALLOC(req->rq_reqbuf, wiresize);
925 req->rq_reqbuf_len = wiresize;
928 lustre_init_msg_v2(req->rq_reqbuf, 2, buflens, NULL);
929 req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
932 ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
933 ghdr->gh_version = PTLRPC_GSS_VERSION;
934 ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
936 ghdr->gh_proc = gctx->gc_proc;
937 ghdr->gh_svc = SPTLRPC_SVC_PRIV;
938 ghdr->gh_handle.len = gctx->gc_handle.len;
939 memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
940 if (req->rq_pack_bulk)
941 ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
942 if (req->rq_pack_udesc)
943 ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
946 ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
949 hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
950 hdrobj.data = (__u8 *) ghdr;
951 msgobj.len = req->rq_clrdata_len;
952 msgobj.data = (__u8 *) req->rq_clrbuf;
953 token.len = lustre_msg_buflen(req->rq_reqbuf, 1);
954 token.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);
956 major = lgss_wrap(gctx->gc_mechctx, &hdrobj, &msgobj,
957 req->rq_clrbuf_len, &token);
958 if (major != GSS_S_COMPLETE) {
959 CERROR("priv: wrap message error: %08x\n", major);
960 GOTO(err_free, rc = -EPERM);
962 LASSERT(token.len <= buflens[1]);
964         /* see the explanation in gss_cli_ctx_sign() */
965 if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
966 GSS_SEQ_REPACK_THRESHOLD)) {
967 int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
969 gss_stat_oos_record_cli(behind);
970 CWARN("req %p: %u behind, retry sealing\n", req, behind);
972 ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
976 /* now set the final wire data length */
977 req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
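        /* segment 1 was sized for the worst-case token (buflens[1] above was
         * computed with gss_cli_payload()); lustre_shrink_msg() trims it down
         * to the actual token length so the unused slack is not sent on the
         * wire. */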
982 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
983 req->rq_reqbuf = NULL;
984 req->rq_reqbuf_len = 0;
989 int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
990 struct ptlrpc_request *req)
992 struct gss_cli_ctx *gctx;
993 struct gss_header *ghdr;
994 struct lustre_msg *msg = req->rq_repdata;
995 int msglen, pack_bulk, swabbed, rc;
999 LASSERT(req->rq_cli_ctx == ctx);
1000 LASSERT(req->rq_ctx_init == 0);
1003 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
1004 swabbed = ptlrpc_rep_need_swab(req);
1006 ghdr = gss_swab_header(msg, 0, swabbed);
1008 CERROR("can't decode gss header\n");
1013 if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
1014 CERROR("gss version %u mismatch, expect %u\n",
1015 ghdr->gh_version, PTLRPC_GSS_VERSION);
1019 switch (ghdr->gh_proc) {
1020 case PTLRPC_GSS_PROC_DATA:
1021 pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
1023 if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
1024 CERROR("%s bulk flag in reply\n",
1025 req->rq_pack_bulk ? "missing" : "unexpected");
1030 gss_header_swabber(ghdr);
1032                 /* use rq_repdata_len as the buffer size, which assumes unsealing
1033                  * doesn't need extra memory space. for precise control, we'd
1034                  * better calculate the actual buffer size as
1035                  * (repbuf_len - offset - repdata_len) */
1036 major = gss_unseal_msg(gctx->gc_mechctx, msg,
1037 &msglen, req->rq_repdata_len);
1038 if (major != GSS_S_COMPLETE) {
1039 CERROR("failed to unwrap reply: %x\n", major);
1044 swabbed = __lustre_unpack_msg(msg, msglen);
1046 CERROR("Failed to unpack after decryption\n");
1050 if (msg->lm_bufcount < 1) {
1051 CERROR("Invalid reply buffer: empty\n");
1056 if (msg->lm_bufcount < 2) {
1057 CERROR("bufcount %u: missing bulk sec desc\n",
1062 /* bulk checksum is the last segment */
1063 if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
1068 req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
1069 req->rq_replen = msg->lm_buflens[0];
1073 case PTLRPC_GSS_PROC_ERR:
1074 if (req->rq_early) {
1075 CERROR("server return error with early reply\n");
1078 rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
1082 CERROR("unexpected proc %d\n", ghdr->gh_proc);
1089 /*********************************************
1090 * reverse context installation *
1091 *********************************************/
1094 int gss_install_rvs_svc_ctx(struct obd_import *imp,
1095 struct gss_sec *gsec,
1096 struct gss_cli_ctx *gctx)
1098 return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
1101 /*********************************************
1102 * GSS security APIs *
1103 *********************************************/
1104 int gss_sec_create_common(struct gss_sec *gsec,
1105 struct ptlrpc_sec_policy *policy,
1106 struct obd_import *imp,
1107 struct ptlrpc_svc_ctx *svcctx,
1108 struct sptlrpc_flavor *sf)
1110 struct ptlrpc_sec *sec;
1113 LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
1115 gsec->gs_mech = lgss_subflavor_to_mech(
1116 SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
1117 if (!gsec->gs_mech) {
1118 CERROR("gss backend 0x%x not found\n",
1119 SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
1123 cfs_spin_lock_init(&gsec->gs_lock);
1124 gsec->gs_rvs_hdl = 0ULL;
1126 /* initialize upper ptlrpc_sec */
1127 sec = &gsec->gs_base;
1128 sec->ps_policy = policy;
1129 cfs_atomic_set(&sec->ps_refcount, 0);
1130 cfs_atomic_set(&sec->ps_nctx, 0);
1131 sec->ps_id = sptlrpc_get_next_secid();
1133 sec->ps_import = class_import_get(imp);
1134 cfs_spin_lock_init(&sec->ps_lock);
1135 CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
1138 sec->ps_gc_interval = GSS_GC_INTERVAL;
1140 LASSERT(sec_is_reverse(sec));
1142 /* never do gc on reverse sec */
1143 sec->ps_gc_interval = 0;
1146 if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
1147 sptlrpc_enc_pool_add_user();
1149 CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
1150 policy->sp_name, gsec);
1154 void gss_sec_destroy_common(struct gss_sec *gsec)
1156 struct ptlrpc_sec *sec = &gsec->gs_base;
1159 LASSERT(sec->ps_import);
1160 LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
1161 LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
1163 if (gsec->gs_mech) {
1164 lgss_mech_put(gsec->gs_mech);
1165 gsec->gs_mech = NULL;
1168 class_import_put(sec->ps_import);
1170 if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
1171 sptlrpc_enc_pool_del_user();
1176 void gss_sec_kill(struct ptlrpc_sec *sec)
1181 int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
1182 struct ptlrpc_cli_ctx *ctx,
1183 struct ptlrpc_ctx_ops *ctxops,
1184 struct vfs_cred *vcred)
1186 struct gss_cli_ctx *gctx = ctx2gctx(ctx);
1189 cfs_atomic_set(&gctx->gc_seq, 0);
1191 CFS_INIT_HLIST_NODE(&ctx->cc_cache);
1192 cfs_atomic_set(&ctx->cc_refcount, 0);
1194 ctx->cc_ops = ctxops;
1196 ctx->cc_flags = PTLRPC_CTX_NEW;
1197 ctx->cc_vcred = *vcred;
1198 cfs_spin_lock_init(&ctx->cc_lock);
1199 CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
1200 CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
1202         /* take a ref on the owning sec, balanced when the ctx is destroyed */
1203 cfs_atomic_inc(&sec->ps_refcount);
1204 /* statistic only */
1205 cfs_atomic_inc(&sec->ps_nctx);
1207 CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
1208 sec->ps_policy->sp_name, ctx->cc_sec,
1209 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
1215 * 1: the context has been taken care of by someone else
1216 * 0: proceed to really destroy the context locally
1218 int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
1219 struct ptlrpc_cli_ctx *ctx)
1221 struct gss_cli_ctx *gctx = ctx2gctx(ctx);
1223 LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
1224 LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
1225 LASSERT(ctx->cc_sec == sec);
1227         /*
1228          * remove the UPTODATE flag of a reverse ctx so that we won't send a fini
1229          * rpc; this avoids potential problems of the client side reverse svc ctx
1230          * being mis-destroyed in various recovery scenarios. anyway the client
1231          * can manage its reverse ctx well by associating it with its buddy ctx.
1232          */
1233 if (sec_is_reverse(sec))
1234 ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
1236 if (gctx->gc_mechctx) {
1237                 /* the final context fini rpc will use this ctx too, and it's
1238                  * asynchronous, finished by request_out_callback(). so we add a
1239                  * refcount; whoever finally drops the refcount to 0 is
1240                  * responsible for the rest of the destroy. */
1241 cfs_atomic_inc(&ctx->cc_refcount);
1243 gss_do_ctx_fini_rpc(gctx);
1244 gss_cli_ctx_finalize(gctx);
1246 if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
1250 if (sec_is_reverse(sec))
1251 CWARN("reverse sec %p: destroy ctx %p\n",
1254 CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
1255 sec->ps_policy->sp_name, ctx->cc_sec,
1256 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
1262 int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
1263 struct ptlrpc_request *req,
1264 int svc, int msgsize)
1266 int bufsize, txtsize;
1271         /*
1272          * on-wire data layout:
1273          * - gss header
1274          * - lustre message
1275          * - user descriptor (optional)
1276          * - bulk sec descriptor (optional)
1277          * - signature (optional)
1278          *   - svc == NULL: NULL
1279          *   - svc == AUTH: signature of gss header
1280          *   - svc == INTG: signature of all above
1282          * if this is context negotiation, reserve fixed space
1283          * at the last (signature) segment regardless of svc mode.
1284          */
1286 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1287 txtsize = buflens[0];
1289 buflens[1] = msgsize;
1290 if (svc == SPTLRPC_SVC_INTG)
1291 txtsize += buflens[1];
1293 if (req->rq_pack_udesc) {
1294 buflens[bufcnt] = sptlrpc_current_user_desc_size();
1295 if (svc == SPTLRPC_SVC_INTG)
1296 txtsize += buflens[bufcnt];
1300 if (req->rq_pack_bulk) {
1301 buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
1303 0, req->rq_bulk_read);
1304 if (svc == SPTLRPC_SVC_INTG)
1305 txtsize += buflens[bufcnt];
1309 if (req->rq_ctx_init)
1310 buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
1311 else if (svc != SPTLRPC_SVC_NULL)
1312 buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
1314 bufsize = lustre_msg_size_v2(bufcnt, buflens);
1316 if (!req->rq_reqbuf) {
1317 bufsize = size_roundup_power2(bufsize);
1319 OBD_ALLOC(req->rq_reqbuf, bufsize);
1320 if (!req->rq_reqbuf)
1323 req->rq_reqbuf_len = bufsize;
1325 LASSERT(req->rq_pool);
1326 LASSERT(req->rq_reqbuf_len >= bufsize);
1327 memset(req->rq_reqbuf, 0, bufsize);
1330 lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
1331 req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
1333 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
1334 LASSERT(req->rq_reqmsg);
1336         /* pack the user desc here, since later we might leave the current user's process */
1337 if (req->rq_pack_udesc)
1338 sptlrpc_pack_user_desc(req->rq_reqbuf, 2);
1344 int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
1345 struct ptlrpc_request *req,
1348 __u32 ibuflens[3], wbuflens[2];
1350 int clearsize, wiresize;
1353 LASSERT(req->rq_clrbuf == NULL);
1354 LASSERT(req->rq_clrbuf_len == 0);
1356         /* Inner (clear) buffers
1357          *  - lustre message
1358          *  - user descriptor (optional)
1359          *  - bulk checksum (optional)
1360          */
1362 ibuflens[0] = msgsize;
1364 if (req->rq_pack_udesc)
1365 ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
1366 if (req->rq_pack_bulk)
1367 ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
1371 clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
1372 /* to allow append padding during encryption */
1373 clearsize += GSS_MAX_CIPHER_BLOCK;
1375         /* Wrapper (wire) buffers
1376          *  - gss header
1377          *  - cipher text
1378          */
1379 wbuflens[0] = PTLRPC_GSS_HEADER_SIZE;
1380 wbuflens[1] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
1381 wiresize = lustre_msg_size_v2(2, wbuflens);
1384 /* rq_reqbuf is preallocated */
1385 LASSERT(req->rq_reqbuf);
1386 LASSERT(req->rq_reqbuf_len >= wiresize);
1388 memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);
1390 /* if the pre-allocated buffer is big enough, we just pack
1391 * both clear buf & request buf in it, to avoid more alloc. */
1392 if (clearsize + wiresize <= req->rq_reqbuf_len) {
1394 (void *) (((char *) req->rq_reqbuf) + wiresize);
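                /* layout of the combined pre-allocated buffer in this case:
                 *   [ wire (cipher) buffer: wiresize | clear buffer: clearsize ]
                 * rq_clrbuf simply aliases the tail of rq_reqbuf, which is why
                 * gss_free_reqbuf() and gss_enlarge_reqbuf_priv() compare the
                 * rq_clrbuf pointer against the rq_reqbuf range before freeing
                 * or reallocating. */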
1396 CWARN("pre-allocated buf size %d is not enough for "
1397 "both clear (%d) and cipher (%d) text, proceed "
1398 "with extra allocation\n", req->rq_reqbuf_len,
1399 clearsize, wiresize);
1403 if (!req->rq_clrbuf) {
1404 clearsize = size_roundup_power2(clearsize);
1406 OBD_ALLOC(req->rq_clrbuf, clearsize);
1407 if (!req->rq_clrbuf)
1410 req->rq_clrbuf_len = clearsize;
1412 lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
1413 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);
1415 if (req->rq_pack_udesc)
1416 sptlrpc_pack_user_desc(req->rq_clrbuf, 1);
1422 * NOTE: any change of request buffer allocation should also consider
1423 * changing enlarge_reqbuf() series functions.
1425 int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
1426 struct ptlrpc_request *req,
1429 int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
1431 LASSERT(!req->rq_pack_bulk ||
1432 (req->rq_bulk_read || req->rq_bulk_write));
1435 case SPTLRPC_SVC_NULL:
1436 case SPTLRPC_SVC_AUTH:
1437 case SPTLRPC_SVC_INTG:
1438 return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
1439 case SPTLRPC_SVC_PRIV:
1440 return gss_alloc_reqbuf_priv(sec, req, msgsize);
1442 LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
1447 void gss_free_reqbuf(struct ptlrpc_sec *sec,
1448 struct ptlrpc_request *req)
1453 LASSERT(!req->rq_pool || req->rq_reqbuf);
1454 privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
1456 if (!req->rq_clrbuf)
1457 goto release_reqbuf;
1459 /* release clear buffer */
1461 LASSERT(req->rq_clrbuf_len);
1463 if (req->rq_pool == NULL ||
1464 req->rq_clrbuf < req->rq_reqbuf ||
1465 (char *) req->rq_clrbuf >=
1466 (char *) req->rq_reqbuf + req->rq_reqbuf_len)
1467 OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
1469 req->rq_clrbuf = NULL;
1470 req->rq_clrbuf_len = 0;
1473 if (!req->rq_pool && req->rq_reqbuf) {
1474 LASSERT(req->rq_reqbuf_len);
1476 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
1477 req->rq_reqbuf = NULL;
1478 req->rq_reqbuf_len = 0;
1484 static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
1486 bufsize = size_roundup_power2(bufsize);
1488 OBD_ALLOC(req->rq_repbuf, bufsize);
1489 if (!req->rq_repbuf)
1492 req->rq_repbuf_len = bufsize;
1497 int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
1498 struct ptlrpc_request *req,
1499 int svc, int msgsize)
1506         /*
1507          * on-wire data layout:
1508          * - gss header
1509          * - lustre message
1510          * - bulk sec descriptor (optional)
1511          * - signature (optional)
1512          *   - svc == NULL: NULL
1513          *   - svc == AUTH: signature of gss header
1514          *   - svc == INTG: signature of all above
1516          * if this is context negotiation, reserve fixed space
1517          * at the last (signature) segment regardless of svc mode.
1518          */
1520 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1521 txtsize = buflens[0];
1523 buflens[1] = msgsize;
1524 if (svc == SPTLRPC_SVC_INTG)
1525 txtsize += buflens[1];
1527 if (req->rq_pack_bulk) {
1528 buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
1530 1, req->rq_bulk_read);
1531 if (svc == SPTLRPC_SVC_INTG)
1532 txtsize += buflens[bufcnt];
1536 if (req->rq_ctx_init)
1537 buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
1538 else if (svc != SPTLRPC_SVC_NULL)
1539 buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
1541 alloc_size = lustre_msg_size_v2(bufcnt, buflens);
1543 /* add space for early reply */
1544 alloc_size += gss_at_reply_off_integ;
1546 return do_alloc_repbuf(req, alloc_size);
1550 int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
1551 struct ptlrpc_request *req,
1561 buflens[0] = msgsize;
1563 if (req->rq_pack_bulk)
1564 buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
1566 1, req->rq_bulk_read);
1567 txtsize = lustre_msg_size_v2(bufcnt, buflens);
1568 txtsize += GSS_MAX_CIPHER_BLOCK;
1570 /* wrapper buffers */
1572 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1573 buflens[1] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
1575 alloc_size = lustre_msg_size_v2(bufcnt, buflens);
1576 /* add space for early reply */
1577 alloc_size += gss_at_reply_off_priv;
1579 return do_alloc_repbuf(req, alloc_size);
1582 int gss_alloc_repbuf(struct ptlrpc_sec *sec,
1583 struct ptlrpc_request *req,
1586 int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
1589 LASSERT(!req->rq_pack_bulk ||
1590 (req->rq_bulk_read || req->rq_bulk_write));
1593 case SPTLRPC_SVC_NULL:
1594 case SPTLRPC_SVC_AUTH:
1595 case SPTLRPC_SVC_INTG:
1596 return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
1597 case SPTLRPC_SVC_PRIV:
1598 return gss_alloc_repbuf_priv(sec, req, msgsize);
1600 LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
1605 void gss_free_repbuf(struct ptlrpc_sec *sec,
1606 struct ptlrpc_request *req)
1608 OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
1609 req->rq_repbuf = NULL;
1610 req->rq_repbuf_len = 0;
1611 req->rq_repdata = NULL;
1612 req->rq_repdata_len = 0;
1615 static int get_enlarged_msgsize(struct lustre_msg *msg,
1616 int segment, int newsize)
1618 int save, newmsg_size;
1620 LASSERT(newsize >= msg->lm_buflens[segment]);
1622 save = msg->lm_buflens[segment];
1623 msg->lm_buflens[segment] = newsize;
1624 newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1625 msg->lm_buflens[segment] = save;
1630 static int get_enlarged_msgsize2(struct lustre_msg *msg,
1631 int segment1, int newsize1,
1632 int segment2, int newsize2)
1634 int save1, save2, newmsg_size;
1636 LASSERT(newsize1 >= msg->lm_buflens[segment1]);
1637 LASSERT(newsize2 >= msg->lm_buflens[segment2]);
1639 save1 = msg->lm_buflens[segment1];
1640 save2 = msg->lm_buflens[segment2];
1641 msg->lm_buflens[segment1] = newsize1;
1642 msg->lm_buflens[segment2] = newsize2;
1643 newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1644 msg->lm_buflens[segment1] = save1;
1645 msg->lm_buflens[segment2] = save2;
1651 int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
1652 struct ptlrpc_request *req,
1654 int segment, int newsize)
1656 struct lustre_msg *newbuf;
1657 int txtsize, sigsize = 0, i;
1658 int newmsg_size, newbuf_size;
1661 * gss header is at seg 0;
1662 * embedded msg is at seg 1;
1663 * signature (if any) is at the last seg
1665 LASSERT(req->rq_reqbuf);
1666 LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
1667 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
1668 LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);
1670 /* 1. compute new embedded msg size */
1671 newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
1672 LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);
1674 /* 2. compute new wrapper msg size */
1675 if (svc == SPTLRPC_SVC_NULL) {
1676 /* no signature, get size directly */
1677 newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
1680 txtsize = req->rq_reqbuf->lm_buflens[0];
1682 if (svc == SPTLRPC_SVC_INTG) {
1683 for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
1684 txtsize += req->rq_reqbuf->lm_buflens[i];
1685 txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
1688 sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
1689 LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
1691 newbuf_size = get_enlarged_msgsize2(
1694 msg_last_segidx(req->rq_reqbuf),
1698 /* request from pool should always have enough buffer */
1699 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
1701 if (req->rq_reqbuf_len < newbuf_size) {
1702 newbuf_size = size_roundup_power2(newbuf_size);
1704 OBD_ALLOC(newbuf, newbuf_size);
1708 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
1710 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
1711 req->rq_reqbuf = newbuf;
1712 req->rq_reqbuf_len = newbuf_size;
1713 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
1716 /* do enlargement, from wrapper to embedded, from end to begin */
1717 if (svc != SPTLRPC_SVC_NULL)
1718 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
1719 msg_last_segidx(req->rq_reqbuf),
1722 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
1723 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
1725 req->rq_reqlen = newmsg_size;
1730 int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
1731 struct ptlrpc_request *req,
1732 int segment, int newsize)
1734 struct lustre_msg *newclrbuf;
1735 int newmsg_size, newclrbuf_size, newcipbuf_size;
1739 * embedded msg is at seg 0 of clear buffer;
1740 * cipher text is at seg 2 of cipher buffer;
1742 LASSERT(req->rq_pool ||
1743 (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
1744 LASSERT(req->rq_reqbuf == NULL ||
1745 (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
1746 LASSERT(req->rq_clrbuf);
1747 LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
1748 LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);
1750 /* compute new embedded msg size */
1751 newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
1753 /* compute new clear buffer size */
1754 newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
1755 newclrbuf_size += GSS_MAX_CIPHER_BLOCK;
1757 /* compute new cipher buffer size */
1758 buflens[0] = PTLRPC_GSS_HEADER_SIZE;
1759 buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
1760 buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
1761 newcipbuf_size = lustre_msg_size_v2(3, buflens);
1763 /* handle the case that we put both clear buf and cipher buf into
1764 * pre-allocated single buffer. */
1765 if (unlikely(req->rq_pool) &&
1766 req->rq_clrbuf >= req->rq_reqbuf &&
1767 (char *) req->rq_clrbuf <
1768 (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
1769                 /* best case: we still fit into the
1770                  * pre-allocated buffer. */
1771 if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
1774 /* move clear text backward. */
1775 src = req->rq_clrbuf;
1776 dst = (char *) req->rq_reqbuf + newcipbuf_size;
1778 memmove(dst, src, req->rq_clrbuf_len);
1780 req->rq_clrbuf = (struct lustre_msg *) dst;
1781 req->rq_clrbuf_len = newclrbuf_size;
1782 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
1784 /* sadly we have to split out the clear buffer */
1785 LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
1786 LASSERT(req->rq_clrbuf_len < newclrbuf_size);
1790 if (req->rq_clrbuf_len < newclrbuf_size) {
1791 newclrbuf_size = size_roundup_power2(newclrbuf_size);
1793 OBD_ALLOC(newclrbuf, newclrbuf_size);
1794 if (newclrbuf == NULL)
1797 memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
1799 if (req->rq_reqbuf == NULL ||
1800 req->rq_clrbuf < req->rq_reqbuf ||
1801 (char *) req->rq_clrbuf >=
1802 (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
1803 OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
1806 req->rq_clrbuf = newclrbuf;
1807 req->rq_clrbuf_len = newclrbuf_size;
1808 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
1811 _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
1812 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
1813 req->rq_reqlen = newmsg_size;
1818 int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
1819 struct ptlrpc_request *req,
1820 int segment, int newsize)
1822 int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
1824 LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
1827 case SPTLRPC_SVC_NULL:
1828 case SPTLRPC_SVC_AUTH:
1829 case SPTLRPC_SVC_INTG:
1830 return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
1831 case SPTLRPC_SVC_PRIV:
1832 return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
1834 LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
1839 int gss_sec_install_rctx(struct obd_import *imp,
1840 struct ptlrpc_sec *sec,
1841 struct ptlrpc_cli_ctx *ctx)
1843 struct gss_sec *gsec;
1844 struct gss_cli_ctx *gctx;
1847 gsec = container_of(sec, struct gss_sec, gs_base);
1848 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
1850 rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
1854 /********************************************
1856 ********************************************/
1859 int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
1862 return (grctx->src_init || grctx->src_init_continue ||
1863 grctx->src_err_notify);
1867 void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
1870 gss_svc_upcall_put_ctx(grctx->src_ctx);
1872 sptlrpc_policy_put(grctx->src_base.sc_policy);
1873 OBD_FREE_PTR(grctx);
1877 void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
1879 LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
1880 cfs_atomic_inc(&grctx->src_base.sc_refcount);
1884 void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
1886 LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
1888 if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
1889 gss_svc_reqctx_free(grctx);
1893 int gss_svc_sign(struct ptlrpc_request *req,
1894 struct ptlrpc_reply_state *rs,
1895 struct gss_svc_reqctx *grctx,
1902 LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
1904         /* embedded lustre_msg might have been shrunk */
1905 if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
1906 lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
1908 if (req->rq_pack_bulk)
1909 flags |= LUSTRE_GSS_PACK_BULK;
1911 rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
1912 LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
1913 grctx->src_wirectx.gw_seq, svc, NULL);
1917 rs->rs_repdata_len = rc;
1919 if (likely(req->rq_packed_final)) {
1920 if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
1921 req->rq_reply_off = gss_at_reply_off_integ;
1923 req->rq_reply_off = 0;
1925 if (svc == SPTLRPC_SVC_NULL)
1926 rs->rs_repbuf->lm_cksum = crc32_le(!(__u32) 0,
1927 lustre_msg_buf(rs->rs_repbuf, 1, 0),
1928 lustre_msg_buflen(rs->rs_repbuf, 1));
1929 req->rq_reply_off = 0;
1935 int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
1937 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
1938 struct ptlrpc_reply_state *rs;
1939 struct gss_err_header *ghdr;
1940 int replen = sizeof(struct ptlrpc_body);
1944 //if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
1947 grctx->src_err_notify = 1;
1948 grctx->src_reserve_len = 0;
1950 rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
1952 CERROR("could not pack reply, err %d\n", rc);
1957 rs = req->rq_reply_state;
1958 LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
1959 ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
1960 ghdr->gh_version = PTLRPC_GSS_VERSION;
1962 ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
1963 ghdr->gh_major = major;
1964 ghdr->gh_minor = minor;
1965 ghdr->gh_handle.len = 0; /* fake context handle */
1967 rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
1968 rs->rs_repbuf->lm_buflens);
1970 CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
1971 major, minor, libcfs_nid2str(req->rq_peer.nid));
1976 int gss_svc_handle_init(struct ptlrpc_request *req,
1977 struct gss_wire_ctx *gw)
1979 struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
1980 struct lustre_msg *reqbuf = req->rq_reqbuf;
1981 struct obd_uuid *uuid;
1982 struct obd_device *target;
1983 rawobj_t uuid_obj, rvs_hdl, in_token;
1985 __u32 *secdata, seclen;
1989 CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
1990 libcfs_nid2str(req->rq_peer.nid));
1992 req->rq_ctx_init = 1;
1994 if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
1995 CERROR("unexpected bulk flag\n");
1996 RETURN(SECSVC_DROP);
1999 if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
2000 CERROR("proc %u: invalid handle length %u\n",
2001 gw->gw_proc, gw->gw_handle.len);
2002 RETURN(SECSVC_DROP);
2005 if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4){
2006 CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
2007 RETURN(SECSVC_DROP);
2010 swabbed = ptlrpc_req_need_swab(req);
2012 /* ctx initiate payload is in last segment */
2013 secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
2014 seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
2016 if (seclen < 4 + 4) {
2017 CERROR("sec size %d too small\n", seclen);
2018 RETURN(SECSVC_DROP);
2021 /* lustre svc type */
2022 lustre_svc = le32_to_cpu(*secdata++);
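        /* after the 32-bit lustre service type, the rest of the init payload
         * is a sequence of rawobj-encoded (length-prefixed) fields pulled out
         * below with rawobj_extract(): the target uuid, the client's reverse
         * context handle, and finally the GSS init token itself. */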
2025         /* extract the target uuid; note this code is somewhat fragile
2026          * because it touches the internal structure of obd_uuid */
2027 if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
2028 CERROR("failed to extract target uuid\n");
2029 RETURN(SECSVC_DROP);
2031 uuid_obj.data[uuid_obj.len - 1] = '\0';
2033 uuid = (struct obd_uuid *) uuid_obj.data;
2034 target = class_uuid2obd(uuid);
2035 if (!target || target->obd_stopping || !target->obd_set_up) {
2036 CERROR("target '%s' is not available for context init (%s)\n",
2037 uuid->uuid, target == NULL ? "no target" :
2038 (target->obd_stopping ? "stopping" : "not set up"));
2039 RETURN(SECSVC_DROP);
2042 /* extract reverse handle */
2043 if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
2044                 CERROR("failed to extract reverse handle\n");
2045 RETURN(SECSVC_DROP);
2049 if (rawobj_extract(&in_token, &secdata, &seclen)) {
2050 CERROR("can't extract token\n");
2051 RETURN(SECSVC_DROP);
2054 rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
2055 &rvs_hdl, &in_token);
2056 if (rc != SECSVC_OK)
2059 if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
2060 grctx->src_ctx->gsc_usr_root)
2061 CWARN("create svc ctx %p: user from %s authenticated as %s\n",
2062 grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
2063 grctx->src_ctx->gsc_usr_mds ? "mds" :
2064 (grctx->src_ctx->gsc_usr_oss ? "oss" : "root"));
2066 CWARN("create svc ctx %p: accept user %u from %s\n",
2067 grctx->src_ctx, grctx->src_ctx->gsc_uid,
2068 libcfs_nid2str(req->rq_peer.nid));
2070 if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
2071 if (reqbuf->lm_bufcount < 4) {
2072 CERROR("missing user descriptor\n");
2073 RETURN(SECSVC_DROP);
2075 if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
2076 CERROR("Mal-formed user descriptor\n");
2077 RETURN(SECSVC_DROP);
2080 req->rq_pack_udesc = 1;
2081 req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
2084 req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
2085 req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);
2091 * last segment must be the gss signature.
2094 int gss_svc_verify_request(struct ptlrpc_request *req,
2095 struct gss_svc_reqctx *grctx,
2096 struct gss_wire_ctx *gw,
2099 struct gss_svc_ctx *gctx = grctx->src_ctx;
2100 struct lustre_msg *msg = req->rq_reqbuf;
2105 *major = GSS_S_COMPLETE;
2107 if (msg->lm_bufcount < 2) {
2108 CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
2112 if (gw->gw_svc == SPTLRPC_SVC_NULL)
2115 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
2116 CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
2117 *major = GSS_S_DUPLICATE_TOKEN;
2121 *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
2122 if (*major != GSS_S_COMPLETE) {
2123 CERROR("failed to verify request: %x\n", *major);
2127 if (gctx->gsc_reverse == 0 &&
2128 gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
2129 CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
2130 *major = GSS_S_DUPLICATE_TOKEN;
2135 swabbed = ptlrpc_req_need_swab(req);
2137 /* user descriptor */
2138 if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
2139 if (msg->lm_bufcount < (offset + 1)) {
2140 CERROR("no user desc included\n");
2144 if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
2145 CERROR("Mal-formed user descriptor\n");
2149 req->rq_pack_udesc = 1;
2150 req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
2154 /* check bulk_sec_desc data */
2155 if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
2156 if (msg->lm_bufcount < (offset + 1)) {
2157 CERROR("missing bulk sec descriptor\n");
2161 if (bulk_sec_desc_unpack(msg, offset, swabbed))
2164 req->rq_pack_bulk = 1;
2165 grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
2166 grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
2169 req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
2170 req->rq_reqlen = msg->lm_buflens[1];
2175 int gss_svc_unseal_request(struct ptlrpc_request *req,
2176 struct gss_svc_reqctx *grctx,
2177 struct gss_wire_ctx *gw,
2180 struct gss_svc_ctx *gctx = grctx->src_ctx;
2181 struct lustre_msg *msg = req->rq_reqbuf;
2182 int swabbed, msglen, offset = 1;
2185 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
2186 CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
2187 *major = GSS_S_DUPLICATE_TOKEN;
2191 *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
2192 &msglen, req->rq_reqdata_len);
2193 if (*major != GSS_S_COMPLETE) {
2194 CERROR("failed to unwrap request: %x\n", *major);
2198 if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
2199 CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
2200 *major = GSS_S_DUPLICATE_TOKEN;
2204 swabbed = __lustre_unpack_msg(msg, msglen);
2206 CERROR("Failed to unpack after decryption\n");
2209 req->rq_reqdata_len = msglen;
2211 if (msg->lm_bufcount < 1) {
2212 CERROR("Invalid buffer: is empty\n");
2216 if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
2217 if (msg->lm_bufcount < offset + 1) {
2218 CERROR("no user descriptor included\n");
2222 if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
2223 CERROR("Mal-formed user descriptor\n");
2227 req->rq_pack_udesc = 1;
2228 req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
2232 if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
2233 if (msg->lm_bufcount < offset + 1) {
2234 CERROR("no bulk checksum included\n");
2238 if (bulk_sec_desc_unpack(msg, offset, swabbed))
2241 req->rq_pack_bulk = 1;
2242 grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
2243 grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
2246 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
2247 req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
int gss_svc_handle_data(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major = 0;
        int                    rc = 0;
        ENTRY;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                major = GSS_S_NO_CONTEXT;
                goto error;
        }

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_verify_request(req, grctx, gw, &major);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_unseal_request(req, grctx, gw, &major);
                break;
        default:
                CERROR("unsupported gss service %d\n", gw->gw_svc);
                rc = -EINVAL;
        }

        if (rc == 0)
                RETURN(SECSVC_OK);

        CERROR("svc %u failed: major 0x%08x: req xid "LPU64" ctx %p idx "
               LPX64"(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
               grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
               grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
error:
        /* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
         * might happen after a server reboot, to allow recovery. */
        if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
            gss_pack_err_notify(req, major, 0) == 0)
                RETURN(SECSVC_COMPLETE);

        RETURN(SECSVC_DROP);
}

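/*
 * PTLRPC_GSS_PROC_DESTROY handler: the request must be integrity protected;
 * verify it, then tear down the matching server-side context. No reply is
 * sent (rq_no_reply is set), and a missing or malformed user descriptor is
 * simply ignored.
 */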
int gss_svc_handle_destroy(struct ptlrpc_request *req,
                           struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major;
        ENTRY;

        req->rq_ctx_fini = 1;
        req->rq_no_reply = 1;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
                RETURN(SECSVC_DROP);
        }

        if (gw->gw_svc != SPTLRPC_SVC_INTG) {
                CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
                RETURN(SECSVC_DROP);
        }

        if (gss_svc_verify_request(req, grctx, gw, &major))
                RETURN(SECSVC_DROP);

        CWARN("destroy svc ctx %p idx "LPX64" (%u->%s)\n",
              grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
              grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        gss_svc_upcall_destroy_ctx(grctx->src_ctx);

        if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
                if (req->rq_reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }
                if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
                                             ptlrpc_req_need_swab(req))) {
                        CERROR("Mal-formed user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
        }

        RETURN(SECSVC_OK);
}

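/*
 * Main entry point for incoming requests under the gss policy: swab and
 * sanity-check the gss header, allocate the per-request context, save the
 * wire context, and dispatch on gh_proc (INIT/DATA/DESTROY). On SECSVC_OK
 * the authentication results are copied into the ptlrpc_request.
 */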
int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
{
        struct gss_header      *ghdr;
        struct gss_svc_reqctx  *grctx;
        struct gss_wire_ctx    *gw;
        int                     swabbed, rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_svc_ctx == NULL);

        if (req->rq_reqbuf->lm_bufcount < 2) {
                CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        swabbed = ptlrpc_req_need_swab(req);

        ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(SECSVC_DROP);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u, expect %u\n", ghdr->gh_version,
                       PTLRPC_GSS_VERSION);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = ghdr->gh_sp;

        /* alloc grctx data */
        OBD_ALLOC_PTR(grctx);
        if (!grctx)
                RETURN(SECSVC_DROP);

        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
        cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
        req->rq_svc_ctx = &grctx->src_base;
        gw = &grctx->src_wirectx;

        /* save wire context */
        gw->gw_flags = ghdr->gh_flags;
        gw->gw_proc = ghdr->gh_proc;
        gw->gw_seq = ghdr->gh_seq;
        gw->gw_svc = ghdr->gh_svc;
        rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);

        /* keep the original wire header, which is subject to checksum
         * verification */
        if (swabbed)
                gss_header_swabber(ghdr);

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_INIT:
        case PTLRPC_GSS_PROC_CONTINUE_INIT:
                rc = gss_svc_handle_init(req, gw);
                break;
        case PTLRPC_GSS_PROC_DATA:
                rc = gss_svc_handle_data(req, gw);
                break;
        case PTLRPC_GSS_PROC_DESTROY:
                rc = gss_svc_handle_destroy(req, gw);
                break;
        default:
                CERROR("unknown proc %u\n", gw->gw_proc);
                rc = SECSVC_DROP;
                break;
        }

        switch (rc) {
        case SECSVC_OK:
                LASSERT(grctx->src_ctx);

                req->rq_auth_gss = 1;
                req->rq_auth_remote = grctx->src_ctx->gsc_remote;
                req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
                req->rq_auth_usr_ost = grctx->src_ctx->gsc_usr_oss;
                req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
                req->rq_auth_uid = grctx->src_ctx->gsc_uid;
                req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
                break;
        case SECSVC_COMPLETE:
                break;
        case SECSVC_DROP:
                gss_svc_reqctx_free(grctx);
                req->rq_svc_ctx = NULL;
                break;
        }

        RETURN(rc);
}

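/*
 * Invalidate the gss context attached to a service context: log it and ask
 * the upcall cache to destroy the corresponding entry.
 */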
void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_svc_reqctx *grctx;
        ENTRY;

        if (svc_ctx == NULL) {
                EXIT;
                return;
        }

        grctx = gss_svc_ctx2reqctx(svc_ctx);

        CWARN("gss svc invalidate ctx %p(%u)\n",
              grctx->src_ctx, grctx->src_ctx->gsc_uid);
        gss_svc_upcall_destroy_ctx(grctx->src_ctx);

        EXIT;
}

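/*
 * Security payload size to reserve for a reply of 'msgsize' bytes. Special
 * request contexts carry a pre-reserved length which is used for everything
 * except early replies.
 */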
int gss_svc_payload(struct gss_svc_reqctx *grctx, int early,
                    int msgsize, int privacy)
{
        /* an early reply is treated normally: it shares the same ctx as the
         * original request, so in that case we ignore the special ctx's
         * reserved length */
        if (early == 0 && gss_svc_reqctx_is_special(grctx))
                return grctx->src_reserve_len;

        return gss_mech_payload(NULL, msgsize, privacy);
}

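/*
 * Size of the bulk security descriptor segment in the reply, depending on
 * the negotiated bulk service (none, integrity checksum or privacy).
 */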
static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
                                struct sptlrpc_flavor *flvr,
                                int read)
{
        int payload = sizeof(struct ptlrpc_bulk_sec_desc);

        switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
        case SPTLRPC_BULK_SVC_NULL:
                break;
        case SPTLRPC_BULK_SVC_INTG:
                payload += gss_mech_payload(NULL, 0, 0);
                break;
        case SPTLRPC_BULK_SVC_PRIV:
                payload += gss_mech_payload(NULL, 0, 1);
                break;
        case SPTLRPC_BULK_SVC_AUTH:
        default:
                LBUG();
        }

        return payload;
}

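/*
 * Allocate (or reuse a pre-allocated) reply state for this request and lay
 * out the reply buffer: in privacy mode the clear-text segments are wrapped
 * later by gss_svc_seal(); otherwise the gss header, reply message, optional
 * bulk security descriptor and signature segments are laid out directly.
 */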
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
        struct gss_svc_reqctx       *grctx;
        struct ptlrpc_reply_state   *rs;
        int                          early, privacy, svc, bsd_off = 0;
        __u32                        ibuflens[2], buflens[4];
        int                          ibufcnt = 0, bufcnt;
        int                          txtsize, wmsg_size, rs_size;
        ENTRY;

        LASSERT(msglen % 8 == 0);

        if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
                CERROR("client request bulk sec on non-bulk rpc\n");
                RETURN(-EPROTO);
        }

        svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
        early = (req->rq_packed_final == 0);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        if (!early && gss_svc_reqctx_is_special(grctx))
                privacy = 0;
        else
                privacy = (svc == SPTLRPC_SVC_PRIV);

        if (privacy) {
                /* inner clear buffers */
                ibufcnt = 1;
                ibuflens[0] = msglen;

                if (req->rq_pack_bulk) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = ibufcnt;
                        ibuflens[ibufcnt++] = gss_svc_bulk_payload(
                                                        grctx->src_ctx,
                                                        &req->rq_flvr,
                                                        req->rq_bulk_read);
                }

                txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
                txtsize += GSS_MAX_CIPHER_BLOCK;

                /* wrapper buffer */
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = gss_svc_payload(grctx, early, txtsize, 1);
        } else {
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = msglen;

                txtsize = buflens[0];
                if (svc == SPTLRPC_SVC_INTG)
                        txtsize += buflens[1];

                if (req->rq_pack_bulk) {
                        LASSERT(grctx->src_reqbsd);

                        bsd_off = bufcnt;
                        buflens[bufcnt] = gss_svc_bulk_payload(
                                                        grctx->src_ctx,
                                                        &req->rq_flvr,
                                                        req->rq_bulk_read);
                        if (svc == SPTLRPC_SVC_INTG)
                                txtsize += buflens[bufcnt];
                        bufcnt++;
                }

                if ((!early && gss_svc_reqctx_is_special(grctx)) ||
                    svc != SPTLRPC_SVC_NULL)
                        buflens[bufcnt++] = gss_svc_payload(grctx, early,
                                                            txtsize, 0);
        }

        wmsg_size = lustre_msg_size_v2(bufcnt, buflens);

        rs_size = sizeof(*rs) + wmsg_size;
        rs = req->rq_reply_state;
        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = wmsg_size;

        /* initialize the buffer */
        if (privacy) {
                lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
        } else {
                lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
                rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
        }

        if (bsd_off) {
                grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
                grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
                                                           bsd_off);
        }

        gss_svc_reqctx_addref(grctx);
        rs->rs_svc_ctx = req->rq_svc_ctx;

        LASSERT(rs->rs_msg);
        req->rq_reply_state = rs;
        RETURN(0);
}

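/*
 * Privacy-mode reply path: shrink the embedded lustre_msg if needed, build
 * a gss header at the tail of the reply buffer, wrap (encrypt) the clear
 * reply into a temporary token, then rewrite the reply buffer as two
 * segments: gss header and gss token.
 */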
static int gss_svc_seal(struct ptlrpc_request *req,
                        struct ptlrpc_reply_state *rs,
                        struct gss_svc_reqctx *grctx)
{
        struct gss_svc_ctx      *gctx = grctx->src_ctx;
        rawobj_t                 hdrobj, msgobj, token;
        struct gss_header       *ghdr;
        __u8                    *token_buf;
        int                      token_buflen;
        __u32                    buflens[2], major;
        int                      msglen, rc;
        ENTRY;

        /* get the clear data length. note the embedded lustre_msg might
         * have been shrunk */
        if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
                msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
        else
                msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                            rs->rs_repbuf->lm_buflens);

        /* temporarily use the tail of the buffer to hold gss header data */
        LASSERT(msglen + PTLRPC_GSS_HEADER_SIZE <= rs->rs_repbuf_len);
        ghdr = (struct gss_header *) ((char *) rs->rs_repbuf +
                                rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_sp = LUSTRE_SP_ANY;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
        ghdr->gh_seq = grctx->src_wirectx.gw_seq;
        ghdr->gh_svc = SPTLRPC_SVC_PRIV;
        ghdr->gh_handle.len = 0;
        if (req->rq_pack_bulk)
                ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;

        /* allocate temporary cipher buffer */
        token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
        OBD_ALLOC(token_buf, token_buflen);
        if (token_buf == NULL)
                RETURN(-ENOMEM);

        hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
        hdrobj.data = (__u8 *) ghdr;
        msgobj.len = msglen;
        msgobj.data = (__u8 *) rs->rs_repbuf;
        token.len = token_buflen;
        token.data = token_buf;

        major = lgss_wrap(gctx->gsc_mechctx, &hdrobj, &msgobj,
                          rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE, &token);
        if (major != GSS_S_COMPLETE) {
                CERROR("wrap message error: %08x\n", major);
                GOTO(out_free, rc = -EPERM);
        }
        LASSERT(token.len <= token_buflen);

        /* we are about to overwrite data at rs->rs_repbuf; nullify pointers
         * into it to catch further illegal usage. */
        if (req->rq_pack_bulk) {
                grctx->src_repbsd = NULL;
                grctx->src_repbsd_size = 0;
        }

        /* now fill the actual wire data
         * - gss header
         * - gss token
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = token.len;

        rs->rs_repdata_len = lustre_msg_size_v2(2, buflens);
        LASSERT(rs->rs_repdata_len <= rs->rs_repbuf_len);

        lustre_init_msg_v2(rs->rs_repbuf, 2, buflens, NULL);
        rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;

        memcpy(lustre_msg_buf(rs->rs_repbuf, 0, 0), ghdr,
               PTLRPC_GSS_HEADER_SIZE);
        memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);

        /* reply offset */
        if (req->rq_packed_final &&
            (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))
                req->rq_reply_off = gss_at_reply_off_priv;
        else
                req->rq_reply_off = 0;

        /* to catch upper layer's further access */
        rs->rs_msg = NULL;
        req->rq_repmsg = NULL;
        req->rq_replen = 0;

        rc = 0;
out_free:
        OBD_FREE(token_buf, token_buflen);
        RETURN(rc);
}

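/*
 * Finalize the reply before it is sent: sign it for NULL/AUTH/INTG services
 * or seal it for PRIV. For special request contexts the reply data is
 * already in place (rs_repdata_len != 0) and only the reply offset is set.
 */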
int gss_svc_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct gss_wire_ctx       *gw = &grctx->src_wirectx;
        int                        early, rc;
        ENTRY;

        early = (req->rq_packed_final == 0);

        if (!early && gss_svc_reqctx_is_special(grctx)) {
                LASSERT(rs->rs_repdata_len != 0);

                req->rq_reply_off = gss_at_reply_off_integ;
                RETURN(0);
        }

        /* early reply could happen in many cases */
        if (!early &&
            gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
            gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
                CERROR("proc %d not supported\n", gw->gw_proc);
                RETURN(-EINVAL);
        }

        LASSERT(grctx->src_ctx);

        switch (gw->gw_svc) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
                break;
        case SPTLRPC_SVC_PRIV:
                rc = gss_svc_seal(req, rs, grctx);
                break;
        default:
                CERROR("Unknown service %d\n", gw->gw_svc);
                GOTO(out, rc = -EINVAL);
        }

out:
        RETURN(rc);
}

void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
{
        struct gss_svc_reqctx *grctx;

        LASSERT(rs->rs_svc_ctx);
        grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);

        gss_svc_reqctx_decref(grctx);
        rs->rs_svc_ctx = NULL;

        if (!rs->rs_prealloc)
                OBD_FREE(rs, rs->rs_size);
}

void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
        LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
        gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}

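/*
 * Build a reverse (server-to-client) client context from an established
 * service context so the server can send callbacks: duplicate the service
 * handle, copy the mech context, and resume from the sequence number
 * recorded on the buddy svcctx.
 */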
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
                         struct ptlrpc_svc_ctx *svc_ctx)
{
        struct gss_cli_ctx     *cli_gctx = ctx2gctx(cli_ctx);
        struct gss_svc_ctx     *svc_gctx = gss_svc_ctx2gssctx(svc_ctx);
        struct gss_ctx         *mechctx = NULL;

        LASSERT(cli_gctx);
        LASSERT(svc_gctx && svc_gctx->gsc_mechctx);

        cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
        cli_gctx->gc_win = GSS_SEQ_WIN;

        /* The reverse ctx might get lost in some recovery situations, and
         * the same svc_ctx will be used to re-create it. If a callback was
         * sent out before that, a new reverse ctx starting with sequence 0
         * would cause future callback rpcs to be treated as replays.
         *
         * Each reverse root ctx records its latest sequence number on its
         * buddy svcctx before being destroyed, so we continue to use it
         * here. */
        cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);

        if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
                CERROR("failed to dup svc handle\n");
                goto err_out;
        }

        if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
            GSS_S_COMPLETE) {
                CERROR("failed to copy mech context\n");
                goto err_svc_handle;
        }

        if (rawobj_dup(&cli_gctx->gc_handle, &svc_gctx->gsc_rvs_hdl)) {
                CERROR("failed to dup reverse handle\n");
                goto err_ctx;
        }

        cli_gctx->gc_mechctx = mechctx;
        gss_cli_ctx_uptodate(cli_gctx);

        return 0;

err_ctx:
        lgss_delete_sec_context(&mechctx);
err_svc_handle:
        rawobj_free(&cli_gctx->gc_svc_handle);
err_out:
        return -ENOMEM;
}

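/*
 * Pre-compute the fixed early-reply offsets for integrity and privacy mode,
 * based on the early reply message size and the mech payload overhead.
 */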
static void gss_init_at_reply_offset(void)
{
        __u32 buflens[3];
        int clearsize;

        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = lustre_msg_early_size();
        buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
        gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);

        buflens[0] = lustre_msg_early_size();
        clearsize = lustre_msg_size_v2(1, buflens);
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(NULL, clearsize, 0);
        buflens[2] = gss_cli_payload(NULL, clearsize, 1);
        gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
}

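/*
 * Module init: bring up lprocfs, the client and server upcalls and the
 * kerberos mechanism, then register the keyring (and optionally pipefs)
 * policy and compute the early-reply offsets.
 */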
int __init sptlrpc_gss_init(void)
{
        int rc;

        rc = gss_init_lproc();
        if (rc)
                return rc;

        rc = gss_init_cli_upcall();
        if (rc)
                goto out_lproc;

        rc = gss_init_svc_upcall();
        if (rc)
                goto out_cli_upcall;

        rc = init_kerberos_module();
        if (rc)
                goto out_svc_upcall;

        /* register the policy after everything else has been initialized,
         * because it might be used immediately after the registration. */

        rc = gss_init_keyring();
        if (rc)
                goto out_kerberos;

#ifdef HAVE_GSS_PIPEFS
        rc = gss_init_pipefs();
        if (rc)
                goto out_keyring;
#endif

        gss_init_at_reply_offset();

        return 0;

#ifdef HAVE_GSS_PIPEFS
out_keyring:
        gss_exit_keyring();
#endif

out_kerberos:
        cleanup_kerberos_module();
out_svc_upcall:
        gss_exit_svc_upcall();
out_cli_upcall:
        gss_exit_cli_upcall();
out_lproc:
        gss_exit_lproc();
        return rc;
}

static void __exit sptlrpc_gss_exit(void)
{
        gss_exit_keyring();
#ifdef HAVE_GSS_PIPEFS
        gss_exit_pipefs();
#endif
        cleanup_kerberos_module();
        gss_exit_svc_upcall();
        gss_exit_cli_upcall();
        gss_exit_lproc();
}

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("GSS security policy for Lustre");
MODULE_LICENSE("GPL");

module_init(sptlrpc_gss_init);
module_exit(sptlrpc_gss_exit);