-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* Modifications for Lustre
- * Copyright 2004 - 2007, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
* Author: Eric Mei <ericm@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/random.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
-#else
-#include <liblustre.h>
-#endif
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
+#include <obd_cksum.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include "gss_api.h"
#include <linux/crypto.h>
+#include <linux/crc32.h>
+
+/*
+ * early replies have a fixed size, respectively in privacy and integrity
+ * mode, so we calculate them only once.
+ */
+static int gss_at_reply_off_integ;
+static int gss_at_reply_off_priv;
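/*
 * Editorial sketch, not part of this patch: one plausible way to fill in the
 * two offsets above once at startup.  The helper name gss_init_at_reply_offset()
 * and the use of lustre_msg_early_size() are assumptions here; the actual
 * initializer is outside this hunk.
 */
#if 0
static void gss_init_at_reply_offset(void)
{
        __u32 buflens[3];
        int clearsize;

        /* integrity mode: gss header + early reply msg + signature */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = lustre_msg_early_size();
        buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
        gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);

        /* privacy mode: gss header + cipher text covering the early reply */
        buflens[0] = lustre_msg_early_size();
        clearsize = lustre_msg_size_v2(1, buflens);
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(NULL, clearsize, 1);
        gss_at_reply_off_priv = lustre_msg_size_v2(2, buflens);
}
#endif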
+
+
+static inline int msg_last_segidx(struct lustre_msg *msg)
+{
+ LASSERT(msg->lm_bufcount > 0);
+ return msg->lm_bufcount - 1;
+}
+static inline int msg_last_seglen(struct lustre_msg *msg)
+{
+ return msg->lm_buflens[msg_last_segidx(msg)];
+}
/********************************************
 * wire data swabber                        *
 ********************************************/
static
void gss_header_swabber(struct gss_header *ghdr)
{
- __swab32s(&ghdr->gh_version);
__swab32s(&ghdr->gh_flags);
__swab32s(&ghdr->gh_proc);
__swab32s(&ghdr->gh_seq);
__swab32s(&ghdr->gh_svc);
__swab32s(&ghdr->gh_pad1);
- __swab32s(&ghdr->gh_pad2);
- __swab32s(&ghdr->gh_pad3);
__swab32s(&ghdr->gh_handle.len);
}
-struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment)
+struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
+ int swabbed)
{
struct gss_header *ghdr;
- ghdr = lustre_swab_buf(msg, segment, sizeof(*ghdr),
- gss_header_swabber);
+ ghdr = lustre_msg_buf(msg, segment, sizeof(*ghdr));
+ if (ghdr == NULL)
+ return NULL;
+
+ if (swabbed)
+ gss_header_swabber(ghdr);
- if (ghdr &&
- sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
- CERROR("gss header require length %u, now %u received\n",
- (unsigned int) sizeof(*ghdr) + ghdr->gh_handle.len,
+ if (sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
+ CERROR("gss header has length %d, now %u received\n",
+ (int) sizeof(*ghdr) + ghdr->gh_handle.len,
msg->lm_buflens[segment]);
return NULL;
}
return ghdr;
}
+#if 0
static
void gss_netobj_swabber(netobj_t *obj)
{
return obj;
}
+#endif
/*
* payload should be obtained from mechanism. but currently since we
* only support kerberos, we could simply use fixed value.
- * krb5 header: 16
- * krb5 checksum: 20
+ * krb5 "meta" data:
+ * - krb5 header: 16
+ * - krb5 checksum: 20
+ *
+ * for privacy mode, the payload also includes the cipher text, which has the
+ * same size as the plain text, plus a possible confounder and padding, each
+ * at most one cipher block in size.
*/
#define GSS_KRB5_INTEG_MAX_PAYLOAD (40)
static inline
-int gss_estimate_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
+int gss_mech_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
- if (privacy) {
- /* we suppose max cipher block size is 16 bytes. here we
- * add 16 for confounder and 16 for padding.
- */
- return GSS_KRB5_INTEG_MAX_PAYLOAD + msgsize + 16 + 16 + 16;
- } else {
+ if (privacy)
+ return GSS_KRB5_INTEG_MAX_PAYLOAD + 16 + 16 + 16 + msgsize;
+ else
return GSS_KRB5_INTEG_MAX_PAYLOAD;
- }
}
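/*
 * Worked example (editorial note, not part of the patch): for a 1024-byte
 * clear text in privacy mode the estimate above is
 *
 *      gss_mech_payload(NULL, 1024, 1) == 40 + 16 + 16 + 16 + 1024 == 1112
 *
 * i.e. krb5 header/checksum plus confounder, padding and cipher-block slack
 * plus the cipher text itself, while integrity mode always costs the fixed
 * 40 bytes regardless of message size.
 */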
/*
* return signature size, otherwise < 0 to indicate error
*/
-static
-int gss_sign_msg(struct lustre_msg *msg,
- struct gss_ctx *mechctx,
- __u32 proc, __u32 seq,
- rawobj_t *handle)
+static int gss_sign_msg(struct lustre_msg *msg,
+ struct gss_ctx *mechctx,
+ enum lustre_sec_part sp,
+ __u32 flags, __u32 proc, __u32 seq, __u32 svc,
+ rawobj_t *handle)
{
struct gss_header *ghdr;
- rawobj_t text[3], mic;
- int textcnt, mic_idx = msg->lm_bufcount - 1;
+ rawobj_t text[4], mic;
+ int textcnt, max_textcnt, mic_idx;
__u32 major;
- LASSERT(msg->lm_bufcount >= 3);
+ LASSERT(msg->lm_bufcount >= 2);
/* gss hdr */
LASSERT(msg->lm_buflens[0] >=
ghdr = lustre_msg_buf(msg, 0, 0);
ghdr->gh_version = PTLRPC_GSS_VERSION;
- ghdr->gh_flags = 0;
+ ghdr->gh_sp = (__u8) sp;
+ ghdr->gh_flags = flags;
ghdr->gh_proc = proc;
ghdr->gh_seq = seq;
- ghdr->gh_svc = PTLRPC_GSS_SVC_INTEGRITY;
+ ghdr->gh_svc = svc;
if (!handle) {
/* fill in a fake one */
ghdr->gh_handle.len = 0;
memcpy(ghdr->gh_handle.data, handle->data, handle->len);
}
+ /* no actual signature for null mode */
+ if (svc == SPTLRPC_SVC_NULL)
+ return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+
/* MIC */
- for (textcnt = 0; textcnt < mic_idx; textcnt++) {
+ mic_idx = msg_last_segidx(msg);
+ max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
+
+ for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
text[textcnt].len = msg->lm_buflens[textcnt];
text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
}
mic.len = msg->lm_buflens[mic_idx];
mic.data = lustre_msg_buf(msg, mic_idx, 0);
- major = lgss_get_mic(mechctx, textcnt, text, &mic);
+ major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
if (major != GSS_S_COMPLETE) {
CERROR("fail to generate MIC: %08x\n", major);
return -EPERM;
*/
static
__u32 gss_verify_msg(struct lustre_msg *msg,
- struct gss_ctx *mechctx)
+ struct gss_ctx *mechctx,
+ __u32 svc)
{
- rawobj_t text[3];
- rawobj_t mic;
- int textcnt, mic_idx = msg->lm_bufcount - 1;
- __u32 major;
+ rawobj_t text[4], mic;
+ int textcnt, max_textcnt;
+ int mic_idx;
+ __u32 major;
+
+ LASSERT(msg->lm_bufcount >= 2);
+
+ if (svc == SPTLRPC_SVC_NULL)
+ return GSS_S_COMPLETE;
- for (textcnt = 0; textcnt < mic_idx; textcnt++) {
+ mic_idx = msg_last_segidx(msg);
+ max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
+
+ for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
text[textcnt].len = msg->lm_buflens[textcnt];
text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
}
mic.len = msg->lm_buflens[mic_idx];
mic.data = lustre_msg_buf(msg, mic_idx, 0);
- major = lgss_verify_mic(mechctx, textcnt, text, &mic);
+ major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
if (major != GSS_S_COMPLETE)
CERROR("mic verify error: %08x\n", major);
struct lustre_msg *msgbuf,
int *msg_len, int msgbuf_len)
{
- rawobj_t clear_obj, micobj, msgobj, token;
+ rawobj_t clear_obj, hdrobj, token;
__u8 *clear_buf;
int clear_buflen;
__u32 major;
ENTRY;
- if (msgbuf->lm_bufcount != 3) {
+ if (msgbuf->lm_bufcount != 2) {
CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
RETURN(GSS_S_FAILURE);
}
- /* verify gss header */
- msgobj.len = msgbuf->lm_buflens[0];
- msgobj.data = lustre_msg_buf(msgbuf, 0, 0);
- micobj.len = msgbuf->lm_buflens[1];
- micobj.data = lustre_msg_buf(msgbuf, 1, 0);
-
- major = lgss_verify_mic(mechctx, 1, &msgobj, &micobj);
- if (major != GSS_S_COMPLETE) {
- CERROR("priv: mic verify error: %08x\n", major);
- RETURN(major);
- }
-
- /* temporary clear text buffer */
- clear_buflen = msgbuf->lm_buflens[2];
- OBD_ALLOC(clear_buf, clear_buflen);
+ /* allocate a temporary clear text buffer, the same size as the token;
+ * we assume the final clear text size <= token size */
+ clear_buflen = lustre_msg_buflen(msgbuf, 1);
+ OBD_ALLOC_LARGE(clear_buf, clear_buflen);
if (!clear_buf)
RETURN(GSS_S_FAILURE);
- token.len = msgbuf->lm_buflens[2];
- token.data = lustre_msg_buf(msgbuf, 2, 0);
-
+ /* buffer objects */
+ hdrobj.len = lustre_msg_buflen(msgbuf, 0);
+ hdrobj.data = lustre_msg_buf(msgbuf, 0, 0);
+ token.len = lustre_msg_buflen(msgbuf, 1);
+ token.data = lustre_msg_buf(msgbuf, 1, 0);
clear_obj.len = clear_buflen;
clear_obj.data = clear_buf;
- major = lgss_unwrap(mechctx, &token, &clear_obj);
+ major = lgss_unwrap(mechctx, &hdrobj, &token, &clear_obj);
if (major != GSS_S_COMPLETE) {
- CERROR("priv: unwrap message error: %08x\n", major);
+ CERROR("unwrap message error: %08x\n", major);
GOTO(out_free, major = GSS_S_FAILURE);
}
LASSERT(clear_obj.len <= clear_buflen);
+ LASSERT(clear_obj.len <= msgbuf_len);
/* now the decrypted message */
memcpy(msgbuf, clear_obj.data, clear_obj.len);
major = GSS_S_COMPLETE;
out_free:
- OBD_FREE(clear_buf, clear_buflen);
+ OBD_FREE_LARGE(clear_buf, clear_buflen);
RETURN(major);
}
int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(atomic_read(&ctx->cc_refcount));
- if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
- cfs_time_t now;
+ if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+ if (!ctx->cc_early_expire)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_expire,
+ ctx->cc_expire == 0 ? 0 :
+ cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
- now = cfs_time_current_sec();
- if (ctx->cc_expire && cfs_time_aftereq(now, ctx->cc_expire))
- CWARN("ctx %p(%u->%s): get expired (%lds exceeds)\n",
- ctx, ctx->cc_vcred.vc_uid,
- sec2target_str(ctx->cc_sec),
- cfs_time_sub(now, ctx->cc_expire));
- else
- CWARN("ctx %p(%u->%s): force to die (%lds remains)\n",
- ctx, ctx->cc_vcred.vc_uid,
- sec2target_str(ctx->cc_sec),
- ctx->cc_expire == 0 ? 0 :
- cfs_time_sub(ctx->cc_expire, now));
+ sptlrpc_cli_ctx_wakeup(ctx);
+ return 1;
+ }
- return 1;
- }
- return 0;
+ return 0;
}
/*
return 1;
/* expire is 0 means never expire. a newly created gss context
- * which during upcall may has 0 expiration
- */
+ * which during upcall may have 0 expiration */
if (ctx->cc_expire == 0)
return 0;
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
- struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
- unsigned long ctx_expiry;
+ struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
+ unsigned long ctx_expiry;
if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
CERROR("ctx %p(%u): unable to inquire, expire it now\n",
}
ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
- ctx->cc_sec->ps_flags);
+ ctx->cc_sec->ps_flvr.sf_flags);
/* At this point this ctx might have been marked as dead by
* someone else, in which case nobody will make further use
* of it. we don't care, and mark it UPTODATE will help
- * destroying server side context when it be destroied.
- */
- set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ * destroying the server side context when it is destroyed. */
+ set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+
+ if (sec_is_reverse(ctx->cc_sec)) {
+ CWARN("server installed reverse ctx %p idx "LPX64", "
+ "expiry %lu(%+lds)\n", ctx,
+ gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
+ } else {
+ CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
+ "expiry %lu(%+lds)\n", ctx,
+ gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
- CWARN("%s ctx %p(%u->%s), will expire at %lu(%lds lifetime)\n",
- (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE ?
- "server installed reverse" : "client refreshed"),
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
- ctx->cc_expire, (long) (ctx->cc_expire - get_seconds()));
+ /* install reverse svc ctx for root context */
+ if (ctx->cc_vcred.vc_uid == 0)
+ gss_sec_install_rctx(ctx->cc_sec->ps_import,
+ ctx->cc_sec, ctx);
+ }
+
+ sptlrpc_cli_ctx_wakeup(ctx);
}
-static
-void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
+static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
{
- if (gctx->gc_mechctx)
+ LASSERT(gctx->gc_base.cc_sec);
+
+ if (gctx->gc_mechctx) {
lgss_delete_sec_context(&gctx->gc_mechctx);
+ gctx->gc_mechctx = NULL;
+ }
+
+ if (!rawobj_empty(&gctx->gc_svc_handle)) {
+ /* forward ctx: mark the buddy reverse svcctx as soon-to-expire. */
+ if (!sec_is_reverse(gctx->gc_base.cc_sec) &&
+ !rawobj_empty(&gctx->gc_svc_handle))
+ gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);
+
+ rawobj_free(&gctx->gc_svc_handle);
+ }
rawobj_free(&gctx->gc_handle);
}
*/
switch (phase) {
case 0:
- if (test_bit(seq_num % win_size, window))
+ if (test_bit(seq_num % win_size, window))
goto replay;
break;
case 1:
*/
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
- int rc = 0;
+ int rc = 0;
- spin_lock(&ssd->ssd_lock);
+ spin_lock(&ssd->ssd_lock);
if (set == 0) {
/*
gss_stat_oos_record_svc(2, 0);
}
exit:
- spin_unlock(&ssd->ssd_lock);
- return rc;
+ spin_unlock(&ssd->ssd_lock);
+ return rc;
}
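/*
 * Editorial sketch, not part of the patch: the replay check above maps a
 * sequence number to bit (seq_num % win_size) of the bitmap, all under
 * ssd_lock, so a replay inside the current window reduces to
 *
 *      if (test_bit(seq_num % win_size, window))
 *              goto replay;
 *
 * sequence numbers falling outside the window go through the out-of-sequence
 * accounting (gss_stat_oos_record_svc()) visible above.
 */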
/***************************************
* cred APIs *
***************************************/
-static inline
-int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
- int msgsize, int privacy)
+static inline int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
+ int msgsize, int privacy)
{
- return gss_estimate_payload(NULL, msgsize, privacy);
+ return gss_mech_payload(NULL, msgsize, privacy);
}
-int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
+static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
+ struct sptlrpc_flavor *flvr,
+ int reply, int read)
{
- return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
-}
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
-static
-void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
-{
- buf[0] = '\0';
+ LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);
- if (flags & PTLRPC_CTX_UPTODATE)
- strncat(buf, "uptodate,", bufsize);
- if (flags & PTLRPC_CTX_DEAD)
- strncat(buf, "dead,", bufsize);
- if (flags & PTLRPC_CTX_ERROR)
- strncat(buf, "error,", bufsize);
- if (flags & PTLRPC_CTX_CACHED)
- strncat(buf, "cached,", bufsize);
- if (flags & PTLRPC_CTX_ETERNAL)
- strncat(buf, "eternal,", bufsize);
- if (buf[0] == '\0')
- strncat(buf, "-,", bufsize);
+ if ((!reply && !read) || (reply && read)) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_cli_payload(ctx, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_cli_payload(ctx, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ LBUG();
+ }
+ }
- buf[strlen(buf) - 1] = '\0';
+ return payload;
}
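/*
 * Example (editorial, not part of the patch): with bulk service INTG, and
 * when the reply/read combination above applies, the value returned is
 * sizeof(struct ptlrpc_bulk_sec_desc) plus the fixed 40-byte integrity
 * payload of gss_cli_payload(ctx, 0, 0); with bulk service NULL it is just
 * the bare descriptor size.
 */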
-int gss_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
+int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
{
- struct gss_cli_ctx *gctx;
- char flags_str[40];
- int written;
-
- gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
-
- gss_cli_ctx_flags2str(ctx->cc_flags, flags_str, sizeof(flags_str));
-
- written = snprintf(buf, bufsize,
- "UID %d:\n"
- " flags: %s\n"
- " seqwin: %d\n"
- " sequence: %d\n",
- ctx->cc_vcred.vc_uid,
- flags_str,
- gctx->gc_win,
- atomic_read(&gctx->gc_seq));
-
- if (gctx->gc_mechctx) {
- written += lgss_display(gctx->gc_mechctx,
- buf + written, bufsize - written);
- }
+ return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
+}
- return written;
+void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
+{
+ buf[0] = '\0';
+
+ if (flags & PTLRPC_CTX_NEW)
+ strlcat(buf, "new,", bufsize);
+ if (flags & PTLRPC_CTX_UPTODATE)
+ strlcat(buf, "uptodate,", bufsize);
+ if (flags & PTLRPC_CTX_DEAD)
+ strlcat(buf, "dead,", bufsize);
+ if (flags & PTLRPC_CTX_ERROR)
+ strlcat(buf, "error,", bufsize);
+ if (flags & PTLRPC_CTX_CACHED)
+ strlcat(buf, "cached,", bufsize);
+ if (flags & PTLRPC_CTX_ETERNAL)
+ strlcat(buf, "eternal,", bufsize);
+ if (buf[0] == '\0')
+ strlcat(buf, "-,", bufsize);
}
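/*
 * Illustrative usage, not part of this patch (the variable names are made up):
 *
 *      char fbuf[64];
 *
 *      gss_cli_ctx_flags2str(ctx->cc_flags, fbuf, sizeof(fbuf));
 *      CDEBUG(D_SEC, "ctx %p flags: %s\n", ctx, fbuf);
 *
 * note the result keeps the trailing ',' of the last flag appended above.
 */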
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
struct ptlrpc_request *req)
{
- struct gss_cli_ctx *gctx;
- __u32 seq;
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+ __u32 flags = 0, seq, svc;
int rc;
ENTRY;
LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
LASSERT(req->rq_cli_ctx == ctx);
/* nothing to do for context negotiation RPCs */
if (req->rq_ctx_init)
RETURN(0);
- gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
-redo:
- seq = atomic_inc_return(&gctx->gc_seq);
-
- rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
- gctx->gc_proc, seq, &gctx->gc_handle);
- if (rc < 0)
- RETURN(rc);
-
- /* gss_sign_msg() msg might take long time to finish, in which period
- * more rpcs could be wrapped up and sent out. if we found too many
- * of them we should repack this rpc, because sent it too late might
- * lead to the sequence number fall behind the window on server and
- * be dropped. also applies to gss_cli_ctx_seal().
- */
- if (atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
- int behind = atomic_read(&gctx->gc_seq) - seq;
-
- gss_stat_oos_record_cli(behind);
- CWARN("req %p: %u behind, retry signing\n", req, behind);
- goto redo;
- }
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ if (req->rq_pack_bulk)
+ flags |= LUSTRE_GSS_PACK_BULK;
+ if (req->rq_pack_udesc)
+ flags |= LUSTRE_GSS_PACK_USER;
- req->rq_reqdata_len = rc;
- RETURN(0);
+redo:
+ seq = atomic_inc_return(&gctx->gc_seq);
+
+ rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
+ ctx->cc_sec->ps_part,
+ flags, gctx->gc_proc, seq, svc,
+ &gctx->gc_handle);
+ if (rc < 0)
+ RETURN(rc);
+
+ /* gss_sign_msg() might take a long time to finish, during which more
+ * rpcs could be wrapped up and sent out. if we find too many of them
+ * we should repack this rpc, because sending it too late might lead
+ * to the sequence number falling behind the window on the server and
+ * the rpc being dropped. the same applies to gss_cli_ctx_seal().
+ *
+ * Note: null mode doesn't check sequence number. */
+ if (svc != SPTLRPC_SVC_NULL &&
+ atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ int behind = atomic_read(&gctx->gc_seq) - seq;
+
+ gss_stat_oos_record_cli(behind);
+ CWARN("req %p: %u behind, retry signing\n", req, behind);
+ goto redo;
+ }
+
+ req->rq_reqdata_len = rc;
+ RETURN(0);
}
static
errhdr = (struct gss_err_header *) ghdr;
+ CWARN("req x"LPU64"/t"LPU64", ctx %p idx "LPX64"(%u->%s): "
+ "%sserver respond (%08x/%08x)\n",
+ req->rq_xid, req->rq_transno, ctx,
+ gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ sec_is_reverse(ctx->cc_sec) ? "reverse" : "",
+ errhdr->gh_major, errhdr->gh_minor);
+
+ /* context fini rpc, let it fail */
+ if (req->rq_ctx_fini) {
+ CWARN("context fini rpc failed\n");
+ return -EINVAL;
+ }
+
+ /* reverse sec, just return error, don't expire this ctx because it's
+ * crucial to callback rpcs. note if the callback rpc failed because
+ * of a bit flip during network transfer, the client will be evicted
+ * directly. to handle this more gracefully we probably want to let it
+ * retry a number of times. */
+ if (sec_is_reverse(ctx->cc_sec))
+ return -EINVAL;
+
+ if (errhdr->gh_major != GSS_S_NO_CONTEXT &&
+ errhdr->gh_major != GSS_S_BAD_SIG)
+ return -EACCES;
+
/* server return NO_CONTEXT might be caused by context expire
- * or server reboot/failover. we refresh the cred transparently
- * to upper layer.
+ * or server reboot/failover. we try to refresh a new ctx which is
+ * transparent to the upper layer.
+ *
* In some cases, our gss handle is possible to be incidentally
* identical to another handle since the handle itself is not
* fully random. In krb5 case, the GSS_S_BAD_SIG will be
* returned, maybe other gss error for other mechanism.
*
* if we add new mechanism, make sure the correct error are
- * returned in this case.
- *
- * but in any cases, don't resend ctx destroying rpc, don't resend
- * reverse rpc.
- */
- if (req->rq_ctx_fini) {
- CWARN("server respond error (%08x/%08x) for ctx fini\n",
- errhdr->gh_major, errhdr->gh_minor);
- rc = -EINVAL;
- } else if (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
- CWARN("reverse server respond error (%08x/%08x)\n",
- errhdr->gh_major, errhdr->gh_minor);
- rc = -EINVAL;
- } else if (errhdr->gh_major == GSS_S_NO_CONTEXT ||
- errhdr->gh_major == GSS_S_BAD_SIG) {
- CWARN("req x"LPU64"/t"LPU64": server respond ctx %p(%u->%s) "
- "%s, server might lost the context.\n",
- req->rq_xid, req->rq_transno, ctx, ctx->cc_vcred.vc_uid,
- sec2target_str(ctx->cc_sec),
- errhdr->gh_major == GSS_S_NO_CONTEXT ?
- "NO_CONTEXT" : "BAD_SIG");
-
- sptlrpc_cli_ctx_expire(ctx);
- /*
- * we need replace the ctx right here, otherwise during
- * resent we'll hit the logic in sptlrpc_req_refresh_ctx()
- * which keep the ctx with RESEND flag, thus we'll never
- * get rid of this ctx.
- */
- rc = sptlrpc_req_replace_dead_ctx(req);
- if (rc == 0)
- req->rq_resend = 1;
- } else {
- CERROR("req %p: server report gss error (%x/%x)\n",
- req, errhdr->gh_major, errhdr->gh_minor);
- rc = -EACCES;
- }
+ * returned in this case. */
+ CWARN("%s: server might lost the context, retrying\n",
+ errhdr->gh_major == GSS_S_NO_CONTEXT ? "NO_CONTEXT" : "BAD_SIG");
+
+ sptlrpc_cli_ctx_expire(ctx);
+
+ /* we need to replace the ctx right here, otherwise during
+ * resend we'll hit the logic in sptlrpc_req_refresh_ctx()
+ * which keeps the ctx with the RESEND flag, and we'll never
+ * get rid of this ctx. */
+ rc = sptlrpc_req_replace_dead_ctx(req);
+ if (rc == 0)
+ req->rq_resend = 1;
return rc;
}
{
struct gss_cli_ctx *gctx;
struct gss_header *ghdr, *reqhdr;
- struct lustre_msg *msg = req->rq_repbuf;
+ struct lustre_msg *msg = req->rq_repdata;
__u32 major;
- int rc = 0;
+ int pack_bulk, swabbed, rc = 0;
ENTRY;
LASSERT(req->rq_cli_ctx == ctx);
LASSERT(msg);
- req->rq_repdata_len = req->rq_nob_received;
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
/* special case for context negotiation, rq_repmsg/rq_replen actually
- * are not used currently.
- */
- if (req->rq_ctx_init) {
+ * are not used currently. but early replies are always treated normally */
+ if (req->rq_ctx_init && !req->rq_early) {
req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
req->rq_replen = msg->lm_buflens[1];
RETURN(0);
}
- if (msg->lm_bufcount < 3 || msg->lm_bufcount > 4) {
+ if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
RETURN(-EPROTO);
}
- ghdr = gss_swab_header(msg, 0);
+ swabbed = ptlrpc_rep_need_swab(req);
+
+ ghdr = gss_swab_header(msg, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(-EPROTO);
switch (ghdr->gh_proc) {
case PTLRPC_GSS_PROC_DATA:
+ pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
+
+ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
+ CERROR("%s bulk flag in reply\n",
+ req->rq_pack_bulk ? "missing" : "unexpected");
+ RETURN(-EPROTO);
+ }
+
if (ghdr->gh_seq != reqhdr->gh_seq) {
CERROR("seqnum %u mismatch, expect %u\n",
ghdr->gh_seq, reqhdr->gh_seq);
RETURN(-EPROTO);
}
- if (ghdr->gh_svc != PTLRPC_GSS_SVC_INTEGRITY) {
- CERROR("unexpected svc %d\n", ghdr->gh_svc);
+ if (ghdr->gh_svc != reqhdr->gh_svc) {
+ CERROR("svc %u mismatch, expect %u\n",
+ ghdr->gh_svc, reqhdr->gh_svc);
RETURN(-EPROTO);
}
- if (lustre_msg_swabbed(msg))
+ if (swabbed)
gss_header_swabber(ghdr);
- major = gss_verify_msg(msg, gctx->gc_mechctx);
- if (major != GSS_S_COMPLETE)
+ major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("failed to verify reply: %x\n", major);
RETURN(-EPERM);
+ }
- req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
- req->rq_replen = msg->lm_buflens[1];
+ if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
+ __u32 cksum;
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- if (msg->lm_bufcount < 4) {
+ cksum = crc32_le(!(__u32) 0,
+ lustre_msg_buf(msg, 1, 0),
+ lustre_msg_buflen(msg, 1));
+ if (cksum != msg->lm_cksum) {
+ CWARN("early reply checksum mismatch: "
+ "%08x != %08x\n", cksum, msg->lm_cksum);
+ RETURN(-EPROTO);
+ }
+ }
+
+ if (pack_bulk) {
+ /* bulk checksum is right after the lustre msg */
+ if (msg->lm_bufcount < 3) {
CERROR("Invalid reply bufcount %u\n",
msg->lm_bufcount);
RETURN(-EPROTO);
}
- /* bulk checksum is the second last segment */
- rc = bulk_sec_desc_unpack(msg, msg->lm_bufcount - 2);
+ rc = bulk_sec_desc_unpack(msg, 2, swabbed);
+ if (rc) {
+ CERROR("unpack bulk desc: %d\n", rc);
+ RETURN(rc);
+ }
}
+
+ req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
+ req->rq_replen = msg->lm_buflens[1];
break;
case PTLRPC_GSS_PROC_ERR:
- rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ if (req->rq_early) {
+ CERROR("server return error with early reply\n");
+ rc = -EPROTO;
+ } else {
+ rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ }
break;
default:
CERROR("unknown gss proc %d\n", ghdr->gh_proc);
struct ptlrpc_request *req)
{
struct gss_cli_ctx *gctx;
- rawobj_t msgobj, cipher_obj, micobj;
+ rawobj_t hdrobj, msgobj, token;
struct gss_header *ghdr;
- int buflens[3], wiresize, rc;
- __u32 major;
+ __u32 buflens[2], major;
+ int wiresize, rc;
ENTRY;
LASSERT(req->rq_clrbuf);
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
- /* close clear data length */
+ /* final clear data length */
req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
req->rq_clrbuf->lm_buflens);
/* calculate wire data length */
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_cli_payload(&gctx->gc_base, buflens[0], 0);
- buflens[2] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
- wiresize = lustre_msg_size_v2(3, buflens);
+ buflens[1] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
+ wiresize = lustre_msg_size_v2(2, buflens);
/* allocate wire buffer */
if (req->rq_pool) {
LASSERT(req->rq_reqbuf != req->rq_clrbuf);
LASSERT(req->rq_reqbuf_len >= wiresize);
} else {
- OBD_ALLOC(req->rq_reqbuf, wiresize);
+ OBD_ALLOC_LARGE(req->rq_reqbuf, wiresize);
if (!req->rq_reqbuf)
RETURN(-ENOMEM);
req->rq_reqbuf_len = wiresize;
}
- lustre_init_msg_v2(req->rq_reqbuf, 3, buflens, NULL);
- req->rq_reqbuf->lm_secflvr = req->rq_sec_flavor;
+ lustre_init_msg_v2(req->rq_reqbuf, 2, buflens, NULL);
+ req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
/* gss header */
ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
ghdr->gh_version = PTLRPC_GSS_VERSION;
+ ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
ghdr->gh_flags = 0;
ghdr->gh_proc = gctx->gc_proc;
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
- ghdr->gh_svc = PTLRPC_GSS_SVC_PRIVACY;
+ ghdr->gh_svc = SPTLRPC_SVC_PRIV;
ghdr->gh_handle.len = gctx->gc_handle.len;
memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
+ if (req->rq_pack_bulk)
+ ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
+ if (req->rq_pack_udesc)
+ ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
redo:
- /* header signature */
- msgobj.len = req->rq_reqbuf->lm_buflens[0];
- msgobj.data = lustre_msg_buf(req->rq_reqbuf, 0, 0);
- micobj.len = req->rq_reqbuf->lm_buflens[1];
- micobj.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);
+ ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
- major = lgss_get_mic(gctx->gc_mechctx, 1, &msgobj, &micobj);
- if (major != GSS_S_COMPLETE) {
- CERROR("priv: sign message error: %08x\n", major);
- GOTO(err_free, rc = -EPERM);
- }
- /* perhaps shrink msg has potential problem in re-packing???
- * ship a little bit more data is fine.
- lustre_shrink_msg(req->rq_reqbuf, 1, micobj.len, 0);
- */
-
- /* clear text */
+ /* buffer objects */
+ hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
+ hdrobj.data = (__u8 *) ghdr;
msgobj.len = req->rq_clrdata_len;
msgobj.data = (__u8 *) req->rq_clrbuf;
+ token.len = lustre_msg_buflen(req->rq_reqbuf, 1);
+ token.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);
- /* cipher text */
- cipher_obj.len = req->rq_reqbuf->lm_buflens[2];
- cipher_obj.data = lustre_msg_buf(req->rq_reqbuf, 2, 0);
-
- major = lgss_wrap(gctx->gc_mechctx, &msgobj, req->rq_clrbuf_len,
- &cipher_obj);
+ major = lgss_wrap(gctx->gc_mechctx, &hdrobj, &msgobj,
+ req->rq_clrbuf_len, &token);
if (major != GSS_S_COMPLETE) {
CERROR("priv: wrap message error: %08x\n", major);
GOTO(err_free, rc = -EPERM);
}
- LASSERT(cipher_obj.len <= buflens[2]);
-
- /* see explain in gss_cli_ctx_sign() */
- if (atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
- GSS_SEQ_REPACK_THRESHOLD) {
- int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+ LASSERT(token.len <= buflens[1]);
- gss_stat_oos_record_cli(behind);
- CWARN("req %p: %u behind, retry sealing\n", req, behind);
+ /* see explain in gss_cli_ctx_sign() */
+ if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+ GSS_SEQ_REPACK_THRESHOLD)) {
+ int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
- goto redo;
- }
+ gss_stat_oos_record_cli(behind);
+ CWARN("req %p: %u behind, retry sealing\n", req, behind);
- /* now set the final wire data length */
- req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 2,
- cipher_obj.len, 0);
+ ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ goto redo;
+ }
- RETURN(0);
+ /* now set the final wire data length */
+ req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
+ RETURN(0);
err_free:
- if (!req->rq_pool) {
- OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
- }
- RETURN(rc);
+ if (!req->rq_pool) {
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+ }
+ RETURN(rc);
}
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
{
struct gss_cli_ctx *gctx;
struct gss_header *ghdr;
- int msglen, rc;
+ struct lustre_msg *msg = req->rq_repdata;
+ int msglen, pack_bulk, swabbed, rc;
__u32 major;
ENTRY;
- LASSERT(req->rq_repbuf);
LASSERT(req->rq_cli_ctx == ctx);
+ LASSERT(req->rq_ctx_init == 0);
+ LASSERT(msg);
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ swabbed = ptlrpc_rep_need_swab(req);
- ghdr = gss_swab_header(req->rq_repbuf, 0);
+ ghdr = gss_swab_header(msg, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(-EPROTO);
switch (ghdr->gh_proc) {
case PTLRPC_GSS_PROC_DATA:
- if (lustre_msg_swabbed(req->rq_repbuf))
+ pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
+
+ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
+ CERROR("%s bulk flag in reply\n",
+ req->rq_pack_bulk ? "missing" : "unexpected");
+ RETURN(-EPROTO);
+ }
+
+ if (swabbed)
gss_header_swabber(ghdr);
- major = gss_unseal_msg(gctx->gc_mechctx, req->rq_repbuf,
- &msglen, req->rq_repbuf_len);
+ /* use rq_repdata_len as the buffer size, which assumes unseal
+ * doesn't need extra memory space. for precise control, we'd
+ * better calculate the actual buffer size as
+ * (repbuf_len - offset - repdata_len) */
+ major = gss_unseal_msg(gctx->gc_mechctx, msg,
+ &msglen, req->rq_repdata_len);
if (major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap reply: %x\n", major);
rc = -EPERM;
break;
}
- if (lustre_unpack_msg(req->rq_repbuf, msglen)) {
+ swabbed = __lustre_unpack_msg(msg, msglen);
+ if (swabbed < 0) {
CERROR("Failed to unpack after decryption\n");
RETURN(-EPROTO);
}
- req->rq_repdata_len = msglen;
- if (req->rq_repbuf->lm_bufcount < 1) {
+ if (msg->lm_bufcount < 1) {
CERROR("Invalid reply buffer: empty\n");
RETURN(-EPROTO);
}
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- if (req->rq_repbuf->lm_bufcount < 2) {
- CERROR("Too few request buffer segments %d\n",
- req->rq_repbuf->lm_bufcount);
+ if (pack_bulk) {
+ if (msg->lm_bufcount < 2) {
+ CERROR("bufcount %u: missing bulk sec desc\n",
+ msg->lm_bufcount);
RETURN(-EPROTO);
}
/* bulk checksum is the last segment */
- if (bulk_sec_desc_unpack(req->rq_repbuf,
- req->rq_repbuf->lm_bufcount-1))
+ if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
+ swabbed))
RETURN(-EPROTO);
}
- req->rq_repmsg = lustre_msg_buf(req->rq_repbuf, 0, 0);
- req->rq_replen = req->rq_repbuf->lm_buflens[0];
+ req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
+ req->rq_replen = msg->lm_buflens[0];
rc = 0;
break;
case PTLRPC_GSS_PROC_ERR:
- rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ if (req->rq_early) {
+ CERROR("server return error with early reply\n");
+ rc = -EPROTO;
+ } else {
+ rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ }
break;
default:
CERROR("unexpected proc %d\n", ghdr->gh_proc);
int gss_sec_create_common(struct gss_sec *gsec,
struct ptlrpc_sec_policy *policy,
struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx,
- __u32 flavor,
- unsigned long flags)
+ struct ptlrpc_svc_ctx *svcctx,
+ struct sptlrpc_flavor *sf)
{
struct ptlrpc_sec *sec;
LASSERT(imp);
- LASSERT(SEC_FLAVOR_POLICY(flavor) == SPTLRPC_POLICY_GSS);
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
- gsec->gs_mech = lgss_subflavor_to_mech(SEC_FLAVOR_SUB(flavor));
+ gsec->gs_mech = lgss_subflavor_to_mech(
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
if (!gsec->gs_mech) {
- CERROR("gss backend 0x%x not found\n", SEC_FLAVOR_SUB(flavor));
+ CERROR("gss backend 0x%x not found\n",
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
return -EOPNOTSUPP;
}
- spin_lock_init(&gsec->gs_lock);
+ spin_lock_init(&gsec->gs_lock);
gsec->gs_rvs_hdl = 0ULL;
- /* initialize upper ptlrpc_sec */
- sec = &gsec->gs_base;
- sec->ps_policy = policy;
- sec->ps_flavor = flavor;
- sec->ps_flags = flags;
- sec->ps_import = class_import_get(imp);
- sec->ps_lock = SPIN_LOCK_UNLOCKED;
- atomic_set(&sec->ps_busy, 0);
- INIT_LIST_HEAD(&sec->ps_gc_list);
-
- if (!ctx) {
+ /* initialize upper ptlrpc_sec */
+ sec = &gsec->gs_base;
+ sec->ps_policy = policy;
+ atomic_set(&sec->ps_refcount, 0);
+ atomic_set(&sec->ps_nctx, 0);
+ sec->ps_id = sptlrpc_get_next_secid();
+ sec->ps_flvr = *sf;
+ sec->ps_import = class_import_get(imp);
+ spin_lock_init(&sec->ps_lock);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
+
+ if (!svcctx) {
sec->ps_gc_interval = GSS_GC_INTERVAL;
- sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
} else {
- LASSERT(sec->ps_flags & PTLRPC_SEC_FL_REVERSE);
+ LASSERT(sec_is_reverse(sec));
/* never do gc on reverse sec */
sec->ps_gc_interval = 0;
- sec->ps_gc_next = 0;
}
- if (SEC_FLAVOR_SVC(flavor) == SPTLRPC_SVC_PRIV &&
- flags & PTLRPC_SEC_FL_BULK)
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
sptlrpc_enc_pool_add_user();
- CWARN("create %s%s@%p\n", (ctx ? "reverse " : ""),
- policy->sp_name, gsec);
+ CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
+ policy->sp_name, gsec);
return 0;
}
void gss_sec_destroy_common(struct gss_sec *gsec)
{
- struct ptlrpc_sec *sec = &gsec->gs_base;
- ENTRY;
+ struct ptlrpc_sec *sec = &gsec->gs_base;
+ ENTRY;
- LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_busy) == 0);
+ LASSERT(sec->ps_import);
+ LASSERT(atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(atomic_read(&sec->ps_nctx) == 0);
- if (gsec->gs_mech) {
- lgss_mech_put(gsec->gs_mech);
- gsec->gs_mech = NULL;
- }
+ if (gsec->gs_mech) {
+ lgss_mech_put(gsec->gs_mech);
+ gsec->gs_mech = NULL;
+ }
- class_import_put(sec->ps_import);
+ class_import_put(sec->ps_import);
- if (SEC_FLAVOR_SVC(sec->ps_flavor) == SPTLRPC_SVC_PRIV &&
- sec->ps_flags & PTLRPC_SEC_FL_BULK)
- sptlrpc_enc_pool_del_user();
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
+ sptlrpc_enc_pool_del_user();
- EXIT;
+ EXIT;
+}
+
+void gss_sec_kill(struct ptlrpc_sec *sec)
+{
+ sec->ps_dying = 1;
}
int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
struct ptlrpc_ctx_ops *ctxops,
struct vfs_cred *vcred)
{
- struct gss_cli_ctx *gctx = ctx2gctx(ctx);
-
- gctx->gc_win = 0;
- atomic_set(&gctx->gc_seq, 0);
-
- INIT_HLIST_NODE(&ctx->cc_hash);
- atomic_set(&ctx->cc_refcount, 0);
- ctx->cc_sec = sec;
- ctx->cc_ops = ctxops;
- ctx->cc_expire = 0;
- ctx->cc_flags = PTLRPC_CTX_NEW;
- ctx->cc_vcred = *vcred;
- spin_lock_init(&ctx->cc_lock);
- INIT_LIST_HEAD(&ctx->cc_req_list);
-
- /* take a ref on belonging sec */
- atomic_inc(&sec->ps_busy);
-
- CWARN("%s@%p: create ctx %p(%u->%s)\n",
- sec->ps_policy->sp_name, ctx->cc_sec,
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- return 0;
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+
+ gctx->gc_win = 0;
+ atomic_set(&gctx->gc_seq, 0);
+
+ INIT_HLIST_NODE(&ctx->cc_cache);
+ atomic_set(&ctx->cc_refcount, 0);
+ ctx->cc_sec = sec;
+ ctx->cc_ops = ctxops;
+ ctx->cc_expire = 0;
+ ctx->cc_flags = PTLRPC_CTX_NEW;
+ ctx->cc_vcred = *vcred;
+ spin_lock_init(&ctx->cc_lock);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
+
+ /* take a ref on belonging sec, balanced in ctx destroying */
+ atomic_inc(&sec->ps_refcount);
+ /* statistic only */
+ atomic_inc(&sec->ps_nctx);
+
+ CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ return 0;
}
/*
- * return 1 if the busy count of the sec dropped to zero, then usually caller
- * should destroy the sec too; otherwise return 0.
+ * return value:
+ * 1: the context has been taken care of by someone else
+ * 0: proceed to really destroy the context locally
*/
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx)
{
- struct gss_cli_ctx *gctx = ctx2gctx(ctx);
-
- LASSERT(ctx->cc_sec == sec);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_busy) > 0);
-
- if (gctx->gc_mechctx) {
- gss_do_ctx_fini_rpc(gctx);
- gss_cli_ctx_finalize(gctx);
- }
-
- CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
- sec->ps_policy->sp_name, ctx->cc_sec,
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
-
- if (atomic_dec_and_test(&sec->ps_busy)) {
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- return 1;
- }
-
- return 0;
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(ctx->cc_sec == sec);
+
+ /*
+ * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
+ * this is to avoid potential problems of the client side reverse svc ctx
+ * being mis-destroyed in various recovery scenarios. anyway the client can
+ * manage its reverse ctx well by associating it with its buddy ctx.
+ */
+ if (sec_is_reverse(sec))
+ ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
+
+ if (gctx->gc_mechctx) {
+ /* the final context fini rpc will use this ctx too, and it's
+ * asynchronous, finished by request_out_callback(). so we add a
+ * refcount; whoever finally drops the refcount to 0 is responsible
+ * for the rest of the destroy. */
+ atomic_inc(&ctx->cc_refcount);
+
+ gss_do_ctx_fini_rpc(gctx);
+ gss_cli_ctx_finalize(gctx);
+
+ if (!atomic_dec_and_test(&ctx->cc_refcount))
+ return 1;
+ }
+
+ if (sec_is_reverse(sec))
+ CWARN("reverse sec %p: destroy ctx %p\n",
+ ctx->cc_sec, ctx);
+ else
+ CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+
+ return 0;
}
static
-int gss_alloc_reqbuf_auth(struct ptlrpc_sec *sec,
+int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
struct ptlrpc_request *req,
- int msgsize)
+ int svc, int msgsize)
{
- struct sec_flavor_config *conf;
- int bufsize, txtsize;
- int buflens[5], bufcnt = 2;
+ int bufsize, txtsize;
+ int bufcnt = 2;
+ __u32 buflens[5];
ENTRY;
/*
+ * on-wire data layout:
* - gss header
* - lustre message
- * - user descriptor
- * - bulk sec descriptor
- * - signature
+ * - user descriptor (optional)
+ * - bulk sec descriptor (optional)
+ * - signature (optional)
+ * - svc == NULL: NULL
+ * - svc == AUTH: signature of gss header
+ * - svc == INTG: signature of all above
+ *
+ * if this is context negotiation, reserve fixed space
+ * at the last (signature) segment regardless of svc mode.
*/
+
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ txtsize = buflens[0];
+
buflens[1] = msgsize;
- txtsize = buflens[0] + buflens[1];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
- if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
+ if (req->rq_pack_udesc) {
buflens[bufcnt] = sptlrpc_current_user_desc_size();
- txtsize += buflens[bufcnt];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
bufcnt++;
}
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
- buflens[bufcnt] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
- req->rq_bulk_read);
- txtsize += buflens[bufcnt];
+ if (req->rq_pack_bulk) {
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 0, req->rq_bulk_read);
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
bufcnt++;
}
- buflens[bufcnt++] = req->rq_ctx_init ? GSS_CTX_INIT_MAX_LEN :
- gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+ if (req->rq_ctx_init)
+ buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
+ else if (svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
bufsize = lustre_msg_size_v2(bufcnt, buflens);
if (!req->rq_reqbuf) {
bufsize = size_roundup_power2(bufsize);
- OBD_ALLOC(req->rq_reqbuf, bufsize);
+ OBD_ALLOC_LARGE(req->rq_reqbuf, bufsize);
if (!req->rq_reqbuf)
RETURN(-ENOMEM);
}
lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
- req->rq_reqbuf->lm_secflvr = req->rq_sec_flavor;
+ req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
LASSERT(req->rq_reqmsg);
/* pack user desc here, later we might leave current user's process */
- if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
+ if (req->rq_pack_udesc)
sptlrpc_pack_user_desc(req->rq_reqbuf, 2);
RETURN(0);
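/*
 * Editorial illustration, not part of the patch: for a plain INTG request
 * without user descriptor or bulk, the buffer initialized above is
 *
 *      seg 0: PTLRPC_GSS_HEADER_SIZE                  gss header
 *      seg 1: msgsize                                 lustre message
 *      seg 2: gss_cli_payload(ctx, seg0 + seg1, 0)    signature (40 bytes)
 *
 * AUTH signs only the gss header, and NULL reserves no signature segment at
 * all, except for context negotiation, which always reserves the last seg.
 */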
struct ptlrpc_request *req,
int msgsize)
{
- struct sec_flavor_config *conf;
- int ibuflens[3], ibufcnt;
- int buflens[3];
- int clearsize, wiresize;
+ __u32 ibuflens[3], wbuflens[2];
+ int ibufcnt;
+ int clearsize, wiresize;
ENTRY;
LASSERT(req->rq_clrbuf == NULL);
/* Inner (clear) buffers
* - lustre message
- * - user descriptor
- * - bulk checksum
+ * - user descriptor (optional)
+ * - bulk checksum (optional)
*/
ibufcnt = 1;
ibuflens[0] = msgsize;
- if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
+ if (req->rq_pack_udesc)
ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
- ibuflens[ibufcnt++] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
- req->rq_bulk_read);
- }
+ if (req->rq_pack_bulk)
+ ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr, 0,
+ req->rq_bulk_read);
+
clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
/* to allow append padding during encryption */
clearsize += GSS_MAX_CIPHER_BLOCK;
/* Wrapper (wire) buffers
* - gss header
- * - signature of gss header
* - cipher text
*/
- buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
- buflens[2] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
- wiresize = lustre_msg_size_v2(3, buflens);
+ wbuflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ wbuflens[1] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
+ wiresize = lustre_msg_size_v2(2, wbuflens);
if (req->rq_pool) {
/* rq_reqbuf is preallocated */
memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);
/* if the pre-allocated buffer is big enough, we just pack
- * both clear buf & request buf in it, to avoid more alloc.
- */
+ * both clear buf & request buf in it, to avoid more alloc. */
if (clearsize + wiresize <= req->rq_reqbuf_len) {
req->rq_clrbuf =
(void *) (((char *) req->rq_reqbuf) + wiresize);
if (!req->rq_clrbuf) {
clearsize = size_roundup_power2(clearsize);
- OBD_ALLOC(req->rq_clrbuf, clearsize);
+ OBD_ALLOC_LARGE(req->rq_clrbuf, clearsize);
if (!req->rq_clrbuf)
RETURN(-ENOMEM);
}
lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);
- if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
+ if (req->rq_pack_udesc)
sptlrpc_pack_user_desc(req->rq_clrbuf, 1);
RETURN(0);
struct ptlrpc_request *req,
int msgsize)
{
- LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+
+ LASSERT(!req->rq_pack_bulk ||
(req->rq_bulk_read || req->rq_bulk_write));
- switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
- case SPTLRPC_SVC_NONE:
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
- return gss_alloc_reqbuf_auth(sec, req, msgsize);
+ case SPTLRPC_SVC_INTG:
+ return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
case SPTLRPC_SVC_PRIV:
return gss_alloc_reqbuf_priv(sec, req, msgsize);
default:
- LBUG();
+ LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ return 0;
}
- return 0;
}
void gss_free_reqbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
- int privacy;
+ int privacy;
ENTRY;
LASSERT(!req->rq_pool || req->rq_reqbuf);
- privacy = SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV;
+ privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
if (!req->rq_clrbuf)
goto release_reqbuf;
LASSERT(privacy);
LASSERT(req->rq_clrbuf_len);
- if (req->rq_pool &&
- req->rq_clrbuf >= req->rq_reqbuf &&
- (char *) req->rq_clrbuf <
+ if (req->rq_pool == NULL ||
+ req->rq_clrbuf < req->rq_reqbuf ||
+ (char *) req->rq_clrbuf >=
(char *) req->rq_reqbuf + req->rq_reqbuf_len)
- goto release_reqbuf;
+ OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
- OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
req->rq_clrbuf = NULL;
req->rq_clrbuf_len = 0;
release_reqbuf:
if (!req->rq_pool && req->rq_reqbuf) {
- OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
+ LASSERT(req->rq_reqbuf_len);
+
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
EXIT;
}
-int gss_alloc_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
+static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
{
- struct sec_flavor_config *conf;
- int privacy = (SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV);
- int bufsize, txtsize;
- int buflens[4], bufcnt;
- ENTRY;
+ bufsize = size_roundup_power2(bufsize);
- LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
- (req->rq_bulk_read || req->rq_bulk_write));
+ OBD_ALLOC_LARGE(req->rq_repbuf, bufsize);
+ if (!req->rq_repbuf)
+ return -ENOMEM;
- if (privacy) {
- bufcnt = 1;
- buflens[0] = msgsize;
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
- buflens[bufcnt++] = bulk_sec_desc_size(
- conf->sfc_bulk_csum, 0,
- req->rq_bulk_read);
- }
- txtsize = lustre_msg_size_v2(bufcnt, buflens);
- txtsize += GSS_MAX_CIPHER_BLOCK;
+ req->rq_repbuf_len = bufsize;
+ return 0;
+}
- bufcnt = 3;
- buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
- buflens[2] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
- } else {
- bufcnt = 2;
- buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = msgsize;
- txtsize = buflens[0] + buflens[1];
+static
+int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int svc, int msgsize)
+{
+ int txtsize;
+ __u32 buflens[4];
+ int bufcnt = 2;
+ int alloc_size;
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
- buflens[bufcnt] = bulk_sec_desc_size(
- conf->sfc_bulk_csum, 0,
- req->rq_bulk_read);
+ /*
+ * on-wire data layout:
+ * - gss header
+ * - lustre message
+ * - bulk sec descriptor (optional)
+ * - signature (optional)
+ * - svc == NULL: NULL
+ * - svc == AUTH: signature of gss header
+ * - svc == INTG: signature of all above
+ *
+ * if this is context negotiation, reserve fixed space
+ * at the last (signature) segment regardless of svc mode.
+ */
+
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ txtsize = buflens[0];
+
+ buflens[1] = msgsize;
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
+
+ if (req->rq_pack_bulk) {
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
+ if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
- bufcnt++;
- }
- buflens[bufcnt++] = req->rq_ctx_init ? GSS_CTX_INIT_MAX_LEN :
- gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+ bufcnt++;
}
- bufsize = lustre_msg_size_v2(bufcnt, buflens);
- bufsize = size_roundup_power2(bufsize);
+ if (req->rq_ctx_init)
+ buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
+ else if (svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
- OBD_ALLOC(req->rq_repbuf, bufsize);
- if (!req->rq_repbuf)
- return -ENOMEM;
+ alloc_size = lustre_msg_size_v2(bufcnt, buflens);
- req->rq_repbuf_len = bufsize;
- return 0;
+ /* add space for early reply */
+ alloc_size += gss_at_reply_off_integ;
+
+ return do_alloc_repbuf(req, alloc_size);
+}
+
+static
+int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ int txtsize;
+ __u32 buflens[2];
+ int bufcnt;
+ int alloc_size;
+
+ /* inner buffers */
+ bufcnt = 1;
+ buflens[0] = msgsize;
+
+ if (req->rq_pack_bulk)
+ buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
+ txtsize = lustre_msg_size_v2(bufcnt, buflens);
+ txtsize += GSS_MAX_CIPHER_BLOCK;
+
+ /* wrapper buffers */
+ bufcnt = 2;
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
+
+ alloc_size = lustre_msg_size_v2(bufcnt, buflens);
+ /* add space for early reply */
+ alloc_size += gss_at_reply_off_priv;
+
+ return do_alloc_repbuf(req, alloc_size);
+}
+
+int gss_alloc_repbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ ENTRY;
+
+ LASSERT(!req->rq_pack_bulk ||
+ (req->rq_bulk_read || req->rq_bulk_write));
+
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
+ case SPTLRPC_SVC_PRIV:
+ return gss_alloc_repbuf_priv(sec, req, msgsize);
+ default:
+ LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ return 0;
+ }
}
void gss_free_repbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
- OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
+ OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
req->rq_repbuf = NULL;
req->rq_repbuf_len = 0;
+ req->rq_repdata = NULL;
+ req->rq_repdata_len = 0;
}
static int get_enlarged_msgsize(struct lustre_msg *msg,
return newmsg_size;
}
-static inline int msg_last_seglen(struct lustre_msg *msg)
-{
- return msg->lm_buflens[msg->lm_bufcount - 1];
-}
-
static
-int gss_enlarge_reqbuf_auth(struct ptlrpc_sec *sec,
+int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
struct ptlrpc_request *req,
+ int svc,
int segment, int newsize)
{
struct lustre_msg *newbuf;
- int txtsize, sigsize, i;
+ int txtsize, sigsize = 0, i;
int newmsg_size, newbuf_size;
/*
- * embedded msg is at seg 1; signature is at the last seg
+ * gss header is at seg 0;
+ * embedded msg is at seg 1;
+ * signature (if any) is at the last seg
*/
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);
- /* compute new embedded msg size */
+ /* 1. compute new embedded msg size */
newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);
- /* compute new wrapper msg size */
- for (txtsize = 0, i = 0; i < req->rq_reqbuf->lm_bufcount; i++)
- txtsize += req->rq_reqbuf->lm_buflens[i];
- txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
+ /* 2. compute new wrapper msg size */
+ if (svc == SPTLRPC_SVC_NULL) {
+ /* no signature, get size directly */
+ newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
+ 1, newmsg_size);
+ } else {
+ txtsize = req->rq_reqbuf->lm_buflens[0];
+
+ if (svc == SPTLRPC_SVC_INTG) {
+ for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
+ txtsize += req->rq_reqbuf->lm_buflens[i];
+ txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
+ }
+
+ sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+ LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
- sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
- LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
- newbuf_size = get_enlarged_msgsize2(req->rq_reqbuf, 1, newmsg_size,
- req->rq_reqbuf->lm_bufcount - 1,
- sigsize);
+ newbuf_size = get_enlarged_msgsize2(
+ req->rq_reqbuf,
+ 1, newmsg_size,
+ msg_last_segidx(req->rq_reqbuf),
+ sigsize);
+ }
/* request from pool should always have enough buffer */
LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
if (req->rq_reqbuf_len < newbuf_size) {
newbuf_size = size_roundup_power2(newbuf_size);
- OBD_ALLOC(newbuf, newbuf_size);
+ OBD_ALLOC_LARGE(newbuf, newbuf_size);
if (newbuf == NULL)
RETURN(-ENOMEM);
+ /* Must lock this, so that an otherwise unprotected change of
+ * rq_reqmsg does not race with parallel imp_replay_list
+ * traversing threads. See LU-3333.
+ * This is a bandaid at best; we really need to deal with this
+ * in the request enlarging code, before the unpacking that is
+ * already there */
+ if (req->rq_import)
+ spin_lock(&req->rq_import->imp_lock);
+
memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
- OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
req->rq_reqbuf = newbuf;
req->rq_reqbuf_len = newbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
+
+ if (req->rq_import)
+ spin_unlock(&req->rq_import->imp_lock);
}
- _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
- req->rq_reqbuf->lm_bufcount - 1, sigsize);
+ /* do enlargement, from wrapper to embedded, from end to begin */
+ if (svc != SPTLRPC_SVC_NULL)
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
+ msg_last_segidx(req->rq_reqbuf),
+ sigsize);
+
_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
{
struct lustre_msg *newclrbuf;
int newmsg_size, newclrbuf_size, newcipbuf_size;
- int buflens[3];
+ __u32 buflens[3];
/*
* embedded msg is at seg 0 of clear buffer;
buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
newcipbuf_size = lustre_msg_size_v2(3, buflens);
- /*
- * handle the case that we put both clear buf and cipher buf into
- * pre-allocated single buffer.
- */
+ /* handle the case that we put both clear buf and cipher buf into
+ * pre-allocated single buffer. */
if (unlikely(req->rq_pool) &&
req->rq_clrbuf >= req->rq_reqbuf &&
(char *) req->rq_clrbuf <
(char *) req->rq_reqbuf + req->rq_reqbuf_len) {
- /*
- * it couldn't be better we still fit into the
- * pre-allocated buffer.
- */
+ /* it couldn't be better if we still fit into the
+ * pre-allocated buffer. */
if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
void *src, *dst;
+ if (req->rq_import)
+ spin_lock(&req->rq_import->imp_lock);
/* move clear text backward. */
src = req->rq_clrbuf;
dst = (char *) req->rq_reqbuf + newcipbuf_size;
req->rq_clrbuf = (struct lustre_msg *) dst;
req->rq_clrbuf_len = newclrbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+
+ if (req->rq_import)
+ spin_unlock(&req->rq_import->imp_lock);
} else {
- /*
- * sadly we have to split out the clear buffer
- */
+ /* sadly we have to split out the clear buffer */
LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
LASSERT(req->rq_clrbuf_len < newclrbuf_size);
}
if (req->rq_clrbuf_len < newclrbuf_size) {
newclrbuf_size = size_roundup_power2(newclrbuf_size);
- OBD_ALLOC(newclrbuf, newclrbuf_size);
+ OBD_ALLOC_LARGE(newclrbuf, newclrbuf_size);
if (newclrbuf == NULL)
RETURN(-ENOMEM);
+ /* Must lock this, so that an otherwise unprotected change of
+ * rq_reqmsg does not race with parallel imp_replay_list
+ * traversing threads. See LU-3333.
+ * This is a bandaid at best; we really need to deal with this
+ * in the request enlarging code, before the unpacking that is
+ * already there */
+ if (req->rq_import)
+ spin_lock(&req->rq_import->imp_lock);
+
memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
if (req->rq_reqbuf == NULL ||
req->rq_clrbuf < req->rq_reqbuf ||
(char *) req->rq_clrbuf >=
(char *) req->rq_reqbuf + req->rq_reqbuf_len) {
- OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
+ OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
}
req->rq_clrbuf = newclrbuf;
req->rq_clrbuf_len = newclrbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+
+ if (req->rq_import)
+ spin_unlock(&req->rq_import->imp_lock);
}
_sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
struct ptlrpc_request *req,
int segment, int newsize)
{
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+
LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
- switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
- return gss_enlarge_reqbuf_auth(sec, req, segment, newsize);
+ case SPTLRPC_SVC_INTG:
+ return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
case SPTLRPC_SVC_PRIV:
return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
default:
- LASSERTF(0, "bad flavor %x\n", req->rq_sec_flavor);
+ LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
return 0;
}
}
static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
- atomic_inc(&grctx->src_base.sc_refcount);
+ LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ atomic_inc(&grctx->src_base.sc_refcount);
}
static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
- if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
- gss_svc_reqctx_free(grctx);
+ if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+ gss_svc_reqctx_free(grctx);
}
static
int gss_svc_sign(struct ptlrpc_request *req,
struct ptlrpc_reply_state *rs,
- struct gss_svc_reqctx *grctx)
+ struct gss_svc_reqctx *grctx,
+ __u32 svc)
{
+ __u32 flags = 0;
int rc;
ENTRY;
LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
- /* embedded lustre_msg might have been shrinked */
+ /* embedded lustre_msg might have been shrunk */
if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
+ if (req->rq_pack_bulk)
+ flags |= LUSTRE_GSS_PACK_BULK;
+
rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
- PTLRPC_GSS_PROC_DATA, grctx->src_wirectx.gw_seq,
- NULL);
+ LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
+ grctx->src_wirectx.gw_seq, svc, NULL);
if (rc < 0)
RETURN(rc);
rs->rs_repdata_len = rc;
+
+ if (likely(req->rq_packed_final)) {
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
+ req->rq_reply_off = gss_at_reply_off_integ;
+ else
+ req->rq_reply_off = 0;
+ } else {
+ if (svc == SPTLRPC_SVC_NULL)
+ rs->rs_repbuf->lm_cksum = crc32_le(!(__u32) 0,
+ lustre_msg_buf(rs->rs_repbuf, 1, 0),
+ lustre_msg_buflen(rs->rs_repbuf, 1));
+ req->rq_reply_off = 0;
+ }
+
RETURN(0);
}
int rc;
ENTRY;
- //OBD_FAIL_RETURN(OBD_FAIL_SVCGSS_ERR_NOTIFY|OBD_FAIL_ONCE, -EINVAL);
+ //if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
+ // RETURN(-EINVAL);
grctx->src_err_notify = 1;
grctx->src_reserve_len = 0;
- rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
+ rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
if (rc) {
CERROR("could not pack reply, err %d\n", rc);
RETURN(rc);
rawobj_t uuid_obj, rvs_hdl, in_token;
__u32 lustre_svc;
__u32 *secdata, seclen;
- int rc;
+ int swabbed, rc;
ENTRY;
CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
libcfs_nid2str(req->rq_peer.nid));
+ req->rq_ctx_init = 1;
+
+ if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
+ CERROR("unexpected bulk flag\n");
+ RETURN(SECSVC_DROP);
+ }
+
if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
CERROR("proc %u: invalid handle length %u\n",
gw->gw_proc, gw->gw_handle.len);
RETURN(SECSVC_DROP);
}
+ swabbed = ptlrpc_req_need_swab(req);
+
/* ctx initiate payload is in last segment */
secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
seclen -= 4;
/* extract target uuid, note this code is somewhat fragile
- * because touched internal structure of obd_uuid
- */
+ * because it touches the internal structure of obd_uuid */
if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
CERROR("failed to extract target uuid\n");
RETURN(SECSVC_DROP);
uuid = (struct obd_uuid *) uuid_obj.data;
target = class_uuid2obd(uuid);
if (!target || target->obd_stopping || !target->obd_set_up) {
- CERROR("target '%s' is not available for context init (%s)",
+ CERROR("target '%s' is not available for context init (%s)\n",
uuid->uuid, target == NULL ? "no target" :
(target->obd_stopping ? "stopping" : "not set up"));
RETURN(SECSVC_DROP);
if (rc != SECSVC_OK)
RETURN(rc);
- if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
+ if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
+ grctx->src_ctx->gsc_usr_root)
+ CWARN("create svc ctx %p: user from %s authenticated as %s\n",
+ grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
+ grctx->src_ctx->gsc_usr_mds ? "mds" :
+ (grctx->src_ctx->gsc_usr_oss ? "oss" : "root"));
+ else
+ CWARN("create svc ctx %p: accept user %u from %s\n",
+ grctx->src_ctx, grctx->src_ctx->gsc_uid,
+ libcfs_nid2str(req->rq_peer.nid));
+
+ if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (reqbuf->lm_bufcount < 4) {
CERROR("missing user descriptor\n");
RETURN(SECSVC_DROP);
}
- if (sptlrpc_unpack_user_desc(reqbuf, 2)) {
+ if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(SECSVC_DROP);
}
+
+ req->rq_pack_udesc = 1;
req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
}
*/
static
int gss_svc_verify_request(struct ptlrpc_request *req,
- struct gss_svc_ctx *gctx,
+ struct gss_svc_reqctx *grctx,
struct gss_wire_ctx *gw,
__u32 *major)
{
+ struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
int offset = 2;
+ int swabbed;
ENTRY;
*major = GSS_S_COMPLETE;
- if (msg->lm_bufcount < 3) {
+ if (msg->lm_bufcount < 2) {
CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
RETURN(-EINVAL);
}
+ if (gw->gw_svc == SPTLRPC_SVC_NULL)
+ goto verified;
+
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
*major = GSS_S_DUPLICATE_TOKEN;
RETURN(-EACCES);
}
- *major = gss_verify_msg(msg, gctx->gsc_mechctx);
- if (*major != GSS_S_COMPLETE)
+ *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to verify request: %x\n", *major);
RETURN(-EACCES);
+ }
- if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
+ if (gctx->gsc_reverse == 0 &&
+ gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
*major = GSS_S_DUPLICATE_TOKEN;
RETURN(-EACCES);
}
+verified:
+ swabbed = ptlrpc_req_need_swab(req);
+
/* user descriptor */
- if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
- if (msg->lm_bufcount < (offset + 1 + 1)) {
+ if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
+ if (msg->lm_bufcount < (offset + 1)) {
CERROR("no user desc included\n");
RETURN(-EINVAL);
}
- if (sptlrpc_unpack_user_desc(msg, offset)) {
+ if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(-EINVAL);
}
+ req->rq_pack_udesc = 1;
req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
offset++;
}
- /* check bulk cksum data */
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- if (msg->lm_bufcount < (offset + 1 + 1)) {
- CERROR("no bulk checksum included\n");
+ /* check bulk_sec_desc data */
+ if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
+ if (msg->lm_bufcount < (offset + 1)) {
+ CERROR("missing bulk sec descriptor\n");
RETURN(-EINVAL);
}
- if (bulk_sec_desc_unpack(msg, offset))
+ if (bulk_sec_desc_unpack(msg, offset, swabbed))
RETURN(-EINVAL);
+
+ req->rq_pack_bulk = 1;
+ grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
+ grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
}
req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
static
int gss_svc_unseal_request(struct ptlrpc_request *req,
- struct gss_svc_ctx *gctx,
+ struct gss_svc_reqctx *grctx,
struct gss_wire_ctx *gw,
__u32 *major)
{
+ struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
- int msglen, offset = 1;
+ int swabbed, msglen, offset = 1;
ENTRY;
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
*major = gss_unseal_msg(gctx->gsc_mechctx, msg,
&msglen, req->rq_reqdata_len);
- if (*major != GSS_S_COMPLETE)
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap request: %x\n", *major);
RETURN(-EACCES);
+ }
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
RETURN(-EACCES);
}
- if (lustre_unpack_msg(msg, msglen)) {
+ swabbed = __lustre_unpack_msg(msg, msglen);
+ if (swabbed < 0) {
CERROR("Failed to unpack after decryption\n");
RETURN(-EINVAL);
}
RETURN(-EINVAL);
}
- if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
+ if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (msg->lm_bufcount < offset + 1) {
CERROR("no user descriptor included\n");
RETURN(-EINVAL);
}
- if (sptlrpc_unpack_user_desc(msg, offset)) {
+ if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(-EINVAL);
}
+ req->rq_pack_udesc = 1;
req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
offset++;
}
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
+ if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
if (msg->lm_bufcount < offset + 1) {
CERROR("no bulk checksum included\n");
RETURN(-EINVAL);
}
- if (bulk_sec_desc_unpack(msg, offset))
+ if (bulk_sec_desc_unpack(msg, offset, swabbed))
RETURN(-EINVAL);
+
+ req->rq_pack_bulk = 1;
+ grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
+ grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
}
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
}
switch (gw->gw_svc) {
- case PTLRPC_GSS_SVC_INTEGRITY:
- rc = gss_svc_verify_request(req, grctx->src_ctx, gw, &major);
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ rc = gss_svc_verify_request(req, grctx, gw, &major);
break;
- case PTLRPC_GSS_SVC_PRIVACY:
- rc = gss_svc_unseal_request(req, grctx->src_ctx, gw, &major);
+ case SPTLRPC_SVC_PRIV:
+ rc = gss_svc_unseal_request(req, grctx, gw, &major);
break;
default:
CERROR("unsupported gss service %d\n", gw->gw_svc);
if (rc == 0)
RETURN(SECSVC_OK);
- CERROR("svc %u failed: major 0x%08x: ctx %p(%u->%s)\n",
- gw->gw_svc, major, grctx->src_ctx, grctx->src_ctx->gsc_uid,
- libcfs_nid2str(req->rq_peer.nid));
+ CERROR("svc %u failed: major 0x%08x: req xid "LPU64" ctx %p idx "
+ LPX64"(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
+ grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
+ grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
error:
- /*
- * we only notify client in case of NO_CONTEXT/BAD_SIG, which
- * might happen after server reboot, to allow recovery.
- */
+ /* we only notify client in case of NO_CONTEXT/BAD_SIG, which
+ * might happen after server reboot, to allow recovery. */
if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
gss_pack_err_notify(req, major, 0) == 0)
RETURN(SECSVC_COMPLETE);
struct gss_wire_ctx *gw)
{
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- int replen = sizeof(struct ptlrpc_body);
__u32 major;
ENTRY;
+ req->rq_ctx_fini = 1;
+ req->rq_no_reply = 1;
+
grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
if (!grctx->src_ctx) {
- CWARN("invalid gss context handle for destroy.\n");
+ CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
RETURN(SECSVC_DROP);
}
- if (gw->gw_svc != PTLRPC_GSS_SVC_INTEGRITY) {
+ if (gw->gw_svc != SPTLRPC_SVC_INTG) {
CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
RETURN(SECSVC_DROP);
}
- if (gss_svc_verify_request(req, grctx->src_ctx, gw, &major))
+ if (gss_svc_verify_request(req, grctx, gw, &major))
RETURN(SECSVC_DROP);
- if (lustre_pack_reply_v2(req, 1, &replen, NULL))
- RETURN(SECSVC_DROP);
-
- CWARN("gss svc destroy ctx %p(%u->%s)\n", grctx->src_ctx,
+ CWARN("destroy svc ctx %p idx "LPX64" (%u->%s)\n",
+ grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
gss_svc_upcall_destroy_ctx(grctx->src_ctx);
- if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
+ if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (req->rq_reqbuf->lm_bufcount < 4) {
CERROR("missing user descriptor, ignore it\n");
RETURN(SECSVC_OK);
}
- if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2)) {
+ if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
+ ptlrpc_req_need_swab(req))) {
CERROR("Mal-formed user descriptor, ignore it\n");
RETURN(SECSVC_OK);
}
+
+ req->rq_pack_udesc = 1;
req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
}
struct gss_header *ghdr;
struct gss_svc_reqctx *grctx;
struct gss_wire_ctx *gw;
- int rc;
+ int swabbed, rc;
ENTRY;
LASSERT(req->rq_reqbuf);
RETURN(SECSVC_DROP);
}
- ghdr = gss_swab_header(req->rq_reqbuf, 0);
+ swabbed = ptlrpc_req_need_swab(req);
+
+ ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(SECSVC_DROP);
RETURN(SECSVC_DROP);
}
+ req->rq_sp_from = ghdr->gh_sp;
+
/* alloc grctx data */
OBD_ALLOC_PTR(grctx);
- if (!grctx) {
- CERROR("fail to alloc svc reqctx\n");
+ if (!grctx)
RETURN(SECSVC_DROP);
- }
- grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
- atomic_set(&grctx->src_base.sc_refcount, 1);
- req->rq_svc_ctx = &grctx->src_base;
- gw = &grctx->src_wirectx;
+
+ grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
+ atomic_set(&grctx->src_base.sc_refcount, 1);
+ req->rq_svc_ctx = &grctx->src_base;
+ gw = &grctx->src_wirectx;
/* save wire context */
+ gw->gw_flags = ghdr->gh_flags;
gw->gw_proc = ghdr->gh_proc;
gw->gw_seq = ghdr->gh_seq;
gw->gw_svc = ghdr->gh_svc;
rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);
/* keep original wire header which subject to checksum verification */
- if (lustre_msg_swabbed(req->rq_reqbuf))
+ if (swabbed)
gss_header_swabber(ghdr);
switch(ghdr->gh_proc) {
req->rq_auth_gss = 1;
req->rq_auth_remote = grctx->src_ctx->gsc_remote;
req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
+ req->rq_auth_usr_ost = grctx->src_ctx->gsc_usr_oss;
req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
req->rq_auth_uid = grctx->src_ctx->gsc_uid;
req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
}
static inline
-int gss_svc_payload(struct gss_svc_reqctx *grctx, int msgsize, int privacy)
+int gss_svc_payload(struct gss_svc_reqctx *grctx, int early,
+ int msgsize, int privacy)
{
- if (gss_svc_reqctx_is_special(grctx))
+ /* an early reply should be treated normally, but it actually shares
+ * the same ctx with the original request, so in this case we should
+ * ignore the special ctx's special flags */
+ if (early == 0 && gss_svc_reqctx_is_special(grctx))
return grctx->src_reserve_len;
- return gss_estimate_payload(NULL, msgsize, privacy);
+ return gss_mech_payload(NULL, msgsize, privacy);
+}
+
+static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
+ struct sptlrpc_flavor *flvr,
+ int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
+ if (read) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_mech_payload(NULL, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_mech_payload(NULL, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ LBUG();
+ }
+ }
+
+ return payload;
}
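+
+/*
+ * Note (editorial sketch, not authoritative): the extra gss_mech_payload()
+ * above is reserved only for bulk reads, where the reply-side
+ * ptlrpc_bulk_sec_desc is presumably expected to carry the server's
+ * integrity or privacy token over the bulk data it sends back; for bulk
+ * writes the token arrives with the request, so only the fixed descriptor
+ * size is needed.
+ */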
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
struct gss_svc_reqctx *grctx;
struct ptlrpc_reply_state *rs;
- struct ptlrpc_bulk_sec_desc *bsd;
- int privacy;
- int ibuflens[2], ibufcnt = 0;
- int buflens[4], bufcnt;
+ int early, privacy, svc, bsd_off = 0;
+ __u32 ibuflens[2], buflens[4];
+ int ibufcnt = 0, bufcnt;
int txtsize, wmsg_size, rs_size;
ENTRY;
LASSERT(msglen % 8 == 0);
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) &&
- !req->rq_bulk_read && !req->rq_bulk_write) {
+ if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
CERROR("client request bulk sec on non-bulk rpc\n");
RETURN(-EPROTO);
}
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ early = (req->rq_packed_final == 0);
+
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- if (gss_svc_reqctx_is_special(grctx))
+ if (!early && gss_svc_reqctx_is_special(grctx))
privacy = 0;
else
- privacy = (SEC_FLAVOR_SVC(req->rq_sec_flavor) ==
- SPTLRPC_SVC_PRIV);
+ privacy = (svc == SPTLRPC_SVC_PRIV);
if (privacy) {
- /* Inner buffer */
+ /* inner clear buffers */
ibufcnt = 1;
ibuflens[0] = msglen;
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
- bsd = lustre_msg_buf(req->rq_reqbuf,
- req->rq_reqbuf->lm_bufcount - 1,
- sizeof(*bsd));
+ if (req->rq_pack_bulk) {
+ LASSERT(grctx->src_reqbsd);
- ibuflens[ibufcnt++] = bulk_sec_desc_size(
- bsd->bsd_csum_alg, 0,
+ bsd_off = ibufcnt;
+ ibuflens[ibufcnt++] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
req->rq_bulk_read);
}
txtsize += GSS_MAX_CIPHER_BLOCK;
/* wrapper buffer */
- bufcnt = 3;
+ bufcnt = 2;
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_svc_payload(grctx, buflens[0], 0);
- buflens[2] = gss_svc_payload(grctx, txtsize, 1);
+ buflens[1] = gss_svc_payload(grctx, early, txtsize, 1);
} else {
bufcnt = 2;
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
buflens[1] = msglen;
- txtsize = buflens[0] + buflens[1];
- if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
- LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
- bsd = lustre_msg_buf(req->rq_reqbuf,
- req->rq_reqbuf->lm_bufcount - 2,
- sizeof(*bsd));
+ txtsize = buflens[0];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
+
+ if (req->rq_pack_bulk) {
+ LASSERT(grctx->src_reqbsd);
- buflens[bufcnt] = bulk_sec_desc_size(
- bsd->bsd_csum_alg, 0,
+ bsd_off = bufcnt;
+ buflens[bufcnt] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
req->rq_bulk_read);
- txtsize += buflens[bufcnt];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
bufcnt++;
}
- buflens[bufcnt++] = gss_svc_payload(grctx, txtsize, 0);
+
+ if ((!early && gss_svc_reqctx_is_special(grctx)) ||
+ svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_svc_payload(grctx, early,
+ txtsize, 0);
}
wmsg_size = lustre_msg_size_v2(bufcnt, buflens);
/* pre-allocated */
LASSERT(rs->rs_size >= rs_size);
} else {
- OBD_ALLOC(rs, rs_size);
+ OBD_ALLOC_LARGE(rs, rs_size);
if (rs == NULL)
RETURN(-ENOMEM);
rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
rs->rs_repbuf_len = wmsg_size;
+ /* initialize the buffer */
if (privacy) {
lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
} else {
lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
- rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;
+ rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;
- rs->rs_msg = (struct lustre_msg *)
- lustre_msg_buf(rs->rs_repbuf, 1, 0);
+ rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
+ }
+
+ if (bsd_off) {
+ grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
+ grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
+ bsd_off);
}
gss_svc_reqctx_addref(grctx);
RETURN(0);
}
-static
-int gss_svc_seal(struct ptlrpc_request *req,
- struct ptlrpc_reply_state *rs,
- struct gss_svc_reqctx *grctx)
+static int gss_svc_seal(struct ptlrpc_request *req,
+ struct ptlrpc_reply_state *rs,
+ struct gss_svc_reqctx *grctx)
{
struct gss_svc_ctx *gctx = grctx->src_ctx;
- rawobj_t msgobj, cipher_obj, micobj;
+ rawobj_t hdrobj, msgobj, token;
struct gss_header *ghdr;
- __u8 *cipher_buf;
- int cipher_buflen, buflens[3];
+ __u8 *token_buf;
+ int token_buflen;
+ __u32 buflens[2], major;
int msglen, rc;
- __u32 major;
ENTRY;
- /* embedded lustre_msg might have been shrinked */
- if (req->rq_replen != rs->rs_repbuf->lm_buflens[0])
- lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
-
- /* clear data length */
- msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
- rs->rs_repbuf->lm_buflens);
-
- /* clear text */
- msgobj.len = msglen;
- msgobj.data = (__u8 *) rs->rs_repbuf;
+ /* get clear data length. note embedded lustre_msg might
+ * have been shrunk */
+ if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
+ msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
+ else
+ msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
+ rs->rs_repbuf->lm_buflens);
+
+ /* temporarily use tail of buffer to hold gss header data */
+ LASSERT(msglen + PTLRPC_GSS_HEADER_SIZE <= rs->rs_repbuf_len);
+ ghdr = (struct gss_header *) ((char *) rs->rs_repbuf +
+ rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE);
+ ghdr->gh_version = PTLRPC_GSS_VERSION;
+ ghdr->gh_sp = LUSTRE_SP_ANY;
+ ghdr->gh_flags = 0;
+ ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
+ ghdr->gh_seq = grctx->src_wirectx.gw_seq;
+ ghdr->gh_svc = SPTLRPC_SVC_PRIV;
+ ghdr->gh_handle.len = 0;
+ if (req->rq_pack_bulk)
+ ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
/* allocate temporary cipher buffer */
- cipher_buflen = gss_estimate_payload(gctx->gsc_mechctx, msglen, 1);
- OBD_ALLOC(cipher_buf, cipher_buflen);
- if (!cipher_buf)
+ token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
+ OBD_ALLOC_LARGE(token_buf, token_buflen);
+ if (token_buf == NULL)
RETURN(-ENOMEM);
- cipher_obj.len = cipher_buflen;
- cipher_obj.data = cipher_buf;
+ hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
+ hdrobj.data = (__u8 *) ghdr;
+ msgobj.len = msglen;
+ msgobj.data = (__u8 *) rs->rs_repbuf;
+ token.len = token_buflen;
+ token.data = token_buf;
- major = lgss_wrap(gctx->gsc_mechctx, &msgobj, rs->rs_repbuf_len,
- &cipher_obj);
+ major = lgss_wrap(gctx->gsc_mechctx, &hdrobj, &msgobj,
+ rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE, &token);
if (major != GSS_S_COMPLETE) {
- CERROR("priv: wrap message error: %08x\n", major);
+ CERROR("wrap message error: %08x\n", major);
GOTO(out_free, rc = -EPERM);
}
- LASSERT(cipher_obj.len <= cipher_buflen);
-
- /* now the real wire data */
- buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_estimate_payload(gctx->gsc_mechctx, buflens[0], 0);
- buflens[2] = cipher_obj.len;
+ LASSERT(token.len <= token_buflen);
- LASSERT(lustre_msg_size_v2(3, buflens) <= rs->rs_repbuf_len);
- lustre_init_msg_v2(rs->rs_repbuf, 3, buflens, NULL);
- rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;
+ /* we are about to overwrite data at rs->rs_repbuf; nullify pointers
+ * into it to catch further illegal usage. */
+ if (req->rq_pack_bulk) {
+ grctx->src_repbsd = NULL;
+ grctx->src_repbsd_size = 0;
+ }
- /* gss header */
- ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
- ghdr->gh_version = PTLRPC_GSS_VERSION;
- ghdr->gh_flags = 0;
- ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
- ghdr->gh_seq = grctx->src_wirectx.gw_seq;
- ghdr->gh_svc = PTLRPC_GSS_SVC_PRIVACY;
- ghdr->gh_handle.len = 0;
+ /* now fill the actual wire data
+ * - gss header
+ * - gss token
+ */
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = token.len;
- /* header signature */
- msgobj.len = rs->rs_repbuf->lm_buflens[0];
- msgobj.data = lustre_msg_buf(rs->rs_repbuf, 0, 0);
- micobj.len = rs->rs_repbuf->lm_buflens[1];
- micobj.data = lustre_msg_buf(rs->rs_repbuf, 1, 0);
+ rs->rs_repdata_len = lustre_msg_size_v2(2, buflens);
+ LASSERT(rs->rs_repdata_len <= rs->rs_repbuf_len);
- major = lgss_get_mic(gctx->gsc_mechctx, 1, &msgobj, &micobj);
- if (major != GSS_S_COMPLETE) {
- CERROR("priv: sign message error: %08x\n", major);
- GOTO(out_free, rc = -EPERM);
- }
- lustre_shrink_msg(rs->rs_repbuf, 1, micobj.len, 0);
+ lustre_init_msg_v2(rs->rs_repbuf, 2, buflens, NULL);
+ rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;
- /* cipher token */
- memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0),
- cipher_obj.data, cipher_obj.len);
+ memcpy(lustre_msg_buf(rs->rs_repbuf, 0, 0), ghdr,
+ PTLRPC_GSS_HEADER_SIZE);
+ memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);
- rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
- cipher_obj.len, 0);
+ /* reply offset */
+ if (req->rq_packed_final &&
+ (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))
+ req->rq_reply_off = gss_at_reply_off_priv;
+ else
+ req->rq_reply_off = 0;
/* to catch upper layer's further access */
rs->rs_msg = NULL;
rc = 0;
out_free:
- OBD_FREE(cipher_buf, cipher_buflen);
+ OBD_FREE_LARGE(token_buf, token_buflen);
RETURN(rc);
}
{
struct ptlrpc_reply_state *rs = req->rq_reply_state;
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- struct gss_wire_ctx *gw;
- int rc;
+ struct gss_wire_ctx *gw = &grctx->src_wirectx;
+ int early, rc;
ENTRY;
- if (gss_svc_reqctx_is_special(grctx))
+ early = (req->rq_packed_final == 0);
+
+ if (!early && gss_svc_reqctx_is_special(grctx)) {
+ LASSERT(rs->rs_repdata_len != 0);
+
+ req->rq_reply_off = gss_at_reply_off_integ;
RETURN(0);
+ }
- gw = &grctx->src_wirectx;
- if (gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
+ /* early reply could happen in many cases */
+ if (!early &&
+ gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
CERROR("proc %d not support\n", gw->gw_proc);
RETURN(-EINVAL);
LASSERT(grctx->src_ctx);
switch (gw->gw_svc) {
- case PTLRPC_GSS_SVC_INTEGRITY:
- rc = gss_svc_sign(req, rs, grctx);
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
break;
- case PTLRPC_GSS_SVC_PRIVACY:
+ case SPTLRPC_SVC_PRIV:
rc = gss_svc_seal(req, rs, grctx);
break;
default:
rs->rs_svc_ctx = NULL;
if (!rs->rs_prealloc)
- OBD_FREE(rs, rs->rs_size);
+ OBD_FREE_LARGE(rs, rs->rs_size);
}
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->sc_refcount) == 0);
- gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
+ LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+ gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
struct ptlrpc_svc_ctx *svc_ctx)
{
struct gss_cli_ctx *cli_gctx = ctx2gctx(cli_ctx);
- struct gss_svc_reqctx *grctx;
+ struct gss_svc_ctx *svc_gctx = gss_svc_ctx2gssctx(svc_ctx);
struct gss_ctx *mechctx = NULL;
+ LASSERT(cli_gctx);
+ LASSERT(svc_gctx && svc_gctx->gsc_mechctx);
+
cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
cli_gctx->gc_win = GSS_SEQ_WIN;
- atomic_set(&cli_gctx->gc_seq, 0);
- grctx = container_of(svc_ctx, struct gss_svc_reqctx, src_base);
- LASSERT(grctx->src_ctx);
- LASSERT(grctx->src_ctx->gsc_mechctx);
-
- if (lgss_copy_reverse_context(grctx->src_ctx->gsc_mechctx, &mechctx) !=
+ /* The problem is that the reverse ctx might get lost in some recovery
+ * situations, and the same svc_ctx will be used to re-create it.
+ * If a callback was sent out before that, a new reverse ctx starting
+ * with sequence 0 would cause future callback RPCs to be treated as
+ * replays.
+ *
+ * Each reverse root ctx records its latest sequence number on its
+ * buddy svcctx before being destroyed, so here we continue to use it.
+ */
+ atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+
+ if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
+ CERROR("failed to dup svc handle\n");
+ goto err_out;
+ }
+
+ if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
GSS_S_COMPLETE) {
CERROR("failed to copy mech context\n");
- return -ENOMEM;
+ goto err_svc_handle;
}
- if (rawobj_dup(&cli_gctx->gc_handle, &grctx->src_ctx->gsc_rvs_hdl)) {
+ if (rawobj_dup(&cli_gctx->gc_handle, &svc_gctx->gsc_rvs_hdl)) {
CERROR("failed to dup reverse handle\n");
- lgss_delete_sec_context(&mechctx);
- return -ENOMEM;
+ goto err_ctx;
}
cli_gctx->gc_mechctx = mechctx;
gss_cli_ctx_uptodate(cli_gctx);
return 0;
+
+err_ctx:
+ lgss_delete_sec_context(&mechctx);
+err_svc_handle:
+ rawobj_free(&cli_gctx->gc_svc_handle);
+err_out:
+ return -ENOMEM;
+}
+
+static void gss_init_at_reply_offset(void)
+{
+ __u32 buflens[3];
+ int clearsize;
+
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = lustre_msg_early_size();
+ buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
+ gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);
+
+ buflens[0] = lustre_msg_early_size();
+ clearsize = lustre_msg_size_v2(1, buflens);
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_cli_payload(NULL, clearsize, 0);
+ buflens[2] = gss_cli_payload(NULL, clearsize, 1);
+ gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
}
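+
+/*
+ * Rough sketch (for orientation only): with the sizes computed above, an
+ * integrity-mode early reply is laid out as
+ *
+ *   | gss header | early lustre_msg | MIC |
+ *
+ * while in privacy mode the reserved size covers the gss header, a MIC,
+ * and the sealed clear buffer that wraps the same early lustre_msg as its
+ * single segment.
+ */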
int __init sptlrpc_gss_init(void)
if (rc)
goto out_cli_upcall;
- rc = init_kerberos_module();
- if (rc)
- goto out_svc_upcall;
+ rc = init_null_module();
+ if (rc)
+ goto out_svc_upcall;
- /*
- * register policy after all other stuff be intialized, because it
- * might be in used immediately after the registration.
- */
+ rc = init_kerberos_module();
+ if (rc)
+ goto out_null;
- rc = gss_init_keyring();
- if (rc)
- goto out_kerberos;
+ rc = init_sk_module();
+ if (rc)
+ goto out_kerberos;
-#ifdef HAVE_GSS_PIPEFS
- rc = gss_init_pipefs();
- if (rc)
- goto out_keyring;
-#endif
+ /* register the policy after everything else is initialized, because
+ * it might be in use immediately after the registration. */
- return 0;
+ rc = gss_init_keyring();
+ if (rc)
+ goto out_sk;
-#ifdef HAVE_GSS_PIPEFS
-out_keyring:
- gss_exit_keyring();
-#endif
+ rc = gss_init_pipefs();
+ if (rc)
+ goto out_keyring;
+ gss_init_at_reply_offset();
+
+ return 0;
+
+out_keyring:
+ gss_exit_keyring();
+out_sk:
+ cleanup_sk_module();
out_kerberos:
- cleanup_kerberos_module();
+ cleanup_kerberos_module();
+out_null:
+ cleanup_null_module();
out_svc_upcall:
- gss_exit_svc_upcall();
+ gss_exit_svc_upcall();
out_cli_upcall:
- gss_exit_cli_upcall();
+ gss_exit_cli_upcall();
out_lproc:
- gss_exit_lproc();
- return rc;
+ gss_exit_lproc();
+ return rc;
}
static void __exit sptlrpc_gss_exit(void)
{
gss_exit_keyring();
-#ifdef HAVE_GSS_PIPEFS
gss_exit_pipefs();
-#endif
cleanup_kerberos_module();
gss_exit_svc_upcall();
gss_exit_cli_upcall();
gss_exit_lproc();
}
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("GSS security policy for Lustre");
MODULE_LICENSE("GPL");