* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Modifications for Lustre
- * Copyright 2004 - 2007, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
* Author: Eric Mei <ericm@clusterfs.com>
*/
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/random.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
+#include <obd_cksum.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <linux/crypto.h>
+/*
+ * early replies have a fixed size in privacy and in integrity mode
+ * respectively, so we calculate them only once.
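+ * both values are filled in by gss_init_at_reply_offset() at module load.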
+ */
+static int gss_at_reply_off_integ;
+static int gss_at_reply_off_priv;
+
static inline int msg_last_segidx(struct lustre_msg *msg)
{
__swab32s(&ghdr->gh_handle.len);
}
-struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment)
+struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
+ int swabbed)
{
struct gss_header *ghdr;
- ghdr = lustre_swab_buf(msg, segment, sizeof(*ghdr),
- gss_header_swabber);
+ ghdr = lustre_msg_buf(msg, segment, sizeof(*ghdr));
+ if (ghdr == NULL)
+ return NULL;
+
+ if (swabbed)
+ gss_header_swabber(ghdr);
- if (ghdr &&
- sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
- CERROR("gss header require length %u, now %u received\n",
- (unsigned int) sizeof(*ghdr) + ghdr->gh_handle.len,
+ if (sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
+ CERROR("gss header has length %d, now %u received\n",
+ (int) sizeof(*ghdr) + ghdr->gh_handle.len,
msg->lm_buflens[segment]);
return NULL;
}
return ghdr;
}
+#if 0
static
void gss_netobj_swabber(netobj_t *obj)
{
return obj;
}
+#endif
/*
* the payload size should be obtained from the mechanism, but since we
* currently only support kerberos, we can simply use fixed values.
- * krb5 header: 16
- * krb5 checksum: 20
+ * krb5 "meta" data:
+ * - krb5 header: 16
+ * - krb5 checksum: 20
+ *
+ * in privacy mode the payload also includes the cipher text, which has the
+ * same size as the plain text, plus a possible confounder and padding, each
+ * at most one cipher block in size.
*/
#define GSS_KRB5_INTEG_MAX_PAYLOAD (40)
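+/*
+ * e.g. sealing a 1024-byte clear message in privacy mode reserves
+ * 40 + 16 + 16 + 16 + 1024 = 1112 bytes of payload; illustrative
+ * arithmetic only, the real overhead depends on the negotiated enctype.
+ */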
static inline
-int gss_estimate_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
+int gss_mech_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
- if (privacy) {
- /* we suppose max cipher block size is 16 bytes. here we
- * add 16 for confounder and 16 for padding. */
- return GSS_KRB5_INTEG_MAX_PAYLOAD + msgsize + 16 + 16 + 16;
- } else {
+ if (privacy)
+ return GSS_KRB5_INTEG_MAX_PAYLOAD + 16 + 16 + 16 + msgsize;
+ else
return GSS_KRB5_INTEG_MAX_PAYLOAD;
- }
}
/*
rawobj_t *handle)
{
struct gss_header *ghdr;
- rawobj_t text[3], mic;
+ rawobj_t text[4], mic;
int textcnt, max_textcnt, mic_idx;
__u32 major;
mic.len = msg->lm_buflens[mic_idx];
mic.data = lustre_msg_buf(msg, mic_idx, 0);
- major = lgss_get_mic(mechctx, textcnt, text, &mic);
+ major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
if (major != GSS_S_COMPLETE) {
CERROR("fail to generate MIC: %08x\n", major);
return -EPERM;
struct gss_ctx *mechctx,
__u32 svc)
{
- rawobj_t text[3], mic;
+ rawobj_t text[4], mic;
int textcnt, max_textcnt;
int mic_idx;
__u32 major;
mic.len = msg->lm_buflens[mic_idx];
mic.data = lustre_msg_buf(msg, mic_idx, 0);
- major = lgss_verify_mic(mechctx, textcnt, text, &mic);
+ major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
if (major != GSS_S_COMPLETE)
CERROR("mic verify error: %08x\n", major);
struct lustre_msg *msgbuf,
int *msg_len, int msgbuf_len)
{
- rawobj_t clear_obj, micobj, msgobj, token;
+ rawobj_t clear_obj, hdrobj, token;
__u8 *clear_buf;
int clear_buflen;
__u32 major;
ENTRY;
- if (msgbuf->lm_bufcount != 3) {
+ if (msgbuf->lm_bufcount != 2) {
CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
RETURN(GSS_S_FAILURE);
}
- /* verify gss header */
- msgobj.len = msgbuf->lm_buflens[0];
- msgobj.data = lustre_msg_buf(msgbuf, 0, 0);
- micobj.len = msgbuf->lm_buflens[1];
- micobj.data = lustre_msg_buf(msgbuf, 1, 0);
-
- major = lgss_verify_mic(mechctx, 1, &msgobj, &micobj);
- if (major != GSS_S_COMPLETE) {
- CERROR("priv: mic verify error: %08x\n", major);
- RETURN(major);
- }
-
- /* temporary clear text buffer */
- clear_buflen = msgbuf->lm_buflens[2];
+ /* allocate a temporary clear text buffer, the same size as the token;
+ * we assume the final clear text size <= token size */
+ clear_buflen = lustre_msg_buflen(msgbuf, 1);
OBD_ALLOC(clear_buf, clear_buflen);
if (!clear_buf)
RETURN(GSS_S_FAILURE);
- token.len = msgbuf->lm_buflens[2];
- token.data = lustre_msg_buf(msgbuf, 2, 0);
-
+ /* buffer objects */
+ hdrobj.len = lustre_msg_buflen(msgbuf, 0);
+ hdrobj.data = lustre_msg_buf(msgbuf, 0, 0);
+ token.len = lustre_msg_buflen(msgbuf, 1);
+ token.data = lustre_msg_buf(msgbuf, 1, 0);
clear_obj.len = clear_buflen;
clear_obj.data = clear_buf;
- major = lgss_unwrap(mechctx, &token, &clear_obj);
+ major = lgss_unwrap(mechctx, &hdrobj, &token, &clear_obj);
if (major != GSS_S_COMPLETE) {
- CERROR("priv: unwrap message error: %08x\n", major);
+ CERROR("unwrap message error: %08x\n", major);
GOTO(out_free, major = GSS_S_FAILURE);
}
LASSERT(clear_obj.len <= clear_buflen);
+ LASSERT(clear_obj.len <= msgbuf_len);
/* now the decrypted message */
memcpy(msgbuf, clear_obj.data, clear_obj.len);
int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
- if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+ if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
if (!ctx->cc_early_expire)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
ctx->cc_expire == 0 ? 0 :
cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+ sptlrpc_cli_ctx_wakeup(ctx);
return 1;
}
* someone else, in which case nobody will make further use
* of it. we don't care, and marking it UPTODATE will help
* destroy the server side context when this ctx is destroyed. */
- set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
if (sec_is_reverse(ctx->cc_sec)) {
CWARN("server installed reverse ctx %p idx "LPX64", "
gss_sec_install_rctx(ctx->cc_sec->ps_import,
ctx->cc_sec, ctx);
}
+
+ sptlrpc_cli_ctx_wakeup(ctx);
}
static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
*/
switch (phase) {
case 0:
- if (test_bit(seq_num % win_size, window))
+ if (cfs_test_bit(seq_num % win_size, window))
goto replay;
break;
case 1:
{
int rc = 0;
- spin_lock(&ssd->ssd_lock);
+ cfs_spin_lock(&ssd->ssd_lock);
if (set == 0) {
/*
gss_stat_oos_record_svc(2, 0);
}
exit:
- spin_unlock(&ssd->ssd_lock);
+ cfs_spin_unlock(&ssd->ssd_lock);
return rc;
}
* cred APIs *
***************************************/
-static inline
-int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
- int msgsize, int privacy)
+static inline int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
+ int msgsize, int privacy)
{
- return gss_estimate_payload(NULL, msgsize, privacy);
+ return gss_mech_payload(NULL, msgsize, privacy);
+}
+
+static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
+ struct sptlrpc_flavor *flvr,
+ int reply, int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
+ LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);
+
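+ /* the bulk sec desc needs the extra mech payload only in the message
+ * that travels with the bulk data: the request for writes, the reply
+ * for reads - hence the reply == read test below */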
+ if ((!reply && !read) || (reply && read)) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_cli_payload(ctx, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_cli_payload(ctx, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ LBUG();
+ }
+ }
+
+ return payload;
}
int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
if (req->rq_ctx_init)
RETURN(0);
- svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
if (req->rq_pack_bulk)
flags |= LUSTRE_GSS_PACK_BULK;
if (req->rq_pack_udesc)
flags |= LUSTRE_GSS_PACK_USER;
redo:
- seq = atomic_inc_return(&gctx->gc_seq);
+ seq = cfs_atomic_inc_return(&gctx->gc_seq);
rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
ctx->cc_sec->ps_part,
*
* Note: null mode doesn't check sequence number. */
if (svc != SPTLRPC_SVC_NULL &&
- atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
- int behind = atomic_read(&gctx->gc_seq) - seq;
+ cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
gss_stat_oos_record_cli(behind);
CWARN("req %p: %u behind, retry signing\n", req, behind);
{
struct gss_cli_ctx *gctx;
struct gss_header *ghdr, *reqhdr;
- struct lustre_msg *msg = req->rq_repbuf;
+ struct lustre_msg *msg = req->rq_repdata;
__u32 major;
- int rc = 0;
+ int pack_bulk, swabbed, rc = 0;
ENTRY;
LASSERT(req->rq_cli_ctx == ctx);
LASSERT(msg);
- req->rq_repdata_len = req->rq_nob_received;
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
/* special case for context negotiation, rq_repmsg/rq_replen actually
- * are not used currently. */
- if (req->rq_ctx_init) {
+ * are not used currently. but an early reply is always treated normally */
+ if (req->rq_ctx_init && !req->rq_early) {
req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
req->rq_replen = msg->lm_buflens[1];
RETURN(0);
RETURN(-EPROTO);
}
- ghdr = gss_swab_header(msg, 0);
+ swabbed = ptlrpc_rep_need_swab(req);
+
+ ghdr = gss_swab_header(msg, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(-EPROTO);
switch (ghdr->gh_proc) {
case PTLRPC_GSS_PROC_DATA:
- if (!equi(req->rq_pack_bulk == 1,
- ghdr->gh_flags & LUSTRE_GSS_PACK_BULK)) {
+ pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
+
+ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
RETURN(-EPROTO);
RETURN(-EPROTO);
}
- if (lustre_msg_swabbed(msg))
+ if (swabbed)
gss_header_swabber(ghdr);
major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
- if (major != GSS_S_COMPLETE)
+ if (major != GSS_S_COMPLETE) {
+ CERROR("failed to verify reply: %x\n", major);
RETURN(-EPERM);
+ }
- req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
- req->rq_replen = msg->lm_buflens[1];
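+ /* a NULL-svc early reply carries no MIC for the client to verify,
+ * so it is guarded only by a crc32 checksum which the server stored
+ * in lm_cksum (see the server-side counterpart below) */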
+ if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
+ __u32 cksum;
- if (req->rq_pack_bulk) {
- /* FIXME */
+ cksum = crc32_le(!(__u32) 0,
+ lustre_msg_buf(msg, 1, 0),
+ lustre_msg_buflen(msg, 1));
+ if (cksum != msg->lm_cksum) {
+ CWARN("early reply checksum mismatch: "
+ "%08x != %08x\n", cksum, msg->lm_cksum);
+ RETURN(-EPROTO);
+ }
+ }
+
+ if (pack_bulk) {
/* bulk checksum is right after the lustre msg */
if (msg->lm_bufcount < 3) {
CERROR("Invalid reply bufcount %u\n",
RETURN(-EPROTO);
}
- rc = bulk_sec_desc_unpack(msg, 2);
+ rc = bulk_sec_desc_unpack(msg, 2, swabbed);
+ if (rc) {
+ CERROR("unpack bulk desc: %d\n", rc);
+ RETURN(rc);
+ }
}
+
+ req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
+ req->rq_replen = msg->lm_buflens[1];
break;
case PTLRPC_GSS_PROC_ERR:
- rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ if (req->rq_early) {
+ CERROR("server return error with early reply\n");
+ rc = -EPROTO;
+ } else {
+ rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ }
break;
default:
CERROR("unknown gss proc %d\n", ghdr->gh_proc);
struct ptlrpc_request *req)
{
struct gss_cli_ctx *gctx;
- rawobj_t msgobj, cipher_obj, micobj;
+ rawobj_t hdrobj, msgobj, token;
struct gss_header *ghdr;
- int buflens[3], wiresize, rc;
- __u32 major;
+ __u32 buflens[2], major;
+ int wiresize, rc;
ENTRY;
LASSERT(req->rq_clrbuf);
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
- /* close clear data length */
+ /* final clear data length */
req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
req->rq_clrbuf->lm_buflens);
/* calculate wire data length */
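+ /* the wire layout now has two segments: [ gss header | token ]; the
+ * token produced by lgss_wrap() also covers the header's integrity,
+ * so no separate MIC segment is needed any more */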
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_cli_payload(&gctx->gc_base, buflens[0], 0);
- buflens[2] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
- wiresize = lustre_msg_size_v2(3, buflens);
+ buflens[1] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
+ wiresize = lustre_msg_size_v2(2, buflens);
/* allocate wire buffer */
if (req->rq_pool) {
req->rq_reqbuf_len = wiresize;
}
- lustre_init_msg_v2(req->rq_reqbuf, 3, buflens, NULL);
+ lustre_init_msg_v2(req->rq_reqbuf, 2, buflens, NULL);
req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
/* gss header */
ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
ghdr->gh_flags = 0;
ghdr->gh_proc = gctx->gc_proc;
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
ghdr->gh_svc = SPTLRPC_SVC_PRIV;
ghdr->gh_handle.len = gctx->gc_handle.len;
memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
redo:
- /* header signature */
- msgobj.len = req->rq_reqbuf->lm_buflens[0];
- msgobj.data = lustre_msg_buf(req->rq_reqbuf, 0, 0);
- micobj.len = req->rq_reqbuf->lm_buflens[1];
- micobj.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);
-
- major = lgss_get_mic(gctx->gc_mechctx, 1, &msgobj, &micobj);
- if (major != GSS_S_COMPLETE) {
- CERROR("priv: sign message error: %08x\n", major);
- GOTO(err_free, rc = -EPERM);
- }
- /* perhaps shrink msg has potential problem in re-packing???
- * ship a little bit more data is fine.
- lustre_shrink_msg(req->rq_reqbuf, 1, micobj.len, 0);
- */
+ ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
- /* clear text */
+ /* buffer objects */
+ hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
+ hdrobj.data = (__u8 *) ghdr;
msgobj.len = req->rq_clrdata_len;
msgobj.data = (__u8 *) req->rq_clrbuf;
+ token.len = lustre_msg_buflen(req->rq_reqbuf, 1);
+ token.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);
- /* cipher text */
- cipher_obj.len = req->rq_reqbuf->lm_buflens[2];
- cipher_obj.data = lustre_msg_buf(req->rq_reqbuf, 2, 0);
-
- major = lgss_wrap(gctx->gc_mechctx, &msgobj, req->rq_clrbuf_len,
- &cipher_obj);
+ major = lgss_wrap(gctx->gc_mechctx, &hdrobj, &msgobj,
+ req->rq_clrbuf_len, &token);
if (major != GSS_S_COMPLETE) {
CERROR("priv: wrap message error: %08x\n", major);
GOTO(err_free, rc = -EPERM);
}
- LASSERT(cipher_obj.len <= buflens[2]);
+ LASSERT(token.len <= buflens[1]);
/* see explain in gss_cli_ctx_sign() */
- if (atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
- GSS_SEQ_REPACK_THRESHOLD) {
- int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+ if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+ GSS_SEQ_REPACK_THRESHOLD)) {
+ int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
gss_stat_oos_record_cli(behind);
CWARN("req %p: %u behind, retry sealing\n", req, behind);
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
goto redo;
}
/* now set the final wire data length */
- req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 2,
- cipher_obj.len, 0);
-
+ req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
RETURN(0);
err_free:
{
struct gss_cli_ctx *gctx;
struct gss_header *ghdr;
- int msglen, rc;
+ struct lustre_msg *msg = req->rq_repdata;
+ int msglen, pack_bulk, swabbed, rc;
__u32 major;
ENTRY;
- LASSERT(req->rq_repbuf);
LASSERT(req->rq_cli_ctx == ctx);
+ LASSERT(req->rq_ctx_init == 0);
+ LASSERT(msg);
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ swabbed = ptlrpc_rep_need_swab(req);
- ghdr = gss_swab_header(req->rq_repbuf, 0);
+ ghdr = gss_swab_header(msg, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(-EPROTO);
switch (ghdr->gh_proc) {
case PTLRPC_GSS_PROC_DATA:
- if (!equi(req->rq_pack_bulk == 1,
- ghdr->gh_flags & LUSTRE_GSS_PACK_BULK)) {
+ pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
+
+ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
RETURN(-EPROTO);
}
- if (lustre_msg_swabbed(req->rq_repbuf))
+ if (swabbed)
gss_header_swabber(ghdr);
- major = gss_unseal_msg(gctx->gc_mechctx, req->rq_repbuf,
- &msglen, req->rq_repbuf_len);
+ /* use rq_repdata_len as the buffer size, which assumes unsealing
+ * doesn't need extra memory space. for precise control, we'd better
+ * calculate the actual buffer size as
+ * (repbuf_len - offset - repdata_len) */
+ major = gss_unseal_msg(gctx->gc_mechctx, msg,
+ &msglen, req->rq_repdata_len);
if (major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap reply: %x\n", major);
rc = -EPERM;
break;
}
- if (lustre_unpack_msg(req->rq_repbuf, msglen)) {
+ swabbed = __lustre_unpack_msg(msg, msglen);
+ if (swabbed < 0) {
CERROR("Failed to unpack after decryption\n");
RETURN(-EPROTO);
}
- req->rq_repdata_len = msglen;
- if (req->rq_repbuf->lm_bufcount < 1) {
+ if (msg->lm_bufcount < 1) {
CERROR("Invalid reply buffer: empty\n");
RETURN(-EPROTO);
}
- if (req->rq_pack_bulk) {
- if (req->rq_repbuf->lm_bufcount < 2) {
- CERROR("Too few request buffer segments %d\n",
- req->rq_repbuf->lm_bufcount);
+ if (pack_bulk) {
+ if (msg->lm_bufcount < 2) {
+ CERROR("bufcount %u: missing bulk sec desc\n",
+ msg->lm_bufcount);
RETURN(-EPROTO);
}
/* bulk checksum is the last segment */
- if (bulk_sec_desc_unpack(req->rq_repbuf,
- req->rq_repbuf->lm_bufcount-1))
+ if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
+ swabbed))
RETURN(-EPROTO);
}
- req->rq_repmsg = lustre_msg_buf(req->rq_repbuf, 0, 0);
- req->rq_replen = req->rq_repbuf->lm_buflens[0];
+ req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
+ req->rq_replen = msg->lm_buflens[0];
rc = 0;
break;
case PTLRPC_GSS_PROC_ERR:
- rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ if (req->rq_early) {
+ CERROR("server return error with early reply\n");
+ rc = -EPROTO;
+ } else {
+ rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ }
break;
default:
CERROR("unexpected proc %d\n", ghdr->gh_proc);
struct ptlrpc_sec *sec;
LASSERT(imp);
- LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
- gsec->gs_mech = lgss_subflavor_to_mech(RPC_FLVR_SUB(sf->sf_rpc));
+ gsec->gs_mech = lgss_subflavor_to_mech(
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
if (!gsec->gs_mech) {
CERROR("gss backend 0x%x not found\n",
- RPC_FLVR_SUB(sf->sf_rpc));
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
return -EOPNOTSUPP;
}
- spin_lock_init(&gsec->gs_lock);
+ cfs_spin_lock_init(&gsec->gs_lock);
gsec->gs_rvs_hdl = 0ULL;
/* initialize upper ptlrpc_sec */
sec = &gsec->gs_base;
sec->ps_policy = policy;
- atomic_set(&sec->ps_refcount, 0);
- atomic_set(&sec->ps_nctx, 0);
+ cfs_atomic_set(&sec->ps_refcount, 0);
+ cfs_atomic_set(&sec->ps_nctx, 0);
sec->ps_id = sptlrpc_get_next_secid();
sec->ps_flvr = *sf;
sec->ps_import = class_import_get(imp);
- sec->ps_lock = SPIN_LOCK_UNLOCKED;
+ cfs_spin_lock_init(&sec->ps_lock);
CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
sec->ps_gc_interval = 0;
}
- if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
- sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
sptlrpc_enc_pool_add_user();
CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
ENTRY;
LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
if (gsec->gs_mech) {
lgss_mech_put(gsec->gs_mech);
class_import_put(sec->ps_import);
- if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
- sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
sptlrpc_enc_pool_del_user();
EXIT;
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
gctx->gc_win = 0;
- atomic_set(&gctx->gc_seq, 0);
+ cfs_atomic_set(&gctx->gc_seq, 0);
CFS_INIT_HLIST_NODE(&ctx->cc_cache);
- atomic_set(&ctx->cc_refcount, 0);
+ cfs_atomic_set(&ctx->cc_refcount, 0);
ctx->cc_sec = sec;
ctx->cc_ops = ctxops;
ctx->cc_expire = 0;
ctx->cc_flags = PTLRPC_CTX_NEW;
ctx->cc_vcred = *vcred;
- spin_lock_init(&ctx->cc_lock);
+ cfs_spin_lock_init(&ctx->cc_lock);
CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
/* take a ref on belonging sec, balanced in ctx destroying */
- atomic_inc(&sec->ps_refcount);
+ cfs_atomic_inc(&sec->ps_refcount);
/* statistic only */
- atomic_inc(&sec->ps_nctx);
+ cfs_atomic_inc(&sec->ps_nctx);
CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
sec->ps_policy->sp_name, ctx->cc_sec,
{
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
LASSERT(ctx->cc_sec == sec);
+ /*
+ * remove the UPTODATE flag of a reverse ctx so that we won't send a fini
+ * rpc; this avoids the client side reverse svc ctx being mis-destroyed in
+ * various recovery scenarios. anyway the client can manage its reverse
+ * ctx well by associating it with its buddy ctx.
+ */
+ if (sec_is_reverse(sec))
+ ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
+
if (gctx->gc_mechctx) {
/* the final context fini rpc will use this ctx too, and it's
* asynchronous which finished by request_out_callback(). so
* we add refcount, whoever drop finally drop the refcount to
* 0 should responsible for the rest of destroy. */
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
gss_do_ctx_fini_rpc(gctx);
gss_cli_ctx_finalize(gctx);
- if (!atomic_dec_and_test(&ctx->cc_refcount))
+ if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
return 1;
}
int svc, int msgsize)
{
int bufsize, txtsize;
- int buflens[5], bufcnt = 2;
+ int bufcnt = 2;
+ __u32 buflens[5];
ENTRY;
/*
}
if (req->rq_pack_bulk) {
- buflens[bufcnt] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 1,
- req->rq_bulk_read);
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 0, req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
struct ptlrpc_request *req,
int msgsize)
{
- int ibuflens[3], ibufcnt;
- int buflens[3];
+ __u32 ibuflens[3], wbuflens[2];
+ int ibufcnt;
int clearsize, wiresize;
ENTRY;
* - user descriptor (optional)
* - bulk checksum (optional)
*/
-
ibufcnt = 1;
ibuflens[0] = msgsize;
if (req->rq_pack_udesc)
ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
if (req->rq_pack_bulk)
- ibuflens[ibufcnt++] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 1,
- req->rq_bulk_read);
+ ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr, 0,
+ req->rq_bulk_read);
clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
/* to allow append padding during encryption */
/* Wrapper (wire) buffers
* - gss header
- * - signature of gss header
* - cipher text
*/
-
- buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
- buflens[2] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
- wiresize = lustre_msg_size_v2(3, buflens);
+ wbuflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ wbuflens[1] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
+ wiresize = lustre_msg_size_v2(2, wbuflens);
if (req->rq_pool) {
/* rq_reqbuf is preallocated */
struct ptlrpc_request *req,
int msgsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
LASSERT(!req->rq_pack_bulk ||
(req->rq_bulk_read || req->rq_bulk_write));
ENTRY;
LASSERT(!req->rq_pool || req->rq_reqbuf);
- privacy = RPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
+ privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
if (!req->rq_clrbuf)
goto release_reqbuf;
LASSERT(privacy);
LASSERT(req->rq_clrbuf_len);
- if (req->rq_pool &&
- req->rq_clrbuf >= req->rq_reqbuf &&
- (char *) req->rq_clrbuf <
+ if (req->rq_pool == NULL ||
+ req->rq_clrbuf < req->rq_reqbuf ||
+ (char *) req->rq_clrbuf >=
(char *) req->rq_reqbuf + req->rq_reqbuf_len)
- goto release_reqbuf;
+ OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
- OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
req->rq_clrbuf = NULL;
req->rq_clrbuf_len = 0;
req->rq_reqbuf_len = 0;
}
- req->rq_reqmsg = NULL;
-
EXIT;
}
struct ptlrpc_request *req,
int svc, int msgsize)
{
- int txtsize;
- int buflens[4], bufcnt = 2;
+ int txtsize;
+ __u32 buflens[4];
+ int bufcnt = 2;
+ int alloc_size;
/*
* on-wire data layout:
txtsize += buflens[1];
if (req->rq_pack_bulk) {
- buflens[bufcnt] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 0,
- req->rq_bulk_read);
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
else if (svc != SPTLRPC_SVC_NULL)
buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize,0);
- return do_alloc_repbuf(req, lustre_msg_size_v2(bufcnt, buflens));
+ alloc_size = lustre_msg_size_v2(bufcnt, buflens);
+
+ /* add space for early reply */
+ alloc_size += gss_at_reply_off_integ;
+
+ return do_alloc_repbuf(req, alloc_size);
}
static
struct ptlrpc_request *req,
int msgsize)
{
- int txtsize;
- int buflens[3], bufcnt;
-
- /* Inner (clear) buffers
- * - lustre message
- * - bulk checksum (optional)
- */
+ int txtsize;
+ __u32 buflens[2];
+ int bufcnt;
+ int alloc_size;
+ /* inner buffers */
bufcnt = 1;
buflens[0] = msgsize;
- if (req->rq_pack_bulk) {
- buflens[bufcnt++] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 0,
- req->rq_bulk_read);
- }
+ if (req->rq_pack_bulk)
+ buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
txtsize = lustre_msg_size_v2(bufcnt, buflens);
txtsize += GSS_MAX_CIPHER_BLOCK;
- /* Wrapper (wire) buffers
- * - gss header
- * - signature of gss header
- * - cipher text
- */
-
- bufcnt = 3;
+ /* wrapper buffers */
+ bufcnt = 2;
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
- buflens[2] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
+ buflens[1] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
+
+ alloc_size = lustre_msg_size_v2(bufcnt, buflens);
+ /* add space for early reply */
+ alloc_size += gss_at_reply_off_priv;
- return do_alloc_repbuf(req, lustre_msg_size_v2(bufcnt, buflens));
+ return do_alloc_repbuf(req, alloc_size);
}
int gss_alloc_repbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req,
int msgsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
ENTRY;
LASSERT(!req->rq_pack_bulk ||
OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
req->rq_repbuf = NULL;
req->rq_repbuf_len = 0;
-
- req->rq_repmsg = NULL;
+ req->rq_repdata = NULL;
+ req->rq_repdata_len = 0;
}
static int get_enlarged_msgsize(struct lustre_msg *msg,
{
struct lustre_msg *newclrbuf;
int newmsg_size, newclrbuf_size, newcipbuf_size;
- int buflens[3];
+ __u32 buflens[3];
/*
* embedded msg is at seg 0 of clear buffer;
struct ptlrpc_request *req,
int segment, int newsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
- atomic_inc(&grctx->src_base.sc_refcount);
+ LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+ cfs_atomic_inc(&grctx->src_base.sc_refcount);
}
static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
- if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+ if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
gss_svc_reqctx_free(grctx);
}
RETURN(rc);
rs->rs_repdata_len = rc;
+
+ if (likely(req->rq_packed_final)) {
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
+ req->rq_reply_off = gss_at_reply_off_integ;
+ else
+ req->rq_reply_off = 0;
+ } else {
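+ /* this is an early reply; under NULL svc the client cannot verify
+ * it cryptographically, so leave a crc32 of the message body in
+ * lm_cksum for it to check */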
+ if (svc == SPTLRPC_SVC_NULL)
+ rs->rs_repbuf->lm_cksum = crc32_le(!(__u32) 0,
+ lustre_msg_buf(rs->rs_repbuf, 1, 0),
+ lustre_msg_buflen(rs->rs_repbuf, 1));
+ req->rq_reply_off = 0;
+ }
+
RETURN(0);
}
grctx->src_err_notify = 1;
grctx->src_reserve_len = 0;
- rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
+ rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
if (rc) {
CERROR("could not pack reply, err %d\n", rc);
RETURN(rc);
rawobj_t uuid_obj, rvs_hdl, in_token;
__u32 lustre_svc;
__u32 *secdata, seclen;
- int rc;
+ int swabbed, rc;
ENTRY;
CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
RETURN(SECSVC_DROP);
}
+ swabbed = ptlrpc_req_need_swab(req);
+
/* ctx initiate payload is in last segment */
secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
CERROR("missing user descriptor\n");
RETURN(SECSVC_DROP);
}
- if (sptlrpc_unpack_user_desc(reqbuf, 2)) {
+ if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(SECSVC_DROP);
}
struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
int offset = 2;
+ int swabbed;
ENTRY;
*major = GSS_S_COMPLETE;
}
*major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
- if (*major != GSS_S_COMPLETE)
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to verify request: %x\n", *major);
RETURN(-EACCES);
+ }
if (gctx->gsc_reverse == 0 &&
gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
}
verified:
+ swabbed = ptlrpc_req_need_swab(req);
+
/* user descriptor */
if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (msg->lm_bufcount < (offset + 1)) {
RETURN(-EINVAL);
}
- if (sptlrpc_unpack_user_desc(msg, offset)) {
+ if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(-EINVAL);
}
offset++;
}
- /* check bulk cksum data */
+ /* check bulk_sec_desc data */
if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
if (msg->lm_bufcount < (offset + 1)) {
- CERROR("no bulk checksum included\n");
+ CERROR("missing bulk sec descriptor\n");
RETURN(-EINVAL);
}
- if (bulk_sec_desc_unpack(msg, offset))
+ if (bulk_sec_desc_unpack(msg, offset, swabbed))
RETURN(-EINVAL);
req->rq_pack_bulk = 1;
{
struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
- int msglen, offset = 1;
+ int swabbed, msglen, offset = 1;
ENTRY;
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
*major = gss_unseal_msg(gctx->gsc_mechctx, msg,
&msglen, req->rq_reqdata_len);
- if (*major != GSS_S_COMPLETE)
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap request: %x\n", *major);
RETURN(-EACCES);
+ }
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
RETURN(-EACCES);
}
- if (lustre_unpack_msg(msg, msglen)) {
+ swabbed = __lustre_unpack_msg(msg, msglen);
+ if (swabbed < 0) {
CERROR("Failed to unpack after decryption\n");
RETURN(-EINVAL);
}
RETURN(-EINVAL);
}
- if (sptlrpc_unpack_user_desc(msg, offset)) {
+ if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(-EINVAL);
}
RETURN(-EINVAL);
}
- if (bulk_sec_desc_unpack(msg, offset))
+ if (bulk_sec_desc_unpack(msg, offset, swabbed))
RETURN(-EINVAL);
req->rq_pack_bulk = 1;
CERROR("missing user descriptor, ignore it\n");
RETURN(SECSVC_OK);
}
- if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2)) {
+ if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
+ ptlrpc_req_need_swab(req))) {
CERROR("Mal-formed user descriptor, ignore it\n");
RETURN(SECSVC_OK);
}
struct gss_header *ghdr;
struct gss_svc_reqctx *grctx;
struct gss_wire_ctx *gw;
- int rc;
+ int swabbed, rc;
ENTRY;
LASSERT(req->rq_reqbuf);
RETURN(SECSVC_DROP);
}
- ghdr = gss_swab_header(req->rq_reqbuf, 0);
+ swabbed = ptlrpc_req_need_swab(req);
+
+ ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(SECSVC_DROP);
/* alloc grctx data */
OBD_ALLOC_PTR(grctx);
- if (!grctx) {
- CERROR("fail to alloc svc reqctx\n");
+ if (!grctx)
RETURN(SECSVC_DROP);
- }
+
grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
- atomic_set(&grctx->src_base.sc_refcount, 1);
+ cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
req->rq_svc_ctx = &grctx->src_base;
gw = &grctx->src_wirectx;
rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);
/* keep original wire header which subject to checksum verification */
- if (lustre_msg_swabbed(req->rq_reqbuf))
+ if (swabbed)
gss_header_swabber(ghdr);
switch(ghdr->gh_proc) {
}
static inline
-int gss_svc_payload(struct gss_svc_reqctx *grctx, int msgsize, int privacy)
+int gss_svc_payload(struct gss_svc_reqctx *grctx, int early,
+ int msgsize, int privacy)
{
- if (gss_svc_reqctx_is_special(grctx))
+ /* an early reply should be treated normally; however it actually shares
+ * the same ctx with the original request, so in that case we must
+ * ignore the special ctx's special flags */
+ if (early == 0 && gss_svc_reqctx_is_special(grctx))
return grctx->src_reserve_len;
- return gss_estimate_payload(NULL, msgsize, privacy);
+ return gss_mech_payload(NULL, msgsize, privacy);
+}
+
+static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
+ struct sptlrpc_flavor *flvr,
+ int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
+ if (read) {
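+ /* only bulk reads carry data back in the reply, so only they
+ * need the extra integrity/privacy payload on the reply side */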
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_mech_payload(NULL, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_mech_payload(NULL, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ LBUG();
+ }
+ }
+
+ return payload;
}
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
struct gss_svc_reqctx *grctx;
struct ptlrpc_reply_state *rs;
- int privacy, svc, bsd_off = 0;
- int ibuflens[2], ibufcnt = 0;
- int buflens[4], bufcnt;
+ int early, privacy, svc, bsd_off = 0;
+ __u32 ibuflens[2], buflens[4];
+ int ibufcnt = 0, bufcnt;
int txtsize, wmsg_size, rs_size;
ENTRY;
RETURN(-EPROTO);
}
- svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ early = (req->rq_packed_final == 0);
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- if (gss_svc_reqctx_is_special(grctx))
+ if (!early && gss_svc_reqctx_is_special(grctx))
privacy = 0;
else
privacy = (svc == SPTLRPC_SVC_PRIV);
if (privacy) {
- /* Inner buffer */
+ /* inner clear buffers */
ibufcnt = 1;
ibuflens[0] = msglen;
LASSERT(grctx->src_reqbsd);
bsd_off = ibufcnt;
- ibuflens[ibufcnt++] = bulk_sec_desc_size(
- grctx->src_reqbsd->bsd_hash_alg,
- 0, req->rq_bulk_read);
+ ibuflens[ibufcnt++] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
+ req->rq_bulk_read);
}
txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
txtsize += GSS_MAX_CIPHER_BLOCK;
/* wrapper buffer */
- bufcnt = 3;
+ bufcnt = 2;
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_svc_payload(grctx, buflens[0], 0);
- buflens[2] = gss_svc_payload(grctx, txtsize, 1);
+ buflens[1] = gss_svc_payload(grctx, early, txtsize, 1);
} else {
bufcnt = 2;
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
LASSERT(grctx->src_reqbsd);
bsd_off = bufcnt;
- buflens[bufcnt] = bulk_sec_desc_size(
- grctx->src_reqbsd->bsd_hash_alg,
- 0, req->rq_bulk_read);
+ buflens[bufcnt] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
+ req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
}
- if (gss_svc_reqctx_is_special(grctx) ||
+ if ((!early && gss_svc_reqctx_is_special(grctx)) ||
svc != SPTLRPC_SVC_NULL)
- buflens[bufcnt++] = gss_svc_payload(grctx, txtsize, 0);
+ buflens[bufcnt++] = gss_svc_payload(grctx, early,
+ txtsize, 0);
}
wmsg_size = lustre_msg_size_v2(bufcnt, buflens);
RETURN(0);
}
-static
-int gss_svc_seal(struct ptlrpc_request *req,
- struct ptlrpc_reply_state *rs,
- struct gss_svc_reqctx *grctx)
+static int gss_svc_seal(struct ptlrpc_request *req,
+ struct ptlrpc_reply_state *rs,
+ struct gss_svc_reqctx *grctx)
{
struct gss_svc_ctx *gctx = grctx->src_ctx;
- rawobj_t msgobj, cipher_obj, micobj;
+ rawobj_t hdrobj, msgobj, token;
struct gss_header *ghdr;
- __u8 *cipher_buf;
- int cipher_buflen, buflens[3];
+ __u8 *token_buf;
+ int token_buflen;
+ __u32 buflens[2], major;
int msglen, rc;
- __u32 major;
ENTRY;
- /* embedded lustre_msg might have been shrinked */
- if (req->rq_replen != rs->rs_repbuf->lm_buflens[0])
- lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
-
- /* clear data length */
- msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
- rs->rs_repbuf->lm_buflens);
-
- /* clear text */
- msgobj.len = msglen;
- msgobj.data = (__u8 *) rs->rs_repbuf;
+ /* get the clear data length. note the embedded lustre_msg might
+ * have been shrunk */
+ if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
+ msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
+ else
+ msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
+ rs->rs_repbuf->lm_buflens);
+
+ /* temporarily use tail of buffer to hold gss header data */
+ LASSERT(msglen + PTLRPC_GSS_HEADER_SIZE <= rs->rs_repbuf_len);
+ ghdr = (struct gss_header *) ((char *) rs->rs_repbuf +
+ rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE);
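+ /* the head of rs_repbuf still holds the clear reply about to be
+ * wrapped, so the header is staged at the buffer tail and copied
+ * into wire segment 0 after the wrap */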
+ ghdr->gh_version = PTLRPC_GSS_VERSION;
+ ghdr->gh_sp = LUSTRE_SP_ANY;
+ ghdr->gh_flags = 0;
+ ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
+ ghdr->gh_seq = grctx->src_wirectx.gw_seq;
+ ghdr->gh_svc = SPTLRPC_SVC_PRIV;
+ ghdr->gh_handle.len = 0;
+ if (req->rq_pack_bulk)
+ ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
/* allocate a temporary token buffer */
- cipher_buflen = gss_estimate_payload(gctx->gsc_mechctx, msglen, 1);
- OBD_ALLOC(cipher_buf, cipher_buflen);
- if (!cipher_buf)
+ token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
+ OBD_ALLOC(token_buf, token_buflen);
+ if (token_buf == NULL)
RETURN(-ENOMEM);
- cipher_obj.len = cipher_buflen;
- cipher_obj.data = cipher_buf;
+ hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
+ hdrobj.data = (__u8 *) ghdr;
+ msgobj.len = msglen;
+ msgobj.data = (__u8 *) rs->rs_repbuf;
+ token.len = token_buflen;
+ token.data = token_buf;
- major = lgss_wrap(gctx->gsc_mechctx, &msgobj, rs->rs_repbuf_len,
- &cipher_obj);
+ major = lgss_wrap(gctx->gsc_mechctx, &hdrobj, &msgobj,
+ rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE, &token);
if (major != GSS_S_COMPLETE) {
- CERROR("priv: wrap message error: %08x\n", major);
+ CERROR("wrap message error: %08x\n", major);
GOTO(out_free, rc = -EPERM);
}
- LASSERT(cipher_obj.len <= cipher_buflen);
+ LASSERT(token.len <= token_buflen);
/* we are about to overwrite data at rs->rs_repbuf, nullify pointers
* to it to catch further illegal usage. */
- grctx->src_repbsd = NULL;
- grctx->src_repbsd_size = 0;
+ if (req->rq_pack_bulk) {
+ grctx->src_repbsd = NULL;
+ grctx->src_repbsd_size = 0;
+ }
- /* now the real wire data */
+ /* now fill the actual wire data
+ * - gss header
+ * - gss token
+ */
buflens[0] = PTLRPC_GSS_HEADER_SIZE;
- buflens[1] = gss_estimate_payload(gctx->gsc_mechctx, buflens[0], 0);
- buflens[2] = cipher_obj.len;
-
- LASSERT(lustre_msg_size_v2(3, buflens) <= rs->rs_repbuf_len);
- lustre_init_msg_v2(rs->rs_repbuf, 3, buflens, NULL);
- rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;
-
- /* gss header */
- ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
- ghdr->gh_version = PTLRPC_GSS_VERSION;
- ghdr->gh_flags = 0;
- ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
- ghdr->gh_seq = grctx->src_wirectx.gw_seq;
- ghdr->gh_svc = SPTLRPC_SVC_PRIV;
- ghdr->gh_handle.len = 0;
- if (req->rq_pack_bulk)
- ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
+ buflens[1] = token.len;
- /* header signature */
- msgobj.len = rs->rs_repbuf->lm_buflens[0];
- msgobj.data = lustre_msg_buf(rs->rs_repbuf, 0, 0);
- micobj.len = rs->rs_repbuf->lm_buflens[1];
- micobj.data = lustre_msg_buf(rs->rs_repbuf, 1, 0);
+ rs->rs_repdata_len = lustre_msg_size_v2(2, buflens);
+ LASSERT(rs->rs_repdata_len <= rs->rs_repbuf_len);
- major = lgss_get_mic(gctx->gsc_mechctx, 1, &msgobj, &micobj);
- if (major != GSS_S_COMPLETE) {
- CERROR("priv: sign message error: %08x\n", major);
- GOTO(out_free, rc = -EPERM);
- }
- lustre_shrink_msg(rs->rs_repbuf, 1, micobj.len, 0);
+ lustre_init_msg_v2(rs->rs_repbuf, 2, buflens, NULL);
+ rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;
- /* cipher token */
- memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0),
- cipher_obj.data, cipher_obj.len);
+ memcpy(lustre_msg_buf(rs->rs_repbuf, 0, 0), ghdr,
+ PTLRPC_GSS_HEADER_SIZE);
+ memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);
- rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
- cipher_obj.len, 0);
+ /* reply offset */
+ if (req->rq_packed_final &&
+ (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))
+ req->rq_reply_off = gss_at_reply_off_priv;
+ else
+ req->rq_reply_off = 0;
/* to catch upper layer's further access */
rs->rs_msg = NULL;
rc = 0;
out_free:
- OBD_FREE(cipher_buf, cipher_buflen);
+ OBD_FREE(token_buf, token_buflen);
RETURN(rc);
}
{
struct ptlrpc_reply_state *rs = req->rq_reply_state;
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
- struct gss_wire_ctx *gw;
- int rc;
+ struct gss_wire_ctx *gw = &grctx->src_wirectx;
+ int early, rc;
ENTRY;
- if (gss_svc_reqctx_is_special(grctx))
+ early = (req->rq_packed_final == 0);
+
+ if (!early && gss_svc_reqctx_is_special(grctx)) {
+ LASSERT(rs->rs_repdata_len != 0);
+
+ req->rq_reply_off = gss_at_reply_off_integ;
RETURN(0);
+ }
- gw = &grctx->src_wirectx;
- if (gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
+ /* an early reply can be sent at any stage of request processing,
+ * so the gw_proc check below applies only to final replies */
+ if (!early &&
+ gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
CERROR("proc %d not support\n", gw->gw_proc);
RETURN(-EINVAL);
LASSERT(rs->rs_svc_ctx);
grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);
- /* paranoid, maybe not necessary */
- grctx->src_reqbsd = NULL;
- grctx->src_repbsd = NULL;
-
gss_svc_reqctx_decref(grctx);
rs->rs_svc_ctx = NULL;
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
* each reverse root ctx will record its latest sequence number on its
* buddy svcctx before being destroyed, so here we continue to use it.
*/
- atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+ cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
CERROR("failed to dup svc handle\n");
return -ENOMEM;
}
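+
+/*
+ * precompute the fixed space an early reply occupies ahead of the real
+ * reply, in integrity and in privacy mode; repbuf allocation and
+ * rq_reply_off both rely on these values (gss_at_reply_off_* above).
+ */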
+static void gss_init_at_reply_offset(void)
+{
+ __u32 buflens[3];
+ int clearsize;
+
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = lustre_msg_early_size();
+ buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
+ gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);
+
+ buflens[0] = lustre_msg_early_size();
+ clearsize = lustre_msg_size_v2(1, buflens);
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_cli_payload(NULL, clearsize, 0);
+ buflens[2] = gss_cli_payload(NULL, clearsize, 1);
+ gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
+}
+
int __init sptlrpc_gss_init(void)
{
int rc;
goto out_keyring;
#endif
+ gss_init_at_reply_offset();
+
return 0;
#ifdef HAVE_GSS_PIPEFS
gss_exit_lproc();
}
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("GSS security policy for Lustre");
MODULE_LICENSE("GPL");