* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Modifications for Lustre
- * Copyright 2004 - 2007, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ *
* Author: Eric Mei <ericm@clusterfs.com>
*/
rawobj_t *handle)
{
struct gss_header *ghdr;
- rawobj_t text[3], mic;
+ rawobj_t text[4], mic;
int textcnt, max_textcnt, mic_idx;
__u32 major;
mic.len = msg->lm_buflens[mic_idx];
mic.data = lustre_msg_buf(msg, mic_idx, 0);
- major = lgss_get_mic(mechctx, textcnt, text, &mic);
+ major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
if (major != GSS_S_COMPLETE) {
CERROR("fail to generate MIC: %08x\n", major);
return -EPERM;
struct gss_ctx *mechctx,
__u32 svc)
{
- rawobj_t text[3], mic;
+ rawobj_t text[4], mic;
int textcnt, max_textcnt;
int mic_idx;
__u32 major;
mic.len = msg->lm_buflens[mic_idx];
mic.data = lustre_msg_buf(msg, mic_idx, 0);
- major = lgss_verify_mic(mechctx, textcnt, text, &mic);
+ major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
if (major != GSS_S_COMPLETE)
CERROR("mic verify error: %08x\n", major);
return gss_mech_payload(NULL, msgsize, privacy);
}
+/*
+ * Return the size in bytes of the bulk security descriptor segment
+ * the client must reserve in an RPC buffer.
+ *
+ * \param ctx    client rpc context, passed through to gss_cli_payload()
+ * \param flvr   sptlrpc flavor; its bulk type must be SPTLRPC_BULK_DEFAULT
+ * \param reply  non-zero when sizing the reply buffer, 0 for the request
+ * \param read   non-zero for a bulk read, 0 for a bulk write
+ *
+ * Extra GSS payload beyond the bare ptlrpc_bulk_sec_desc is reserved
+ * only when (!reply && !read) — the request of a bulk write — or
+ * (reply && read) — the reply of a bulk read; the opposite directions
+ * carry just the descriptor itself.
+ */
+static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
+ struct sptlrpc_flavor *flvr,
+ int reply, int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
+ LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);
+
+ if ((!reply && !read) || (reply && read)) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ /* no per-bulk GSS payload */
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ /* third arg 0/1 presumably selects privacy, mirroring
+ * gss_mech_payload() — TODO confirm against gss_cli_payload() */
+ payload += gss_cli_payload(ctx, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_cli_payload(ctx, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ /* AUTH bulk service is not handled here */
+ LBUG();
+ }
+ }
+
+ return payload;
+}
+
int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
{
return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
if (req->rq_ctx_init)
RETURN(0);
- svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
if (req->rq_pack_bulk)
flags |= LUSTRE_GSS_PACK_BULK;
if (req->rq_pack_udesc)
struct gss_header *ghdr, *reqhdr;
struct lustre_msg *msg = req->rq_repdata;
__u32 major;
- int pack_bulk, early = 0, rc = 0;
+ int pack_bulk, rc = 0;
ENTRY;
LASSERT(req->rq_cli_ctx == ctx);
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
- if ((char *) msg < req->rq_repbuf ||
- (char *) msg >= req->rq_repbuf + req->rq_repbuf_len)
- early = 1;
-
/* special case for context negotiation, rq_repmsg/rq_replen actually
* are not used currently. but early reply always be treated normally */
- if (req->rq_ctx_init && !early) {
+ if (req->rq_ctx_init && !req->rq_early) {
req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
req->rq_replen = msg->lm_buflens[1];
RETURN(0);
case PTLRPC_GSS_PROC_DATA:
pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
- if (!early && !equi(req->rq_pack_bulk == 1, pack_bulk)) {
+ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
RETURN(-EPROTO);
gss_header_swabber(ghdr);
major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
- if (major != GSS_S_COMPLETE)
+ if (major != GSS_S_COMPLETE) {
+ CERROR("failed to verify reply: %x\n", major);
RETURN(-EPERM);
+ }
- if (early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
+ if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
__u32 cksum;
cksum = crc32_le(!(__u32) 0,
req->rq_replen = msg->lm_buflens[1];
break;
case PTLRPC_GSS_PROC_ERR:
- if (early) {
+ if (req->rq_early) {
CERROR("server return error with early reply\n");
rc = -EPROTO;
} else {
struct gss_cli_ctx *gctx;
struct gss_header *ghdr;
struct lustre_msg *msg = req->rq_repdata;
- int msglen, pack_bulk, early = 0, rc;
+ int msglen, pack_bulk, rc;
__u32 major;
ENTRY;
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
- if ((char *) msg < req->rq_repbuf ||
- (char *) msg >= req->rq_repbuf + req->rq_repbuf_len)
- early = 1;
-
ghdr = gss_swab_header(msg, 0);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
case PTLRPC_GSS_PROC_DATA:
pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
- if (!early && !equi(req->rq_pack_bulk == 1, pack_bulk)) {
+ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
RETURN(-EPROTO);
major = gss_unseal_msg(gctx->gc_mechctx, msg,
&msglen, req->rq_repdata_len);
if (major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap reply: %x\n", major);
rc = -EPERM;
break;
}
}
/* bulk checksum is the last segment */
- if (bulk_sec_desc_unpack(msg, msg->lm_bufcount-1))
+ if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1))
RETURN(-EPROTO);
}
rc = 0;
break;
case PTLRPC_GSS_PROC_ERR:
- rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ if (req->rq_early) {
+ CERROR("server return error with early reply\n");
+ rc = -EPROTO;
+ } else {
+ rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ }
break;
default:
CERROR("unexpected proc %d\n", ghdr->gh_proc);
struct ptlrpc_sec *sec;
LASSERT(imp);
- LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
- gsec->gs_mech = lgss_subflavor_to_mech(RPC_FLVR_SUB(sf->sf_rpc));
+ gsec->gs_mech = lgss_subflavor_to_mech(
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
if (!gsec->gs_mech) {
CERROR("gss backend 0x%x not found\n",
- RPC_FLVR_SUB(sf->sf_rpc));
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
return -EOPNOTSUPP;
}
sec->ps_id = sptlrpc_get_next_secid();
sec->ps_flvr = *sf;
sec->ps_import = class_import_get(imp);
- sec->ps_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&sec->ps_lock);
CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
sec->ps_gc_interval = 0;
}
- if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
- sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
sptlrpc_enc_pool_add_user();
CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
class_import_put(sec->ps_import);
- if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
- sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
sptlrpc_enc_pool_del_user();
EXIT;
LASSERT(atomic_read(&ctx->cc_refcount) == 0);
LASSERT(ctx->cc_sec == sec);
+ /*
+ * remove the UPTODATE flag of a reverse ctx so we won't send a fini
+ * rpc; this avoids potential problems of the client-side reverse svc
+ * ctx being mis-destroyed in various recovery scenarios. in any case
+ * the client can manage its reverse ctx well by associating it with
+ * its buddy ctx.
+ */
+ if (sec_is_reverse(sec))
+ ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
+
if (gctx->gc_mechctx) {
/* the final context fini rpc will use this ctx too, and it's
* asynchronous which finished by request_out_callback(). so
}
if (req->rq_pack_bulk) {
- buflens[bufcnt] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 1,
- req->rq_bulk_read);
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 0, req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
if (req->rq_pack_udesc)
ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
if (req->rq_pack_bulk)
- ibuflens[ibufcnt++] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 1,
- req->rq_bulk_read);
+ ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr, 0,
+ req->rq_bulk_read);
clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
/* to allow append padding during encryption */
struct ptlrpc_request *req,
int msgsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
LASSERT(!req->rq_pack_bulk ||
(req->rq_bulk_read || req->rq_bulk_write));
ENTRY;
LASSERT(!req->rq_pool || req->rq_reqbuf);
- privacy = RPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
+ privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
if (!req->rq_clrbuf)
goto release_reqbuf;
txtsize += buflens[1];
if (req->rq_pack_bulk) {
- buflens[bufcnt] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 0,
- req->rq_bulk_read);
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
buflens[0] = msgsize;
if (req->rq_pack_bulk)
- buflens[bufcnt++] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 0,
- req->rq_bulk_read);
+ buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
txtsize = lustre_msg_size_v2(bufcnt, buflens);
txtsize += GSS_MAX_CIPHER_BLOCK;
struct ptlrpc_request *req,
int msgsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
ENTRY;
LASSERT(!req->rq_pack_bulk ||
struct ptlrpc_request *req,
int segment, int newsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
rs->rs_repdata_len = rc;
if (likely(req->rq_packed_final)) {
- req->rq_reply_off = gss_at_reply_off_integ;
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
+ req->rq_reply_off = gss_at_reply_off_integ;
+ else
+ req->rq_reply_off = 0;
} else {
if (svc == SPTLRPC_SVC_NULL)
rs->rs_repbuf->lm_cksum = crc32_le(!(__u32) 0,
}
*major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
- if (*major != GSS_S_COMPLETE)
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to verify request: %x\n", *major);
RETURN(-EACCES);
+ }
if (gctx->gsc_reverse == 0 &&
gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
offset++;
}
- /* check bulk cksum data */
+ /* check bulk_sec_desc data */
if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
if (msg->lm_bufcount < (offset + 1)) {
- CERROR("no bulk checksum included\n");
+ CERROR("missing bulk sec descriptor\n");
RETURN(-EINVAL);
}
*major = gss_unseal_msg(gctx->gsc_mechctx, msg,
&msglen, req->rq_reqdata_len);
- if (*major != GSS_S_COMPLETE)
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap request: %x\n", *major);
RETURN(-EACCES);
+ }
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
return gss_mech_payload(NULL, msgsize, privacy);
}
+/*
+ * Return the size in bytes of the bulk security descriptor segment
+ * the server must reserve in a reply buffer.
+ *
+ * \param gctx  server gss context (not referenced by the body; kept for
+ *              interface symmetry with the client variant — TODO confirm)
+ * \param flvr  sptlrpc flavor, selects the bulk service
+ * \param read  non-zero for a bulk read, 0 for a bulk write
+ *
+ * Extra GSS payload beyond the bare ptlrpc_bulk_sec_desc is reserved
+ * only for bulk reads. Note this calls gss_mech_payload() with a NULL
+ * mech ctx, unlike the client-side gss_cli_bulk_payload() — presumably
+ * a mechanism-independent estimate; verify against gss_mech_payload().
+ */
+static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
+ struct sptlrpc_flavor *flvr,
+ int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
+ if (read) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ /* no per-bulk GSS payload */
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_mech_payload(NULL, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_mech_payload(NULL, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ /* AUTH bulk service is not handled here */
+ LBUG();
+ }
+ }
+
+ return payload;
+}
+
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
struct gss_svc_reqctx *grctx;
RETURN(-EPROTO);
}
- svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
early = (req->rq_packed_final == 0);
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
LASSERT(grctx->src_reqbsd);
bsd_off = ibufcnt;
- ibuflens[ibufcnt++] = bulk_sec_desc_size(
- grctx->src_reqbsd->bsd_hash_alg,
- 0, req->rq_bulk_read);
+ ibuflens[ibufcnt++] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
+ req->rq_bulk_read);
}
txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
LASSERT(grctx->src_reqbsd);
bsd_off = bufcnt;
- buflens[bufcnt] = bulk_sec_desc_size(
- grctx->src_reqbsd->bsd_hash_alg,
- 0, req->rq_bulk_read);
+ buflens[bufcnt] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
+ req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);
/* reply offset */
- if (likely(req->rq_packed_final))
+ if (req->rq_packed_final &&
+ (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))
req->rq_reply_off = gss_at_reply_off_priv;
else
req->rq_reply_off = 0;
gss_exit_lproc();
}
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("GSS security policy for Lustre");
MODULE_LICENSE("GPL");