*
* Modifications for Lustre
*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Author: Eric Mei <ericm@clusterfs.com>
*/
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/random.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
__swab32s(&ghdr->gh_handle.len);
}
-struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment)
+struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
+ int swabbed)
{
struct gss_header *ghdr;
- ghdr = lustre_swab_buf(msg, segment, sizeof(*ghdr),
- gss_header_swabber);
+ ghdr = lustre_msg_buf(msg, segment, sizeof(*ghdr));
+ if (ghdr == NULL)
+ return NULL;
+
+ if (swabbed)
+ gss_header_swabber(ghdr);
- if (ghdr &&
- sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
- CERROR("gss header require length %u, now %u received\n",
- (unsigned int) sizeof(*ghdr) + ghdr->gh_handle.len,
+ if (sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
+ CERROR("gss header has length %d, now %u received\n",
+ (int) sizeof(*ghdr) + ghdr->gh_handle.len,
msg->lm_buflens[segment]);
return NULL;
}
return ghdr;
}
+#if 0
static
void gss_netobj_swabber(netobj_t *obj)
{
return obj;
}
+#endif
/*
* payload should be obtained from mechanism. but currently since we
int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount));
- if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+ if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
if (!ctx->cc_early_expire)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
ctx->cc_expire == 0 ? 0 :
cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+ sptlrpc_cli_ctx_wakeup(ctx);
return 1;
}
* someone else, in which case nobody will make further use
* of it. we don't care, and mark it UPTODATE will help
* destroying server side context when it is destroyed. */
- set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
if (sec_is_reverse(ctx->cc_sec)) {
CWARN("server installed reverse ctx %p idx "LPX64", "
gss_sec_install_rctx(ctx->cc_sec->ps_import,
ctx->cc_sec, ctx);
}
+
+ sptlrpc_cli_ctx_wakeup(ctx);
}
static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
*/
switch (phase) {
case 0:
- if (test_bit(seq_num % win_size, window))
+ if (cfs_test_bit(seq_num % win_size, window))
goto replay;
break;
case 1:
{
int rc = 0;
- spin_lock(&ssd->ssd_lock);
+ cfs_spin_lock(&ssd->ssd_lock);
if (set == 0) {
/*
gss_stat_oos_record_svc(2, 0);
}
exit:
- spin_unlock(&ssd->ssd_lock);
+ cfs_spin_unlock(&ssd->ssd_lock);
return rc;
}
flags |= LUSTRE_GSS_PACK_USER;
redo:
- seq = atomic_inc_return(&gctx->gc_seq);
+ seq = cfs_atomic_inc_return(&gctx->gc_seq);
rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
ctx->cc_sec->ps_part,
*
* Note: null mode doesn't check sequence number. */
if (svc != SPTLRPC_SVC_NULL &&
- atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
- int behind = atomic_read(&gctx->gc_seq) - seq;
+ cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
gss_stat_oos_record_cli(behind);
CWARN("req %p: %u behind, retry signing\n", req, behind);
struct gss_header *ghdr, *reqhdr;
struct lustre_msg *msg = req->rq_repdata;
__u32 major;
- int pack_bulk, rc = 0;
+ int pack_bulk, swabbed, rc = 0;
ENTRY;
LASSERT(req->rq_cli_ctx == ctx);
RETURN(-EPROTO);
}
- ghdr = gss_swab_header(msg, 0);
+ swabbed = ptlrpc_rep_need_swab(req);
+
+ ghdr = gss_swab_header(msg, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(-EPROTO);
RETURN(-EPROTO);
}
- if (lustre_msg_swabbed(msg))
+ if (swabbed)
gss_header_swabber(ghdr);
major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
RETURN(-EPROTO);
}
- rc = bulk_sec_desc_unpack(msg, 2);
+ rc = bulk_sec_desc_unpack(msg, 2, swabbed);
if (rc) {
CERROR("unpack bulk desc: %d\n", rc);
RETURN(rc);
ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
redo:
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
/* buffer objects */
hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
LASSERT(token.len <= buflens[1]);
/* see explain in gss_cli_ctx_sign() */
- if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+ if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
GSS_SEQ_REPACK_THRESHOLD)) {
- int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+ int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
gss_stat_oos_record_cli(behind);
CWARN("req %p: %u behind, retry sealing\n", req, behind);
- ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
goto redo;
}
struct gss_cli_ctx *gctx;
struct gss_header *ghdr;
struct lustre_msg *msg = req->rq_repdata;
- int msglen, pack_bulk, rc;
+ int msglen, pack_bulk, swabbed, rc;
__u32 major;
ENTRY;
LASSERT(msg);
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ swabbed = ptlrpc_rep_need_swab(req);
- ghdr = gss_swab_header(msg, 0);
+ ghdr = gss_swab_header(msg, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(-EPROTO);
RETURN(-EPROTO);
}
- if (lustre_msg_swabbed(msg))
+ if (swabbed)
gss_header_swabber(ghdr);
/* use rq_repdata_len as buffer size, which assume unseal
break;
}
- if (lustre_unpack_msg(msg, msglen)) {
+ swabbed = __lustre_unpack_msg(msg, msglen);
+ if (swabbed < 0) {
CERROR("Failed to unpack after decryption\n");
RETURN(-EPROTO);
}
}
/* bulk checksum is the last segment */
- if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1))
+ if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
+ swabbed))
RETURN(-EPROTO);
}
return -EOPNOTSUPP;
}
- spin_lock_init(&gsec->gs_lock);
+ cfs_spin_lock_init(&gsec->gs_lock);
gsec->gs_rvs_hdl = 0ULL;
/* initialize upper ptlrpc_sec */
sec = &gsec->gs_base;
sec->ps_policy = policy;
- atomic_set(&sec->ps_refcount, 0);
- atomic_set(&sec->ps_nctx, 0);
+ cfs_atomic_set(&sec->ps_refcount, 0);
+ cfs_atomic_set(&sec->ps_nctx, 0);
sec->ps_id = sptlrpc_get_next_secid();
sec->ps_flvr = *sf;
sec->ps_import = class_import_get(imp);
- spin_lock_init(&sec->ps_lock);
+ cfs_spin_lock_init(&sec->ps_lock);
CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
ENTRY;
LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
if (gsec->gs_mech) {
lgss_mech_put(gsec->gs_mech);
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
gctx->gc_win = 0;
- atomic_set(&gctx->gc_seq, 0);
+ cfs_atomic_set(&gctx->gc_seq, 0);
CFS_INIT_HLIST_NODE(&ctx->cc_cache);
- atomic_set(&ctx->cc_refcount, 0);
+ cfs_atomic_set(&ctx->cc_refcount, 0);
ctx->cc_sec = sec;
ctx->cc_ops = ctxops;
ctx->cc_expire = 0;
ctx->cc_flags = PTLRPC_CTX_NEW;
ctx->cc_vcred = *vcred;
- spin_lock_init(&ctx->cc_lock);
+ cfs_spin_lock_init(&ctx->cc_lock);
CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
/* take a ref on belonging sec, balanced in ctx destroying */
- atomic_inc(&sec->ps_refcount);
+ cfs_atomic_inc(&sec->ps_refcount);
/* statistic only */
- atomic_inc(&sec->ps_nctx);
+ cfs_atomic_inc(&sec->ps_nctx);
CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
sec->ps_policy->sp_name, ctx->cc_sec,
{
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
LASSERT(ctx->cc_sec == sec);
/*
* asynchronous which finished by request_out_callback(). so
* we add refcount, whoever drop finally drop the refcount to
* 0 should responsible for the rest of destroy. */
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
gss_do_ctx_fini_rpc(gctx);
gss_cli_ctx_finalize(gctx);
- if (!atomic_dec_and_test(&ctx->cc_refcount))
+ if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
return 1;
}
LASSERT(privacy);
LASSERT(req->rq_clrbuf_len);
- if (req->rq_pool &&
- req->rq_clrbuf >= req->rq_reqbuf &&
- (char *) req->rq_clrbuf <
+ if (req->rq_pool == NULL ||
+ req->rq_clrbuf < req->rq_reqbuf ||
+ (char *) req->rq_clrbuf >=
(char *) req->rq_reqbuf + req->rq_reqbuf_len)
- goto release_reqbuf;
+ OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
- OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
req->rq_clrbuf = NULL;
req->rq_clrbuf_len = 0;
req->rq_reqbuf_len = 0;
}
- req->rq_reqmsg = NULL;
-
EXIT;
}
OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
req->rq_repbuf = NULL;
req->rq_repbuf_len = 0;
-
- req->rq_repmsg = NULL;
+ req->rq_repdata = NULL;
+ req->rq_repdata_len = 0;
}
static int get_enlarged_msgsize(struct lustre_msg *msg,
static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
- atomic_inc(&grctx->src_base.sc_refcount);
+ LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+ cfs_atomic_inc(&grctx->src_base.sc_refcount);
}
static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
- LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
- if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+ if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
gss_svc_reqctx_free(grctx);
}
rawobj_t uuid_obj, rvs_hdl, in_token;
__u32 lustre_svc;
__u32 *secdata, seclen;
- int rc;
+ int swabbed, rc;
ENTRY;
CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
RETURN(SECSVC_DROP);
}
+ swabbed = ptlrpc_req_need_swab(req);
+
/* ctx initiate payload is in last segment */
secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
CERROR("missing user descriptor\n");
RETURN(SECSVC_DROP);
}
- if (sptlrpc_unpack_user_desc(reqbuf, 2)) {
+ if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(SECSVC_DROP);
}
struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
int offset = 2;
+ int swabbed;
ENTRY;
*major = GSS_S_COMPLETE;
}
verified:
+ swabbed = ptlrpc_req_need_swab(req);
+
/* user descriptor */
if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (msg->lm_bufcount < (offset + 1)) {
RETURN(-EINVAL);
}
- if (sptlrpc_unpack_user_desc(msg, offset)) {
+ if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(-EINVAL);
}
RETURN(-EINVAL);
}
- if (bulk_sec_desc_unpack(msg, offset))
+ if (bulk_sec_desc_unpack(msg, offset, swabbed))
RETURN(-EINVAL);
req->rq_pack_bulk = 1;
{
struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
- int msglen, offset = 1;
+ int swabbed, msglen, offset = 1;
ENTRY;
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
RETURN(-EACCES);
}
- if (lustre_unpack_msg(msg, msglen)) {
+ swabbed = __lustre_unpack_msg(msg, msglen);
+ if (swabbed < 0) {
CERROR("Failed to unpack after decryption\n");
RETURN(-EINVAL);
}
RETURN(-EINVAL);
}
- if (sptlrpc_unpack_user_desc(msg, offset)) {
+ if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(-EINVAL);
}
RETURN(-EINVAL);
}
- if (bulk_sec_desc_unpack(msg, offset))
+ if (bulk_sec_desc_unpack(msg, offset, swabbed))
RETURN(-EINVAL);
req->rq_pack_bulk = 1;
CERROR("missing user descriptor, ignore it\n");
RETURN(SECSVC_OK);
}
- if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2)) {
+ if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
+ ptlrpc_req_need_swab(req))) {
CERROR("Mal-formed user descriptor, ignore it\n");
RETURN(SECSVC_OK);
}
struct gss_header *ghdr;
struct gss_svc_reqctx *grctx;
struct gss_wire_ctx *gw;
- int rc;
+ int swabbed, rc;
ENTRY;
LASSERT(req->rq_reqbuf);
RETURN(SECSVC_DROP);
}
- ghdr = gss_swab_header(req->rq_reqbuf, 0);
+ swabbed = ptlrpc_req_need_swab(req);
+
+ ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
RETURN(SECSVC_DROP);
RETURN(SECSVC_DROP);
grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
- atomic_set(&grctx->src_base.sc_refcount, 1);
+ cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
req->rq_svc_ctx = &grctx->src_base;
gw = &grctx->src_wirectx;
rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);
/* keep original wire header which subject to checksum verification */
- if (lustre_msg_swabbed(req->rq_reqbuf))
+ if (swabbed)
gss_header_swabber(ghdr);
switch(ghdr->gh_proc) {
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
* each reverse root ctx will record its latest sequence number on its
* buddy svcctx before being destroyed, so here we continue to use it.
*/
- atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+ cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
CERROR("failed to dup svc handle\n");