-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* Modifications for Lustre
*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
+ * Copyright (c) 2011, 2015, Intel Corporation.
+ *
* Author: Eric Mei <ericm@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
-#else
-#include <liblustre.h>
-#endif
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <obd_cksum.h>
-#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>
#include "gss_api.h"
#include <linux/crypto.h>
+#include <linux/crc32.h>
/*
* early replies have a fixed size in privacy and integrity mode, respectively.
return ghdr;
}
-#if 0
-static
-void gss_netobj_swabber(netobj_t *obj)
-{
- __swab32s(&obj->len);
-}
-
-netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
-{
- netobj_t *obj;
-
- obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
- if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
- CERROR("netobj require length %u but only %u received\n",
- (unsigned int) sizeof(*obj) + obj->len,
- msg->lm_buflens[segment]);
- return NULL;
- }
-
- return obj;
-}
-#endif
-
/*
* payload should be obtained from the mechanism, but currently since we
* only support kerberos, we can simply use a fixed value.
int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(cfs_atomic_read(&ctx->cc_refcount));
+ LASSERT(atomic_read(&ctx->cc_refcount));
- if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
- if (!ctx->cc_early_expire)
- cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+ if (!ctx->cc_early_expire)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
- ctx->cc_expire,
- ctx->cc_expire == 0 ? 0 :
- cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+ CWARN("ctx %p(%u->%s) get expired: %lld(%+llds)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_expire,
+ ctx->cc_expire == 0 ? 0 :
+ ctx->cc_expire - ktime_get_real_seconds());
- sptlrpc_cli_ctx_wakeup(ctx);
- return 1;
- }
+ sptlrpc_cli_ctx_wakeup(ctx);
+ return 1;
+ }
- return 0;
+ return 0;
}
/*
return 0;
/* check real expiration */
- if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
+ if (ctx->cc_expire > ktime_get_real_seconds())
return 0;
cli_ctx_expire(ctx);
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
- struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
- unsigned long ctx_expiry;
+ struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
+ time64_t ctx_expiry;
if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
CERROR("ctx %p(%u): unable to inquire, expire it now\n",
/* At this point this ctx might have been marked as dead by
* someone else, in which case nobody will make further use
* of it. we don't care; marking it UPTODATE will help
- * destroying server side context when it be destroied. */
- cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
-
- if (sec_is_reverse(ctx->cc_sec)) {
- CWARN("server installed reverse ctx %p idx "LPX64", "
- "expiry %lu(%+lds)\n", ctx,
- gss_handle_to_u64(&gctx->gc_handle),
- ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
+ * destroy the server side context when this ctx is destroyed. */
+ set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+
+ if (sec_is_reverse(ctx->cc_sec)) {
+ CWARN("server installed reverse ctx %p idx %#llx, "
+ "expiry %lld(%+llds)\n", ctx,
+ gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_expire,
+ ctx->cc_expire - ktime_get_real_seconds());
} else {
- CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
- "expiry %lu(%+lds)\n", ctx,
- gss_handle_to_u64(&gctx->gc_handle),
- ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
- ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
-
- /* install reverse svc ctx for root context */
- if (ctx->cc_vcred.vc_uid == 0)
- gss_sec_install_rctx(ctx->cc_sec->ps_import,
- ctx->cc_sec, ctx);
- }
+ CWARN("client refreshed ctx %p idx %#llx (%u->%s), "
+ "expiry %lld(%+llds)\n", ctx,
+ gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_expire,
+ ctx->cc_expire - ktime_get_real_seconds());
+
+ /* install reverse svc ctx for root context */
+ if (ctx->cc_vcred.vc_uid == 0)
+ gss_sec_install_rctx(ctx->cc_sec->ps_import,
+ ctx->cc_sec, ctx);
+ }
sptlrpc_cli_ctx_wakeup(ctx);
}
rawobj_free(&gctx->gc_handle);
}
-/*
+/**
* Based on sequence number algorithm as specified in RFC 2203.
*
- * modified for our own problem: arriving request has valid sequence number,
+ * Modified for our own problem: arriving request has valid sequence number,
* but unwrapping the request might take a long time, after which its sequence
* is not valid anymore (falls behind the window). It rarely happens, mostly
* under extreme load.
*
- * note we should not check sequence before verify the integrity of incoming
+ * Note we should not check the sequence before verifying the integrity of an incoming
* request, because just one attacking request with high sequence number might
- * cause all following request be dropped.
+ * cause all following requests to be dropped.
*
- * so here we use a multi-phase approach: prepare 2 sequence windows,
+ * So here we use a multi-phase approach: prepare 2 sequence windows,
* "main window" for normal sequence and "back window" for fall behind sequence.
* and 3-phase checking mechanism:
- * 0 - before integrity verification, perform a initial sequence checking in
- * main window, which only try and don't actually set any bits. if the
- * sequence is high above the window or fit in the window and the bit
+ * 0 - before integrity verification, perform an initial sequence check in
+ * main window, which only tries and doesn't actually set any bits. if the
+ * sequence is high above the window or fits in the window and the bit
* is 0, then accept and proceed to integrity verification. otherwise
* reject this sequence.
* 1 - after integrity verification, check in main window again. if this
- * sequence is high above the window or fit in the window and the bit
- * is 0, then set the bit and accept; if it fit in the window but bit
- * already set, then reject; if it fall behind the window, then proceed
+ * sequence is high above the window or fits in the window and the bit
+ * is 0, then set the bit and accept; if it fits in the window but bit
+ * already set, then reject; if it falls behind the window, then proceed
* to phase 2.
- * 2 - check in back window. if it is high above the window or fit in the
+ * 2 - check in back window. if it is high above the window or fits in the
* window and the bit is 0, then set the bit and accept. otherwise reject.
*
- * return value:
- * 1: looks like a replay
- * 0: is ok
- * -1: is a replay
+ * \return 1: looks like a replay
+ * \return 0: is ok
+ * \return -1: is a replay
*
- * note phase 0 is necessary, because otherwise replay attacking request of
+ * Note phase 0 is necessary, because otherwise a replay attack request whose
* sequence falls between the 2 windows can't be detected.
*
- * this mechanism can't totally solve the problem, but could help much less
+ * This mechanism can't totally solve the problem, but could help reduce the
* number of valid requests being dropped.
*/
static
*/
switch (phase) {
case 0:
- if (cfs_test_bit(seq_num % win_size, window))
+ if (test_bit(seq_num % win_size, window))
goto replay;
break;
case 1:
*/
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
- int rc = 0;
+ int rc = 0;
- cfs_spin_lock(&ssd->ssd_lock);
+ spin_lock(&ssd->ssd_lock);
if (set == 0) {
/*
gss_stat_oos_record_svc(2, 0);
}
exit:
- cfs_spin_unlock(&ssd->ssd_lock);
- return rc;
+ spin_unlock(&ssd->ssd_lock);
+ return rc;
}
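/*
 * Editor's sketch (not part of the patch): a minimal, single-window model of
 * the bitmap check described above, assuming the window covers the win_size
 * sequence numbers up to and including *max_seq. The real gss_check_seq_num()
 * keeps two such windows (main and back) under ssd_lock and runs the 3-phase
 * protocol; seq_win_check() is a hypothetical name. Returns -1 when the
 * sequence fell behind the window, 1 when it looks like a replay, 0 when ok.
 */
static int seq_win_check(unsigned long *window, __u32 win_size,
                         __u32 *max_seq, __u32 seq_num, int set)
{
        if (seq_num <= *max_seq) {
                if (*max_seq - seq_num >= win_size)
                        return -1;              /* fell behind the window */
                if (test_bit(seq_num % win_size, window))
                        return 1;               /* bit set: looks like a replay */
                if (set)
                        set_bit(seq_num % win_size, window);
                return 0;
        }

        /* high above the window: accept, and slide the window forward.
         * with set == 0 (phase 0) this only "tries" and changes nothing. */
        if (set) {
                __u32 nr = seq_num - *max_seq;
                __u32 i;

                if (nr > win_size)
                        nr = win_size;          /* the whole window is stale */
                for (i = 1; i <= nr; i++)
                        clear_bit((*max_seq + i) % win_size, window);
                set_bit(seq_num % win_size, window);
                *max_seq = seq_num;
        }
        return 0;
}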
/***************************************
void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
{
- buf[0] = '\0';
+ buf[0] = '\0';
- if (flags & PTLRPC_CTX_NEW)
- strncat(buf, "new,", bufsize);
- if (flags & PTLRPC_CTX_UPTODATE)
- strncat(buf, "uptodate,", bufsize);
- if (flags & PTLRPC_CTX_DEAD)
- strncat(buf, "dead,", bufsize);
- if (flags & PTLRPC_CTX_ERROR)
- strncat(buf, "error,", bufsize);
- if (flags & PTLRPC_CTX_CACHED)
- strncat(buf, "cached,", bufsize);
- if (flags & PTLRPC_CTX_ETERNAL)
- strncat(buf, "eternal,", bufsize);
- if (buf[0] == '\0')
- strncat(buf, "-,", bufsize);
-
- buf[strlen(buf) - 1] = '\0';
+ if (flags & PTLRPC_CTX_NEW)
+ strlcat(buf, "new,", bufsize);
+ if (flags & PTLRPC_CTX_UPTODATE)
+ strlcat(buf, "uptodate,", bufsize);
+ if (flags & PTLRPC_CTX_DEAD)
+ strlcat(buf, "dead,", bufsize);
+ if (flags & PTLRPC_CTX_ERROR)
+ strlcat(buf, "error,", bufsize);
+ if (flags & PTLRPC_CTX_CACHED)
+ strlcat(buf, "cached,", bufsize);
+ if (flags & PTLRPC_CTX_ETERNAL)
+ strlcat(buf, "eternal,", bufsize);
+ if (buf[0] == '\0')
+ strlcat(buf, "-,", bufsize);
+
+ /* strip the trailing comma */
+ buf[strlen(buf) - 1] = '\0';
}
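/*
 * Editor's sketch (not part of the patch): typical use of the helper above
 * with a caller-supplied buffer. gss_ctx_dump_flags() is a hypothetical
 * wrapper; the fields and macros it uses all appear elsewhere in this file.
 */
static void gss_ctx_dump_flags(struct ptlrpc_cli_ctx *ctx)
{
        char flags_str[40];

        gss_cli_ctx_flags2str(ctx->cc_flags, flags_str, sizeof(flags_str));
        CDEBUG(D_SEC, "ctx %p(%u->%s): flags %s\n", ctx,
               ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), flags_str);
}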
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
flags |= LUSTRE_GSS_PACK_USER;
redo:
- seq = cfs_atomic_inc_return(&gctx->gc_seq);
-
- rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
- ctx->cc_sec->ps_part,
- flags, gctx->gc_proc, seq, svc,
- &gctx->gc_handle);
- if (rc < 0)
- RETURN(rc);
-
- /* gss_sign_msg() msg might take long time to finish, in which period
- * more rpcs could be wrapped up and sent out. if we found too many
- * of them we should repack this rpc, because sent it too late might
- * lead to the sequence number fall behind the window on server and
- * be dropped. also applies to gss_cli_ctx_seal().
- *
- * Note: null mode dosen't check sequence number. */
- if (svc != SPTLRPC_SVC_NULL &&
- cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
- int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
-
- gss_stat_oos_record_cli(behind);
- CWARN("req %p: %u behind, retry signing\n", req, behind);
- goto redo;
- }
-
- req->rq_reqdata_len = rc;
- RETURN(0);
+ seq = atomic_inc_return(&gctx->gc_seq);
+
+ rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
+ ctx->cc_sec->ps_part,
+ flags, gctx->gc_proc, seq, svc,
+ &gctx->gc_handle);
+ if (rc < 0)
+ RETURN(rc);
+
+ /* gss_sign_msg() might take a long time to finish, during which
+ * more rpcs could be wrapped up and sent out. if we find too many
+ * of them we should repack this rpc, because sending it too late
+ * might lead to the sequence number falling behind the window on
+ * the server and the request being dropped. also applies to
+ * gss_cli_ctx_seal().
+ *
+ * Note: null mode doesn't check sequence number. */
+ if (svc != SPTLRPC_SVC_NULL &&
+ atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ int behind = atomic_read(&gctx->gc_seq) - seq;
+
+ gss_stat_oos_record_cli(behind);
+ CWARN("req %p: %u behind, retry signing\n", req, behind);
+ goto redo;
+ }
+
+ req->rq_reqdata_len = rc;
+ RETURN(0);
}
static
errhdr = (struct gss_err_header *) ghdr;
- CWARN("req x"LPU64"/t"LPU64", ctx %p idx "LPX64"(%u->%s): "
+ CWARN("req x%llu/t%llu, ctx %p idx %#llx(%u->%s): "
"%sserver respond (%08x/%08x)\n",
req->rq_xid, req->rq_transno, ctx,
gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
redo:
- ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
+ ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
/* buffer objects */
hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
}
LASSERT(token.len <= buflens[1]);
- /* see explain in gss_cli_ctx_sign() */
- if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
- GSS_SEQ_REPACK_THRESHOLD)) {
- int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+ /* see explanation in gss_cli_ctx_sign() */
+ if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+ GSS_SEQ_REPACK_THRESHOLD)) {
+ int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
- gss_stat_oos_record_cli(behind);
- CWARN("req %p: %u behind, retry sealing\n", req, behind);
+ gss_stat_oos_record_cli(behind);
+ CWARN("req %p: %u behind, retry sealing\n", req, behind);
- ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
- goto redo;
- }
+ ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ goto redo;
+ }
- /* now set the final wire data length */
- req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
- RETURN(0);
+ /* now set the final wire data length */
+ req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len, 0);
+ RETURN(0);
err_free:
- if (!req->rq_pool) {
- OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
- }
- RETURN(rc);
+ if (!req->rq_pool) {
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+ }
+ RETURN(rc);
}
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
return -EOPNOTSUPP;
}
- cfs_spin_lock_init(&gsec->gs_lock);
+ spin_lock_init(&gsec->gs_lock);
gsec->gs_rvs_hdl = 0ULL;
- /* initialize upper ptlrpc_sec */
- sec = &gsec->gs_base;
- sec->ps_policy = policy;
- cfs_atomic_set(&sec->ps_refcount, 0);
- cfs_atomic_set(&sec->ps_nctx, 0);
- sec->ps_id = sptlrpc_get_next_secid();
- sec->ps_flvr = *sf;
- sec->ps_import = class_import_get(imp);
- cfs_spin_lock_init(&sec->ps_lock);
- CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+ /* initialize upper ptlrpc_sec */
+ sec = &gsec->gs_base;
+ sec->ps_policy = policy;
+ atomic_set(&sec->ps_refcount, 0);
+ atomic_set(&sec->ps_nctx, 0);
+ sec->ps_id = sptlrpc_get_next_secid();
+ sec->ps_flvr = *sf;
+ sec->ps_import = class_import_get(imp);
+ spin_lock_init(&sec->ps_lock);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
sec->ps_gc_interval = GSS_GC_INTERVAL;
void gss_sec_destroy_common(struct gss_sec *gsec)
{
- struct ptlrpc_sec *sec = &gsec->gs_base;
- ENTRY;
+ struct ptlrpc_sec *sec = &gsec->gs_base;
+ ENTRY;
- LASSERT(sec->ps_import);
- LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
- LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(sec->ps_import);
+ LASSERT(atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(atomic_read(&sec->ps_nctx) == 0);
- if (gsec->gs_mech) {
- lgss_mech_put(gsec->gs_mech);
- gsec->gs_mech = NULL;
- }
+ if (gsec->gs_mech) {
+ lgss_mech_put(gsec->gs_mech);
+ gsec->gs_mech = NULL;
+ }
- class_import_put(sec->ps_import);
+ class_import_put(sec->ps_import);
- if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
- sptlrpc_enc_pool_del_user();
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
+ sptlrpc_enc_pool_del_user();
- EXIT;
+ EXIT;
}
void gss_sec_kill(struct ptlrpc_sec *sec)
{
- sec->ps_dying = 1;
+ sec->ps_dying = 1;
}
int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
struct ptlrpc_ctx_ops *ctxops,
struct vfs_cred *vcred)
{
- struct gss_cli_ctx *gctx = ctx2gctx(ctx);
-
- gctx->gc_win = 0;
- cfs_atomic_set(&gctx->gc_seq, 0);
-
- CFS_INIT_HLIST_NODE(&ctx->cc_cache);
- cfs_atomic_set(&ctx->cc_refcount, 0);
- ctx->cc_sec = sec;
- ctx->cc_ops = ctxops;
- ctx->cc_expire = 0;
- ctx->cc_flags = PTLRPC_CTX_NEW;
- ctx->cc_vcred = *vcred;
- cfs_spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
-
- /* take a ref on belonging sec, balanced in ctx destroying */
- cfs_atomic_inc(&sec->ps_refcount);
- /* statistic only */
- cfs_atomic_inc(&sec->ps_nctx);
-
- CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
- sec->ps_policy->sp_name, ctx->cc_sec,
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- return 0;
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+
+ gctx->gc_win = 0;
+ atomic_set(&gctx->gc_seq, 0);
+
+ INIT_HLIST_NODE(&ctx->cc_cache);
+ atomic_set(&ctx->cc_refcount, 0);
+ ctx->cc_sec = sec;
+ ctx->cc_ops = ctxops;
+ ctx->cc_expire = 0;
+ ctx->cc_flags = PTLRPC_CTX_NEW;
+ ctx->cc_vcred = *vcred;
+ spin_lock_init(&ctx->cc_lock);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
+
+ /* take a ref on the owning sec; balanced when the ctx is destroyed */
+ atomic_inc(&sec->ps_refcount);
+ /* statistics only */
+ atomic_inc(&sec->ps_nctx);
+
+ CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ return 0;
}
/*
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx)
{
- struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(ctx->cc_sec == sec);
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(ctx->cc_sec == sec);
- /*
- * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
- * this is to avoid potential problems of client side reverse svc ctx
- * be mis-destroyed in various recovery senarios. anyway client can
- * manage its reverse ctx well by associating it with its buddy ctx.
- */
- if (sec_is_reverse(sec))
- ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
+ /*
+ * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
+ * this is to avoid potential problems of the client side reverse svc ctx
+ * being mis-destroyed in various recovery scenarios. anyway the client can
+ * manage its reverse ctx well by associating it with its buddy ctx.
+ */
+ if (sec_is_reverse(sec))
+ ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
- if (gctx->gc_mechctx) {
- /* the final context fini rpc will use this ctx too, and it's
- * asynchronous which finished by request_out_callback(). so
- * we add refcount, whoever drop finally drop the refcount to
- * 0 should responsible for the rest of destroy. */
- cfs_atomic_inc(&ctx->cc_refcount);
+ if (gctx->gc_mechctx) {
+ /* the final context fini rpc will use this ctx too, and it's
+ * asynchronous, finished by request_out_callback(). so we add
+ * a refcount; whoever finally drops the refcount to 0 is
+ * responsible for the rest of the destroy. */
+ atomic_inc(&ctx->cc_refcount);
- gss_do_ctx_fini_rpc(gctx);
- gss_cli_ctx_finalize(gctx);
+ gss_do_ctx_fini_rpc(gctx);
+ gss_cli_ctx_finalize(gctx);
- if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
- return 1;
- }
+ if (!atomic_dec_and_test(&ctx->cc_refcount))
+ return 1;
+ }
- if (sec_is_reverse(sec))
- CWARN("reverse sec %p: destroy ctx %p\n",
- ctx->cc_sec, ctx);
- else
- CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
- sec->ps_policy->sp_name, ctx->cc_sec,
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ if (sec_is_reverse(sec))
+ CWARN("reverse sec %p: destroy ctx %p\n",
+ ctx->cc_sec, ctx);
+ else
+ CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- return 0;
+ return 0;
}
static
if (newbuf == NULL)
RETURN(-ENOMEM);
+ /* Must lock this, so that otherwise unprotected change of
+ * rq_reqmsg is not racing with parallel processing of
+ * imp_replay_list traversing threads. See LU-3333.
+ * This is a band-aid at best; we really need to deal with this
+ * in the request enlarging code, before the unpacking that is
+ * already there. */
+ if (req->rq_import)
+ spin_lock(&req->rq_import->imp_lock);
+
memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
req->rq_reqbuf = newbuf;
req->rq_reqbuf_len = newbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
+
+ if (req->rq_import)
+ spin_unlock(&req->rq_import->imp_lock);
}
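/*
 * Editor's sketch (not part of the patch): the LU-3333 race guarded against
 * above looks roughly like this on the reader side -- a replay-list walker
 * dereferences rq_reqmsg under imp_lock, so the buffer swap must hold the
 * same lock. walk_replay_list() is a hypothetical illustration.
 */
static void walk_replay_list(struct obd_import *imp)
{
        struct ptlrpc_request *req;

        spin_lock(&imp->imp_lock);
        list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list)
                /* dereferences rq_reqmsg, hence the lock */
                CDEBUG(D_SEC, "replay xid %llu msg %p\n",
                       req->rq_xid, req->rq_reqmsg);
        spin_unlock(&imp->imp_lock);
}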
/* do enlargement, from wrapper to embedded, from end to begin */
if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
void *src, *dst;
+ if (req->rq_import)
+ spin_lock(&req->rq_import->imp_lock);
/* move clear text backward. */
src = req->rq_clrbuf;
dst = (char *) req->rq_reqbuf + newcipbuf_size;
req->rq_clrbuf = (struct lustre_msg *) dst;
req->rq_clrbuf_len = newclrbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+
+ if (req->rq_import)
+ spin_unlock(&req->rq_import->imp_lock);
} else {
/* sadly we have to split out the clear buffer */
LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
if (newclrbuf == NULL)
RETURN(-ENOMEM);
+ /* Must lock this, so that otherwise unprotected change of
+ * rq_reqmsg is not racing with parallel processing of
+ * imp_replay_list traversing threads. See LU-3333.
+ * This is a band-aid at best; we really need to deal with this
+ * in the request enlarging code, before the unpacking that is
+ * already there. */
+ if (req->rq_import)
+ spin_lock(&req->rq_import->imp_lock);
+
memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
if (req->rq_reqbuf == NULL ||
req->rq_clrbuf = newclrbuf;
req->rq_clrbuf_len = newclrbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+
+ if (req->rq_import)
+ spin_unlock(&req->rq_import->imp_lock);
}
_sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
static inline
void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
{
- LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
- cfs_atomic_inc(&grctx->src_base.sc_refcount);
+ LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ atomic_inc(&grctx->src_base.sc_refcount);
}
static inline
void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
{
- LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+ LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
- if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
- gss_svc_reqctx_free(grctx);
+ if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+ gss_svc_reqctx_free(grctx);
}
static
LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
- /* embedded lustre_msg might have been shrinked */
+ /* embedded lustre_msg might have been shrunk */
if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
if (rc != SECSVC_OK)
RETURN(rc);
- if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
- grctx->src_ctx->gsc_usr_root)
- CWARN("create svc ctx %p: user from %s authenticated as %s\n",
- grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
- grctx->src_ctx->gsc_usr_mds ? "mds" :
- (grctx->src_ctx->gsc_usr_oss ? "oss" : "root"));
- else
- CWARN("create svc ctx %p: accept user %u from %s\n",
- grctx->src_ctx, grctx->src_ctx->gsc_uid,
- libcfs_nid2str(req->rq_peer.nid));
+ if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
+ grctx->src_ctx->gsc_usr_root)
+ CWARN("create svc ctx %p: user from %s authenticated as %s\n",
+ grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
+ grctx->src_ctx->gsc_usr_root ? "root" :
+ (grctx->src_ctx->gsc_usr_mds ? "mds" :
+ (grctx->src_ctx->gsc_usr_oss ? "oss" : "null")));
+ else
+ CWARN("create svc ctx %p: accept user %u from %s\n",
+ grctx->src_ctx, grctx->src_ctx->gsc_uid,
+ libcfs_nid2str(req->rq_peer.nid));
if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (reqbuf->lm_bufcount < 4) {
if (rc == 0)
RETURN(SECSVC_OK);
- CERROR("svc %u failed: major 0x%08x: req xid "LPU64" ctx %p idx "
- LPX64"(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
+ CERROR("svc %u failed: major 0x%08x: req xid %llu ctx %p idx "
+ "%#llx(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
error:
if (gss_svc_verify_request(req, grctx, gw, &major))
RETURN(SECSVC_DROP);
- CWARN("destroy svc ctx %p idx "LPX64" (%u->%s)\n",
+ CWARN("destroy svc ctx %p idx %#llx (%u->%s)\n",
grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
if (!grctx)
RETURN(SECSVC_DROP);
- grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
- cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
- req->rq_svc_ctx = &grctx->src_base;
- gw = &grctx->src_wirectx;
+ grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
+ atomic_set(&grctx->src_base.sc_refcount, 1);
+ req->rq_svc_ctx = &grctx->src_base;
+ gw = &grctx->src_wirectx;
/* save wire context */
gw->gw_flags = ghdr->gh_flags;
LASSERT (grctx->src_ctx);
req->rq_auth_gss = 1;
- req->rq_auth_remote = grctx->src_ctx->gsc_remote;
req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
req->rq_auth_usr_ost = grctx->src_ctx->gsc_usr_oss;
req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
ENTRY;
/* get clear data length. note embedded lustre_msg might
- * have been shrinked */
+ * have been shrunk */
if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
else
void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
{
- LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
- gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
+ LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+ gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
}
int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
cli_gctx->gc_win = GSS_SEQ_WIN;
- /* The problem is the reverse ctx might get lost in some recovery
- * situations, and the same svc_ctx will be used to re-create it.
- * if there's callback be sentout before that, new reverse ctx start
- * with sequence 0 will lead to future callback rpc be treated as
- * replay.
- *
- * each reverse root ctx will record its latest sequence number on its
- * buddy svcctx before be destroied, so here we continue use it.
- */
- cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
-
- if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
- CERROR("failed to dup svc handle\n");
- goto err_out;
- }
+ /* The problem is the reverse ctx might get lost in some recovery
+ * situations, and the same svc_ctx will be used to re-create it.
+ * if a callback is sent out before that, a new reverse ctx starting
+ * with sequence 0 will lead to future callback rpcs being treated as
+ * replays.
+ *
+ * each reverse root ctx will record its latest sequence number on its
+ * buddy svcctx before being destroyed, so here we continue to use it.
+ */
+ atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+
+ if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
+ CERROR("failed to dup svc handle\n");
+ goto err_out;
+ }
if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
GSS_S_COMPLETE) {
gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
}
-int __init sptlrpc_gss_init(void)
+static int __init sptlrpc_gss_init(void)
{
int rc;
if (rc)
goto out_cli_upcall;
- rc = init_kerberos_module();
- if (rc)
- goto out_svc_upcall;
+ rc = init_null_module();
+ if (rc)
+ goto out_svc_upcall;
- /* register policy after all other stuff be intialized, because it
- * might be in used immediately after the registration. */
+ rc = init_kerberos_module();
+ if (rc)
+ goto out_null;
- rc = gss_init_keyring();
- if (rc)
- goto out_kerberos;
+ rc = init_sk_module();
+ if (rc)
+ goto out_kerberos;
-#ifdef HAVE_GSS_PIPEFS
- rc = gss_init_pipefs();
- if (rc)
- goto out_keyring;
-#endif
+ /* register policy after all other stuff is initialized, because it
+ * might be in use immediately after the registration. */
- gss_init_at_reply_offset();
+ rc = gss_init_keyring();
+ if (rc)
+ goto out_sk;
- return 0;
+ rc = gss_init_pipefs();
+ if (rc)
+ goto out_keyring;
-#ifdef HAVE_GSS_PIPEFS
-out_keyring:
- gss_exit_keyring();
-#endif
+ gss_init_at_reply_offset();
+
+ return 0;
+out_keyring:
+ gss_exit_keyring();
+out_sk:
+ cleanup_sk_module();
out_kerberos:
- cleanup_kerberos_module();
+ cleanup_kerberos_module();
+out_null:
+ cleanup_null_module();
out_svc_upcall:
- gss_exit_svc_upcall();
+ gss_exit_svc_upcall();
out_cli_upcall:
- gss_exit_cli_upcall();
+ gss_exit_cli_upcall();
out_lproc:
- gss_exit_lproc();
- return rc;
+ gss_exit_lproc();
+ return rc;
}
static void __exit sptlrpc_gss_exit(void)
{
gss_exit_keyring();
-#ifdef HAVE_GSS_PIPEFS
gss_exit_pipefs();
-#endif
cleanup_kerberos_module();
gss_exit_svc_upcall();
gss_exit_cli_upcall();
gss_exit_lproc();
}
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("GSS security policy for Lustre");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre GSS security policy");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
module_init(sptlrpc_gss_init);