*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*
* Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
-#else
-#include <liblustre.h>
-#endif
#include <obd.h>
#include <obd_class.h>
* destroying server side context when it be destroyed. */
set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- if (sec_is_reverse(ctx->cc_sec)) {
- CWARN("server installed reverse ctx %p idx "LPX64", "
- "expiry %lu(%+lds)\n", ctx,
- gss_handle_to_u64(&gctx->gc_handle),
- ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
+ if (sec_is_reverse(ctx->cc_sec)) {
+ CWARN("server installed reverse ctx %p idx "LPX64", "
+ "expiry %lu(%+lds)\n", ctx,
+ gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_expire,
+ cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
} else {
- CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
- "expiry %lu(%+lds)\n", ctx,
- gss_handle_to_u64(&gctx->gc_handle),
- ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
- ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
+ CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
+ "expiry %lu(%+lds)\n", ctx,
+ gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_expire,
+ cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
- /* install reverse svc ctx for root context */
- if (ctx->cc_vcred.vc_uid == 0)
- gss_sec_install_rctx(ctx->cc_sec->ps_import,
- ctx->cc_sec, ctx);
- }
+ /* install reverse svc ctx for root context */
+ if (ctx->cc_vcred.vc_uid == 0)
+ gss_sec_install_rctx(ctx->cc_sec->ps_import,
+ ctx->cc_sec, ctx);
+ }
sptlrpc_cli_ctx_wakeup(ctx);
}
rawobj_free(&gctx->gc_handle);
}
-/*
+/**
* Based on sequence number algorithm as specified in RFC 2203.
*
- * modified for our own problem: arriving request has valid sequence number,
+ * Modified for our own problem: arriving request has valid sequence number,
* but unwrapping request might cost a long time, after that its sequence
* are not valid anymore (fall behind the window). It rarely happen, mostly
* under extreme load.
*
- * note we should not check sequence before verify the integrity of incoming
+ * Note we should not check sequence before verifying the integrity of incoming
* request, because just one attacking request with high sequence number might
- * cause all following request be dropped.
+ * cause all following requests to be dropped.
*
- * so here we use a multi-phase approach: prepare 2 sequence windows,
+ * So here we use a multi-phase approach: prepare 2 sequence windows,
* "main window" for normal sequence and "back window" for fall behind sequence.
* and 3-phase checking mechanism:
- * 0 - before integrity verification, perform a initial sequence checking in
- * main window, which only try and don't actually set any bits. if the
- * sequence is high above the window or fit in the window and the bit
+ * 0 - before integrity verification, perform an initial sequence checking in
+ * main window, which only tries and doesn't actually set any bits. if the
+ * sequence is high above the window or fits in the window and the bit
* is 0, then accept and proceed to integrity verification. otherwise
* reject this sequence.
* 1 - after integrity verification, check in main window again. if this
- * sequence is high above the window or fit in the window and the bit
- * is 0, then set the bit and accept; if it fit in the window but bit
- * already set, then reject; if it fall behind the window, then proceed
+ * sequence is high above the window or fits in the window and the bit
+ * is 0, then set the bit and accept; if it fits in the window but bit
+ * already set, then reject; if it falls behind the window, then proceed
* to phase 2.
- * 2 - check in back window. if it is high above the window or fit in the
+ * 2 - check in back window. if it is high above the window or fits in the
* window and the bit is 0, then set the bit and accept. otherwise reject.
*
- * return value:
- * 1: looks like a replay
- * 0: is ok
- * -1: is a replay
+ * \return 1: looks like a replay
+ * \return 0: is ok
+ * \return -1: is a replay
*
- * note phase 0 is necessary, because otherwise replay attacking request of
+ * Note phase 0 is necessary, because otherwise replay attacking request of
* sequence which between the 2 windows can't be detected.
*
- * this mechanism can't totally solve the problem, but could help much less
+ * This mechanism can't totally solve the problem, but could help reduce the
 * number of valid requests being dropped.
*/
static
void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
{
- buf[0] = '\0';
-
- if (flags & PTLRPC_CTX_NEW)
- strncat(buf, "new,", bufsize);
- if (flags & PTLRPC_CTX_UPTODATE)
- strncat(buf, "uptodate,", bufsize);
- if (flags & PTLRPC_CTX_DEAD)
- strncat(buf, "dead,", bufsize);
- if (flags & PTLRPC_CTX_ERROR)
- strncat(buf, "error,", bufsize);
- if (flags & PTLRPC_CTX_CACHED)
- strncat(buf, "cached,", bufsize);
- if (flags & PTLRPC_CTX_ETERNAL)
- strncat(buf, "eternal,", bufsize);
- if (buf[0] == '\0')
- strncat(buf, "-,", bufsize);
+ buf[0] = '\0';
- buf[strlen(buf) - 1] = '\0';
+ if (flags & PTLRPC_CTX_NEW)
+ strlcat(buf, "new,", bufsize);
+ if (flags & PTLRPC_CTX_UPTODATE)
+ strlcat(buf, "uptodate,", bufsize);
+ if (flags & PTLRPC_CTX_DEAD)
+ strlcat(buf, "dead,", bufsize);
+ if (flags & PTLRPC_CTX_ERROR)
+ strlcat(buf, "error,", bufsize);
+ if (flags & PTLRPC_CTX_CACHED)
+ strlcat(buf, "cached,", bufsize);
+ if (flags & PTLRPC_CTX_ETERNAL)
+ strlcat(buf, "eternal,", bufsize);
+ if (buf[0] == '\0')
+ strlcat(buf, "-,", bufsize);
}
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
sec->ps_flvr = *sf;
sec->ps_import = class_import_get(imp);
spin_lock_init(&sec->ps_lock);
- CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
sec->ps_gc_interval = GSS_GC_INTERVAL;
gctx->gc_win = 0;
atomic_set(&gctx->gc_seq, 0);
- CFS_INIT_HLIST_NODE(&ctx->cc_cache);
+ INIT_HLIST_NODE(&ctx->cc_cache);
atomic_set(&ctx->cc_refcount, 0);
ctx->cc_sec = sec;
ctx->cc_ops = ctxops;
ctx->cc_flags = PTLRPC_CTX_NEW;
ctx->cc_vcred = *vcred;
spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
/* take a ref on belonging sec, balanced in ctx destroying */
atomic_inc(&sec->ps_refcount);
gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
}
-int __init sptlrpc_gss_init(void)
+static int __init sptlrpc_gss_init(void)
{
int rc;
gss_exit_lproc();
}
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("GSS security policy for Lustre");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre GSS security policy");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
module_init(sptlrpc_gss_init);