X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fsec_gss.c;h=15455190605404de43c7f1c5d9d767594ca18799;hb=b9cbb29935faa388d36b16140c7ef76a47754bec;hp=69afcf0e5929d19fdc9386b0195694a435b6c7bd;hpb=b2bb3b247d1dc75e25f1b5c14a333905909b5e70;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/gss/sec_gss.c b/lustre/ptlrpc/gss/sec_gss.c
index 69afcf0..1545519 100644
--- a/lustre/ptlrpc/gss/sec_gss.c
+++ b/lustre/ptlrpc/gss/sec_gss.c
@@ -3,7 +3,7 @@
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
 *
 * Author: Eric Mei 
 */
@@ -47,7 +47,6 @@
 */
 #define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
 #include 
 #include 
 #include 
@@ -55,9 +54,6 @@
 #include 
 #include 
 #include 
-#else
-#include 
-#endif
 
 #include 
 #include 
@@ -68,7 +64,6 @@
 #include 
 #include 
-#include "../ptlrpc_internal.h"
 #include "gss_err.h"
 #include "gss_internal.h"
 #include "gss_api.h"
@@ -388,7 +383,7 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
 /* At this point this ctx might have been marked as dead by
 * someone else, in which case nobody will make further use
 * of it. we don't care, and mark it UPTODATE will help
- * destroying server side context when it be destroied. */
+ * destroying server side context when it is destroyed. */
 set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 if (sec_is_reverse(ctx->cc_sec)) {
@@ -433,43 +428,42 @@ static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
 rawobj_free(&gctx->gc_handle);
 }
-/*
+/**
 * Based on sequence number algorithm as specified in RFC 2203.
 *
- * modified for our own problem: arriving request has valid sequence number,
+ * Modified for our own problem: an arriving request has a valid sequence number,
 * but unwrapping request might cost a long time, after that its sequence
 * are not valid anymore (fall behind the window). It rarely happen, mostly
 * under extreme load.
 *
- * note we should not check sequence before verify the integrity of incoming
+ * Note we should not check sequence before verifying the integrity of incoming
 * request, because just one attacking request with high sequence number might
- * cause all following request be dropped.
+ * cause all following requests to be dropped.
 *
- * so here we use a multi-phase approach: prepare 2 sequence windows,
+ * So here we use a multi-phase approach: prepare 2 sequence windows,
 * "main window" for normal sequence and "back window" for fall behind sequence.
 * and 3-phase checking mechanism:
- * 0 - before integrity verification, perform a initial sequence checking in
- * main window, which only try and don't actually set any bits. if the
- * sequence is high above the window or fit in the window and the bit
+ * 0 - before integrity verification, perform an initial sequence checking in
+ * main window, which only tries and doesn't actually set any bits. if the
+ * sequence is high above the window or fits in the window and the bit
 * is 0, then accept and proceed to integrity verification. otherwise
 * reject this sequence.
 * 1 - after integrity verification, check in main window again. if this
- * sequence is high above the window or fit in the window and the bit
- * is 0, then set the bit and accept; if it fit in the window but bit
- * already set, then reject; if it fall behind the window, then proceed
+ * sequence is high above the window or fits in the window and the bit
+ * is 0, then set the bit and accept; if it fits in the window but bit
+ * already set, then reject; if it falls behind the window, then proceed
 * to phase 2.
- * 2 - check in back window. if it is high above the window or fit in the
+ * 2 - check in back window. if it is high above the window or fits in the
 * window and the bit is 0, then set the bit and accept. otherwise reject.
 *
- * return value:
- * 1: looks like a replay
- * 0: is ok
- * -1: is a replay
+ * \return 1: looks like a replay
+ * \return 0: is ok
+ * \return -1: is a replay
 *
- * note phase 0 is necessary, because otherwise replay attacking request of
+ * Note phase 0 is necessary, because otherwise replay attacking request of
 * sequence which between the 2 windows can't be detected.
 *
- * this mechanism can't totally solve the problem, but could help much less
+ * This mechanism can't totally solve the problem, but could help reduce the
 * number of valid requests being dropped.
 */
 static
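To make the two-window scheme described in the hunk above concrete, here is a minimal standalone sketch of the main-window half of the check. All names (struct seq_win, win_check_seq, the 64-bit window width) are hypothetical stand-ins, not the actual Lustre sequence-checking code: phase 0 is the set_bit=0 dry run before integrity verification, phase 1 is the set_bit=1 recording pass after it, and a return of 1 sends the caller on to a second window of the same shape, the "back window" for fall-behind sequences.

/* Hypothetical sketch of one sequence window; not Lustre code. */
struct seq_win {
	unsigned long long bits;	/* bit k set => (max_seq - k) was seen */
	unsigned int	   max_seq;	/* highest sequence accepted so far */
};

/*
 * set_bit = 0: phase 0, dry-run check before integrity verification.
 * set_bit = 1: phase 1, really record the sequence.
 * Returns 0 to accept, -1 for a replay, 1 if the sequence fell
 * behind the window (caller then tries the back window).
 */
static int win_check_seq(struct seq_win *w, unsigned int seq, int set_bit)
{
	if (seq > w->max_seq) {		/* high above the window: accept */
		if (set_bit) {
			unsigned int shift = seq - w->max_seq;

			/* slide the window forward; bit 0 becomes seq itself */
			w->bits = shift >= 64 ? 0 : w->bits << shift;
			w->bits |= 1ULL;
			w->max_seq = seq;
		}
		return 0;
	}

	if (w->max_seq - seq >= 64)	/* fell behind the window */
		return 1;

	if (w->bits & (1ULL << (w->max_seq - seq)))
		return -1;		/* bit already set: replay */

	if (set_bit)
		w->bits |= 1ULL << (w->max_seq - seq);
	return 0;
}

In this sketch the phase 0 dry run can never lose information (it sets no bits), which is exactly why it is safe to run before the request's integrity has been verified.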
@@ -625,24 +619,22 @@ int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
 {
- buf[0] = '\0';
+ buf[0] = '\0';
 
- if (flags & PTLRPC_CTX_NEW)
- strncat(buf, "new,", bufsize);
- if (flags & PTLRPC_CTX_UPTODATE)
- strncat(buf, "uptodate,", bufsize);
- if (flags & PTLRPC_CTX_DEAD)
- strncat(buf, "dead,", bufsize);
- if (flags & PTLRPC_CTX_ERROR)
- strncat(buf, "error,", bufsize);
- if (flags & PTLRPC_CTX_CACHED)
- strncat(buf, "cached,", bufsize);
- if (flags & PTLRPC_CTX_ETERNAL)
- strncat(buf, "eternal,", bufsize);
- if (buf[0] == '\0')
- strncat(buf, "-,", bufsize);
-
- buf[strlen(buf) - 1] = '\0';
+ if (flags & PTLRPC_CTX_NEW)
+ strlcat(buf, "new,", bufsize);
+ if (flags & PTLRPC_CTX_UPTODATE)
+ strlcat(buf, "uptodate,", bufsize);
+ if (flags & PTLRPC_CTX_DEAD)
+ strlcat(buf, "dead,", bufsize);
+ if (flags & PTLRPC_CTX_ERROR)
+ strlcat(buf, "error,", bufsize);
+ if (flags & PTLRPC_CTX_CACHED)
+ strlcat(buf, "cached,", bufsize);
+ if (flags & PTLRPC_CTX_ETERNAL)
+ strlcat(buf, "eternal,", bufsize);
+ if (buf[0] == '\0')
+ strlcat(buf, "-,", bufsize);
 }
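The strncat() to strlcat() switch in the hunk above fixes a genuine API misuse: strncat()'s third argument bounds how many bytes may be appended, not the total size of the destination, so passing bufsize on every call can still overflow buf after several flags accumulate. strlcat() takes the full destination size and always NUL-terminates, truncating if needed. A self-contained illustration; my_strlcat here is a hypothetical stand-in for platforms whose libc lacks strlcat, not Lustre code:

#include <stdio.h>
#include <string.h>

/* minimal BSD-style strlcat for libcs that do not provide one */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst);
	size_t slen = strlen(src);

	if (dlen >= size)
		return size + slen;	/* dst already fills the buffer */
	if (slen >= size - dlen)
		slen = size - dlen - 1;	/* truncate to fit */
	memcpy(dst + dlen, src, slen);
	dst[dlen + slen] = '\0';
	return dlen + slen;
}

int main(void)
{
	char buf[8] = "";

	/* strncat(buf, "uptodate,", sizeof(buf)) could write up to
	 * 9 bytes plus a NUL into this 8-byte buffer; strlcat truncates */
	my_strlcat(buf, "new,", sizeof(buf));
	my_strlcat(buf, "uptodate,", sizeof(buf));
	printf("%s\n", buf);		/* prints "new,upt" */
	return 0;
}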
@@ -683,7 +675,7 @@ redo:
 * lead to the sequence number fall behind the window on server and
 * be dropped. also applies to gss_cli_ctx_seal().
 *
- * Note: null mode dosen't check sequence number. */
+ * Note: null mode doesn't check sequence number. */
 if (svc != SPTLRPC_SVC_NULL &&
 atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
 int behind = atomic_read(&gctx->gc_seq) - seq;
@@ -1131,7 +1123,7 @@ int gss_sec_create_common(struct gss_sec *gsec,
 sec->ps_flvr = *sf;
 sec->ps_import = class_import_get(imp);
 spin_lock_init(&sec->ps_lock);
- CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
 if (!svcctx) {
 sec->ps_gc_interval = GSS_GC_INTERVAL;
@@ -1187,7 +1179,7 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
 gctx->gc_win = 0;
 atomic_set(&gctx->gc_seq, 0);
- CFS_INIT_HLIST_NODE(&ctx->cc_cache);
+ INIT_HLIST_NODE(&ctx->cc_cache);
 atomic_set(&ctx->cc_refcount, 0);
 ctx->cc_sec = sec;
 ctx->cc_ops = ctxops;
@@ -1195,8 +1187,8 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
 ctx->cc_flags = PTLRPC_CTX_NEW;
 ctx->cc_vcred = *vcred;
 spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
 /* take a ref on belonging sec, balanced in ctx destroying */
 atomic_inc(&sec->ps_refcount);
@@ -1652,9 +1644,9 @@ int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
 int svc, int segment, int newsize)
 {
+ struct lustre_msg *newbuf;
 int txtsize, sigsize = 0, i;
 int newmsg_size, newbuf_size;
- int rc;
 /*
 * gss header is at seg 0;
@@ -1698,10 +1690,31 @@ int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
 if (req->rq_reqbuf_len < newbuf_size) {
- rc = ptlrpc_enlarge_req_buffer(req, newbuf_size);
- if (rc != 0)
- RETURN(rc);
- }
+ newbuf_size = size_roundup_power2(newbuf_size);
+
+ OBD_ALLOC_LARGE(newbuf, newbuf_size);
+ if (newbuf == NULL)
+ RETURN(-ENOMEM);
+
+ /* Must lock this, so that the otherwise unprotected change of
+ * rq_reqmsg does not race with parallel processing of
+ * imp_replay_list traversing threads. See LU-3333.
+ * This is a band-aid at best; we really need to deal with this
+ * in the request enlarging code before unpacking that's already
+ * there */
+ if (req->rq_import)
+ spin_lock(&req->rq_import->imp_lock);
+
+ memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
+
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = newbuf;
+ req->rq_reqbuf_len = newbuf_size;
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
+
+ if (req->rq_import)
+ spin_unlock(&req->rq_import->imp_lock);
+ }
 /* do enlargement, from wrapper to embedded, from end to begin */
 if (svc != SPTLRPC_SVC_NULL)
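The replacement hunk above inlines an enlarge-and-swap: allocate the bigger buffer outside the lock (OBD_ALLOC_LARGE may sleep while imp_lock is a spinlock), then copy, free, and repoint rq_reqbuf/rq_reqmsg while holding imp_lock so that imp_replay_list walkers never observe the pointer pair mid-update. A condensed userspace sketch of the same pattern, with hypothetical types standing in for ptlrpc_request and the Lustre allocators (the real code also recomputes the embedded message pointer with lustre_msg_buf() rather than by offset):

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

struct request {
	pthread_mutex_t *lock;	/* lock readers take (imp_lock stand-in) */
	char  *buf;		/* wrapper buffer (rq_reqbuf stand-in)	 */
	size_t buf_len;
	char  *msg;		/* points into buf (rq_reqmsg stand-in)	 */
};

static int enlarge_buf(struct request *req, size_t newsize)
{
	size_t msg_off;
	char *newbuf;

	if (req->buf_len >= newsize)
		return 0;		/* already big enough */

	msg_off = req->msg - req->buf;	/* keep the embedded offset */
	newbuf = malloc(newsize);	/* allocate outside the lock */
	if (newbuf == NULL)
		return -1;

	/*
	 * Copy and repoint under the lock so concurrent readers that
	 * dereference req->msg never see a half-updated (buf, msg) pair.
	 */
	pthread_mutex_lock(req->lock);
	memcpy(newbuf, req->buf, req->buf_len);
	free(req->buf);
	req->buf = newbuf;
	req->buf_len = newsize;
	req->msg = newbuf + msg_off;
	pthread_mutex_unlock(req->lock);
	return 0;
}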
@@ -1908,7 +1921,7 @@ int gss_svc_sign(struct ptlrpc_request *req,
 LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
- /* embedded lustre_msg might have been shrinked */
+ /* embedded lustre_msg might have been shrunk */
 if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
 lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
@@ -2635,7 +2648,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
 ENTRY;
 /* get clear data length. note embedded lustre_msg might
- * have been shrinked */
+ * have been shrunk */
 if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
 msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
 else
@@ -2805,7 +2818,7 @@ int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
 * replay.
 *
 * each reverse root ctx will record its latest sequence number on its
- * buddy svcctx before be destroied, so here we continue use it.
+ * buddy svcctx before being destroyed, so here we continue to use it.
 */
 atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
@@ -2884,7 +2897,7 @@ int __init sptlrpc_gss_init(void)
 if (rc)
 goto out_kerberos;
- /* register policy after all other stuff be intialized, because it
+ /* register policy after all other stuff is initialized, because it
 * might be used immediately after the registration. */
 rc = gss_init_keyring();
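The comment fixed in the last hunk encodes an ordering rule worth spelling out: the policy must be registered only after everything it depends on is ready, because registration makes it instantly reachable from other threads. A sketch of that rule in the usual kernel goto-unwind style; the function names are hypothetical stand-ins, not the real sptlrpc_gss_init() call sequence:

/* Hypothetical stand-ins for the real init/fini steps. */
static int init_internals(void)   { return 0; }
static int init_mechanisms(void)  { return 0; }
static int register_policy(void)  { return 0; }
static void fini_mechanisms(void) { }
static void fini_internals(void)  { }

static int example_init(void)
{
	int rc;

	rc = init_internals();		/* everything the policy relies on */
	if (rc)
		return rc;

	rc = init_mechanisms();
	if (rc)
		goto out_internals;

	/* Register the externally visible policy last: once registered
	 * it may be called into immediately, so all of its dependencies
	 * must already be fully initialized. */
	rc = register_policy();
	if (rc)
		goto out_mechanisms;

	return 0;

out_mechanisms:
	fini_mechanisms();
out_internals:
	fini_internals();
	return rc;
}

The unwind labels mirror the init order in reverse, so a failure at any step tears down exactly what was already set up, which is the same shape as the goto out_kerberos path visible in the hunk.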