Whamcloud - gitweb
LU-3289 gss: Cleanup gss print statements and comments
[fs/lustre-release.git] / lustre / ptlrpc / gss / sec_gss.c
index 2a32e3c..9dd7898 100644 (file)
@@ -1,10 +1,10 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * Modifications for Lustre
  *
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
  * Author: Eric Mei <ericm@clusterfs.com>
  */
 
  *
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -58,9 +54,6 @@
 #include <linux/fs.h>
 #include <linux/mutex.h>
 #include <asm/atomic.h>
-#else
-#include <liblustre.h>
-#endif
 
 #include <obd.h>
 #include <obd_class.h>
@@ -76,6 +69,7 @@
 #include "gss_api.h"
 
 #include <linux/crypto.h>
+#include <linux/crc32.h>
 
 /*
  * early reply have fixed size, respectively in privacy and integrity mode.
@@ -296,7 +290,7 @@ __u32 gss_unseal_msg(struct gss_ctx *mechctx,
         /* allocate a temporary clear text buffer, same sized as token,
          * we assume the final clear text size <= token size */
         clear_buflen = lustre_msg_buflen(msgbuf, 1);
-        OBD_ALLOC(clear_buf, clear_buflen);
+        OBD_ALLOC_LARGE(clear_buf, clear_buflen);
         if (!clear_buf)
                 RETURN(GSS_S_FAILURE);
 
@@ -322,7 +316,7 @@ __u32 gss_unseal_msg(struct gss_ctx *mechctx,
 
         major = GSS_S_COMPLETE;
 out_free:
-        OBD_FREE(clear_buf, clear_buflen);
+        OBD_FREE_LARGE(clear_buf, clear_buflen);
         RETURN(major);
 }
 
@@ -332,23 +326,23 @@ out_free:
 
 int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount));
+       LASSERT(atomic_read(&ctx->cc_refcount));
 
-        if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
-                if (!ctx->cc_early_expire)
-                        cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+       if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+               if (!ctx->cc_early_expire)
+                       clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
-                CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
-                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
-                      ctx->cc_expire,
-                      ctx->cc_expire == 0 ? 0 :
-                      cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+               CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
+                     ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+                     ctx->cc_expire,
+                     ctx->cc_expire == 0 ? 0 :
+                     cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
 
-                sptlrpc_cli_ctx_wakeup(ctx);
-                return 1;
-        }
+               sptlrpc_cli_ctx_wakeup(ctx);
+               return 1;
+       }
 
-        return 0;
+       return 0;
 }
 
 /*
@@ -389,26 +383,28 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
         /* At this point this ctx might have been marked as dead by
          * someone else, in which case nobody will make further use
          * of it. we don't care, and mark it UPTODATE will help
-         * destroying server side context when it be destroied. */
-        cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
-
-        if (sec_is_reverse(ctx->cc_sec)) {
-                CWARN("server installed reverse ctx %p idx "LPX64", "
-                      "expiry %lu(%+lds)\n", ctx,
-                      gss_handle_to_u64(&gctx->gc_handle),
-                      ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
+         * destroying server side context when it be destroyed. */
+       set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+
+       if (sec_is_reverse(ctx->cc_sec)) {
+               CWARN("server installed reverse ctx %p idx "LPX64", "
+                     "expiry %lu(%+lds)\n", ctx,
+                     gss_handle_to_u64(&gctx->gc_handle),
+                     ctx->cc_expire,
+                     cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
         } else {
-                CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
-                      "expiry %lu(%+lds)\n", ctx,
-                      gss_handle_to_u64(&gctx->gc_handle),
-                      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
-                      ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
-
-                /* install reverse svc ctx for root context */
-                if (ctx->cc_vcred.vc_uid == 0)
-                        gss_sec_install_rctx(ctx->cc_sec->ps_import,
-                                             ctx->cc_sec, ctx);
-        }
+               CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
+                     "expiry %lu(%+lds)\n", ctx,
+                     gss_handle_to_u64(&gctx->gc_handle),
+                     ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+                     ctx->cc_expire,
+                     cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+
+               /* install reverse svc ctx for root context */
+               if (ctx->cc_vcred.vc_uid == 0)
+                       gss_sec_install_rctx(ctx->cc_sec->ps_import,
+                                            ctx->cc_sec, ctx);
+       }
 
         sptlrpc_cli_ctx_wakeup(ctx);
 }
@@ -434,43 +430,42 @@ static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
         rawobj_free(&gctx->gc_handle);
 }
 
-/*
+/**
  * Based on sequence number algorithm as specified in RFC 2203.
  *
- * modified for our own problem: arriving request has valid sequence number,
+ * Modified for our own problem: arriving request has valid sequence number,
  * but unwrapping request might cost a long time, after that its sequence
  * are not valid anymore (fall behind the window). It rarely happen, mostly
  * under extreme load.
  *
- * note we should not check sequence before verify the integrity of incoming
+ * Note we should not check sequence before verifying the integrity of incoming
  * request, because just one attacking request with high sequence number might
- * cause all following request be dropped.
+ * cause all following requests be dropped.
  *
- * so here we use a multi-phase approach: prepare 2 sequence windows,
+ * So here we use a multi-phase approach: prepare 2 sequence windows,
  * "main window" for normal sequence and "back window" for fall behind sequence.
  * and 3-phase checking mechanism:
- *  0 - before integrity verification, perform a initial sequence checking in
- *      main window, which only try and don't actually set any bits. if the
- *      sequence is high above the window or fit in the window and the bit
+ *  0 - before integrity verification, perform an initial sequence checking in
+ *      main window, which only tries and doesn't actually set any bits. if the
+ *      sequence is high above the window or fits in the window and the bit
  *      is 0, then accept and proceed to integrity verification. otherwise
  *      reject this sequence.
  *  1 - after integrity verification, check in main window again. if this
- *      sequence is high above the window or fit in the window and the bit
- *      is 0, then set the bit and accept; if it fit in the window but bit
- *      already set, then reject; if it fall behind the window, then proceed
+ *      sequence is high above the window or fits in the window and the bit
+ *      is 0, then set the bit and accept; if it fits in the window but bit
+ *      already set, then reject; if it falls behind the window, then proceed
  *      to phase 2.
- *  2 - check in back window. if it is high above the window or fit in the
+ *  2 - check in back window. if it is high above the window or fits in the
  *      window and the bit is 0, then set the bit and accept. otherwise reject.
  *
- * return value:
- *   1: looks like a replay
- *   0: is ok
- *  -1: is a replay
+ * \return      1:     looks like a replay
+ * \return      0:     is ok
+ * \return     -1:     is a replay
  *
- * note phase 0 is necessary, because otherwise replay attacking request of
+ * Note phase 0 is necessary, because otherwise replay attacking request of
  * sequence which between the 2 windows can't be detected.
  *
- * this mechanism can't totally solve the problem, but could help much less
+ * This mechanism can't totally solve the problem, but could help reduce the
  * number of valid requests be dropped.
  */
 static
@@ -512,7 +507,7 @@ int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
                  */
                 switch (phase) {
                 case 0:
-                        if (cfs_test_bit(seq_num % win_size, window))
+                       if (test_bit(seq_num % win_size, window))
                                 goto replay;
                         break;
                 case 1:
@@ -542,9 +537,9 @@ replay:
  */
 int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
 {
-        int rc = 0;
+       int rc = 0;
 
-        cfs_spin_lock(&ssd->ssd_lock);
+       spin_lock(&ssd->ssd_lock);
 
         if (set == 0) {
                 /*
@@ -578,8 +573,8 @@ int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
                         gss_stat_oos_record_svc(2, 0);
         }
 exit:
-        cfs_spin_unlock(&ssd->ssd_lock);
-        return rc;
+       spin_unlock(&ssd->ssd_lock);
+       return rc;
 }
 
 /***************************************
@@ -626,24 +621,22 @@ int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
 
 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
 {
-        buf[0] = '\0';
-
-        if (flags & PTLRPC_CTX_NEW)
-                strncat(buf, "new,", bufsize);
-        if (flags & PTLRPC_CTX_UPTODATE)
-                strncat(buf, "uptodate,", bufsize);
-        if (flags & PTLRPC_CTX_DEAD)
-                strncat(buf, "dead,", bufsize);
-        if (flags & PTLRPC_CTX_ERROR)
-                strncat(buf, "error,", bufsize);
-        if (flags & PTLRPC_CTX_CACHED)
-                strncat(buf, "cached,", bufsize);
-        if (flags & PTLRPC_CTX_ETERNAL)
-                strncat(buf, "eternal,", bufsize);
-        if (buf[0] == '\0')
-                strncat(buf, "-,", bufsize);
+       buf[0] = '\0';
 
-        buf[strlen(buf) - 1] = '\0';
+       if (flags & PTLRPC_CTX_NEW)
+               strlcat(buf, "new,", bufsize);
+       if (flags & PTLRPC_CTX_UPTODATE)
+               strlcat(buf, "uptodate,", bufsize);
+       if (flags & PTLRPC_CTX_DEAD)
+               strlcat(buf, "dead,", bufsize);
+       if (flags & PTLRPC_CTX_ERROR)
+               strlcat(buf, "error,", bufsize);
+       if (flags & PTLRPC_CTX_CACHED)
+               strlcat(buf, "cached,", bufsize);
+       if (flags & PTLRPC_CTX_ETERNAL)
+               strlcat(buf, "eternal,", bufsize);
+       if (buf[0] == '\0')
+               strlcat(buf, "-,", bufsize);
 }
 
 int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
@@ -669,33 +662,33 @@ int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
                 flags |= LUSTRE_GSS_PACK_USER;
 
 redo:
-        seq = cfs_atomic_inc_return(&gctx->gc_seq);
-
-        rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
-                          ctx->cc_sec->ps_part,
-                          flags, gctx->gc_proc, seq, svc,
-                          &gctx->gc_handle);
-        if (rc < 0)
-                RETURN(rc);
-
-        /* gss_sign_msg() msg might take long time to finish, in which period
-         * more rpcs could be wrapped up and sent out. if we found too many
-         * of them we should repack this rpc, because sent it too late might
-         * lead to the sequence number fall behind the window on server and
-         * be dropped. also applies to gss_cli_ctx_seal().
-         *
-         * Note: null mode dosen't check sequence number. */
-        if (svc != SPTLRPC_SVC_NULL &&
-            cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
-                int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
-
-                gss_stat_oos_record_cli(behind);
-                CWARN("req %p: %u behind, retry signing\n", req, behind);
-                goto redo;
-        }
-
-        req->rq_reqdata_len = rc;
-        RETURN(0);
+       seq = atomic_inc_return(&gctx->gc_seq);
+
+       rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
+                         ctx->cc_sec->ps_part,
+                         flags, gctx->gc_proc, seq, svc,
+                         &gctx->gc_handle);
+       if (rc < 0)
+               RETURN(rc);
+
+       /* gss_sign_msg() msg might take long time to finish, in which period
+        * more rpcs could be wrapped up and sent out. if we found too many
+        * of them we should repack this rpc, because sent it too late might
+        * lead to the sequence number fall behind the window on server and
+        * be dropped. also applies to gss_cli_ctx_seal().
+        *
+        * Note: null mode doesn't check sequence number. */
+       if (svc != SPTLRPC_SVC_NULL &&
+           atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+               int behind = atomic_read(&gctx->gc_seq) - seq;
+
+               gss_stat_oos_record_cli(behind);
+               CWARN("req %p: %u behind, retry signing\n", req, behind);
+               goto redo;
+       }
+
+       req->rq_reqdata_len = rc;
+       RETURN(0);
 }
 
 static
@@ -919,7 +912,7 @@ int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                 LASSERT(req->rq_reqbuf != req->rq_clrbuf);
                 LASSERT(req->rq_reqbuf_len >= wiresize);
         } else {
-                OBD_ALLOC(req->rq_reqbuf, wiresize);
+                OBD_ALLOC_LARGE(req->rq_reqbuf, wiresize);
                 if (!req->rq_reqbuf)
                         RETURN(-ENOMEM);
                 req->rq_reqbuf_len = wiresize;
@@ -943,7 +936,7 @@ int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                 ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
 
 redo:
-        ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
+       ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
 
         /* buffer objects */
         hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
@@ -961,29 +954,29 @@ redo:
         }
         LASSERT(token.len <= buflens[1]);
 
-        /* see explain in gss_cli_ctx_sign() */
-        if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
-                     GSS_SEQ_REPACK_THRESHOLD)) {
-                int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+       /* see explain in gss_cli_ctx_sign() */
+       if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+                    GSS_SEQ_REPACK_THRESHOLD)) {
+               int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
 
-                gss_stat_oos_record_cli(behind);
-                CWARN("req %p: %u behind, retry sealing\n", req, behind);
+               gss_stat_oos_record_cli(behind);
+               CWARN("req %p: %u behind, retry sealing\n", req, behind);
 
-                ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
-                goto redo;
-        }
+               ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+               goto redo;
+       }
 
-        /* now set the final wire data length */
-        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
-        RETURN(0);
+       /* now set the final wire data length */
+       req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
+       RETURN(0);
 
 err_free:
-        if (!req->rq_pool) {
-                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
-                req->rq_reqbuf = NULL;
-                req->rq_reqbuf_len = 0;
-        }
-        RETURN(rc);
+       if (!req->rq_pool) {
+               OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+               req->rq_reqbuf = NULL;
+               req->rq_reqbuf_len = 0;
+       }
+       RETURN(rc);
 }
 
 int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
@@ -1120,19 +1113,19 @@ int gss_sec_create_common(struct gss_sec *gsec,
                 return -EOPNOTSUPP;
         }
 
-        cfs_spin_lock_init(&gsec->gs_lock);
+       spin_lock_init(&gsec->gs_lock);
         gsec->gs_rvs_hdl = 0ULL;
 
-        /* initialize upper ptlrpc_sec */
-        sec = &gsec->gs_base;
-        sec->ps_policy = policy;
-        cfs_atomic_set(&sec->ps_refcount, 0);
-        cfs_atomic_set(&sec->ps_nctx, 0);
-        sec->ps_id = sptlrpc_get_next_secid();
-        sec->ps_flvr = *sf;
-        sec->ps_import = class_import_get(imp);
-        cfs_spin_lock_init(&sec->ps_lock);
-        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+       /* initialize upper ptlrpc_sec */
+       sec = &gsec->gs_base;
+       sec->ps_policy = policy;
+       atomic_set(&sec->ps_refcount, 0);
+       atomic_set(&sec->ps_nctx, 0);
+       sec->ps_id = sptlrpc_get_next_secid();
+       sec->ps_flvr = *sf;
+       sec->ps_import = class_import_get(imp);
+       spin_lock_init(&sec->ps_lock);
+       INIT_LIST_HEAD(&sec->ps_gc_list);
 
         if (!svcctx) {
                 sec->ps_gc_interval = GSS_GC_INTERVAL;
@@ -1153,29 +1146,29 @@ int gss_sec_create_common(struct gss_sec *gsec,
 
 void gss_sec_destroy_common(struct gss_sec *gsec)
 {
-        struct ptlrpc_sec      *sec = &gsec->gs_base;
-        ENTRY;
+       struct ptlrpc_sec       *sec = &gsec->gs_base;
+       ENTRY;
 
-        LASSERT(sec->ps_import);
-        LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
-        LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
+       LASSERT(sec->ps_import);
+       LASSERT(atomic_read(&sec->ps_refcount) == 0);
+       LASSERT(atomic_read(&sec->ps_nctx) == 0);
 
-        if (gsec->gs_mech) {
-                lgss_mech_put(gsec->gs_mech);
-                gsec->gs_mech = NULL;
-        }
+       if (gsec->gs_mech) {
+               lgss_mech_put(gsec->gs_mech);
+               gsec->gs_mech = NULL;
+       }
 
-        class_import_put(sec->ps_import);
+       class_import_put(sec->ps_import);
 
-        if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
-                sptlrpc_enc_pool_del_user();
+       if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
+               sptlrpc_enc_pool_del_user();
 
-        EXIT;
+       EXIT;
 }
 
 void gss_sec_kill(struct ptlrpc_sec *sec)
 {
-        sec->ps_dying = 1;
+       sec->ps_dying = 1;
 }
 
 int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
@@ -1183,31 +1176,31 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
                             struct ptlrpc_ctx_ops *ctxops,
                             struct vfs_cred *vcred)
 {
-        struct gss_cli_ctx    *gctx = ctx2gctx(ctx);
-
-        gctx->gc_win = 0;
-        cfs_atomic_set(&gctx->gc_seq, 0);
-
-        CFS_INIT_HLIST_NODE(&ctx->cc_cache);
-        cfs_atomic_set(&ctx->cc_refcount, 0);
-        ctx->cc_sec = sec;
-        ctx->cc_ops = ctxops;
-        ctx->cc_expire = 0;
-        ctx->cc_flags = PTLRPC_CTX_NEW;
-        ctx->cc_vcred = *vcred;
-        cfs_spin_lock_init(&ctx->cc_lock);
-        CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
-        CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
-
-        /* take a ref on belonging sec, balanced in ctx destroying */
-        cfs_atomic_inc(&sec->ps_refcount);
-        /* statistic only */
-        cfs_atomic_inc(&sec->ps_nctx);
-
-        CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
-               sec->ps_policy->sp_name, ctx->cc_sec,
-               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
-        return 0;
+       struct gss_cli_ctx      *gctx = ctx2gctx(ctx);
+
+       gctx->gc_win = 0;
+       atomic_set(&gctx->gc_seq, 0);
+
+       INIT_HLIST_NODE(&ctx->cc_cache);
+       atomic_set(&ctx->cc_refcount, 0);
+       ctx->cc_sec = sec;
+       ctx->cc_ops = ctxops;
+       ctx->cc_expire = 0;
+       ctx->cc_flags = PTLRPC_CTX_NEW;
+       ctx->cc_vcred = *vcred;
+       spin_lock_init(&ctx->cc_lock);
+       INIT_LIST_HEAD(&ctx->cc_req_list);
+       INIT_LIST_HEAD(&ctx->cc_gc_chain);
+
+       /* take a ref on belonging sec, balanced in ctx destroying */
+       atomic_inc(&sec->ps_refcount);
+       /* statistic only */
+       atomic_inc(&sec->ps_nctx);
+
+       CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
+              sec->ps_policy->sp_name, ctx->cc_sec,
+              ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+       return 0;
 }
 
 /*
@@ -1218,44 +1211,44 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
 int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
                             struct ptlrpc_cli_ctx *ctx)
 {
-        struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+       struct gss_cli_ctx *gctx = ctx2gctx(ctx);
 
-        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
-        LASSERT(ctx->cc_sec == sec);
+       LASSERT(atomic_read(&sec->ps_nctx) > 0);
+       LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+       LASSERT(ctx->cc_sec == sec);
 
-        /*
-         * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
-         * this is to avoid potential problems of client side reverse svc ctx
-         * be mis-destroyed in various recovery senarios. anyway client can
-         * manage its reverse ctx well by associating it with its buddy ctx.
-         */
-        if (sec_is_reverse(sec))
-                ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
+       /*
+        * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
+        * this is to avoid potential problems of client side reverse svc ctx
+        * be mis-destroyed in various recovery senarios. anyway client can
+        * manage its reverse ctx well by associating it with its buddy ctx.
+        */
+       if (sec_is_reverse(sec))
+               ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
 
-        if (gctx->gc_mechctx) {
-                /* the final context fini rpc will use this ctx too, and it's
-                 * asynchronous which finished by request_out_callback(). so
-                 * we add refcount, whoever drop finally drop the refcount to
-                 * 0 should responsible for the rest of destroy. */
-                cfs_atomic_inc(&ctx->cc_refcount);
+       if (gctx->gc_mechctx) {
+               /* the final context fini rpc will use this ctx too, and it's
+                * asynchronous which finished by request_out_callback(). so
+                * we add refcount, whoever drop finally drop the refcount to
+                * 0 should responsible for the rest of destroy. */
+               atomic_inc(&ctx->cc_refcount);
 
-                gss_do_ctx_fini_rpc(gctx);
-                gss_cli_ctx_finalize(gctx);
+               gss_do_ctx_fini_rpc(gctx);
+               gss_cli_ctx_finalize(gctx);
 
-                if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
-                        return 1;
-        }
+               if (!atomic_dec_and_test(&ctx->cc_refcount))
+                       return 1;
+       }
 
-        if (sec_is_reverse(sec))
-                CWARN("reverse sec %p: destroy ctx %p\n",
-                      ctx->cc_sec, ctx);
-        else
-                CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
-                      sec->ps_policy->sp_name, ctx->cc_sec,
-                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+       if (sec_is_reverse(sec))
+               CWARN("reverse sec %p: destroy ctx %p\n",
+                     ctx->cc_sec, ctx);
+       else
+               CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
+                     sec->ps_policy->sp_name, ctx->cc_sec,
+                     ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
 
-        return 0;
+       return 0;
 }
 
 static
@@ -1316,7 +1309,7 @@ int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
         if (!req->rq_reqbuf) {
                 bufsize = size_roundup_power2(bufsize);
 
-                OBD_ALLOC(req->rq_reqbuf, bufsize);
+                OBD_ALLOC_LARGE(req->rq_reqbuf, bufsize);
                 if (!req->rq_reqbuf)
                         RETURN(-ENOMEM);
 
@@ -1403,7 +1396,7 @@ int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
         if (!req->rq_clrbuf) {
                 clearsize = size_roundup_power2(clearsize);
 
-                OBD_ALLOC(req->rq_clrbuf, clearsize);
+                OBD_ALLOC_LARGE(req->rq_clrbuf, clearsize);
                 if (!req->rq_clrbuf)
                         RETURN(-ENOMEM);
         }
@@ -1464,7 +1457,7 @@ void gss_free_reqbuf(struct ptlrpc_sec *sec,
             req->rq_clrbuf < req->rq_reqbuf ||
             (char *) req->rq_clrbuf >=
             (char *) req->rq_reqbuf + req->rq_reqbuf_len)
-                OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
+                OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
 
         req->rq_clrbuf = NULL;
         req->rq_clrbuf_len = 0;
@@ -1473,7 +1466,7 @@ release_reqbuf:
         if (!req->rq_pool && req->rq_reqbuf) {
                 LASSERT(req->rq_reqbuf_len);
 
-                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
+                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                 req->rq_reqbuf = NULL;
                 req->rq_reqbuf_len = 0;
         }
@@ -1485,7 +1478,7 @@ static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
 {
         bufsize = size_roundup_power2(bufsize);
 
-        OBD_ALLOC(req->rq_repbuf, bufsize);
+        OBD_ALLOC_LARGE(req->rq_repbuf, bufsize);
         if (!req->rq_repbuf)
                 return -ENOMEM;
 
@@ -1605,7 +1598,7 @@ int gss_alloc_repbuf(struct ptlrpc_sec *sec,
 void gss_free_repbuf(struct ptlrpc_sec *sec,
                      struct ptlrpc_request *req)
 {
-        OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
+        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
         req->rq_repbuf = NULL;
         req->rq_repbuf_len = 0;
         req->rq_repdata = NULL;
@@ -1701,16 +1694,28 @@ int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
         if (req->rq_reqbuf_len < newbuf_size) {
                 newbuf_size = size_roundup_power2(newbuf_size);
 
-                OBD_ALLOC(newbuf, newbuf_size);
+                OBD_ALLOC_LARGE(newbuf, newbuf_size);
                 if (newbuf == NULL)
                         RETURN(-ENOMEM);
 
+               /* Must lock this, so that otherwise unprotected change of
+                * rq_reqmsg is not racing with parallel processing of
+                * imp_replay_list traversing threads. See LU-3333
+                * This is a bandaid at best, we really need to deal with this
+                * in request enlarging code before unpacking that's already
+                * there */
+               if (req->rq_import)
+                       spin_lock(&req->rq_import->imp_lock);
+
                 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
 
-                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
+                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                 req->rq_reqbuf = newbuf;
                 req->rq_reqbuf_len = newbuf_size;
                 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
+
+               if (req->rq_import)
+                       spin_unlock(&req->rq_import->imp_lock);
         }
 
         /* do enlargement, from wrapper to embedded, from end to begin */
@@ -1771,6 +1776,8 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                 if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
                         void *src, *dst;
 
+                       if (req->rq_import)
+                               spin_lock(&req->rq_import->imp_lock);
                         /* move clear text backward. */
                         src = req->rq_clrbuf;
                         dst = (char *) req->rq_reqbuf + newcipbuf_size;
@@ -1780,6 +1787,9 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                         req->rq_clrbuf = (struct lustre_msg *) dst;
                         req->rq_clrbuf_len = newclrbuf_size;
                         req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+
+                       if (req->rq_import)
+                               spin_unlock(&req->rq_import->imp_lock);
                 } else {
                         /* sadly we have to split out the clear buffer */
                         LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
@@ -1790,22 +1800,34 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
         if (req->rq_clrbuf_len < newclrbuf_size) {
                 newclrbuf_size = size_roundup_power2(newclrbuf_size);
 
-                OBD_ALLOC(newclrbuf, newclrbuf_size);
+                OBD_ALLOC_LARGE(newclrbuf, newclrbuf_size);
                 if (newclrbuf == NULL)
                         RETURN(-ENOMEM);
 
+               /* Must lock this, so that otherwise unprotected change of
+                * rq_reqmsg is not racing with parallel processing of
+                * imp_replay_list traversing threads. See LU-3333
+                * This is a bandaid at best, we really need to deal with this
+                * in request enlarging code before unpacking that's already
+                * there */
+               if (req->rq_import)
+                       spin_lock(&req->rq_import->imp_lock);
+
                 memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
 
                 if (req->rq_reqbuf == NULL ||
                     req->rq_clrbuf < req->rq_reqbuf ||
                     (char *) req->rq_clrbuf >=
                     (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
-                        OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
+                        OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
                 }
 
                 req->rq_clrbuf = newclrbuf;
                 req->rq_clrbuf_len = newclrbuf_size;
                 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+
+               if (req->rq_import)
+                       spin_unlock(&req->rq_import->imp_lock);
         }
 
         _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
@@ -1876,17 +1898,17 @@ void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
 static inline
 void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
 {
-        LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
-        cfs_atomic_inc(&grctx->src_base.sc_refcount);
+       LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+       atomic_inc(&grctx->src_base.sc_refcount);
 }
 
 static inline
 void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
 {
-        LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+       LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
 
-        if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
-                gss_svc_reqctx_free(grctx);
+       if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+               gss_svc_reqctx_free(grctx);
 }
 
 static
@@ -1901,7 +1923,7 @@ int gss_svc_sign(struct ptlrpc_request *req,
 
         LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
 
-        /* embedded lustre_msg might have been shrinked */
+        /* embedded lustre_msg might have been shrunk */
         if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
                 lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
 
@@ -2056,10 +2078,12 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
         if (rc != SECSVC_OK)
                 RETURN(rc);
 
-        if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_root)
+        if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
+            grctx->src_ctx->gsc_usr_root)
                 CWARN("create svc ctx %p: user from %s authenticated as %s\n",
                       grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
-                      grctx->src_ctx->gsc_usr_mds ? "mds" : "root");
+                      grctx->src_ctx->gsc_usr_mds ? "mds" :
+                        (grctx->src_ctx->gsc_usr_oss ? "oss" : "root"));
         else
                 CWARN("create svc ctx %p: accept user %u from %s\n",
                       grctx->src_ctx, grctx->src_ctx->gsc_uid,
@@ -2379,10 +2403,10 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
         if (!grctx)
                 RETURN(SECSVC_DROP);
 
-        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
-        cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
-        req->rq_svc_ctx = &grctx->src_base;
-        gw = &grctx->src_wirectx;
+       grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
+       atomic_set(&grctx->src_base.sc_refcount, 1);
+       req->rq_svc_ctx = &grctx->src_base;
+       gw = &grctx->src_wirectx;
 
         /* save wire context */
         gw->gw_flags = ghdr->gh_flags;
@@ -2419,6 +2443,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
                 req->rq_auth_gss = 1;
                 req->rq_auth_remote = grctx->src_ctx->gsc_remote;
                 req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
+                req->rq_auth_usr_ost = grctx->src_ctx->gsc_usr_oss;
                 req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
                 req->rq_auth_uid = grctx->src_ctx->gsc_uid;
                 req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
@@ -2576,7 +2601,7 @@ int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
                 /* pre-allocated */
                 LASSERT(rs->rs_size >= rs_size);
         } else {
-                OBD_ALLOC(rs, rs_size);
+                OBD_ALLOC_LARGE(rs, rs_size);
                 if (rs == NULL)
                         RETURN(-ENOMEM);
 
@@ -2625,7 +2650,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
         ENTRY;
 
         /* get clear data length. note embedded lustre_msg might
-         * have been shrinked */
+         * have been shrunk */
         if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
                 msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
         else 
@@ -2648,7 +2673,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
 
         /* allocate temporary cipher buffer */
         token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
-        OBD_ALLOC(token_buf, token_buflen);
+        OBD_ALLOC_LARGE(token_buf, token_buflen);
         if (token_buf == NULL)
                 RETURN(-ENOMEM);
 
@@ -2705,7 +2730,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
 
         rc = 0;
 out_free:
-        OBD_FREE(token_buf, token_buflen);
+        OBD_FREE_LARGE(token_buf, token_buflen);
         RETURN(rc);
 }
 
@@ -2766,13 +2791,13 @@ void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
         rs->rs_svc_ctx = NULL;
 
         if (!rs->rs_prealloc)
-                OBD_FREE(rs, rs->rs_size);
+                OBD_FREE_LARGE(rs, rs->rs_size);
 }
 
 void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
 {
-        LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
-        gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
+       LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+       gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
 }
 
 int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
@@ -2788,21 +2813,21 @@ int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
         cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
         cli_gctx->gc_win = GSS_SEQ_WIN;
 
-        /* The problem is the reverse ctx might get lost in some recovery
-         * situations, and the same svc_ctx will be used to re-create it.
-         * if there's callback be sentout before that, new reverse ctx start
-         * with sequence 0 will lead to future callback rpc be treated as
-         * replay.
-         *
-         * each reverse root ctx will record its latest sequence number on its
-         * buddy svcctx before be destroied, so here we continue use it.
-         */
-        cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
-
-        if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
-                CERROR("failed to dup svc handle\n");
-                goto err_out;
-        }
+       /* The problem is that the reverse ctx might get lost in some recovery
+        * situations, and the same svc_ctx will be used to re-create it.
+        * If a callback is sent out before that, a new reverse ctx starting
+        * with sequence 0 will cause future callback RPCs to be treated as
+        * replays.
+        *
+        * Each reverse root ctx records its latest sequence number on its
+        * buddy svcctx before being destroyed, so here we continue to use it.
+        */
+       atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+
+       if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
+               CERROR("failed to dup svc handle\n");
+               goto err_out;
+       }
 
         if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
             GSS_S_COMPLETE) {
@@ -2846,7 +2871,7 @@ static void gss_init_at_reply_offset(void)
         gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
 }
 
-int __init sptlrpc_gss_init(void)
+static int __init sptlrpc_gss_init(void)
 {
         int rc;
 
@@ -2862,57 +2887,63 @@ int __init sptlrpc_gss_init(void)
         if (rc)
                 goto out_cli_upcall;
 
-        rc = init_kerberos_module();
-        if (rc)
-                goto out_svc_upcall;
+       rc = init_null_module();
+       if (rc)
+               goto out_svc_upcall;
 
-        /* register policy after all other stuff be intialized, because it
-         * might be in used immediately after the registration. */
+       rc = init_kerberos_module();
+       if (rc)
+               goto out_null;
 
-        rc = gss_init_keyring();
-        if (rc)
-                goto out_kerberos;
+       rc = init_sk_module();
+       if (rc)
+               goto out_kerberos;
 
-#ifdef HAVE_GSS_PIPEFS
-        rc = gss_init_pipefs();
-        if (rc)
-                goto out_keyring;
-#endif
+       /* Register the policy after everything else is initialized, because
+        * it might be used immediately after registration. */
 
-        gss_init_at_reply_offset();
+       rc = gss_init_keyring();
+       if (rc)
+               goto out_sk;
 
-        return 0;
+       rc = gss_init_pipefs();
+       if (rc)
+               goto out_keyring;
 
-#ifdef HAVE_GSS_PIPEFS
-out_keyring:
-        gss_exit_keyring();
-#endif
+       gss_init_at_reply_offset();
 
+       return 0;
+
+out_keyring:
+       gss_exit_keyring();
+out_sk:
+       cleanup_sk_module();
 out_kerberos:
-        cleanup_kerberos_module();
+       cleanup_kerberos_module();
+out_null:
+       cleanup_null_module();
 out_svc_upcall:
-        gss_exit_svc_upcall();
+       gss_exit_svc_upcall();
 out_cli_upcall:
-        gss_exit_cli_upcall();
+       gss_exit_cli_upcall();
 out_lproc:
-        gss_exit_lproc();
-        return rc;
+       gss_exit_lproc();
+       return rc;
 }
 
 static void __exit sptlrpc_gss_exit(void)
 {
         gss_exit_keyring();
-#ifdef HAVE_GSS_PIPEFS
         gss_exit_pipefs();
-#endif
         cleanup_kerberos_module();
         gss_exit_svc_upcall();
         gss_exit_cli_upcall();
         gss_exit_lproc();
 }
 
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("GSS security policy for Lustre");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre GSS security policy");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
 
 module_init(sptlrpc_gss_init);