LU-6496 ptlrpc: Fix wrong code indentation in plain_authorize
fs/lustre-release.git: lustre/ptlrpc/sec_plain.c
index bf02263..9eaa852 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -40,9 +40,6 @@
 
 #define DEBUG_SUBSYSTEM S_SEC
 
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
 
 #include <obd_support.h>
 #include <obd_cksum.h>
@@ -50,6 +47,8 @@
 #include <lustre_net.h>
 #include <lustre_sec.h>
 
+#include "ptlrpc_internal.h"
+
 struct plain_sec {
         struct ptlrpc_sec       pls_base;
        rwlock_t            pls_lock;
@@ -155,7 +154,6 @@ static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
         return 0;
 }
 
-#ifdef __KERNEL__
 static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
 {
        char           *ptr;
@@ -166,26 +164,12 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
                        continue;
 
                ptr = kmap(desc->bd_iov[i].kiov_page);
-               off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+               off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
                ptr[off] ^= 0x1;
                kunmap(desc->bd_iov[i].kiov_page);
                return;
        }
 }
-#else
-static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
-{
-        unsigned int    i;
-
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                if (desc->bd_iov[i].iov_len == 0)
-                        continue;
-
-                ((char *)desc->bd_iov[i].iov_base)[i] ^= 0x1;
-                return;
-        }
-}
-#endif /* __KERNEL__ */
 
 /****************************************
  * cli_ctx apis                         *
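The hunk above switches corrupt_bulk_data() from CFS_PAGE_MASK to the kernel's PAGE_MASK when computing the byte offset inside a kiov page. A minimal userspace sketch of that masking arithmetic, with DEMO_PAGE_SIZE/DEMO_PAGE_MASK as local stand-ins for the kernel's PAGE_SIZE/PAGE_MASK (not Lustre code):

    #include <stdio.h>

    #define DEMO_PAGE_SIZE 4096UL
    #define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long kiov_offset = 3 * DEMO_PAGE_SIZE + 123;

            /* Page-aligned base vs. offset within the page. */
            unsigned long page_base = kiov_offset & DEMO_PAGE_MASK;
            unsigned long in_page   = kiov_offset & ~DEMO_PAGE_MASK;

            printf("base=%lu in-page offset=%lu\n", page_base, in_page);
            /* prints: base=12288 in-page offset=123 */
            return 0;
    }
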
@@ -355,9 +339,7 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
         struct ptlrpc_bulk_sec_desc *bsdv;
         struct plain_bulk_token     *tokenv;
         int                          rc;
-#ifdef __KERNEL__
         int                          i, nob;
-#endif
 
         LASSERT(req->rq_pack_bulk);
         LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
@@ -372,7 +354,6 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                 return 0;
         }
 
-#ifdef __KERNEL__
         /* fix the actual data size */
         for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
                 if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
@@ -381,7 +362,6 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                 }
                 nob += desc->bd_iov[i].kiov_len;
         }
-#endif
 
         rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                     tokenv);
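The "fix the actual data size" loop in the hunk above walks desc->bd_iov[] and adjusts per-segment sizes so that no more than bd_nob_transferred bytes are accounted for after a partial bulk transfer (the body of the if is elided between hunks). A standalone sketch of how such clamping can work, using a hypothetical clamp_segments() helper rather than the actual Lustre code:

    #include <stdio.h>

    static void clamp_segments(unsigned int *len, int count,
                               unsigned int transferred)
    {
            unsigned int nob = 0;   /* bytes accounted for so far */
            int i;

            for (i = 0; i < count; i++) {
                    if (len[i] + nob > transferred)
                            len[i] = transferred - nob;  /* shrink this segment */
                    nob += len[i];
            }
    }

    int main(void)
    {
            unsigned int seg[3] = { 4096, 4096, 4096 };

            clamp_segments(seg, 3, 6000);
            printf("%u %u %u\n", seg[0], seg[1], seg[2]);   /* 4096 1904 0 */
            return 0;
    }
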
@@ -420,8 +400,8 @@ struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
-               CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
-               CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
+               INIT_LIST_HEAD(&ctx->cc_req_list);
+               INIT_LIST_HEAD(&ctx->cc_gc_chain);
 
                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
@@ -489,7 +469,7 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        spin_lock_init(&sec->ps_lock);
-        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+       INIT_LIST_HEAD(&sec->ps_gc_list);
         sec->ps_gc_interval = 0;
         sec->ps_gc_next = 0;
 
@@ -705,6 +685,15 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                 if (newbuf == NULL)
                         RETURN(-ENOMEM);
 
+               /* Must lock this, so that otherwise unprotected change of
+                * rq_reqmsg is not racing with parallel processing of
+                * imp_replay_list traversing threads. See LU-3333
+                * This is a bandaid at best, we really need to deal with this
+                * in request enlarging code before unpacking that's already
+                * there */
+               if (req->rq_import)
+                       spin_lock(&req->rq_import->imp_lock);
+
                 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
 
                 OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
@@ -712,6 +701,9 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                 req->rq_reqbuf_len = newbuf_size;
                 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                 PLAIN_PACK_MSG_OFF, 0);
+
+               if (req->rq_import)
+                       spin_unlock(&req->rq_import->imp_lock);
         }
 
         _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
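The comment added above explains why the reqbuf swap must be serialized with readers of imp_replay_list: those threads dereference rq_reqmsg under imp_lock, so re-pointing it into a freshly allocated buffer without the lock could leave them reading freed memory. A minimal userspace sketch of that reader/writer pattern, with a pthread mutex and a demo_req struct standing in for imp_lock, rq_reqbuf and rq_reqmsg (assumed names, not Lustre code; compile with -pthread):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct demo_req {
            pthread_mutex_t  lock;          /* stands in for imp_lock */
            char            *buf;           /* stands in for rq_reqbuf */
            char            *msg;           /* stands in for rq_reqmsg */
            size_t           buf_len;
    };

    /* Reader side (think of a thread walking imp_replay_list): msg is
     * only dereferenced while the lock is held. */
    static size_t read_msg_len(struct demo_req *req)
    {
            size_t len;

            pthread_mutex_lock(&req->lock);
            len = strlen(req->msg);
            pthread_mutex_unlock(&req->lock);
            return len;
    }

    /* Writer side: the copy, the free and the re-pointing of msg all
     * happen under the same lock, so a concurrent reader never sees msg
     * pointing into freed memory. */
    static int enlarge_buf(struct demo_req *req, size_t new_len)
    {
            char *newbuf = calloc(1, new_len);

            if (newbuf == NULL)
                    return -1;

            pthread_mutex_lock(&req->lock);
            memcpy(newbuf, req->buf, req->buf_len);
            free(req->buf);
            req->buf = newbuf;
            req->msg = newbuf;
            req->buf_len = new_len;
            pthread_mutex_unlock(&req->lock);
            return 0;
    }

    int main(void)
    {
            struct demo_req req = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL, 0 };

            req.buf = strdup("hello");
            req.msg = req.buf;
            req.buf_len = strlen(req.buf) + 1;

            enlarge_buf(&req, 64);
            return (int)read_msg_len(&req);         /* 5 */
    }

In the patch itself the lock is only taken when req->rq_import is set, presumably because a request may not yet be bound to an import at that point.
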
@@ -901,7 +893,7 @@ int plain_authorize(struct ptlrpc_request *req)
                        lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                        lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                        NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
-                       req->rq_reply_off = 0;
+               req->rq_reply_off = 0;
         }
 
         RETURN(0);