* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_SEC
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
#include <obd_support.h>
#include <obd_cksum.h>
#include <lustre_net.h>
#include <lustre_sec.h>
+#include "ptlrpc_internal.h"
+
struct plain_sec {
struct ptlrpc_sec pls_base;
rwlock_t pls_lock;
return 0;
}
-#ifdef __KERNEL__
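+/*
+ * Deliberately flip the lowest bit of the first byte of the first
+ * non-empty kiov fragment in the bulk descriptor.  This corrupts the
+ * payload in a minimal, detectable way so that the bulk checksum
+ * verification paths can be exercised.
+ */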
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
char *ptr;
unsigned int off, i;
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len == 0)
+ if (BD_GET_KIOV(desc, i).kiov_len == 0)
continue;
- ptr = kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
+ off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
- kunmap(desc->bd_iov[i].kiov_page);
+ kunmap(BD_GET_KIOV(desc, i).kiov_page);
return;
}
}
-#else
-static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
-{
- unsigned int i;
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].iov_len == 0)
- continue;
-
- ((char *)desc->bd_iov[i].iov_base)[i] ^= 0x1;
- return;
- }
-}
-#endif /* __KERNEL__ */
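+/*
+ * Note: BD_GET_KIOV(desc, i) replaces direct desc->bd_iov[i] access now
+ * that a bulk descriptor can carry either kiov pages or kvec buffers
+ * (hence the ptlrpc_is_bulk_desc_kiov() assertions).  A sketch of the
+ * accessor, assuming the union layout in lustre_net.h:
+ *
+ *	#define BD_GET_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_vec[i])
+ *
+ * Illustrative only; see lustre_net.h for the authoritative macro.
+ */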
/****************************************
 * cli_ctx apis                         *
 ****************************************/
struct ptlrpc_bulk_sec_desc *bsdv;
struct plain_bulk_token *tokenv;
int rc;
-#ifdef __KERNEL__
int i, nob;
-#endif
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
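+	/* plain RPC buffers always carry PLAIN_PACK_SEGMENTS (4)
+	 * segments: header, lustre msg, user descriptor and bulk
+	 * security descriptor, per the PLAIN_PACK_*_OFF layout */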
return 0;
}
-#ifdef __KERNEL__
- /* fix the actual data size */
- for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
- desc->bd_iov[i].kiov_len =
- desc->bd_nob_transferred - nob;
- }
- nob += desc->bd_iov[i].kiov_len;
- }
-#endif
+ /* fix the actual data size */
+ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+ if (BD_GET_KIOV(desc, i).kiov_len +
+ nob > desc->bd_nob_transferred) {
+ BD_GET_KIOV(desc, i).kiov_len =
+ desc->bd_nob_transferred - nob;
+ }
+ nob += BD_GET_KIOV(desc, i).kiov_len;
+ }
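+	/* e.g. if two 4096-byte kiovs were posted but only 6144 bytes
+	 * arrived, the loop above clamps the second kiov_len from 4096
+	 * to 2048, so the checksum below covers exactly the bytes
+	 * actually transferred (hypothetical sizes) */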
rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
tokenv);
ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
ctx->cc_vcred.vc_uid = 0;
spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
plsec->pls_ctx = ctx;
atomic_inc(&plsec->pls_base.ps_nctx);
sec->ps_import = class_import_get(imp);
sec->ps_flvr = *sf;
spin_lock_init(&sec->ps_lock);
- CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
sec->ps_gc_interval = 0;
sec->ps_gc_next = 0;
if (newbuf == NULL)
RETURN(-ENOMEM);
+	/* Must lock this, so that an otherwise unprotected change of
+	 * rq_reqmsg does not race with the threads traversing
+	 * imp_replay_list in parallel.  See LU-3333.
+	 * This is a band-aid at best; we really need to deal with this
+	 * in the request enlarging code, before the unpacking that is
+	 * already there. */
+	if (req->rq_import)
+		spin_lock(&req->rq_import->imp_lock);
+
memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
req->rq_reqbuf_len = newbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
PLAIN_PACK_MSG_OFF, 0);
+
+	if (req->rq_import)
+		spin_unlock(&req->rq_import->imp_lock);
}
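+/*
+ * Illustration only (hypothetical reader side, not part of this file):
+ * the imp_replay_list walkers that imp_lock synchronizes against look
+ * roughly like
+ *
+ *	spin_lock(&imp->imp_lock);
+ *	list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list)
+ *		process(req->rq_reqmsg);
+ *	spin_unlock(&imp->imp_lock);
+ *
+ * so a holder of imp_lock can never observe rq_reqbuf/rq_reqmsg
+ * mid-swap.
+ */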
	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
				     newmsg_size);
	cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
			       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
			       lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
			       NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
- req->rq_reply_off = 0;
+ req->rq_reply_off = 0;
}
RETURN(0);