Bugzilla : 22911
Description: Don't enable extents by default for MDT.
+Severity : normal
+Bugzilla : 21877
+Description: Protect bitfield access to ptlrpc_request's rq_flags, since
+ the AT code can access it concurrently while sending early
+ replies.
+
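Note: every hunk below applies the same fix - wrap each store to a
ptlrpc_request bitfield in the request's rq_lock. A minimal standalone
sketch of why that is needed (illustrative fake_req/set_* names, not the
real ptlrpc structures): C bitfields such as rq_resend and rq_net_err are
packed into one machine word, so each assignment is a read-modify-write
of the whole word, and two unlocked writers can silently drop each
other's bit.

/* bitfield_race.c - illustrative sketch only, not the ptlrpc code */
#include <pthread.h>
#include <stdio.h>

struct fake_req {                       /* stands in for ptlrpc_request */
	pthread_spinlock_t rq_lock;     /* stands in for req->rq_lock */
	unsigned int rq_resend:1,       /* both flags share one word, so */
	             rq_net_err:1;      /* each store rewrites that word */
};

static void *set_resend(void *arg)
{
	struct fake_req *req = arg;

	pthread_spin_lock(&req->rq_lock);   /* the fix: serialize the RMW */
	req->rq_resend = 1;
	pthread_spin_unlock(&req->rq_lock);
	return NULL;
}

static void *set_net_err(void *arg)
{
	struct fake_req *req = arg;

	pthread_spin_lock(&req->rq_lock);
	req->rq_net_err = 1;
	pthread_spin_unlock(&req->rq_lock);
	return NULL;
}

int main(void)
{
	struct fake_req req = { .rq_resend = 0, .rq_net_err = 0 };
	pthread_t a, b;

	pthread_spin_init(&req.rq_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, set_resend, &req);
	pthread_create(&b, NULL, set_net_err, &req);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("resend=%u net_err=%u\n", req.rq_resend, req.rq_net_err);
	return 0;
}

Build with cc bitfield_race.c -lpthread; with the two spin_lock/spin_unlock
pairs removed, the stores may interleave on SMP and one of the flags can
read back as 0.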
-------------------------------------------------------------------------------
2010-04-30 Oracle, Inc.
/* mark that the request is in the recovery queue, so the request
* handler will not drop the rpc count in the export, bug 19870 */
LASSERT(!req->rq_copy_queued);
+ spin_lock(&req->rq_lock);
req->rq_copy_queued = 1;
+ spin_unlock(&req->rq_lock);
/* increase refcount to keep request in queue */
atomic_inc(&req->rq_refcount);
/* release service thread while request is queued */
/* grow the reply buffer for the resend (with space for early reply) */
req->rq_replen = size_round(req->rq_nob_received);
req->rq_nob_received = 0;
+
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
RETURN(0);
}
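For reference, size_round() above rounds the received byte count up so
the re-allocated reply buffer is 8-byte aligned; a minimal stand-in (a
sketch from memory, not copied from the Lustre headers):

/* minimal stand-in for Lustre's size_round(): round a byte count up
 * to the next multiple of 8, e.g. size_round(13) == 16 */
static inline int size_round(int val)
{
	return (val + 7) & (~0x7);
}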
rc = ptl_send_rpc(req, 0);
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
+
+ spin_lock(&req->rq_lock);
req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
RETURN(rc);
}
RETURN(0);
spin_unlock(&imp->imp_lock);
+ spin_lock(&req->rq_lock);
req->rq_waiting = 0;
+ spin_unlock(&req->rq_lock);
if (req->rq_timedout || req->rq_resend) {
/* This is re-sending anyway,
* so let's mark the req as a resend. */
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
if (req->rq_bulk) {
__u64 old_xid;
DEBUG_REQ(D_HA, req, "send failed (%d)",
rc);
force_timer_recalc = 1;
+
+ spin_lock(&req->rq_lock);
req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
}
/* need to reset the timeout */
force_timer_recalc = 1;
/* we can get a timeout on a dlm fake import which does not
* support recovery - but we need to resend the request on this
* import instead of returning an error */
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
goto restart;
}
CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
request->rq_import->imp_obd->obd_name);
/* this prevents us from waiting in ptlrpc_queue_wait */
+ spin_lock(&request->rq_lock);
request->rq_err = 1;
+ spin_unlock(&request->rq_lock);
+
request->rq_status = -ENODEV;
RETURN(-ENODEV);
}
OBD_ALLOC(request->rq_repbuf, request->rq_replen);
if (request->rq_repbuf == NULL) {
/* this prevents us from looping in ptlrpc_queue_wait */
+ spin_lock(&request->rq_lock);
request->rq_err = 1;
+ spin_unlock(&request->rq_lock);
+
request->rq_status = -ENOMEM;
GOTO(cleanup_bulk, rc = -ENOMEM);
}
LASSERT(req->rq_reply_state == NULL);
- if ((flags & LPRFL_EARLY_REPLY) == 0)
+ if ((flags & LPRFL_EARLY_REPLY) == 0) {
+ spin_lock(&req->rq_lock);
req->rq_packed_final = 1;
+ spin_unlock(&req->rq_lock);
+ }
msg_len = lustre_msg_size_v1(count, lens);
size = sizeof(struct ptlrpc_reply_state) + msg_len;
LASSERT(req->rq_reply_state == NULL);
- if ((flags & LPRFL_EARLY_REPLY) == 0)
+ if ((flags & LPRFL_EARLY_REPLY) == 0) {
+ spin_lock(&req->rq_lock);
req->rq_packed_final = 1;
+ spin_unlock(&req->rq_lock);
+ }
/* use the same size of ptlrpc_body as the client requested,
* for interoperability cases */
struct ptlrpc_at_array *array = &svc->srv_at_array;
__u32 index = req->rq_at_index;
+ spin_lock(&req->rq_lock);
req->rq_at_linked = 0;
+ spin_unlock(&req->rq_lock);
array->paa_reqs_count[index]--;
array->paa_count--;
}
if (list_empty(&req->rq_timed_list))
list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);
+ spin_lock(&req->rq_lock);
req->rq_at_linked = 1;
+ spin_unlock(&req->rq_lock);
+
req->rq_at_index = index;
array->paa_reqs_count[index]++;
array->paa_count++;
counter++;
array->paa_reqs_count[index]--;
array->paa_count--;
+ spin_lock(&rq->rq_lock);
rq->rq_at_linked = 0;
+ spin_unlock(&rq->rq_lock);
continue;
}
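The last two hunks also maintain the adaptive-timeout (AT) array counters
next to the rq_at_linked flag. A simplified model of that bookkeeping
(hypothetical fake_* types, not the real ptlrpc_at_array): the per-bucket
counts and the global count must always mirror each request's linked
state, which is why the hunks above update them together while taking
rq_lock for the flag itself.

/* at_array_sketch.c - simplified model, not the ptlrpc code */
#include <stdio.h>

#define AT_BUCKETS 8

struct fake_at_array {
	int paa_reqs_count[AT_BUCKETS]; /* requests per deadline bucket */
	int paa_count;                  /* total requests linked */
};

struct fake_at_req {
	int rq_at_linked;               /* is the request in a bucket? */
	unsigned int rq_at_index;       /* which bucket it sits in */
};

static void at_link(struct fake_at_array *a, struct fake_at_req *r,
		    unsigned int idx)
{
	r->rq_at_linked = 1;            /* set under rq_lock in the patch */
	r->rq_at_index = idx;
	a->paa_reqs_count[idx]++;
	a->paa_count++;
}

static void at_unlink(struct fake_at_array *a, struct fake_at_req *r)
{
	a->paa_reqs_count[r->rq_at_index]--;
	a->paa_count--;
	r->rq_at_linked = 0;            /* cleared under rq_lock in the patch */
}

int main(void)
{
	struct fake_at_array a = { {0}, 0 };
	struct fake_at_req r = { 0, 0 };

	at_link(&a, &r, 3);
	at_unlink(&a, &r);
	printf("linked=%d bucket3=%d total=%d\n",
	       r.rq_at_linked, a.paa_reqs_count[3], a.paa_count);
	return 0;
}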