sizeof(*repbody));
memcpy(&repbody->oa, &body->oa, sizeof(body->oa));
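/* Aim the llog cookie buffer at the reply, so a cookie generated by
 * obd_create() is carried back to the client in the reply body. */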
oti->oti_logcookies = &repbody->oa.o_lcookie;
+
req->rq_status = obd_create(exp, &repbody->oa, NULL, oti);
/* obd_log_cancel(conn, NULL, 1, oti->oti_logcookies, 0); */
RETURN(0);
return LDLM_ITER_CONTINUE;
}
+ CDEBUG(D_DLMTRACE, "refresh lock: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
+ lock->l_resource->lr_name.name[0],
+ lock->l_resource->lr_name.name[1],
+ opd->opd_policy.l_extent.start, opd->opd_policy.l_extent.end);
/* OK, this is a possible lock the user holds while doing I/O;
 * refresh the eviction timer for it. */
ldlm_refresh_waiting_lock(lock, opd->opd_timeout);
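/* The prolong timeout is the adaptive-timeout estimate for this
 * service, but never less than ldlm_timeout. */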
max(at_est2timeout(at_get(&req->rq_rqbd->
rqbd_service->srv_at_estimate)), ldlm_timeout);
- CDEBUG(D_DLMTRACE,"refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
+ CDEBUG(D_INFO, "refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
res_id.name[0], res_id.name[1], opd.opd_policy.l_extent.start,
opd.opd_policy.l_extent.end);
if (exp->exp_failed)
rc = -ENOTCONN;
else {
- sptlrpc_svc_wrap_bulk(req, desc);
-
- rc = ptlrpc_start_bulk_transfer(desc);
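+ /* Apply the security flavor's bulk protection (e.g. checksums)
+ * first, and only start the transfer if wrapping succeeded. */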
+ rc = sptlrpc_svc_wrap_bulk(req, desc);
+ if (rc == 0)
+ rc = ptlrpc_start_bulk_transfer(desc);
}
if (rc == 0) {
desc);
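/* Wait until the bulk completes, the export fails, or active
 * requests are being aborted (client reconnect). */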
rc = l_wait_event(desc->bd_waitq,
!ptlrpc_server_bulk_active(desc) ||
- exp->exp_failed, &lwi);
+ exp->exp_failed ||
+ exp->exp_abort_active_req,
+ &lwi);
LASSERT(rc == 0 || rc == -ETIMEDOUT);
/* Wait again if we changed deadline */
} while ((rc == -ETIMEDOUT) &&
DEBUG_REQ(D_ERROR, req, "Eviction on bulk PUT");
rc = -ENOTCONN;
ptlrpc_abort_bulk(desc);
+ } else if (exp->exp_abort_active_req) {
+ DEBUG_REQ(D_ERROR, req, "Reconnect on bulk PUT");
+ /* we don't reply anyway */
+ rc = -ETIMEDOUT;
+ ptlrpc_abort_bulk(desc);
} else if (!desc->bd_success ||
desc->bd_nob_transferred != desc->bd_nob) {
DEBUG_REQ(D_ERROR, req, "%s bulk PUT %d(%d)",
body = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
LASSERT(body != NULL);
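+ /* A client on this same node is writing under memory pressure
+ * (OBD_BRW_MEMALLOC): flag memory pressure locally so our own
+ * allocations can dip into reserves and cannot deadlock writeout. */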
+ if ((body->oa.o_flags & OBD_BRW_MEMALLOC) &&
+ (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self))
+ libcfs_memory_pressure_set();
+
objcount = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1) /
sizeof(*ioo);
ioo = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1,
local_nb[i].offset & ~CFS_PAGE_MASK,
local_nb[i].len);
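+ /* Give the security layer a chance to set up per-bulk state
+ * before any pages go over the wire. */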
+ rc = sptlrpc_svc_prep_bulk(req, desc);
+ if (rc != 0)
+ GOTO(out_lock, rc);
+
/* Check whether the client was evicted while we were doing I/O,
 * before touching the network */
if (desc->bd_export->exp_failed)
ost_bulk_timeout, desc);
rc = l_wait_event(desc->bd_waitq,
!ptlrpc_server_bulk_active(desc) ||
- desc->bd_export->exp_failed, &lwi);
+ desc->bd_export->exp_failed ||
+ desc->bd_export->exp_abort_active_req,
+ &lwi);
LASSERT(rc == 0 || rc == -ETIMEDOUT);
/* Wait again if we changed deadline */
} while ((rc == -ETIMEDOUT) &&
DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
rc = -ENOTCONN;
ptlrpc_abort_bulk(desc);
- } else if (!desc->bd_success ||
- desc->bd_nob_transferred != desc->bd_nob) {
- DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
- desc->bd_success ?
- "truncated" : "network error on",
- desc->bd_nob_transferred, desc->bd_nob);
+ } else if (desc->bd_export->exp_abort_active_req) {
+ DEBUG_REQ(D_ERROR, req, "Reconnect on bulk GET");
+ /* we don't reply anyway */
+ rc = -ETIMEDOUT;
+ ptlrpc_abort_bulk(desc);
+ } else if (!desc->bd_success) {
+ DEBUG_REQ(D_ERROR, req, "network error on bulk GET");
/* XXX should this be a different errno? */
rc = -ETIMEDOUT;
+ } else {
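+ /* All bytes arrived; verify/unwrap them per the security flavor. */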
+ rc = sptlrpc_svc_unwrap_bulk(req, desc);
}
} else {
DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
}
no_reply = rc != 0;
- if (rc == 0)
- sptlrpc_svc_unwrap_bulk(req, desc);
-
repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
sizeof(*repbody));
memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa));
exp->exp_connection->c_remote_uuid.uuid,
libcfs_id2str(req->rq_peer));
}
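+ /* Undo any memory-pressure hint set above for MEMALLOC writes. */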
+ libcfs_memory_pressure_clr();
RETURN(rc);
}
static int ost_set_info(struct obd_export *exp, struct ptlrpc_request *req)
{
+ struct ost_body *body = NULL, *repbody;
+ __u32 size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
char *key, *val = NULL;
int keylen, vallen, rc = 0;
ENTRY;
}
keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF);
- rc = lustre_pack_reply(req, 1, NULL, NULL);
- if (rc)
- RETURN(rc);
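+ /* A grant shrink reply carries an ost_body, so the client can see
+ * the server-adjusted grant; other keys get an empty reply. */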
+ if (KEY_IS(KEY_GRANT_SHRINK)) {
+ rc = lustre_pack_reply(req, 2, size, NULL);
+ if (rc)
+ RETURN(rc);
+ } else {
+ rc = lustre_pack_reply(req, 1, NULL, NULL);
+ if (rc)
+ RETURN(rc);
+ }
vallen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1);
- if (vallen)
- val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, 0);
+ if (vallen) {
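+ /* For grant shrink, swab the incoming ost_body and stage it in
+ * the reply; the grant is then updated there in place. */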
+ if (KEY_IS(KEY_GRANT_SHRINK)) {
+ body = lustre_swab_reqbuf(req, REQ_REC_OFF + 1,
+ sizeof(*body),
+ lustre_swab_ost_body);
+ if (!body)
+ RETURN(-EFAULT);
+
+ repbody = lustre_msg_buf(req->rq_repmsg,
+ REPLY_REC_OFF,
+ sizeof(*repbody));
+ memcpy(repbody, body, sizeof(*body));
+ val = (char *)repbody;
+ } else
+ val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, 0);
+ }
if (KEY_IS(KEY_EVICT_BY_NID)) {
if (val && vallen)
obd_export_evict_by_nid(exp->exp_obd, val);
-
GOTO(out, rc = 0);
+ } else if (KEY_IS(KEY_MDS_CONN) && lustre_msg_swabbed(req->rq_reqmsg)) {
+ /* Values are not swabbed automatically */
+ __swab32s((__u32 *)val);
}
rc = obd_set_info_async(exp, keylen, key, vallen, val, NULL);
struct sptlrpc_flavor flvr;
int rc = 0;
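+ /* obdecho exports are test-only: accept any sptlrpc flavor and
+ * skip the policy check entirely. */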
+ if (unlikely(strcmp(exp->exp_obd->obd_type->typ_name,
+ LUSTRE_ECHO_NAME) == 0)) {
+ exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;
+ return 0;
+ }
+
if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
read_lock(&filter->fo_sptlrpc_lock);
sptlrpc_target_choose_flavor(&filter->fo_sptlrpc_rset,
end = (nb[ioo->ioo_bufcnt - 1].offset +
nb[ioo->ioo_bufcnt - 1].len - 1) | ~CFS_PAGE_MASK;
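+ /* Skip locks on other objects: only consider locks whose resource
+ * matches this I/O's object id/group. */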
+ LASSERT(lock->l_resource != NULL);
+ if (!osc_res_name_eq(ioo->ioo_id, ioo->ioo_gr,
+ &lock->l_resource->lr_name))
+ RETURN(0);
+
if (!(lock->l_granted_mode & mode))
RETURN(0);