* GPL HEADER END
*/
/*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
*/
/*
* lustre/target/tgt_handler.c
RETURN(h);
}
+/**
+ * Validate an incoming request's xid against the export's last_xid.
+ *
+ * First advance exp_last_xid to the client-reported last_xid (only ever
+ * forward).  A request whose own xid is zero, or not strictly greater
+ * than exp_last_xid, is rejected with -EPROTO unless it carries
+ * MSG_REPLAY (see the rationale comment below for why stale xids can
+ * legitimately appear).  For multi-modify-RPC clients, reply data up to
+ * the reported last_xid is released and the slot tag is handled for
+ * fresh (non-resent, non-replay) requests.
+ *
+ * \param[in] req	incoming request; req->rq_export is dereferenced
+ *			unconditionally, so the caller must guarantee the
+ *			export is set (TODO confirm at call sites)
+ *
+ * \retval 0		xid accepted (or tolerated for a replay request)
+ * \retval negative	value of ptlrpc_error() after rq_status = -EPROTO
+ */
+static int process_req_last_xid(struct ptlrpc_request *req)
+{
+ __u64 last_xid;
+ ENTRY;
+
+ /* check request's xid is consistent with export's last_xid */
+ last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
+ if (last_xid > req->rq_export->exp_last_xid)
+ req->rq_export->exp_last_xid = last_xid;
+
+ if (req->rq_xid == 0 ||
+ (req->rq_xid <= req->rq_export->exp_last_xid)) {
+ DEBUG_REQ(D_ERROR, req, "Unexpected xid %llx vs. "
+ "last_xid %llx\n", req->rq_xid,
+ req->rq_export->exp_last_xid);
+ /* Some requests are allowed to be sent during replay,
+ * such as OUT update requests, FLD requests, so it
+ * is possible that replay requests have smaller XID
+ * than the exp_last_xid.
+ *
+ * Some non-replay requests may have smaller XID as
+ * well:
+ *
+ * - Client sends a no_resend RPC, like statfs;
+ * - The RPC timed out (or some other error) on client,
+ * then it's removed from the unreplied list;
+ * - Client sends some other request to bump the
+ * exp_last_xid on server;
+ * - The former RPC got chance to be processed;
+ */
+ if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
+ req->rq_status = -EPROTO;
+ RETURN(ptlrpc_error(req));
+ }
+ }
+
+ /* try to release in-memory reply data */
+ if (tgt_is_multimodrpcs_client(req->rq_export)) {
+ tgt_handle_received_xid(req->rq_export,
+ lustre_msg_get_last_xid(req->rq_reqmsg));
+ /* resent/replay requests keep their original slot tag;
+ * only fresh requests update tag bookkeeping */
+ if (!(lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_RESENT | MSG_REPLAY)))
+ tgt_handle_tag(req->rq_export,
+ lustre_msg_get_tag(req->rq_reqmsg));
+ }
+ RETURN(0);
+}
+
int tgt_request_handle(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);
struct lu_target *tgt;
int request_fail_id = 0;
__u32 opc = lustre_msg_get_opc(msg);
+ struct obd_device *obd;
int rc;
-
+ bool is_connect = false;
ENTRY;
/* Refill the context, to make sure all thread keys are allocated */
* target, otherwise that should be connect operation */
if (opc == MDS_CONNECT || opc == OST_CONNECT ||
opc == MGS_CONNECT) {
+ is_connect = true;
req_capsule_set(&req->rq_pill, &RQF_CONNECT);
rc = target_handle_connect(req);
if (rc != 0) {
GOTO(out, rc);
}
- /* check request's xid is consistent with export's last_xid */
- if (req->rq_export != NULL) {
- __u64 last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
- if (last_xid != 0)
- req->rq_export->exp_last_xid = last_xid;
- if (req->rq_xid == 0 ||
- req->rq_xid <= req->rq_export->exp_last_xid) {
- DEBUG_REQ(D_ERROR, req,
- "Unexpected xid %llx vs. last_xid %llx\n",
- req->rq_xid, req->rq_export->exp_last_xid);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 93, 0)
- LBUG();
-#endif
- req->rq_status = -EPROTO;
- rc = ptlrpc_error(req);
+ /* Skip last_xid processing for the recovery thread, otherwise, the
+ * last_xid on same request could be processed twice: first time when
+ * processing the incoming request, second time when the request is
+ * being processed by recovery thread. */
+ obd = class_exp2obd(req->rq_export);
+ if (is_connect) {
+ /* reset the exp_last_xid on each connection. */
+ req->rq_export->exp_last_xid = 0;
+ } else if (obd->obd_recovery_data.trd_processing_task !=
+ current_pid()) {
+ rc = process_req_last_xid(req);
+ if (rc)
GOTO(out, rc);
- }
}
request_fail_id = tgt->lut_request_fail_id;
tsi->tsi_reply_fail_id = tgt->lut_reply_fail_id;
- /* try to release in-memory reply data */
- if (tgt_is_multimodrpcs_client(req->rq_export)) {
- tgt_handle_received_xid(req->rq_export,
- lustre_msg_get_last_xid(req->rq_reqmsg));
- if (!(lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_RESENT | MSG_REPLAY)))
- tgt_handle_tag(req->rq_export,
- lustre_msg_get_tag(req->rq_reqmsg));
- }
-
h = tgt_handler_find_check(req);
if (IS_ERR(h)) {
req->rq_status = PTR_ERR(h);
return err_serious(-EOPNOTSUPP);
}
+/**
+ * Transfer a set of buffers to the client via a bulk PUT.
+ *
+ * Builds a single KVEC-backed bulk descriptor on MDS_BULK_PORTAL,
+ * adds each entry of \a rdbuf->rb_bufs as one iov fragment, and then
+ * performs the transfer with target_bulk_io().  The descriptor is
+ * freed on all paths after the I/O completes.
+ *
+ * \param[in] tsi	target session info for the current request
+ * \param[in] rdbuf	buffers to send; rb_nbufs entries in rb_bufs
+ *
+ * \retval 0		on success
+ * \retval -ENOMEM	if the bulk descriptor cannot be allocated
+ * \retval negative	other errors propagated from target_bulk_io()
+ */
+int tgt_send_buffer(struct tgt_session_info *tsi, struct lu_rdbuf *rdbuf)
+{
+ struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
+ struct ptlrpc_request *req = tgt_ses_req(tsi);
+ struct obd_export *exp = req->rq_export;
+ struct ptlrpc_bulk_desc *desc;
+ struct l_wait_info *lwi = &tti->tti_u.update.tti_wait_info;
+ int i;
+ int rc;
+
+ ENTRY;
+
+ desc = ptlrpc_prep_bulk_exp(req, rdbuf->rb_nbufs, 1,
+ PTLRPC_BULK_PUT_SOURCE | PTLRPC_BULK_BUF_KVEC,
+ MDS_BULK_PORTAL, &ptlrpc_bulk_kvec_ops);
+ if (desc == NULL)
+ RETURN(-ENOMEM);
+
+ /* one iov fragment per read buffer */
+ for (i = 0; i < rdbuf->rb_nbufs; i++)
+ desc->bd_frag_ops->add_iov_frag(desc,
+ rdbuf->rb_bufs[i].lb_buf,
+ rdbuf->rb_bufs[i].lb_len);
+
+ rc = target_bulk_io(exp, desc, lwi);
+ ptlrpc_free_bulk(desc);
+ RETURN(rc);
+}
+EXPORT_SYMBOL(tgt_send_buffer);
+
int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
{
struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
};
EXPORT_SYMBOL(tgt_llog_handlers);
+/*
+ * sec context handlers
+ */
+/* XXX: Implement based on mdt_sec_ctx_handle()? */
+/* Placeholder handler for security-context RPCs: accepts the request
+ * and does nothing.  Real context handling would presumably mirror
+ * mdt_sec_ctx_handle() -- see the XXX note above. */
+static int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
+{
+ return 0;
+}
+
+/* Dispatch table for SEC_CTX_INIT / SEC_CTX_INIT_CONT / SEC_CTX_FINI;
+ * all three opcodes currently route to the no-op tgt_sec_ctx_handle(). */
+struct tgt_handler tgt_sec_ctx_handlers[] = {
+TGT_SEC_HDL_VAR(0, SEC_CTX_INIT, tgt_sec_ctx_handle),
+TGT_SEC_HDL_VAR(0, SEC_CTX_INIT_CONT, tgt_sec_ctx_handle),
+TGT_SEC_HDL_VAR(0, SEC_CTX_FINI, tgt_sec_ctx_handle),
+};
+EXPORT_SYMBOL(tgt_sec_ctx_handlers);
+
int (*tgt_lfsck_in_notify)(const struct lu_env *env,
struct dt_device *key,
struct lfsck_request *lr,
break;
}
}
+ if (OBD_FAIL_CHECK(OBD_FAIL_OST_READ_SIZE) &&
+ nob != cfs_fail_val)
+ rc = -E2BIG;
if (body->oa.o_valid & OBD_MD_FLCKSUM) {
cksum_type_t cksum_type =