Whamcloud - gitweb
git://git.whamcloud.com
/
fs
/
lustre-release.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
| inline |
side by side
LU-5319 tests: testcases for multiple modify RPCs feature
[fs/lustre-release.git]
/
lustre
/
target
/
tgt_handler.c
diff --git
a/lustre/target/tgt_handler.c
b/lustre/target/tgt_handler.c
index 93ff6cb..6b32fca 100644
(file)
--- a/lustre/target/tgt_handler.c
+++ b/lustre/target/tgt_handler.c
@@ -374,9 +374,24 @@
static int tgt_handle_request0(struct tgt_session_info *tsi,
{
int serious = 0;
int rc;
+ __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
ENTRY;
+
+ /* When dealing with sec context requests, no export is associated yet,
+ * because these requests are sent before *_CONNECT requests.
+ * A NULL req->rq_export means the normal *_common_slice handlers will
+ * not be called, because there is no reference to the target.
+ * So deal with them by hand and jump directly to target_send_reply().
+ */
+ switch (opc) {
+ case SEC_CTX_INIT:
+ case SEC_CTX_INIT_CONT:
+ case SEC_CTX_FINI:
+ GOTO(out, rc = 0);
+ }
+
/*
* Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
* to put same checks into handlers like mdt_close(), mdt_reint(),
@@ -390,6 +405,9 @@
static int tgt_handle_request0(struct tgt_session_info *tsi,
*/
if (OBD_FAIL_CHECK_ORSET(h->th_fail_id, OBD_FAIL_ONCE))
RETURN(0);
+ if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == MDS_REINT &&
+ OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_MULTI_NET)))
+ RETURN(0);
rc = tgt_request_preprocess(tsi, h, req);
/* pack reply if reply format is fixed */
@@ -444,6 +462,7 @@
static int tgt_handle_request0(struct tgt_session_info *tsi,
if (likely(rc == 0 && req->rq_export))
target_committed_to_req(req);
+out:
target_send_reply(req, rc, tsi->tsi_reply_fail_id);
RETURN(0);
}
@@ -472,6 +491,9 @@
static int tgt_filter_recovery_request(struct ptlrpc_request *req,
case OST_SETATTR:
case OST_SYNC:
case OST_WRITE:
+ case MDS_HSM_PROGRESS:
+ case MDS_HSM_STATE_SET:
+ case MDS_HSM_REQUEST:
*process = target_queue_recovery_request(req, obd);
RETURN(0);
@@ -507,11 +529,11 @@
static int tgt_handle_recovery(struct ptlrpc_request *req, int reply_fail_id)
/* sanity check: if the xid matches, the request must be marked as a
* resent or replayed */
-	if (req_xid_is_last(req)) {
+	if (req_can_reconstruct(req, NULL)) {
if (!(lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_RESENT | MSG_REPLAY))) {
DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches "
-				  "last_xid, expected REPLAY or RESENT flag "
+				  "saved xid, expected REPLAY or RESENT flag "
"(%x)", req->rq_xid,
lustre_msg_get_flags(req->rq_reqmsg));
req->rq_status = -ENOTCONN;
@@ -554,6 +576,11 @@
static struct tgt_handler *tgt_handler_find_check(struct ptlrpc_request *req)
ENTRY;
tgt = class_exp2tgt(req->rq_export);
+ if (unlikely(tgt == NULL)) {
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ class_exp2obd(req->rq_export)->obd_name);
+ RETURN(ERR_PTR(-EINVAL));
+ }
for (s = tgt->lut_slice; s->tos_hs != NULL; s++)
if (s->tos_opc_start <= opc && opc < s->tos_opc_end)
@@ -613,6 +640,14 @@
int tgt_request_handle(struct ptlrpc_request *req)
}
if (unlikely(!class_connected_export(req->rq_export))) {
+ if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT ||
+ opc == SEC_CTX_FINI) {
+ /* sec context initialization has to be handled
+ * by hand in tgt_handle_request0() */
+ tsi->tsi_reply_fail_id = OBD_FAIL_SEC_CTX_INIT_NET;
+ h = NULL;
+ GOTO(handle_recov, rc = 0);
+ }
CDEBUG(D_HA, "operation %d on unconnected OST from %s\n",
opc, libcfs_id2str(req->rq_peer));
req->rq_status = -ENOTCONN;
@@ -627,9 +662,46 @@
int tgt_request_handle(struct ptlrpc_request *req)
else
tsi->tsi_jobid = NULL;
+ if (tgt == NULL) {
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ class_exp2obd(req->rq_export)->obd_name);
+ req->rq_status = -EINVAL;
+ rc = ptlrpc_error(req);
+ GOTO(out, rc);
+ }
+
+ /* check request's xid is consistent with export's last_xid */
+ if (req->rq_export != NULL) {
+ __u64 last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
+ if (last_xid != 0)
+ req->rq_export->exp_last_xid = last_xid;
+ if (req->rq_xid == 0 ||
+ req->rq_xid <= req->rq_export->exp_last_xid) {
+ DEBUG_REQ(D_ERROR, req,
+ "Unexpected xid %llx vs. last_xid %llx\n",
+ req->rq_xid, req->rq_export->exp_last_xid);
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 93, 0)
+ LBUG();
+#endif
+ req->rq_status = -EPROTO;
+ rc = ptlrpc_error(req);
+ GOTO(out, rc);
+ }
+ }
+
request_fail_id = tgt->lut_request_fail_id;
tsi->tsi_reply_fail_id = tgt->lut_reply_fail_id;
+ /* try to release in-memory reply data */
+ if (tgt_is_multimodrpcs_client(req->rq_export)) {
+ tgt_handle_received_xid(req->rq_export,
+ lustre_msg_get_last_xid(req->rq_reqmsg));
+ if (!(lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_RESENT | MSG_REPLAY)))
+ tgt_handle_tag(req->rq_export,
+ lustre_msg_get_tag(req->rq_reqmsg));
+ }
+
h = tgt_handler_find_check(req);
if (IS_ERR(h)) {
req->rq_status = PTR_ERR(h);
@@ -637,6 +709,9 @@
int tgt_request_handle(struct ptlrpc_request *req)
GOTO(out, rc);
}
+ LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
+ h->th_opc, opc);
+
if (CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))
GOTO(out, rc = 0);
@@ -650,10 +725,9 @@
int tgt_request_handle(struct ptlrpc_request *req)
GOTO(out, rc);
}
+handle_recov:
rc = tgt_handle_recovery(req, tsi->tsi_reply_fail_id);
if (likely(rc == 1)) {
- LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
- h->th_opc, opc);
rc = tgt_handle_request0(tsi, h, req);
if (rc)
GOTO(out, rc);
@@ -744,6 +818,12 @@
static int tgt_init_sec_level(struct ptlrpc_request *req)
RETURN(0);
}
+ if (unlikely(tgt == NULL)) {
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ class_exp2obd(req->rq_export)->obd_name);
+ RETURN(-EINVAL);
+ }
+
client = libcfs_nid2str(req->rq_peer.nid);
/* no GSS support case */
if (!req->rq_auth_gss) {
@@ -854,6 +934,16 @@
int tgt_connect_check_sptlrpc(struct ptlrpc_request *req, struct obd_export *exp
spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
+
+ /* when on mgs, if no restriction is set, or if client
+ * is loopback, allow any flavor */
+ if ((strcmp(exp->exp_obd->obd_type->typ_name,
+ LUSTRE_MGS_NAME) == 0) &&
+ (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_NULL ||
+ LNET_NETTYP(LNET_NIDNET(exp->exp_connection->c_peer.nid))
+ == LOLND))
+ exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;
+
if (exp->exp_flvr.sf_rpc != SPTLRPC_FLVR_ANY &&
exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
CERROR("%s: unauthorized rpc flavor %x from %s, "
@@ -884,6 +974,11 @@
int tgt_adapt_sptlrpc_conf(struct lu_target *tgt, int initial)
struct sptlrpc_rule_set tmp_rset;
int rc;
+ if (unlikely(tgt == NULL)) {
+ CERROR("No target passed");
+ return -EINVAL;
+ }
+
sptlrpc_rule_set_init(&tmp_rset);
rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset, initial);
if (rc) {
@@ -976,11 +1071,6 @@
int tgt_obd_log_cancel(struct tgt_session_info *tsi)
return err_serious(-EOPNOTSUPP);
}
-int tgt_obd_qc_callback(struct tgt_session_info *tsi)
-{
- return err_serious(-EOPNOTSUPP);
-}
-
int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
{
struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
@@ -995,8 +1085,11 @@
int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
ENTRY;
- desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1, BULK_PUT_SOURCE,
- MDS_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1,
+ PTLRPC_BULK_PUT_SOURCE |
+ PTLRPC_BULK_BUF_KIOV,
+ MDS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL)
RETURN(-ENOMEM);
@@ -1008,12 +1101,13 @@
int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
i++, tmpcount -= tmpsize) {
tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
- ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
+ desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0,
+ tmpsize);
}
LASSERT(desc->bd_nob == nob);
rc = target_bulk_io(exp, desc, lwi);
-	ptlrpc_free_bulk_pin(desc);
+ ptlrpc_free_bulk(desc);
RETURN(rc);
}
EXPORT_SYMBOL(tgt_sendpage);
@@ -1106,7 +1200,6 @@
out:
struct tgt_handler tgt_obd_handlers[] = {
TGT_OBD_HDL (0, OBD_PING, tgt_obd_ping),
TGT_OBD_HDL_VAR(0, OBD_LOG_CANCEL, tgt_obd_log_cancel),
-TGT_OBD_HDL_VAR(0, OBD_QC_CALLBACK, tgt_obd_qc_callback),
TGT_OBD_HDL (0, OBD_IDX_READ, tgt_obd_idx_read)
};
EXPORT_SYMBOL(tgt_obd_handlers);
@@ -1148,6 +1241,12 @@
static int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
tgt = class_exp2tgt(lock->l_export);
+ if (unlikely(tgt == NULL)) {
+ CDEBUG(D_ERROR, "%s: No target for connected export\n",
+ class_exp2obd(lock->l_export)->obd_name);
+ RETURN(-EINVAL);
+ }
+
if (flag == LDLM_CB_CANCELING &&
(lock->l_granted_mode & (LCK_PW | LCK_GROUP)) &&
(tgt->lut_sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
@@ -1351,22 +1450,6 @@
TGT_LLOG_HDL_VAR(0, LLOG_ORIGIN_HANDLE_CLOSE, tgt_llog_close),
};
EXPORT_SYMBOL(tgt_llog_handlers);
-/*
- * sec context handlers
- */
-/* XXX: Implement based on mdt_sec_ctx_handle()? */
-static int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
-{
- return 0;
-}
-
-struct tgt_handler tgt_sec_ctx_handlers[] = {
-TGT_SEC_HDL_VAR(0, SEC_CTX_INIT, tgt_sec_ctx_handle),
-TGT_SEC_HDL_VAR(0, SEC_CTX_INIT_CONT, tgt_sec_ctx_handle),
-TGT_SEC_HDL_VAR(0, SEC_CTX_FINI, tgt_sec_ctx_handle),
-};
-EXPORT_SYMBOL(tgt_sec_ctx_handlers);
-
int (*tgt_lfsck_in_notify)(const struct lu_env *env,
struct dt_device *key,
struct lfsck_request *lr,
@@ -1576,6 +1659,8 @@
static __u32 tgt_checksum_bulk(struct lu_target *tgt,
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
__u32 cksum;
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
if (IS_ERR(hdesc)) {
CERROR("%s: unable to initialize checksum hash %s\n",
@@ -1589,10 +1674,11 @@
static __u32 tgt_checksum_bulk(struct lu_target *tgt,
* simulate a client->OST data error */
if (i == 0 && opc == OST_WRITE &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
- int off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
- int len = desc->bd_iov[i].kiov_len;
+ int off = BD_GET_KIOV(desc, i).kiov_offset &
+ ~PAGE_MASK;
+ int len = BD_GET_KIOV(desc, i).kiov_len;
struct page *np = tgt_page_to_corrupt;
-			char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
+			char *ptr = kmap(BD_GET_KIOV(desc, i).kiov_page) + off;
if (np) {
char *ptr2 = kmap(np) + off;
@@ -1600,24 +1686,28 @@
static __u32 tgt_checksum_bulk(struct lu_target *tgt,
memcpy(ptr2, ptr, len);
memcpy(ptr2, "bad3", min(4, len));
kunmap(np);
-				desc->bd_iov[i].kiov_page = np;
+				BD_GET_KIOV(desc, i).kiov_page = np;
} else {
CERROR("%s: can't alloc page for corruption\n",
tgt_name(tgt));
}
}
- cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
- desc->bd_iov[i].kiov_len);
+ cfs_crypto_hash_update_page(hdesc,
+ BD_GET_KIOV(desc, i).kiov_page,
+ BD_GET_KIOV(desc, i).kiov_offset &
+ ~PAGE_MASK,
+ BD_GET_KIOV(desc, i).kiov_len);
/* corrupt the data after we compute the checksum, to
* simulate an OST->client data error */
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
- int off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
- int len = desc->bd_iov[i].kiov_len;
+ int off = BD_GET_KIOV(desc, i).kiov_offset
+ & ~PAGE_MASK;
+ int len = BD_GET_KIOV(desc, i).kiov_len;
struct page *np = tgt_page_to_corrupt;
- char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
+ char *ptr =
+ kmap(BD_GET_KIOV(desc, i).kiov_page) + off;
if (np) {
char *ptr2 = kmap(np) + off;
@@ -1625,7 +1715,7 @@
static __u32 tgt_checksum_bulk(struct lu_target *tgt,
memcpy(ptr2, ptr, len);
memcpy(ptr2, "bad4", min(4, len));
kunmap(np);
-				desc->bd_iov[i].kiov_page = np;
+				BD_GET_KIOV(desc, i).kiov_page = np;
} else {
CERROR("%s: can't alloc page for corruption\n",
tgt_name(tgt));
@@ -1723,12 +1813,15 @@
int tgt_brw_read(struct tgt_session_info *tsi)
npages = PTLRPC_MAX_BRW_PAGES;
rc = obd_preprw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1,
-			ioo, remote_nb, &npages, local_nb, NULL);
+ ioo, remote_nb, &npages, local_nb);
if (rc != 0)
GOTO(out_lock, rc);
desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
- BULK_PUT_SOURCE, OST_BULK_PORTAL);
+ PTLRPC_BULK_PUT_SOURCE |
+ PTLRPC_BULK_BUF_KIOV,
+ OST_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
GOTO(out_commitrw, rc = -ENOMEM);
@@ -1744,9 +1837,10 @@
int tgt_brw_read(struct tgt_session_info *tsi)
nob += page_rc;
if (page_rc != 0) { /* some data! */
LASSERT(local_nb[i].lnb_page != NULL);
- ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].lnb_page,
- local_nb[i].lnb_page_offset,
- page_rc);
+ desc->bd_frag_ops->add_kiov_frag
+ (desc, local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset,
+ page_rc);
}
if (page_rc != local_nb[i].lnb_len) { /* short read */
@@ -1782,16 +1876,15 @@
int tgt_brw_read(struct tgt_session_info *tsi)
out_commitrw:
/* Must commit after prep above in all cases */
- rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp,
- &repbody->oa, 1, ioo, remote_nb, npages, local_nb,
- NULL, rc);
+ rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1, ioo,
+ remote_nb, npages, local_nb, rc);
if (rc == 0)
tgt_drop_id(exp, &repbody->oa);
out_lock:
tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PR);
if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
-		ptlrpc_free_bulk_nopin(desc);
+ ptlrpc_free_bulk(desc);
LASSERT(rc <= 0);
if (rc == 0) {
@@ -1819,7 +1912,7 @@
out_lock:
lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
l_wait_event(waitq, 0, &lwi1);
target_bulk_io(exp, desc, &lwi);
-		ptlrpc_free_bulk_nopin(desc);
+ ptlrpc_free_bulk(desc);
}
RETURN(rc);
@@ -1989,20 +2082,23 @@
int tgt_brw_write(struct tgt_session_info *tsi)
npages = PTLRPC_MAX_BRW_PAGES;
rc = obd_preprw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
-			objcount, ioo, remote_nb, &npages, local_nb, NULL);
+ objcount, ioo, remote_nb, &npages, local_nb);
if (rc < 0)
GOTO(out_lock, rc);
desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
- BULK_GET_SINK, OST_BULK_PORTAL);
+ PTLRPC_BULK_GET_SINK | PTLRPC_BULK_BUF_KIOV,
+ OST_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
GOTO(skip_transfer, rc = -ENOMEM);
/* NB Having prepped, we must commit... */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].lnb_page,
- local_nb[i].lnb_page_offset,
- local_nb[i].lnb_len);
+ desc->bd_frag_ops->add_kiov_frag(desc,
+ local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset,
+ local_nb[i].lnb_len);
rc = sptlrpc_svc_prep_bulk(req, desc);
if (rc != 0)
@@ -2043,8 +2139,7 @@
skip_transfer:
/* Must commit after prep above in all cases */
rc = obd_commitrw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
- objcount, ioo, remote_nb, npages, local_nb, NULL,
- rc);
+ objcount, ioo, remote_nb, npages, local_nb, rc);
if (rc == -ENOTCONN)
/* quota acquire process has been given up because
* either the client has been evicted or the client
@@ -2084,7 +2179,7 @@
skip_transfer:
out_lock:
tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PW);
if (desc)
- ptlrpc_free_bulk
_nopin
(desc);
+ ptlrpc_free_bulk(desc);
out:
if (no_reply) {
req->rq_no_reply = 1;
@@ -2100,3 +2195,44 @@
out:
RETURN(rc);
}
EXPORT_SYMBOL(tgt_brw_write);
+
+/* Check if request can be reconstructed from saved reply data
+ * A copy of the reply data is returned in @trd if the pointer is not NULL
+ */
+bool req_can_reconstruct(struct ptlrpc_request *req,
+ struct tg_reply_data *trd)
+{
+ struct tg_export_data *ted = &req->rq_export->exp_target_data;
+ struct lsd_client_data *lcd = ted->ted_lcd;
+ bool found;
+
+ if (tgt_is_multimodrpcs_client(req->rq_export))
+ return tgt_lookup_reply(req, trd);
+
+ mutex_lock(&ted->ted_lcd_lock);
+ found = req->rq_xid == lcd->lcd_last_xid ||
+ req->rq_xid == lcd->lcd_last_close_xid;
+
+ if (found && trd != NULL) {
+ if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE) {
+ trd->trd_reply.lrd_xid = lcd->lcd_last_close_xid;
+ trd->trd_reply.lrd_transno =
+ lcd->lcd_last_close_transno;
+ trd->trd_reply.lrd_result = lcd->lcd_last_close_result;
+ } else {
+ trd->trd_reply.lrd_xid = lcd->lcd_last_xid;
+ trd->trd_reply.lrd_transno = lcd->lcd_last_transno;
+ trd->trd_reply.lrd_result = lcd->lcd_last_result;
+ trd->trd_reply.lrd_data = lcd->lcd_last_data;
+ trd->trd_pre_versions[0] = lcd->lcd_pre_versions[0];
+ trd->trd_pre_versions[1] = lcd->lcd_pre_versions[1];
+ trd->trd_pre_versions[2] = lcd->lcd_pre_versions[2];
+ trd->trd_pre_versions[3] = lcd->lcd_pre_versions[3];
+ }
+ }
+ mutex_unlock(&ted->ted_lcd_lock);
+
+ return found;
+}
+EXPORT_SYMBOL(req_can_reconstruct);
+