* GPL HEADER END
*/
/*
- * Copyright (c) 2013, 2016, Intel Corporation.
+ * Copyright (c) 2013, 2017, Intel Corporation.
*/
/*
* lustre/target/tgt_handler.c
#define DEBUG_SUBSYSTEM S_CLASS
#include <linux/user_namespace.h>
-#ifdef HAVE_UIDGID_HEADER
-# include <linux/uidgid.h>
-#endif
+#include <linux/delay.h>
+#include <linux/uidgid.h>
+#include <libcfs/linux/linux-mem.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_cksum.h>
-#include <md_object.h>
#include <lustre_lfsck.h>
#include <lustre_nodemap.h>
+#include <lustre_acl.h>
#include "tgt_internal.h"
* - create lu_object, corresponding to the fid in mdt_body, and save it in
* @tsi;
*
- * - if HABEO_CORPUS flag is set for this request type check whether object
+ * - if HAS_BODY flag is set for this request type, check whether the object
* actually exists on storage (lu_object_exists()).
*
*/
&tsi->tsi_tgt->lut_bottom->dd_lu_dev,
&body->mbo_fid1, NULL);
if (!IS_ERR(obj)) {
- if ((flags & HABEO_CORPUS) && !lu_object_exists(obj)) {
+ if ((flags & HAS_BODY) && !lu_object_exists(obj)) {
lu_object_put(tsi->tsi_env, obj);
rc = -ENOENT;
} else {
}
if (!(body->oa.o_valid & OBD_MD_FLID)) {
- if (flags & HABEO_CORPUS) {
- CERROR("%s: OBD_MD_FLID flag is not set in ost_body "
- "but OID/FID is mandatory with HABEO_CORPUS\n",
+ if (flags & HAS_BODY) {
+ CERROR("%s: OBD_MD_FLID flag is not set in ost_body but OID/FID is mandatory with HAS_BODY\n",
tgt_name(tsi->tsi_tgt));
RETURN(-EPROTO);
} else {
LASSERT(h->th_opc == lustre_msg_get_opc(req->rq_reqmsg));
LASSERT(current->journal_info == NULL);
- LASSERT(ergo(flags & (HABEO_CORPUS | HABEO_REFERO),
+ LASSERT(ergo(flags & (HAS_BODY | HAS_REPLY),
h->th_fmt != NULL));
if (h->th_fmt != NULL) {
req_capsule_set(pill, h->th_fmt);
- if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT)) {
+ if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT) &&
+ req_capsule_field_present(pill, &RMF_MDT_BODY,
+ RCL_CLIENT)) {
rc = tgt_mdt_body_unpack(tsi, flags);
if (rc < 0)
RETURN(rc);
} else if (req_capsule_has_field(pill, &RMF_OST_BODY,
+ RCL_CLIENT) &&
+ req_capsule_field_present(pill, &RMF_OST_BODY,
RCL_CLIENT)) {
rc = tgt_ost_body_unpack(tsi, flags);
if (rc < 0)
}
}
- if (flags & MUTABOR && tgt_conn_flags(tsi) & OBD_CONNECT_RDONLY)
+ if (flags & IS_MUTABLE && tgt_conn_flags(tsi) & OBD_CONNECT_RDONLY)
RETURN(-EROFS);
- if (flags & HABEO_CLAVIS) {
+ if (flags & HAS_KEY) {
struct ldlm_request *dlm_req;
LASSERT(h->th_fmt != NULL);
dlm_req = req_capsule_client_get(pill, &RMF_DLM_REQ);
if (dlm_req != NULL) {
+ union ldlm_wire_policy_data *policy =
+ &dlm_req->lock_desc.l_policy_data;
+
if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
LDLM_IBITS &&
- dlm_req->lock_desc.l_policy_data.\
- l_inodebits.bits == 0)) {
+ (policy->l_inodebits.bits |
+ policy->l_inodebits.try_bits) == 0)) {
/*
* Lock without inodebits makes no sense and
* will oops later in ldlm. If client miss to
OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_MULTI_NET)))
RETURN(0);
+ /* drop OUT_UPDATE rpc */
+ if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == OUT_UPDATE &&
+ OBD_FAIL_CHECK(OBD_FAIL_OUT_UPDATE_DROP)))
+ RETURN(0);
+
rc = tgt_request_preprocess(tsi, h, req);
/* pack reply if reply format is fixed */
- if (rc == 0 && h->th_flags & HABEO_REFERO) {
+ if (rc == 0 && h->th_flags & HAS_REPLY) {
/* Pack reply */
if (req_capsule_has_field(tsi->tsi_pill, &RMF_MDT_MD,
RCL_SERVER))
RCL_SERVER))
req_capsule_set_size(tsi->tsi_pill, &RMF_LOGCOOKIES,
RCL_SERVER, 0);
+ if (req_capsule_has_field(tsi->tsi_pill, &RMF_ACL, RCL_SERVER))
+ req_capsule_set_size(tsi->tsi_pill,
+ &RMF_ACL, RCL_SERVER,
+ LUSTRE_POSIX_ACL_MAX_SIZE_OLD);
+
+ if (req_capsule_has_field(tsi->tsi_pill, &RMF_SHORT_IO,
+ RCL_SERVER)) {
+ struct niobuf_remote *remote_nb =
+ req_capsule_client_get(tsi->tsi_pill,
+ &RMF_NIOBUF_REMOTE);
+ struct ost_body *body = tsi->tsi_ost_body;
+
+ req_capsule_set_size(tsi->tsi_pill, &RMF_SHORT_IO,
+ RCL_SERVER,
+ (body->oa.o_valid & OBD_MD_FLFLAGS &&
+ body->oa.o_flags & OBD_FL_SHORT_IO) ?
+ remote_nb[0].rnb_len : 0);
+ }
rc = req_capsule_server_pack(tsi->tsi_pill);
}
rc = h->th_act(tsi);
if (!is_serious(rc) &&
!req->rq_no_reply && req->rq_reply_state == NULL) {
- DEBUG_REQ(D_ERROR, req, "%s \"handler\" %s did not "
- "pack reply and returned 0 error\n",
+ DEBUG_REQ(D_ERROR, req,
+ "%s: %s handler did not pack reply but returned no error",
tgt_name(tsi->tsi_tgt), h->th_name);
LBUG();
}
case MDS_HSM_PROGRESS:
case MDS_HSM_STATE_SET:
case MDS_HSM_REQUEST:
+ case OST_FALLOCATE:
*process = target_queue_recovery_request(req, obd);
RETURN(0);
/* sanity check: if the xid matches, the request must be marked as a
* resent or replayed */
- if (req_can_reconstruct(req, NULL)) {
+ if (req_can_reconstruct(req, NULL) == 1) {
if (!(lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_RESENT | MSG_REPLAY))) {
- DEBUG_REQ(D_WARNING, req, "rq_xid %llu matches "
- "saved xid, expected REPLAY or RESENT flag "
- "(%x)", req->rq_xid,
+ DEBUG_REQ(D_WARNING, req,
+ "rq_xid=%llu matches saved XID, expected REPLAY or RESENT flag (%x)",
+ req->rq_xid,
lustre_msg_get_flags(req->rq_reqmsg));
req->rq_status = -ENOTCONN;
RETURN(-ENOTCONN);
tgt = class_exp2tgt(req->rq_export);
if (unlikely(tgt == NULL)) {
- DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ DEBUG_REQ(D_ERROR, req, "%s: no target for connected export",
class_exp2obd(req->rq_export)->obd_name);
RETURN(ERR_PTR(-EINVAL));
}
/* opcode was not found in slice */
if (unlikely(s->tos_hs == NULL)) {
- CERROR("%s: no handlers for opcode 0x%x\n", tgt_name(tgt),
- opc);
+ static bool printed;
+
+ /* don't spew error messages for unhandled RPCs */
+ if (!printed) {
+ CERROR("%s: no handler for opcode 0x%x from %s\n",
+ tgt_name(tgt), opc, libcfs_id2str(req->rq_peer));
+ printed = true;
+ }
RETURN(ERR_PTR(-ENOTSUPP));
}
static int process_req_last_xid(struct ptlrpc_request *req)
{
__u64 last_xid;
+ int rc = 0;
+ struct obd_export *exp = req->rq_export;
+ struct tg_export_data *ted = &exp->exp_target_data;
+ bool need_lock = tgt_is_multimodrpcs_client(exp);
ENTRY;
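+ /* Clients that can have multiple modify RPCs in flight share the
+ * per-export reply data, so take ted_lcd_lock around the last_xid
+ * check and the reply data handling below for such clients. */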
+ if (need_lock)
+ mutex_lock(&ted->ted_lcd_lock);
/* check request's xid is consistent with export's last_xid */
last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
- if (last_xid > req->rq_export->exp_last_xid)
- req->rq_export->exp_last_xid = last_xid;
-
- if (req->rq_xid == 0 ||
- (req->rq_xid <= req->rq_export->exp_last_xid)) {
- DEBUG_REQ(D_ERROR, req, "Unexpected xid %llx vs. "
- "last_xid %llx\n", req->rq_xid,
- req->rq_export->exp_last_xid);
+ if (last_xid > exp->exp_last_xid)
+ exp->exp_last_xid = last_xid;
+
+ if (req->rq_xid == 0 || req->rq_xid <= exp->exp_last_xid) {
 /* Some requests are allowed to be sent during replay,
 * such as OUT update requests and FLD requests, so it
 * is possible that a replay request has a smaller XID
 * - The former RPC got a chance to be processed;
*/
if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))
- RETURN(-EPROTO);
+ rc = -EPROTO;
+
+ DEBUG_REQ(D_WARNING, req,
+ "unexpected xid=%llx != exp_last_xid=%llx, rc = %d",
+ req->rq_xid, exp->exp_last_xid, rc);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ /* The "last_xid" is the minimum XID among unreplied requests, so
+ * if the request is from a previous connection its XID can still
+ * be larger than "exp_last_xid"; the above XID check is then not
+ * enough to determine whether the request is delayed.
+ *
+ * For example, if a replay request was delayed, timed out on the
+ * client and the replay was restarted, the delayed replay request
+ * will have an XID larger than "exp_last_xid"
+ */
+ if (req->rq_export->exp_conn_cnt >
+ lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
+ CDEBUG(D_RPCTRACE,
+ "Dropping request %llu from an old epoch %u/%u\n",
+ req->rq_xid,
+ lustre_msg_get_conn_cnt(req->rq_reqmsg),
+ req->rq_export->exp_conn_cnt);
+ req->rq_no_reply = 1;
+ GOTO(out, rc = -ESTALE);
}
/* try to release in-memory reply data */
- if (tgt_is_multimodrpcs_client(req->rq_export)) {
- tgt_handle_received_xid(req->rq_export,
- lustre_msg_get_last_xid(req->rq_reqmsg));
- if (!(lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_RESENT | MSG_REPLAY)))
- tgt_handle_tag(req->rq_export,
- lustre_msg_get_tag(req->rq_reqmsg));
+ if (tgt_is_multimodrpcs_client(exp)) {
+ tgt_handle_received_xid(exp, last_xid);
+ rc = tgt_handle_tag(req);
}
- RETURN(0);
+
+out:
+ if (need_lock)
+ mutex_unlock(&ted->ted_lcd_lock);
+
+ RETURN(rc);
}
int tgt_request_handle(struct ptlrpc_request *req)
bool is_connect = false;
ENTRY;
- /* Refill the context, to make sure all thread keys are allocated */
- lu_env_refill(req->rq_svc_thread->t_env);
+ if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_TGT_RECOVERY_REQ_RACE))) {
+ if (cfs_fail_val == 0 &&
+ lustre_msg_get_opc(msg) != OBD_PING &&
+ lustre_msg_get_flags(msg) & MSG_REQ_REPLAY_DONE) {
+ cfs_fail_val = 1;
+ cfs_race_state = 0;
+ wait_event_idle(cfs_race_waitq, (cfs_race_state == 1));
+ }
+ }
req_capsule_init(&req->rq_pill, req, RCL_SERVER);
tsi->tsi_pill = &req->rq_pill;
tsi->tsi_jobid = NULL;
if (tgt == NULL) {
- DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export",
class_exp2obd(req->rq_export)->obd_name);
req->rq_status = -EINVAL;
rc = ptlrpc_error(req);
/* reset the exp_last_xid on each connection. */
req->rq_export->exp_last_xid = 0;
} else if (obd->obd_recovery_data.trd_processing_task !=
- current_pid()) {
+ current->pid) {
rc = process_req_last_xid(req);
if (rc) {
req->rq_status = rc;
LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
h->th_opc, opc);
- if (CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))
+ if ((cfs_fail_val == 0 || cfs_fail_val == opc) &&
+ CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))
GOTO(out, rc = 0);
rc = lustre_msg_check_version(msg, h->th_version);
if (unlikely(rc)) {
- DEBUG_REQ(D_ERROR, req, "%s: drop mal-formed request, version"
- " %08x, expecting %08x\n", tgt_name(tgt),
- lustre_msg_get_version(msg), h->th_version);
+ DEBUG_REQ(D_ERROR, req,
+ "%s: drop malformed request version=%08x expect=%08x",
+ tgt_name(tgt), lustre_msg_get_version(msg),
+ h->th_version);
req->rq_status = -EINVAL;
rc = ptlrpc_error(req);
GOTO(out, rc);
int tgt_connect_check_sptlrpc(struct ptlrpc_request *req, struct obd_export *exp)
{
- struct lu_target *tgt = class_exp2tgt(exp);
- struct sptlrpc_flavor flvr;
- int rc = 0;
+ struct lu_target *tgt = class_exp2tgt(exp);
+ struct sptlrpc_flavor flvr;
+ int rc = 0;
LASSERT(tgt);
LASSERT(tgt->lut_obd);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
- /* when on mgs, if no restriction is set, or if client
- * is loopback, allow any flavor */
+ /* when on mgs, if no restriction is set, or if the client
+ * NID is on the local node, allow any flavor
+ */
if ((strcmp(exp->exp_obd->obd_type->typ_name,
LUSTRE_MGS_NAME) == 0) &&
(exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_NULL ||
- LNET_NETTYP(LNET_NIDNET(exp->exp_connection->c_peer.nid))
- == LOLND))
+ LNetIsPeerLocal(exp->exp_connection->c_peer.nid)))
exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;
if (exp->exp_flvr.sf_rpc != SPTLRPC_FLVR_ANY &&
int rc;
if (unlikely(tgt == NULL)) {
- CERROR("No target passed");
+ CERROR("No target passed\n");
return -EINVAL;
}
reply = req_capsule_server_get(tsi->tsi_pill, &RMF_CONNECT_DATA);
spin_lock(&tsi->tsi_exp->exp_lock);
*exp_connect_flags_ptr(tsi->tsi_exp) = reply->ocd_connect_flags;
+ if (reply->ocd_connect_flags & OBD_CONNECT_FLAGS2)
+ *exp_connect_flags2_ptr(tsi->tsi_exp) =
+ reply->ocd_connect_flags2;
tsi->tsi_exp->exp_connect_data.ocd_brw_size = reply->ocd_brw_size;
spin_unlock(&tsi->tsi_exp->exp_lock);
+ if (strcmp(tsi->tsi_exp->exp_obd->obd_type->typ_name,
+ LUSTRE_MDT_NAME) == 0) {
+ rc = req_check_sepol(tsi->tsi_pill);
+ if (rc)
+ GOTO(out, rc);
+
+ if (reply->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
+ reply->ocd_connect_flags2 & OBD_CONNECT2_ENCRYPT &&
+ tsi->tsi_pill->rc_req->rq_export) {
+ bool forbid_encrypt = true;
+ struct lu_nodemap *nm =
+ nodemap_get_from_exp(tsi->tsi_pill->rc_req->rq_export);
+
+ if (!nm) {
+ /* nodemap_get_from_exp returns NULL in case
+ * nodemap is not active, so we do not forbid
+ */
+ forbid_encrypt = false;
+ } else if (!IS_ERR(nm)) {
+ forbid_encrypt = nm->nmf_forbid_encryption;
+ nodemap_putref(nm);
+ }
+
+ if (forbid_encrypt)
+ GOTO(out, rc = -EACCES);
+ }
+ }
+
RETURN(0);
out:
obd_disconnect(class_export_get(tsi->tsi_exp));
ENTRY;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_DISCONNECT_DELAY, cfs_fail_val);
+
rc = target_handle_disconnect(tgt_ses_req(tsi));
if (rc)
RETURN(err_serious(rc));
ENTRY;
- rc = target_handle_ping(tgt_ses_req(tsi));
+ /* The target-specific part of OBD_PING request handling.
+ * It controls Filter Modification Data (FMD) expiration each time
+ * PING is received.
+ *
+ * Valid only for replayable targets, e.g. MDT and OFD
+ */
+ if (tsi->tsi_exp->exp_obd->obd_replayable)
+ tgt_fmd_expire(tsi->tsi_exp);
+
+ rc = req_capsule_server_pack(tsi->tsi_pill);
if (rc)
RETURN(err_serious(rc));
int tgt_send_buffer(struct tgt_session_info *tsi, struct lu_rdbuf *rdbuf)
{
- struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
struct ptlrpc_request *req = tgt_ses_req(tsi);
struct obd_export *exp = req->rq_export;
struct ptlrpc_bulk_desc *desc;
- struct l_wait_info *lwi = &tti->tti_u.update.tti_wait_info;
int i;
int rc;
+ int pages = 0;
ENTRY;
- desc = ptlrpc_prep_bulk_exp(req, rdbuf->rb_nbufs, 1,
- PTLRPC_BULK_PUT_SOURCE | PTLRPC_BULK_BUF_KVEC,
- MDS_BULK_PORTAL, &ptlrpc_bulk_kvec_ops);
+ for (i = 0; i < rdbuf->rb_nbufs; i++) {
+ unsigned int offset;
+
+ offset = (unsigned long)rdbuf->rb_bufs[i].lb_buf & ~PAGE_MASK;
+ pages += DIV_ROUND_UP(rdbuf->rb_bufs[i].lb_len + offset,
+ PAGE_SIZE);
+ }
+
+ desc = ptlrpc_prep_bulk_exp(req, pages, 1,
+ PTLRPC_BULK_PUT_SOURCE,
+ MDS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
RETURN(-ENOMEM);
rdbuf->rb_bufs[i].lb_buf,
rdbuf->rb_bufs[i].lb_len);
- rc = target_bulk_io(exp, desc, lwi);
+ rc = target_bulk_io(exp, desc);
ptlrpc_free_bulk(desc);
RETURN(rc);
}
int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
{
- struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
struct ptlrpc_request *req = tgt_ses_req(tsi);
struct obd_export *exp = req->rq_export;
struct ptlrpc_bulk_desc *desc;
- struct l_wait_info *lwi = &tti->tti_u.rdpg.tti_wait_info;
int tmpcount;
int tmpsize;
int i;
ENTRY;
desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1,
- PTLRPC_BULK_PUT_SOURCE |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SOURCE,
MDS_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL)
}
LASSERT(desc->bd_nob == nob);
- rc = target_bulk_io(exp, desc, lwi);
+ rc = target_bulk_io(exp, desc);
ptlrpc_free_bulk(desc);
RETURN(rc);
}
rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* allocate pages to store the containers */
- OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
+ OBD_ALLOC_PTR_ARRAY(rdpg->rp_pages, rdpg->rp_npages);
if (rdpg->rp_pages == NULL)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < rdpg->rp_npages; i++) {
for (i = 0; i < rdpg->rp_npages; i++)
if (rdpg->rp_pages[i])
__free_page(rdpg->rp_pages[i]);
- OBD_FREE(rdpg->rp_pages,
- rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
+ OBD_FREE_PTR_ARRAY(rdpg->rp_pages, rdpg->rp_npages);
}
return rc;
}
struct tgt_handler tgt_obd_handlers[] = {
TGT_OBD_HDL (0, OBD_PING, tgt_obd_ping),
-TGT_OBD_HDL_VAR(0, OBD_LOG_CANCEL, tgt_obd_log_cancel),
TGT_OBD_HDL (0, OBD_IDX_READ, tgt_obd_idx_read)
};
EXPORT_SYMBOL(tgt_obd_handlers);
if (flag == LDLM_CB_CANCELING &&
(lock->l_granted_mode & (LCK_EX | LCK_PW | LCK_GROUP)) &&
- (tgt->lut_sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
- (tgt->lut_sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL &&
+ (tgt->lut_sync_lock_cancel == SYNC_LOCK_CANCEL_ALWAYS ||
+ (tgt->lut_sync_lock_cancel == SYNC_LOCK_CANCEL_BLOCKING &&
ldlm_is_cbpending(lock))) &&
((exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) ||
lock->l_resource->lr_type == LDLM_EXTENT)) {
rc = lu_env_init(&env, LCT_DT_THREAD);
if (unlikely(rc != 0))
- RETURN(rc);
+ GOTO(err, rc);
ost_fid_from_resid(&fid, &lock->l_resource->lr_name,
tgt->lut_lsd.lsd_osd_index);
err_env:
lu_env_fini(&env);
}
-
+err:
rc = ldlm_server_blocking_ast(lock, desc, data, flag);
RETURN(rc);
}
/* generic LDLM target handler */
struct tgt_handler tgt_dlm_handlers[] = {
-TGT_DLM_HDL (HABEO_CLAVIS, LDLM_ENQUEUE, tgt_enqueue),
-TGT_DLM_HDL_VAR(HABEO_CLAVIS, LDLM_CONVERT, tgt_convert),
-TGT_DLM_HDL_VAR(0, LDLM_BL_CALLBACK, tgt_bl_callback),
-TGT_DLM_HDL_VAR(0, LDLM_CP_CALLBACK, tgt_cp_callback)
+TGT_DLM_HDL(HAS_KEY, LDLM_ENQUEUE, tgt_enqueue),
+TGT_DLM_HDL(HAS_KEY, LDLM_CONVERT, tgt_convert),
+TGT_DLM_HDL_VAR(0, LDLM_BL_CALLBACK, tgt_bl_callback),
+TGT_DLM_HDL_VAR(0, LDLM_CP_CALLBACK, tgt_cp_callback)
};
EXPORT_SYMBOL(tgt_dlm_handlers);
}
EXPORT_SYMBOL(tgt_llog_open);
-int tgt_llog_close(struct tgt_session_info *tsi)
-{
- int rc;
-
- ENTRY;
-
- rc = llog_origin_handle_close(tgt_ses_req(tsi));
-
- RETURN(rc);
-}
-EXPORT_SYMBOL(tgt_llog_close);
-
-
-int tgt_llog_destroy(struct tgt_session_info *tsi)
-{
- int rc;
-
- ENTRY;
-
- rc = llog_origin_handle_destroy(tgt_ses_req(tsi));
-
- RETURN(rc);
-}
-
int tgt_llog_read_header(struct tgt_session_info *tsi)
{
int rc;
TGT_LLOG_HDL (0, LLOG_ORIGIN_HANDLE_NEXT_BLOCK, tgt_llog_next_block),
TGT_LLOG_HDL (0, LLOG_ORIGIN_HANDLE_READ_HEADER, tgt_llog_read_header),
TGT_LLOG_HDL (0, LLOG_ORIGIN_HANDLE_PREV_BLOCK, tgt_llog_prev_block),
-TGT_LLOG_HDL (0, LLOG_ORIGIN_HANDLE_DESTROY, tgt_llog_destroy),
-TGT_LLOG_HDL_VAR(0, LLOG_ORIGIN_HANDLE_CLOSE, tgt_llog_close),
};
EXPORT_SYMBOL(tgt_llog_handlers);
};
EXPORT_SYMBOL(tgt_sec_ctx_handlers);
+int (*tgt_lfsck_in_notify_local)(const struct lu_env *env,
+ struct dt_device *key,
+ struct lfsck_req_local *lrl,
+ struct thandle *th) = NULL;
+
+void tgt_register_lfsck_in_notify_local(int (*notify)(const struct lu_env *,
+ struct dt_device *,
+ struct lfsck_req_local *,
+ struct thandle *))
+{
+ tgt_lfsck_in_notify_local = notify;
+}
+EXPORT_SYMBOL(tgt_register_lfsck_in_notify_local);
+
int (*tgt_lfsck_in_notify)(const struct lu_env *env,
struct dt_device *key,
- struct lfsck_request *lr,
- struct thandle *th) = NULL;
+ struct lfsck_request *lr) = NULL;
void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
struct dt_device *,
- struct lfsck_request *,
- struct thandle *))
+ struct lfsck_request *))
{
tgt_lfsck_in_notify = notify;
}
if (lr == NULL)
RETURN(-EPROTO);
- rc = tgt_lfsck_in_notify(env, key, lr, NULL);
+ rc = tgt_lfsck_in_notify(env, key, lr);
RETURN(rc);
}
}
struct tgt_handler tgt_lfsck_handlers[] = {
-TGT_LFSCK_HDL(HABEO_REFERO, LFSCK_NOTIFY, tgt_handle_lfsck_notify),
-TGT_LFSCK_HDL(HABEO_REFERO, LFSCK_QUERY, tgt_handle_lfsck_query),
+TGT_LFSCK_HDL(HAS_REPLY, LFSCK_NOTIFY, tgt_handle_lfsck_notify),
+TGT_LFSCK_HDL(HAS_REPLY, LFSCK_QUERY, tgt_handle_lfsck_query),
};
EXPORT_SYMBOL(tgt_lfsck_handlers);
EXIT;
}
EXPORT_SYMBOL(tgt_io_thread_done);
+
+/**
+ * Helper function for taking a server-side DLM lock on a Data-on-MDT
+ * file when requested by the client.
+ */
+int tgt_mdt_data_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
+ struct lustre_handle *lh, int mode, __u64 *flags)
+{
+ union ldlm_policy_data policy = {
+ .l_inodebits.bits = MDS_INODELOCK_DOM,
+ };
+ int rc;
+
+ ENTRY;
+
+ LASSERT(lh != NULL);
+ LASSERT(ns != NULL);
+ LASSERT(!lustre_handle_is_used(lh));
+
+ rc = ldlm_cli_enqueue_local(NULL, ns, res_id, LDLM_IBITS, &policy, mode,
+ flags, ldlm_blocking_ast,
+ ldlm_completion_ast, ldlm_glimpse_ast,
+ NULL, 0, LVB_T_NONE, NULL, lh);
+
+ RETURN(rc == ELDLM_OK ? 0 : -EIO);
+}
+EXPORT_SYMBOL(tgt_mdt_data_lock);
+
/**
 * Helper function for taking a server-side [start, end] extent DLM lock
 * when requested by the client.
*/
-int tgt_extent_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
- __u64 start, __u64 end, struct lustre_handle *lh,
- int mode, __u64 *flags)
+int tgt_extent_lock(const struct lu_env *env, struct ldlm_namespace *ns,
+ struct ldlm_res_id *res_id, __u64 start, __u64 end,
+ struct lustre_handle *lh, int mode, __u64 *flags)
{
union ldlm_policy_data policy;
int rc;
else
policy.l_extent.end = end | ~PAGE_MASK;
- rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_EXTENT, &policy, mode,
- flags, ldlm_blocking_ast,
+ rc = ldlm_cli_enqueue_local(env, ns, res_id, LDLM_EXTENT, &policy,
+ mode, flags, ldlm_blocking_ast,
ldlm_completion_ast, ldlm_glimpse_ast,
NULL, 0, LVB_T_NONE, NULL, lh);
RETURN(rc == ELDLM_OK ? 0 : -EIO);
}
EXPORT_SYMBOL(tgt_extent_lock);
-void tgt_extent_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
+static int tgt_data_lock(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, __u64 start, __u64 end,
+ struct lustre_handle *lh, enum ldlm_mode mode)
+{
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ __u64 flags = 0;
+
+ /* MDT IO for data-on-mdt */
+ if (exp->exp_connect_data.ocd_connect_flags & OBD_CONNECT_IBITS)
+ return tgt_mdt_data_lock(ns, res_id, lh, mode, &flags);
+
+ return tgt_extent_lock(env, ns, res_id, start, end, lh, mode, &flags);
+}
+
+void tgt_data_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
{
LASSERT(lustre_handle_is_used(lh));
ldlm_lock_decref(lh, mode);
}
-EXPORT_SYMBOL(tgt_extent_unlock);
+EXPORT_SYMBOL(tgt_data_unlock);
-int tgt_brw_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
- struct obd_ioobj *obj, struct niobuf_remote *nb,
- struct lustre_handle *lh, enum ldlm_mode mode)
+static int tgt_brw_lock(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, struct obd_ioobj *obj,
+ struct niobuf_remote *nb, struct lustre_handle *lh,
+ enum ldlm_mode mode)
{
- __u64 flags = 0;
- int nrbufs = obj->ioo_bufcnt;
- int i;
+ int nrbufs = obj->ioo_bufcnt;
+ int i;
ENTRY;
LASSERT(mode == LCK_PR || mode == LCK_PW);
LASSERT(!lustre_handle_is_used(lh));
- if (ns->ns_obd->obd_recovering)
+ if (exp->exp_obd->obd_recovering)
RETURN(0);
if (nrbufs == 0 || !(nb[0].rnb_flags & OBD_BRW_SRVLOCK))
if (!(nb[i].rnb_flags & OBD_BRW_SRVLOCK))
RETURN(-EFAULT);
- RETURN(tgt_extent_lock(ns, res_id, nb[0].rnb_offset,
- nb[nrbufs - 1].rnb_offset +
- nb[nrbufs - 1].rnb_len - 1,
- lh, mode, &flags));
+ return tgt_data_lock(env, exp, res_id, nb[0].rnb_offset,
+ nb[nrbufs - 1].rnb_offset +
+ nb[nrbufs - 1].rnb_len - 1, lh, mode);
}
-void tgt_brw_unlock(struct obd_ioobj *obj, struct niobuf_remote *niob,
- struct lustre_handle *lh, enum ldlm_mode mode)
+static void tgt_brw_unlock(struct obd_export *exp, struct obd_ioobj *obj,
+ struct niobuf_remote *niob,
+ struct lustre_handle *lh, enum ldlm_mode mode)
{
ENTRY;
LASSERT(mode == LCK_PR || mode == LCK_PW);
- LASSERT((obj->ioo_bufcnt > 0 &&
- (niob[0].rnb_flags & OBD_BRW_SRVLOCK)) ==
+ LASSERT((!exp->exp_obd->obd_recovering && obj->ioo_bufcnt &&
+ niob[0].rnb_flags & OBD_BRW_SRVLOCK) ==
lustre_handle_is_used(lh));
if (lustre_handle_is_used(lh))
- tgt_extent_unlock(lh, mode);
+ tgt_data_unlock(lh, mode);
EXIT;
}
-static __u32 tgt_checksum_bulk(struct lu_target *tgt,
- struct ptlrpc_bulk_desc *desc, int opc,
- cksum_type_t cksum_type)
+static int tgt_checksum_niobuf(struct lu_target *tgt,
+ struct niobuf_local *local_nb, int npages,
+ int opc, enum cksum_types cksum_type,
+ __u32 *cksum)
{
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
unsigned int bufsize;
int i, err;
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
- __u32 cksum;
-
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
CERROR("%s: unable to initialize checksum hash %s\n",
tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
+ return PTR_ERR(req);
}
CDEBUG(D_INFO, "Checksum for algo %s\n", cfs_crypto_hash_name(cfs_alg));
- for (i = 0; i < desc->bd_iov_count; i++) {
+ for (i = 0; i < npages; i++) {
/* corrupt the data before we compute the checksum, to
* simulate a client->OST data error */
if (i == 0 && opc == OST_WRITE &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
- int off = BD_GET_KIOV(desc, i).kiov_offset &
- ~PAGE_MASK;
- int len = BD_GET_KIOV(desc, i).kiov_len;
+ int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
+ int len = local_nb[i].lnb_len;
struct page *np = tgt_page_to_corrupt;
- char *ptr = kmap(BD_GET_KIOV(desc, i).kiov_page) + off;
if (np) {
- char *ptr2 = kmap(np) + off;
+ char *ptr = kmap_atomic(local_nb[i].lnb_page);
+ char *ptr2 = page_address(np);
+
+ memcpy(ptr2 + off, ptr + off, len);
+ memcpy(ptr2 + off, "bad3", min(4, len));
+ kunmap_atomic(ptr);
+
+ /* LU-8376 to preserve original index for
+ * display in dump_all_bulk_pages() */
+ np->index = i;
- memcpy(ptr2, ptr, len);
- memcpy(ptr2, "bad3", min(4, len));
- kunmap(np);
- BD_GET_KIOV(desc, i).kiov_page = np;
+ cfs_crypto_hash_update_page(req, np, off,
+ len);
+ continue;
} else {
CERROR("%s: can't alloc page for corruption\n",
tgt_name(tgt));
}
}
- cfs_crypto_hash_update_page(hdesc,
- BD_GET_KIOV(desc, i).kiov_page,
- BD_GET_KIOV(desc, i).kiov_offset &
- ~PAGE_MASK,
- BD_GET_KIOV(desc, i).kiov_len);
+ cfs_crypto_hash_update_page(req, local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
+ local_nb[i].lnb_len);
/* corrupt the data after we compute the checksum, to
* simulate an OST->client data error */
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
- int off = BD_GET_KIOV(desc, i).kiov_offset
- & ~PAGE_MASK;
- int len = BD_GET_KIOV(desc, i).kiov_len;
+ int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
+ int len = local_nb[i].lnb_len;
+ struct page *np = tgt_page_to_corrupt;
+
+ if (np) {
+ char *ptr = kmap_atomic(local_nb[i].lnb_page);
+ char *ptr2 = page_address(np);
+
+ memcpy(ptr2 + off, ptr + off, len);
+ memcpy(ptr2 + off, "bad4", min(4, len));
+ kunmap_atomic(ptr);
+
+ /* LU-8376 to preserve original index for
+ * display in dump_all_bulk_pages() */
+ np->index = i;
+
+ cfs_crypto_hash_update_page(req, np, off,
+ len);
+ continue;
+ } else {
+ CERROR("%s: can't alloc page for corruption\n",
+ tgt_name(tgt));
+ }
+ }
+ }
+
+ bufsize = sizeof(*cksum);
+ err = cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);
+
+ return err;
+}
+
+char dbgcksum_file_name[PATH_MAX];
+
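+/* Write every bulk page involved in a checksum mismatch to a file under
+ * libcfs_debug_file_path, named after the FID, extent and both checksums,
+ * so the miscompared data can be inspected later. The file is created
+ * with O_EXCL, so a resend of the same range does not dump again. */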
+static void dump_all_bulk_pages(struct obdo *oa, int count,
+ struct niobuf_local *local_nb,
+ __u32 server_cksum, __u32 client_cksum)
+{
+ struct file *filp;
+ int rc, i;
+ unsigned int len;
+ char *buf;
+
+ /* only keep a dump of the pages for the first error on a given
+ * file/fid range; resends/retries of the same range are skipped. */
+ snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
+ "%s-checksum_dump-ost-"DFID":[%llu-%llu]-%x-%x",
+ (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
+ libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
+ local_nb[0].lnb_file_offset,
+ local_nb[count-1].lnb_file_offset +
+ local_nb[count-1].lnb_len - 1, client_cksum, server_cksum);
+ filp = filp_open(dbgcksum_file_name,
+ O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
+ if (IS_ERR(filp)) {
+ rc = PTR_ERR(filp);
+ if (rc == -EEXIST)
+ CDEBUG(D_INFO, "%s: can't open to dump pages with "
+ "checksum error: rc = %d\n", dbgcksum_file_name,
+ rc);
+ else
+ CERROR("%s: can't open to dump pages with checksum "
+ "error: rc = %d\n", dbgcksum_file_name, rc);
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ len = local_nb[i].lnb_len;
+ buf = kmap(local_nb[i].lnb_page);
+ while (len != 0) {
+ rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
+ if (rc < 0) {
+ CERROR("%s: wanted to write %u but got %d "
+ "error\n", dbgcksum_file_name, len, rc);
+ break;
+ }
+ len -= rc;
+ buf += rc;
+ CDEBUG(D_INFO, "%s: wrote %d bytes\n",
+ dbgcksum_file_name, rc);
+ }
+ kunmap(local_nb[i].lnb_page);
+ }
+
+ rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
+ if (rc)
+ CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
+ filp_close(filp, NULL);
+}
+
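+/* On a read resend the client returns the checksum it computed earlier;
+ * compare it with the freshly computed server checksum. A match is only
+ * logged, a mismatch is dumped (if checksum_dump is enabled) and reported
+ * on the console. Returns 1 on mismatch, 0 otherwise. */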
+static int check_read_checksum(struct niobuf_local *local_nb, int npages,
+ struct obd_export *exp, struct obdo *oa,
+ const struct lnet_process_id *peer,
+ __u32 client_cksum, __u32 server_cksum,
+ enum cksum_types server_cksum_type)
+{
+ char *msg;
+ enum cksum_types cksum_type;
+ loff_t start, end;
+
+ /* unlikely to happen, and only if the resend was not caused by a
+ * checksum failure detected on the client */
+ if (unlikely(server_cksum == client_cksum)) {
+ CDEBUG(D_PAGE, "checksum %x confirmed upon retry\n",
+ client_cksum);
+ return 0;
+ }
+
+ if (exp->exp_obd->obd_checksum_dump)
+ dump_all_bulk_pages(oa, npages, local_nb, server_cksum,
+ client_cksum);
+
+ cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
+ oa->o_flags : 0);
+
+ if (cksum_type != server_cksum_type)
+ msg = "the server may not have used the checksum type specified"
+ " in the original request - likely a protocol problem";
+ else
+ msg = "should have changed on the client or in transit";
+
+ start = local_nb[0].lnb_file_offset;
+ end = local_nb[npages-1].lnb_file_offset +
+ local_nb[npages-1].lnb_len - 1;
+
+ LCONSOLE_ERROR_MSG(0x132, "%s: BAD READ CHECKSUM: %s: from %s inode "
+ DFID " object "DOSTID" extent [%llu-%llu], client returned csum"
+ " %x (type %x), server csum %x (type %x)\n",
+ exp->exp_obd->obd_name,
+ msg, libcfs_nid2str(peer->nid),
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
+ POSTID(&oa->o_oi),
+ start, end, client_cksum, cksum_type, server_cksum,
+ server_cksum_type);
+
+ return 1;
+}
+
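+/* Copy the read data from the local niobuf pages into the short I/O
+ * reply buffer @buf of at most @size bytes; returns the number of bytes
+ * copied, or -EINVAL if a page's data does not fit. */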
+static int tgt_pages2shortio(struct niobuf_local *local, int npages,
+ unsigned char *buf, int size)
+{
+ int i, off, len, copied = size;
+ char *ptr;
+
+ for (i = 0; i < npages; i++) {
+ off = local[i].lnb_page_offset & ~PAGE_MASK;
+ len = local[i].lnb_len;
+
+ CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",
+ i, off, len, size);
+ if (len > size)
+ return -EINVAL;
+
+ ptr = kmap_atomic(local[i].lnb_page);
+ memcpy(buf, ptr + off, len);
+ kunmap_atomic(ptr);
+ buf += len;
+ size -= len;
+ }
+ return copied - size;
+}
+
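+/* Compute the bulk checksum from T10-PI guard tags: for each niobuf the
+ * guards are either reused from disk (full-page reads) or generated from
+ * the data, accumulated in a scratch page and fed to the hash; full-page
+ * write guards are also saved in lnb_guards for later bio submission.
+ * The resulting checksum is returned in *check_sum. */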
+static int tgt_checksum_niobuf_t10pi(struct lu_target *tgt,
+ struct niobuf_local *local_nb,
+ int npages, int opc,
+ obd_dif_csum_fn *fn,
+ int sector_size,
+ u32 *check_sum)
+{
+ enum cksum_types t10_cksum_type = tgt->lut_dt_conf.ddp_t10_cksum_type;
+ unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
+ const char *obd_name = tgt->lut_obd->obd_name;
+ struct ahash_request *req;
+ unsigned int bufsize;
+ unsigned char *buffer;
+ struct page *__page;
+ __u16 *guard_start;
+ int guard_number;
+ int used_number = 0;
+ __u32 cksum;
+ int rc = 0;
+ int used;
+ int i;
+
+ __page = alloc_page(GFP_KERNEL);
+ if (__page == NULL)
+ return -ENOMEM;
+
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
+ CERROR("%s: unable to initialize checksum hash %s\n",
+ tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
+ __free_page(__page);
+ return PTR_ERR(req);
+ }
+
+ buffer = kmap(__page);
+ guard_start = (__u16 *)buffer;
+ guard_number = PAGE_SIZE / sizeof(*guard_start);
+ for (i = 0; i < npages; i++) {
+ /* corrupt the data before we compute the checksum, to
+ * simulate a client->OST data error */
+ if (i == 0 && opc == OST_WRITE &&
+ OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
+ int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
+ int len = local_nb[i].lnb_len;
+ struct page *np = tgt_page_to_corrupt;
+
+ if (np) {
+ char *ptr = kmap_atomic(local_nb[i].lnb_page);
+ char *ptr2 = page_address(np);
+
+ memcpy(ptr2 + off, ptr + off, len);
+ memcpy(ptr2 + off, "bad3", min(4, len));
+ kunmap_atomic(ptr);
+
+ /* LU-8376 to preserve original index for
+ * display in dump_all_bulk_pages() */
+ np->index = i;
+
+ cfs_crypto_hash_update_page(req, np, off,
+ len);
+ continue;
+ } else {
+ CERROR("%s: can't alloc page for corruption\n",
+ tgt_name(tgt));
+ }
+ }
+
+ /*
+ * The remaining guard slots must be able to hold the checksums
+ * of a whole page
+ */
+ if (t10_cksum_type && opc == OST_READ &&
+ local_nb[i].lnb_len == PAGE_SIZE &&
+ local_nb[i].lnb_guard_disk) {
+ used = DIV_ROUND_UP(local_nb[i].lnb_len, sector_size);
+ if (used > (guard_number - used_number)) {
+ rc = -E2BIG;
+ break;
+ }
+ memcpy(guard_start + used_number,
+ local_nb[i].lnb_guards,
+ used * sizeof(*local_nb[i].lnb_guards));
+ } else {
+ rc = obd_page_dif_generate_buffer(obd_name,
+ local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
+ local_nb[i].lnb_len, guard_start + used_number,
+ guard_number - used_number, &used, sector_size,
+ fn);
+ if (rc)
+ break;
+ }
+
+ LASSERT(used <= MAX_GUARD_NUMBER);
+ /*
+ * If the disk supports T10-PI checksums, copy the guards to local_nb.
+ * If the write is a partial page, do not use these guards for bio
+ * submission since the data might not be full-sector; the bio guards
+ * will be generated later from the full sectors. If the sector size
+ * is 512B rather than 4KB, or the page size is larger than 4KB, this
+ * might drop some useful guards for partial-page writes, but it only
+ * adds minimal extra checksum calculation time.
+ */
+ if (t10_cksum_type && opc == OST_WRITE &&
+ local_nb[i].lnb_len == PAGE_SIZE) {
+ local_nb[i].lnb_guard_rpc = 1;
+ memcpy(local_nb[i].lnb_guards,
+ guard_start + used_number,
+ used * sizeof(*local_nb[i].lnb_guards));
+ }
+
+ used_number += used;
+ if (used_number == guard_number) {
+ cfs_crypto_hash_update_page(req, __page, 0,
+ used_number * sizeof(*guard_start));
+ used_number = 0;
+ }
+
+ /* corrupt the data after we compute the checksum, to
+ * simulate an OST->client data error */
+ if (unlikely(i == 0 && opc == OST_READ &&
+ OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND))) {
+ int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
+ int len = local_nb[i].lnb_len;
struct page *np = tgt_page_to_corrupt;
- char *ptr =
- kmap(BD_GET_KIOV(desc, i).kiov_page) + off;
if (np) {
- char *ptr2 = kmap(np) + off;
+ char *ptr = kmap_atomic(local_nb[i].lnb_page);
+ char *ptr2 = page_address(np);
+
+ memcpy(ptr2 + off, ptr + off, len);
+ memcpy(ptr2 + off, "bad4", min(4, len));
+ kunmap_atomic(ptr);
- memcpy(ptr2, ptr, len);
- memcpy(ptr2, "bad4", min(4, len));
- kunmap(np);
- BD_GET_KIOV(desc, i).kiov_page = np;
+ /* LU-8376 to preserve original index for
+ * display in dump_all_bulk_pages() */
+ np->index = i;
+
+ cfs_crypto_hash_update_page(req, np, off,
+ len);
+ continue;
} else {
CERROR("%s: can't alloc page for corruption\n",
tgt_name(tgt));
}
}
}
+ kunmap(__page);
+ if (rc)
+ GOTO(out, rc);
+
+ if (used_number != 0)
+ cfs_crypto_hash_update_page(req, __page, 0,
+ used_number * sizeof(*guard_start));
bufsize = sizeof(cksum);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+ rc = cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);
- return cksum;
+ if (rc == 0)
+ *check_sum = cksum;
+out:
+ __free_page(__page);
+ return rc;
+}
+
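+/* Checksum the niobuf pages of a read or write bulk: take the T10-PI
+ * guard-tag path when the checksum type maps to a DIF function, otherwise
+ * hash the page data directly. */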
+static int tgt_checksum_niobuf_rw(struct lu_target *tgt,
+ enum cksum_types cksum_type,
+ struct niobuf_local *local_nb,
+ int npages, int opc, u32 *check_sum)
+{
+ obd_dif_csum_fn *fn = NULL;
+ int sector_size = 0;
+ int rc;
+
+ ENTRY;
+ obd_t10_cksum2dif(cksum_type, &fn, &sector_size);
+
+ if (fn)
+ rc = tgt_checksum_niobuf_t10pi(tgt, local_nb, npages,
+ opc, fn, sector_size,
+ check_sum);
+ else
+ rc = tgt_checksum_niobuf(tgt, local_nb, npages, opc,
+ cksum_type, check_sum);
+ RETURN(rc);
}
int tgt_brw_read(struct tgt_session_info *tsi)
struct niobuf_local *local_nb;
struct obd_ioobj *ioo;
struct ost_body *body, *repbody;
- struct l_wait_info lwi;
struct lustre_handle lockh = { 0 };
- int npages, nob = 0, rc, i, no_reply = 0;
+ int npages, nob = 0, rc, i, no_reply = 0,
+ npages_read;
struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
+ const char *obd_name = exp->exp_obd->obd_name;
ENTRY;
- if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL) {
+ if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL &&
+ ptlrpc_req2svc(req)->srv_req_portal != MDS_IO_PORTAL) {
CERROR("%s: deny read request from %s to portal %u\n",
tgt_name(tsi->tsi_tgt),
obd_export_nid2str(req->rq_export),
* finish */
if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
/* We do not care how long it takes */
- lwi = LWI_INTR(NULL, NULL);
- rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
- !atomic_read(&exp->exp_obd->obd_evict_inprogress),
- &lwi);
+ wait_event_idle(
+ exp->exp_obd->obd_evict_inprogress_waitq,
+ !atomic_read(&exp->exp_obd->obd_evict_inprogress));
}
/* There must be big cache in current thread to process this request
local_nb = tbc->local;
- rc = tgt_brw_lock(exp->exp_obd->obd_namespace, &tsi->tsi_resid, ioo,
- remote_nb, &lockh, LCK_PR);
+ rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
+ &lockh, LCK_PR);
if (rc != 0)
RETURN(rc);
* If getting the lock took more time than
* client was willing to wait, drop it. b=11330
*/
- if (cfs_time_current_sec() > req->rq_deadline ||
+ if (ktime_get_real_seconds() > req->rq_deadline ||
OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
no_reply = 1;
- CERROR("Dropping timed-out read from %s because locking"
- "object "DOSTID" took %ld seconds (limit was %ld).\n",
+ CERROR("Dropping timed-out read from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
libcfs_id2str(req->rq_peer), POSTID(&ioo->ioo_oid),
- cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
req->rq_deadline - req->rq_arrival_time.tv_sec);
GOTO(out_lock, rc = -ETIMEDOUT);
}
if (rc != 0)
GOTO(out_lock, rc);
- desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
- PTLRPC_BULK_PUT_SOURCE |
- PTLRPC_BULK_BUF_KIOV,
- OST_BULK_PORTAL,
- &ptlrpc_bulk_kiov_nopin_ops);
- if (desc == NULL)
- GOTO(out_commitrw, rc = -ENOMEM);
+ if (body->oa.o_valid & OBD_MD_FLFLAGS &&
+ body->oa.o_flags & OBD_FL_SHORT_IO) {
+ desc = NULL;
+ } else {
+ desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
+ PTLRPC_BULK_PUT_SOURCE,
+ OST_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_nopin_ops);
+ if (desc == NULL)
+ GOTO(out_commitrw, rc = -ENOMEM);
+ }
nob = 0;
+ npages_read = npages;
for (i = 0; i < npages; i++) {
int page_rc = local_nb[i].lnb_rc;
if (page_rc < 0) {
rc = page_rc;
+ npages_read = i;
break;
}
nob += page_rc;
- if (page_rc != 0) { /* some data! */
+ if (page_rc != 0 && desc != NULL) { /* some data! */
LASSERT(local_nb[i].lnb_page != NULL);
desc->bd_frag_ops->add_kiov_frag
(desc, local_nb[i].lnb_page,
- local_nb[i].lnb_page_offset,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
page_rc);
}
if (page_rc != local_nb[i].lnb_len) { /* short read */
+ local_nb[i].lnb_len = page_rc;
+ npages_read = i + (page_rc != 0 ? 1 : 0);
/* All subsequent pages should be 0 */
while (++i < npages)
LASSERT(local_nb[i].lnb_rc == 0);
break;
}
}
- if (OBD_FAIL_CHECK(OBD_FAIL_OST_READ_SIZE) &&
- nob != cfs_fail_val)
- rc = -E2BIG;
if (body->oa.o_valid & OBD_MD_FLCKSUM) {
- cksum_type_t cksum_type =
- cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
- body->oa.o_flags : 0);
- repbody->oa.o_flags = cksum_type_pack(cksum_type);
+ u32 flag = body->oa.o_valid & OBD_MD_FLFLAGS ?
+ body->oa.o_flags : 0;
+ enum cksum_types cksum_type = obd_cksum_type_unpack(flag);
+
+ repbody->oa.o_flags = obd_cksum_type_pack(obd_name,
+ cksum_type);
repbody->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
- repbody->oa.o_cksum = tgt_checksum_bulk(tsi->tsi_tgt, desc,
- OST_READ, cksum_type);
+
+ rc = tgt_checksum_niobuf_rw(tsi->tsi_tgt, cksum_type,
+ local_nb, npages_read, OST_READ,
+ &repbody->oa.o_cksum);
+ if (rc < 0)
+ GOTO(out_commitrw, rc);
CDEBUG(D_PAGE, "checksum at read origin: %x\n",
repbody->oa.o_cksum);
+
+ /* if this is a resend it could be due to a cksum error, so check
+ * the server cksum against the returned client cksum (this should
+ * even cover the zero-cksum case) */
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) &&
+ (body->oa.o_flags & OBD_FL_RECOV_RESEND))
+ check_read_checksum(local_nb, npages_read, exp,
+ &body->oa, &req->rq_peer,
+ body->oa.o_cksum,
+ repbody->oa.o_cksum, cksum_type);
} else {
repbody->oa.o_valid = 0;
}
/* Check if client was evicted while we were doing i/o before touching
* network */
- if (likely(rc == 0 &&
- !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2) &&
- !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_BULK))) {
- rc = target_bulk_io(exp, desc, &lwi);
+ if (rc == 0) {
+ if (body->oa.o_valid & OBD_MD_FLFLAGS &&
+ body->oa.o_flags & OBD_FL_SHORT_IO) {
+ unsigned char *short_io_buf;
+ int short_io_size;
+
+ short_io_buf = req_capsule_server_get(&req->rq_pill,
+ &RMF_SHORT_IO);
+ short_io_size = req_capsule_get_size(&req->rq_pill,
+ &RMF_SHORT_IO,
+ RCL_SERVER);
+ rc = tgt_pages2shortio(local_nb, npages_read,
+ short_io_buf, short_io_size);
+ if (rc >= 0)
+ req_capsule_shrink(&req->rq_pill,
+ &RMF_SHORT_IO, rc,
+ RCL_SERVER);
+ rc = rc > 0 ? 0 : rc;
+ } else if (!CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) {
+ rc = target_bulk_io(exp, desc);
+ }
no_reply = rc != 0;
+ } else {
+ if (body->oa.o_valid & OBD_MD_FLFLAGS &&
+ body->oa.o_flags & OBD_FL_SHORT_IO)
+ req_capsule_shrink(&req->rq_pill, &RMF_SHORT_IO, 0,
+ RCL_SERVER);
}
out_commitrw:
rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1, ioo,
remote_nb, npages, local_nb, rc);
out_lock:
- tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PR);
+ tgt_brw_unlock(exp, ioo, remote_nb, &lockh, LCK_PR);
if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
ptlrpc_free_bulk(desc);
ptlrpc_req_drop_rs(req);
LCONSOLE_WARN("%s: Bulk IO read error with %s (at %s), "
"client will retry: rc %d\n",
- exp->exp_obd->obd_name,
+ obd_name,
obd_uuid2str(&exp->exp_client_uuid),
obd_export_nid2str(exp), rc);
}
/* send a bulk after reply to simulate a network delay or reordering
- * by a router */
- if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
- wait_queue_head_t waitq;
- struct l_wait_info lwi1;
-
+ * by a router - Note that !desc implies short io, so there is no bulk
+ * to reorder. */
+ if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) &&
+ desc) {
CDEBUG(D_INFO, "reorder BULK\n");
- init_waitqueue_head(&waitq);
- lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
- l_wait_event(waitq, 0, &lwi1);
- target_bulk_io(exp, desc, &lwi);
+ ssleep(3);
+ target_bulk_io(exp, desc);
ptlrpc_free_bulk(desc);
}
}
EXPORT_SYMBOL(tgt_brw_read);
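+/* Copy a short I/O write payload, received inline in the request buffer,
+ * into the prepared local niobuf pages instead of doing a bulk transfer. */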
+static int tgt_shortio2pages(struct niobuf_local *local, int npages,
+ unsigned char *buf, unsigned int size)
+{
+ int i, off, len;
+ char *ptr;
+
+ for (i = 0; i < npages; i++) {
+ off = local[i].lnb_page_offset & ~PAGE_MASK;
+ len = local[i].lnb_len;
+
+ if (len == 0)
+ continue;
+
+ CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",
+ i, off, len, size);
+ ptr = kmap_atomic(local[i].lnb_page);
+ if (ptr == NULL)
+ return -EINVAL;
+ memcpy(ptr + off, buf, len < size ? len : size);
+ kunmap_atomic(ptr);
+ buf += len;
+ size -= len;
+ }
+ return 0;
+}
+
static void tgt_warn_on_cksum(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc,
struct niobuf_local *local_nb, int npages,
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body != NULL);
- if (req->rq_peer.nid != desc->bd_sender) {
+ if (desc && req->rq_peer.nid != desc->bd_sender) {
via = " via ";
router = libcfs_nid2str(desc->bd_sender);
}
+ if (exp->exp_obd->obd_checksum_dump)
+ dump_all_bulk_pages(&body->oa, npages, local_nb, server_cksum,
+ client_cksum);
+
if (mmap) {
CDEBUG_LIMIT(D_INFO, "client csum %x, server csum %x\n",
client_cksum, server_cksum);
return;
}
- LCONSOLE_ERROR_MSG(0x168, "BAD WRITE CHECKSUM: %s from %s%s%s inode "
+ LCONSOLE_ERROR_MSG(0x168, "%s: BAD WRITE CHECKSUM: from %s%s%s inode "
DFID" object "DOSTID" extent [%llu-%llu"
"]: client csum %x, server csum %x\n",
exp->exp_obd->obd_name, libcfs_id2str(req->rq_peer),
struct niobuf_local *local_nb;
struct obd_ioobj *ioo;
struct ost_body *body, *repbody;
- struct l_wait_info lwi;
struct lustre_handle lockh = {0};
__u32 *rcs;
int objcount, niocount, npages;
int rc, i, j;
- cksum_type_t cksum_type = OBD_CKSUM_CRC32;
+ enum cksum_types cksum_type = OBD_CKSUM_CRC32;
bool no_reply = false, mmap;
struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
bool wait_sync = false;
+ const char *obd_name = exp->exp_obd->obd_name;
+ /* '1' for consistency with code that checks !mpflags to restore */
+ unsigned int mpflags = 1;
ENTRY;
- if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL) {
+ if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL &&
+ ptlrpc_req2svc(req)->srv_req_portal != MDS_IO_PORTAL) {
CERROR("%s: deny write request from %s to portal %u\n",
tgt_name(tsi->tsi_tgt),
obd_export_nid2str(req->rq_export),
CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, cfs_fail_val > 0 ?
cfs_fail_val : (obd_timeout + 1) / 4);
+ /* Delay write commit to show stale size information */
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_NO_SIZE_DATA, cfs_fail_val);
+
/* There must be big cache in current thread to process this request
* if it is NULL then something went wrong and it wasn't allocated,
* report -ENOMEM in that case */
RETURN(err_serious(-EPROTO));
if ((remote_nb[0].rnb_flags & OBD_BRW_MEMALLOC) &&
- (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self))
- memory_pressure_set();
+ ptlrpc_connection_is_local(exp->exp_connection))
+ mpflags = memalloc_noreclaim_save();
req_capsule_set_size(&req->rq_pill, &RMF_RCS, RCL_SERVER,
niocount * sizeof(*rcs));
local_nb = tbc->local;
- rc = tgt_brw_lock(exp->exp_obd->obd_namespace, &tsi->tsi_resid, ioo,
- remote_nb, &lockh, LCK_PW);
+ rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
+ &lockh, LCK_PW);
if (rc != 0)
GOTO(out, rc);
* If getting the lock took more time than
* client was willing to wait, drop it. b=11330
*/
- if (cfs_time_current_sec() > req->rq_deadline ||
+ if (ktime_get_real_seconds() > req->rq_deadline ||
OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
no_reply = true;
- CERROR("%s: Dropping timed-out write from %s because locking "
- "object "DOSTID" took %ld seconds (limit was %ld).\n",
+ CERROR("%s: Dropping timed-out write from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
tgt_name(tsi->tsi_tgt), libcfs_id2str(req->rq_peer),
POSTID(&ioo->ioo_oid),
- cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
req->rq_deadline - req->rq_arrival_time.tv_sec);
GOTO(out_lock, rc = -ETIMEDOUT);
}
objcount, ioo, remote_nb, &npages, local_nb);
if (rc < 0)
GOTO(out_lock, rc);
+ if (body->oa.o_valid & OBD_MD_FLFLAGS &&
+ body->oa.o_flags & OBD_FL_SHORT_IO) {
+ unsigned int short_io_size;
+ unsigned char *short_io_buf;
+
+ short_io_size = req_capsule_get_size(&req->rq_pill,
+ &RMF_SHORT_IO,
+ RCL_CLIENT);
+ short_io_buf = req_capsule_client_get(&req->rq_pill,
+ &RMF_SHORT_IO);
+ CDEBUG(D_INFO, "Client uses short I/O for data transfer,"
+ " size = %d\n", short_io_size);
+
+ /* Copy short io buf to pages */
+ rc = tgt_shortio2pages(local_nb, npages, short_io_buf,
+ short_io_size);
+ desc = NULL;
+ } else {
+ desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
+ PTLRPC_BULK_GET_SINK,
+ OST_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_nopin_ops);
+ if (desc == NULL)
+ GOTO(skip_transfer, rc = -ENOMEM);
- desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
- PTLRPC_BULK_GET_SINK | PTLRPC_BULK_BUF_KIOV,
- OST_BULK_PORTAL,
- &ptlrpc_bulk_kiov_nopin_ops);
- if (desc == NULL)
- GOTO(skip_transfer, rc = -ENOMEM);
+ /* NB Having prepped, we must commit... */
+ for (i = 0; i < npages; i++)
+ desc->bd_frag_ops->add_kiov_frag(desc,
+ local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
+ local_nb[i].lnb_len);
- /* NB Having prepped, we must commit... */
- for (i = 0; i < npages; i++)
- desc->bd_frag_ops->add_kiov_frag(desc,
- local_nb[i].lnb_page,
- local_nb[i].lnb_page_offset,
- local_nb[i].lnb_len);
+ rc = sptlrpc_svc_prep_bulk(req, desc);
+ if (rc != 0)
+ GOTO(skip_transfer, rc);
- rc = sptlrpc_svc_prep_bulk(req, desc);
- if (rc != 0)
- GOTO(skip_transfer, rc);
+ rc = target_bulk_io(exp, desc);
+ }
- rc = target_bulk_io(exp, desc, &lwi);
no_reply = rc != 0;
skip_transfer:
static int cksum_counter;
if (body->oa.o_valid & OBD_MD_FLFLAGS)
- cksum_type = cksum_type_unpack(body->oa.o_flags);
+ cksum_type = obd_cksum_type_unpack(body->oa.o_flags);
repbody->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
repbody->oa.o_flags &= ~OBD_FL_CKSUM_ALL;
- repbody->oa.o_flags |= cksum_type_pack(cksum_type);
- repbody->oa.o_cksum = tgt_checksum_bulk(tsi->tsi_tgt, desc,
- OST_WRITE, cksum_type);
+ repbody->oa.o_flags |= obd_cksum_type_pack(obd_name,
+ cksum_type);
+
+ rc = tgt_checksum_niobuf_rw(tsi->tsi_tgt, cksum_type,
+ local_nb, npages, OST_WRITE,
+ &repbody->oa.o_cksum);
+ if (rc < 0)
+ GOTO(out_commitrw, rc);
+
cksum_counter++;
if (unlikely(body->oa.o_cksum != repbody->oa.o_cksum)) {
}
}
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK2, cfs_fail_val);
+
+out_commitrw:
/* Must commit after prep above in all cases */
rc = obd_commitrw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
objcount, ioo, remote_nb, npages, local_nb, rc);
ptlrpc_lprocfs_brw(req, nob);
}
out_lock:
- tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PW);
+ tgt_brw_unlock(exp, ioo, remote_nb, &lockh, LCK_PW);
if (desc)
ptlrpc_free_bulk(desc);
out:
if (!exp->exp_obd->obd_no_transno)
LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s),"
" client will retry: rc = %d\n",
- exp->exp_obd->obd_name,
+ obd_name,
obd_uuid2str(&exp->exp_client_uuid),
obd_export_nid2str(exp), rc);
}
- memory_pressure_clr();
+
+ if (mpflags)
+ memalloc_noreclaim_restore(mpflags);
+
RETURN(rc);
}
EXPORT_SYMBOL(tgt_brw_write);
+/**
+ * Common request handler for OST_SEEK RPC.
+ *
+ * It looks up the object by its FID, performs the requested lseek and
+ * packs the result into the reply. Only SEEK_HOLE and SEEK_DATA are
+ * supported.
+ *
+ * \param[in] tsi target session environment for this request
+ *
+ * \retval 0 if successful
+ * \retval negative value on error
+ */
+int tgt_lseek(struct tgt_session_info *tsi)
+{
+ struct lustre_handle lh = { 0 };
+ struct dt_object *dob;
+ struct ost_body *repbody;
+ loff_t offset = tsi->tsi_ost_body->oa.o_size;
+ int whence = tsi->tsi_ost_body->oa.o_mode;
+ bool srvlock;
+ int rc = 0;
+
+ ENTRY;
+
+ if (whence != SEEK_HOLE && whence != SEEK_DATA)
+ RETURN(-EPROTO);
+
+ /* Negative offset is prohibited on wire and must be handled on client
+ * prior to sending the RPC.
+ */
+ if (offset < 0)
+ RETURN(-EPROTO);
+
+ repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_OST_BODY);
+ if (repbody == NULL)
+ RETURN(-ENOMEM);
+ repbody->oa = tsi->tsi_ost_body->oa;
+
+ srvlock = tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
+ tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK;
+ if (srvlock) {
+ rc = tgt_data_lock(tsi->tsi_env, tsi->tsi_exp, &tsi->tsi_resid,
+ offset, OBD_OBJECT_EOF, &lh, LCK_PR);
+ if (rc)
+ RETURN(rc);
+ }
+
+ dob = dt_locate(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, &tsi->tsi_fid);
+ if (IS_ERR(dob))
+ GOTO(out, rc = PTR_ERR(dob));
+
+ if (!dt_object_exists(dob))
+ GOTO(obj_put, rc = -ENOENT);
+
+ repbody->oa.o_size = dt_lseek(tsi->tsi_env, dob, offset, whence);
+ rc = 0;
+obj_put:
+ dt_object_put(tsi->tsi_env, dob);
+out:
+ if (srvlock)
+ tgt_data_unlock(&lh, LCK_PR);
+
+ RETURN(rc);
+}
+EXPORT_SYMBOL(tgt_lseek);
+
/* Check if request can be reconstructed from saved reply data
* A copy of the reply data is returned in @trd if the pointer is not NULL
*/
-bool req_can_reconstruct(struct ptlrpc_request *req,
+int req_can_reconstruct(struct ptlrpc_request *req,
struct tg_reply_data *trd)
{
struct tg_export_data *ted = &req->rq_export->exp_target_data;
struct lsd_client_data *lcd = ted->ted_lcd;
- bool found;
+ int found;
if (tgt_is_multimodrpcs_client(req->rq_export))
return tgt_lookup_reply(req, trd);