* GPL HEADER END
*/
/*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2013, 2017, Intel Corporation.
*/
/*
* lustre/target/tgt_handler.c
#define DEBUG_SUBSYSTEM S_CLASS
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
+
#include <obd.h>
#include <obd_class.h>
#include <obd_cksum.h>
-#include <md_object.h>
#include <lustre_lfsck.h>
#include <lustre_nodemap.h>
+#include <lustre_acl.h>
#include "tgt_internal.h"
if (!IS_ERR(obj)) {
if ((flags & HABEO_CORPUS) && !lu_object_exists(obj)) {
lu_object_put(tsi->tsi_env, obj);
- /* for capability renew ENOENT will be handled in
- * mdt_renew_capa */
- if (body->mbo_valid & OBD_MD_FLOSSCAPA)
- rc = 0;
- else
- rc = -ENOENT;
+ rc = -ENOENT;
} else {
tsi->tsi_corpus = obj;
rc = 0;
{
struct ost_body *body;
struct req_capsule *pill = tsi->tsi_pill;
- struct lustre_capa *capa;
struct lu_nodemap *nodemap;
int rc;
if (rc)
RETURN(rc);
- nodemap = tsi->tsi_exp->exp_target_data.ted_nodemap;
+ nodemap = nodemap_get_from_exp(tsi->tsi_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
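+ /* map the client-supplied UID/GID to filesystem IDs through the
+ * export's nodemap; the nodemap reference is dropped once done */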
body->oa.o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS,
body->oa.o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
NODEMAP_CLIENT_TO_FS,
body->oa.o_gid);
-
- if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
- capa = req_capsule_client_get(pill, &RMF_CAPA1);
- if (capa == NULL) {
- CERROR("%s: OSSCAPA flag is set without capability\n",
- tgt_name(tsi->tsi_tgt));
- RETURN(-EFAULT);
- }
- }
+ nodemap_putref(nodemap);
tsi->tsi_ost_body = body;
tsi->tsi_fid = body->oa.o_oi.oi_fid;
dlm_req = req_capsule_client_get(pill, &RMF_DLM_REQ);
if (dlm_req != NULL) {
+ union ldlm_wire_policy_data *policy =
+ &dlm_req->lock_desc.l_policy_data;
+
if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
LDLM_IBITS &&
- dlm_req->lock_desc.l_policy_data.\
- l_inodebits.bits == 0)) {
+ (policy->l_inodebits.bits |
+ policy->l_inodebits.try_bits) == 0)) {
/*
* Lock without inodebits makes no sense and
* will oops later in ldlm. If client miss to
{
int serious = 0;
int rc;
+ __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
ENTRY;
+
+ /* When dealing with sec context requests, no export is associated yet,
+ * because these requests are sent before *_CONNECT requests.
+ * A NULL req->rq_export means the normal *_common_slice handlers will
+ * not be called, because there is no reference to the target.
+ * So deal with them by hand and jump directly to target_send_reply().
+ */
+ switch (opc) {
+ case SEC_CTX_INIT:
+ case SEC_CTX_INIT_CONT:
+ case SEC_CTX_FINI:
+ CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
+ GOTO(out, rc = 0);
+ }
+
/*
* Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
* to put same checks into handlers like mdt_close(), mdt_reint(),
*/
if (OBD_FAIL_CHECK_ORSET(h->th_fail_id, OBD_FAIL_ONCE))
RETURN(0);
+ if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == MDS_REINT &&
+ OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_MULTI_NET)))
+ RETURN(0);
rc = tgt_request_preprocess(tsi, h, req);
/* pack reply if reply format is fixed */
RCL_SERVER))
req_capsule_set_size(tsi->tsi_pill, &RMF_LOGCOOKIES,
RCL_SERVER, 0);
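+ /* preallocate the reply ACL buffer at the old maximum POSIX ACL size */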
+ if (req_capsule_has_field(tsi->tsi_pill, &RMF_ACL, RCL_SERVER))
+ req_capsule_set_size(tsi->tsi_pill,
+ &RMF_ACL, RCL_SERVER,
+ LUSTRE_POSIX_ACL_MAX_SIZE_OLD);
+
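+ /* reserve reply space for short I/O data: the first remote niobuf
+ * length when the client set OBD_FL_SHORT_IO, otherwise zero */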
+ if (req_capsule_has_field(tsi->tsi_pill, &RMF_SHORT_IO,
+ RCL_SERVER)) {
+ struct niobuf_remote *remote_nb =
+ req_capsule_client_get(tsi->tsi_pill,
+ &RMF_NIOBUF_REMOTE);
+ struct ost_body *body = tsi->tsi_ost_body;
+
+ req_capsule_set_size(tsi->tsi_pill, &RMF_SHORT_IO,
+ RCL_SERVER,
+ (body->oa.o_flags & OBD_FL_SHORT_IO) ?
+ remote_nb[0].rnb_len : 0);
+ }
rc = req_capsule_server_pack(tsi->tsi_pill);
}
if (likely(rc == 0 && req->rq_export))
target_committed_to_req(req);
+out:
target_send_reply(req, rc, tsi->tsi_reply_fail_id);
RETURN(0);
}
*process = 1;
RETURN(0);
case MDS_CLOSE:
- case MDS_DONE_WRITING:
case MDS_SYNC: /* used in unmounting */
case OBD_PING:
case MDS_REINT:
case OST_SETATTR:
case OST_SYNC:
case OST_WRITE:
+ case MDS_HSM_PROGRESS:
+ case MDS_HSM_STATE_SET:
+ case MDS_HSM_REQUEST:
*process = target_queue_recovery_request(req, obd);
RETURN(0);
/* sanity check: if the xid matches, the request must be marked as a
* resent or replayed */
- if (req_xid_is_last(req)) {
+ if (req_can_reconstruct(req, NULL)) {
if (!(lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_RESENT | MSG_REPLAY))) {
- DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches "
- "last_xid, expected REPLAY or RESENT flag "
+ DEBUG_REQ(D_WARNING, req, "rq_xid %llu matches "
+ "saved xid, expected REPLAY or RESENT flag "
"(%x)", req->rq_xid,
lustre_msg_get_flags(req->rq_reqmsg));
req->rq_status = -ENOTCONN;
ENTRY;
tgt = class_exp2tgt(req->rq_export);
+ if (unlikely(tgt == NULL)) {
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ class_exp2obd(req->rq_export)->obd_name);
+ RETURN(ERR_PTR(-EINVAL));
+ }
for (s = tgt->lut_slice; s->tos_hs != NULL; s++)
if (s->tos_opc_start <= opc && opc < s->tos_opc_end)
RETURN(h);
}
+static int process_req_last_xid(struct ptlrpc_request *req)
+{
+ __u64 last_xid;
+ ENTRY;
+
+ /* check request's xid is consistent with export's last_xid */
+ last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
+ if (last_xid > req->rq_export->exp_last_xid)
+ req->rq_export->exp_last_xid = last_xid;
+
+ if (req->rq_xid == 0 ||
+ (req->rq_xid <= req->rq_export->exp_last_xid)) {
+ DEBUG_REQ(D_ERROR, req, "Unexpected xid %llx vs. "
+ "last_xid %llx\n", req->rq_xid,
+ req->rq_export->exp_last_xid);
+ /* Some requests are allowed to be sent during replay,
+ * such as OUT update requests and FLD requests, so it
+ * is possible for a replay request to have a smaller XID
+ * than the exp_last_xid.
+ *
+ * Some non-replay requests may have a smaller XID as
+ * well:
+ *
+ * - The client sends a no_resend RPC, like statfs;
+ * - The RPC times out (or hits some other error) on the
+ * client, so it is removed from the unreplied list;
+ * - The client sends some other request that bumps the
+ * exp_last_xid on the server;
+ * - The former RPC finally gets a chance to be processed;
+ */
+ if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))
+ RETURN(-EPROTO);
+ }
+
+ /* try to release in-memory reply data */
+ if (tgt_is_multimodrpcs_client(req->rq_export)) {
+ tgt_handle_received_xid(req->rq_export,
+ lustre_msg_get_last_xid(req->rq_reqmsg));
+ if (!(lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_RESENT | MSG_REPLAY)))
+ tgt_handle_tag(req->rq_export,
+ lustre_msg_get_tag(req->rq_reqmsg));
+ }
+ RETURN(0);
+}
+
int tgt_request_handle(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);
struct lu_target *tgt;
int request_fail_id = 0;
__u32 opc = lustre_msg_get_opc(msg);
+ struct obd_device *obd;
int rc;
-
+ bool is_connect = false;
ENTRY;
/* Refill the context, to make sure all thread keys are allocated */
* target, otherwise that should be connect operation */
if (opc == MDS_CONNECT || opc == OST_CONNECT ||
opc == MGS_CONNECT) {
+ is_connect = true;
req_capsule_set(&req->rq_pill, &RQF_CONNECT);
rc = target_handle_connect(req);
if (rc != 0) {
}
if (unlikely(!class_connected_export(req->rq_export))) {
+ if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT ||
+ opc == SEC_CTX_FINI) {
+ /* sec context initialization has to be handled
+ * by hand in tgt_handle_request0() */
+ tsi->tsi_reply_fail_id = OBD_FAIL_SEC_CTX_INIT_NET;
+ h = NULL;
+ GOTO(handle_recov, rc = 0);
+ }
CDEBUG(D_HA, "operation %d on unconnected OST from %s\n",
opc, libcfs_id2str(req->rq_peer));
req->rq_status = -ENOTCONN;
else
tsi->tsi_jobid = NULL;
+ if (tgt == NULL) {
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ class_exp2obd(req->rq_export)->obd_name);
+ req->rq_status = -EINVAL;
+ rc = ptlrpc_error(req);
+ GOTO(out, rc);
+ }
+
+ /* Skip last_xid processing for the recovery thread, otherwise the
+ * last_xid of the same request could be processed twice: first when
+ * the incoming request is processed, and again when the request is
+ * handled by the recovery thread. */
+ obd = class_exp2obd(req->rq_export);
+ if (is_connect) {
+ /* reset the exp_last_xid on each connection. */
+ req->rq_export->exp_last_xid = 0;
+ } else if (obd->obd_recovery_data.trd_processing_task !=
+ current_pid()) {
+ rc = process_req_last_xid(req);
+ if (rc) {
+ req->rq_status = rc;
+ rc = ptlrpc_error(req);
+ GOTO(out, rc);
+ }
+ }
+
request_fail_id = tgt->lut_request_fail_id;
tsi->tsi_reply_fail_id = tgt->lut_reply_fail_id;
GOTO(out, rc);
}
+ LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
+ h->th_opc, opc);
+
if (CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))
GOTO(out, rc = 0);
GOTO(out, rc);
}
+handle_recov:
rc = tgt_handle_recovery(req, tsi->tsi_reply_fail_id);
if (likely(rc == 1)) {
- LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
- h->th_opc, opc);
rc = tgt_handle_request0(tsi, h, req);
if (rc)
GOTO(out, rc);
* Unified target generic handlers.
*/
-/*
- * Security functions
- */
-static inline void tgt_init_sec_none(struct obd_connect_data *reply)
-{
- reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |
- OBD_CONNECT_RMT_CLIENT_FORCE |
- OBD_CONNECT_MDS_CAPA |
- OBD_CONNECT_OSS_CAPA);
-}
-
-static int tgt_init_sec_level(struct ptlrpc_request *req)
-{
- struct lu_target *tgt = class_exp2tgt(req->rq_export);
- char *client = libcfs_nid2str(req->rq_peer.nid);
- struct obd_connect_data *data, *reply;
- int rc = 0;
- bool remote;
-
- ENTRY;
-
- data = req_capsule_client_get(&req->rq_pill, &RMF_CONNECT_DATA);
- reply = req_capsule_server_get(&req->rq_pill, &RMF_CONNECT_DATA);
- if (data == NULL || reply == NULL)
- RETURN(-EFAULT);
-
- /* connection from MDT is always trusted */
- if (req->rq_auth_usr_mdt) {
- tgt_init_sec_none(reply);
- RETURN(0);
- }
-
- /* no GSS support case */
- if (!req->rq_auth_gss) {
- if (tgt->lut_sec_level > LUSTRE_SEC_NONE) {
- CWARN("client %s -> target %s does not use GSS, "
- "can not run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- RETURN(-EACCES);
- } else {
- tgt_init_sec_none(reply);
- RETURN(0);
- }
- }
-
- /* old version case */
- if (unlikely(!(data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) ||
- !(data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) ||
- !(data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA))) {
- if (tgt->lut_sec_level > LUSTRE_SEC_NONE) {
- CWARN("client %s -> target %s uses old version, "
- "can not run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- RETURN(-EACCES);
- } else {
- CWARN("client %s -> target %s uses old version, "
- "run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- tgt_init_sec_none(reply);
- RETURN(0);
- }
- }
-
- remote = data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT_FORCE;
- if (remote) {
- if (!req->rq_auth_remote)
- CDEBUG(D_SEC, "client (local realm) %s -> target %s "
- "asked to be remote.\n", client, tgt_name(tgt));
- } else if (req->rq_auth_remote) {
- remote = true;
- CDEBUG(D_SEC, "client (remote realm) %s -> target %s is set "
- "as remote by default.\n", client, tgt_name(tgt));
- }
-
- if (remote) {
- if (!tgt->lut_oss_capa) {
- CDEBUG(D_SEC,
- "client %s -> target %s is set as remote,"
- " but OSS capabilities are not enabled: %d.\n",
- client, tgt_name(tgt), tgt->lut_oss_capa);
- RETURN(-EACCES);
- }
- } else {
- if (!uid_valid(make_kuid(&init_user_ns, req->rq_auth_uid))) {
- CDEBUG(D_SEC, "client %s -> target %s: user is not "
- "authenticated!\n", client, tgt_name(tgt));
- RETURN(-EACCES);
- }
- }
-
-
- switch (tgt->lut_sec_level) {
- case LUSTRE_SEC_NONE:
- if (remote) {
- CDEBUG(D_SEC,
- "client %s -> target %s is set as remote, "
- "can not run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- RETURN(-EACCES);
- }
- tgt_init_sec_none(reply);
- break;
- case LUSTRE_SEC_REMOTE:
- if (!remote)
- tgt_init_sec_none(reply);
- break;
- case LUSTRE_SEC_ALL:
- if (remote)
- break;
- reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |
- OBD_CONNECT_RMT_CLIENT_FORCE);
- if (!tgt->lut_oss_capa)
- reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
- if (!tgt->lut_mds_capa)
- reply->ocd_connect_flags &= ~OBD_CONNECT_MDS_CAPA;
- break;
- default:
- RETURN(-EINVAL);
- }
-
- RETURN(rc);
-}
-
int tgt_connect_check_sptlrpc(struct ptlrpc_request *req, struct obd_export *exp)
{
struct lu_target *tgt = class_exp2tgt(exp);
spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
+
+ /* when on mgs, if no restriction is set, or if client
+ * is loopback, allow any flavor */
+ if ((strcmp(exp->exp_obd->obd_type->typ_name,
+ LUSTRE_MGS_NAME) == 0) &&
+ (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_NULL ||
+ LNET_NETTYP(LNET_NIDNET(exp->exp_connection->c_peer.nid))
+ == LOLND))
+ exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;
+
if (exp->exp_flvr.sf_rpc != SPTLRPC_FLVR_ANY &&
exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
CERROR("%s: unauthorized rpc flavor %x from %s, "
return rc;
}
-int tgt_adapt_sptlrpc_conf(struct lu_target *tgt, int initial)
+int tgt_adapt_sptlrpc_conf(struct lu_target *tgt)
{
struct sptlrpc_rule_set tmp_rset;
int rc;
+ if (unlikely(tgt == NULL)) {
+ CERROR("No target passed\n");
+ return -EINVAL;
+ }
+
sptlrpc_rule_set_init(&tmp_rset);
- rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset, initial);
+ rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset);
if (rc) {
CERROR("%s: failed get sptlrpc rules: rc = %d\n",
tgt_name(tgt), rc);
ENTRY;
- rc = tgt_init_sec_level(req);
- if (rc != 0)
- GOTO(out, rc);
-
/* XXX: better to call this check right after getting new export but
* before last_rcvd slot allocation to avoid server load upon insecure
* connects. This is to be fixed after unifiyng all targets.
return err_serious(-EOPNOTSUPP);
}
-int tgt_obd_qc_callback(struct tgt_session_info *tsi)
+int tgt_send_buffer(struct tgt_session_info *tsi, struct lu_rdbuf *rdbuf)
{
- return err_serious(-EOPNOTSUPP);
+ struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
+ struct ptlrpc_request *req = tgt_ses_req(tsi);
+ struct obd_export *exp = req->rq_export;
+ struct ptlrpc_bulk_desc *desc;
+ struct l_wait_info *lwi = &tti->tti_u.update.tti_wait_info;
+ int i;
+ int rc;
+
+ ENTRY;
+
+ desc = ptlrpc_prep_bulk_exp(req, rdbuf->rb_nbufs, 1,
+ PTLRPC_BULK_PUT_SOURCE | PTLRPC_BULK_BUF_KVEC,
+ MDS_BULK_PORTAL, &ptlrpc_bulk_kvec_ops);
+ if (desc == NULL)
+ RETURN(-ENOMEM);
+
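+ /* attach each read buffer as a kvec fragment of the bulk descriptor */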
+ for (i = 0; i < rdbuf->rb_nbufs; i++)
+ desc->bd_frag_ops->add_iov_frag(desc,
+ rdbuf->rb_bufs[i].lb_buf,
+ rdbuf->rb_bufs[i].lb_len);
+
+ rc = target_bulk_io(exp, desc, lwi);
+ ptlrpc_free_bulk(desc);
+ RETURN(rc);
}
+EXPORT_SYMBOL(tgt_send_buffer);
int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
{
ENTRY;
- desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1, BULK_PUT_SOURCE,
- MDS_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1,
+ PTLRPC_BULK_PUT_SOURCE |
+ PTLRPC_BULK_BUF_KIOV,
+ MDS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL)
RETURN(-ENOMEM);
if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
- /* old client requires reply size in it's PAGE_CACHE_SIZE,
+ /* old client requires reply size in its PAGE_SIZE,
* which is rdpg->rp_count */
nob = rdpg->rp_count;
for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
i++, tmpcount -= tmpsize) {
- tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
- ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
+ tmpsize = min_t(int, tmpcount, PAGE_SIZE);
+ desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0,
+ tmpsize);
}
LASSERT(desc->bd_nob == nob);
rc = target_bulk_io(exp, desc, lwi);
- ptlrpc_free_bulk_pin(desc);
+ ptlrpc_free_bulk(desc);
RETURN(rc);
}
EXPORT_SYMBOL(tgt_sendpage);
GOTO(out, rc = -EFAULT);
rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
exp_max_brw_size(tsi->tsi_exp));
- rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE -1) >> PAGE_CACHE_SHIFT;
+ rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* allocate pages to store the containers */
OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
if (rdpg->rp_pages == NULL)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < rdpg->rp_npages; i++) {
- rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
+ rdpg->rp_pages[i] = alloc_page(GFP_NOFS);
if (rdpg->rp_pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
struct tgt_handler tgt_obd_handlers[] = {
TGT_OBD_HDL (0, OBD_PING, tgt_obd_ping),
TGT_OBD_HDL_VAR(0, OBD_LOG_CANCEL, tgt_obd_log_cancel),
-TGT_OBD_HDL_VAR(0, OBD_QC_CALLBACK, tgt_obd_qc_callback),
TGT_OBD_HDL (0, OBD_IDX_READ, tgt_obd_idx_read)
};
EXPORT_SYMBOL(tgt_obd_handlers);
tgt->lut_obd->obd_last_committed) {
rc = dt_object_sync(env, obj, start, end);
}
+ atomic_inc(&tgt->lut_sync_count);
RETURN(rc);
}
* Unified target DLM handlers.
*/
-/* Ensure that data and metadata are synced to the disk when lock is cancelled
- * (if requested) */
+/**
+ * Unified target BAST
+ *
+ * Ensure data and metadata are synced to disk when a lock is cancelled, if
+ * Sync on Cancel (SOC) is enabled. For an extent lock, syncing the object is
+ * normally enough, but for a cross-MDT lock the remote object version is not
+ * set, so a full filesystem sync is needed.
+ *
+ * \param lock server side lock
+ * \param desc lock desc
+ * \param data ldlm_cb_set_arg
+ * \param flag indicates whether this is a cancelling or blocking callback
+ * \retval 0 on success
+ * \retval negative number on error
+ */
static int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
struct lu_env env;
struct lu_target *tgt;
- struct dt_object *obj;
+ struct dt_object *obj = NULL;
struct lu_fid fid;
int rc = 0;
tgt = class_exp2tgt(lock->l_export);
+ if (unlikely(tgt == NULL)) {
+ CDEBUG(D_ERROR, "%s: No target for connected export\n",
+ class_exp2obd(lock->l_export)->obd_name);
+ RETURN(-EINVAL);
+ }
+
if (flag == LDLM_CB_CANCELING &&
- (lock->l_granted_mode & (LCK_PW | LCK_GROUP)) &&
+ (lock->l_granted_mode & (LCK_EX | LCK_PW | LCK_GROUP)) &&
(tgt->lut_sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
(tgt->lut_sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL &&
- lock->l_flags & LDLM_FL_CBPENDING))) {
+ ldlm_is_cbpending(lock))) &&
+ ((exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) ||
+ lock->l_resource->lr_type == LDLM_EXTENT)) {
__u64 start = 0;
__u64 end = OBD_OBJECT_EOF;
ost_fid_from_resid(&fid, &lock->l_resource->lr_name,
tgt->lut_lsd.lsd_osd_index);
- obj = dt_locate(&env, tgt->lut_bottom, &fid);
- if (IS_ERR(obj))
- GOTO(err_env, rc = PTR_ERR(obj));
-
- if (!dt_object_exists(obj))
- GOTO(err_put, rc = -ENOENT);
if (lock->l_resource->lr_type == LDLM_EXTENT) {
+ obj = dt_locate(&env, tgt->lut_bottom, &fid);
+ if (IS_ERR(obj))
+ GOTO(err_env, rc = PTR_ERR(obj));
+
+ if (!dt_object_exists(obj))
+ GOTO(err_put, rc = -ENOENT);
+
start = lock->l_policy_data.l_extent.start;
end = lock->l_policy_data.l_extent.end;
}
rc = tgt_sync(&env, tgt, obj, start, end);
if (rc < 0) {
- CERROR("%s: syncing "DFID" ("LPU64"-"LPU64") on lock "
+ CERROR("%s: syncing "DFID" (%llu-%llu) on lock "
"cancel: rc = %d\n",
tgt_name(tgt), PFID(&fid),
lock->l_policy_data.l_extent.start,
lock->l_policy_data.l_extent.end, rc);
}
err_put:
- lu_object_put(&env, &obj->do_lu);
+ if (obj != NULL)
+ dt_object_put(&env, obj);
err_env:
lu_env_fini(&env);
}
};
EXPORT_SYMBOL(tgt_sec_ctx_handlers);
+int (*tgt_lfsck_in_notify_local)(const struct lu_env *env,
+ struct dt_device *key,
+ struct lfsck_req_local *lrl,
+ struct thandle *th) = NULL;
+
+void tgt_register_lfsck_in_notify_local(int (*notify)(const struct lu_env *,
+ struct dt_device *,
+ struct lfsck_req_local *,
+ struct thandle *))
+{
+ tgt_lfsck_in_notify_local = notify;
+}
+EXPORT_SYMBOL(tgt_register_lfsck_in_notify_local);
+
int (*tgt_lfsck_in_notify)(const struct lu_env *env,
struct dt_device *key,
- struct lfsck_request *lr,
- struct thandle *th) = NULL;
+ struct lfsck_request *lr) = NULL;
void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
struct dt_device *,
- struct lfsck_request *,
- struct thandle *))
+ struct lfsck_request *))
{
tgt_lfsck_in_notify = notify;
}
static int (*tgt_lfsck_query)(const struct lu_env *env,
struct dt_device *key,
- struct lfsck_request *lr) = NULL;
+ struct lfsck_request *req,
+ struct lfsck_reply *rep,
+ struct lfsck_query *que) = NULL;
void tgt_register_lfsck_query(int (*query)(const struct lu_env *,
struct dt_device *,
- struct lfsck_request *))
+ struct lfsck_request *,
+ struct lfsck_reply *,
+ struct lfsck_query *))
{
tgt_lfsck_query = query;
}
if (lr == NULL)
RETURN(-EPROTO);
- rc = tgt_lfsck_in_notify(env, key, lr, NULL);
+ rc = tgt_lfsck_in_notify(env, key, lr);
RETURN(rc);
}
if (reply == NULL)
RETURN(-ENOMEM);
- rc = tgt_lfsck_query(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, request);
- reply->lr_status = rc;
+ rc = tgt_lfsck_query(tsi->tsi_env, tsi->tsi_tgt->lut_bottom,
+ request, reply, NULL);
RETURN(rc < 0 ? rc : 0);
}
EXIT;
}
EXPORT_SYMBOL(tgt_io_thread_done);
+
+/**
+ * Helper function for taking the server-side DLM lock on a Data-on-MDT
+ * file, if asked by the client.
+ */
+int tgt_mdt_data_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
+ struct lustre_handle *lh, int mode, __u64 *flags)
+{
+ union ldlm_policy_data policy = {
+ .l_inodebits.bits = MDS_INODELOCK_DOM,
+ };
+ int rc;
+
+ ENTRY;
+
+ LASSERT(lh != NULL);
+ LASSERT(ns != NULL);
+ LASSERT(!lustre_handle_is_used(lh));
+
+ rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, &policy, mode,
+ flags, ldlm_blocking_ast,
+ ldlm_completion_ast, ldlm_glimpse_ast,
+ NULL, 0, LVB_T_NONE, NULL, lh);
+
+ RETURN(rc == ELDLM_OK ? 0 : -EIO);
+}
+EXPORT_SYMBOL(tgt_mdt_data_lock);
+
+void tgt_mdt_data_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
+{
+ LASSERT(lustre_handle_is_used(lh));
+ ldlm_lock_decref(lh, mode);
+}
+EXPORT_SYMBOL(tgt_mdt_data_unlock);
+
/**
* Helper function for getting server side [start, start+count] DLM lock
* if asked by client.
__u64 start, __u64 end, struct lustre_handle *lh,
int mode, __u64 *flags)
{
- ldlm_policy_data_t policy;
- int rc;
+ union ldlm_policy_data policy;
+ int rc;
ENTRY;
LASSERT(!lustre_handle_is_used(lh));
policy.l_extent.gid = 0;
- policy.l_extent.start = start & CFS_PAGE_MASK;
+ policy.l_extent.start = start & PAGE_MASK;
/*
* If ->o_blocks is EOF it means "lock till the end of the file".
if (end == OBD_OBJECT_EOF || end < start)
policy.l_extent.end = OBD_OBJECT_EOF;
else
- policy.l_extent.end = end | ~CFS_PAGE_MASK;
+ policy.l_extent.end = end | ~PAGE_MASK;
rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_EXTENT, &policy, mode,
flags, ldlm_blocking_ast,
}
EXPORT_SYMBOL(tgt_extent_lock);
-void tgt_extent_unlock(struct lustre_handle *lh, ldlm_mode_t mode)
+void tgt_extent_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
{
LASSERT(lustre_handle_is_used(lh));
ldlm_lock_decref(lh, mode);
}
EXPORT_SYMBOL(tgt_extent_unlock);
-int tgt_brw_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
- struct obd_ioobj *obj, struct niobuf_remote *nb,
- struct lustre_handle *lh, int mode)
+static int tgt_brw_lock(struct obd_export *exp, struct ldlm_res_id *res_id,
+ struct obd_ioobj *obj, struct niobuf_remote *nb,
+ struct lustre_handle *lh, enum ldlm_mode mode)
{
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
__u64 flags = 0;
int nrbufs = obj->ioo_bufcnt;
int i;
+ int rc;
ENTRY;
LASSERT(mode == LCK_PR || mode == LCK_PW);
LASSERT(!lustre_handle_is_used(lh));
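+
+ /* no server-side lock is taken while the target is in recovery */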
+ if (ns->ns_obd->obd_recovering)
+ RETURN(0);
+
if (nrbufs == 0 || !(nb[0].rnb_flags & OBD_BRW_SRVLOCK))
RETURN(0);
if (!(nb[i].rnb_flags & OBD_BRW_SRVLOCK))
RETURN(-EFAULT);
- RETURN(tgt_extent_lock(ns, res_id, nb[0].rnb_offset,
- nb[nrbufs - 1].rnb_offset +
- nb[nrbufs - 1].rnb_len - 1,
- lh, mode, &flags));
+ /* MDT IO for data-on-mdt */
+ if (exp->exp_connect_data.ocd_connect_flags & OBD_CONNECT_IBITS)
+ rc = tgt_mdt_data_lock(ns, res_id, lh, mode, &flags);
+ else
+ rc = tgt_extent_lock(ns, res_id, nb[0].rnb_offset,
+ nb[nrbufs - 1].rnb_offset +
+ nb[nrbufs - 1].rnb_len - 1,
+ lh, mode, &flags);
+ RETURN(rc);
}
-void tgt_brw_unlock(struct obd_ioobj *obj, struct niobuf_remote *niob,
- struct lustre_handle *lh, int mode)
+static void tgt_brw_unlock(struct obd_ioobj *obj, struct niobuf_remote *niob,
+ struct lustre_handle *lh, enum ldlm_mode mode)
{
ENTRY;
tgt_extent_unlock(lh, mode);
EXIT;
}
-
-static __u32 tgt_checksum_bulk(struct lu_target *tgt,
- struct ptlrpc_bulk_desc *desc, int opc,
- cksum_type_t cksum_type)
+static int tgt_checksum_niobuf(struct lu_target *tgt,
+ struct niobuf_local *local_nb, int npages,
+ int opc, enum cksum_types cksum_type,
+ __u32 *cksum)
{
struct cfs_crypto_hash_desc *hdesc;
unsigned int bufsize;
int i, err;
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
- __u32 cksum;
hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
if (IS_ERR(hdesc)) {
}
CDEBUG(D_INFO, "Checksum for algo %s\n", cfs_crypto_hash_name(cfs_alg));
- for (i = 0; i < desc->bd_iov_count; i++) {
+ for (i = 0; i < npages; i++) {
/* corrupt the data before we compute the checksum, to
* simulate a client->OST data error */
if (i == 0 && opc == OST_WRITE &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
- int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
- int len = desc->bd_iov[i].kiov_len;
+ int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
+ int len = local_nb[i].lnb_len;
struct page *np = tgt_page_to_corrupt;
- char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
if (np) {
- char *ptr2 = kmap(np) + off;
+ char *ptr = ll_kmap_atomic(local_nb[i].lnb_page,
+ KM_USER0);
+ char *ptr2 = page_address(np);
+
+ memcpy(ptr2 + off, ptr + off, len);
+ memcpy(ptr2 + off, "bad3", min(4, len));
+ ll_kunmap_atomic(ptr, KM_USER0);
- memcpy(ptr2, ptr, len);
- memcpy(ptr2, "bad3", min(4, len));
- kunmap(np);
- desc->bd_iov[i].kiov_page = np;
+ /* LU-8376 to preserve original index for
+ * display in dump_all_bulk_pages() */
+ np->index = i;
+
+ cfs_crypto_hash_update_page(hdesc, np, off,
+ len);
+ continue;
} else {
CERROR("%s: can't alloc page for corruption\n",
tgt_name(tgt));
}
}
- cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
- desc->bd_iov[i].kiov_len);
+ cfs_crypto_hash_update_page(hdesc, local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
+ local_nb[i].lnb_len);
/* corrupt the data after we compute the checksum, to
* simulate an OST->client data error */
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
- int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
- int len = desc->bd_iov[i].kiov_len;
+ int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
+ int len = local_nb[i].lnb_len;
struct page *np = tgt_page_to_corrupt;
- char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
if (np) {
- char *ptr2 = kmap(np) + off;
+ char *ptr = ll_kmap_atomic(local_nb[i].lnb_page,
+ KM_USER0);
+ char *ptr2 = page_address(np);
+
+ memcpy(ptr2 + off, ptr + off, len);
+ memcpy(ptr2 + off, "bad4", min(4, len));
+ ll_kunmap_atomic(ptr, KM_USER0);
- memcpy(ptr2, ptr, len);
- memcpy(ptr2, "bad4", min(4, len));
- kunmap(np);
- desc->bd_iov[i].kiov_page = np;
+ /* LU-8376 to preserve original index for
+ * display in dump_all_bulk_pages() */
+ np->index = i;
+
+ cfs_crypto_hash_update_page(hdesc, np, off,
+ len);
+ continue;
} else {
CERROR("%s: can't alloc page for corruption\n",
tgt_name(tgt));
}
}
- bufsize = sizeof(cksum);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+ bufsize = sizeof(*cksum);
+ err = cfs_crypto_hash_final(hdesc, (unsigned char *)cksum, &bufsize);
+
+ return 0;
+}
+
+char dbgcksum_file_name[PATH_MAX];
+
+static void dump_all_bulk_pages(struct obdo *oa, int count,
+ struct niobuf_local *local_nb,
+ __u32 server_cksum, __u32 client_cksum)
+{
+ struct file *filp;
+ int rc, i;
+ unsigned int len;
+ char *buf;
+ mm_segment_t oldfs;
+
+ /* only keep a dump of the pages for the first error on a given
+ * file/fid range; resends/retries of the same range are not dumped again. */
+ snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
+ "%s-checksum_dump-ost-"DFID":[%llu-%llu]-%x-%x",
+ (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0 ?
+ libcfs_debug_file_path_arr :
+ LIBCFS_DEBUG_FILE_PATH_DEFAULT),
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
+ local_nb[0].lnb_file_offset,
+ local_nb[count-1].lnb_file_offset +
+ local_nb[count-1].lnb_len - 1, client_cksum, server_cksum);
+ filp = filp_open(dbgcksum_file_name,
+ O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
+ if (IS_ERR(filp)) {
+ rc = PTR_ERR(filp);
+ if (rc == -EEXIST)
+ CDEBUG(D_INFO, "%s: can't open to dump pages with "
+ "checksum error: rc = %d\n", dbgcksum_file_name,
+ rc);
+ else
+ CERROR("%s: can't open to dump pages with checksum "
+ "error: rc = %d\n", dbgcksum_file_name, rc);
+ return;
+ }
+
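+ /* dumping from kernel buffers: temporarily lift the user-space
+ * address limit so vfs_write() accepts kmap'ed pages */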
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ for (i = 0; i < count; i++) {
+ len = local_nb[i].lnb_len;
+ buf = kmap(local_nb[i].lnb_page);
+ while (len != 0) {
+ rc = vfs_write(filp, (__force const char __user *)buf,
+ len, &filp->f_pos);
+ if (rc < 0) {
+ CERROR("%s: wanted to write %u but got %d "
+ "error\n", dbgcksum_file_name, len, rc);
+ break;
+ }
+ len -= rc;
+ buf += rc;
+ CDEBUG(D_INFO, "%s: wrote %d bytes\n",
+ dbgcksum_file_name, rc);
+ }
+ kunmap(local_nb[i].lnb_page);
+ }
+ set_fs(oldfs);
+
+ rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
+ if (rc)
+ CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
+ filp_close(filp, NULL);
+ return;
+}
+
+static int check_read_checksum(struct niobuf_local *local_nb, int npages,
+ struct obd_export *exp, struct obdo *oa,
+ const struct lnet_process_id *peer,
+ __u32 client_cksum, __u32 server_cksum,
+ enum cksum_types server_cksum_type)
+{
+ char *msg;
+ enum cksum_types cksum_type;
+ loff_t start, end;
+
+ /* unlikely to happen: only possible if the resend was not triggered
+ * by a checksum failure detected on the client */
+ if (unlikely(server_cksum == client_cksum)) {
+ CDEBUG(D_PAGE, "checksum %x confirmed upon retry\n",
+ client_cksum);
+ return 0;
+ }
+
+ if (exp->exp_obd->obd_checksum_dump)
+ dump_all_bulk_pages(oa, npages, local_nb, server_cksum,
+ client_cksum);
- return cksum;
+ cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
+ oa->o_flags : 0);
+
+ if (cksum_type != server_cksum_type)
+ msg = "the server may not have used the checksum type specified"
+ " in the original request - likely a protocol problem";
+ else
+ msg = "should have changed on the client or in transit";
+
+ start = local_nb[0].lnb_file_offset;
+ end = local_nb[npages-1].lnb_file_offset +
+ local_nb[npages-1].lnb_len - 1;
+
+ LCONSOLE_ERROR_MSG(0x132, "%s: BAD READ CHECKSUM: %s: from %s inode "
+ DFID " object "DOSTID" extent [%llu-%llu], client returned csum"
+ " %x (type %x), server csum %x (type %x)\n",
+ exp->exp_obd->obd_name,
+ msg, libcfs_nid2str(peer->nid),
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
+ POSTID(&oa->o_oi),
+ start, end, client_cksum, cksum_type, server_cksum,
+ server_cksum_type);
+
+ return 1;
+}
+
+static int tgt_pages2shortio(struct niobuf_local *local, int npages,
+ unsigned char *buf, int size)
+{
+ int i, off, len, copied = size;
+ char *ptr;
+
+ for (i = 0; i < npages; i++) {
+ off = local[i].lnb_page_offset & ~PAGE_MASK;
+ len = local[i].lnb_len;
+
+ CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",
+ i, off, len, size);
+ if (len > size)
+ return -EINVAL;
+
+ ptr = ll_kmap_atomic(local[i].lnb_page, KM_USER0);
+ memcpy(buf + off, ptr, len);
+ ll_kunmap_atomic(ptr, KM_USER0);
+ buf += len;
+ size -= len;
+ }
+ return copied - size;
}
int tgt_brw_read(struct tgt_session_info *tsi)
struct ost_body *body, *repbody;
struct l_wait_info lwi;
struct lustre_handle lockh = { 0 };
- int npages, nob = 0, rc, i, no_reply = 0;
+ int npages, nob = 0, rc, i, no_reply = 0,
+ npages_read;
struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
ENTRY;
- if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL) {
+ if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL &&
+ ptlrpc_req2svc(req)->srv_req_portal != MDS_IO_PORTAL) {
CERROR("%s: deny read request from %s to portal %u\n",
tgt_name(tsi->tsi_tgt),
obd_export_nid2str(req->rq_export),
local_nb = tbc->local;
- rc = tgt_brw_lock(exp->exp_obd->obd_namespace, &tsi->tsi_resid, ioo,
- remote_nb, &lockh, LCK_PR);
+ rc = tgt_brw_lock(exp, &tsi->tsi_resid, ioo, remote_nb, &lockh,
+ LCK_PR);
if (rc != 0)
RETURN(rc);
* If getting the lock took more time than
* client was willing to wait, drop it. b=11330
*/
- if (cfs_time_current_sec() > req->rq_deadline ||
+ if (ktime_get_real_seconds() > req->rq_deadline ||
OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
no_reply = 1;
- CERROR("Dropping timed-out read from %s because locking"
- "object "DOSTID" took %ld seconds (limit was %ld).\n",
+ CERROR("Dropping timed-out read from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
libcfs_id2str(req->rq_peer), POSTID(&ioo->ioo_oid),
- cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
req->rq_deadline - req->rq_arrival_time.tv_sec);
GOTO(out_lock, rc = -ETIMEDOUT);
}
npages = PTLRPC_MAX_BRW_PAGES;
rc = obd_preprw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1,
- ioo, remote_nb, &npages, local_nb, NULL, BYPASS_CAPA);
+ ioo, remote_nb, &npages, local_nb);
if (rc != 0)
GOTO(out_lock, rc);
- desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
- BULK_PUT_SOURCE, OST_BULK_PORTAL);
- if (desc == NULL)
- GOTO(out_commitrw, rc = -ENOMEM);
+ if (body->oa.o_flags & OBD_FL_SHORT_IO) {
+ desc = NULL;
+ } else {
+ desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
+ PTLRPC_BULK_PUT_SOURCE |
+ PTLRPC_BULK_BUF_KIOV,
+ OST_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_nopin_ops);
+ if (desc == NULL)
+ GOTO(out_commitrw, rc = -ENOMEM);
+ }
nob = 0;
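+ /* npages_read tracks how many pages actually hold data; it is used
+ * below for the checksum and short I/O paths */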
+ npages_read = npages;
for (i = 0; i < npages; i++) {
int page_rc = local_nb[i].lnb_rc;
if (page_rc < 0) {
rc = page_rc;
+ npages_read = i;
break;
}
nob += page_rc;
- if (page_rc != 0) { /* some data! */
+ if (page_rc != 0 && desc != NULL) { /* some data! */
LASSERT(local_nb[i].lnb_page != NULL);
- ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].lnb_page,
- local_nb[i].lnb_page_offset,
- page_rc);
+ desc->bd_frag_ops->add_kiov_frag
+ (desc, local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
+ page_rc);
}
if (page_rc != local_nb[i].lnb_len) { /* short read */
+ local_nb[i].lnb_len = page_rc;
+ npages_read = i + (page_rc != 0 ? 1 : 0);
/* All subsequent pages should be 0 */
while (++i < npages)
LASSERT(local_nb[i].lnb_rc == 0);
break;
}
}
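+
+ /* fault injection: fail the read with -E2BIG unless the number of
+ * bytes read matches cfs_fail_val */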
+ if (OBD_FAIL_CHECK(OBD_FAIL_OST_READ_SIZE) &&
+ nob != cfs_fail_val)
+ rc = -E2BIG;
if (body->oa.o_valid & OBD_MD_FLCKSUM) {
- cksum_type_t cksum_type =
+ enum cksum_types cksum_type =
cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
body->oa.o_flags : 0);
+
repbody->oa.o_flags = cksum_type_pack(cksum_type);
repbody->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
- repbody->oa.o_cksum = tgt_checksum_bulk(tsi->tsi_tgt, desc,
- OST_READ, cksum_type);
+ rc = tgt_checksum_niobuf(tsi->tsi_tgt, local_nb,
+ npages_read, OST_READ, cksum_type,
+ &repbody->oa.o_cksum);
+ if (rc < 0)
+ GOTO(out_commitrw, rc);
+
CDEBUG(D_PAGE, "checksum at read origin: %x\n",
repbody->oa.o_cksum);
+
+ /* if a resend it could be for a cksum error, so check Server
+ * cksum with returned Client cksum (this should even cover
+ * zero-cksum case) */
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) &&
+ (body->oa.o_flags & OBD_FL_RECOV_RESEND))
+ check_read_checksum(local_nb, npages_read, exp,
+ &body->oa, &req->rq_peer,
+ body->oa.o_cksum,
+ repbody->oa.o_cksum, cksum_type);
} else {
repbody->oa.o_valid = 0;
}
/* Check if client was evicted while we were doing i/o before touching
* network */
- if (likely(rc == 0 &&
- !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
- rc = target_bulk_io(exp, desc, &lwi);
+ if (rc == 0) {
+ if (body->oa.o_flags & OBD_FL_SHORT_IO) {
+ unsigned char *short_io_buf;
+ int short_io_size;
+
+ short_io_buf = req_capsule_server_get(&req->rq_pill,
+ &RMF_SHORT_IO);
+ short_io_size = req_capsule_get_size(&req->rq_pill,
+ &RMF_SHORT_IO,
+ RCL_SERVER);
+ rc = tgt_pages2shortio(local_nb, npages_read,
+ short_io_buf, short_io_size);
+ if (rc >= 0)
+ req_capsule_shrink(&req->rq_pill,
+ &RMF_SHORT_IO, rc,
+ RCL_SERVER);
+ rc = rc > 0 ? 0 : rc;
+ } else if (!CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) {
+ rc = target_bulk_io(exp, desc, &lwi);
+ }
no_reply = rc != 0;
+ } else {
+ if (body->oa.o_flags & OBD_FL_SHORT_IO)
+ req_capsule_shrink(&req->rq_pill, &RMF_SHORT_IO, 0,
+ RCL_SERVER);
}
out_commitrw:
/* Must commit after prep above in all cases */
- rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp,
- &repbody->oa, 1, ioo, remote_nb, npages, local_nb,
- NULL, rc);
- if (rc == 0)
- tgt_drop_id(exp, &repbody->oa);
+ rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1, ioo,
+ remote_nb, npages, local_nb, rc);
out_lock:
tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PR);
if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
- ptlrpc_free_bulk_nopin(desc);
+ ptlrpc_free_bulk(desc);
LASSERT(rc <= 0);
if (rc == 0) {
obd_export_nid2str(exp), rc);
}
/* send a bulk after reply to simulate a network delay or reordering
- * by a router */
- if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
+ * by a router - Note that !desc implies short io, so there is no bulk
+ * to reorder. */
+ if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) &&
+ desc) {
wait_queue_head_t waitq;
struct l_wait_info lwi1;
lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
l_wait_event(waitq, 0, &lwi1);
target_bulk_io(exp, desc, &lwi);
- ptlrpc_free_bulk_nopin(desc);
+ ptlrpc_free_bulk(desc);
}
RETURN(rc);
}
EXPORT_SYMBOL(tgt_brw_read);
+static int tgt_shortio2pages(struct niobuf_local *local, int npages,
+ unsigned char *buf, int size)
+{
+ int i, off, len;
+ char *ptr;
+
+ for (i = 0; i < npages; i++) {
+ off = local[i].lnb_page_offset & ~PAGE_MASK;
+ len = local[i].lnb_len;
+
+ if (len == 0)
+ continue;
+
+ CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",
+ i, off, len, size);
+ ptr = ll_kmap_atomic(local[i].lnb_page, KM_USER0);
+ if (ptr == NULL)
+ return -EINVAL;
+ memcpy(ptr + off, buf, len < size ? len : size);
+ ll_kunmap_atomic(ptr, KM_USER0);
+ buf += len;
+ size -= len;
+ }
+ return 0;
+}
+
static void tgt_warn_on_cksum(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc,
struct niobuf_local *local_nb, int npages,
{
struct obd_export *exp = req->rq_export;
struct ost_body *body;
- char *router;
- char *via;
+ char *router = "";
+ char *via = "";
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body != NULL);
- if (req->rq_peer.nid == desc->bd_sender) {
- via = router = "";
- } else {
+ if (desc && req->rq_peer.nid != desc->bd_sender) {
via = " via ";
router = libcfs_nid2str(desc->bd_sender);
}
+ if (exp->exp_obd->obd_checksum_dump)
+ dump_all_bulk_pages(&body->oa, npages, local_nb, server_cksum,
+ client_cksum);
+
if (mmap) {
CDEBUG_LIMIT(D_INFO, "client csum %x, server csum %x\n",
client_cksum, server_cksum);
return;
}
- LCONSOLE_ERROR_MSG(0x168, "BAD WRITE CHECKSUM: %s from %s%s%s inode "
- DFID" object "DOSTID" extent ["LPU64"-"LPU64
+ LCONSOLE_ERROR_MSG(0x168, "%s: BAD WRITE CHECKSUM: from %s%s%s inode "
+ DFID" object "DOSTID" extent [%llu-%llu"
"]: client csum %x, server csum %x\n",
exp->exp_obd->obd_name, libcfs_id2str(req->rq_peer),
via, router,
__u32 *rcs;
int objcount, niocount, npages;
int rc, i, j;
- cksum_type_t cksum_type = OBD_CKSUM_CRC32;
+ enum cksum_types cksum_type = OBD_CKSUM_CRC32;
bool no_reply = false, mmap;
struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
+ bool wait_sync = false;
ENTRY;
- if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL) {
+ if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL &&
+ ptlrpc_req2svc(req)->srv_req_portal != MDS_IO_PORTAL) {
CERROR("%s: deny write request from %s to portal %u\n",
tgt_name(tsi->tsi_tgt),
obd_export_nid2str(req->rq_export),
RETURN(err_serious(-EPROTO));
if ((remote_nb[0].rnb_flags & OBD_BRW_MEMALLOC) &&
- (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self))
+ ptlrpc_connection_is_local(exp->exp_connection))
memory_pressure_set();
req_capsule_set_size(&req->rq_pill, &RMF_RCS, RCL_SERVER,
local_nb = tbc->local;
- rc = tgt_brw_lock(exp->exp_obd->obd_namespace, &tsi->tsi_resid, ioo,
- remote_nb, &lockh, LCK_PW);
+ rc = tgt_brw_lock(exp, &tsi->tsi_resid, ioo, remote_nb, &lockh,
+ LCK_PW);
if (rc != 0)
GOTO(out, rc);
* If getting the lock took more time than
* client was willing to wait, drop it. b=11330
*/
- if (cfs_time_current_sec() > req->rq_deadline ||
+ if (ktime_get_real_seconds() > req->rq_deadline ||
OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
no_reply = true;
- CERROR("%s: Dropping timed-out write from %s because locking "
- "object "DOSTID" took %ld seconds (limit was %ld).\n",
+ CERROR("%s: Dropping timed-out write from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
tgt_name(tsi->tsi_tgt), libcfs_id2str(req->rq_peer),
POSTID(&ioo->ioo_oid),
- cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
req->rq_deadline - req->rq_arrival_time.tv_sec);
GOTO(out_lock, rc = -ETIMEDOUT);
}
npages = PTLRPC_MAX_BRW_PAGES;
rc = obd_preprw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
- objcount, ioo, remote_nb, &npages, local_nb, NULL,
- BYPASS_CAPA);
+ objcount, ioo, remote_nb, &npages, local_nb);
if (rc < 0)
GOTO(out_lock, rc);
+ if (body->oa.o_flags & OBD_FL_SHORT_IO) {
+ int short_io_size;
+ unsigned char *short_io_buf;
+
+ short_io_size = req_capsule_get_size(&req->rq_pill,
+ &RMF_SHORT_IO,
+ RCL_CLIENT);
+ short_io_buf = req_capsule_client_get(&req->rq_pill,
+ &RMF_SHORT_IO);
+ CDEBUG(D_INFO, "Client uses short I/O for data transfer,"
+ " size = %d\n", short_io_size);
+
+ /* Copy short io buf to pages */
+ rc = tgt_shortio2pages(local_nb, npages, short_io_buf,
+ short_io_size);
+ desc = NULL;
+ } else {
+ desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
+ PTLRPC_BULK_GET_SINK |
+ PTLRPC_BULK_BUF_KIOV,
+ OST_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_nopin_ops);
+ if (desc == NULL)
+ GOTO(skip_transfer, rc = -ENOMEM);
+
+ /* NB Having prepped, we must commit... */
+ for (i = 0; i < npages; i++)
+ desc->bd_frag_ops->add_kiov_frag(desc,
+ local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
+ local_nb[i].lnb_len);
+
+ rc = sptlrpc_svc_prep_bulk(req, desc);
+ if (rc != 0)
+ GOTO(skip_transfer, rc);
- desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
- BULK_GET_SINK, OST_BULK_PORTAL);
- if (desc == NULL)
- GOTO(skip_transfer, rc = -ENOMEM);
-
- /* NB Having prepped, we must commit... */
- for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].lnb_page,
- local_nb[i].lnb_page_offset,
- local_nb[i].lnb_len);
-
- rc = sptlrpc_svc_prep_bulk(req, desc);
- if (rc != 0)
- GOTO(skip_transfer, rc);
+ rc = target_bulk_io(exp, desc, &lwi);
+ }
- rc = target_bulk_io(exp, desc, &lwi);
no_reply = rc != 0;
skip_transfer:
repbody->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
repbody->oa.o_flags &= ~OBD_FL_CKSUM_ALL;
repbody->oa.o_flags |= cksum_type_pack(cksum_type);
- repbody->oa.o_cksum = tgt_checksum_bulk(tsi->tsi_tgt, desc,
- OST_WRITE, cksum_type);
+ rc = tgt_checksum_niobuf(tsi->tsi_tgt, local_nb,
+ npages, OST_WRITE, cksum_type,
+ &repbody->oa.o_cksum);
+ if (rc < 0)
+ GOTO(out_commitrw, rc);
+
cksum_counter++;
if (unlikely(body->oa.o_cksum != repbody->oa.o_cksum)) {
}
}
+out_commitrw:
/* Must commit after prep above in all cases */
rc = obd_commitrw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
- objcount, ioo, remote_nb, npages, local_nb, NULL,
- rc);
+ objcount, ioo, remote_nb, npages, local_nb, rc);
if (rc == -ENOTCONN)
/* quota acquire process has been given up because
* either the client has been evicted or the client
* has timed out the request already */
no_reply = true;
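+
+ /* note whether the client asked for a sync write: any niobuf
+ * without the OBD_BRW_ASYNC flag */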
+ for (i = 0; i < niocount; i++) {
+ if (!(local_nb[i].lnb_flags & OBD_BRW_ASYNC)) {
+ wait_sync = true;
+ break;
+ }
+ }
/*
* Disable sending mtime back to the client. If the client locked the
* whole object, then it has already updated the mtime on its side,
}
LASSERT(j == npages);
ptlrpc_lprocfs_brw(req, nob);
-
- tgt_drop_id(exp, &repbody->oa);
}
out_lock:
tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PW);
if (desc)
- ptlrpc_free_bulk_nopin(desc);
+ ptlrpc_free_bulk(desc);
out:
- if (no_reply) {
+ if (unlikely(no_reply || (exp->exp_obd->obd_no_transno && wait_sync))) {
req->rq_no_reply = 1;
/* reply out callback would free */
ptlrpc_req_drop_rs(req);
- LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s), "
- "client will retry: rc %d\n",
- exp->exp_obd->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp), rc);
+ if (!exp->exp_obd->obd_no_transno)
+ LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s),"
+ " client will retry: rc = %d\n",
+ exp->exp_obd->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp), rc);
}
memory_pressure_clr();
RETURN(rc);
}
EXPORT_SYMBOL(tgt_brw_write);
+
+/* Check if request can be reconstructed from saved reply data
+ * A copy of the reply data is returned in @trd if the pointer is not NULL
+ */
+bool req_can_reconstruct(struct ptlrpc_request *req,
+ struct tg_reply_data *trd)
+{
+ struct tg_export_data *ted = &req->rq_export->exp_target_data;
+ struct lsd_client_data *lcd = ted->ted_lcd;
+ bool found;
+
+ if (tgt_is_multimodrpcs_client(req->rq_export))
+ return tgt_lookup_reply(req, trd);
+
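+ /* single-slot client: the request can be reconstructed if its xid
+ * matches the last saved xid or the last close xid */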
+ mutex_lock(&ted->ted_lcd_lock);
+ found = req->rq_xid == lcd->lcd_last_xid ||
+ req->rq_xid == lcd->lcd_last_close_xid;
+
+ if (found && trd != NULL) {
+ if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE) {
+ trd->trd_reply.lrd_xid = lcd->lcd_last_close_xid;
+ trd->trd_reply.lrd_transno =
+ lcd->lcd_last_close_transno;
+ trd->trd_reply.lrd_result = lcd->lcd_last_close_result;
+ } else {
+ trd->trd_reply.lrd_xid = lcd->lcd_last_xid;
+ trd->trd_reply.lrd_transno = lcd->lcd_last_transno;
+ trd->trd_reply.lrd_result = lcd->lcd_last_result;
+ trd->trd_reply.lrd_data = lcd->lcd_last_data;
+ trd->trd_pre_versions[0] = lcd->lcd_pre_versions[0];
+ trd->trd_pre_versions[1] = lcd->lcd_pre_versions[1];
+ trd->trd_pre_versions[2] = lcd->lcd_pre_versions[2];
+ trd->trd_pre_versions[3] = lcd->lcd_pre_versions[3];
+ }
+ }
+ mutex_unlock(&ted->ted_lcd_lock);
+
+ return found;
+}
+EXPORT_SYMBOL(req_can_reconstruct);
+