4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2013, 2017, Intel Corporation.
27 * lustre/target/tgt_handler.c
29 * Lustre Unified Target request handler code
31 * Author: Brian Behlendorf <behlendorf1@llnl.gov>
32 * Author: Mikhail Pershin <mike.pershin@intel.com>
35 #define DEBUG_SUBSYSTEM S_CLASS
37 #include <linux/user_namespace.h>
38 #include <linux/delay.h>
39 #include <linux/uidgid.h>
41 #include <libcfs/linux/linux-mem.h>
43 #include <obd_class.h>
44 #include <obd_cksum.h>
45 #include <lustre_lfsck.h>
46 #include <lustre_nodemap.h>
47 #include <lustre_acl.h>
49 #include "tgt_internal.h"
51 char *tgt_name(struct lu_target *tgt)
53 LASSERT(tgt->lut_obd != NULL);
54 return tgt->lut_obd->obd_name;
56 EXPORT_SYMBOL(tgt_name);
59 * Generic code handling requests that have struct mdt_body passed in:
61 * - extract mdt_body from request and save it in @tsi, if present;
63 * - create lu_object, corresponding to the fid in mdt_body, and save it in @tsi;
66 * - if the HAS_BODY flag is set for this request type, check whether the object
67 * actually exists on storage (lu_object_exists()).
70 static int tgt_mdt_body_unpack(struct tgt_session_info *tsi, __u32 flags)
72 const struct mdt_body *body;
73 struct lu_object *obj;
74 struct req_capsule *pill = tsi->tsi_pill;
79 body = req_capsule_client_get(pill, &RMF_MDT_BODY);
83 tsi->tsi_mdt_body = body;
85 if (!(body->mbo_valid & OBD_MD_FLID))
88 /* mdc_pack_body() in pre-2.5 clients doesn't check if the fid is zero and
89 * sets OBD_MD_FLID in any case. Fix that here if needed */
90 if (unlikely(fid_is_zero(&body->mbo_fid1)))
93 if (!fid_is_sane(&body->mbo_fid1)) {
94 CERROR("%s: invalid FID: "DFID"\n", tgt_name(tsi->tsi_tgt),
95 PFID(&body->mbo_fid1));
99 obj = lu_object_find(tsi->tsi_env,
100 &tsi->tsi_tgt->lut_bottom->dd_lu_dev,
101 &body->mbo_fid1, NULL);
103 if ((flags & HAS_BODY) && !lu_object_exists(obj)) {
104 lu_object_put(tsi->tsi_env, obj);
107 tsi->tsi_corpus = obj;
114 tsi->tsi_fid = body->mbo_fid1;
120 * Validate oa from client.
121 * If the request comes from 2.0 clients, currently only RSVD seq and IDIF requests are valid:
123 * a. objects in Single MDT FS seq = FID_SEQ_OST_MDT0, oi_id != 0
124 * b. Echo objects (seq = 2), the old echo client still uses oi_id/oi_seq to
125 * pack ost_id. Because a non-zero oi_seq will make it difficult to tell
126 * whether this is an oi_fid or a real ostid. So it will check
127 * OBD_CONNECT_FID, then convert the ostid to a FID for old clients.
128 * c. Old FID-disabled osc clients will send an IDIF.
129 * d. New FID-enabled osc/osp clients will send a normal FID.
131 * Also, oi_id/f_oid should always start from 1. oi_id/f_oid = 0 is
132 * reserved for the LAST_ID file, and is only accessed inside the OST now.
134 int tgt_validate_obdo(struct tgt_session_info *tsi, struct obdo *oa)
136 struct ost_id *oi = &oa->o_oi;
137 u64 seq = ostid_seq(oi);
138 u64 id = ostid_id(oi);
142 if (unlikely(!(exp_connect_flags(tsi->tsi_exp) & OBD_CONNECT_FID) &&
143 fid_seq_is_echo(seq))) {
144 /* Sigh, 2.[123] clients still send echo requests with oi_id = 0
145 * during create, and we will reset this to 1, since this
146 * oi_id is basically useless in the following create process,
147 * but oi_id == 0 will make it difficult to tell whether it is
148 * a real FID or an ost_id. */
149 oi->oi_fid.f_seq = FID_SEQ_ECHO;
150 oi->oi_fid.f_oid = id ?: 1;
151 oi->oi_fid.f_ver = 0;
153 struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
155 if (unlikely((oa->o_valid & OBD_MD_FLID) && id == 0))
156 GOTO(out, rc = -EPROTO);
158 /* Note: this check might be forced in 2.5 or 2.6, i.e.
159 * all of the requests would be required to set up FLGROUP */
160 if (unlikely(!(oa->o_valid & OBD_MD_FLGROUP)))
161 oa->o_valid |= OBD_MD_FLGROUP;
163 if (unlikely(!(fid_seq_is_idif(seq) || fid_seq_is_mdt0(seq) ||
164 fid_seq_is_norm(seq) || fid_seq_is_echo(seq))))
165 GOTO(out, rc = -EPROTO);
167 rc = ostid_to_fid(&tti->tti_fid1, oi,
168 tsi->tsi_tgt->lut_lsd.lsd_osd_index);
169 if (unlikely(rc != 0))
172 oi->oi_fid = tti->tti_fid1;
178 CERROR("%s: client %s sent bad object "DOSTID": rc = %d\n",
179 tgt_name(tsi->tsi_tgt), obd_export_nid2str(tsi->tsi_exp),
183 EXPORT_SYMBOL(tgt_validate_obdo);
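/*
 * Sanity-check the bulk I/O descriptors sent by the client: exactly one
 * obd_ioobj, a power-of-two max BRW count, and a non-zero buffer count
 * that fits within PTLRPC_MAX_BRW_PAGES.
 */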
185 static int tgt_io_data_unpack(struct tgt_session_info *tsi, struct ost_id *oi)
188 struct niobuf_remote *rnb;
189 struct obd_ioobj *ioo;
194 ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
198 rnb = req_capsule_client_get(tsi->tsi_pill, &RMF_NIOBUF_REMOTE);
202 max_brw = ioobj_max_brw_get(ioo);
203 if (unlikely((max_brw & (max_brw - 1)) != 0)) {
204 CERROR("%s: client %s sent bad ioobj max %u for "DOSTID
205 ": rc = %d\n", tgt_name(tsi->tsi_tgt),
206 obd_export_nid2str(tsi->tsi_exp), max_brw,
207 POSTID(oi), -EPROTO);
212 obj_count = req_capsule_get_size(tsi->tsi_pill, &RMF_OBD_IOOBJ,
213 RCL_CLIENT) / sizeof(*ioo);
214 if (obj_count == 0) {
215 CERROR("%s: short ioobj\n", tgt_name(tsi->tsi_tgt));
217 } else if (obj_count > 1) {
218 CERROR("%s: too many ioobjs (%d)\n", tgt_name(tsi->tsi_tgt),
223 if (ioo->ioo_bufcnt == 0) {
224 CERROR("%s: ioo has zero bufcnt\n", tgt_name(tsi->tsi_tgt));
228 if (ioo->ioo_bufcnt > PTLRPC_MAX_BRW_PAGES) {
229 DEBUG_REQ(D_RPCTRACE, tgt_ses_req(tsi),
230 "bulk has too many pages (%d)",
238 static int tgt_ost_body_unpack(struct tgt_session_info *tsi, __u32 flags)
240 struct ost_body *body;
241 struct req_capsule *pill = tsi->tsi_pill;
242 struct lu_nodemap *nodemap;
247 body = req_capsule_client_get(pill, &RMF_OST_BODY);
251 rc = tgt_validate_obdo(tsi, &body->oa);
255 nodemap = nodemap_get_from_exp(tsi->tsi_exp);
257 RETURN(PTR_ERR(nodemap));
259 body->oa.o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
260 NODEMAP_CLIENT_TO_FS,
262 body->oa.o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
263 NODEMAP_CLIENT_TO_FS,
265 body->oa.o_projid = nodemap_map_id(nodemap, NODEMAP_PROJID,
266 NODEMAP_CLIENT_TO_FS,
268 nodemap_putref(nodemap);
270 tsi->tsi_ost_body = body;
271 tsi->tsi_fid = body->oa.o_oi.oi_fid;
273 if (req_capsule_has_field(pill, &RMF_OBD_IOOBJ, RCL_CLIENT)) {
274 rc = tgt_io_data_unpack(tsi, &body->oa.o_oi);
279 if (!(body->oa.o_valid & OBD_MD_FLID)) {
280 if (flags & HAS_BODY) {
281 CERROR("%s: OBD_MD_FLID flag is not set in ost_body but OID/FID is mandatory with HAS_BODY\n",
282 tgt_name(tsi->tsi_tgt));
289 ost_fid_build_resid(&tsi->tsi_fid, &tsi->tsi_resid);
292 * The OST doesn't get the object in advance for further use, to prevent
293 * situations with nested object_find calls, which are a potential deadlock.
295 tsi->tsi_corpus = NULL;
300 * Do necessary preprocessing according to handler ->th_flags.
302 static int tgt_request_preprocess(struct tgt_session_info *tsi,
303 struct tgt_handler *h,
304 struct ptlrpc_request *req)
306 struct req_capsule *pill = tsi->tsi_pill;
307 __u32 flags = h->th_flags;
312 if (tsi->tsi_preprocessed)
315 LASSERT(h->th_act != NULL);
316 LASSERT(h->th_opc == lustre_msg_get_opc(req->rq_reqmsg));
317 LASSERT(current->journal_info == NULL);
319 LASSERT(ergo(flags & (HAS_BODY | HAS_REPLY),
321 if (h->th_fmt != NULL) {
322 req_capsule_set(pill, h->th_fmt);
323 if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT) &&
324 req_capsule_field_present(pill, &RMF_MDT_BODY,
326 rc = tgt_mdt_body_unpack(tsi, flags);
329 } else if (req_capsule_has_field(pill, &RMF_OST_BODY,
331 req_capsule_field_present(pill, &RMF_OST_BODY,
333 rc = tgt_ost_body_unpack(tsi, flags);
339 if (flags & IS_MUTABLE && tgt_conn_flags(tsi) & OBD_CONNECT_RDONLY)
342 if (flags & HAS_KEY) {
343 struct ldlm_request *dlm_req;
345 LASSERT(h->th_fmt != NULL);
347 dlm_req = req_capsule_client_get(pill, &RMF_DLM_REQ);
348 if (dlm_req != NULL) {
349 union ldlm_wire_policy_data *policy =
350 &dlm_req->lock_desc.l_policy_data;
352 if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
354 (policy->l_inodebits.bits |
355 policy->l_inodebits.try_bits) == 0)) {
357 * A lock without inodebits makes no sense and
358 * will oops later in ldlm. If the client failed to
359 * set such bits, do not trigger an ASSERTION.
361 * For the liblustre flock case, it may be zero.
365 tsi->tsi_dlm_req = dlm_req;
371 tsi->tsi_preprocessed = 1;
376 * Invoke handler for this request opc. Also do necessary preprocessing
377 * (according to handler ->th_flags), and post-processing (setting of
378 * ->last_{xid,committed}).
380 static int tgt_handle_request0(struct tgt_session_info *tsi,
381 struct tgt_handler *h,
382 struct ptlrpc_request *req)
386 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
391 /* When dealing with sec context requests, no export is associated yet,
392 * because these requests are sent before *_CONNECT requests.
393 * A NULL req->rq_export means the normal *_common_slice handlers will
394 * not be called, because there is no reference to the target.
395 * So deal with them by hand and jump directly to target_send_reply().
399 case SEC_CTX_INIT_CONT:
401 CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
406 * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
407 * to put the same checks into handlers like mdt_close(), mdt_reint(),
408 * etc., without talking to the mdt authors first. Checking the same thing
409 * there again is useless, and returning a 0 error without packing a reply
410 * is buggy! Handlers either pack a reply or return an error.
412 * We return 0 here and do not send any reply in order to emulate a
413 * network failure. Do not send any reply in case any NET-related
414 * fail_id has occurred.
416 if (CFS_FAIL_CHECK_ORSET(h->th_fail_id, CFS_FAIL_ONCE))
418 if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == MDS_REINT &&
419 CFS_FAIL_CHECK(OBD_FAIL_MDS_REINT_MULTI_NET)))
422 /* drop OUT_UPDATE rpc */
423 if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == OUT_UPDATE &&
424 CFS_FAIL_CHECK(OBD_FAIL_OUT_UPDATE_DROP)))
427 rc = tgt_request_preprocess(tsi, h, req);
428 /* pack reply if reply format is fixed */
429 if (rc == 0 && h->th_flags & HAS_REPLY) {
431 if (req_capsule_has_field(tsi->tsi_pill, &RMF_MDT_MD,
433 req_capsule_set_size(tsi->tsi_pill, &RMF_MDT_MD,
435 tsi->tsi_mdt_body->mbo_eadatasize);
436 if (req_capsule_has_field(tsi->tsi_pill, &RMF_LOGCOOKIES,
438 req_capsule_set_size(tsi->tsi_pill, &RMF_LOGCOOKIES,
440 if (req_capsule_has_field(tsi->tsi_pill, &RMF_ACL, RCL_SERVER))
441 req_capsule_set_size(tsi->tsi_pill,
442 &RMF_ACL, RCL_SERVER,
443 LUSTRE_POSIX_ACL_MAX_SIZE_OLD);
445 if (req_capsule_has_field(tsi->tsi_pill, &RMF_SHORT_IO,
447 struct niobuf_remote *remote_nb =
448 req_capsule_client_get(tsi->tsi_pill,
450 struct ost_body *body = tsi->tsi_ost_body;
452 req_capsule_set_size(tsi->tsi_pill, &RMF_SHORT_IO,
454 (body->oa.o_valid & OBD_MD_FLFLAGS &&
455 body->oa.o_flags & OBD_FL_SHORT_IO) ?
456 remote_nb[0].rnb_len : 0);
458 if (req_capsule_has_field(tsi->tsi_pill, &RMF_FILE_ENCCTX,
460 req_capsule_set_size(tsi->tsi_pill, &RMF_FILE_ENCCTX,
463 if (req_capsule_has_field(tsi->tsi_pill, &RMF_OBD_QUOTA_ITER,
465 req_capsule_set_size(tsi->tsi_pill,
466 &RMF_OBD_QUOTA_ITER, RCL_SERVER, 0);
469 rc = req_capsule_server_pack(tsi->tsi_pill);
472 if (likely(rc == 0)) {
474 * Process request, there can be two types of rc:
475 * 1) errors with msg unpack/pack, other failures outside the
476 * operation itself. These are counted as serious errors;
477 * 2) errors during the fs operation, which should be placed in rq_status
481 if (!is_serious(rc) &&
482 !req->rq_no_reply && req->rq_reply_state == NULL) {
483 DEBUG_REQ(D_ERROR, req,
484 "%s: %s handler did not pack reply but returned no error",
485 tgt_name(tsi->tsi_tgt), h->th_name);
488 serious = is_serious(rc);
489 rc = clear_serious(rc);
497 * ELDLM_* codes (which are > 0) should be in rq_status only, as should
498 * all non-serious errors.
500 if (rc > 0 || !serious)
503 LASSERT(current->journal_info == NULL);
505 if (likely(rc == 0 && req->rq_export))
506 target_committed_to_req(req);
509 target_send_reply(req, rc, tsi->tsi_reply_fail_id);
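/*
 * Decide whether a request may be processed while the target is still
 * recovering: a whitelist of opcodes (MDS_SYNC, HSM requests, etc.) is
 * queued via target_queue_recovery_request(), anything else is rejected.
 */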
513 static int tgt_filter_recovery_request(struct ptlrpc_request *req,
514 struct obd_device *obd, int *process)
516 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
523 case MDS_SYNC: /* used in unmounting */
538 case MDS_HSM_PROGRESS:
539 case MDS_HSM_STATE_SET:
540 case MDS_HSM_REQUEST:
542 *process = target_queue_recovery_request(req, obd);
546 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
553 * Handle recovery. Return:
554 * +1: continue request processing;
555 * -ve: abort immediately with the given error code;
556 * 0: send reply with error code in req->rq_status;
558 static int tgt_handle_recovery(struct ptlrpc_request *req, int reply_fail_id)
562 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
567 case SEC_CTX_INIT_CONT:
572 if (!req->rq_export->exp_obd->obd_replayable)
575 /* sanity check: if the xid matches, the request must be marked as
576 * resent or replayed */
577 if (req_can_reconstruct(req, NULL) == 1) {
578 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
579 (MSG_RESENT | MSG_REPLAY))) {
580 DEBUG_REQ(D_WARNING, req,
581 "rq_xid=%llu matches saved XID, expected REPLAY or RESENT flag (%x)",
583 lustre_msg_get_flags(req->rq_reqmsg));
584 req->rq_status = -ENOTCONN;
588 /* else: note the opposite is not always true; a RESENT req after a
589 * failover will usually not match the last_xid, since it was likely
590 * never committed. A REPLAYed request will almost never match the
591 * last xid, however it could for a committed, but still retained,
594 /* Check for aborted recovery... */
595 if (unlikely(req->rq_export->exp_obd->obd_recovering)) {
599 DEBUG_REQ(D_INFO, req, "Got new replay");
600 rc = tgt_filter_recovery_request(req, req->rq_export->exp_obd,
602 if (rc != 0 || !should_process)
604 else if (should_process < 0) {
605 req->rq_status = should_process;
606 rc = ptlrpc_error(req);
613 /* Initial check for request, it is validation mostly */
614 static struct tgt_handler *tgt_handler_find_check(struct ptlrpc_request *req)
616 struct tgt_handler *h;
617 struct tgt_opc_slice *s;
618 struct lu_target *tgt;
619 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
620 /* don't spew error messages for unhandled RPCs */
625 tgt = class_exp2tgt(req->rq_export);
626 if (unlikely(tgt == NULL)) {
627 DEBUG_REQ(D_ERROR, req, "%s: no target for connected export",
628 class_exp2obd(req->rq_export)->obd_name);
629 RETURN(ERR_PTR(-EINVAL));
632 for (s = tgt->lut_slice; s->tos_hs != NULL; s++)
633 if (s->tos_opc_start <= opc && opc < s->tos_opc_end)
636 /* opcode was not found in slice */
637 if (unlikely(s->tos_hs == NULL)) {
639 CERROR("%s: no handler for opcode 0x%x from %s\n",
640 tgt_name(tgt), opc, libcfs_idstr(&req->rq_peer));
643 goto err_unsupported;
646 LASSERT(opc >= s->tos_opc_start && opc < s->tos_opc_end);
647 h = s->tos_hs + (opc - s->tos_opc_start);
648 if (unlikely(h->th_opc == 0)) {
650 CERROR("%s: unsupported opcode 0x%x\n",
654 goto err_unsupported;
657 if (CFS_FAIL_CHECK(OBD_FAIL_OST_OPCODE) && opc == cfs_fail_val)
658 goto err_unsupported;
663 * An unknown opcode does not necessarily mean an insane client. A new
664 * client might send RPCs with new opcodes to an old server. The
665 * client might get desperately stuck there waiting for a reply. So
666 * send an error back here.
668 * An old client might also send RPCs with deprecated opcodes (e.g.
671 * Error in ptlrpc_send_error() is ignored.
673 req->rq_status = -EOPNOTSUPP;
674 ptlrpc_send_error(req, PTLRPC_REPLY_MAYBE_DIFFICULT);
675 RETURN(ERR_PTR(-EOPNOTSUPP));
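/*
 * Check the request XID against the export's last_xid bookkeeping: advance
 * exp_last_xid, reject non-replay requests whose XID is not newer than it,
 * drop requests from an old connection epoch, and let multi-modify-RPC
 * clients release in-memory reply data.
 */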
678 static int process_req_last_xid(struct ptlrpc_request *req)
682 struct obd_export *exp = req->rq_export;
683 struct tg_export_data *ted = &exp->exp_target_data;
684 bool need_lock = tgt_is_multimodrpcs_client(exp);
688 mutex_lock(&ted->ted_lcd_lock);
689 /* check request's xid is consistent with export's last_xid */
690 last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
691 if (last_xid > exp->exp_last_xid)
692 exp->exp_last_xid = last_xid;
694 if (req->rq_xid == 0 || req->rq_xid <= exp->exp_last_xid) {
695 /* Some requests are allowed to be sent during replay,
696 * such as OUT update requests and FLD requests, so it
697 * is possible that replay requests have a smaller XID
698 * than the exp_last_xid.
700 * Some non-replay requests may have a smaller XID as well:
703 * - The client sends a no_resend RPC, like statfs;
704 * - The RPC timed out (or hit some other error) on the client,
705 * and was then removed from the unreplied list;
706 * - The client sends some other request to bump the
707 * exp_last_xid on the server;
708 * - The former RPC then gets a chance to be processed;
710 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))
713 DEBUG_REQ(D_WARNING, req,
714 "unexpected xid=%llx != exp_last_xid=%llx, rc = %d",
715 req->rq_xid, exp->exp_last_xid, rc);
720 /* The "last_xid" is the minimum xid among unreplied requests;
721 * if the request is from the previous connection, its xid can
722 * still be larger than "exp_last_xid", so the above check of the
723 * xid is not enough to determine whether the request is delayed.
725 * For example, if some replay request was delayed and caused a
726 * timeout at the client and the replay is restarted, the delayed
727 * replay request will have a larger xid than "exp_last_xid"
729 if (req->rq_export->exp_conn_cnt >
730 lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
732 "Dropping request %llu from an old epoch %u/%u\n",
734 lustre_msg_get_conn_cnt(req->rq_reqmsg),
735 req->rq_export->exp_conn_cnt);
736 req->rq_no_reply = 1;
737 GOTO(out, rc = -ESTALE);
740 /* try to release in-memory reply data */
741 if (tgt_is_multimodrpcs_client(exp)) {
742 tgt_handle_received_xid(exp, last_xid);
743 rc = tgt_handle_tag(req);
748 mutex_unlock(&ted->ted_lcd_lock);
753 int tgt_request_handle(struct ptlrpc_request *req)
755 struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);
757 struct lustre_msg *msg = req->rq_reqmsg;
758 struct tgt_handler *h;
759 struct lu_target *tgt;
760 int request_fail_id = 0;
761 __u32 opc = lustre_msg_get_opc(msg);
762 struct obd_device *obd;
764 bool is_connect = false;
767 if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_TGT_RECOVERY_REQ_RACE))) {
768 if (cfs_fail_val == 0 &&
769 lustre_msg_get_opc(msg) != OBD_PING &&
770 lustre_msg_get_flags(msg) & MSG_REQ_REPLAY_DONE) {
773 wait_event_idle(cfs_race_waitq, (cfs_race_state == 1));
777 req_capsule_init(&req->rq_pill, req, RCL_SERVER);
778 tsi->tsi_pill = &req->rq_pill;
779 tsi->tsi_env = req->rq_svc_thread->t_env;
781 /* if the request has an export then get the handler slice from the
782 * corresponding target, otherwise it should be a connect operation */
783 if (opc == MDS_CONNECT || opc == OST_CONNECT ||
784 opc == MGS_CONNECT) {
786 req_capsule_set(&req->rq_pill, &RQF_CONNECT);
787 rc = target_handle_connect(req);
789 rc = ptlrpc_error(req);
792 /* recovery-small test 18c asks to drop connect reply */
793 if (unlikely(opc == OST_CONNECT &&
794 CFS_FAIL_CHECK(OBD_FAIL_OST_CONNECT_NET2)))
798 if (unlikely(!class_connected_export(req->rq_export))) {
799 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT ||
800 opc == SEC_CTX_FINI) {
801 /* sec context initialization has to be handled
802 * by hand in tgt_handle_request0() */
803 tsi->tsi_reply_fail_id = OBD_FAIL_SEC_CTX_INIT_NET;
805 GOTO(handle_recov, rc = 0);
807 CDEBUG(D_HA, "operation %d on unconnected OST from %s\n",
808 opc, libcfs_idstr(&req->rq_peer));
809 req->rq_status = -ENOTCONN;
810 rc = ptlrpc_error(req);
814 tsi->tsi_tgt = tgt = class_exp2tgt(req->rq_export);
815 tsi->tsi_exp = req->rq_export;
816 if (exp_connect_flags(req->rq_export) & OBD_CONNECT_JOBSTATS)
817 tsi->tsi_jobid = lustre_msg_get_jobid(req->rq_reqmsg);
819 tsi->tsi_jobid = NULL;
822 DEBUG_REQ(D_ERROR, req, "%s: No target for connected export",
823 class_exp2obd(req->rq_export)->obd_name);
824 req->rq_status = -EINVAL;
825 rc = ptlrpc_error(req);
829 /* Skip last_xid processing for the recovery thread, otherwise, the
830 * last_xid on the same request could be processed twice: the first time
831 * when processing the incoming request, and the second time when the
832 * request is being processed by the recovery thread. */
833 obd = class_exp2obd(req->rq_export);
835 /* reset the exp_last_xid on each connection. */
836 req->rq_export->exp_last_xid = 0;
837 } else if (obd->obd_recovery_data.trd_processing_task !=
839 rc = process_req_last_xid(req);
842 rc = ptlrpc_error(req);
847 request_fail_id = tgt->lut_request_fail_id;
848 tsi->tsi_reply_fail_id = tgt->lut_reply_fail_id;
850 h = tgt_handler_find_check(req);
852 req->rq_status = PTR_ERR(h);
853 rc = ptlrpc_error(req);
857 LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
860 if ((cfs_fail_val == 0 || cfs_fail_val == opc) &&
861 CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))
864 rc = lustre_msg_check_version(msg, h->th_version);
866 DEBUG_REQ(D_ERROR, req,
867 "%s: drop malformed request version=%08x expect=%08x",
868 tgt_name(tgt), lustre_msg_get_version(msg),
870 req->rq_status = -EINVAL;
871 rc = ptlrpc_error(req);
876 rc = tgt_handle_recovery(req, tsi->tsi_reply_fail_id);
877 if (likely(rc == 1)) {
878 rc = tgt_handle_request0(tsi, h, req);
884 req_capsule_fini(tsi->tsi_pill);
885 if (tsi->tsi_corpus != NULL) {
886 lu_object_put(tsi->tsi_env, tsi->tsi_corpus);
887 tsi->tsi_corpus = NULL;
891 EXPORT_SYMBOL(tgt_request_handle);
893 /** Assign high priority operations to the request if needed. */
894 int tgt_hpreq_handler(struct ptlrpc_request *req)
896 struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);
897 struct tgt_handler *h;
902 if (req->rq_export == NULL)
905 req_capsule_init(&req->rq_pill, req, RCL_SERVER);
906 tsi->tsi_pill = &req->rq_pill;
907 tsi->tsi_env = req->rq_svc_thread->t_env;
908 tsi->tsi_tgt = class_exp2tgt(req->rq_export);
909 tsi->tsi_exp = req->rq_export;
911 h = tgt_handler_find_check(req);
917 rc = tgt_request_preprocess(tsi, h, req);
918 if (unlikely(rc != 0))
921 if (h->th_hp != NULL)
925 EXPORT_SYMBOL(tgt_hpreq_handler);
927 void tgt_counter_incr(struct obd_export *exp, int opcode)
929 lprocfs_counter_incr(exp->exp_obd->obd_stats, opcode);
930 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats != NULL)
931 lprocfs_counter_incr(exp->exp_nid_stats->nid_stats, opcode);
933 EXPORT_SYMBOL(tgt_counter_incr);
936 * Unified target generic handlers.
939 int tgt_connect_check_sptlrpc(struct ptlrpc_request *req, struct obd_export *exp)
941 struct lu_target *tgt = class_exp2tgt(exp);
942 struct sptlrpc_flavor flvr;
946 LASSERT(tgt->lut_obd);
947 LASSERT(tgt->lut_slice);
949 /* always allow ECHO client */
950 if (unlikely(strcmp(exp->exp_obd->obd_type->typ_name,
951 LUSTRE_ECHO_NAME) == 0)) {
952 exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;
956 if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
957 read_lock(&tgt->lut_sptlrpc_lock);
958 sptlrpc_target_choose_flavor(&tgt->lut_sptlrpc_rset,
962 read_unlock(&tgt->lut_sptlrpc_lock);
964 spin_lock(&exp->exp_lock);
965 exp->exp_sp_peer = req->rq_sp_from;
966 exp->exp_flvr = flvr;
968 /* when on mgs, if no restriction is set, or if the client
969 * NID is on the local node, allow any flavor
971 if ((strcmp(exp->exp_obd->obd_type->typ_name,
972 LUSTRE_MGS_NAME) == 0) &&
973 (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_NULL ||
974 LNetIsPeerLocal(&exp->exp_connection->c_peer.nid)))
975 exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;
977 if (exp->exp_flvr.sf_rpc != SPTLRPC_FLVR_ANY &&
978 exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
979 CERROR("%s: unauthorized rpc flavor %x from %s, "
980 "expect %x\n", tgt_name(tgt),
982 libcfs_nidstr(&req->rq_peer.nid),
983 exp->exp_flvr.sf_rpc);
986 spin_unlock(&exp->exp_lock);
988 if (exp->exp_sp_peer != req->rq_sp_from) {
989 CERROR("%s: RPC source %s doesn't match %s\n",
991 sptlrpc_part2name(req->rq_sp_from),
992 sptlrpc_part2name(exp->exp_sp_peer));
995 rc = sptlrpc_target_export_check(exp, req);
1002 int tgt_adapt_sptlrpc_conf(struct lu_target *tgt)
1004 struct sptlrpc_rule_set tmp_rset;
1007 if (unlikely(tgt == NULL)) {
1008 CERROR("No target passed\n");
1012 sptlrpc_rule_set_init(&tmp_rset);
1013 rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset);
1015 CERROR("%s: failed get sptlrpc rules: rc = %d\n",
1020 sptlrpc_target_update_exp_flavor(tgt->lut_obd, &tmp_rset);
1022 write_lock(&tgt->lut_sptlrpc_lock);
1023 sptlrpc_rule_set_free(&tgt->lut_sptlrpc_rset);
1024 tgt->lut_sptlrpc_rset = tmp_rset;
1025 write_unlock(&tgt->lut_sptlrpc_lock);
1029 EXPORT_SYMBOL(tgt_adapt_sptlrpc_conf);
1031 int tgt_connect(struct tgt_session_info *tsi)
1033 struct ptlrpc_request *req = tgt_ses_req(tsi);
1034 struct obd_connect_data *reply;
1039 /* XXX: better to call this check right after getting a new export but
1040 * before last_rcvd slot allocation to avoid server load upon insecure
1041 * connects. This is to be fixed after unifying all targets.
1043 rc = tgt_connect_check_sptlrpc(req, tsi->tsi_exp);
1047 /* To avoid exposing partially initialized connection flags, changes up
1048 * to this point have been staged in reply->ocd_connect_flags. Now that
1049 * connection handling has completed successfully, atomically update
1050 * the connect flags in the shared export data structure. LU-1623 */
1051 reply = req_capsule_server_get(tsi->tsi_pill, &RMF_CONNECT_DATA);
1052 spin_lock(&tsi->tsi_exp->exp_lock);
1053 *exp_connect_flags_ptr(tsi->tsi_exp) = reply->ocd_connect_flags;
1054 if (reply->ocd_connect_flags & OBD_CONNECT_FLAGS2)
1055 *exp_connect_flags2_ptr(tsi->tsi_exp) =
1056 reply->ocd_connect_flags2;
1057 tsi->tsi_exp->exp_connect_data.ocd_brw_size = reply->ocd_brw_size;
1058 spin_unlock(&tsi->tsi_exp->exp_lock);
1060 if (strcmp(tsi->tsi_exp->exp_obd->obd_type->typ_name,
1061 LUSTRE_MDT_NAME) == 0) {
1062 struct lu_nodemap *nm = NULL;
1064 rc = req_check_sepol(tsi->tsi_pill);
1068 if (tsi->tsi_pill->rc_req->rq_export)
1070 nodemap_get_from_exp(tsi->tsi_pill->rc_req->rq_export);
1072 if (reply->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
1073 reply->ocd_connect_flags2 & OBD_CONNECT2_ENCRYPT) {
1074 bool forbid_encrypt = true;
1077 /* nodemap_get_from_exp returns NULL in case
1078 * nodemap is not active, so we do not forbid
1080 forbid_encrypt = false;
1081 else if (!IS_ERR(nm))
1082 forbid_encrypt = nm->nmf_forbid_encryption;
1084 GOTO(put_nm, rc = -EACCES);
1087 if (!(reply->ocd_connect_flags & OBD_CONNECT_RDONLY)) {
1088 bool readonly = false;
1090 if (!IS_ERR_OR_NULL(nm))
1091 readonly = nm->nmf_readonly_mount;
1092 if (unlikely(readonly))
1093 GOTO(put_nm, rc = -EROFS);
1097 if (!IS_ERR_OR_NULL(nm))
1105 obd_disconnect(class_export_get(tsi->tsi_exp));
1108 EXPORT_SYMBOL(tgt_connect);
1110 int tgt_disconnect(struct tgt_session_info *tsi)
1116 CFS_FAIL_TIMEOUT(OBD_FAIL_OST_DISCONNECT_DELAY, cfs_fail_val);
1118 rc = target_handle_disconnect(tgt_ses_req(tsi));
1120 RETURN(err_serious(rc));
1124 EXPORT_SYMBOL(tgt_disconnect);
1127 * Unified target OBD handlers
1129 int tgt_obd_ping(struct tgt_session_info *tsi)
1135 /* The target-specific part of OBD_PING request handling.
1136 * It controls Filter Modification Data (FMD) expiration each time
1139 * Valid only for replayable targets, e.g. MDT and OFD
1141 if (tsi->tsi_exp->exp_obd->obd_replayable)
1142 tgt_fmd_expire(tsi->tsi_exp);
1144 rc = req_capsule_server_pack(tsi->tsi_pill);
1146 RETURN(err_serious(rc));
1148 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_CONNECT_VS_EVICT)) {
1149 if (strstr(tsi->tsi_exp->exp_obd->obd_name, "MDT0000") &&
1150 (exp_connect_flags(tsi->tsi_exp) & OBD_CONNECT_MDS_MDS))
1151 tsi->tsi_pill->rc_req->rq_no_reply = 1;
1156 EXPORT_SYMBOL(tgt_obd_ping);
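/*
 * Send the buffers described by @rdbuf to the client as a bulk PUT.
 */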
1158 int tgt_send_buffer(struct tgt_session_info *tsi, struct lu_rdbuf *rdbuf)
1160 struct ptlrpc_request *req = tgt_ses_req(tsi);
1161 struct obd_export *exp = req->rq_export;
1162 struct ptlrpc_bulk_desc *desc;
1169 for (i = 0; i < rdbuf->rb_nbufs; i++) {
1170 unsigned int offset;
1172 offset = (unsigned long)rdbuf->rb_bufs[i].lb_buf & ~PAGE_MASK;
1173 pages += DIV_ROUND_UP(rdbuf->rb_bufs[i].lb_len + offset,
1177 desc = ptlrpc_prep_bulk_exp(req, pages, 1,
1178 PTLRPC_BULK_PUT_SOURCE,
1180 &ptlrpc_bulk_kiov_nopin_ops);
1184 for (i = 0; i < rdbuf->rb_nbufs; i++)
1185 desc->bd_frag_ops->add_iov_frag(desc,
1186 rdbuf->rb_bufs[i].lb_buf,
1187 rdbuf->rb_bufs[i].lb_len);
1189 rc = target_bulk_io(exp, desc);
1190 ptlrpc_free_bulk(desc);
1193 EXPORT_SYMBOL(tgt_send_buffer);
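/*
 * Send up to @nob bytes from the pages in @rdpg to the client as a bulk PUT;
 * for old clients without OBD_CONNECT_BRW_SIZE the transfer is capped at
 * rdpg->rp_count.
 */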
1195 int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
1197 struct ptlrpc_request *req = tgt_ses_req(tsi);
1198 struct obd_export *exp = req->rq_export;
1199 struct ptlrpc_bulk_desc *desc;
1207 desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1,
1208 PTLRPC_BULK_PUT_SOURCE,
1210 &ptlrpc_bulk_kiov_pin_ops);
1214 if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
1215 /* old client requires reply size in its PAGE_SIZE,
1216 * which is rdpg->rp_count */
1217 nob = rdpg->rp_count;
1219 for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
1220 i++, tmpcount -= tmpsize) {
1221 tmpsize = min_t(int, tmpcount, PAGE_SIZE);
1222 desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0,
1226 LASSERT(desc->bd_nob == nob);
1227 rc = target_bulk_io(exp, desc);
1228 ptlrpc_free_bulk(desc);
1231 EXPORT_SYMBOL(tgt_sendpage);
1234 * OBD_IDX_READ handler
1236 static int tgt_obd_idx_read(struct tgt_session_info *tsi)
1238 struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
1239 struct lu_rdpg *rdpg = &tti->tti_u.rdpg.tti_rdpg;
1240 struct idx_info *req_ii, *rep_ii;
1245 memset(rdpg, 0, sizeof(*rdpg));
1246 req_capsule_set(tsi->tsi_pill, &RQF_OBD_IDX_READ);
1248 /* extract idx_info buffer from request & reply */
1249 req_ii = req_capsule_client_get(tsi->tsi_pill, &RMF_IDX_INFO);
1250 if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
1251 RETURN(err_serious(-EPROTO));
1253 rc = req_capsule_server_pack(tsi->tsi_pill);
1255 RETURN(err_serious(rc));
1257 rep_ii = req_capsule_server_get(tsi->tsi_pill, &RMF_IDX_INFO);
1259 RETURN(err_serious(-EFAULT));
1260 rep_ii->ii_magic = IDX_INFO_MAGIC;
1262 /* extract hash to start with */
1263 rdpg->rp_hash = req_ii->ii_hash_start;
1265 /* extract requested attributes */
1266 rdpg->rp_attrs = req_ii->ii_attrs;
1268 /* check that fid packed in request is valid and supported */
1269 if (!fid_is_sane(&req_ii->ii_fid))
1271 rep_ii->ii_fid = req_ii->ii_fid;
1274 rep_ii->ii_flags = req_ii->ii_flags;
1276 /* compute number of pages to allocate, ii_count is the number of 4KB containers */
1278 if (req_ii->ii_count <= 0)
1279 GOTO(out, rc = -EFAULT);
1280 rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
1281 exp_max_brw_size(tsi->tsi_exp));
1282 rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1284 /* allocate pages to store the containers */
1285 OBD_ALLOC_PTR_ARRAY(rdpg->rp_pages, rdpg->rp_npages);
1286 if (rdpg->rp_pages == NULL)
1287 GOTO(out, rc = -ENOMEM);
1288 for (i = 0; i < rdpg->rp_npages; i++) {
1289 rdpg->rp_pages[i] = alloc_page(GFP_NOFS);
1290 if (rdpg->rp_pages[i] == NULL)
1291 GOTO(out, rc = -ENOMEM);
1294 /* populate pages with key/record pairs */
1295 rc = dt_index_read(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, rep_ii, rdpg);
1299 LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
1300 "asked %d > %d\n", rc, rdpg->rp_count);
1302 /* send pages to client */
1303 rc = tgt_sendpage(tsi, rdpg, rc);
1308 if (rdpg->rp_pages) {
1309 for (i = 0; i < rdpg->rp_npages; i++)
1310 if (rdpg->rp_pages[i])
1311 __free_page(rdpg->rp_pages[i]);
1312 OBD_FREE_PTR_ARRAY(rdpg->rp_pages, rdpg->rp_npages);
1317 struct tgt_handler tgt_obd_handlers[] = {
1318 TGT_OBD_HDL (0, OBD_PING, tgt_obd_ping),
1319 TGT_OBD_HDL (0, OBD_IDX_READ, tgt_obd_idx_read)
1321 EXPORT_SYMBOL(tgt_obd_handlers);
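/*
 * Sync the given object to disk, or the whole underlying device when no
 * object is specified; the per-object sync is skipped if the object's
 * version shows nothing uncommitted.
 */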
1323 int tgt_sync(const struct lu_env *env, struct lu_target *tgt,
1324 struct dt_object *obj, __u64 start, __u64 end)
1330 /* if no objid is specified, it means "sync whole filesystem" */
1332 rc = dt_sync(env, tgt->lut_bottom);
1333 } else if (dt_version_get(env, obj) >
1334 tgt->lut_obd->obd_last_committed) {
1335 rc = dt_object_sync(env, obj, start, end);
1337 atomic_inc(&tgt->lut_sync_count);
1341 EXPORT_SYMBOL(tgt_sync);
1343 * Unified target DLM handlers.
1347 * Unified target BAST
1349 * Ensure data and metadata are synced to disk when a lock is canceled, if Sync on
1350 * Cancel (SOC) is enabled. If it is an extent lock, syncing the object is normally
1351 * enough, but for a cross-MDT lock, because the remote object version is not set, a
1352 * full filesystem sync is needed.
1354 * \param lock server side lock
1355 * \param desc lock desc
1356 * \param data ldlm_cb_set_arg
1357 * \param flag indicates whether this is a cancelling or blocking callback
1358 * \retval 0 on success
1359 * \retval negative number on error
1361 int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
1362 void *data, int flag)
1365 struct lu_target *tgt;
1366 struct dt_object *obj = NULL;
1372 tgt = class_exp2tgt(lock->l_export);
1374 if (unlikely(tgt == NULL)) {
1375 CDEBUG(D_ERROR, "%s: No target for connected export\n",
1376 class_exp2obd(lock->l_export)->obd_name);
1380 if (flag == LDLM_CB_CANCELING &&
1381 (lock->l_granted_mode & (LCK_EX | LCK_PW | LCK_GROUP)) &&
1382 (tgt->lut_sync_lock_cancel == SYNC_LOCK_CANCEL_ALWAYS ||
1383 (tgt->lut_sync_lock_cancel == SYNC_LOCK_CANCEL_BLOCKING &&
1384 ldlm_is_cbpending(lock))) &&
1385 ((exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) ||
1386 lock->l_resource->lr_type == LDLM_EXTENT)) {
1388 __u64 end = OBD_OBJECT_EOF;
1390 rc = lu_env_init(&env, LCT_DT_THREAD);
1391 if (unlikely(rc != 0))
1394 ost_fid_from_resid(&fid, &lock->l_resource->lr_name,
1395 tgt->lut_lsd.lsd_osd_index);
1397 if (lock->l_resource->lr_type == LDLM_EXTENT) {
1398 obj = dt_locate(&env, tgt->lut_bottom, &fid);
1400 GOTO(err_env, rc = PTR_ERR(obj));
1402 if (!dt_object_exists(obj))
1403 GOTO(err_put, rc = -ENOENT);
1405 start = lock->l_policy_data.l_extent.start;
1406 end = lock->l_policy_data.l_extent.end;
1409 rc = tgt_sync(&env, tgt, obj, start, end);
1411 CERROR("%s: syncing "DFID" (%llu-%llu) on lock "
1412 "cancel: rc = %d\n",
1413 tgt_name(tgt), PFID(&fid),
1414 lock->l_policy_data.l_extent.start,
1415 lock->l_policy_data.l_extent.end, rc);
1419 dt_object_put(&env, obj);
1424 rc = ldlm_server_blocking_ast(lock, desc, data, flag);
1427 EXPORT_SYMBOL(tgt_blocking_ast);
1429 static struct ldlm_callback_suite tgt_dlm_cbs = {
1430 .lcs_completion = ldlm_server_completion_ast,
1431 .lcs_blocking = tgt_blocking_ast,
1432 .lcs_glimpse = ldlm_server_glimpse_ast
1435 int tgt_enqueue(struct tgt_session_info *tsi)
1437 struct ptlrpc_request *req = tgt_ses_req(tsi);
1442 * tsi->tsi_dlm_req was already swapped and (if necessary) converted,
1443 * tsi->tsi_dlm_cbs was set by the *_req_handle() function.
1445 LASSERT(tsi->tsi_dlm_req != NULL);
1446 rc = ldlm_handle_enqueue(tsi->tsi_exp->exp_obd->obd_namespace,
1447 &req->rq_pill, tsi->tsi_dlm_req, &tgt_dlm_cbs);
1449 RETURN(err_serious(rc));
1451 switch (LUT_FAIL_CLASS(tsi->tsi_reply_fail_id)) {
1453 tsi->tsi_reply_fail_id = OBD_FAIL_MDS_LDLM_REPLY_NET;
1456 tsi->tsi_reply_fail_id = OBD_FAIL_OST_LDLM_REPLY_NET;
1459 tsi->tsi_reply_fail_id = OBD_FAIL_MGS_LDLM_REPLY_NET;
1462 tsi->tsi_reply_fail_id = OBD_FAIL_LDLM_REPLY;
1465 RETURN(req->rq_status);
1467 EXPORT_SYMBOL(tgt_enqueue);
1469 int tgt_convert(struct tgt_session_info *tsi)
1471 struct ptlrpc_request *req = tgt_ses_req(tsi);
1475 LASSERT(tsi->tsi_dlm_req);
1476 rc = ldlm_handle_convert0(req, tsi->tsi_dlm_req);
1478 RETURN(err_serious(rc));
1480 RETURN(req->rq_status);
1483 int tgt_bl_callback(struct tgt_session_info *tsi)
1485 return err_serious(-EOPNOTSUPP);
1488 int tgt_cp_callback(struct tgt_session_info *tsi)
1490 return err_serious(-EOPNOTSUPP);
1493 /* generic LDLM target handler */
1494 struct tgt_handler tgt_dlm_handlers[] = {
1495 TGT_DLM_HDL(HAS_KEY, LDLM_ENQUEUE, tgt_enqueue),
1496 TGT_DLM_HDL(HAS_KEY, LDLM_CONVERT, tgt_convert),
1497 TGT_DLM_HDL_VAR(0, LDLM_BL_CALLBACK, tgt_bl_callback),
1498 TGT_DLM_HDL_VAR(0, LDLM_CP_CALLBACK, tgt_cp_callback)
1500 EXPORT_SYMBOL(tgt_dlm_handlers);
1503 * Unified target LLOG handlers.
1505 int tgt_llog_open(struct tgt_session_info *tsi)
1511 rc = llog_origin_handle_open(tgt_ses_req(tsi));
1515 EXPORT_SYMBOL(tgt_llog_open);
1517 int tgt_llog_read_header(struct tgt_session_info *tsi)
1523 rc = llog_origin_handle_read_header(tgt_ses_req(tsi));
1527 EXPORT_SYMBOL(tgt_llog_read_header);
1529 int tgt_llog_next_block(struct tgt_session_info *tsi)
1535 rc = llog_origin_handle_next_block(tgt_ses_req(tsi));
1539 EXPORT_SYMBOL(tgt_llog_next_block);
1541 int tgt_llog_prev_block(struct tgt_session_info *tsi)
1547 rc = llog_origin_handle_prev_block(tgt_ses_req(tsi));
1551 EXPORT_SYMBOL(tgt_llog_prev_block);
1553 /* generic llog target handler */
1554 struct tgt_handler tgt_llog_handlers[] = {
1555 TGT_LLOG_HDL (0, LLOG_ORIGIN_HANDLE_CREATE, tgt_llog_open),
1556 TGT_LLOG_HDL (0, LLOG_ORIGIN_HANDLE_NEXT_BLOCK, tgt_llog_next_block),
1557 TGT_LLOG_HDL (0, LLOG_ORIGIN_HANDLE_READ_HEADER, tgt_llog_read_header),
1558 TGT_LLOG_HDL (0, LLOG_ORIGIN_HANDLE_PREV_BLOCK, tgt_llog_prev_block),
1560 EXPORT_SYMBOL(tgt_llog_handlers);
1563 * sec context handlers
1565 /* XXX: Implement based on mdt_sec_ctx_handle()? */
1566 static int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
1571 struct tgt_handler tgt_sec_ctx_handlers[] = {
1572 TGT_SEC_HDL_VAR(0, SEC_CTX_INIT, tgt_sec_ctx_handle),
1573 TGT_SEC_HDL_VAR(0, SEC_CTX_INIT_CONT, tgt_sec_ctx_handle),
1574 TGT_SEC_HDL_VAR(0, SEC_CTX_FINI, tgt_sec_ctx_handle),
1576 EXPORT_SYMBOL(tgt_sec_ctx_handlers);
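/*
 * LFSCK integration: the lfsck module registers its notify/query callbacks
 * through the setters below, so the generic target code can forward LFSCK
 * RPCs to it.
 */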
1578 int (*tgt_lfsck_in_notify_local)(const struct lu_env *env,
1579 struct dt_device *key,
1580 struct lfsck_req_local *lrl,
1581 struct thandle *th) = NULL;
1583 void tgt_register_lfsck_in_notify_local(int (*notify)(const struct lu_env *,
1585 struct lfsck_req_local *,
1588 tgt_lfsck_in_notify_local = notify;
1590 EXPORT_SYMBOL(tgt_register_lfsck_in_notify_local);
1592 int (*tgt_lfsck_in_notify)(const struct lu_env *env,
1593 struct dt_device *key,
1594 struct lfsck_request *lr) = NULL;
1596 void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
1598 struct lfsck_request *))
1600 tgt_lfsck_in_notify = notify;
1602 EXPORT_SYMBOL(tgt_register_lfsck_in_notify);
1604 static int (*tgt_lfsck_query)(const struct lu_env *env,
1605 struct dt_device *key,
1606 struct lfsck_request *req,
1607 struct lfsck_reply *rep,
1608 struct lfsck_query *que) = NULL;
1610 void tgt_register_lfsck_query(int (*query)(const struct lu_env *,
1612 struct lfsck_request *,
1613 struct lfsck_reply *,
1614 struct lfsck_query *))
1616 tgt_lfsck_query = query;
1618 EXPORT_SYMBOL(tgt_register_lfsck_query);
1620 /* LFSCK request handlers */
1621 static int tgt_handle_lfsck_notify(struct tgt_session_info *tsi)
1623 const struct lu_env *env = tsi->tsi_env;
1624 struct dt_device *key = tsi->tsi_tgt->lut_bottom;
1625 struct lfsck_request *lr;
1629 lr = req_capsule_client_get(tsi->tsi_pill, &RMF_LFSCK_REQUEST);
1633 rc = tgt_lfsck_in_notify(env, key, lr);
1638 static int tgt_handle_lfsck_query(struct tgt_session_info *tsi)
1640 struct lfsck_request *request;
1641 struct lfsck_reply *reply;
1645 request = req_capsule_client_get(tsi->tsi_pill, &RMF_LFSCK_REQUEST);
1646 if (request == NULL)
1649 reply = req_capsule_server_get(tsi->tsi_pill, &RMF_LFSCK_REPLY);
1653 rc = tgt_lfsck_query(tsi->tsi_env, tsi->tsi_tgt->lut_bottom,
1654 request, reply, NULL);
1656 RETURN(rc < 0 ? rc : 0);
1659 struct tgt_handler tgt_lfsck_handlers[] = {
1660 TGT_LFSCK_HDL(HAS_REPLY, LFSCK_NOTIFY, tgt_handle_lfsck_notify),
1661 TGT_LFSCK_HDL(HAS_REPLY, LFSCK_QUERY, tgt_handle_lfsck_query),
1663 EXPORT_SYMBOL(tgt_lfsck_handlers);
1666 * initialize per-thread page pool (bug 5137).
1668 int tgt_io_thread_init(struct ptlrpc_thread *thread)
1670 struct tgt_thread_big_cache *tbc;
1674 LASSERT(thread != NULL);
1675 LASSERT(thread->t_data == NULL);
1677 OBD_ALLOC_LARGE(tbc, sizeof(*tbc));
1680 thread->t_data = tbc;
1683 EXPORT_SYMBOL(tgt_io_thread_init);
1686 * free per-thread pool created by tgt_io_thread_init().
1688 void tgt_io_thread_done(struct ptlrpc_thread *thread)
1690 struct tgt_thread_big_cache *tbc;
1694 LASSERT(thread != NULL);
1697 * be prepared to handle partially-initialized pools (because this is
1698 * called from ost_io_thread_init() for cleanup).
1700 tbc = thread->t_data;
1702 OBD_FREE_LARGE(tbc, sizeof(*tbc));
1703 thread->t_data = NULL;
1707 EXPORT_SYMBOL(tgt_io_thread_done);
1710 * Helper function for getting a server-side DLM lock on a Data-on-MDT file
1711 * if asked by the client.
1713 int tgt_mdt_data_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
1714 struct lustre_handle *lh, int mode, __u64 *flags)
1716 union ldlm_policy_data policy = {
1717 .l_inodebits.bits = MDS_INODELOCK_DOM,
1723 LASSERT(lh != NULL);
1724 LASSERT(ns != NULL);
1725 LASSERT(!lustre_handle_is_used(lh));
1727 rc = ldlm_cli_enqueue_local(NULL, ns, res_id, LDLM_IBITS, &policy, mode,
1728 flags, ldlm_blocking_ast,
1729 ldlm_completion_ast, ldlm_glimpse_ast,
1730 NULL, 0, LVB_T_NONE, NULL, lh);
1732 RETURN(rc == ELDLM_OK ? 0 : -EIO);
1734 EXPORT_SYMBOL(tgt_mdt_data_lock);
1737 * Helper function for getting a server-side [start, end] DLM lock
1738 * if asked by the client.
1740 int tgt_extent_lock(const struct lu_env *env, struct ldlm_namespace *ns,
1741 struct ldlm_res_id *res_id, __u64 start, __u64 end,
1742 struct lustre_handle *lh, int mode, __u64 *flags)
1744 union ldlm_policy_data policy;
1749 LASSERT(lh != NULL);
1750 LASSERT(ns != NULL);
1751 LASSERT(!lustre_handle_is_used(lh));
1753 policy.l_extent.gid = 0;
1754 policy.l_extent.start = start & PAGE_MASK;
1757 * If ->o_blocks is EOF it means "lock till the end of the file".
1758 * Otherwise, it's the size of an extent or hole being punched (in bytes).
1760 if (end == OBD_OBJECT_EOF || end < start)
1761 policy.l_extent.end = OBD_OBJECT_EOF;
1763 policy.l_extent.end = end | ~PAGE_MASK;
1765 rc = ldlm_cli_enqueue_local(env, ns, res_id, LDLM_EXTENT, &policy,
1766 mode, flags, ldlm_blocking_ast,
1767 ldlm_completion_ast, ldlm_glimpse_ast,
1768 NULL, 0, LVB_T_NONE, NULL, lh);
1769 RETURN(rc == ELDLM_OK ? 0 : -EIO);
1771 EXPORT_SYMBOL(tgt_extent_lock);
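/*
 * Take a server-side data lock: a DOM inodebits lock when the export
 * supports OBD_CONNECT_IBITS (Data-on-MDT I/O on the MDT), otherwise an
 * extent lock covering [start, end].
 */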
1773 static int tgt_data_lock(const struct lu_env *env, struct obd_export *exp,
1774 struct ldlm_res_id *res_id, __u64 start, __u64 end,
1775 struct lustre_handle *lh, enum ldlm_mode mode)
1777 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
1780 /* MDT IO for data-on-mdt */
1781 if (exp->exp_connect_data.ocd_connect_flags & OBD_CONNECT_IBITS)
1782 return tgt_mdt_data_lock(ns, res_id, lh, mode, &flags);
1784 return tgt_extent_lock(env, ns, res_id, start, end, lh, mode, &flags);
1787 void tgt_data_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
1789 LASSERT(lustre_handle_is_used(lh));
1790 ldlm_lock_decref(lh, mode);
1792 EXPORT_SYMBOL(tgt_data_unlock);
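/*
 * Take the server-side lock for a bulk read/write when the client requested
 * it with OBD_BRW_SRVLOCK (the flag must be set consistently on all
 * niobufs); no lock is taken while the target is recovering.
 */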
1794 static int tgt_brw_lock(const struct lu_env *env, struct obd_export *exp,
1795 struct ldlm_res_id *res_id, struct obd_ioobj *obj,
1796 struct niobuf_remote *nb, struct lustre_handle *lh,
1797 enum ldlm_mode mode)
1799 int nrbufs = obj->ioo_bufcnt;
1804 LASSERT(mode == LCK_PR || mode == LCK_PW);
1805 LASSERT(!lustre_handle_is_used(lh));
1807 if (exp->exp_obd->obd_recovering)
1810 if (nrbufs == 0 || !(nb[0].rnb_flags & OBD_BRW_SRVLOCK))
1813 for (i = 1; i < nrbufs; i++)
1814 if (!(nb[i].rnb_flags & OBD_BRW_SRVLOCK))
1817 return tgt_data_lock(env, exp, res_id, nb[0].rnb_offset,
1818 nb[nrbufs - 1].rnb_offset +
1819 nb[nrbufs - 1].rnb_len - 1, lh, mode);
1822 static void tgt_brw_unlock(struct obd_export *exp, struct obd_ioobj *obj,
1823 struct niobuf_remote *niob,
1824 struct lustre_handle *lh, enum ldlm_mode mode)
1828 LASSERT(mode == LCK_PR || mode == LCK_PW);
1829 LASSERT((!exp->exp_obd->obd_recovering && obj->ioo_bufcnt &&
1830 niob[0].rnb_flags & OBD_BRW_SRVLOCK) ==
1831 lustre_handle_is_used(lh));
1833 if (lustre_handle_is_used(lh))
1834 tgt_data_unlock(lh, mode);
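/*
 * Compute the bulk checksum over the local niobuf pages using the hash
 * mapped from @cksum_type; the OBD_FAIL_OST_CHECKSUM_RECEIVE/SEND fault
 * injections corrupt a scratch copy of the first page to simulate
 * corruption on the wire.
 */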
1838 static int tgt_checksum_niobuf(struct lu_target *tgt,
1839 struct niobuf_local *local_nb, int npages,
1840 int opc, enum cksum_types cksum_type,
1843 struct ahash_request *req;
1844 unsigned int bufsize;
1846 unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
1848 req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
1850 CERROR("%s: unable to initialize checksum hash %s\n",
1851 tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
1852 return PTR_ERR(req);
1855 CDEBUG(D_INFO, "Checksum for algo %s\n", cfs_crypto_hash_name(cfs_alg));
1856 for (i = 0; i < npages; i++) {
1857 /* corrupt the data before we compute the checksum, to
1858 * simulate a client->OST data error */
1859 if (i == 0 && opc == OST_WRITE &&
1860 CFS_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
1861 int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
1862 int len = local_nb[i].lnb_len;
1863 struct page *np = tgt_page_to_corrupt;
1866 char *ptr = kmap_atomic(local_nb[i].lnb_page);
1867 char *ptr2 = page_address(np);
1869 memcpy(ptr2 + off, ptr + off, len);
1870 memcpy(ptr2 + off, "bad3", min(4, len));
1873 /* LU-8376 to preserve original index for
1874 * display in dump_all_bulk_pages() */
1877 cfs_crypto_hash_update_page(req, np, off,
1881 CERROR("%s: can't alloc page for corruption\n",
1885 cfs_crypto_hash_update_page(req, local_nb[i].lnb_page,
1886 local_nb[i].lnb_page_offset & ~PAGE_MASK,
1887 local_nb[i].lnb_len);
1889 /* corrupt the data after we compute the checksum, to
1890 * simulate an OST->client data error */
1891 if (i == 0 && opc == OST_READ &&
1892 CFS_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
1893 int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
1894 int len = local_nb[i].lnb_len;
1895 struct page *np = tgt_page_to_corrupt;
1898 char *ptr = kmap_atomic(local_nb[i].lnb_page);
1899 char *ptr2 = page_address(np);
1901 memcpy(ptr2 + off, ptr + off, len);
1902 memcpy(ptr2 + off, "bad4", min(4, len));
1905 /* LU-8376 to preserve original index for
1906 * display in dump_all_bulk_pages() */
1909 cfs_crypto_hash_update_page(req, np, off,
1913 CERROR("%s: can't alloc page for corruption\n",
1919 bufsize = sizeof(*cksum);
1920 err = cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);
1925 char dbgcksum_file_name[PATH_MAX];
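/*
 * Dump the raw bulk pages to a file under the debug file path so a checksum
 * mismatch can be analyzed offline; also triggers a debug log dump.
 */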
1927 static void dump_all_bulk_pages(struct obdo *oa, int count,
1928 struct niobuf_local *local_nb,
1929 __u32 server_cksum, __u32 client_cksum)
1936 /* will only keep a dump of pages on the first error for the same range in
1937 * a file/fid, not during the resends/retries. */
1938 snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1939 "%s-checksum_dump-ost-"DFID":[%llu-%llu]-%x-%x",
1940 (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
1941 libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1942 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1943 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1944 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1945 local_nb[0].lnb_file_offset,
1946 local_nb[count-1].lnb_file_offset +
1947 local_nb[count-1].lnb_len - 1, client_cksum, server_cksum);
1948 CWARN("dumping checksum data to %s\n", dbgcksum_file_name);
1949 filp = filp_open(dbgcksum_file_name,
1950 O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1954 CDEBUG(D_INFO, "%s: can't open to dump pages with "
1955 "checksum error: rc = %d\n", dbgcksum_file_name,
1958 CERROR("%s: can't open to dump pages with checksum "
1959 "error: rc = %d\n", dbgcksum_file_name, rc);
1963 for (i = 0; i < count; i++) {
1964 len = local_nb[i].lnb_len;
1965 buf = kmap(local_nb[i].lnb_page);
1967 rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1969 CERROR("%s: wanted to write %u but got %d "
1970 "error\n", dbgcksum_file_name, len, rc);
1976 kunmap(local_nb[i].lnb_page);
1979 rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1981 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1982 filp_close(filp, NULL);
1984 libcfs_debug_dumplog();
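/*
 * Called when a read is resent with the checksum from the client's previous
 * attempt: compare it with the freshly computed server checksum, optionally
 * dump the pages, and log whether the mismatch looks like a checksum-type
 * mismatch or corruption on the client side or in transit.
 */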
1987 static int check_read_checksum(struct niobuf_local *local_nb, int npages,
1988 struct obd_export *exp, struct obdo *oa,
1989 const struct lnet_processid *peer,
1990 __u32 client_cksum, __u32 server_cksum,
1991 enum cksum_types server_cksum_type)
1994 enum cksum_types cksum_type;
1997 if (unlikely(npages <= 0))
2000 /* unlikely to happen, and only if a resend does not occur due to a cksum
2001 * control failure on the client */
2002 if (unlikely(server_cksum == client_cksum)) {
2003 CDEBUG(D_PAGE, "checksum %x confirmed upon retry\n",
2008 if (exp->exp_obd->obd_checksum_dump)
2009 dump_all_bulk_pages(oa, npages, local_nb, server_cksum,
2012 cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
2015 if (cksum_type != server_cksum_type)
2016 msg = "the server may have not used the checksum type specified"
2017 " in the original request - likely a protocol problem";
2019 msg = "should have changed on the client or in transit";
2021 start = local_nb[0].lnb_file_offset;
2022 end = local_nb[npages-1].lnb_file_offset +
2023 local_nb[npages-1].lnb_len - 1;
2025 LCONSOLE_ERROR_MSG(0x132, "%s: BAD READ CHECKSUM: %s: from %s inode "
2026 DFID " object "DOSTID" extent [%llu-%llu], client returned csum"
2027 " %x (type %x), server csum %x (type %x)\n",
2028 exp->exp_obd->obd_name,
2029 msg, libcfs_nidstr(&peer->nid),
2030 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
2031 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
2032 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
2034 start, end, client_cksum, cksum_type, server_cksum,
2040 static int tgt_pages2shortio(struct niobuf_local *local, int npages,
2041 unsigned char *buf, int size)
2043 int i, off, len, copied = size;
2046 for (i = 0; i < npages; i++) {
2047 off = local[i].lnb_page_offset & ~PAGE_MASK;
2048 len = local[i].lnb_len;
2050 CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",
2055 ptr = kmap_atomic(local[i].lnb_page);
2056 memcpy(buf, ptr + off, len);
2061 return copied - size;
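/*
 * T10-PI variant of the bulk checksum: hash the per-sector guard tags
 * instead of the raw data, reusing guards already verified by the disk
 * (lnb_guard_disk) when possible and generating them from the data
 * otherwise.
 */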
2064 static int tgt_checksum_niobuf_t10pi(struct lu_target *tgt,
2065 enum cksum_types cksum_type,
2066 struct niobuf_local *local_nb, int npages,
2067 int opc, obd_dif_csum_fn *fn,
2068 int sector_size, u32 *check_sum,
2071 enum cksum_types t10_cksum_type = tgt->lut_dt_conf.ddp_t10_cksum_type;
2072 unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
2073 const char *obd_name = tgt->lut_obd->obd_name;
2074 struct ahash_request *req;
2075 unsigned char *buffer;
2076 struct page *__page;
2077 __be16 *guard_start;
2079 int used_number = 0;
2081 unsigned int bufsize = sizeof(cksum);
2086 __page = alloc_page(GFP_KERNEL);
2090 req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
2093 CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
2094 tgt_name(tgt), cfs_crypto_hash_name(cfs_alg), rc);
2098 buffer = kmap(__page);
2099 guard_start = (__be16 *)buffer;
2100 guard_number = PAGE_SIZE / sizeof(*guard_start);
2101 if (unlikely(resend))
2102 CDEBUG(D_PAGE | D_HA, "GRD tags per page = %u\n", guard_number);
2103 for (i = 0; i < npages; i++) {
2105 int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
2106 int len = local_nb[i].lnb_len;
2107 int guards_needed = DIV_ROUND_UP(off + len, sector_size) -
2108 (off / sector_size);
2110 if (guards_needed > guard_number - used_number) {
2111 cfs_crypto_hash_update_page(req, __page, 0,
2112 used_number * sizeof(*guard_start));
2116 /* corrupt the data before we compute the checksum, to
2117 * simulate a client->OST data error */
2118 if (i == 0 && opc == OST_WRITE &&
2119 CFS_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
2120 struct page *np = tgt_page_to_corrupt;
2123 char *ptr = kmap_atomic(local_nb[i].lnb_page);
2124 char *ptr2 = page_address(np);
2126 memcpy(ptr2 + off, ptr + off, len);
2127 memcpy(ptr2 + off, "bad3", min(4, len));
2130 /* LU-8376 to preserve original index for
2131 * display in dump_all_bulk_pages() */
2134 cfs_crypto_hash_update_page(req, np, off,
2138 CERROR("%s: can't alloc page for corruption\n",
2144 * The left guard number should be able to hold checksums of a
2147 use_t10_grd = t10_cksum_type && t10_cksum_type == cksum_type &&
2149 local_nb[i].lnb_len == PAGE_SIZE &&
2150 local_nb[i].lnb_guard_disk;
2152 used = guards_needed;
2153 if (used > (guard_number - used_number)) {
2155 CDEBUG(D_PAGE | D_HA,
2156 "%s: used %u, guard %u/%u, data size %u+%u, sector_size %u: rc = %d\n",
2157 obd_name, used, guard_number,
2158 used_number, local_nb[i].lnb_page_offset,
2159 local_nb[i].lnb_len, sector_size, rc);
2162 memcpy(guard_start + used_number,
2163 local_nb[i].lnb_guards,
2164 used * sizeof(*guard_start));
2165 if (unlikely(resend))
2166 CDEBUG(D_PAGE | D_HA,
2167 "lnb[%u]: used %u off %u+%u lnb checksum: %*phN\n",
2169 local_nb[i].lnb_page_offset,
2170 local_nb[i].lnb_len,
2171 (int)(used * sizeof(*guard_start)),
2172 guard_start + used_number);
2174 if (!use_t10_grd || unlikely(resend)) {
2175 __be16 guard_tmp[MAX_GUARD_NUMBER];
2176 __be16 *guards = guard_start + used_number;
2177 int used_tmp = -1, *usedp = &used;
2179 if (unlikely(use_t10_grd)) {
2183 rc = obd_page_dif_generate_buffer(obd_name,
2184 local_nb[i].lnb_page,
2185 local_nb[i].lnb_page_offset & ~PAGE_MASK,
2186 local_nb[i].lnb_len, guards,
2187 guard_number - used_number, usedp, sector_size,
2189 if (unlikely(resend)) {
2190 bool bad = use_t10_grd &&
2192 local_nb[i].lnb_guards,
2193 used_tmp * sizeof(*guard_tmp));
2196 CERROR("lnb[%u]: used %u/%u off %u+%u tmp checksum: %*phN\n",
2198 local_nb[i].lnb_page_offset,
2199 local_nb[i].lnb_len,
2200 (int)(used_tmp * sizeof(*guard_start)),
2202 CDEBUG_LIMIT(D_PAGE | D_HA | (bad ? D_ERROR : 0),
2203 "lnb[%u]: used %u/%u off %u+%u gen checksum: %*phN\n",
2205 local_nb[i].lnb_page_offset,
2206 local_nb[i].lnb_len,
2207 (int)(used * sizeof(*guard_start)),
2208 guard_start + used_number);
2214 LASSERT(used <= MAX_GUARD_NUMBER);
2216 * If the disk supports T10PI checksums, copy the guards to local_nb.
2217 * If the write is a partial page, do not use the guards for bio
2218 * submission since the data might not be full-sector. The bio
2219 * guards will be generated later based on the full sectors. If
2220 * the sector size is 512B rather than 4KB, or the page size
2221 * is larger than 4KB, this might drop some useful guards for
2222 * partial-page writes, but it will only add minimal extra
2223 * checksum calculation time.
2225 if (t10_cksum_type && t10_cksum_type == cksum_type &&
2227 local_nb[i].lnb_len == PAGE_SIZE) {
2228 local_nb[i].lnb_guard_rpc = 1;
2229 memcpy(local_nb[i].lnb_guards,
2230 guard_start + used_number,
2231 used * sizeof(*local_nb[i].lnb_guards));
2234 used_number += used;
2236 /* corrupt the data after we compute the checksum, to
2237 * simulate an OST->client data error */
2238 if (unlikely(i == 0 && opc == OST_READ &&
2239 CFS_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND))) {
2240 struct page *np = tgt_page_to_corrupt;
2243 char *ptr = kmap_atomic(local_nb[i].lnb_page);
2244 char *ptr2 = page_address(np);
2246 memcpy(ptr2 + off, ptr + off, len);
2247 memcpy(ptr2 + off, "bad4", min(4, len));
2250 /* LU-8376 to preserve original index for
2251 * display in dump_all_bulk_pages() */
2254 cfs_crypto_hash_update_page(req, np, off,
2258 CERROR("%s: can't alloc page for corruption\n",
2267 if (used_number != 0)
2268 cfs_crypto_hash_update_page(req, __page, 0,
2269 used_number * sizeof(*guard_start));
2271 rc2 = cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);
2277 __free_page(__page);
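/*
 * Select the checksum method for a bulk transfer: use the T10-PI guard-tag
 * path when @cksum_type maps to a DIF function, otherwise the plain
 * per-page hash.
 */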
2281 static int tgt_checksum_niobuf_rw(struct lu_target *tgt,
2282 enum cksum_types cksum_type,
2283 struct niobuf_local *local_nb,
2284 int npages, int opc, u32 *check_sum,
2287 obd_dif_csum_fn *fn = NULL;
2288 int sector_size = 0;
2292 obd_t10_cksum2dif(cksum_type, &fn, §or_size);
2295 rc = tgt_checksum_niobuf_t10pi(tgt, cksum_type,
2297 opc, fn, sector_size,
2300 rc = tgt_checksum_niobuf(tgt, local_nb, npages, opc,
2301 cksum_type, check_sum);
2306 int tgt_brw_read(struct tgt_session_info *tsi)
2308 struct ptlrpc_request *req = tgt_ses_req(tsi);
2309 struct ptlrpc_bulk_desc *desc = NULL;
2310 struct obd_export *exp = tsi->tsi_exp;
2311 struct niobuf_remote *remote_nb;
2312 struct niobuf_local *local_nb;
2313 struct obd_ioobj *ioo;
2314 struct ost_body *body, *repbody;
2315 struct lustre_handle lockh = { 0 };
2316 int npages, nob = 0, rc, i, no_reply = 0,
2318 struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
2319 const char *obd_name = exp->exp_obd->obd_name;
2324 if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL &&
2325 ptlrpc_req2svc(req)->srv_req_portal != MDS_IO_PORTAL) {
2326 CERROR("%s: deny read request from %s to portal %u\n",
2327 tgt_name(tsi->tsi_tgt),
2328 obd_export_nid2str(req->rq_export),
2329 ptlrpc_req2svc(req)->srv_req_portal);
2333 req->rq_bulk_read = 1;
2335 if (CFS_FAIL_CHECK(OBD_FAIL_OST_BRW_READ_BULK)) {
2336 /* optionally use cfs_fail_val - 1 to select a specific OST on
2337 * this server to fail requests.
2339 char fail_ost_name[MAX_OBD_NAME];
2341 if (cfs_fail_val > 0) {
2342 snprintf(fail_ost_name, MAX_OBD_NAME, "OST%04X",
2345 if (strstr(obd_name, fail_ost_name))
2346 RETURN(err_serious(-EIO));
2348 RETURN(err_serious(-EIO));
2352 CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, cfs_fail_val > 0 ?
2353 cfs_fail_val : (obd_timeout + 1) / 4);
2355 /* There must be a big cache in the current thread to process this
2356 * request; if it is NULL then something went wrong and it wasn't
2357 * allocated, so report -ENOMEM in that case */
2361 body = tsi->tsi_ost_body;
2362 LASSERT(body != NULL);
2364 if (body->oa.o_valid & OBD_MD_FLFLAGS &&
2365 body->oa.o_flags & OBD_FL_NORPC)
2368 ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
2369 LASSERT(ioo != NULL); /* must exist after tgt_ost_body_unpack */
2371 remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
2372 LASSERT(remote_nb != NULL); /* must exist after tgt_ost_body_unpack */
2374 local_nb = tbc->local;
2376 rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
2382 * If getting the lock took more time than the
2383 * client was willing to wait, drop it. b=11330
2385 if (ktime_get_real_seconds() > req->rq_deadline ||
2386 CFS_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
2388 CERROR("Dropping timed-out read from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
2389 libcfs_idstr(&req->rq_peer), POSTID(&ioo->ioo_oid),
2390 ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
2391 req->rq_deadline - req->rq_arrival_time.tv_sec);
2392 GOTO(out_lock, rc = -ETIMEDOUT);
2396 * Because we already sync the grant info with the client at
2397 * reconnect time, the grant info is cleared for a resent req;
2398 * otherwise the outdated grant count in the RPC would de-sync
2399 * the grant counters in case of a shrink
2401 if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY)) {
2402 DEBUG_REQ(D_CACHE, req, "clear resent/replay req grant info");
2403 body->oa.o_valid &= ~OBD_MD_FLGRANT;
2406 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
2407 repbody->oa = body->oa;
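/* Ask the storage layer to prepare up to PTLRPC_MAX_BRW_PAGES local
 * pages covering the requested extents before any data is transferred */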
2409 npages = PTLRPC_MAX_BRW_PAGES;
2410 kstart = ktime_get();
2411 rc = obd_preprw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1,
2412 ioo, remote_nb, &npages, local_nb);
2416 if (body->oa.o_valid & OBD_MD_FLFLAGS &&
2417 body->oa.o_flags & OBD_FL_SHORT_IO) {
2420 desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
2421 PTLRPC_BULK_PUT_SOURCE,
2423 &ptlrpc_bulk_kiov_nopin_ops);
2425 GOTO(out_commitrw, rc = -ENOMEM);
2428 npages_read = npages;
2429 for (i = 0; i < npages; i++) {
2430 int page_rc = local_nb[i].lnb_rc;
2439 if (page_rc != 0 && desc != NULL) { /* some data! */
2440 LASSERT(local_nb[i].lnb_page != NULL);
2441 desc->bd_frag_ops->add_kiov_frag
2442 (desc, local_nb[i].lnb_page,
2443 local_nb[i].lnb_page_offset & ~PAGE_MASK,
2447 if (page_rc != local_nb[i].lnb_len) { /* short read */
2448 local_nb[i].lnb_len = page_rc;
2449 npages_read = i + (page_rc != 0 ? 1 : 0);
2450 /* All subsequent pages should be 0 */
2451 while (++i < npages)
2452 LASSERT(local_nb[i].lnb_rc == 0);
2457 if (body->oa.o_valid & OBD_MD_FLCKSUM) {
2458 u32 flag = body->oa.o_valid & OBD_MD_FLFLAGS ?
2459 body->oa.o_flags : 0;
2460 enum cksum_types cksum_type = obd_cksum_type_unpack(flag);
2461 bool resend = (body->oa.o_valid & OBD_MD_FLFLAGS) &&
2462 (body->oa.o_flags & OBD_FL_RECOV_RESEND);
2464 repbody->oa.o_flags = obd_cksum_type_pack(obd_name,
2466 repbody->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
2468 rc = tgt_checksum_niobuf_rw(tsi->tsi_tgt, cksum_type,
2469 local_nb, npages_read, OST_READ,
2470 &repbody->oa.o_cksum, resend);
2472 GOTO(out_commitrw, rc);
2473 CDEBUG(D_PAGE | (resend ? D_HA : 0),
2474 "checksum at read origin: %x (%x)\n",
2475 repbody->oa.o_cksum, cksum_type);
2477 /* if this is a resend it could be due to a cksum error, so check
2478 * the server cksum against the returned client cksum (this should
2479 * even cover the zero-cksum case) */
2481 check_read_checksum(local_nb, npages_read, exp,
2482 &body->oa, &req->rq_peer,
2484 repbody->oa.o_cksum, cksum_type);
2486 repbody->oa.o_valid = 0;
2488 if (body->oa.o_valid & OBD_MD_FLGRANT)
2489 repbody->oa.o_valid |= OBD_MD_FLGRANT;
2490 /* We're finished using body->oa as an input variable */
2492 /* Check if client was evicted while we were doing i/o before touching
2495 if (body->oa.o_valid & OBD_MD_FLFLAGS &&
2496 body->oa.o_flags & OBD_FL_SHORT_IO) {
2497 unsigned char *short_io_buf;
2500 short_io_buf = req_capsule_server_get(&req->rq_pill,
2502 short_io_size = req_capsule_get_size(&req->rq_pill,
2505 rc = tgt_pages2shortio(local_nb, npages_read,
2506 short_io_buf, short_io_size);
2508 req_capsule_shrink(&req->rq_pill,
2511 rc = rc > 0 ? 0 : rc;
2512 } else if (!CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) {
2513 rc = target_bulk_io(exp, desc);
2517 if (body->oa.o_valid & OBD_MD_FLFLAGS &&
2518 body->oa.o_flags & OBD_FL_SHORT_IO)
2519 req_capsule_shrink(&req->rq_pill, &RMF_SHORT_IO, 0,
2524 /* Must commit after prep above in all cases */
2525 rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1, ioo,
2526 remote_nb, npages, local_nb, rc, nob, kstart);
2528 tgt_brw_unlock(exp, ioo, remote_nb, &lockh, LCK_PR);
2530 if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
2531 ptlrpc_free_bulk(desc);
2536 ptlrpc_lprocfs_brw(req, nob);
2537 } else if (no_reply) {
2538 req->rq_no_reply = 1;
2539 /* reply out callback would free */
2540 ptlrpc_req_drop_rs(req);
2541 LCONSOLE_WARN("%s: Bulk IO read error with %s (at %s), "
2542 "client will retry: rc %d\n",
2544 obd_uuid2str(&exp->exp_client_uuid),
2545 obd_export_nid2str(exp), rc);
2547 /* send a bulk after reply to simulate a network delay or reordering
2548 * by a router - Note that !desc implies short io, so there is no bulk
2550 if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) &&
2552 /* Calculate the checksum before the bulk transfer; originally
2553 * it is done by target_bulk_io() */
2554 rc = sptlrpc_svc_wrap_bulk(req, desc);
2555 if (OCD_HAS_FLAG(&exp->exp_connect_data, BULK_MBITS))
2556 req->rq_mbits = lustre_msg_get_mbits(req->rq_reqmsg);
2557 else /* old version, bulk matchbits is rq_xid */
2558 req->rq_mbits = req->rq_xid;
2560 req->rq_status = rc;
2561 target_committed_to_req(req);
2562 target_send_reply(req, 0, 0);
2564 CDEBUG(D_INFO, "reorder BULK\n");
2565 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,
2566 cfs_fail_val ? : 3);
2568 target_bulk_io(exp, desc);
2569 ptlrpc_free_bulk(desc);
2574 EXPORT_SYMBOL(tgt_brw_read);
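/*
 * Copy an inline (short I/O) buffer received with the request into the
 * prepared local niobuf pages, page by page.
 */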
2576 static int tgt_shortio2pages(struct niobuf_local *local, int npages,
2577 unsigned char *buf, unsigned int size)
2582 for (i = 0; i < npages; i++) {
2583 off = local[i].lnb_page_offset & ~PAGE_MASK;
2584 len = local[i].lnb_len;
2589 CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",
2591 ptr = kmap_atomic(local[i].lnb_page);
2594 memcpy(ptr + off, buf, len < size ? len : size);
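/*
 * Report a write checksum mismatch: optionally dump the received bulk pages
 * for later analysis and log the client and server checksums, including the
 * last router NID when the bulk did not arrive directly from the client.
 */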
2602 static void tgt_warn_on_cksum(struct ptlrpc_request *req,
2603 struct ptlrpc_bulk_desc *desc,
2604 struct niobuf_local *local_nb, int npages,
2605 u32 client_cksum, u32 server_cksum,
2608 struct obd_export *exp = req->rq_export;
2609 struct ost_body *body;
2613 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2614 LASSERT(body != NULL);
2616 if (desc && !nid_same(&req->rq_peer.nid, &desc->bd_sender)) {
2618 router = libcfs_nidstr(&desc->bd_sender);
2621 if (exp->exp_obd->obd_checksum_dump)
2622 dump_all_bulk_pages(&body->oa, npages, local_nb, server_cksum,
2626 CDEBUG_LIMIT(D_INFO, "client csum %x, server csum %x\n",
2627 client_cksum, server_cksum);
2631 LCONSOLE_ERROR_MSG(0x168, "%s: BAD WRITE CHECKSUM: from %s%s%s inode "
2632 DFID" object "DOSTID" extent [%llu-%llu"
2633 "]: client csum %x, server csum %x\n",
2634 exp->exp_obd->obd_name, libcfs_idstr(&req->rq_peer),
2636 body->oa.o_valid & OBD_MD_FLFID ?
2637 body->oa.o_parent_seq : (__u64)0,
2638 body->oa.o_valid & OBD_MD_FLFID ?
2639 body->oa.o_parent_oid : 0,
2640 body->oa.o_valid & OBD_MD_FLFID ?
2641 body->oa.o_parent_ver : 0,
2642 POSTID(&body->oa.o_oi),
2643 local_nb[0].lnb_file_offset,
2644 local_nb[npages-1].lnb_file_offset +
2645 local_nb[npages - 1].lnb_len - 1,
2646 client_cksum, server_cksum);
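/*
 * Handler for OST_WRITE: validate the request portal, take the extent lock,
 * prepare local pages with obd_preprw(), receive the data either as a bulk
 * transfer or from the inline short I/O buffer, verify the client checksum
 * if one was sent, then commit with obd_commitrw(), fill in the per-niobuf
 * return codes and drop the lock.
 */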
2649 int tgt_brw_write(struct tgt_session_info *tsi)
2651 struct ptlrpc_request *req = tgt_ses_req(tsi);
2652 struct ptlrpc_bulk_desc *desc = NULL;
2653 struct obd_export *exp = req->rq_export;
2654 struct niobuf_remote *remote_nb;
2655 struct niobuf_local *local_nb;
2656 struct obd_ioobj *ioo;
2657 struct ost_body *body, *repbody;
2658 struct lustre_handle lockh = {0};
2660 int objcount, niocount, npages;
2663 enum cksum_types cksum_type = OBD_CKSUM_CRC32;
2664 bool no_reply = false, mmap;
2665 struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
2666 bool wait_sync = false;
2667 const char *obd_name = exp->exp_obd->obd_name;
2668 /* '1' for consistency with code that checks !mpflag to restore */
2669 unsigned int mpflags = 1;
2675 if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL &&
2676 ptlrpc_req2svc(req)->srv_req_portal != MDS_IO_PORTAL) {
2677 CERROR("%s: deny write request from %s to portal %u\n",
2678 tgt_name(tsi->tsi_tgt),
2679 obd_export_nid2str(req->rq_export),
2680 ptlrpc_req2svc(req)->srv_req_portal);
2681 RETURN(err_serious(-EPROTO));
2684 if (CFS_FAIL_CHECK(OBD_FAIL_OST_ENOSPC))
2685 RETURN(err_serious(-ENOSPC));
2686 if (CFS_FAIL_TIMEOUT(OBD_FAIL_OST_EROFS, 1))
2687 RETURN(err_serious(cfs_fail_val ? -cfs_fail_val : -EROFS));
2689 req->rq_bulk_write = 1;
2691 if (CFS_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK))
2693 if (CFS_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK2))
2696 /* optionally use cfs_fail_val - 1 to select a specific OST on
2697 * this server to fail requests.
2699 char fail_ost_name[MAX_OBD_NAME];
2701 if (cfs_fail_val > 0) {
2702 snprintf(fail_ost_name, MAX_OBD_NAME, "OST%04X",
2705 if (strstr(obd_name, fail_ost_name))
2706 RETURN(err_serious(rc));
2708 RETURN(err_serious(rc));
2712 /* pause before transaction has been started */
2713 CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, cfs_fail_val > 0 ?
2714 cfs_fail_val : (obd_timeout + 1) / 4);
2716 /* Delay write commit to show stale size information */
2717 CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_NO_SIZE_DATA, cfs_fail_val);
2719 /* There must be a big cache in the current thread to process this
2720 * request; if it is NULL then something went wrong and it wasn't
2721 * allocated, so report -ENOMEM in that case */
2725 body = tsi->tsi_ost_body;
2726 LASSERT(body != NULL);
2728 if (body->oa.o_valid & OBD_MD_FLFLAGS &&
2729 body->oa.o_flags & OBD_FL_NORPC)
2733 ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
2734 LASSERT(ioo != NULL); /* must exist after tgt_ost_body_unpack */
2736 objcount = req_capsule_get_size(&req->rq_pill, &RMF_OBD_IOOBJ,
2737 RCL_CLIENT) / sizeof(*ioo);
2739 for (niocount = i = 0; i < objcount; i++)
2740 niocount += ioo[i].ioo_bufcnt;
2742 remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
2743 LASSERT(remote_nb != NULL); /* must exist after tgt_ost_body_unpack */
2744 if (niocount != req_capsule_get_size(&req->rq_pill,
2745 &RMF_NIOBUF_REMOTE, RCL_CLIENT) /
2747 RETURN(err_serious(-EPROTO));
2749 if ((remote_nb[0].rnb_flags & OBD_BRW_MEMALLOC) &&
2750 ptlrpc_connection_is_local(exp->exp_connection))
2751 mpflags = memalloc_noreclaim_save();
2753 req_capsule_set_size(&req->rq_pill, &RMF_RCS, RCL_SERVER,
2754 niocount * sizeof(*rcs));
2755 rc = req_capsule_server_pack(&req->rq_pill);
2757 GOTO(out, rc = err_serious(rc));
2759 CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_PACK, cfs_fail_val);
2760 rcs = req_capsule_server_get(&req->rq_pill, &RMF_RCS);
2762 local_nb = tbc->local;
2764 rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
2770 * If getting the lock took more time than the
2771 * client was willing to wait, drop it. b=11330
2773 if (ktime_get_real_seconds() > req->rq_deadline ||
2774 CFS_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
2776 CERROR("%s: Dropping timed-out write from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
2777 tgt_name(tsi->tsi_tgt), libcfs_idstr(&req->rq_peer),
2778 POSTID(&ioo->ioo_oid),
2779 ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
2780 req->rq_deadline - req->rq_arrival_time.tv_sec);
2781 GOTO(out_lock, rc = -ETIMEDOUT);
2784 /* Because we already sync the grant info with the client at reconnect
2785 * time, the grant info is cleared for a resent req, so fed_grant and
2786 * total_grant will not be modified in the following preprw_write */
2787 if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY)) {
2788 DEBUG_REQ(D_CACHE, req, "clear resent/replay req grant info");
2789 body->oa.o_valid &= ~OBD_MD_FLGRANT;
2792 repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
2793 if (repbody == NULL)
2794 GOTO(out_lock, rc = -ENOMEM);
2795 repbody->oa = body->oa;
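/* Prepare up to PTLRPC_MAX_BRW_PAGES local pages to receive the
 * incoming write data before pulling it from the client */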
2797 npages = PTLRPC_MAX_BRW_PAGES;
2798 kstart = ktime_get();
2799 rc = obd_preprw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
2800 objcount, ioo, remote_nb, &npages, local_nb);
2803 if (body->oa.o_valid & OBD_MD_FLFLAGS &&
2804 body->oa.o_flags & OBD_FL_SHORT_IO) {
2805 unsigned int short_io_size;
2806 unsigned char *short_io_buf;
2808 short_io_size = req_capsule_get_size(&req->rq_pill,
2811 short_io_buf = req_capsule_client_get(&req->rq_pill,
2813 CDEBUG(D_INFO, "Client uses short io for data transfer,"
2814 " size = %d\n", short_io_size);
2816 /* Copy short io buf to pages */
2817 rc = tgt_shortio2pages(local_nb, npages, short_io_buf,
2821 desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
2822 PTLRPC_BULK_GET_SINK,
2824 &ptlrpc_bulk_kiov_nopin_ops);
2826 GOTO(skip_transfer, rc = -ENOMEM);
2828 /* NB Having prepped, we must commit... */
2829 for (i = 0; i < npages; i++)
2830 desc->bd_frag_ops->add_kiov_frag(desc,
2831 local_nb[i].lnb_page,
2832 local_nb[i].lnb_page_offset & ~PAGE_MASK,
2833 local_nb[i].lnb_len);
2835 rc = sptlrpc_svc_prep_bulk(req, desc);
2837 GOTO(skip_transfer, rc);
2839 rc = target_bulk_io(exp, desc);
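/* If the client sent a checksum, recompute it server-side over the
 * received pages; the server value goes back in the reply and any
 * mismatch is logged via tgt_warn_on_cksum() */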
2845 if (body->oa.o_valid & OBD_MD_FLCKSUM && rc == 0) {
2846 static int cksum_counter;
2848 if (body->oa.o_valid & OBD_MD_FLFLAGS)
2849 cksum_type = obd_cksum_type_unpack(body->oa.o_flags);
2851 repbody->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
2852 repbody->oa.o_flags &= ~OBD_FL_CKSUM_ALL;
2853 repbody->oa.o_flags |= obd_cksum_type_pack(obd_name,
2856 rc = tgt_checksum_niobuf_rw(tsi->tsi_tgt, cksum_type,
2857 local_nb, npages, OST_WRITE,
2858 &repbody->oa.o_cksum, false);
2860 GOTO(out_commitrw, rc);
2864 if (unlikely(body->oa.o_cksum != repbody->oa.o_cksum)) {
2865 mmap = (body->oa.o_valid & OBD_MD_FLFLAGS &&
2866 body->oa.o_flags & OBD_FL_MMAP);
2868 tgt_warn_on_cksum(req, desc, local_nb, npages,
2870 repbody->oa.o_cksum, mmap);
2872 } else if ((cksum_counter & (-cksum_counter)) ==
2874 CDEBUG(D_INFO, "Checksum %u from %s OK: %x\n",
2875 cksum_counter, libcfs_idstr(&req->rq_peer),
2876 repbody->oa.o_cksum);
2880 CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK2, cfs_fail_val);
2883 /* calculate the expected actual write bytes (nob) for OFD stats.
2884 * Technically, if commit fails this would be wrong, but that should be
2887 for (i = 0; i < niocount; i++) {
2888 int len = remote_nb[i].rnb_len;
2893 /* multiple transactions can be assigned during write commit */
2894 tsi->tsi_mult_trans = 1;
2896 /* Must commit after prep above in all cases */
2897 rc = obd_commitrw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
2898 objcount, ioo, remote_nb, npages, local_nb, rc, nob,
2900 if (rc == -ENOTCONN)
2901 /* the quota acquire process has been given up because
2902 * either the client has been evicted or the client
2903 * has already timed out the request
2907 for (i = 0; i < niocount; i++) {
2908 if (!(local_nb[i].lnb_flags & OBD_BRW_ASYNC)) {
2914 * Disable sending mtime back to the client. If the client locked the
2915 * whole object, then it has already updated the mtime on its side,
2916 * otherwise it will have to glimpse anyway (see bug 21489, comment 32)
2918 repbody->oa.o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLATIME);
2921 /* set per-requested niobuf return codes */
2922 for (i = j = 0; i < niocount; i++) {
2923 int len = remote_nb[i].rnb_len;
2927 LASSERT(j < npages);
2928 if (local_nb[j].lnb_rc < 0)
2929 rcs[i] = local_nb[j].lnb_rc;
2930 len -= local_nb[j].lnb_len;
2935 LASSERT(j == npages);
2936 ptlrpc_lprocfs_brw(req, nob);
2939 tgt_brw_unlock(exp, ioo, remote_nb, &lockh, LCK_PW);
2941 ptlrpc_free_bulk(desc);
2943 if (unlikely(no_reply || (exp->exp_obd->obd_no_transno && wait_sync))) {
2944 req->rq_no_reply = 1;
2945 /* reply out callback would free */
2946 ptlrpc_req_drop_rs(req);
2947 if (!exp->exp_obd->obd_no_transno)
2948 LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s),"
2949 " client will retry: rc = %d\n",
2951 obd_uuid2str(&exp->exp_client_uuid),
2952 obd_export_nid2str(exp), rc);
2956 memalloc_noreclaim_restore(mpflags);
2960 EXPORT_SYMBOL(tgt_brw_write);
2963 * Common request handler for OST_SEEK RPC.
2965 * Unified request handling for OST_SEEK RPC.
2966 * It takes the object by its FID, performs the needed lseek and packs the
2967 * result into the reply. Only SEEK_HOLE and SEEK_DATA are supported.
2969 * \param[in] tsi target session environment for this request
2971 * \retval 0 if successful
2972 * \retval negative value on error
2974 int tgt_lseek(struct tgt_session_info *tsi)
2976 struct lustre_handle lh = { 0 };
2977 struct dt_object *dob;
2978 struct ost_body *repbody;
2979 loff_t offset = tsi->tsi_ost_body->oa.o_size;
2980 int whence = tsi->tsi_ost_body->oa.o_mode;
2986 if (whence != SEEK_HOLE && whence != SEEK_DATA)
2989 /* A negative offset is prohibited on the wire and must be handled on the
2990 * client prior to sending the RPC.
2995 repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_OST_BODY);
2996 if (repbody == NULL)
2998 repbody->oa = tsi->tsi_ost_body->oa;
3000 srvlock = tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
3001 tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK;
3003 rc = tgt_data_lock(tsi->tsi_env, tsi->tsi_exp, &tsi->tsi_resid,
3004 offset, OBD_OBJECT_EOF, &lh, LCK_PR);
3009 dob = dt_locate(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, &tsi->tsi_fid);
3011 GOTO(out, rc = PTR_ERR(dob));
3013 if (!dt_object_exists(dob))
3014 GOTO(obj_put, rc = -ENOENT);
3016 repbody->oa.o_size = dt_lseek(tsi->tsi_env, dob, offset, whence);
3019 dt_object_put(tsi->tsi_env, dob);
3022 tgt_data_unlock(&lh, LCK_PR);
3026 EXPORT_SYMBOL(tgt_lseek);
3028 /* Check if the request can be reconstructed from saved reply data.
3029 * A copy of the reply data is returned in @trd if the pointer is not NULL.
3031 int req_can_reconstruct(struct ptlrpc_request *req,
3032 struct tg_reply_data *trd)
3034 struct tg_export_data *ted = &req->rq_export->exp_target_data;
3035 struct lsd_client_data *lcd = ted->ted_lcd;
3038 if (tgt_is_multimodrpcs_client(req->rq_export))
3039 return tgt_lookup_reply(req, trd);
3041 mutex_lock(&ted->ted_lcd_lock);
3042 found = req->rq_xid == lcd->lcd_last_xid ||
3043 req->rq_xid == lcd->lcd_last_close_xid;
3045 if (found && trd != NULL) {
3046 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE) {
3047 trd->trd_reply.lrd_xid = lcd->lcd_last_close_xid;
3048 trd->trd_reply.lrd_transno =
3049 lcd->lcd_last_close_transno;
3050 trd->trd_reply.lrd_result = lcd->lcd_last_close_result;
3052 trd->trd_reply.lrd_xid = lcd->lcd_last_xid;
3053 trd->trd_reply.lrd_transno = lcd->lcd_last_transno;
3054 trd->trd_reply.lrd_result = lcd->lcd_last_result;
3055 trd->trd_reply.lrd_data = lcd->lcd_last_data;
3056 trd->trd_pre_versions[0] = lcd->lcd_pre_versions[0];
3057 trd->trd_pre_versions[1] = lcd->lcd_pre_versions[1];
3058 trd->trd_pre_versions[2] = lcd->lcd_pre_versions[2];
3059 trd->trd_pre_versions[3] = lcd->lcd_pre_versions[3];
3062 mutex_unlock(&ted->ted_lcd_lock);
3066 EXPORT_SYMBOL(req_can_reconstruct);
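/*
 * For a RESENT request, try to reconstruct the reply from the saved reply
 * data: restore the original transno and status into the reply message and
 * return true so the caller can skip re-executing the operation.
 */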
3068 bool tgt_check_resent(struct ptlrpc_request *req, struct tg_reply_data *trd)
3070 struct lsd_reply_data *lrd;
3071 bool need_reconstruct = false;
3075 if (likely(!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT)))
3078 if (req_can_reconstruct(req, trd)) {
3079 lrd = &trd->trd_reply;
3080 req->rq_transno = lrd->lrd_transno;
3081 req->rq_status = lrd->lrd_result;
3082 if (req->rq_status != 0)
3083 req->rq_transno = 0;
3084 lustre_msg_set_transno(req->rq_repmsg, req->rq_transno);
3085 lustre_msg_set_status(req->rq_repmsg, req->rq_status);
3087 DEBUG_REQ(D_HA, req, "reconstruct resent RPC");
3088 need_reconstruct = true;
3090 DEBUG_REQ(D_HA, req, "no reply for RESENT req");
3093 return need_reconstruct;
3095 EXPORT_SYMBOL(tgt_check_resent);
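/*
 * Illustrative sketch only, not part of the original handlers: one way a
 * request handler could use tgt_check_resent() to reuse a saved reply for a
 * RESENT RPC instead of re-executing the operation. The handler name and
 * surrounding logic are hypothetical; the block is kept under "#if 0" so it
 * is never compiled.
 */
#if 0
static int example_resent_aware_handler(struct tgt_session_info *tsi)
{
	struct ptlrpc_request *req = tgt_ses_req(tsi);
	struct tg_reply_data trd = { };

	/* If this is a resent RPC with saved reply data, tgt_check_resent()
	 * restores the original transno/status into the reply message and we
	 * can answer without redoing the work. */
	if (tgt_check_resent(req, &trd))
		return req->rq_status;

	/* ... normal execution path would go here ... */
	return 0;
}
#endif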