/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * Copyright (c) 2013, 2017, Intel Corporation.
 */
/*
 * lustre/target/tgt_handler.c
 *
 * Lustre Unified Target request handler code
 *
 * Author: Brian Behlendorf <behlendorf1@llnl.gov>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/user_namespace.h>
#ifdef HAVE_UIDGID_HEADER
# include <linux/uidgid.h>
#endif

#include <obd_class.h>
#include <obd_cksum.h>
#include <lustre_lfsck.h>
#include <lustre_nodemap.h>
#include <lustre_acl.h>

#include "tgt_internal.h"
char *tgt_name(struct lu_target *tgt)
{
        LASSERT(tgt->lut_obd != NULL);
        return tgt->lut_obd->obd_name;
}
EXPORT_SYMBOL(tgt_name);
/*
 * Generic code handling requests that have struct mdt_body passed in:
 *
 * - extract mdt_body from request and save it in @tsi, if present;
 *
 * - create lu_object, corresponding to the fid in mdt_body, and save it in
 *   @tsi;
 *
 * - if the HABEO_CORPUS flag is set for this request type, check whether the
 *   object actually exists on storage (lu_object_exists()).
 */
static int tgt_mdt_body_unpack(struct tgt_session_info *tsi, __u32 flags)
{
        const struct mdt_body  *body;
        struct lu_object        *obj;
        struct req_capsule      *pill = tsi->tsi_pill;

        body = req_capsule_client_get(pill, &RMF_MDT_BODY);

        tsi->tsi_mdt_body = body;

        if (!(body->mbo_valid & OBD_MD_FLID))

        /* mdc_pack_body() doesn't check if the fid is zero and sets
         * OBD_MD_FLID in any case in pre-2.5 clients. Fix that here if
         * needed. */
        if (unlikely(fid_is_zero(&body->mbo_fid1)))

        if (!fid_is_sane(&body->mbo_fid1)) {
                CERROR("%s: invalid FID: "DFID"\n", tgt_name(tsi->tsi_tgt),
                       PFID(&body->mbo_fid1));

        obj = lu_object_find(tsi->tsi_env,
                             &tsi->tsi_tgt->lut_bottom->dd_lu_dev,
                             &body->mbo_fid1, NULL);

        if ((flags & HABEO_CORPUS) && !lu_object_exists(obj)) {
                lu_object_put(tsi->tsi_env, obj);

        tsi->tsi_corpus = obj;

        tsi->tsi_fid = body->mbo_fid1;
/*
 * Validate oa from client.
 * If the request comes from 2.0 clients, currently only RSVD seq and IDIF
 * are valid:
 *
 * a. objects in Single MDT FS, seq = FID_SEQ_OST_MDT0, oi_id != 0
 * b. Echo objects (seq = 2): the old echo client still uses oi_id/oi_seq to
 *    pack ost_id. Because a non-zero oi_seq will make it difficult to tell
 *    whether this is an oi_fid or a real ostid, check OBD_CONNECT_FID,
 *    then convert the ostid to a FID for the old client.
 * c. Old FID-disabled osc will send IDIF.
 * d. New FID-enabled osc/osp will send a normal FID.
 *
 * Also, oi_id/f_oid should always start from 1. oi_id/f_oid = 0 is reserved
 * for the LAST_ID file, which is only accessed inside the OST now.
 */
int tgt_validate_obdo(struct tgt_session_info *tsi, struct obdo *oa)
{
        struct ost_id   *oi     = &oa->o_oi;
        u64              seq    = ostid_seq(oi);
        u64              id     = ostid_id(oi);

        if (unlikely(!(exp_connect_flags(tsi->tsi_exp) & OBD_CONNECT_FID) &&
                     fid_seq_is_echo(seq))) {
                /* Sigh, 2.[123] clients still send echo reqs with oi_id = 0
                 * during create, and we reset this to 1, since this oi_id is
                 * basically useless in the following create process, but
                 * oi_id == 0 will make it difficult to tell whether it is a
                 * real FID or an ost_id. */
                oi->oi_fid.f_seq = FID_SEQ_ECHO;
                oi->oi_fid.f_oid = id ?: 1;
                oi->oi_fid.f_ver = 0;

                struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);

                if (unlikely((oa->o_valid & OBD_MD_FLID) && id == 0))
                        GOTO(out, rc = -EPROTO);

                /* Note: this check might be forced in 2.5 or 2.6, i.e.
                 * all of the requests are required to set up FLGROUP */
                if (unlikely(!(oa->o_valid & OBD_MD_FLGROUP))) {
                        ostid_set_seq_mdt0(oi);
                        oa->o_valid |= OBD_MD_FLGROUP;

                if (unlikely(!(fid_seq_is_idif(seq) || fid_seq_is_mdt0(seq) ||
                               fid_seq_is_norm(seq) || fid_seq_is_echo(seq))))
                        GOTO(out, rc = -EPROTO);

                rc = ostid_to_fid(&tti->tti_fid1, oi,
                                  tsi->tsi_tgt->lut_lsd.lsd_osd_index);
                if (unlikely(rc != 0))

                oi->oi_fid = tti->tti_fid1;

        CERROR("%s: client %s sent bad object "DOSTID": rc = %d\n",
               tgt_name(tsi->tsi_tgt), obd_export_nid2str(tsi->tsi_exp),
EXPORT_SYMBOL(tgt_validate_obdo);
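/*
 * Worked example (added for illustration, not part of the original code):
 * for case (c) above, an old FID-disabled client addressing object id
 * 0x1234 in group 0 on the OST with lsd_osd_index = 2 ends up with an
 * IDIF FID from ostid_to_fid(): f_seq = FID_SEQ_IDIF | (2 << 16) =
 * 0x100020000, f_oid = 0x1234, f_ver = 0, i.e. [0x100020000:0x1234:0x0].
 */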
static int tgt_io_data_unpack(struct tgt_session_info *tsi, struct ost_id *oi)
{
        struct niobuf_remote    *rnb;
        struct obd_ioobj        *ioo;

        ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);

        rnb = req_capsule_client_get(tsi->tsi_pill, &RMF_NIOBUF_REMOTE);

        max_brw = ioobj_max_brw_get(ioo);
        if (unlikely((max_brw & (max_brw - 1)) != 0)) {
                CERROR("%s: client %s sent bad ioobj max %u for "DOSTID
                       ": rc = %d\n", tgt_name(tsi->tsi_tgt),
                       obd_export_nid2str(tsi->tsi_exp), max_brw,
                       POSTID(oi), -EPROTO);

        obj_count = req_capsule_get_size(tsi->tsi_pill, &RMF_OBD_IOOBJ,
                                         RCL_CLIENT) / sizeof(*ioo);
        if (obj_count == 0) {
                CERROR("%s: short ioobj\n", tgt_name(tsi->tsi_tgt));
        } else if (obj_count > 1) {
                CERROR("%s: too many ioobjs (%d)\n", tgt_name(tsi->tsi_tgt),

        if (ioo->ioo_bufcnt == 0) {
                CERROR("%s: ioo has zero bufcnt\n", tgt_name(tsi->tsi_tgt));

        if (ioo->ioo_bufcnt > PTLRPC_MAX_BRW_PAGES) {
                DEBUG_REQ(D_RPCTRACE, tgt_ses_req(tsi),
                          "bulk has too many pages (%d)",
static int tgt_ost_body_unpack(struct tgt_session_info *tsi, __u32 flags)
{
        struct ost_body         *body;
        struct req_capsule      *pill = tsi->tsi_pill;
        struct lu_nodemap       *nodemap;

        body = req_capsule_client_get(pill, &RMF_OST_BODY);

        rc = tgt_validate_obdo(tsi, &body->oa);

        nodemap = nodemap_get_from_exp(tsi->tsi_exp);
        if (IS_ERR(nodemap))
                RETURN(PTR_ERR(nodemap));

        body->oa.o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
                                        NODEMAP_CLIENT_TO_FS,
        body->oa.o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
                                        NODEMAP_CLIENT_TO_FS,
        nodemap_putref(nodemap);

        tsi->tsi_ost_body = body;
        tsi->tsi_fid = body->oa.o_oi.oi_fid;

        if (req_capsule_has_field(pill, &RMF_OBD_IOOBJ, RCL_CLIENT)) {
                rc = tgt_io_data_unpack(tsi, &body->oa.o_oi);

        if (!(body->oa.o_valid & OBD_MD_FLID)) {
                if (flags & HABEO_CORPUS) {
                        CERROR("%s: OBD_MD_FLID flag is not set in ost_body "
                               "but OID/FID is mandatory with HABEO_CORPUS\n",
                               tgt_name(tsi->tsi_tgt));

        ost_fid_build_resid(&tsi->tsi_fid, &tsi->tsi_resid);

        /*
         * The OST doesn't get the object in advance for further use, to avoid
         * situations with nested object_find, which is a potential deadlock.
         */
        tsi->tsi_corpus = NULL;
/*
 * Do necessary preprocessing according to handler ->th_flags.
 */
static int tgt_request_preprocess(struct tgt_session_info *tsi,
                                  struct tgt_handler *h,
                                  struct ptlrpc_request *req)
{
        struct req_capsule      *pill = tsi->tsi_pill;
        __u32                    flags = h->th_flags;

        if (tsi->tsi_preprocessed)

        LASSERT(h->th_act != NULL);
        LASSERT(h->th_opc == lustre_msg_get_opc(req->rq_reqmsg));
        LASSERT(current->journal_info == NULL);

        LASSERT(ergo(flags & (HABEO_CORPUS | HABEO_REFERO),

        if (h->th_fmt != NULL) {
                req_capsule_set(pill, h->th_fmt);
                if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT)) {
                        rc = tgt_mdt_body_unpack(tsi, flags);
                } else if (req_capsule_has_field(pill, &RMF_OST_BODY,
                        rc = tgt_ost_body_unpack(tsi, flags);

        if (flags & MUTABOR && tgt_conn_flags(tsi) & OBD_CONNECT_RDONLY)

        if (flags & HABEO_CLAVIS) {
                struct ldlm_request *dlm_req;

                LASSERT(h->th_fmt != NULL);

                dlm_req = req_capsule_client_get(pill, &RMF_DLM_REQ);
                if (dlm_req != NULL) {
                        union ldlm_wire_policy_data *policy =
                                &dlm_req->lock_desc.l_policy_data;

                        if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                                     LDLM_IBITS &&
                                     (policy->l_inodebits.bits |
                                      policy->l_inodebits.try_bits) == 0)) {
                                /*
                                 * A lock without inodebits makes no sense and
                                 * will oops later in ldlm. If the client
                                 * fails to set such bits, do not trigger an
                                 * assertion.
                                 *
                                 * For the liblustre flock case, it may be
                                 * zero.
                                 */

                        tsi->tsi_dlm_req = dlm_req;

        tsi->tsi_preprocessed = 1;
/*
 * Invoke the handler for this request opc. Also do necessary preprocessing
 * (according to handler ->th_flags), and post-processing (setting of
 * ->last_{xid,committed}).
 */
static int tgt_handle_request0(struct tgt_session_info *tsi,
                               struct tgt_handler *h,
                               struct ptlrpc_request *req)
{
        __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);

        /* When dealing with sec context requests, no export is associated yet,
         * because these requests are sent before *_CONNECT requests.
         * A NULL req->rq_export means the normal *_common_slice handlers will
         * not be called, because there is no reference to the target.
         * So deal with them by hand and jump directly to target_send_reply(). */
        case SEC_CTX_INIT_CONT:
                CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);

        /*
         * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
         * to put the same checks into handlers like mdt_close(), mdt_reint(),
         * etc., without talking to the mdt authors first. Checking the same
         * thing there again is useless, and returning 0 without packing a
         * reply is buggy! Handlers either pack a reply or return an error.
         *
         * We return 0 here and do not send any reply in order to emulate a
         * network failure. Do not send any reply in case any NET related
         * fail_id has occurred.
         */
        if (OBD_FAIL_CHECK_ORSET(h->th_fail_id, OBD_FAIL_ONCE))
        if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == MDS_REINT &&
                     OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_MULTI_NET)))

        rc = tgt_request_preprocess(tsi, h, req);
        /* pack reply if reply format is fixed */
        if (rc == 0 && h->th_flags & HABEO_REFERO) {

                if (req_capsule_has_field(tsi->tsi_pill, &RMF_MDT_MD,
                        req_capsule_set_size(tsi->tsi_pill, &RMF_MDT_MD,
                                             tsi->tsi_mdt_body->mbo_eadatasize);
                if (req_capsule_has_field(tsi->tsi_pill, &RMF_LOGCOOKIES,
                        req_capsule_set_size(tsi->tsi_pill, &RMF_LOGCOOKIES,
                if (req_capsule_has_field(tsi->tsi_pill, &RMF_ACL, RCL_SERVER))
                        req_capsule_set_size(tsi->tsi_pill,
                                             &RMF_ACL, RCL_SERVER,
                                             LUSTRE_POSIX_ACL_MAX_SIZE_OLD);

                if (req_capsule_has_field(tsi->tsi_pill, &RMF_SHORT_IO,
                        struct niobuf_remote *remote_nb =
                                req_capsule_client_get(tsi->tsi_pill,
                        struct ost_body *body = tsi->tsi_ost_body;

                        req_capsule_set_size(tsi->tsi_pill, &RMF_SHORT_IO,
                                (body->oa.o_valid & OBD_MD_FLFLAGS &&
                                 body->oa.o_flags & OBD_FL_SHORT_IO) ?
                                remote_nb[0].rnb_len : 0);

                rc = req_capsule_server_pack(tsi->tsi_pill);

        if (likely(rc == 0)) {
                /*
                 * Process request, there can be two types of rc:
                 * 1) errors with msg unpack/pack, other failures outside the
                 *    operation itself. This is counted as serious errors;
                 * 2) errors during fs operation, should be placed in rq_status
                 */

                if (!is_serious(rc) &&
                    !req->rq_no_reply && req->rq_reply_state == NULL) {
                        DEBUG_REQ(D_ERROR, req, "%s \"handler\" %s did not "
                                  "pack reply and returned 0 error\n",
                                  tgt_name(tsi->tsi_tgt), h->th_name);

                serious = is_serious(rc);
                rc = clear_serious(rc);

        /*
         * ELDLM_* codes which > 0 should be in rq_status only as well as
         * all non-serious errors.
         */
        if (rc > 0 || !serious)

        LASSERT(current->journal_info == NULL);

        if (likely(rc == 0 && req->rq_export))
                target_committed_to_req(req);

        target_send_reply(req, rc, tsi->tsi_reply_fail_id);
static int tgt_filter_recovery_request(struct ptlrpc_request *req,
                                       struct obd_device *obd, int *process)
{
        switch (lustre_msg_get_opc(req->rq_reqmsg)) {

        case MDS_SYNC: /* used in unmounting */

        case MDS_HSM_PROGRESS:
        case MDS_HSM_STATE_SET:
        case MDS_HSM_REQUEST:
                *process = target_queue_recovery_request(req, obd);

        default:
                DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
/*
 * Handle recovery. Return:
 *        +1: continue request processing;
 *       -ve: abort immediately with the given error code;
 *         0: send reply with error code in req->rq_status;
 */
static int tgt_handle_recovery(struct ptlrpc_request *req, int reply_fail_id)
{
        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case SEC_CTX_INIT_CONT:

        if (!req->rq_export->exp_obd->obd_replayable)

        /* sanity check: if the xid matches, the request must be marked as a
         * resent or replayed */
        if (req_can_reconstruct(req, NULL)) {
                if (!(lustre_msg_get_flags(req->rq_reqmsg) &
                      (MSG_RESENT | MSG_REPLAY))) {
                        DEBUG_REQ(D_WARNING, req, "rq_xid %llu matches "
                                  "saved xid, expected REPLAY or RESENT flag "
                                  lustre_msg_get_flags(req->rq_reqmsg));
                        req->rq_status = -ENOTCONN;

        /* else: note the opposite is not always true; a RESENT req after a
         * failover will usually not match the last_xid, since it was likely
         * never committed. A REPLAYed request will almost never match the
         * last xid, however it could for a committed, but still retained,
         */

        /* Check for aborted recovery... */
        if (unlikely(req->rq_export->exp_obd->obd_recovering)) {

                DEBUG_REQ(D_INFO, req, "Got new replay");
                rc = tgt_filter_recovery_request(req, req->rq_export->exp_obd,

                if (rc != 0 || !should_process)
                else if (should_process < 0) {
                        req->rq_status = should_process;
                        rc = ptlrpc_error(req);
/* Initial check for the request; mostly validation */
static struct tgt_handler *tgt_handler_find_check(struct ptlrpc_request *req)
{
        struct tgt_handler      *h;
        struct tgt_opc_slice    *s;
        struct lu_target        *tgt;
        __u32                    opc = lustre_msg_get_opc(req->rq_reqmsg);

        tgt = class_exp2tgt(req->rq_export);
        if (unlikely(tgt == NULL)) {
                DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
                          class_exp2obd(req->rq_export)->obd_name);
                RETURN(ERR_PTR(-EINVAL));
        }

        for (s = tgt->lut_slice; s->tos_hs != NULL; s++)
                if (s->tos_opc_start <= opc && opc < s->tos_opc_end)
                        break;

        /* opcode was not found in slice */
        if (unlikely(s->tos_hs == NULL)) {

                /* don't print error messages for known unhandled RPCs */
                if (opc != OST_FALLOCATE && opc != OST_SEEK && !printed) {
                        CERROR("%s: no handler for opcode 0x%x from %s\n",
                               tgt_name(tgt), opc, libcfs_id2str(req->rq_peer));

                RETURN(ERR_PTR(-ENOTSUPP));
        }

        LASSERT(opc >= s->tos_opc_start && opc < s->tos_opc_end);
        h = s->tos_hs + (opc - s->tos_opc_start);
        if (unlikely(h->th_opc == 0)) {
                CERROR("%s: unsupported opcode 0x%x\n", tgt_name(tgt), opc);
                RETURN(ERR_PTR(-ENOTSUPP));
        }
static int process_req_last_xid(struct ptlrpc_request *req)
{

        /* check that the request's xid is consistent with the export's
         * last_xid */
        last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
        if (last_xid > req->rq_export->exp_last_xid)
                req->rq_export->exp_last_xid = last_xid;

        if (req->rq_xid == 0 ||
            (req->rq_xid <= req->rq_export->exp_last_xid)) {
                DEBUG_REQ(D_ERROR, req, "Unexpected xid %llx vs. "
                          "last_xid %llx\n", req->rq_xid,
                          req->rq_export->exp_last_xid);
                /* Some requests are allowed to be sent during replay,
                 * such as OUT update requests and FLD requests, so it
                 * is possible that a replay request has a smaller XID
                 * than the exp_last_xid.
                 *
                 * Some non-replay requests may have a smaller XID as
                 *
                 * - The client sends a no_resend RPC, like statfs;
                 * - The RPC timed out (or hit some other error) on the
                 *   client, then it's removed from the unreplied list;
                 * - The client sends some other request to bump the
                 *   exp_last_xid on the server;
                 * - The former RPC got a chance to be processed;
                 */
                if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))

        /* The "last_xid" is the minimum xid among unreplied requests;
         * if the request is from the previous connection, its xid can
         * still be larger than "exp_last_xid", so the above check of
         * the xid is not enough to determine whether the request is delayed.
         *
         * For example, if some replay request was delayed and caused a
         * timeout at the client and the replay is restarted, the delayed
         * replay request will have a larger xid than "exp_last_xid"
         */
        if (req->rq_export->exp_conn_cnt >
            lustre_msg_get_conn_cnt(req->rq_reqmsg))

        /* try to release in-memory reply data */
        if (tgt_is_multimodrpcs_client(req->rq_export)) {
                tgt_handle_received_xid(req->rq_export,
                        lustre_msg_get_last_xid(req->rq_reqmsg));
                if (!(lustre_msg_get_flags(req->rq_reqmsg) &
                      (MSG_RESENT | MSG_REPLAY)))
                        tgt_handle_tag(req->rq_export,
                                       lustre_msg_get_tag(req->rq_reqmsg));
int tgt_request_handle(struct ptlrpc_request *req)
{
        struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);

        struct lustre_msg       *msg = req->rq_reqmsg;
        struct tgt_handler      *h;
        struct lu_target        *tgt;
        int                      request_fail_id = 0;
        __u32                    opc = lustre_msg_get_opc(msg);
        struct obd_device       *obd;

        bool                     is_connect = false;

        if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_TGT_RECOVERY_REQ_RACE))) {
                if (cfs_fail_val == 0 &&
                    lustre_msg_get_opc(msg) != OBD_PING &&
                    lustre_msg_get_flags(msg) & MSG_REQ_REPLAY_DONE) {
                        struct l_wait_info lwi = { 0 };

                        l_wait_event(cfs_race_waitq, (cfs_race_state == 1),

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);
        tsi->tsi_pill = &req->rq_pill;
        tsi->tsi_env = req->rq_svc_thread->t_env;

        /* if the request has an export then get the handlers slice from the
         * corresponding target, otherwise it should be a connect operation */
        if (opc == MDS_CONNECT || opc == OST_CONNECT ||
            opc == MGS_CONNECT) {

                req_capsule_set(&req->rq_pill, &RQF_CONNECT);
                rc = target_handle_connect(req);
                        rc = ptlrpc_error(req);

                /* recovery-small test 18c asks to drop connect reply */
                if (unlikely(opc == OST_CONNECT &&
                             OBD_FAIL_CHECK(OBD_FAIL_OST_CONNECT_NET2)))

        if (unlikely(!class_connected_export(req->rq_export))) {
                if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT ||
                    opc == SEC_CTX_FINI) {
                        /* sec context initialization has to be handled
                         * by hand in tgt_handle_request0() */
                        tsi->tsi_reply_fail_id = OBD_FAIL_SEC_CTX_INIT_NET;

                        GOTO(handle_recov, rc = 0);

                CDEBUG(D_HA, "operation %d on unconnected OST from %s\n",
                       opc, libcfs_id2str(req->rq_peer));
                req->rq_status = -ENOTCONN;
                rc = ptlrpc_error(req);

        tsi->tsi_tgt = tgt = class_exp2tgt(req->rq_export);
        tsi->tsi_exp = req->rq_export;
        if (exp_connect_flags(req->rq_export) & OBD_CONNECT_JOBSTATS)
                tsi->tsi_jobid = lustre_msg_get_jobid(req->rq_reqmsg);
        else
                tsi->tsi_jobid = NULL;

        if (tgt == NULL) {
                DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
                          class_exp2obd(req->rq_export)->obd_name);
                req->rq_status = -EINVAL;
                rc = ptlrpc_error(req);

        /* Skip last_xid processing for the recovery thread, otherwise the
         * last_xid on the same request could be processed twice: first when
         * processing the incoming request, and a second time when the
         * request is being processed by the recovery thread. */
        obd = class_exp2obd(req->rq_export);
        if (is_connect) {
                /* reset the exp_last_xid on each connection. */
                req->rq_export->exp_last_xid = 0;
        } else if (obd->obd_recovery_data.trd_processing_task !=
                rc = process_req_last_xid(req);
                        rc = ptlrpc_error(req);

        request_fail_id = tgt->lut_request_fail_id;
        tsi->tsi_reply_fail_id = tgt->lut_reply_fail_id;

        h = tgt_handler_find_check(req);
        if (IS_ERR(h)) {
                req->rq_status = PTR_ERR(h);
                rc = ptlrpc_error(req);

        LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",

        if (CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))

        rc = lustre_msg_check_version(msg, h->th_version);
                DEBUG_REQ(D_ERROR, req, "%s: drop mal-formed request, version"
                          " %08x, expecting %08x\n", tgt_name(tgt),
                          lustre_msg_get_version(msg), h->th_version);
                req->rq_status = -EINVAL;
                rc = ptlrpc_error(req);

handle_recov:
        rc = tgt_handle_recovery(req, tsi->tsi_reply_fail_id);
        if (likely(rc == 1)) {
                rc = tgt_handle_request0(tsi, h, req);

        req_capsule_fini(tsi->tsi_pill);
        if (tsi->tsi_corpus != NULL) {
                lu_object_put(tsi->tsi_env, tsi->tsi_corpus);
                tsi->tsi_corpus = NULL;
EXPORT_SYMBOL(tgt_request_handle);
/** Assign high priority operations to the request if needed. */
int tgt_hpreq_handler(struct ptlrpc_request *req)
{
        struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);
        struct tgt_handler      *h;

        if (req->rq_export == NULL)

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);
        tsi->tsi_pill = &req->rq_pill;
        tsi->tsi_env = req->rq_svc_thread->t_env;
        tsi->tsi_tgt = class_exp2tgt(req->rq_export);
        tsi->tsi_exp = req->rq_export;

        h = tgt_handler_find_check(req);

        rc = tgt_request_preprocess(tsi, h, req);
        if (unlikely(rc != 0))

        if (h->th_hp != NULL)
EXPORT_SYMBOL(tgt_hpreq_handler);
void tgt_counter_incr(struct obd_export *exp, int opcode)
{
        lprocfs_counter_incr(exp->exp_obd->obd_stats, opcode);
        if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats != NULL)
                lprocfs_counter_incr(exp->exp_nid_stats->nid_stats, opcode);
}
EXPORT_SYMBOL(tgt_counter_incr);
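/*
 * Usage sketch (illustrative, not from the original file): a target
 * handler typically bumps its per-opcode stats once the operation has
 * been performed, e.g. in an OFD getattr handler:
 *
 *      tgt_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_GETATTR);
 *
 * This updates both the device-wide obd_stats and, when present, the
 * per-NID stats of the export. The counter name above is an assumption.
 */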
/*
 * Unified target generic handlers.
 */

int tgt_connect_check_sptlrpc(struct ptlrpc_request *req, struct obd_export *exp)
{
        struct lu_target        *tgt = class_exp2tgt(exp);
        struct sptlrpc_flavor    flvr;

        LASSERT(tgt->lut_obd);
        LASSERT(tgt->lut_slice);

        /* always allow ECHO client */
        if (unlikely(strcmp(exp->exp_obd->obd_type->typ_name,
                            LUSTRE_ECHO_NAME) == 0)) {
                exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;

        if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
                read_lock(&tgt->lut_sptlrpc_lock);
                sptlrpc_target_choose_flavor(&tgt->lut_sptlrpc_rset,
                read_unlock(&tgt->lut_sptlrpc_lock);

                spin_lock(&exp->exp_lock);
                exp->exp_sp_peer = req->rq_sp_from;
                exp->exp_flvr = flvr;

                /* when on the MGS, if no restriction is set, or if the client
                 * NID is on the local node, allow any flavor
                 */
                if ((strcmp(exp->exp_obd->obd_type->typ_name,
                            LUSTRE_MGS_NAME) == 0) &&
                    (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_NULL ||
                     LNetIsPeerLocal(exp->exp_connection->c_peer.nid)))
                        exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;

                if (exp->exp_flvr.sf_rpc != SPTLRPC_FLVR_ANY &&
                    exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
                        CERROR("%s: unauthorized rpc flavor %x from %s, "
                               "expect %x\n", tgt_name(tgt),
                               libcfs_nid2str(req->rq_peer.nid),
                               exp->exp_flvr.sf_rpc);

                spin_unlock(&exp->exp_lock);

        if (exp->exp_sp_peer != req->rq_sp_from) {
                CERROR("%s: RPC source %s doesn't match %s\n",
                       sptlrpc_part2name(req->rq_sp_from),
                       sptlrpc_part2name(exp->exp_sp_peer));

        rc = sptlrpc_target_export_check(exp, req);
int tgt_adapt_sptlrpc_conf(struct lu_target *tgt)
{
        struct sptlrpc_rule_set  tmp_rset;

        if (unlikely(tgt == NULL)) {
                CERROR("No target passed\n");

        sptlrpc_rule_set_init(&tmp_rset);
        rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset);
        if (rc) {
                CERROR("%s: failed to get sptlrpc rules: rc = %d\n",

        sptlrpc_target_update_exp_flavor(tgt->lut_obd, &tmp_rset);

        write_lock(&tgt->lut_sptlrpc_lock);
        sptlrpc_rule_set_free(&tgt->lut_sptlrpc_rset);
        tgt->lut_sptlrpc_rset = tmp_rset;
        write_unlock(&tgt->lut_sptlrpc_lock);
EXPORT_SYMBOL(tgt_adapt_sptlrpc_conf);
int tgt_connect(struct tgt_session_info *tsi)
{
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct obd_connect_data *reply;

        /* XXX: better to call this check right after getting a new export but
         * before last_rcvd slot allocation to avoid server load upon insecure
         * connects. This is to be fixed after unifying all targets.
         */
        rc = tgt_connect_check_sptlrpc(req, tsi->tsi_exp);

        /* To avoid exposing partially initialized connection flags, changes up
         * to this point have been staged in reply->ocd_connect_flags. Now that
         * connection handling has completed successfully, atomically update
         * the connect flags in the shared export data structure. LU-1623 */
        reply = req_capsule_server_get(tsi->tsi_pill, &RMF_CONNECT_DATA);
        spin_lock(&tsi->tsi_exp->exp_lock);
        *exp_connect_flags_ptr(tsi->tsi_exp) = reply->ocd_connect_flags;
        if (reply->ocd_connect_flags & OBD_CONNECT_FLAGS2)
                *exp_connect_flags2_ptr(tsi->tsi_exp) =
                        reply->ocd_connect_flags2;
        tsi->tsi_exp->exp_connect_data.ocd_brw_size = reply->ocd_brw_size;
        spin_unlock(&tsi->tsi_exp->exp_lock);

        if (strcmp(tsi->tsi_exp->exp_obd->obd_type->typ_name,
                   LUSTRE_MDT_NAME) == 0) {
                rc = req_check_sepol(tsi->tsi_pill);

        obd_disconnect(class_export_get(tsi->tsi_exp));
EXPORT_SYMBOL(tgt_connect);
int tgt_disconnect(struct tgt_session_info *tsi)
{

        OBD_FAIL_TIMEOUT(OBD_FAIL_OST_DISCONNECT_DELAY, cfs_fail_val);

        rc = target_handle_disconnect(tgt_ses_req(tsi));
        if (rc)
                RETURN(err_serious(rc));
EXPORT_SYMBOL(tgt_disconnect);
/*
 * Unified target OBD handlers
 */
int tgt_obd_ping(struct tgt_session_info *tsi)
{

        /* The target-specific part of OBD_PING request handling.
         * It controls Filter Modification Data (FMD) expiration each time
         * a PING is received.
         *
         * Valid only for replayable targets, e.g. MDT and OFD
         */
        if (tsi->tsi_exp->exp_obd->obd_replayable)
                tgt_fmd_expire(tsi->tsi_exp);

        rc = req_capsule_server_pack(tsi->tsi_pill);
        if (rc)
                RETURN(err_serious(rc));
EXPORT_SYMBOL(tgt_obd_ping);

int tgt_obd_log_cancel(struct tgt_session_info *tsi)
{
        return err_serious(-EOPNOTSUPP);
}
int tgt_send_buffer(struct tgt_session_info *tsi, struct lu_rdbuf *rdbuf)
{
        struct tgt_thread_info  *tti = tgt_th_info(tsi->tsi_env);
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct obd_export       *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info      *lwi = &tti->tti_u.update.tti_wait_info;

        desc = ptlrpc_prep_bulk_exp(req, rdbuf->rb_nbufs, 1,
                                    PTLRPC_BULK_PUT_SOURCE |
                                    PTLRPC_BULK_BUF_KVEC,
                                    MDS_BULK_PORTAL, &ptlrpc_bulk_kvec_ops);

        for (i = 0; i < rdbuf->rb_nbufs; i++)
                desc->bd_frag_ops->add_iov_frag(desc,
                                                rdbuf->rb_bufs[i].lb_buf,
                                                rdbuf->rb_bufs[i].lb_len);

        rc = target_bulk_io(exp, desc, lwi);
        ptlrpc_free_bulk(desc);
EXPORT_SYMBOL(tgt_send_buffer);
int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
{
        struct tgt_thread_info  *tti = tgt_th_info(tsi->tsi_env);
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct obd_export       *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info      *lwi = &tti->tti_u.rdpg.tti_wait_info;

        desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1,
                                    PTLRPC_BULK_PUT_SOURCE |
                                    PTLRPC_BULK_BUF_KIOV,
                                    &ptlrpc_bulk_kiov_pin_ops);

        if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
                /* old client requires reply size in its PAGE_SIZE,
                 * which is rdpg->rp_count */
                nob = rdpg->rp_count;

        for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
             i++, tmpcount -= tmpsize) {
                tmpsize = min_t(int, tmpcount, PAGE_SIZE);
                desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0,

        LASSERT(desc->bd_nob == nob);
        rc = target_bulk_io(exp, desc, lwi);
        ptlrpc_free_bulk(desc);
EXPORT_SYMBOL(tgt_sendpage);
/*
 * OBD_IDX_READ handler
 */
static int tgt_obd_idx_read(struct tgt_session_info *tsi)
{
        struct tgt_thread_info  *tti = tgt_th_info(tsi->tsi_env);
        struct lu_rdpg          *rdpg = &tti->tti_u.rdpg.tti_rdpg;
        struct idx_info         *req_ii, *rep_ii;

        memset(rdpg, 0, sizeof(*rdpg));
        req_capsule_set(tsi->tsi_pill, &RQF_OBD_IDX_READ);

        /* extract idx_info buffer from request & reply */
        req_ii = req_capsule_client_get(tsi->tsi_pill, &RMF_IDX_INFO);
        if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(tsi->tsi_pill);
                RETURN(err_serious(rc));

        rep_ii = req_capsule_server_get(tsi->tsi_pill, &RMF_IDX_INFO);
                RETURN(err_serious(-EFAULT));
        rep_ii->ii_magic = IDX_INFO_MAGIC;

        /* extract hash to start with */
        rdpg->rp_hash = req_ii->ii_hash_start;

        /* extract requested attributes */
        rdpg->rp_attrs = req_ii->ii_attrs;

        /* check that the fid packed in the request is valid and supported */
        if (!fid_is_sane(&req_ii->ii_fid))
        rep_ii->ii_fid = req_ii->ii_fid;

        rep_ii->ii_flags = req_ii->ii_flags;

        /* compute number of pages to allocate, ii_count is the number of 4KB
         * containers */
        if (req_ii->ii_count <= 0)
                GOTO(out, rc = -EFAULT);
        rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
                               exp_max_brw_size(tsi->tsi_exp));
        rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* allocate pages to store the containers */
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        if (rdpg->rp_pages == NULL)
                GOTO(out, rc = -ENOMEM);
        for (i = 0; i < rdpg->rp_npages; i++) {
                rdpg->rp_pages[i] = alloc_page(GFP_NOFS);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(out, rc = -ENOMEM);

        /* populate pages with key/record pairs */
        rc = dt_index_read(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, rep_ii, rdpg);

        LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
                 "asked %d > %d\n", rc, rdpg->rp_count);

        /* send pages to client */
        rc = tgt_sendpage(tsi, rdpg, rc);

        if (rdpg->rp_pages) {
                for (i = 0; i < rdpg->rp_npages; i++)
                        if (rdpg->rp_pages[i])
                                __free_page(rdpg->rp_pages[i]);
                OBD_FREE(rdpg->rp_pages,
                         rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));

struct tgt_handler tgt_obd_handlers[] = {
TGT_OBD_HDL     (0,     OBD_PING,               tgt_obd_ping),
TGT_OBD_HDL     (0,     OBD_IDX_READ,           tgt_obd_idx_read)
};
EXPORT_SYMBOL(tgt_obd_handlers);
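/*
 * Illustrative sketch (not from this file): a target wires generic handler
 * tables like the one above into request dispatch through a tgt_opc_slice
 * array, terminated by a NULL tos_hs entry; "my_tgt_slice" below is a
 * hypothetical name:
 *
 *      static struct tgt_opc_slice my_tgt_slice[] = {
 *              {
 *                      .tos_opc_start  = OBD_FIRST_OPC,
 *                      .tos_opc_end    = OBD_LAST_OPC,
 *                      .tos_hs         = tgt_obd_handlers
 *              },
 *              {
 *                      .tos_opc_start  = LDLM_FIRST_OPC,
 *                      .tos_opc_end    = LDLM_LAST_OPC,
 *                      .tos_hs         = tgt_dlm_handlers
 *              },
 *              {
 *                      .tos_hs         = NULL
 *              }
 *      };
 *
 * tgt_handler_find_check() walks such a slice to map an incoming opcode
 * to its tgt_handler.
 */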
int tgt_sync(const struct lu_env *env, struct lu_target *tgt,
             struct dt_object *obj, __u64 start, __u64 end)
{

        /* if no objid is specified, it means "sync the whole filesystem" */
        if (obj == NULL) {
                rc = dt_sync(env, tgt->lut_bottom);
        } else if (dt_version_get(env, obj) >
                   tgt->lut_obd->obd_last_committed) {
                rc = dt_object_sync(env, obj, start, end);
        }
        atomic_inc(&tgt->lut_sync_count);
EXPORT_SYMBOL(tgt_sync);
/*
 * Unified target DLM handlers.
 */

/**
 * Unified target BAST
 *
 * Ensure data and metadata are synced to disk when the lock is canceled if
 * Sync on Cancel (SOC) is enabled. For an extent lock, syncing the object is
 * normally enough, but for a cross-MDT lock, because the remote object
 * version is not set, a full filesystem sync is needed.
 *
 * \param lock  server side lock
 * \param desc  lock desc
 * \param data  ldlm_cb_set_arg
 * \param flag  indicates whether this is a cancelling or blocking callback
 * \retval      0 on success
 * \retval      negative number on error
 */
static int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct lu_target        *tgt;
        struct dt_object        *obj = NULL;

        tgt = class_exp2tgt(lock->l_export);

        if (unlikely(tgt == NULL)) {
                CDEBUG(D_ERROR, "%s: No target for connected export\n",
                       class_exp2obd(lock->l_export)->obd_name);

        if (flag == LDLM_CB_CANCELING &&
            (lock->l_granted_mode & (LCK_EX | LCK_PW | LCK_GROUP)) &&
            (tgt->lut_sync_lock_cancel == SYNC_LOCK_CANCEL_ALWAYS ||
             (tgt->lut_sync_lock_cancel == SYNC_LOCK_CANCEL_BLOCKING &&
              ldlm_is_cbpending(lock))) &&
            ((exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) ||
             lock->l_resource->lr_type == LDLM_EXTENT)) {
                __u64 end = OBD_OBJECT_EOF;

                rc = lu_env_init(&env, LCT_DT_THREAD);
                if (unlikely(rc != 0))

                ost_fid_from_resid(&fid, &lock->l_resource->lr_name,
                                   tgt->lut_lsd.lsd_osd_index);

                if (lock->l_resource->lr_type == LDLM_EXTENT) {
                        obj = dt_locate(&env, tgt->lut_bottom, &fid);
                                GOTO(err_env, rc = PTR_ERR(obj));

                        if (!dt_object_exists(obj))
                                GOTO(err_put, rc = -ENOENT);

                        start = lock->l_policy_data.l_extent.start;
                        end = lock->l_policy_data.l_extent.end;

                rc = tgt_sync(&env, tgt, obj, start, end);
                        CERROR("%s: syncing "DFID" (%llu-%llu) on lock "
                               "cancel: rc = %d\n",
                               tgt_name(tgt), PFID(&fid),
                               lock->l_policy_data.l_extent.start,
                               lock->l_policy_data.l_extent.end, rc);

                dt_object_put(&env, obj);

        rc = ldlm_server_blocking_ast(lock, desc, data, flag);

static struct ldlm_callback_suite tgt_dlm_cbs = {
        .lcs_completion = ldlm_server_completion_ast,
        .lcs_blocking   = tgt_blocking_ast,
        .lcs_glimpse    = ldlm_server_glimpse_ast
};
int tgt_enqueue(struct tgt_session_info *tsi)
{
        struct ptlrpc_request *req = tgt_ses_req(tsi);

        /*
         * tsi->tsi_dlm_req was already swapped and (if necessary) converted,
         * tsi->tsi_dlm_cbs was set by the *_req_handle() function.
         */
        LASSERT(tsi->tsi_dlm_req != NULL);
        rc = ldlm_handle_enqueue0(tsi->tsi_exp->exp_obd->obd_namespace, req,
                                  tsi->tsi_dlm_req, &tgt_dlm_cbs);
        if (rc)
                RETURN(err_serious(rc));

        switch (LUT_FAIL_CLASS(tsi->tsi_reply_fail_id)) {
        case LUT_FAIL_MDT:
                tsi->tsi_reply_fail_id = OBD_FAIL_MDS_LDLM_REPLY_NET;
                break;
        case LUT_FAIL_OST:
                tsi->tsi_reply_fail_id = OBD_FAIL_OST_LDLM_REPLY_NET;
                break;
        case LUT_FAIL_MGT:
                tsi->tsi_reply_fail_id = OBD_FAIL_MGS_LDLM_REPLY_NET;
                break;
        default:
                tsi->tsi_reply_fail_id = OBD_FAIL_LDLM_REPLY;
        }
        RETURN(req->rq_status);
}
EXPORT_SYMBOL(tgt_enqueue);
int tgt_convert(struct tgt_session_info *tsi)
{
        struct ptlrpc_request *req = tgt_ses_req(tsi);

        LASSERT(tsi->tsi_dlm_req);
        rc = ldlm_handle_convert0(req, tsi->tsi_dlm_req);
        if (rc)
                RETURN(err_serious(rc));

        RETURN(req->rq_status);
}

int tgt_bl_callback(struct tgt_session_info *tsi)
{
        return err_serious(-EOPNOTSUPP);
}

int tgt_cp_callback(struct tgt_session_info *tsi)
{
        return err_serious(-EOPNOTSUPP);
}

/* generic LDLM target handler */
struct tgt_handler tgt_dlm_handlers[] = {
TGT_DLM_HDL     (HABEO_CLAVIS,  LDLM_ENQUEUE,           tgt_enqueue),
TGT_DLM_HDL     (HABEO_CLAVIS,  LDLM_CONVERT,           tgt_convert),
TGT_DLM_HDL_VAR(0,              LDLM_BL_CALLBACK,       tgt_bl_callback),
TGT_DLM_HDL_VAR(0,              LDLM_CP_CALLBACK,       tgt_cp_callback)
};
EXPORT_SYMBOL(tgt_dlm_handlers);
/*
 * Unified target LLOG handlers.
 */
int tgt_llog_open(struct tgt_session_info *tsi)
{

        rc = llog_origin_handle_open(tgt_ses_req(tsi));

        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_open);

int tgt_llog_read_header(struct tgt_session_info *tsi)
{

        rc = llog_origin_handle_read_header(tgt_ses_req(tsi));

        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_read_header);

int tgt_llog_next_block(struct tgt_session_info *tsi)
{

        rc = llog_origin_handle_next_block(tgt_ses_req(tsi));

        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_next_block);

int tgt_llog_prev_block(struct tgt_session_info *tsi)
{

        rc = llog_origin_handle_prev_block(tgt_ses_req(tsi));

        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_prev_block);

/* generic llog target handler */
struct tgt_handler tgt_llog_handlers[] = {
TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_CREATE,      tgt_llog_open),
TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_NEXT_BLOCK,  tgt_llog_next_block),
TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_READ_HEADER, tgt_llog_read_header),
TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_PREV_BLOCK,  tgt_llog_prev_block),
};
EXPORT_SYMBOL(tgt_llog_handlers);
/*
 * sec context handlers
 */
/* XXX: Implement based on mdt_sec_ctx_handle()? */
static int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
{
        return 0;
}

struct tgt_handler tgt_sec_ctx_handlers[] = {
TGT_SEC_HDL_VAR(0,      SEC_CTX_INIT,           tgt_sec_ctx_handle),
TGT_SEC_HDL_VAR(0,      SEC_CTX_INIT_CONT,      tgt_sec_ctx_handle),
TGT_SEC_HDL_VAR(0,      SEC_CTX_FINI,           tgt_sec_ctx_handle),
};
EXPORT_SYMBOL(tgt_sec_ctx_handlers);
int (*tgt_lfsck_in_notify_local)(const struct lu_env *env,
                                 struct dt_device *key,
                                 struct lfsck_req_local *lrl,
                                 struct thandle *th) = NULL;

void tgt_register_lfsck_in_notify_local(int (*notify)(const struct lu_env *,
                                                      struct lfsck_req_local *,
{
        tgt_lfsck_in_notify_local = notify;
}
EXPORT_SYMBOL(tgt_register_lfsck_in_notify_local);

int (*tgt_lfsck_in_notify)(const struct lu_env *env,
                           struct dt_device *key,
                           struct lfsck_request *lr) = NULL;

void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
                                                struct lfsck_request *))
{
        tgt_lfsck_in_notify = notify;
}
EXPORT_SYMBOL(tgt_register_lfsck_in_notify);

static int (*tgt_lfsck_query)(const struct lu_env *env,
                              struct dt_device *key,
                              struct lfsck_request *req,
                              struct lfsck_reply *rep,
                              struct lfsck_query *que) = NULL;

void tgt_register_lfsck_query(int (*query)(const struct lu_env *,
                                           struct lfsck_request *,
                                           struct lfsck_reply *,
                                           struct lfsck_query *))
{
        tgt_lfsck_query = query;
}
EXPORT_SYMBOL(tgt_register_lfsck_query);
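/*
 * Registration sketch (illustrative): the LFSCK module is expected to
 * install its callbacks at setup time so the handlers below can forward
 * LFSCK RPCs without a hard dependency on the lfsck code itself, e.g.:
 *
 *      tgt_register_lfsck_in_notify(lfsck_in_notify);
 *      tgt_register_lfsck_query(lfsck_query);
 *
 * where lfsck_in_notify()/lfsck_query() are the lfsck-side entry points
 * (the exact names are assumptions here).
 */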
/* LFSCK request handlers */
static int tgt_handle_lfsck_notify(struct tgt_session_info *tsi)
{
        const struct lu_env     *env = tsi->tsi_env;
        struct dt_device        *key = tsi->tsi_tgt->lut_bottom;
        struct lfsck_request    *lr;

        lr = req_capsule_client_get(tsi->tsi_pill, &RMF_LFSCK_REQUEST);

        rc = tgt_lfsck_in_notify(env, key, lr);

static int tgt_handle_lfsck_query(struct tgt_session_info *tsi)
{
        struct lfsck_request    *request;
        struct lfsck_reply      *reply;

        request = req_capsule_client_get(tsi->tsi_pill, &RMF_LFSCK_REQUEST);
        if (request == NULL)

        reply = req_capsule_server_get(tsi->tsi_pill, &RMF_LFSCK_REPLY);

        rc = tgt_lfsck_query(tsi->tsi_env, tsi->tsi_tgt->lut_bottom,
                             request, reply, NULL);

        RETURN(rc < 0 ? rc : 0);
}

struct tgt_handler tgt_lfsck_handlers[] = {
TGT_LFSCK_HDL(HABEO_REFERO,     LFSCK_NOTIFY,   tgt_handle_lfsck_notify),
TGT_LFSCK_HDL(HABEO_REFERO,     LFSCK_QUERY,    tgt_handle_lfsck_query),
};
EXPORT_SYMBOL(tgt_lfsck_handlers);
/**
 * initialize per-thread page pool (bug 5137).
 */
int tgt_io_thread_init(struct ptlrpc_thread *thread)
{
        struct tgt_thread_big_cache *tbc;

        LASSERT(thread != NULL);
        LASSERT(thread->t_data == NULL);

        OBD_ALLOC_LARGE(tbc, sizeof(*tbc));

        thread->t_data = tbc;
EXPORT_SYMBOL(tgt_io_thread_init);

/**
 * free per-thread pool created by tgt_io_thread_init().
 */
void tgt_io_thread_done(struct ptlrpc_thread *thread)
{
        struct tgt_thread_big_cache *tbc;

        LASSERT(thread != NULL);

        /*
         * be prepared to handle partially-initialized pools (because this is
         * called from ost_io_thread_init() for cleanup).
         */
        tbc = thread->t_data;

        OBD_FREE_LARGE(tbc, sizeof(*tbc));
        thread->t_data = NULL;
EXPORT_SYMBOL(tgt_io_thread_done);
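/*
 * Pairing sketch (illustrative): an I/O service registers these hooks in
 * its ptlrpc service definition so each service thread gets a big page
 * cache for bulk I/O, assuming the usual ptlrpc_service_ops hooks:
 *
 *      .psc_ops = {
 *              .so_thr_init    = tgt_io_thread_init,
 *              .so_thr_done    = tgt_io_thread_done,
 *              ...
 *      },
 *
 * tgt_brw_read() below then relies on thread->t_data being a valid
 * tgt_thread_big_cache.
 */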
/**
 * Helper function for getting the Data-on-MDT file server DLM lock
 * if asked by the client.
 */
int tgt_mdt_data_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
                      struct lustre_handle *lh, int mode, __u64 *flags)
{
        union ldlm_policy_data policy = {
                .l_inodebits.bits = MDS_INODELOCK_DOM,
        };

        LASSERT(lh != NULL);
        LASSERT(ns != NULL);
        LASSERT(!lustre_handle_is_used(lh));

        rc = ldlm_cli_enqueue_local(NULL, ns, res_id, LDLM_IBITS, &policy, mode,
                                    flags, ldlm_blocking_ast,
                                    ldlm_completion_ast, ldlm_glimpse_ast,
                                    NULL, 0, LVB_T_NONE, NULL, lh);

        RETURN(rc == ELDLM_OK ? 0 : -EIO);
}
EXPORT_SYMBOL(tgt_mdt_data_lock);

void tgt_mdt_data_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
{
        LASSERT(lustre_handle_is_used(lh));
        ldlm_lock_decref(lh, mode);
}
EXPORT_SYMBOL(tgt_mdt_data_unlock);
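/*
 * Usage sketch (illustrative): a Data-on-MDT I/O handler takes the DOM
 * bit lock before doing I/O and drops it afterwards:
 *
 *      struct lustre_handle lh = { 0 };
 *      __u64 flags = 0;
 *      int rc;
 *
 *      rc = tgt_mdt_data_lock(ns, &resid, &lh, LCK_PR, &flags);
 *      if (rc == 0) {
 *              ... serve the read under MDS_INODELOCK_DOM ...
 *              tgt_mdt_data_unlock(&lh, LCK_PR);
 *      }
 *
 * ns/resid come from the target namespace and the resource id of the
 * file (assumed set up; see tgt_brw_lock() below for the in-tree caller).
 */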
/**
 * Helper function for getting a server side [start, start+count] DLM lock
 * if asked by the client.
 */
int tgt_extent_lock(const struct lu_env *env, struct ldlm_namespace *ns,
                    struct ldlm_res_id *res_id, __u64 start, __u64 end,
                    struct lustre_handle *lh, int mode, __u64 *flags)
{
        union ldlm_policy_data policy;

        LASSERT(lh != NULL);
        LASSERT(ns != NULL);
        LASSERT(!lustre_handle_is_used(lh));

        policy.l_extent.gid = 0;
        policy.l_extent.start = start & PAGE_MASK;

        /*
         * If ->o_blocks is EOF it means "lock till the end of the file".
         * Otherwise, it's the size of an extent or hole being punched (in
         * bytes).
         */
        if (end == OBD_OBJECT_EOF || end < start)
                policy.l_extent.end = OBD_OBJECT_EOF;
        else
                policy.l_extent.end = end | ~PAGE_MASK;

        rc = ldlm_cli_enqueue_local(env, ns, res_id, LDLM_EXTENT, &policy,
                                    mode, flags, ldlm_blocking_ast,
                                    ldlm_completion_ast, ldlm_glimpse_ast,
                                    NULL, 0, LVB_T_NONE, NULL, lh);
        RETURN(rc == ELDLM_OK ? 0 : -EIO);
}
EXPORT_SYMBOL(tgt_extent_lock);

void tgt_extent_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
{
        LASSERT(lustre_handle_is_used(lh));
        ldlm_lock_decref(lh, mode);
}
EXPORT_SYMBOL(tgt_extent_unlock);
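/*
 * Usage sketch (illustrative): taking a PW lock over a whole object,
 * e.g. before truncating it; OBD_OBJECT_EOF selects "lock till the end
 * of the file":
 *
 *      struct lustre_handle lh = { 0 };
 *      __u64 flags = 0;
 *      int rc;
 *
 *      rc = tgt_extent_lock(env, ns, &resid, 0, OBD_OBJECT_EOF, &lh,
 *                           LCK_PW, &flags);
 *      if (rc == 0) {
 *              ... modify the object ...
 *              tgt_extent_unlock(&lh, LCK_PW);
 *      }
 */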
static int tgt_brw_lock(const struct lu_env *env, struct obd_export *exp,
                        struct ldlm_res_id *res_id, struct obd_ioobj *obj,
                        struct niobuf_remote *nb, struct lustre_handle *lh,
                        enum ldlm_mode mode)
{
        struct ldlm_namespace   *ns = exp->exp_obd->obd_namespace;

        int                      nrbufs = obj->ioo_bufcnt;

        LASSERT(mode == LCK_PR || mode == LCK_PW);
        LASSERT(!lustre_handle_is_used(lh));

        if (ns->ns_obd->obd_recovering)

        if (nrbufs == 0 || !(nb[0].rnb_flags & OBD_BRW_SRVLOCK))

        for (i = 1; i < nrbufs; i++)
                if (!(nb[i].rnb_flags & OBD_BRW_SRVLOCK))

        /* MDT IO for data-on-mdt */
        if (exp->exp_connect_data.ocd_connect_flags & OBD_CONNECT_IBITS)
                rc = tgt_mdt_data_lock(ns, res_id, lh, mode, &flags);
        else
                rc = tgt_extent_lock(env, ns, res_id, nb[0].rnb_offset,
                                     nb[nrbufs - 1].rnb_offset +
                                     nb[nrbufs - 1].rnb_len - 1,

static void tgt_brw_unlock(struct obd_ioobj *obj, struct niobuf_remote *niob,
                           struct lustre_handle *lh, enum ldlm_mode mode)
{
        LASSERT(mode == LCK_PR || mode == LCK_PW);
        LASSERT((obj->ioo_bufcnt > 0 &&
                 (niob[0].rnb_flags & OBD_BRW_SRVLOCK)) ==
                lustre_handle_is_used(lh));

        if (lustre_handle_is_used(lh))
                tgt_extent_unlock(lh, mode);
}
static int tgt_checksum_niobuf(struct lu_target *tgt,
                               struct niobuf_local *local_nb, int npages,
                               int opc, enum cksum_types cksum_type,
                               u32 *cksum)
{
        struct ahash_request    *req;
        unsigned int             bufsize;

        unsigned char            cfs_alg = cksum_obd2cfs(cksum_type);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
                CERROR("%s: unable to initialize checksum hash %s\n",
                       tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);

        CDEBUG(D_INFO, "Checksum for algo %s\n", cfs_crypto_hash_name(cfs_alg));
        for (i = 0; i < npages; i++) {
                /* corrupt the data before we compute the checksum, to
                 * simulate a client->OST data error */
                if (i == 0 && opc == OST_WRITE &&
                    OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
                        int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
                        int len = local_nb[i].lnb_len;
                        struct page *np = tgt_page_to_corrupt;

                        char *ptr = ll_kmap_atomic(local_nb[i].lnb_page,
                        char *ptr2 = page_address(np);

                        memcpy(ptr2 + off, ptr + off, len);
                        memcpy(ptr2 + off, "bad3", min(4, len));
                        ll_kunmap_atomic(ptr, KM_USER0);

                        /* LU-8376 to preserve original index for
                         * display in dump_all_bulk_pages() */

                        cfs_crypto_hash_update_page(req, np, off,

                        CERROR("%s: can't alloc page for corruption\n",

                cfs_crypto_hash_update_page(req, local_nb[i].lnb_page,
                        local_nb[i].lnb_page_offset & ~PAGE_MASK,
                        local_nb[i].lnb_len);

                /* corrupt the data after we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
                        int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
                        int len = local_nb[i].lnb_len;
                        struct page *np = tgt_page_to_corrupt;

                        char *ptr = ll_kmap_atomic(local_nb[i].lnb_page,
                        char *ptr2 = page_address(np);

                        memcpy(ptr2 + off, ptr + off, len);
                        memcpy(ptr2 + off, "bad4", min(4, len));
                        ll_kunmap_atomic(ptr, KM_USER0);

                        /* LU-8376 to preserve original index for
                         * display in dump_all_bulk_pages() */

                        cfs_crypto_hash_update_page(req, np, off,

                        CERROR("%s: can't alloc page for corruption\n",

        bufsize = sizeof(*cksum);
        err = cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);
char dbgcksum_file_name[PATH_MAX];

static void dump_all_bulk_pages(struct obdo *oa, int count,
                                struct niobuf_local *local_nb,
                                __u32 server_cksum, __u32 client_cksum)
{

        /* will only keep dump of pages on first error for the same range in
         * file/fid, not during the resends/retries. */
        snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
                 "%s-checksum_dump-ost-"DFID":[%llu-%llu]-%x-%x",
                 (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0 ?
                  libcfs_debug_file_path_arr :
                  LIBCFS_DEBUG_FILE_PATH_DEFAULT),
                 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
                 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
                 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
                 local_nb[0].lnb_file_offset,
                 local_nb[count-1].lnb_file_offset +
                 local_nb[count-1].lnb_len - 1, client_cksum, server_cksum);
        filp = filp_open(dbgcksum_file_name,
                         O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);

                CDEBUG(D_INFO, "%s: can't open to dump pages with "
                       "checksum error: rc = %d\n", dbgcksum_file_name,

                CERROR("%s: can't open to dump pages with checksum "
                       "error: rc = %d\n", dbgcksum_file_name, rc);

        for (i = 0; i < count; i++) {
                len = local_nb[i].lnb_len;
                buf = kmap(local_nb[i].lnb_page);

                rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
                        CERROR("%s: wanted to write %u but got %d "
                               "error\n", dbgcksum_file_name, len, rc);

                CDEBUG(D_INFO, "%s: wrote %d bytes\n",
                       dbgcksum_file_name, rc);

                kunmap(local_nb[i].lnb_page);

        rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
                CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
        filp_close(filp, NULL);
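/*
 * Note (added): dumps are produced only when the obd_checksum_dump
 * tunable is enabled on the target (see check_read_checksum() below).
 * The file name encodes the parent FID, the byte range and both
 * checksums:
 * "<debug_path>-checksum_dump-ost-[FID]:[start-end]-<client>-<server>".
 */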
static int check_read_checksum(struct niobuf_local *local_nb, int npages,
                               struct obd_export *exp, struct obdo *oa,
                               const struct lnet_process_id *peer,
                               __u32 client_cksum, __u32 server_cksum,
                               enum cksum_types server_cksum_type)
{
        enum cksum_types cksum_type;

        /* unlikely to happen, and only if the resend does not occur due to a
         * cksum control failure on the client */
        if (unlikely(server_cksum == client_cksum)) {
                CDEBUG(D_PAGE, "checksum %x confirmed upon retry\n",

        if (exp->exp_obd->obd_checksum_dump)
                dump_all_bulk_pages(oa, npages, local_nb, server_cksum,

        cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?

        if (cksum_type != server_cksum_type)
                msg = "the server may have not used the checksum type specified"
                      " in the original request - likely a protocol problem";
        else
                msg = "should have changed on the client or in transit";

        start = local_nb[0].lnb_file_offset;
        end = local_nb[npages-1].lnb_file_offset +
              local_nb[npages-1].lnb_len - 1;

        LCONSOLE_ERROR_MSG(0x132, "%s: BAD READ CHECKSUM: %s: from %s inode "
                DFID " object "DOSTID" extent [%llu-%llu], client returned csum"
                " %x (type %x), server csum %x (type %x)\n",
                exp->exp_obd->obd_name,
                msg, libcfs_nid2str(peer->nid),
                oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
                oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
                oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
                start, end, client_cksum, cksum_type, server_cksum,
static int tgt_pages2shortio(struct niobuf_local *local, int npages,
                             unsigned char *buf, int size)
{
        int     i, off, len, copied = size;

        for (i = 0; i < npages; i++) {
                off = local[i].lnb_page_offset & ~PAGE_MASK;
                len = local[i].lnb_len;

                CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",

                ptr = ll_kmap_atomic(local[i].lnb_page, KM_USER0);
                memcpy(buf + off, ptr, len);
                ll_kunmap_atomic(ptr, KM_USER0);

        return copied - size;
}
static int tgt_checksum_niobuf_t10pi(struct lu_target *tgt,
                                     struct niobuf_local *local_nb,
                                     int npages, int opc,
                                     obd_dif_csum_fn *fn,
                                     int sector_size, u32 *check_sum)
{
        enum cksum_types t10_cksum_type = tgt->lut_dt_conf.ddp_t10_cksum_type;
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        const char *obd_name = tgt->lut_obd->obd_name;
        struct ahash_request *req;
        unsigned int bufsize;
        unsigned char *buffer;
        struct page *__page;

        int used_number = 0;

        __page = alloc_page(GFP_KERNEL);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
                CERROR("%s: unable to initialize checksum hash %s\n",
                       tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
        for (i = 0; i < npages; i++) {
                /* corrupt the data before we compute the checksum, to
                 * simulate a client->OST data error */
                if (i == 0 && opc == OST_WRITE &&
                    OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
                        int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
                        int len = local_nb[i].lnb_len;
                        struct page *np = tgt_page_to_corrupt;

                        char *ptr = ll_kmap_atomic(local_nb[i].lnb_page,
                        char *ptr2 = page_address(np);

                        memcpy(ptr2 + off, ptr + off, len);
                        memcpy(ptr2 + off, "bad3", min(4, len));
                        ll_kunmap_atomic(ptr, KM_USER0);

                        /* LU-8376 to preserve original index for
                         * display in dump_all_bulk_pages() */

                        cfs_crypto_hash_update_page(req, np, off,

                        CERROR("%s: can't alloc page for corruption\n",

                /*
                 * The remaining guard number should be able to hold the
                 * checksums of a whole page
                 */
                if (t10_cksum_type && opc == OST_READ &&
                    local_nb[i].lnb_guard_disk) {
                        used = DIV_ROUND_UP(local_nb[i].lnb_len, sector_size);
                        if (used > (guard_number - used_number)) {

                        memcpy(guard_start + used_number,
                               local_nb[i].lnb_guards,
                               used * sizeof(*local_nb[i].lnb_guards));

                        rc = obd_page_dif_generate_buffer(obd_name,
                                local_nb[i].lnb_page,
                                local_nb[i].lnb_page_offset & ~PAGE_MASK,
                                local_nb[i].lnb_len, guard_start + used_number,
                                guard_number - used_number, &used, sector_size,

                LASSERT(used <= MAX_GUARD_NUMBER);
                /*
                 * If the disk supports T10PI checksum, copy the guards to
                 * local_nb. If the write is a partial page, do not use the
                 * guards for bio submission since the data might not be
                 * full-sector. The bio guards will be generated later based
                 * on the full sectors. If the sector size is 512B rather
                 * than 4KB, or the page size is larger than 4KB, this might
                 * drop some useful guards for partial page write, but it
                 * will only add minimal extra time to the checksum
                 * calculation.
                 */
                if (t10_cksum_type && opc == OST_WRITE &&
                    local_nb[i].lnb_len == PAGE_SIZE) {
                        local_nb[i].lnb_guard_rpc = 1;
                        memcpy(local_nb[i].lnb_guards,
                               guard_start + used_number,
                               used * sizeof(*local_nb[i].lnb_guards));

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));

                /* corrupt the data after we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND))) {
                        int off = local_nb[i].lnb_page_offset & ~PAGE_MASK;
                        int len = local_nb[i].lnb_len;
                        struct page *np = tgt_page_to_corrupt;

                        char *ptr = ll_kmap_atomic(local_nb[i].lnb_page,
                        char *ptr2 = page_address(np);

                        memcpy(ptr2 + off, ptr + off, len);
                        memcpy(ptr2 + off, "bad4", min(4, len));
                        ll_kunmap_atomic(ptr, KM_USER0);

                        /* LU-8376 to preserve original index for
                         * display in dump_all_bulk_pages() */

                        cfs_crypto_hash_update_page(req, np, off,

                        CERROR("%s: can't alloc page for corruption\n",

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        rc = cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        __free_page(__page);
static int tgt_checksum_niobuf_rw(struct lu_target *tgt,
                                  enum cksum_types cksum_type,
                                  struct niobuf_local *local_nb,
                                  int npages, int opc, u32 *check_sum)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;

        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = tgt_checksum_niobuf_t10pi(tgt, local_nb, npages,
                                               opc, fn, sector_size,
                                               check_sum);
        else
                rc = tgt_checksum_niobuf(tgt, local_nb, npages, opc,
                                         cksum_type, check_sum);
int tgt_brw_read(struct tgt_session_info *tsi)
{
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct ptlrpc_bulk_desc *desc = NULL;
        struct obd_export       *exp = tsi->tsi_exp;
        struct niobuf_remote    *remote_nb;
        struct niobuf_local     *local_nb;
        struct obd_ioobj        *ioo;
        struct ost_body         *body, *repbody;
        struct l_wait_info       lwi;
        struct lustre_handle     lockh = { 0 };
        int                      npages, nob = 0, rc, i, no_reply = 0,
        struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
        const char *obd_name = exp->exp_obd->obd_name;

        if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL &&
            ptlrpc_req2svc(req)->srv_req_portal != MDS_IO_PORTAL) {
                CERROR("%s: deny read request from %s to portal %u\n",
                       tgt_name(tsi->tsi_tgt),
                       obd_export_nid2str(req->rq_export),
                       ptlrpc_req2svc(req)->srv_req_portal);

        req->rq_bulk_read = 1;

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_READ_BULK))

        OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, cfs_fail_val > 0 ?
                         cfs_fail_val : (obd_timeout + 1) / 4);

        /* Check if there is eviction in progress, and if so, wait for it to
         * finish */
        if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
                /* We do not care how long it takes */
                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
                        !atomic_read(&exp->exp_obd->obd_evict_inprogress),

        /* There must be a big cache in the current thread to process this
         * request; if it is NULL then something went wrong and it wasn't
         * allocated, report -ENOMEM in that case */

        body = tsi->tsi_ost_body;
        LASSERT(body != NULL);

        ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
        LASSERT(ioo != NULL); /* must exist after tgt_ost_body_unpack() */

        remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
        LASSERT(remote_nb != NULL); /* must exist after tgt_ost_body_unpack() */

        local_nb = tbc->local;

        rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,

        /*
         * If getting the lock took more time than the
         * client was willing to wait, drop it. b=11330
         */
        if (ktime_get_real_seconds() > req->rq_deadline ||
            OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {

                CERROR("Dropping timed-out read from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
                       libcfs_id2str(req->rq_peer), POSTID(&ioo->ioo_oid),
                       ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
                       req->rq_deadline - req->rq_arrival_time.tv_sec);
                GOTO(out_lock, rc = -ETIMEDOUT);

        /*
         * Because we already sync the grant info with the client when
         * reconnecting, grant info will be cleared for resent reqs;
         * otherwise, an outdated grant count in the rpc would de-sync
         * the grant counters in case of shrink
         */
        if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY)) {
                DEBUG_REQ(D_CACHE, req, "clear resent/replay req grant info");
                body->oa.o_valid &= ~OBD_MD_FLGRANT;

        repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        repbody->oa = body->oa;

        npages = PTLRPC_MAX_BRW_PAGES;
        rc = obd_preprw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1,
                        ioo, remote_nb, &npages, local_nb);

        if (body->oa.o_valid & OBD_MD_FLFLAGS &&
            body->oa.o_flags & OBD_FL_SHORT_IO) {

        desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
                                    PTLRPC_BULK_PUT_SOURCE |
                                    PTLRPC_BULK_BUF_KIOV,
                                    &ptlrpc_bulk_kiov_nopin_ops);
                GOTO(out_commitrw, rc = -ENOMEM);
2269 npages_read = npages;
2270 for (i = 0; i < npages; i++) {
2271 int page_rc = local_nb[i].lnb_rc;
2280 if (page_rc != 0 && desc != NULL) { /* some data! */
2281 LASSERT(local_nb[i].lnb_page != NULL);
2282 desc->bd_frag_ops->add_kiov_frag
2283 (desc, local_nb[i].lnb_page,
2284 local_nb[i].lnb_page_offset & ~PAGE_MASK,
2288 if (page_rc != local_nb[i].lnb_len) { /* short read */
2289 local_nb[i].lnb_len = page_rc;
2290 npages_read = i + (page_rc != 0 ? 1 : 0);
2291 /* All subsequent pages should be 0 */
2292 while (++i < npages)
2293 LASSERT(local_nb[i].lnb_rc == 0);
	if (OBD_FAIL_CHECK(OBD_FAIL_OST_READ_SIZE) &&
	    nob != cfs_fail_val)
		rc = -E2BIG;
	if (body->oa.o_valid & OBD_MD_FLCKSUM) {
		u32 flag = body->oa.o_valid & OBD_MD_FLFLAGS ?
			   body->oa.o_flags : 0;
		enum cksum_types cksum_type = obd_cksum_type_unpack(flag);

		repbody->oa.o_flags = obd_cksum_type_pack(obd_name,
							  cksum_type);
		repbody->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;

		rc = tgt_checksum_niobuf_rw(tsi->tsi_tgt, cksum_type,
					    local_nb, npages_read, OST_READ,
					    &repbody->oa.o_cksum);
		if (rc < 0)
			GOTO(out_commitrw, rc);
		CDEBUG(D_PAGE, "checksum at read origin: %x\n",
		       repbody->oa.o_cksum);
		/* if this is a resend it could be due to a cksum error, so
		 * check the server cksum against the returned client cksum
		 * (this should even cover the zero-cksum case) */
		if ((body->oa.o_valid & OBD_MD_FLFLAGS) &&
		    (body->oa.o_flags & OBD_FL_RECOV_RESEND))
			check_read_checksum(local_nb, npages_read, exp,
					    &body->oa, &req->rq_peer,
					    body->oa.o_cksum,
					    repbody->oa.o_cksum, cksum_type);
	} else {
		repbody->oa.o_valid = 0;
	}
	if (body->oa.o_valid & OBD_MD_FLGRANT)
		repbody->oa.o_valid |= OBD_MD_FLGRANT;
	/* We're finished using body->oa as an input variable */
	/* Check if client was evicted while we were doing i/o before touching
	 * the network */
	if (rc == 0) {
		if (body->oa.o_valid & OBD_MD_FLFLAGS &&
		    body->oa.o_flags & OBD_FL_SHORT_IO) {
			unsigned char *short_io_buf;
			int short_io_size;
			short_io_buf = req_capsule_server_get(&req->rq_pill,
							      &RMF_SHORT_IO);
			short_io_size = req_capsule_get_size(&req->rq_pill,
							     &RMF_SHORT_IO,
							     RCL_SERVER);
			rc = tgt_pages2shortio(local_nb, npages_read,
					       short_io_buf, short_io_size);
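			/* tgt_pages2shortio() returns the number of bytes it
			 * copied into the reply buffer; shrink the reply
			 * field to that size and fold a positive count to
			 * success below. */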
			if (rc >= 0)
				req_capsule_shrink(&req->rq_pill,
						   &RMF_SHORT_IO, rc,
						   RCL_SERVER);
			rc = rc > 0 ? 0 : rc;
		} else if (!CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) {
			rc = target_bulk_io(exp, desc, &lwi);
		}
		no_reply = rc != 0;
	} else {
		if (body->oa.o_valid & OBD_MD_FLFLAGS &&
		    body->oa.o_flags & OBD_FL_SHORT_IO)
			req_capsule_shrink(&req->rq_pill, &RMF_SHORT_IO, 0,
					   RCL_SERVER);
	}
out_commitrw:
	/* Must commit after prep above in all cases */
	rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1, ioo,
			  remote_nb, npages, local_nb, rc);
out_lock:
	tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PR);
	if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
		ptlrpc_free_bulk(desc);

	LASSERT(rc <= 0);
	if (rc == 0) {
		rc = nob;
		ptlrpc_lprocfs_brw(req, nob);
	} else if (no_reply) {
		req->rq_no_reply = 1;
		/* reply out callback would free */
		ptlrpc_req_drop_rs(req);
		LCONSOLE_WARN("%s: Bulk IO read error with %s (at %s), "
			      "client will retry: rc %d\n",
			      obd_name,
			      obd_uuid2str(&exp->exp_client_uuid),
			      obd_export_nid2str(exp), rc);
	}
	/* send a bulk after reply to simulate a network delay or reordering
	 * by a router - Note that !desc implies short io, so there is no bulk
	 * to reorder */
	if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) &&
	    desc) {
		wait_queue_head_t waitq;
		struct l_wait_info lwi1;

		CDEBUG(D_INFO, "reorder BULK\n");
		init_waitqueue_head(&waitq);

		lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
		l_wait_event(waitq, 0, &lwi1);
		target_bulk_io(exp, desc, &lwi);
		ptlrpc_free_bulk(desc);
	}

	RETURN(rc);
}
EXPORT_SYMBOL(tgt_brw_read);
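/* Copy an inline (short i/o) write buffer from the request into the
 * prepared local pages; the counterpart of tgt_pages2shortio() used on
 * the read path. */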
static int tgt_shortio2pages(struct niobuf_local *local, int npages,
			     unsigned char *buf, unsigned int size)
{
	int i, off, len;
	char *ptr;

	for (i = 0; i < npages; i++) {
		off = local[i].lnb_page_offset & ~PAGE_MASK;
		len = local[i].lnb_len;

		if (len == 0)
			continue;

		CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",
		       i, off, len, size);
		ptr = ll_kmap_atomic(local[i].lnb_page, KM_USER0);
		if (ptr == NULL)
			return -EINVAL;
		memcpy(ptr + off, buf, len < size ? len : size);
		ll_kunmap_atomic(ptr, KM_USER0);
		buf += len;
		size -= len;
	}

	return 0;
}
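/* Report a client/server checksum mismatch on a bulk write; optionally
 * dumps the bulk pages for debugging, and downgrades the message for mmap
 * i/o, where the client may legitimately have modified pages in flight. */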
static void tgt_warn_on_cksum(struct ptlrpc_request *req,
			      struct ptlrpc_bulk_desc *desc,
			      struct niobuf_local *local_nb, int npages,
			      u32 client_cksum, u32 server_cksum,
			      bool mmap)
{
	struct obd_export *exp = req->rq_export;
	struct ost_body *body;
	char *router = "";
	char *via = "";

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body != NULL);

	if (desc && req->rq_peer.nid != desc->bd_sender) {
		via = " via ";
		router = libcfs_nid2str(desc->bd_sender);
	}

	if (exp->exp_obd->obd_checksum_dump)
		dump_all_bulk_pages(&body->oa, npages, local_nb, server_cksum,
				    client_cksum);

	if (mmap) {
		CDEBUG_LIMIT(D_INFO, "client csum %x, server csum %x\n",
			     client_cksum, server_cksum);
		return;
	}
	LCONSOLE_ERROR_MSG(0x168, "%s: BAD WRITE CHECKSUM: from %s%s%s inode "
			   DFID" object "DOSTID" extent [%llu-%llu"
			   "]: client csum %x, server csum %x\n",
			   exp->exp_obd->obd_name, libcfs_id2str(req->rq_peer),
			   via, router,
			   body->oa.o_valid & OBD_MD_FLFID ?
			   body->oa.o_parent_seq : (__u64)0,
			   body->oa.o_valid & OBD_MD_FLFID ?
			   body->oa.o_parent_oid : 0,
			   body->oa.o_valid & OBD_MD_FLFID ?
			   body->oa.o_parent_ver : 0,
			   POSTID(&body->oa.o_oi),
			   local_nb[0].lnb_file_offset,
			   local_nb[npages - 1].lnb_file_offset +
			   local_nb[npages - 1].lnb_len - 1,
			   client_cksum, server_cksum);
}
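/* Handle an OST_WRITE (bulk write) request: lock the extent, prepare
 * local pages, pull the data from the client (or copy it from the inline
 * short i/o buffer), verify the checksum if one was sent, and commit the
 * write, filling in one return code per remote niobuf. */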
int tgt_brw_write(struct tgt_session_info *tsi)
{
	struct ptlrpc_request *req = tgt_ses_req(tsi);
	struct ptlrpc_bulk_desc *desc = NULL;
	struct obd_export *exp = req->rq_export;
	struct niobuf_remote *remote_nb;
	struct niobuf_local *local_nb;
	struct obd_ioobj *ioo;
	struct ost_body *body, *repbody;
	struct l_wait_info lwi;
	struct lustre_handle lockh = {0};
	__u32 *rcs;
	int objcount, niocount, npages;
	int rc, i, j;
	enum cksum_types cksum_type = OBD_CKSUM_CRC32;
	bool no_reply = false, mmap;
	struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
	bool wait_sync = false;
	const char *obd_name = exp->exp_obd->obd_name;

	ENTRY;

	if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL &&
	    ptlrpc_req2svc(req)->srv_req_portal != MDS_IO_PORTAL) {
		CERROR("%s: deny write request from %s to portal %u\n",
		       tgt_name(tsi->tsi_tgt),
		       obd_export_nid2str(req->rq_export),
		       ptlrpc_req2svc(req)->srv_req_portal);
		RETURN(err_serious(-EPROTO));
	}

	if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOSPC))
		RETURN(err_serious(-ENOSPC));
	if (OBD_FAIL_TIMEOUT(OBD_FAIL_OST_EROFS, 1))
		RETURN(err_serious(-EROFS));

	req->rq_bulk_write = 1;
	if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK))
		RETURN(err_serious(-EIO));
	if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK2))
		RETURN(err_serious(-EFAULT));

	/* pause before transaction has been started */
	CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, cfs_fail_val > 0 ?
			 cfs_fail_val : (obd_timeout + 1) / 4);

	/* Delay write commit to show stale size information */
	CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_NO_SIZE_DATA, cfs_fail_val);
	/* There must be a big cache in the current thread to process this
	 * request; if it is NULL then something went wrong and it wasn't
	 * allocated, so report -ENOMEM in that case */
	if (tbc == NULL)
		RETURN(-ENOMEM);

	body = tsi->tsi_ost_body;
	LASSERT(body != NULL);
	ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
	LASSERT(ioo != NULL); /* must exist after tgt_ost_body_unpack */

	objcount = req_capsule_get_size(&req->rq_pill, &RMF_OBD_IOOBJ,
					RCL_CLIENT) / sizeof(*ioo);

	for (niocount = i = 0; i < objcount; i++)
		niocount += ioo[i].ioo_bufcnt;
	remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
	LASSERT(remote_nb != NULL); /* must exist after tgt_ost_body_unpack */
	if (niocount != req_capsule_get_size(&req->rq_pill,
					     &RMF_NIOBUF_REMOTE, RCL_CLIENT) /
			sizeof(*remote_nb))
		RETURN(err_serious(-EPROTO));
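	/* A memory-allocating (MEMALLOC) write from a client on the same
	 * node may itself be part of flushing under memory pressure, so
	 * mark the task accordingly to let allocations dip into reserves;
	 * the flag is cleared again before returning. */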
	if ((remote_nb[0].rnb_flags & OBD_BRW_MEMALLOC) &&
	    ptlrpc_connection_is_local(exp->exp_connection))
		memory_pressure_set();
	req_capsule_set_size(&req->rq_pill, &RMF_RCS, RCL_SERVER,
			     niocount * sizeof(*rcs));
	rc = req_capsule_server_pack(&req->rq_pill);
	if (rc != 0)
		GOTO(out, rc = err_serious(rc));

	CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_PACK, cfs_fail_val);
	rcs = req_capsule_server_get(&req->rq_pill, &RMF_RCS);
	local_nb = tbc->local;

	rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
			  &lockh, LCK_PW);
	if (rc != 0)
		GOTO(out, rc);
	/*
	 * If getting the lock took more time than
	 * the client was willing to wait, drop it. b=11330
	 */
	if (ktime_get_real_seconds() > req->rq_deadline ||
	    OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
		no_reply = true;
		CERROR("%s: Dropping timed-out write from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
		       tgt_name(tsi->tsi_tgt), libcfs_id2str(req->rq_peer),
		       POSTID(&ioo->ioo_oid),
		       ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
		       req->rq_deadline - req->rq_arrival_time.tv_sec);
		GOTO(out_lock, rc = -ETIMEDOUT);
	}
	/* Because we already sync grant info with the client at reconnect,
	 * grant info is cleared for a resent request, so fed_grant and
	 * total_grant will not be modified in the following preprw_write */
	if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY)) {
		DEBUG_REQ(D_CACHE, req, "clear resent/replay req grant info");
		body->oa.o_valid &= ~OBD_MD_FLGRANT;
	}
	repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (repbody == NULL)
		GOTO(out_lock, rc = -ENOMEM);
	repbody->oa = body->oa;

	npages = PTLRPC_MAX_BRW_PAGES;
	rc = obd_preprw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
			objcount, ioo, remote_nb, &npages, local_nb);
	if (rc < 0)
		GOTO(out_lock, rc);
	if (body->oa.o_valid & OBD_MD_FLFLAGS &&
	    body->oa.o_flags & OBD_FL_SHORT_IO) {
		unsigned int short_io_size;
		unsigned char *short_io_buf;

		short_io_size = req_capsule_get_size(&req->rq_pill,
						     &RMF_SHORT_IO,
						     RCL_CLIENT);
		short_io_buf = req_capsule_client_get(&req->rq_pill,
						      &RMF_SHORT_IO);
		CDEBUG(D_INFO, "Client uses short io for data transfer,"
		       " size = %d\n", short_io_size);

		/* Copy short io buf to pages */
		rc = tgt_shortio2pages(local_nb, npages, short_io_buf,
				       short_io_size);
		desc = NULL;
	} else {
		desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
					    PTLRPC_BULK_GET_SINK |
					    PTLRPC_BULK_BUF_KIOV,
					    OST_BULK_PORTAL,
					    &ptlrpc_bulk_kiov_nopin_ops);
		if (desc == NULL)
			GOTO(skip_transfer, rc = -ENOMEM);
		/* NB Having prepped, we must commit... */
		for (i = 0; i < npages; i++)
			desc->bd_frag_ops->add_kiov_frag(desc,
					local_nb[i].lnb_page,
					local_nb[i].lnb_page_offset & ~PAGE_MASK,
					local_nb[i].lnb_len);
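		/* The descriptor is a GET sink: the server initiates the
		 * bulk transfer and pulls the write data from the client
		 * into the pages attached above. */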
		rc = sptlrpc_svc_prep_bulk(req, desc);
		if (rc != 0)
			GOTO(skip_transfer, rc);

		rc = target_bulk_io(exp, desc, &lwi);
		no_reply = rc != 0;
	}

skip_transfer:
	if (body->oa.o_valid & OBD_MD_FLCKSUM && rc == 0) {
		static int cksum_counter;

		if (body->oa.o_valid & OBD_MD_FLFLAGS)
			cksum_type = obd_cksum_type_unpack(body->oa.o_flags);

		repbody->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
		repbody->oa.o_flags &= ~OBD_FL_CKSUM_ALL;
		repbody->oa.o_flags |= obd_cksum_type_pack(obd_name,
							   cksum_type);
		rc = tgt_checksum_niobuf_rw(tsi->tsi_tgt, cksum_type,
					    local_nb, npages, OST_WRITE,
					    &repbody->oa.o_cksum);
		if (rc < 0)
			GOTO(out_commitrw, rc);

		cksum_counter++;
		if (unlikely(body->oa.o_cksum != repbody->oa.o_cksum)) {
			mmap = (body->oa.o_valid & OBD_MD_FLFLAGS &&
				body->oa.o_flags & OBD_FL_MMAP);

			tgt_warn_on_cksum(req, desc, local_nb, npages,
					  body->oa.o_cksum,
					  repbody->oa.o_cksum, mmap);
			cksum_counter = 0;
		} else if ((cksum_counter & (-cksum_counter)) ==
			   cksum_counter) {
			/* log only when cksum_counter is a power of two,
			 * i.e. with exponentially decreasing frequency */
			CDEBUG(D_INFO, "Checksum %u from %s OK: %x\n",
			       cksum_counter, libcfs_id2str(req->rq_peer),
			       repbody->oa.o_cksum);
		}
	}
out_commitrw:
	/* Must commit after prep above in all cases */
	rc = obd_commitrw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
			  objcount, ioo, remote_nb, npages, local_nb, rc);
	if (rc == -ENOTCONN)
		/* quota acquire process has been given up because
		 * either the client has been evicted or the client
		 * has timed out the request already */
		rc = err_serious(rc);
	for (i = 0; i < niocount; i++) {
		if (!(local_nb[i].lnb_flags & OBD_BRW_ASYNC)) {
			wait_sync = true;
			break;
		}
	}
	/*
	 * Disable sending mtime back to the client. If the client locked the
	 * whole object, then it has already updated the mtime on its side,
	 * otherwise it will have to glimpse anyway (see bug 21489, comment 32)
	 */
	repbody->oa.o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLATIME);

	if (rc == 0) {
		int nob = 0;
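		/* Each remote niobuf may be backed by several local pages;
		 * walk them in lockstep below so that a failing page marks
		 * its whole niobuf, while nob totals the bytes written for
		 * the stats call. */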
		/* set per-requested niobuf return codes */
		for (i = j = 0; i < niocount; i++) {
			int len = remote_nb[i].rnb_len;

			nob += len;
			rcs[i] = 0;
			do {
				LASSERT(j < npages);
				if (local_nb[j].lnb_rc < 0)
					rcs[i] = local_nb[j].lnb_rc;
				len -= local_nb[j].lnb_len;
				j++;
			} while (len > 0);
			LASSERT(len == 0);
		}
		LASSERT(j == npages);
		ptlrpc_lprocfs_brw(req, nob);
	}
out_lock:
	tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PW);
	if (desc)
		ptlrpc_free_bulk(desc);
out:
	if (unlikely(no_reply || (exp->exp_obd->obd_no_transno && wait_sync))) {
		req->rq_no_reply = 1;
		/* reply out callback would free */
		ptlrpc_req_drop_rs(req);
		if (!exp->exp_obd->obd_no_transno)
			LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s),"
				      " client will retry: rc = %d\n",
				      obd_name,
				      obd_uuid2str(&exp->exp_client_uuid),
				      obd_export_nid2str(exp), rc);
	}

	memory_pressure_clr();

	RETURN(rc > 0 ? 0 : rc);
}
EXPORT_SYMBOL(tgt_brw_write);
/* Check if a request can be reconstructed from saved reply data.
 * A copy of the reply data is returned in @trd if the pointer is not NULL.
 */
bool req_can_reconstruct(struct ptlrpc_request *req,
			 struct tg_reply_data *trd)
{
	struct tg_export_data *ted = &req->rq_export->exp_target_data;
	struct lsd_client_data *lcd = ted->ted_lcd;
	bool found;
	if (tgt_is_multimodrpcs_client(req->rq_export))
		return tgt_lookup_reply(req, trd);
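	/* A client that supports multiple modifying RPCs in flight keeps one
	 * reply slot per RPC, so the lookup goes through the reply-data
	 * table above; older single-slot clients are matched against the
	 * last (close) xid recorded in their lsd_client_data below. */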
	mutex_lock(&ted->ted_lcd_lock);
	found = req->rq_xid == lcd->lcd_last_xid ||
		req->rq_xid == lcd->lcd_last_close_xid;
	if (found && trd != NULL) {
		if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE) {
			trd->trd_reply.lrd_xid = lcd->lcd_last_close_xid;
			trd->trd_reply.lrd_transno =
						lcd->lcd_last_close_transno;
			trd->trd_reply.lrd_result = lcd->lcd_last_close_result;
		} else {
			trd->trd_reply.lrd_xid = lcd->lcd_last_xid;
			trd->trd_reply.lrd_transno = lcd->lcd_last_transno;
			trd->trd_reply.lrd_result = lcd->lcd_last_result;
			trd->trd_reply.lrd_data = lcd->lcd_last_data;
			trd->trd_pre_versions[0] = lcd->lcd_pre_versions[0];
			trd->trd_pre_versions[1] = lcd->lcd_pre_versions[1];
			trd->trd_pre_versions[2] = lcd->lcd_pre_versions[2];
			trd->trd_pre_versions[3] = lcd->lcd_pre_versions[3];
		}
	}
	mutex_unlock(&ted->ted_lcd_lock);

	return found;
}
EXPORT_SYMBOL(req_can_reconstruct);
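/*
 * Minimal usage sketch (illustrative only, not code from this file): a
 * handler seeing a resent request could check for saved reply data before
 * re-executing the operation. The surrounding handler context is an
 * assumption for the example.
 *
 *	struct tg_reply_data trd;
 *
 *	if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT &&
 *	    req_can_reconstruct(req, &trd)) {
 *		// reply with the saved transno/result instead of redoing
 *		lustre_msg_set_transno(req->rq_repmsg, trd.trd_reply.lrd_transno);
 *		return trd.trd_reply.lrd_result;
 *	}
 */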