/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * lustre/target/tgt_handler.c
 *
 * Lustre Unified Target request handler code
 *
 * Author: Brian Behlendorf <behlendorf1@llnl.gov>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/user_namespace.h>
#ifdef HAVE_UIDGID_HEADER
# include <linux/uidgid.h>
#endif

#include <obd_class.h>
#include <obd_cksum.h>
#include <md_object.h>
#include <lustre_lfsck.h>
#include <lustre_nodemap.h>

#include "tgt_internal.h"
char *tgt_name(struct lu_target *tgt)
{
        LASSERT(tgt->lut_obd != NULL);
        return tgt->lut_obd->obd_name;
}
EXPORT_SYMBOL(tgt_name);
/*
 * Generic code handling requests that have struct mdt_body passed in:
 *
 * - extract mdt_body from request and save it in @tsi, if present;
 *
 * - create lu_object, corresponding to the fid in mdt_body, and save it in
 *   @tsi;
 *
 * - if HABEO_CORPUS flag is set for this request type check whether object
 *   actually exists on storage (lu_object_exists()).
 */
static int tgt_mdt_body_unpack(struct tgt_session_info *tsi, __u32 flags)
{
        const struct mdt_body  *body;
        struct lu_object       *obj;
        struct req_capsule     *pill = tsi->tsi_pill;

        ENTRY;

        body = req_capsule_client_get(pill, &RMF_MDT_BODY);
        if (body == NULL)
                RETURN(-EFAULT);

        tsi->tsi_mdt_body = body;

        if (!(body->mbo_valid & OBD_MD_FLID))
                RETURN(0);

        /* mdc_pack_body() doesn't check if the fid is zero and sets
         * OBD_MD_FLID unconditionally in pre-2.5 clients. Fix that here
         * if needed. */
        if (unlikely(fid_is_zero(&body->mbo_fid1)))
                RETURN(0);

        if (!fid_is_sane(&body->mbo_fid1)) {
                CERROR("%s: invalid FID: "DFID"\n", tgt_name(tsi->tsi_tgt),
                       PFID(&body->mbo_fid1));
                RETURN(-EINVAL);
        }

        obj = lu_object_find(tsi->tsi_env,
                             &tsi->tsi_tgt->lut_bottom->dd_lu_dev,
                             &body->mbo_fid1, NULL);
        if (IS_ERR(obj))
                RETURN(PTR_ERR(obj));

        if ((flags & HABEO_CORPUS) && !lu_object_exists(obj)) {
                lu_object_put(tsi->tsi_env, obj);
                RETURN(-ENOENT);
        }

        tsi->tsi_corpus = obj;
        tsi->tsi_fid = body->mbo_fid1;

        RETURN(0);
}
/**
 * Validate oa from client.
 * If the request comes from 2.0 clients, currently only RSVD seq and IDIF
 * req are valid.
 *  a. objects in Single MDT FS: seq = FID_SEQ_OST_MDT0, oi_id != 0
 *  b. Echo objects (seq = 2): old echo clients still use oi_id/oi_seq to
 *     pack ost_id. Because a non-zero oi_seq makes it difficult to tell
 *     whether this is an oi_fid or a real ostid, check OBD_CONNECT_FID,
 *     then convert the ostid to a FID for old clients.
 *  c. Old FID-disabled osc will send IDIF.
 *  d. New FID-enabled osc/osp will send a normal FID.
 *
 * Also, oi_id/f_oid should always start from 1. oi_id/f_oid = 0 is
 * reserved for the LAST_ID file, which is only accessed inside the OST.
 */
int tgt_validate_obdo(struct tgt_session_info *tsi, struct obdo *oa)
{
        struct ost_id  *oi  = &oa->o_oi;
        u64             seq = ostid_seq(oi);
        u64             id  = ostid_id(oi);
        int             rc;

        ENTRY;

        if (unlikely(!(exp_connect_flags(tsi->tsi_exp) & OBD_CONNECT_FID) &&
                     fid_seq_is_echo(seq))) {
                /* Sigh, 2.[123] clients still send echo requests with
                 * oi_id = 0 during create, and we reset this to 1, since
                 * this oi_id is basically useless in the following create
                 * process, but oi_id == 0 will make it difficult to tell
                 * whether it is a real FID or an ost_id. */
                oi->oi_fid.f_seq = FID_SEQ_ECHO;
                oi->oi_fid.f_oid = id ?: 1;
                oi->oi_fid.f_ver = 0;
        } else {
                struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);

                if (unlikely((oa->o_valid & OBD_MD_FLID) && id == 0))
                        GOTO(out, rc = -EPROTO);

                /* Note: this check might be forced in 2.5 or 2.6, i.e.
                 * all of the requests are required to setup FLGROUP */
                if (unlikely(!(oa->o_valid & OBD_MD_FLGROUP))) {
                        ostid_set_seq_mdt0(oi);
                        oa->o_valid |= OBD_MD_FLGROUP;
                        seq = ostid_seq(oi);
                }

                if (unlikely(!(fid_seq_is_idif(seq) || fid_seq_is_mdt0(seq) ||
                               fid_seq_is_norm(seq) || fid_seq_is_echo(seq))))
                        GOTO(out, rc = -EPROTO);

                rc = ostid_to_fid(&tti->tti_fid1, oi,
                                  tsi->tsi_tgt->lut_lsd.lsd_osd_index);
                if (unlikely(rc != 0))
                        GOTO(out, rc);

                oi->oi_fid = tti->tti_fid1;
        }

        RETURN(0);

out:
        CERROR("%s: client %s sent bad object "DOSTID": rc = %d\n",
               tgt_name(tsi->tsi_tgt), obd_export_nid2str(tsi->tsi_exp),
               POSTID(oi), rc);
        return rc;
}
EXPORT_SYMBOL(tgt_validate_obdo);
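
/*
 * Sanity-check the bulk I/O descriptors attached to a read/write request:
 * exactly one obd_ioobj is expected, its max_brw field must be a power of
 * two, and the niobuf count must be non-zero and fit into a single bulk
 * transfer (PTLRPC_MAX_BRW_PAGES).
 */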
static int tgt_io_data_unpack(struct tgt_session_info *tsi, struct ost_id *oi)
{
        unsigned int             max_brw;
        struct niobuf_remote    *rnb;
        struct obd_ioobj        *ioo;
        int                      obj_count;

        ENTRY;

        ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
        if (ioo == NULL)
                RETURN(-EPROTO);

        rnb = req_capsule_client_get(tsi->tsi_pill, &RMF_NIOBUF_REMOTE);
        if (rnb == NULL)
                RETURN(-EPROTO);

        max_brw = ioobj_max_brw_get(ioo);
        if (unlikely((max_brw & (max_brw - 1)) != 0)) {
                CERROR("%s: client %s sent bad ioobj max %u for "DOSTID
                       ": rc = %d\n", tgt_name(tsi->tsi_tgt),
                       obd_export_nid2str(tsi->tsi_exp), max_brw,
                       POSTID(oi), -EPROTO);
                RETURN(-EPROTO);
        }
        ioo->ioo_oid = *oi;

        obj_count = req_capsule_get_size(tsi->tsi_pill, &RMF_OBD_IOOBJ,
                                         RCL_CLIENT) / sizeof(*ioo);
        if (obj_count == 0) {
                CERROR("%s: short ioobj\n", tgt_name(tsi->tsi_tgt));
                RETURN(-EPROTO);
        } else if (obj_count > 1) {
                CERROR("%s: too many ioobjs (%d)\n", tgt_name(tsi->tsi_tgt),
                       obj_count);
                RETURN(-EPROTO);
        }

        if (ioo->ioo_bufcnt == 0) {
                CERROR("%s: ioo has zero bufcnt\n", tgt_name(tsi->tsi_tgt));
                RETURN(-EPROTO);
        }

        if (ioo->ioo_bufcnt > PTLRPC_MAX_BRW_PAGES) {
                DEBUG_REQ(D_RPCTRACE, tgt_ses_req(tsi),
                          "bulk has too many pages (%d)",
                          ioo->ioo_bufcnt);
                RETURN(-EPROTO);
        }

        RETURN(0);
}
static int tgt_ost_body_unpack(struct tgt_session_info *tsi, __u32 flags)
{
        struct ost_body         *body;
        struct req_capsule      *pill = tsi->tsi_pill;
        struct lu_nodemap       *nodemap;
        int                      rc;

        ENTRY;

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        if (body == NULL)
                RETURN(-EFAULT);

        rc = tgt_validate_obdo(tsi, &body->oa);
        if (rc != 0)
                RETURN(rc);

        nodemap = nodemap_get_from_exp(tsi->tsi_exp);
        if (IS_ERR(nodemap))
                RETURN(PTR_ERR(nodemap));

        body->oa.o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
                                        NODEMAP_CLIENT_TO_FS,
                                        body->oa.o_uid);
        body->oa.o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
                                        NODEMAP_CLIENT_TO_FS,
                                        body->oa.o_gid);
        nodemap_putref(nodemap);

        tsi->tsi_ost_body = body;
        tsi->tsi_fid = body->oa.o_oi.oi_fid;

        if (req_capsule_has_field(pill, &RMF_OBD_IOOBJ, RCL_CLIENT)) {
                rc = tgt_io_data_unpack(tsi, &body->oa.o_oi);
                if (rc < 0)
                        RETURN(rc);
        }

        if (!(body->oa.o_valid & OBD_MD_FLID)) {
                if (flags & HABEO_CORPUS) {
                        CERROR("%s: OBD_MD_FLID flag is not set in ost_body "
                               "but OID/FID is mandatory with HABEO_CORPUS\n",
                               tgt_name(tsi->tsi_tgt));
                        RETURN(-EPROTO);
                } else {
                        RETURN(0);
                }
        }

        ost_fid_build_resid(&tsi->tsi_fid, &tsi->tsi_resid);

        /*
         * OST doesn't get the object in advance for further use to prevent
         * situations with nested object_find, which is a potential deadlock.
         */
        tsi->tsi_corpus = NULL;
        RETURN(0);
}
/*
 * Do necessary preprocessing according to handler ->th_flags.
 */
static int tgt_request_preprocess(struct tgt_session_info *tsi,
                                  struct tgt_handler *h,
                                  struct ptlrpc_request *req)
{
        struct req_capsule      *pill = tsi->tsi_pill;
        __u32                    flags = h->th_flags;
        int                      rc = 0;

        ENTRY;

        if (tsi->tsi_preprocessed)
                RETURN(0);

        LASSERT(h->th_act != NULL);
        LASSERT(h->th_opc == lustre_msg_get_opc(req->rq_reqmsg));
        LASSERT(current->journal_info == NULL);

        LASSERT(ergo(flags & (HABEO_CORPUS | HABEO_REFERO),
                     h->th_fmt != NULL));
        if (h->th_fmt != NULL) {
                req_capsule_set(pill, h->th_fmt);
                if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT)) {
                        rc = tgt_mdt_body_unpack(tsi, flags);
                        if (rc < 0)
                                RETURN(rc);
                } else if (req_capsule_has_field(pill, &RMF_OST_BODY,
                                                 RCL_CLIENT)) {
                        rc = tgt_ost_body_unpack(tsi, flags);
                        if (rc < 0)
                                RETURN(rc);
                }
        }

        if (flags & MUTABOR && tgt_conn_flags(tsi) & OBD_CONNECT_RDONLY)
                RETURN(-EROFS);

        if (flags & HABEO_CLAVIS) {
                struct ldlm_request *dlm_req;

                LASSERT(h->th_fmt != NULL);

                dlm_req = req_capsule_client_get(pill, &RMF_DLM_REQ);
                if (dlm_req != NULL) {
                        if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
                                     LDLM_IBITS &&
                                     dlm_req->lock_desc.l_policy_data.\
                                     l_inodebits.bits == 0)) {
                                /*
                                 * Lock without inodebits makes no sense and
                                 * will oops later in ldlm. If a client fails
                                 * to set such bits, do not trigger an
                                 * ASSERTION.
                                 *
                                 * For the liblustre flock case, it may be
                                 * zero.
                                 */
                                rc = -EPROTO;
                        } else {
                                tsi->tsi_dlm_req = dlm_req;
                        }
                } else {
                        rc = -EFAULT;
                }
        }
        tsi->tsi_preprocessed = 1;
        RETURN(rc);
}
/*
 * Invoke handler for this request opc. Also do necessary preprocessing
 * (according to handler ->th_flags), and post-processing (setting of
 * ->last_{xid,committed}).
 */
static int tgt_handle_request0(struct tgt_session_info *tsi,
                               struct tgt_handler *h,
                               struct ptlrpc_request *req)
{
        int      serious = 0;
        int      rc;
        __u32    opc = lustre_msg_get_opc(req->rq_reqmsg);

        ENTRY;

        /* When dealing with sec context requests, no export is associated yet,
         * because these requests are sent before *_CONNECT requests.
         * A NULL req->rq_export means the normal *_common_slice handlers will
         * not be called, because there is no reference to the target.
         * So deal with them by hand and jump directly to target_send_reply().
         */
        switch (opc) {
        case SEC_CTX_INIT:
        case SEC_CTX_INIT_CONT:
        case SEC_CTX_FINI:
                CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
                GOTO(out, rc = 0);
        }

        /*
         * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
         * to put the same checks into handlers like mdt_close(), mdt_reint(),
         * etc., without talking to mdt authors first. Checking the same thing
         * there again is useless and returning 0 error without packing a
         * reply is buggy! Handlers either pack a reply or return an error.
         *
         * We return 0 here and do not send any reply in order to emulate
         * network failure. Do not send any reply in case any NET related
         * fail_id has occurred.
         */
        if (OBD_FAIL_CHECK_ORSET(h->th_fail_id, OBD_FAIL_ONCE))
                RETURN(0);
        if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == MDS_REINT &&
                     OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_MULTI_NET)))
                RETURN(0);

        rc = tgt_request_preprocess(tsi, h, req);
        /* pack reply if reply format is fixed */
        if (rc == 0 && h->th_flags & HABEO_REFERO) {
                /* Pack reply */
                if (req_capsule_has_field(tsi->tsi_pill, &RMF_MDT_MD,
                                          RCL_SERVER))
                        req_capsule_set_size(tsi->tsi_pill, &RMF_MDT_MD,
                                             RCL_SERVER,
                                             tsi->tsi_mdt_body->mbo_eadatasize);
                if (req_capsule_has_field(tsi->tsi_pill, &RMF_LOGCOOKIES,
                                          RCL_SERVER))
                        req_capsule_set_size(tsi->tsi_pill, &RMF_LOGCOOKIES,
                                             RCL_SERVER, 0);

                rc = req_capsule_server_pack(tsi->tsi_pill);
        }

        if (likely(rc == 0)) {
                /*
                 * Process request, there can be two types of rc:
                 * 1) errors with msg unpack/pack, other failures outside the
                 * operation itself. This is counted as serious errors;
                 * 2) errors during fs operation, should be placed in rq_status
                 * only
                 */
                rc = h->th_act(tsi);
                if (!is_serious(rc) &&
                    !req->rq_no_reply && req->rq_reply_state == NULL) {
                        DEBUG_REQ(D_ERROR, req, "%s \"handler\" %s did not "
                                  "pack reply and returned 0 error\n",
                                  tgt_name(tsi->tsi_tgt), h->th_name);
                        LBUG();
                }
                serious = is_serious(rc);
                rc = clear_serious(rc);
        } else {
                serious = 1;
        }

        req->rq_status = rc;

        /*
         * ELDLM_* codes which > 0 should be in rq_status only as well as
         * all non-serious errors.
         */
        if (rc > 0 || !serious)
                rc = 0;

        LASSERT(current->journal_info == NULL);

        if (likely(rc == 0 && req->rq_export))
                target_committed_to_req(req);

out:
        target_send_reply(req, rc, tsi->tsi_reply_fail_id);
        RETURN(0);
}
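
/*
 * Decide what to do with a request that arrives while the target is still
 * in recovery: some operations are processed right away (*process = 1),
 * replayable operations are queued through target_queue_recovery_request(),
 * and anything else is rejected with -EAGAIN.
 */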
static int tgt_filter_recovery_request(struct ptlrpc_request *req,
                                       struct obd_device *obd, int *process)
{
        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case MDS_DISCONNECT:
        case OST_DISCONNECT:
        case OBD_IDX_READ:
                *process = 1;
                RETURN(0);
        case MDS_CLOSE:
        case MDS_SYNC: /* used in unmounting */
        case OBD_PING:
        case MDS_REINT:
        case OUT_UPDATE:
        case LDLM_ENQUEUE:
        case OST_CREATE:
        case OST_DESTROY:
        case OST_PUNCH:
        case OST_SETATTR:
        case OST_SYNC:
        case OST_WRITE:
        case MDS_HSM_PROGRESS:
        case MDS_HSM_STATE_SET:
        case MDS_HSM_REQUEST:
                *process = target_queue_recovery_request(req, obd);
                RETURN(0);
        default:
                DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
                *process = -EAGAIN;
                RETURN(0);
        }
}
/*
 * Handle recovery. Return:
 *        +1: continue request processing;
 *       -ve: abort immediately with the given error code;
 *         0: send reply with error code in req->rq_status;
 */
static int tgt_handle_recovery(struct ptlrpc_request *req, int reply_fail_id)
{
        ENTRY;

        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case MDS_CONNECT:
        case OST_CONNECT:
        case MGS_CONNECT:
        case SEC_CTX_INIT:
        case SEC_CTX_INIT_CONT:
        case SEC_CTX_FINI:
                RETURN(+1);
        }

        if (!req->rq_export->exp_obd->obd_replayable)
                RETURN(+1);

        /* sanity check: if the xid matches, the request must be marked as a
         * resent or replayed */
        if (req_can_reconstruct(req, NULL)) {
                if (!(lustre_msg_get_flags(req->rq_reqmsg) &
                      (MSG_RESENT | MSG_REPLAY))) {
                        DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches "
                                  "saved xid, expected REPLAY or RESENT flag "
                                  "(%x)", req->rq_xid,
                                  lustre_msg_get_flags(req->rq_reqmsg));
                        req->rq_status = -ENOTCONN;
                        RETURN(-ENOTCONN);
                }
        }
        /* else: note the opposite is not always true; a RESENT req after a
         * failover will usually not match the last_xid, since it was likely
         * never committed. A REPLAYed request will almost never match the
         * last xid, however it could for a committed, but still retained,
         * open. */

        /* Check for aborted recovery... */
        if (unlikely(req->rq_export->exp_obd->obd_recovering)) {
                int rc;
                int should_process;

                DEBUG_REQ(D_INFO, req, "Got new replay");
                rc = tgt_filter_recovery_request(req, req->rq_export->exp_obd,
                                                 &should_process);
                if (rc != 0 || !should_process)
                        RETURN(rc);
                else if (should_process < 0) {
                        req->rq_status = should_process;
                        rc = ptlrpc_error(req);
                        LASSERT(rc == 0);
                }
        }
        RETURN(+1);
}
/* Initial check for request, it is validation mostly */
static struct tgt_handler *tgt_handler_find_check(struct ptlrpc_request *req)
{
        struct tgt_handler      *h;
        struct tgt_opc_slice    *s;
        struct lu_target        *tgt;
        __u32                    opc = lustre_msg_get_opc(req->rq_reqmsg);

        ENTRY;

        tgt = class_exp2tgt(req->rq_export);
        if (unlikely(tgt == NULL)) {
                DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
                          class_exp2obd(req->rq_export)->obd_name);
                RETURN(ERR_PTR(-EINVAL));
        }

        for (s = tgt->lut_slice; s->tos_hs != NULL; s++)
                if (s->tos_opc_start <= opc && opc < s->tos_opc_end)
                        break;

        /* opcode was not found in slice */
        if (unlikely(s->tos_hs == NULL)) {
                CERROR("%s: no handlers for opcode 0x%x\n", tgt_name(tgt),
                       opc);
                RETURN(ERR_PTR(-ENOTSUPP));
        }

        LASSERT(opc >= s->tos_opc_start && opc < s->tos_opc_end);
        h = s->tos_hs + (opc - s->tos_opc_start);
        if (unlikely(h->th_opc == 0)) {
                CERROR("%s: unsupported opcode 0x%x\n", tgt_name(tgt), opc);
                RETURN(ERR_PTR(-ENOTSUPP));
        }

        RETURN(h);
}
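
/*
 * Check that the xid of an incoming request is consistent with the
 * export's last seen xid, and release in-memory reply data that the
 * client has acknowledged through the last_xid/tag fields of the request.
 */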
static int process_req_last_xid(struct ptlrpc_request *req)
{
        __u64 last_xid;

        ENTRY;

        /* check request's xid is consistent with export's last_xid */
        last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
        if (last_xid > req->rq_export->exp_last_xid)
                req->rq_export->exp_last_xid = last_xid;

        if (req->rq_xid == 0 ||
            (req->rq_xid <= req->rq_export->exp_last_xid)) {
                DEBUG_REQ(D_ERROR, req, "Unexpected xid %llx vs. "
                          "last_xid %llx\n", req->rq_xid,
                          req->rq_export->exp_last_xid);
                /* Some requests are allowed to be sent during replay,
                 * such as OUT update requests and FLD requests, so it
                 * is possible that replay requests have a smaller XID
                 * than the exp_last_xid.
                 *
                 * Some non-replay requests may have a smaller XID as
                 * well:
                 *
                 * - the client sends a no_resend RPC, like statfs;
                 * - the RPC times out (or hits some other error) on the
                 *   client, so it is removed from the unreplied list;
                 * - the client sends some other request to bump the
                 *   exp_last_xid on the server;
                 * - the former RPC gets a chance to be processed;
                 */
                if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))
                        RETURN(-EPROTO);
        }

        /* try to release in-memory reply data */
        if (tgt_is_multimodrpcs_client(req->rq_export)) {
                tgt_handle_received_xid(req->rq_export,
                                lustre_msg_get_last_xid(req->rq_reqmsg));
                if (!(lustre_msg_get_flags(req->rq_reqmsg) &
                      (MSG_RESENT | MSG_REPLAY)))
                        tgt_handle_tag(req->rq_export,
                                       lustre_msg_get_tag(req->rq_reqmsg));
        }

        RETURN(0);
}
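
/*
 * Main entry point for a unified target request: set up the session info,
 * handle connects and security-context requests specially, validate the
 * xid, look up the handler for the opcode, and run it through the recovery
 * and preprocessing logic above.
 */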
int tgt_request_handle(struct ptlrpc_request *req)
{
        struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);

        struct lustre_msg       *msg = req->rq_reqmsg;
        struct tgt_handler      *h;
        struct lu_target        *tgt;
        int                      request_fail_id = 0;
        __u32                    opc = lustre_msg_get_opc(msg);
        struct obd_device       *obd;
        int                      rc;
        bool                     is_connect = false;

        ENTRY;

        /* Refill the context, to make sure all thread keys are allocated */
        lu_env_refill(req->rq_svc_thread->t_env);

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);
        tsi->tsi_pill = &req->rq_pill;
        tsi->tsi_env = req->rq_svc_thread->t_env;

        /* if request has export then get handlers slice from corresponding
         * target, otherwise that should be connect operation */
        if (opc == MDS_CONNECT || opc == OST_CONNECT ||
            opc == MGS_CONNECT) {
                is_connect = true;
                req_capsule_set(&req->rq_pill, &RQF_CONNECT);
                rc = target_handle_connect(req);
                if (rc != 0) {
                        rc = ptlrpc_error(req);
                        GOTO(out, rc);
                }
                /* recovery-small test 18c asks to drop connect reply */
                if (unlikely(opc == OST_CONNECT &&
                             OBD_FAIL_CHECK(OBD_FAIL_OST_CONNECT_NET2)))
                        GOTO(out, rc = 0);
        }

        if (unlikely(!class_connected_export(req->rq_export))) {
                if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT ||
                    opc == SEC_CTX_FINI) {
                        /* sec context initialization has to be handled
                         * by hand in tgt_handle_request0() */
                        tsi->tsi_reply_fail_id = OBD_FAIL_SEC_CTX_INIT_NET;
                        h = NULL;
                        GOTO(handle_recov, rc = 0);
                }
                CDEBUG(D_HA, "operation %d on unconnected OST from %s\n",
                       opc, libcfs_id2str(req->rq_peer));
                req->rq_status = -ENOTCONN;
                rc = ptlrpc_error(req);
                GOTO(out, rc);
        }

        tsi->tsi_tgt = tgt = class_exp2tgt(req->rq_export);
        tsi->tsi_exp = req->rq_export;
        if (exp_connect_flags(req->rq_export) & OBD_CONNECT_JOBSTATS)
                tsi->tsi_jobid = lustre_msg_get_jobid(req->rq_reqmsg);
        else
                tsi->tsi_jobid = NULL;

        if (tgt == NULL) {
                DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
                          class_exp2obd(req->rq_export)->obd_name);
                req->rq_status = -EINVAL;
                rc = ptlrpc_error(req);
                GOTO(out, rc);
        }

        /* Skip last_xid processing for the recovery thread, otherwise the
         * last_xid on the same request could be processed twice: first when
         * processing the incoming request, then again when the request is
         * being processed by the recovery thread. */
        obd = class_exp2obd(req->rq_export);
        if (is_connect) {
                /* reset the exp_last_xid on each connection. */
                req->rq_export->exp_last_xid = 0;
        } else if (obd->obd_recovery_data.trd_processing_task !=
                   current_pid()) {
                rc = process_req_last_xid(req);
                if (rc) {
                        req->rq_status = rc;
                        rc = ptlrpc_error(req);
                        GOTO(out, rc);
                }
        }

        request_fail_id = tgt->lut_request_fail_id;
        tsi->tsi_reply_fail_id = tgt->lut_reply_fail_id;

        h = tgt_handler_find_check(req);
        if (IS_ERR(h)) {
                req->rq_status = PTR_ERR(h);
                rc = ptlrpc_error(req);
                GOTO(out, rc);
        }

        LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
                 h->th_opc, opc);

        if (CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))
                GOTO(out, rc = 0);

        rc = lustre_msg_check_version(msg, h->th_version);
        if (unlikely(rc)) {
                DEBUG_REQ(D_ERROR, req, "%s: drop malformed request, version"
                          " %08x, expecting %08x\n", tgt_name(tgt),
                          lustre_msg_get_version(msg), h->th_version);
                req->rq_status = -EINVAL;
                rc = ptlrpc_error(req);
                GOTO(out, rc);
        }

handle_recov:
        rc = tgt_handle_recovery(req, tsi->tsi_reply_fail_id);
        if (likely(rc == 1)) {
                rc = tgt_handle_request0(tsi, h, req);
                if (rc)
                        GOTO(out, rc);
        }
        EXIT;
out:
        req_capsule_fini(tsi->tsi_pill);
        if (tsi->tsi_corpus != NULL) {
                lu_object_put(tsi->tsi_env, tsi->tsi_corpus);
                tsi->tsi_corpus = NULL;
        }
        return rc;
}
EXPORT_SYMBOL(tgt_request_handle);
/** Assign high priority operations to the request if needed. */
int tgt_hpreq_handler(struct ptlrpc_request *req)
{
        struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);
        struct tgt_handler      *h;
        int                      rc;

        ENTRY;

        if (req->rq_export == NULL)
                RETURN(0);

        req_capsule_init(&req->rq_pill, req, RCL_SERVER);
        tsi->tsi_pill = &req->rq_pill;
        tsi->tsi_env = req->rq_svc_thread->t_env;
        tsi->tsi_tgt = class_exp2tgt(req->rq_export);
        tsi->tsi_exp = req->rq_export;

        h = tgt_handler_find_check(req);
        if (IS_ERR(h))
                RETURN(PTR_ERR(h));

        rc = tgt_request_preprocess(tsi, h, req);
        if (unlikely(rc != 0))
                RETURN(rc);

        if (h->th_hp != NULL)
                h->th_hp(tsi);
        RETURN(0);
}
EXPORT_SYMBOL(tgt_hpreq_handler);
void tgt_counter_incr(struct obd_export *exp, int opcode)
{
        lprocfs_counter_incr(exp->exp_obd->obd_stats, opcode);
        if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats != NULL)
                lprocfs_counter_incr(exp->exp_nid_stats->nid_stats, opcode);
}
EXPORT_SYMBOL(tgt_counter_incr);
/*
 * Unified target generic handlers.
 */
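
/*
 * Check the sptlrpc flavor of a connect request against the target's rule
 * set: the flavor is chosen on first use and then enforced on subsequent
 * requests from the same export.
 */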
int tgt_connect_check_sptlrpc(struct ptlrpc_request *req, struct obd_export *exp)
{
        struct lu_target        *tgt = class_exp2tgt(exp);
        struct sptlrpc_flavor    flvr;
        int                      rc = 0;

        LASSERT(tgt);
        LASSERT(tgt->lut_obd);
        LASSERT(tgt->lut_slice);

        /* always allow ECHO client */
        if (unlikely(strcmp(exp->exp_obd->obd_type->typ_name,
                            LUSTRE_ECHO_NAME) == 0)) {
                exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;
                return 0;
        }

        if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
                read_lock(&tgt->lut_sptlrpc_lock);
                sptlrpc_target_choose_flavor(&tgt->lut_sptlrpc_rset,
                                             req->rq_sp_from,
                                             req->rq_peer.nid,
                                             &flvr);
                read_unlock(&tgt->lut_sptlrpc_lock);

                spin_lock(&exp->exp_lock);
                exp->exp_sp_peer = req->rq_sp_from;
                exp->exp_flvr = flvr;

                /* when on mgs, if no restriction is set, or if the client
                 * is loopback, allow any flavor */
                if ((strcmp(exp->exp_obd->obd_type->typ_name,
                            LUSTRE_MGS_NAME) == 0) &&
                    (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_NULL ||
                     LNET_NETTYP(LNET_NIDNET(exp->exp_connection->c_peer.nid))
                     == LOLND))
                        exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_ANY;

                if (exp->exp_flvr.sf_rpc != SPTLRPC_FLVR_ANY &&
                    exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
                        CERROR("%s: unauthorized rpc flavor %x from %s, "
                               "expect %x\n", tgt_name(tgt),
                               req->rq_flvr.sf_rpc,
                               libcfs_nid2str(req->rq_peer.nid),
                               exp->exp_flvr.sf_rpc);
                        rc = -EACCES;
                }
                spin_unlock(&exp->exp_lock);
        } else {
                if (exp->exp_sp_peer != req->rq_sp_from) {
                        CERROR("%s: RPC source %s doesn't match %s\n",
                               tgt_name(tgt),
                               sptlrpc_part2name(req->rq_sp_from),
                               sptlrpc_part2name(exp->exp_sp_peer));
                        rc = -EACCES;
                } else {
                        rc = sptlrpc_target_export_check(exp, req);
                }
        }

        return rc;
}
int tgt_adapt_sptlrpc_conf(struct lu_target *tgt, int initial)
{
        struct sptlrpc_rule_set  tmp_rset;
        int                      rc;

        if (unlikely(tgt == NULL)) {
                CERROR("No target passed\n");
                return -EINVAL;
        }

        sptlrpc_rule_set_init(&tmp_rset);
        rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset, initial);
        if (rc) {
                CERROR("%s: failed to get sptlrpc rules: rc = %d\n",
                       tgt_name(tgt), rc);
                return rc;
        }

        sptlrpc_target_update_exp_flavor(tgt->lut_obd, &tmp_rset);

        write_lock(&tgt->lut_sptlrpc_lock);
        sptlrpc_rule_set_free(&tgt->lut_sptlrpc_rset);
        tgt->lut_sptlrpc_rset = tmp_rset;
        write_unlock(&tgt->lut_sptlrpc_lock);

        return 0;
}
EXPORT_SYMBOL(tgt_adapt_sptlrpc_conf);
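
/*
 * Generic connect handler: verify the sptlrpc flavor chosen for the new
 * export, then publish the negotiated connect flags and brw size in the
 * export under exp_lock.
 */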
int tgt_connect(struct tgt_session_info *tsi)
{
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct obd_connect_data *reply;
        int                      rc;

        ENTRY;

        /* XXX: better to call this check right after getting a new export
         * but before last_rcvd slot allocation, to avoid server load upon
         * insecure connects. This is to be fixed after unifying all targets.
         */
        rc = tgt_connect_check_sptlrpc(req, tsi->tsi_exp);
        if (rc)
                GOTO(out, rc);

        /* To avoid exposing partially initialized connection flags, changes up
         * to this point have been staged in reply->ocd_connect_flags. Now that
         * connection handling has completed successfully, atomically update
         * the connect flags in the shared export data structure. LU-1623 */
        reply = req_capsule_server_get(tsi->tsi_pill, &RMF_CONNECT_DATA);
        spin_lock(&tsi->tsi_exp->exp_lock);
        *exp_connect_flags_ptr(tsi->tsi_exp) = reply->ocd_connect_flags;
        tsi->tsi_exp->exp_connect_data.ocd_brw_size = reply->ocd_brw_size;
        spin_unlock(&tsi->tsi_exp->exp_lock);

        RETURN(0);
out:
        obd_disconnect(class_export_get(tsi->tsi_exp));
        return rc;
}
EXPORT_SYMBOL(tgt_connect);
int tgt_disconnect(struct tgt_session_info *tsi)
{
        int rc;

        ENTRY;

        rc = target_handle_disconnect(tgt_ses_req(tsi));
        if (rc)
                RETURN(err_serious(rc));

        RETURN(rc);
}
EXPORT_SYMBOL(tgt_disconnect);

/*
 * Unified target OBD handlers
 */
int tgt_obd_ping(struct tgt_session_info *tsi)
{
        int rc;

        ENTRY;

        rc = target_handle_ping(tgt_ses_req(tsi));
        if (rc)
                RETURN(err_serious(rc));

        RETURN(rc);
}
EXPORT_SYMBOL(tgt_obd_ping);

int tgt_obd_log_cancel(struct tgt_session_info *tsi)
{
        return err_serious(-EOPNOTSUPP);
}
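
/*
 * Send an array of local buffers to the client as a single PUT_SOURCE
 * bulk transfer, e.g. for replies too large to fit in the reply buffer.
 */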
int tgt_send_buffer(struct tgt_session_info *tsi, struct lu_rdbuf *rdbuf)
{
        struct tgt_thread_info  *tti = tgt_th_info(tsi->tsi_env);
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct obd_export       *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info      *lwi = &tti->tti_u.update.tti_wait_info;
        int                      i;
        int                      rc;

        ENTRY;

        desc = ptlrpc_prep_bulk_exp(req, rdbuf->rb_nbufs, 1,
                                    PTLRPC_BULK_PUT_SOURCE |
                                    PTLRPC_BULK_BUF_KVEC,
                                    MDS_BULK_PORTAL, &ptlrpc_bulk_kvec_ops);
        if (desc == NULL)
                RETURN(-ENOMEM);

        for (i = 0; i < rdbuf->rb_nbufs; i++)
                desc->bd_frag_ops->add_iov_frag(desc,
                                                rdbuf->rb_bufs[i].lb_buf,
                                                rdbuf->rb_bufs[i].lb_len);

        rc = target_bulk_io(exp, desc, lwi);
        ptlrpc_free_bulk(desc);
        RETURN(rc);
}
EXPORT_SYMBOL(tgt_send_buffer);

int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
{
        struct tgt_thread_info  *tti = tgt_th_info(tsi->tsi_env);
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct obd_export       *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info      *lwi = &tti->tti_u.rdpg.tti_wait_info;
        int                      tmpcount;
        int                      tmpsize;
        int                      i;
        int                      rc;

        ENTRY;

        desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1,
                                    PTLRPC_BULK_PUT_SOURCE |
                                    PTLRPC_BULK_BUF_KIOV,
                                    MDS_BULK_PORTAL,
                                    &ptlrpc_bulk_kiov_pin_ops);
        if (desc == NULL)
                RETURN(-ENOMEM);

        if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
                /* old client requires reply size in its PAGE_CACHE_SIZE,
                 * which is rdpg->rp_count */
                nob = rdpg->rp_count;

        for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
             i++, tmpcount -= tmpsize) {
                tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
                desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0,
                                                 tmpsize);
        }

        LASSERT(desc->bd_nob == nob);
        rc = target_bulk_io(exp, desc, lwi);
        ptlrpc_free_bulk(desc);
        RETURN(rc);
}
EXPORT_SYMBOL(tgt_sendpage);
/*
 * OBD_IDX_READ handler
 */
static int tgt_obd_idx_read(struct tgt_session_info *tsi)
{
        struct tgt_thread_info  *tti = tgt_th_info(tsi->tsi_env);
        struct lu_rdpg          *rdpg = &tti->tti_u.rdpg.tti_rdpg;
        struct idx_info         *req_ii, *rep_ii;
        int                      rc, i;

        ENTRY;

        memset(rdpg, 0, sizeof(*rdpg));
        req_capsule_set(tsi->tsi_pill, &RQF_OBD_IDX_READ);

        /* extract idx_info buffer from request & reply */
        req_ii = req_capsule_client_get(tsi->tsi_pill, &RMF_IDX_INFO);
        if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(tsi->tsi_pill);
        if (rc)
                RETURN(err_serious(rc));

        rep_ii = req_capsule_server_get(tsi->tsi_pill, &RMF_IDX_INFO);
        if (rep_ii == NULL)
                RETURN(err_serious(-EFAULT));
        rep_ii->ii_magic = IDX_INFO_MAGIC;

        /* extract hash to start with */
        rdpg->rp_hash = req_ii->ii_hash_start;

        /* extract requested attributes */
        rdpg->rp_attrs = req_ii->ii_attrs;

        /* check that fid packed in request is valid and supported */
        if (!fid_is_sane(&req_ii->ii_fid))
                RETURN(-EPERM);
        rep_ii->ii_fid = req_ii->ii_fid;

        /* copy flags */
        rep_ii->ii_flags = req_ii->ii_flags;

        /* compute number of pages to allocate, ii_count is the number of 4KB
         * containers */
        if (req_ii->ii_count <= 0)
                GOTO(out, rc = -EFAULT);
        rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
                               exp_max_brw_size(tsi->tsi_exp));
        rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >>
                          PAGE_CACHE_SHIFT;

        /* allocate pages to store the containers */
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        if (rdpg->rp_pages == NULL)
                GOTO(out, rc = -ENOMEM);
        for (i = 0; i < rdpg->rp_npages; i++) {
                rdpg->rp_pages[i] = alloc_page(GFP_NOFS);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(out, rc = -ENOMEM);
        }

        /* populate pages with key/record pairs */
        rc = dt_index_read(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, rep_ii,
                           rdpg);
        if (rc < 0)
                GOTO(out, rc);

        LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
                 "asked %d > %d\n", rc, rdpg->rp_count);

        /* send pages to client */
        rc = tgt_sendpage(tsi, rdpg, rc);

        EXIT;
out:
        if (rdpg->rp_pages) {
                for (i = 0; i < rdpg->rp_npages; i++)
                        if (rdpg->rp_pages[i])
                                __free_page(rdpg->rp_pages[i]);
                OBD_FREE(rdpg->rp_pages,
                         rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        }
        return rc;
}
struct tgt_handler tgt_obd_handlers[] = {
        TGT_OBD_HDL    (0,      OBD_PING,               tgt_obd_ping),
        TGT_OBD_HDL_VAR(0,      OBD_LOG_CANCEL,         tgt_obd_log_cancel),
        TGT_OBD_HDL    (0,      OBD_IDX_READ,           tgt_obd_idx_read)
};
EXPORT_SYMBOL(tgt_obd_handlers);
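
/*
 * Sync the target: with no object given, sync the whole underlying
 * filesystem; otherwise sync the single object, but only when its version
 * is newer than the last committed transaction.
 */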
int tgt_sync(const struct lu_env *env, struct lu_target *tgt,
             struct dt_object *obj, __u64 start, __u64 end)
{
        int rc = 0;

        ENTRY;

        /* if no objid is specified, it means "sync whole filesystem" */
        if (obj == NULL) {
                rc = dt_sync(env, tgt->lut_bottom);
        } else if (dt_version_get(env, obj) >
                   tgt->lut_obd->obd_last_committed) {
                rc = dt_object_sync(env, obj, start, end);
        }
        atomic_inc(&tgt->lut_sync_count);

        RETURN(rc);
}
EXPORT_SYMBOL(tgt_sync);
/*
 * Unified target DLM handlers.
 */

/**
 * Unified target BAST
 *
 * Ensure data and metadata are synced to disk when a lock is cancelled, if
 * Sync on Cancel (SOC) is enabled. If it's an extent lock, syncing the
 * object is normally enough, but for a cross-MDT lock a filesystem sync is
 * needed, because the remote object version is not set.
 *
 * \param lock  server side lock
 * \param desc  lock desc
 * \param data  ldlm_cb_set_arg
 * \param flag  indicates whether this is a cancelling or blocking callback
 * \retval      0 on success
 * \retval      negative number on error
 */
static int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct lu_env            env;
        struct lu_target        *tgt;
        struct dt_object        *obj = NULL;
        struct lu_fid            fid;
        int                      rc = 0;

        ENTRY;

        tgt = class_exp2tgt(lock->l_export);

        if (unlikely(tgt == NULL)) {
                CDEBUG(D_ERROR, "%s: No target for connected export\n",
                       class_exp2obd(lock->l_export)->obd_name);
                RETURN(-EINVAL);
        }

        if (flag == LDLM_CB_CANCELING &&
            (lock->l_granted_mode & (LCK_EX | LCK_PW | LCK_GROUP)) &&
            (tgt->lut_sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
             (tgt->lut_sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL &&
              ldlm_is_cbpending(lock))) &&
            ((exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) ||
             lock->l_resource->lr_type == LDLM_EXTENT)) {
                __u64 start = 0;
                __u64 end = OBD_OBJECT_EOF;

                rc = lu_env_init(&env, LCT_DT_THREAD);
                if (unlikely(rc != 0))
                        RETURN(rc);

                ost_fid_from_resid(&fid, &lock->l_resource->lr_name,
                                   tgt->lut_lsd.lsd_osd_index);

                if (lock->l_resource->lr_type == LDLM_EXTENT) {
                        obj = dt_locate(&env, tgt->lut_bottom, &fid);
                        if (IS_ERR(obj))
                                GOTO(err_env, rc = PTR_ERR(obj));

                        if (!dt_object_exists(obj))
                                GOTO(err_put, rc = -ENOENT);

                        start = lock->l_policy_data.l_extent.start;
                        end = lock->l_policy_data.l_extent.end;
                }

                rc = tgt_sync(&env, tgt, obj, start, end);
                if (rc < 0) {
                        CERROR("%s: syncing "DFID" ("LPU64"-"LPU64") on lock "
                               "cancel: rc = %d\n",
                               tgt_name(tgt), PFID(&fid),
                               lock->l_policy_data.l_extent.start,
                               lock->l_policy_data.l_extent.end, rc);
                }
err_put:
                if (obj != NULL)
                        lu_object_put(&env, &obj->do_lu);
err_env:
                lu_env_fini(&env);
        }

        rc = ldlm_server_blocking_ast(lock, desc, data, flag);
        RETURN(rc);
}
static struct ldlm_callback_suite tgt_dlm_cbs = {
        .lcs_completion = ldlm_server_completion_ast,
        .lcs_blocking   = tgt_blocking_ast,
        .lcs_glimpse    = ldlm_server_glimpse_ast
};

int tgt_enqueue(struct tgt_session_info *tsi)
{
        struct ptlrpc_request *req = tgt_ses_req(tsi);
        int rc;

        ENTRY;

        /*
         * tsi->tsi_dlm_req was already swapped and (if necessary) converted,
         * tsi->tsi_dlm_cbs was set by the *_req_handle() function.
         */
        LASSERT(tsi->tsi_dlm_req != NULL);
        rc = ldlm_handle_enqueue0(tsi->tsi_exp->exp_obd->obd_namespace, req,
                                  tsi->tsi_dlm_req, &tgt_dlm_cbs);
        if (rc)
                RETURN(err_serious(rc));

        switch (LUT_FAIL_CLASS(tsi->tsi_reply_fail_id)) {
        case LUT_FAIL_MDT:
                tsi->tsi_reply_fail_id = OBD_FAIL_MDS_LDLM_REPLY_NET;
                break;
        case LUT_FAIL_OST:
                tsi->tsi_reply_fail_id = OBD_FAIL_OST_LDLM_REPLY_NET;
                break;
        case LUT_FAIL_MGT:
                tsi->tsi_reply_fail_id = OBD_FAIL_MGS_LDLM_REPLY_NET;
                break;
        default:
                tsi->tsi_reply_fail_id = OBD_FAIL_LDLM_REPLY;
                break;
        }
        RETURN(req->rq_status);
}
EXPORT_SYMBOL(tgt_enqueue);
int tgt_convert(struct tgt_session_info *tsi)
{
        struct ptlrpc_request *req = tgt_ses_req(tsi);
        int rc;

        ENTRY;

        LASSERT(tsi->tsi_dlm_req);
        rc = ldlm_handle_convert0(req, tsi->tsi_dlm_req);
        if (rc)
                RETURN(err_serious(rc));

        RETURN(req->rq_status);
}

int tgt_bl_callback(struct tgt_session_info *tsi)
{
        return err_serious(-EOPNOTSUPP);
}

int tgt_cp_callback(struct tgt_session_info *tsi)
{
        return err_serious(-EOPNOTSUPP);
}

/* generic LDLM target handler */
struct tgt_handler tgt_dlm_handlers[] = {
        TGT_DLM_HDL    (HABEO_CLAVIS,   LDLM_ENQUEUE,           tgt_enqueue),
        TGT_DLM_HDL_VAR(HABEO_CLAVIS,   LDLM_CONVERT,           tgt_convert),
        TGT_DLM_HDL_VAR(0,              LDLM_BL_CALLBACK,       tgt_bl_callback),
        TGT_DLM_HDL_VAR(0,              LDLM_CP_CALLBACK,       tgt_cp_callback)
};
EXPORT_SYMBOL(tgt_dlm_handlers);
/*
 * Unified target LLOG handlers.
 */
int tgt_llog_open(struct tgt_session_info *tsi)
{
        int rc;

        ENTRY;
        rc = llog_origin_handle_open(tgt_ses_req(tsi));
        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_open);

int tgt_llog_close(struct tgt_session_info *tsi)
{
        int rc;

        ENTRY;
        rc = llog_origin_handle_close(tgt_ses_req(tsi));
        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_close);

int tgt_llog_destroy(struct tgt_session_info *tsi)
{
        int rc;

        ENTRY;
        rc = llog_origin_handle_destroy(tgt_ses_req(tsi));
        RETURN(rc);
}

int tgt_llog_read_header(struct tgt_session_info *tsi)
{
        int rc;

        ENTRY;
        rc = llog_origin_handle_read_header(tgt_ses_req(tsi));
        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_read_header);

int tgt_llog_next_block(struct tgt_session_info *tsi)
{
        int rc;

        ENTRY;
        rc = llog_origin_handle_next_block(tgt_ses_req(tsi));
        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_next_block);

int tgt_llog_prev_block(struct tgt_session_info *tsi)
{
        int rc;

        ENTRY;
        rc = llog_origin_handle_prev_block(tgt_ses_req(tsi));
        RETURN(rc);
}
EXPORT_SYMBOL(tgt_llog_prev_block);

/* generic llog target handler */
struct tgt_handler tgt_llog_handlers[] = {
        TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_CREATE,      tgt_llog_open),
        TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_NEXT_BLOCK,  tgt_llog_next_block),
        TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_READ_HEADER, tgt_llog_read_header),
        TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_PREV_BLOCK,  tgt_llog_prev_block),
        TGT_LLOG_HDL    (0,     LLOG_ORIGIN_HANDLE_DESTROY,     tgt_llog_destroy),
        TGT_LLOG_HDL_VAR(0,     LLOG_ORIGIN_HANDLE_CLOSE,       tgt_llog_close),
};
EXPORT_SYMBOL(tgt_llog_handlers);
/*
 * sec context handlers
 */
/* XXX: Implement based on mdt_sec_ctx_handle()? */
static int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
{
        return 0;
}

struct tgt_handler tgt_sec_ctx_handlers[] = {
        TGT_SEC_HDL_VAR(0,      SEC_CTX_INIT,           tgt_sec_ctx_handle),
        TGT_SEC_HDL_VAR(0,      SEC_CTX_INIT_CONT,      tgt_sec_ctx_handle),
        TGT_SEC_HDL_VAR(0,      SEC_CTX_FINI,           tgt_sec_ctx_handle),
};
EXPORT_SYMBOL(tgt_sec_ctx_handlers);
int (*tgt_lfsck_in_notify)(const struct lu_env *env,
                           struct dt_device *key,
                           struct lfsck_request *lr,
                           struct thandle *th) = NULL;

void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
                                                struct dt_device *,
                                                struct lfsck_request *,
                                                struct thandle *))
{
        tgt_lfsck_in_notify = notify;
}
EXPORT_SYMBOL(tgt_register_lfsck_in_notify);

static int (*tgt_lfsck_query)(const struct lu_env *env,
                              struct dt_device *key,
                              struct lfsck_request *req,
                              struct lfsck_reply *rep,
                              struct lfsck_query *que) = NULL;

void tgt_register_lfsck_query(int (*query)(const struct lu_env *,
                                           struct dt_device *,
                                           struct lfsck_request *,
                                           struct lfsck_reply *,
                                           struct lfsck_query *))
{
        tgt_lfsck_query = query;
}
EXPORT_SYMBOL(tgt_register_lfsck_query);
/* LFSCK request handlers */
static int tgt_handle_lfsck_notify(struct tgt_session_info *tsi)
{
        const struct lu_env     *env = tsi->tsi_env;
        struct dt_device        *key = tsi->tsi_tgt->lut_bottom;
        struct lfsck_request    *lr;
        int                      rc;

        ENTRY;

        lr = req_capsule_client_get(tsi->tsi_pill, &RMF_LFSCK_REQUEST);
        if (lr == NULL)
                RETURN(-EPROTO);

        rc = tgt_lfsck_in_notify(env, key, lr, NULL);

        RETURN(rc);
}

static int tgt_handle_lfsck_query(struct tgt_session_info *tsi)
{
        struct lfsck_request    *request;
        struct lfsck_reply      *reply;
        int                      rc;

        ENTRY;

        request = req_capsule_client_get(tsi->tsi_pill, &RMF_LFSCK_REQUEST);
        if (request == NULL)
                RETURN(-EPROTO);

        reply = req_capsule_server_get(tsi->tsi_pill, &RMF_LFSCK_REPLY);
        if (reply == NULL)
                RETURN(-ENOMEM);

        rc = tgt_lfsck_query(tsi->tsi_env, tsi->tsi_tgt->lut_bottom,
                             request, reply, NULL);

        RETURN(rc < 0 ? rc : 0);
}

struct tgt_handler tgt_lfsck_handlers[] = {
        TGT_LFSCK_HDL(HABEO_REFERO,     LFSCK_NOTIFY,   tgt_handle_lfsck_notify),
        TGT_LFSCK_HDL(HABEO_REFERO,     LFSCK_QUERY,    tgt_handle_lfsck_query),
};
EXPORT_SYMBOL(tgt_lfsck_handlers);
/*
 * initialize per-thread page pool (bug 5137).
 */
int tgt_io_thread_init(struct ptlrpc_thread *thread)
{
        struct tgt_thread_big_cache *tbc;

        ENTRY;

        LASSERT(thread != NULL);
        LASSERT(thread->t_data == NULL);

        OBD_ALLOC_LARGE(tbc, sizeof(*tbc));
        if (tbc == NULL)
                RETURN(-ENOMEM);
        thread->t_data = tbc;
        RETURN(0);
}
EXPORT_SYMBOL(tgt_io_thread_init);

/*
 * free per-thread pool created by tgt_io_thread_init().
 */
void tgt_io_thread_done(struct ptlrpc_thread *thread)
{
        struct tgt_thread_big_cache *tbc;

        ENTRY;

        LASSERT(thread != NULL);

        /*
         * be prepared to handle partially-initialized pools (because this is
         * called from ost_io_thread_init() for cleanup).
         */
        tbc = thread->t_data;
        if (tbc != NULL) {
                OBD_FREE_LARGE(tbc, sizeof(*tbc));
                thread->t_data = NULL;
        }
        EXIT;
}
EXPORT_SYMBOL(tgt_io_thread_done);
/**
 * Helper function for getting a server side [start, start+count] DLM lock
 * if asked by client.
 */
int tgt_extent_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
                    __u64 start, __u64 end, struct lustre_handle *lh,
                    int mode, __u64 *flags)
{
        union ldlm_policy_data   policy;
        int                      rc;

        ENTRY;

        LASSERT(lh != NULL);
        LASSERT(ns != NULL);
        LASSERT(!lustre_handle_is_used(lh));

        policy.l_extent.gid = 0;
        policy.l_extent.start = start & PAGE_MASK;

        /*
         * If ->o_blocks is EOF it means "lock till the end of the file".
         * Otherwise, it's size of an extent or hole being punched (in bytes).
         */
        if (end == OBD_OBJECT_EOF || end < start)
                policy.l_extent.end = OBD_OBJECT_EOF;
        else
                policy.l_extent.end = end | ~PAGE_MASK;

        rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_EXTENT, &policy, mode,
                                    flags, ldlm_blocking_ast,
                                    ldlm_completion_ast, ldlm_glimpse_ast,
                                    NULL, 0, LVB_T_NONE, NULL, lh);
        RETURN(rc == ELDLM_OK ? 0 : -EIO);
}
EXPORT_SYMBOL(tgt_extent_lock);

void tgt_extent_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
{
        LASSERT(lustre_handle_is_used(lh));
        ldlm_lock_decref(lh, mode);
}
EXPORT_SYMBOL(tgt_extent_unlock);
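
/*
 * Take one server-side extent lock covering the whole niobuf range of a
 * bulk I/O request when the client asked for server locking by setting
 * OBD_BRW_SRVLOCK; the flag must be set on either all niobufs or none.
 */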
int tgt_brw_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
                 struct obd_ioobj *obj, struct niobuf_remote *nb,
                 struct lustre_handle *lh, enum ldlm_mode mode)
{
        __u64    flags = 0;
        int      nrbufs = obj->ioo_bufcnt;
        int      i;

        ENTRY;

        LASSERT(mode == LCK_PR || mode == LCK_PW);
        LASSERT(!lustre_handle_is_used(lh));

        if (nrbufs == 0 || !(nb[0].rnb_flags & OBD_BRW_SRVLOCK))
                RETURN(0);

        for (i = 1; i < nrbufs; i++)
                if (!(nb[i].rnb_flags & OBD_BRW_SRVLOCK))
                        RETURN(-EFAULT);

        RETURN(tgt_extent_lock(ns, res_id, nb[0].rnb_offset,
                               nb[nrbufs - 1].rnb_offset +
                               nb[nrbufs - 1].rnb_len - 1,
                               lh, mode, &flags));
}

void tgt_brw_unlock(struct obd_ioobj *obj, struct niobuf_remote *niob,
                    struct lustre_handle *lh, enum ldlm_mode mode)
{
        ENTRY;

        LASSERT(mode == LCK_PR || mode == LCK_PW);
        LASSERT((obj->ioo_bufcnt > 0 &&
                 (niob[0].rnb_flags & OBD_BRW_SRVLOCK)) ==
                lustre_handle_is_used(lh));

        if (lustre_handle_is_used(lh))
                tgt_extent_unlock(lh, mode);
        EXIT;
}
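
/*
 * Compute the checksum of a bulk descriptor page by page. Under the
 * OBD_FAIL_OST_CHECKSUM_RECEIVE/SEND fault-injection points the first page
 * is deliberately corrupted before/after the hash update, to exercise the
 * client's checksum retry paths.
 */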
static __u32 tgt_checksum_bulk(struct lu_target *tgt,
                               struct ptlrpc_bulk_desc *desc, int opc,
                               cksum_type_t cksum_type)
{
        struct cfs_crypto_hash_desc     *hdesc;
        unsigned int                     bufsize;
        int                              i, err;
        unsigned char                    cfs_alg = cksum_obd2cfs(cksum_type);
        __u32                            cksum;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

        hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(hdesc)) {
                CERROR("%s: unable to initialize checksum hash %s\n",
                       tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(hdesc);
        }

        CDEBUG(D_INFO, "Checksum for algo %s\n", cfs_crypto_hash_name(cfs_alg));
        for (i = 0; i < desc->bd_iov_count; i++) {
                /* corrupt the data before we compute the checksum, to
                 * simulate a client->OST data error */
                if (i == 0 && opc == OST_WRITE &&
                    OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
                        int off = BD_GET_KIOV(desc, i).kiov_offset &
                                  ~PAGE_MASK;
                        int len = BD_GET_KIOV(desc, i).kiov_len;
                        struct page *np = tgt_page_to_corrupt;
                        char *ptr = kmap(BD_GET_KIOV(desc, i).kiov_page) + off;

                        if (np) {
                                char *ptr2 = kmap(np) + off;

                                memcpy(ptr2, ptr, len);
                                memcpy(ptr2, "bad3", min(4, len));
                                kunmap(np);
                                BD_GET_KIOV(desc, i).kiov_page = np;
                        } else {
                                CERROR("%s: can't alloc page for corruption\n",
                                       tgt_name(tgt));
                        }
                }
                cfs_crypto_hash_update_page(hdesc,
                                  BD_GET_KIOV(desc, i).kiov_page,
                                  BD_GET_KIOV(desc, i).kiov_offset &
                                  ~PAGE_MASK,
                                  BD_GET_KIOV(desc, i).kiov_len);

                /* corrupt the data after we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
                        int off = BD_GET_KIOV(desc, i).kiov_offset
                                  & ~PAGE_MASK;
                        int len = BD_GET_KIOV(desc, i).kiov_len;
                        struct page *np = tgt_page_to_corrupt;
                        char *ptr =
                          kmap(BD_GET_KIOV(desc, i).kiov_page) + off;

                        if (np) {
                                char *ptr2 = kmap(np) + off;

                                memcpy(ptr2, ptr, len);
                                memcpy(ptr2, "bad4", min(4, len));
                                kunmap(np);
                                BD_GET_KIOV(desc, i).kiov_page = np;
                        } else {
                                CERROR("%s: can't alloc page for corruption\n",
                                       tgt_name(tgt));
                        }
                }
        }

        bufsize = sizeof(cksum);
        err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);

        return cksum;
}
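
/*
 * Bulk read handler: lock the extent, prepare local buffers with
 * obd_preprw(), transfer them to the client as a PUT_SOURCE bulk, attach a
 * checksum when requested, and always commit with obd_commitrw() once
 * buffers were prepped.
 */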
int tgt_brw_read(struct tgt_session_info *tsi)
{
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct ptlrpc_bulk_desc *desc = NULL;
        struct obd_export       *exp = tsi->tsi_exp;
        struct niobuf_remote    *remote_nb;
        struct niobuf_local     *local_nb;
        struct obd_ioobj        *ioo;
        struct ost_body         *body, *repbody;
        struct l_wait_info       lwi;
        struct lustre_handle     lockh = { 0 };
        int                      npages, nob = 0, rc, i, no_reply = 0;
        struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;

        ENTRY;

        if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL) {
                CERROR("%s: deny read request from %s to portal %u\n",
                       tgt_name(tsi->tsi_tgt),
                       obd_export_nid2str(req->rq_export),
                       ptlrpc_req2svc(req)->srv_req_portal);
                RETURN(-EPROTO);
        }

        req->rq_bulk_read = 1;

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_READ_BULK))
                RETURN(-EIO);

        OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, cfs_fail_val > 0 ?
                         cfs_fail_val : (obd_timeout + 1) / 4);

        /* Check if there is eviction in progress, and if so, wait for it to
         * finish */
        if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
                /* We do not care how long it takes */
                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
                         !atomic_read(&exp->exp_obd->obd_evict_inprogress),
                         &lwi);
        }

        /* There must be a big cache in the current thread to process this
         * request; if it is NULL then something went wrong and it wasn't
         * allocated, report -ENOMEM in that case */
        if (tbc == NULL)
                RETURN(-ENOMEM);

        body = tsi->tsi_ost_body;
        LASSERT(body != NULL);

        ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
        LASSERT(ioo != NULL); /* must exist after tgt_ost_body_unpack */

        remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
        LASSERT(remote_nb != NULL); /* must exist after tgt_ost_body_unpack */

        local_nb = tbc->local;

        rc = tgt_brw_lock(exp->exp_obd->obd_namespace, &tsi->tsi_resid, ioo,
                          remote_nb, &lockh, LCK_PR);
        if (rc != 0)
                RETURN(rc);

        /*
         * If getting the lock took more time than
         * client was willing to wait, drop it. b=11330
         */
        if (cfs_time_current_sec() > req->rq_deadline ||
            OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
                no_reply = 1;
                CERROR("Dropping timed-out read from %s because locking "
                       "object "DOSTID" took %ld seconds (limit was %ld).\n",
                       libcfs_id2str(req->rq_peer), POSTID(&ioo->ioo_oid),
                       cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
                       req->rq_deadline - req->rq_arrival_time.tv_sec);
                GOTO(out_lock, rc = -ETIMEDOUT);
        }

        repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        repbody->oa = body->oa;

        npages = PTLRPC_MAX_BRW_PAGES;
        rc = obd_preprw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1,
                        ioo, remote_nb, &npages, local_nb);
        if (rc != 0)
                GOTO(out_lock, rc);

        desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
                                    PTLRPC_BULK_PUT_SOURCE |
                                    PTLRPC_BULK_BUF_KIOV,
                                    OST_BULK_PORTAL,
                                    &ptlrpc_bulk_kiov_nopin_ops);
        if (desc == NULL)
                GOTO(out_commitrw, rc = -ENOMEM);

        for (i = 0; i < npages; i++) {
                int page_rc = local_nb[i].lnb_rc;

                if (page_rc < 0) {
                        rc = page_rc;
                        break;
                }

                nob += page_rc;
                if (page_rc != 0) { /* some data! */
                        LASSERT(local_nb[i].lnb_page != NULL);
                        desc->bd_frag_ops->add_kiov_frag
                          (desc, local_nb[i].lnb_page,
                           local_nb[i].lnb_page_offset,
                           page_rc);
                }

                if (page_rc != local_nb[i].lnb_len) { /* short read */
                        /* All subsequent pages should be 0 */
                        while (++i < npages)
                                LASSERT(local_nb[i].lnb_rc == 0);
                        break;
                }
        }
        if (OBD_FAIL_CHECK(OBD_FAIL_OST_READ_SIZE) &&
            nob != cfs_fail_val)
                rc = -E2BIG;

        if (body->oa.o_valid & OBD_MD_FLCKSUM) {
                cksum_type_t cksum_type =
                        cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
                                          body->oa.o_flags : 0);

                repbody->oa.o_flags = cksum_type_pack(cksum_type);
                repbody->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                repbody->oa.o_cksum = tgt_checksum_bulk(tsi->tsi_tgt, desc,
                                                        OST_READ, cksum_type);
                CDEBUG(D_PAGE, "checksum at read origin: %x\n",
                       repbody->oa.o_cksum);
        } else {
                repbody->oa.o_valid = 0;
        }
        /* We're finishing using body->oa as an input variable */

        /* Check if client was evicted while we were doing i/o before touching
         * the network */
        if (likely(rc == 0 &&
                   !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2) &&
                   !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_BULK))) {
                rc = target_bulk_io(exp, desc, &lwi);
                no_reply = rc != 0;
        }

out_commitrw:
        /* Must commit after prep above in all cases */
        rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1, ioo,
                          remote_nb, npages, local_nb, rc);

out_lock:
        tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PR);

        if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
                ptlrpc_free_bulk(desc);

        LASSERT(rc <= 0);
        if (rc == 0) {
                rc = nob;
                ptlrpc_lprocfs_brw(req, nob);
        } else if (no_reply) {
                req->rq_no_reply = 1;
                /* reply out callback would free */
                ptlrpc_req_drop_rs(req);
                LCONSOLE_WARN("%s: Bulk IO read error with %s (at %s), "
                              "client will retry: rc %d\n",
                              exp->exp_obd->obd_name,
                              obd_uuid2str(&exp->exp_client_uuid),
                              obd_export_nid2str(exp), rc);
        }
        /* send a bulk after reply to simulate a network delay or reordering
         * by a router */
        if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
                wait_queue_head_t        waitq;
                struct l_wait_info       lwi1;

                CDEBUG(D_INFO, "reorder BULK\n");
                init_waitqueue_head(&waitq);

                lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
                l_wait_event(waitq, 0, &lwi1);
                target_bulk_io(exp, desc, &lwi);
                ptlrpc_free_bulk(desc);
        }

        RETURN(rc);
}
EXPORT_SYMBOL(tgt_brw_read);
static void tgt_warn_on_cksum(struct ptlrpc_request *req,
                              struct ptlrpc_bulk_desc *desc,
                              struct niobuf_local *local_nb, int npages,
                              u32 client_cksum, u32 server_cksum,
                              bool mmap)
{
        struct obd_export *exp = req->rq_export;
        struct ost_body *body;
        char *router = "";
        char *via = "";

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body != NULL);

        if (req->rq_peer.nid != desc->bd_sender) {
                via = " via ";
                router = libcfs_nid2str(desc->bd_sender);
        }

        if (mmap) {
                CDEBUG_LIMIT(D_INFO, "client csum %x, server csum %x\n",
                             client_cksum, server_cksum);
                return;
        }

        LCONSOLE_ERROR_MSG(0x168, "BAD WRITE CHECKSUM: %s from %s%s%s inode "
                           DFID" object "DOSTID" extent ["LPU64"-"LPU64
                           "]: client csum %x, server csum %x\n",
                           exp->exp_obd->obd_name, libcfs_id2str(req->rq_peer),
                           via, router,
                           body->oa.o_valid & OBD_MD_FLFID ?
                           body->oa.o_parent_seq : (__u64)0,
                           body->oa.o_valid & OBD_MD_FLFID ?
                           body->oa.o_parent_oid : 0,
                           body->oa.o_valid & OBD_MD_FLFID ?
                           body->oa.o_parent_ver : 0,
                           POSTID(&body->oa.o_oi),
                           local_nb[0].lnb_file_offset,
                           local_nb[npages-1].lnb_file_offset +
                           local_nb[npages - 1].lnb_len - 1,
                           client_cksum, server_cksum);
}
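
/*
 * Bulk write handler: mirror of tgt_brw_read() for the GET_SINK direction;
 * receives the client's pages, verifies the client checksum when present,
 * commits through obd_commitrw(), and fills per-niobuf return codes in the
 * reply.
 */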
int tgt_brw_write(struct tgt_session_info *tsi)
{
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct ptlrpc_bulk_desc *desc = NULL;
        struct obd_export       *exp = req->rq_export;
        struct niobuf_remote    *remote_nb;
        struct niobuf_local     *local_nb;
        struct obd_ioobj        *ioo;
        struct ost_body         *body, *repbody;
        struct l_wait_info       lwi;
        struct lustre_handle     lockh = {0};
        __u32                   *rcs;
        int                      objcount, niocount, npages;
        int                      rc, i, j;
        cksum_type_t             cksum_type = OBD_CKSUM_CRC32;
        bool                     no_reply = false, mmap;
        struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;

        ENTRY;

        if (ptlrpc_req2svc(req)->srv_req_portal != OST_IO_PORTAL) {
                CERROR("%s: deny write request from %s to portal %u\n",
                       tgt_name(tsi->tsi_tgt),
                       obd_export_nid2str(req->rq_export),
                       ptlrpc_req2svc(req)->srv_req_portal);
                RETURN(err_serious(-EPROTO));
        }

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOSPC))
                RETURN(err_serious(-ENOSPC));
        if (OBD_FAIL_TIMEOUT(OBD_FAIL_OST_EROFS, 1))
                RETURN(err_serious(-EROFS));

        req->rq_bulk_write = 1;

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK))
                RETURN(err_serious(-EIO));
        if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_WRITE_BULK2))
                RETURN(err_serious(-EFAULT));

        /* pause before transaction has been started */
        CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, cfs_fail_val > 0 ?
                         cfs_fail_val : (obd_timeout + 1) / 4);

        /* There must be a big cache in the current thread to process this
         * request; if it is NULL then something went wrong and it wasn't
         * allocated, report -ENOMEM in that case */
        if (tbc == NULL)
                RETURN(-ENOMEM);

        body = tsi->tsi_ost_body;
        LASSERT(body != NULL);

        ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
        LASSERT(ioo != NULL); /* must exist after tgt_ost_body_unpack */

        objcount = req_capsule_get_size(&req->rq_pill, &RMF_OBD_IOOBJ,
                                        RCL_CLIENT) / sizeof(*ioo);

        for (niocount = i = 0; i < objcount; i++)
                niocount += ioo[i].ioo_bufcnt;

        remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
        LASSERT(remote_nb != NULL); /* must exist after tgt_ost_body_unpack */
        if (niocount != req_capsule_get_size(&req->rq_pill,
                                             &RMF_NIOBUF_REMOTE, RCL_CLIENT) /
                        sizeof(*remote_nb))
                RETURN(err_serious(-EPROTO));

        if ((remote_nb[0].rnb_flags & OBD_BRW_MEMALLOC) &&
            (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self))
                memory_pressure_set();

        req_capsule_set_size(&req->rq_pill, &RMF_RCS, RCL_SERVER,
                             niocount * sizeof(*rcs));
        rc = req_capsule_server_pack(&req->rq_pill);
        if (rc != 0)
                GOTO(out, rc = err_serious(rc));

        CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_PACK, cfs_fail_val);
        rcs = req_capsule_server_get(&req->rq_pill, &RMF_RCS);

        local_nb = tbc->local;

        rc = tgt_brw_lock(exp->exp_obd->obd_namespace, &tsi->tsi_resid, ioo,
                          remote_nb, &lockh, LCK_PW);
        if (rc != 0)
                GOTO(out, rc);

        /*
         * If getting the lock took more time than
         * client was willing to wait, drop it. b=11330
         */
        if (cfs_time_current_sec() > req->rq_deadline ||
            OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
                no_reply = true;
                CERROR("%s: Dropping timed-out write from %s because locking "
                       "object "DOSTID" took %ld seconds (limit was %ld).\n",
                       tgt_name(tsi->tsi_tgt), libcfs_id2str(req->rq_peer),
                       POSTID(&ioo->ioo_oid),
                       cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
                       req->rq_deadline - req->rq_arrival_time.tv_sec);
                GOTO(out_lock, rc = -ETIMEDOUT);
        }

        /* Because we already sync the grant info with the client at
         * reconnect time, grant info will be cleared for resent reqs, so
         * fed_grant and total_grant will not be modified in the following
         * preprw_write */
        if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY)) {
                DEBUG_REQ(D_CACHE, req, "clear resent/replay req grant info");
                body->oa.o_valid &= ~OBD_MD_FLGRANT;
        }

        repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (repbody == NULL)
                GOTO(out_lock, rc = -ENOMEM);
        repbody->oa = body->oa;

        npages = PTLRPC_MAX_BRW_PAGES;
        rc = obd_preprw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
                        objcount, ioo, remote_nb, &npages, local_nb);
        if (rc < 0)
                GOTO(out_lock, rc);

        desc = ptlrpc_prep_bulk_exp(req, npages, ioobj_max_brw_get(ioo),
                                    PTLRPC_BULK_GET_SINK | PTLRPC_BULK_BUF_KIOV,
                                    OST_BULK_PORTAL,
                                    &ptlrpc_bulk_kiov_nopin_ops);
        if (desc == NULL)
                GOTO(skip_transfer, rc = -ENOMEM);

        /* NB Having prepped, we must commit... */
        for (i = 0; i < npages; i++)
                desc->bd_frag_ops->add_kiov_frag(desc,
                                                 local_nb[i].lnb_page,
                                                 local_nb[i].lnb_page_offset,
                                                 local_nb[i].lnb_len);

        rc = sptlrpc_svc_prep_bulk(req, desc);
        if (rc != 0)
                GOTO(skip_transfer, rc);

        rc = target_bulk_io(exp, desc, &lwi);
        no_reply = rc != 0;

skip_transfer:
        if (body->oa.o_valid & OBD_MD_FLCKSUM && rc == 0) {
                static int cksum_counter;

                if (body->oa.o_valid & OBD_MD_FLFLAGS)
                        cksum_type = cksum_type_unpack(body->oa.o_flags);

                repbody->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                repbody->oa.o_flags &= ~OBD_FL_CKSUM_ALL;
                repbody->oa.o_flags |= cksum_type_pack(cksum_type);
                repbody->oa.o_cksum = tgt_checksum_bulk(tsi->tsi_tgt, desc,
                                                        OST_WRITE, cksum_type);
                cksum_counter++;

                if (unlikely(body->oa.o_cksum != repbody->oa.o_cksum)) {
                        mmap = (body->oa.o_valid & OBD_MD_FLFLAGS &&
                                body->oa.o_flags & OBD_FL_MMAP);

                        tgt_warn_on_cksum(req, desc, local_nb, npages,
                                          body->oa.o_cksum,
                                          repbody->oa.o_cksum, mmap);
                        cksum_counter = 0;
                } else if ((cksum_counter & (-cksum_counter)) ==
                           cksum_counter) {
                        CDEBUG(D_INFO, "Checksum %u from %s OK: %x\n",
                               cksum_counter, libcfs_id2str(req->rq_peer),
                               repbody->oa.o_cksum);
                }
        }

        /* Must commit after prep above in all cases */
        rc = obd_commitrw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
                          objcount, ioo, remote_nb, npages, local_nb, rc);
        if (rc == -ENOTCONN)
                /* quota acquire process has been given up because
                 * either the client has been evicted or the client
                 * has timed out the request already */
                no_reply = true;

        /*
         * Disable sending mtime back to the client. If the client locked the
         * whole object, then it has already updated the mtime on its side,
         * otherwise it will have to glimpse anyway (see bug 21489, comment 32)
         */
        repbody->oa.o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLATIME);

        if (rc == 0) {
                int nob = 0;

                /* set per-requested niobuf return codes */
                for (i = j = 0; i < niocount; i++) {
                        int len = remote_nb[i].rnb_len;

                        nob += len;
                        rcs[i] = 0;
                        do {
                                LASSERT(j < npages);
                                if (local_nb[j].lnb_rc < 0)
                                        rcs[i] = local_nb[j].lnb_rc;
                                len -= local_nb[j].lnb_len;
                                j++;
                        } while (len > 0);
                        LASSERT(len == 0);
                }
                LASSERT(j == npages);
                ptlrpc_lprocfs_brw(req, nob);
        }
out_lock:
        tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PW);
        if (desc)
                ptlrpc_free_bulk(desc);
out:
        if (no_reply) {
                req->rq_no_reply = 1;
                /* reply out callback would free */
                ptlrpc_req_drop_rs(req);
                LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s), "
                              "client will retry: rc %d\n",
                              exp->exp_obd->obd_name,
                              obd_uuid2str(&exp->exp_client_uuid),
                              obd_export_nid2str(exp), rc);
        }
        memory_pressure_clr();
        RETURN(rc);
}
EXPORT_SYMBOL(tgt_brw_write);
/* Check if a request can be reconstructed from saved reply data.
 * A copy of the reply data is returned in @trd if the pointer is not NULL.
 */
bool req_can_reconstruct(struct ptlrpc_request *req,
                         struct tg_reply_data *trd)
{
        struct tg_export_data *ted = &req->rq_export->exp_target_data;
        struct lsd_client_data *lcd = ted->ted_lcd;
        bool found;

        if (tgt_is_multimodrpcs_client(req->rq_export))
                return tgt_lookup_reply(req, trd);

        mutex_lock(&ted->ted_lcd_lock);
        found = req->rq_xid == lcd->lcd_last_xid ||
                req->rq_xid == lcd->lcd_last_close_xid;

        if (found && trd != NULL) {
                if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE) {
                        trd->trd_reply.lrd_xid = lcd->lcd_last_close_xid;
                        trd->trd_reply.lrd_transno =
                                                lcd->lcd_last_close_transno;
                        trd->trd_reply.lrd_result = lcd->lcd_last_close_result;
                } else {
                        trd->trd_reply.lrd_xid = lcd->lcd_last_xid;
                        trd->trd_reply.lrd_transno = lcd->lcd_last_transno;
                        trd->trd_reply.lrd_result = lcd->lcd_last_result;
                        trd->trd_reply.lrd_data = lcd->lcd_last_data;
                        trd->trd_pre_versions[0] = lcd->lcd_pre_versions[0];
                        trd->trd_pre_versions[1] = lcd->lcd_pre_versions[1];
                        trd->trd_pre_versions[2] = lcd->lcd_pre_versions[2];
                        trd->trd_pre_versions[3] = lcd->lcd_pre_versions[3];
                }
        }
        mutex_unlock(&ted->ted_lcd_lock);

        return found;
}
EXPORT_SYMBOL(req_can_reconstruct);