4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #define DEBUG_SUBSYSTEM S_OSC
39 #include <libcfs/libcfs.h>
42 # include <liblustre.h>
45 #include <lustre_dlm.h>
46 #include <lustre_net.h>
47 #include <lustre/lustre_user.h>
48 #include <obd_cksum.h>
54 #include <lustre_ha.h>
55 #include <lprocfs_status.h>
56 #include <lustre_ioctl.h>
57 #include <lustre_log.h>
58 #include <lustre_debug.h>
59 #include <lustre_param.h>
60 #include <lustre_fid.h>
61 #include "osc_internal.h"
62 #include "osc_cl_internal.h"
64 struct osc_brw_async_args {
68 obd_count aa_page_count;
70 struct brw_page **aa_ppga;
71 struct client_obd *aa_cli;
72 struct list_head aa_oaps;
73 struct list_head aa_exts;
74 struct obd_capa *aa_ocapa;
75 struct cl_req *aa_clerq;
78 #define osc_grant_args osc_brw_async_args
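/* Note: grant-shrink RPCs reuse the BRW async-args layout; the interpret
 * callback (see osc_shrink_grant_interpret() below) mainly needs the aa_oa
 * field from it. */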
80 struct osc_async_args {
81 struct obd_info *aa_oi;
84 struct osc_setattr_args {
86 obd_enqueue_update_f sa_upcall;
90 struct osc_fsync_args {
91 struct obd_info *fa_oi;
92 obd_enqueue_update_f fa_upcall;
96 struct osc_enqueue_args {
97 struct obd_export *oa_exp;
99 obd_enqueue_update_f oa_upcall;
101 struct ost_lvb *oa_lvb;
102 struct lustre_handle *oa_lockh;
103 struct ldlm_enqueue_info *oa_ei;
104 unsigned int oa_agl:1;
107 static void osc_release_ppga(struct brw_page **ppga, obd_count count);
108 static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
111 /* Unpack OSC object metadata from disk storage (LE byte order). */
112 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
113 struct lov_mds_md *lmm, int lmm_bytes)
116 struct obd_import *imp = class_exp2cliimp(exp);
120 if (lmm_bytes < sizeof(*lmm)) {
121 CERROR("%s: lov_mds_md too small: %d, need %d\n",
122 exp->exp_obd->obd_name, lmm_bytes,
126 /* XXX LOV_MAGIC etc check? */
128 if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
129 CERROR("%s: zero lmm_object_id: rc = %d\n",
130 exp->exp_obd->obd_name, -EINVAL);
135 lsm_size = lov_stripe_md_size(1);
139 if (*lsmp != NULL && lmm == NULL) {
140 OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
141 OBD_FREE(*lsmp, lsm_size);
147 OBD_ALLOC(*lsmp, lsm_size);
148 if (unlikely(*lsmp == NULL))
150 OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
151 if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) {
152 OBD_FREE(*lsmp, lsm_size);
155 loi_init((*lsmp)->lsm_oinfo[0]);
156 } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
161 /* XXX zero *lsmp? */
162 ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
165 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
166 (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
168 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
173 static inline void osc_pack_capa(struct ptlrpc_request *req,
174 struct ost_body *body, void *capa)
176 struct obd_capa *oc = (struct obd_capa *)capa;
177 struct lustre_capa *c;
182 c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
185 body->oa.o_valid |= OBD_MD_FLOSSCAPA;
186 DEBUG_CAPA(D_SEC, c, "pack");
189 static inline void osc_pack_req_body(struct ptlrpc_request *req,
190 struct obd_info *oinfo)
192 struct ost_body *body;
194 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
197 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
199 osc_pack_capa(req, body, oinfo->oi_capa);
202 static inline void osc_set_capa_size(struct ptlrpc_request *req,
203 const struct req_msg_field *field,
207 req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
209 /* it is already calculated as sizeof struct obd_capa */
213 static int osc_getattr_interpret(const struct lu_env *env,
214 struct ptlrpc_request *req,
215 struct osc_async_args *aa, int rc)
217 struct ost_body *body;
223 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
225 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
226 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
227 aa->aa_oi->oi_oa, &body->oa);
229 /* This should really be sent by the OST */
230 aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
231 aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
233 CDEBUG(D_INFO, "can't unpack ost_body\n");
235 aa->aa_oi->oi_oa->o_valid = 0;
238 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
242 static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
243 struct ptlrpc_request_set *set)
245 struct ptlrpc_request *req;
246 struct osc_async_args *aa;
250 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
254 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
255 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
257 ptlrpc_request_free(req);
261 osc_pack_req_body(req, oinfo);
263 ptlrpc_request_set_replen(req);
264 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
266 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
267 aa = ptlrpc_req_async_args(req);
270 ptlrpc_set_add_req(set, req);
274 static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
275 struct obd_info *oinfo)
277 struct ptlrpc_request *req;
278 struct ost_body *body;
282 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
286 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
287 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
289 ptlrpc_request_free(req);
293 osc_pack_req_body(req, oinfo);
295 ptlrpc_request_set_replen(req);
297 rc = ptlrpc_queue_wait(req);
301 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
303 GOTO(out, rc = -EPROTO);
305 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
306 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
309 oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
310 oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
314 ptlrpc_req_finished(req);
318 static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
319 struct obd_info *oinfo, struct obd_trans_info *oti)
321 struct ptlrpc_request *req;
322 struct ost_body *body;
326 LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
328 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
332 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
333 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
335 ptlrpc_request_free(req);
339 osc_pack_req_body(req, oinfo);
341 ptlrpc_request_set_replen(req);
343 rc = ptlrpc_queue_wait(req);
347 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
349 GOTO(out, rc = -EPROTO);
351 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
356 ptlrpc_req_finished(req);
360 static int osc_setattr_interpret(const struct lu_env *env,
361 struct ptlrpc_request *req,
362 struct osc_setattr_args *sa, int rc)
364 struct ost_body *body;
370 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
372 GOTO(out, rc = -EPROTO);
374 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
377 rc = sa->sa_upcall(sa->sa_cookie, rc);
381 int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
382 struct obd_trans_info *oti,
383 obd_enqueue_update_f upcall, void *cookie,
384 struct ptlrpc_request_set *rqset)
386 struct ptlrpc_request *req;
387 struct osc_setattr_args *sa;
391 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
395 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
396 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
398 ptlrpc_request_free(req);
402 if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
403 oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
405 osc_pack_req_body(req, oinfo);
407 ptlrpc_request_set_replen(req);
409 /* do the MDS-to-OST setattr asynchronously */
411 /* Do not wait for response. */
412 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
414 req->rq_interpret_reply =
415 (ptlrpc_interpterer_t)osc_setattr_interpret;
417 CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
418 sa = ptlrpc_req_async_args(req);
419 sa->sa_oa = oinfo->oi_oa;
420 sa->sa_upcall = upcall;
421 sa->sa_cookie = cookie;
423 if (rqset == PTLRPCD_SET)
424 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
426 ptlrpc_set_add_req(rqset, req);
432 static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
433 struct obd_trans_info *oti,
434 struct ptlrpc_request_set *rqset)
436 return osc_setattr_async_base(exp, oinfo, oti,
437 oinfo->oi_cb_up, oinfo, rqset);
440 int osc_real_create(struct obd_export *exp, struct obdo *oa,
441 struct lov_stripe_md **ea, struct obd_trans_info *oti)
443 struct ptlrpc_request *req;
444 struct ost_body *body;
445 struct lov_stripe_md *lsm;
454 rc = obd_alloc_memmd(exp, &lsm);
459 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
461 GOTO(out, rc = -ENOMEM);
463 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
465 ptlrpc_request_free(req);
469 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
472 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
474 ptlrpc_request_set_replen(req);
476 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
477 oa->o_flags == OBD_FL_DELORPHAN) {
479 "delorphan from OST integration");
480 /* Don't resend the delorphan req */
481 req->rq_no_resend = req->rq_no_delay = 1;
484 rc = ptlrpc_queue_wait(req);
488 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
490 GOTO(out_req, rc = -EPROTO);
492 CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
493 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
495 oa->o_blksize = cli_brw_size(exp->exp_obd);
496 oa->o_valid |= OBD_MD_FLBLKSZ;
498 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
499 * have valid lsm_oinfo data structs, so don't go touching that.
500 * This needs to be fixed in a big way.
502 lsm->lsm_oi = oa->o_oi;
506 if (oa->o_valid & OBD_MD_FLCOOKIE) {
507 if (oti->oti_logcookies == NULL)
508 oti->oti_logcookies = &oti->oti_onecookie;
510 *oti->oti_logcookies = oa->o_lcookie;
514 CDEBUG(D_HA, "transno: "LPD64"\n",
515 lustre_msg_get_transno(req->rq_repmsg));
517 ptlrpc_req_finished(req);
520 obd_free_memmd(exp, &lsm);
524 int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
525 obd_enqueue_update_f upcall, void *cookie,
526 struct ptlrpc_request_set *rqset)
528 struct ptlrpc_request *req;
529 struct osc_setattr_args *sa;
530 struct ost_body *body;
534 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
538 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
539 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
541 ptlrpc_request_free(req);
544 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
545 ptlrpc_at_set_req_timeout(req);
547 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
549 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
551 osc_pack_capa(req, body, oinfo->oi_capa);
553 ptlrpc_request_set_replen(req);
555 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
556 CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
557 sa = ptlrpc_req_async_args(req);
558 sa->sa_oa = oinfo->oi_oa;
559 sa->sa_upcall = upcall;
560 sa->sa_cookie = cookie;
561 if (rqset == PTLRPCD_SET)
562 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
564 ptlrpc_set_add_req(rqset, req);
569 static int osc_sync_interpret(const struct lu_env *env,
570 struct ptlrpc_request *req,
573 struct osc_fsync_args *fa = arg;
574 struct ost_body *body;
580 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
582 CERROR ("can't unpack ost_body\n");
583 GOTO(out, rc = -EPROTO);
586 *fa->fa_oi->oi_oa = body->oa;
588 rc = fa->fa_upcall(fa->fa_cookie, rc);
592 int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
593 obd_enqueue_update_f upcall, void *cookie,
594 struct ptlrpc_request_set *rqset)
596 struct ptlrpc_request *req;
597 struct ost_body *body;
598 struct osc_fsync_args *fa;
602 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
606 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
607 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
609 ptlrpc_request_free(req);
613 /* overload the size and blocks fields in the oa with start/end */
614 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
616 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
618 osc_pack_capa(req, body, oinfo->oi_capa);
620 ptlrpc_request_set_replen(req);
621 req->rq_interpret_reply = osc_sync_interpret;
623 CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
624 fa = ptlrpc_req_async_args(req);
626 fa->fa_upcall = upcall;
627 fa->fa_cookie = cookie;
629 if (rqset == PTLRPCD_SET)
630 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
632 ptlrpc_set_add_req(rqset, req);
637 /* Find and cancel locally the locks matched by @mode in the resource found by
638 * @objid. Found locks are added to the @cancels list. Returns the number of
639 * locks added to the @cancels list. */
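/* Typical caller: osc_destroy() below gathers LCK_PW locks on the object
 * (with LDLM_FL_DISCARD_DATA) so that they can be cancelled together with
 * the OST_DESTROY RPC via ldlm_prep_elc_req(). */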
640 static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
641 struct list_head *cancels,
642 ldlm_mode_t mode, __u64 lock_flags)
644 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
645 struct ldlm_res_id res_id;
646 struct ldlm_resource *res;
650 /* Return, i.e. cancel nothing, only if ELC is supported (flag in
651 * export) but disabled through procfs (flag in NS).
653 * This is distinct from the case where ELC is not supported at all, in which
654 * we still want to cancel locks in advance and simply cancel them
655 * locally, without sending any RPC. */
656 if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
659 ostid_build_res_name(&oa->o_oi, &res_id);
660 res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
664 LDLM_RESOURCE_ADDREF(res);
665 count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
666 lock_flags, 0, NULL);
667 LDLM_RESOURCE_DELREF(res);
668 ldlm_resource_putref(res);
672 static int osc_destroy_interpret(const struct lu_env *env,
673 struct ptlrpc_request *req, void *data,
676 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
678 atomic_dec(&cli->cl_destroy_in_flight);
679 wake_up(&cli->cl_destroy_waitq);
683 static int osc_can_send_destroy(struct client_obd *cli)
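	/* Optimistically take a destroy-in-flight slot; if that pushes the count
	 * past cl_max_rpcs_in_flight, back out again and, if the count meanwhile
	 * dropped below the limit (another destroy completed), wake any waiter so
	 * it can retry. */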
685 if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
686 cli->cl_max_rpcs_in_flight) {
687 /* The destroy request can be sent */
690 if (atomic_dec_return(&cli->cl_destroy_in_flight) <
691 cli->cl_max_rpcs_in_flight) {
693 * The counter has been modified between the two atomic
696 wake_up(&cli->cl_destroy_waitq);
701 int osc_create(const struct lu_env *env, struct obd_export *exp,
702 struct obdo *oa, struct lov_stripe_md **ea,
703 struct obd_trans_info *oti)
710 LASSERT(oa->o_valid & OBD_MD_FLGROUP);
712 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
713 oa->o_flags == OBD_FL_RECREATE_OBJS) {
714 RETURN(osc_real_create(exp, oa, ea, oti));
717 if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
718 RETURN(osc_real_create(exp, oa, ea, oti));
720 /* we should not get here anymore */
726 /* Destroy requests can always be async on the client, and we don't even really
727 * care about the return code since the client cannot do anything at all about
729 * When the MDS is unlinking a filename, it saves the file objects into a
730 * recovery llog, and these object records are cancelled when the OST reports
731 * they were destroyed and sync'd to disk (i.e. transaction committed).
732 * If the client dies, or the OST is down when the object should be destroyed,
733 * the records are not cancelled, and when the OST reconnects to the MDS next,
734 * it will retrieve the llog unlink logs and then send the log cancellation
735 * cookies to the MDS after committing destroy transactions. */
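/* Client-side throttling: at most cl_max_rpcs_in_flight destroy RPCs are kept
 * in flight (see osc_can_send_destroy() above); DELORPHAN destroys issued on
 * behalf of the MDT skip the wait so that a ptlrpcd thread is never blocked. */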
736 static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
737 struct obdo *oa, struct lov_stripe_md *ea,
738 struct obd_trans_info *oti, struct obd_export *md_export,
741 struct client_obd *cli = &exp->exp_obd->u.cli;
742 struct ptlrpc_request *req;
743 struct ost_body *body;
744 struct list_head cancels = LIST_HEAD_INIT(cancels);
749 CDEBUG(D_INFO, "oa NULL\n");
753 count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
754 LDLM_FL_DISCARD_DATA);
756 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
758 ldlm_lock_list_put(&cancels, l_bl_ast, count);
762 osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
763 rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
766 ptlrpc_request_free(req);
770 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
771 ptlrpc_at_set_req_timeout(req);
773 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
774 oa->o_lcookie = *oti->oti_logcookies;
775 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
777 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
779 osc_pack_capa(req, body, (struct obd_capa *)capa);
780 ptlrpc_request_set_replen(req);
782 /* If this osc_destroy() is destroying an unlink orphan sent
783 * from the MDT to the OST, it must not be blocked here,
784 * because the process might be triggered by ptlrpcd, and
785 * it is not good to block a ptlrpcd thread (b=16006). */
786 if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
787 req->rq_interpret_reply = osc_destroy_interpret;
788 if (!osc_can_send_destroy(cli)) {
789 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
793 * Wait until the number of ongoing destroy RPCs drops
794 * below max_rpcs_in_flight
796 l_wait_event_exclusive(cli->cl_destroy_waitq,
797 osc_can_send_destroy(cli), &lwi);
801 /* Do not wait for response */
802 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
806 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
809 obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
811 LASSERT(!(oa->o_valid & bits));
814 client_obd_list_lock(&cli->cl_loi_list_lock);
815 oa->o_dirty = cli->cl_dirty_pages << PAGE_CACHE_SHIFT;
816 if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
817 cli->cl_dirty_max_pages)) {
818 CERROR("dirty %lu - %lu > dirty_max %lu\n",
819 cli->cl_dirty_pages, cli->cl_dirty_transit,
820 cli->cl_dirty_max_pages);
822 } else if (unlikely(atomic_read(&obd_dirty_pages) -
823 atomic_read(&obd_dirty_transit_pages) >
824 (long)(obd_max_dirty_pages + 1))) {
825 /* The atomic_read()s and the corresponding atomic_inc()s are
826 * not covered by a lock, thus they may safely race and trip
827 * this CERROR() unless we add in a small fudge factor (+1). */
828 CERROR("%s: dirty %d - %d > system dirty_max %d\n",
829 cli->cl_import->imp_obd->obd_name,
830 atomic_read(&obd_dirty_pages),
831 atomic_read(&obd_dirty_transit_pages),
832 obd_max_dirty_pages);
834 } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
836 CERROR("dirty %lu - dirty_max %lu too big???\n",
837 cli->cl_dirty_pages, cli->cl_dirty_max_pages);
840 long max_in_flight = (cli->cl_max_pages_per_rpc <<
842 (cli->cl_max_rpcs_in_flight + 1);
843 oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_CACHE_SHIFT,
846 oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
847 oa->o_dropped = cli->cl_lost_grant;
848 cli->cl_lost_grant = 0;
849 client_obd_list_unlock(&cli->cl_loi_list_lock);
850 CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
851 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
855 void osc_update_next_shrink(struct client_obd *cli)
857 cli->cl_next_shrink_grant =
858 cfs_time_shift(cli->cl_grant_shrink_interval);
859 CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
860 cli->cl_next_shrink_grant);
863 static void __osc_update_grant(struct client_obd *cli, obd_size grant)
865 client_obd_list_lock(&cli->cl_loi_list_lock);
866 cli->cl_avail_grant += grant;
867 client_obd_list_unlock(&cli->cl_loi_list_lock);
870 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
872 if (body->oa.o_valid & OBD_MD_FLGRANT) {
873 CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
874 __osc_update_grant(cli, body->oa.o_grant);
878 static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
879 obd_count keylen, void *key, obd_count vallen,
880 void *val, struct ptlrpc_request_set *set);
882 static int osc_shrink_grant_interpret(const struct lu_env *env,
883 struct ptlrpc_request *req,
886 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
887 struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
888 struct ost_body *body;
891 __osc_update_grant(cli, oa->o_grant);
895 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
897 osc_update_grant(cli, body);
903 static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
905 client_obd_list_lock(&cli->cl_loi_list_lock);
906 oa->o_grant = cli->cl_avail_grant / 4;
907 cli->cl_avail_grant -= oa->o_grant;
908 client_obd_list_unlock(&cli->cl_loi_list_lock);
909 if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
910 oa->o_valid |= OBD_MD_FLFLAGS;
913 oa->o_flags |= OBD_FL_SHRINK_GRANT;
914 osc_update_next_shrink(cli);
917 /* Shrink the current grant, either from some large amount to enough for a
918 * full set of in-flight RPCs, or if we have already shrunk to that limit
919 * then to enough for a single RPC. This avoids keeping more grant than
920 * needed, and avoids shrinking the grant piecemeal. */
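/* For example (a sketch, assuming 4 KiB pages): with cl_max_rpcs_in_flight = 8
 * and cl_max_pages_per_rpc = 256 (i.e. 1 MiB RPCs), the first shrink targets
 * (8 + 1) * 1 MiB = 9 MiB of grant; once cl_avail_grant is at or below that,
 * the next shrink targets a single 1 MiB RPC's worth. */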
921 static int osc_shrink_grant(struct client_obd *cli)
923 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
924 (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
926 client_obd_list_lock(&cli->cl_loi_list_lock);
927 if (cli->cl_avail_grant <= target_bytes)
928 target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
929 client_obd_list_unlock(&cli->cl_loi_list_lock);
931 return osc_shrink_grant_to_target(cli, target_bytes);
934 int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
937 struct ost_body *body;
940 client_obd_list_lock(&cli->cl_loi_list_lock);
941 /* Don't shrink if we are already above or below the desired limit
942 * We don't want to shrink below a single RPC, as that will negatively
943 * impact block allocation and long-term performance. */
944 if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
945 target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
947 if (target_bytes >= cli->cl_avail_grant) {
948 client_obd_list_unlock(&cli->cl_loi_list_lock);
951 client_obd_list_unlock(&cli->cl_loi_list_lock);
957 osc_announce_cached(cli, &body->oa, 0);
959 client_obd_list_lock(&cli->cl_loi_list_lock);
960 body->oa.o_grant = cli->cl_avail_grant - target_bytes;
961 cli->cl_avail_grant = target_bytes;
962 client_obd_list_unlock(&cli->cl_loi_list_lock);
963 if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
964 body->oa.o_valid |= OBD_MD_FLFLAGS;
965 body->oa.o_flags = 0;
967 body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
968 osc_update_next_shrink(cli);
970 rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
971 sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
972 sizeof(*body), body, NULL);
974 __osc_update_grant(cli, body->oa.o_grant);
979 static int osc_should_shrink_grant(struct client_obd *client)
981 cfs_time_t time = cfs_time_current();
982 cfs_time_t next_shrink = client->cl_next_shrink_grant;
984 if ((client->cl_import->imp_connect_data.ocd_connect_flags &
985 OBD_CONNECT_GRANT_SHRINK) == 0)
988 if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
989 /* Get the current RPC size directly, instead of going via:
990 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
991 * Keep comment here so that it can be found by searching. */
992 int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
994 if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
995 client->cl_avail_grant > brw_size)
998 osc_update_next_shrink(client);
1003 static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
1005 struct client_obd *client;
1007 list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
1008 if (osc_should_shrink_grant(client))
1009 osc_shrink_grant(client);
1014 static int osc_add_shrink_grant(struct client_obd *client)
1018 rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
1020 osc_grant_shrink_grant_cb, NULL,
1021 &client->cl_grant_shrink_list);
1023 CERROR("add grant client %s error %d\n",
1024 client->cl_import->imp_obd->obd_name, rc);
1027 CDEBUG(D_CACHE, "add grant client %s \n",
1028 client->cl_import->imp_obd->obd_name);
1029 osc_update_next_shrink(client);
1033 static int osc_del_shrink_grant(struct client_obd *client)
1035 return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
1039 static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1042 * ocd_grant is the total grant amount we expect to hold: if we've
1043 * been evicted, it's the new avail_grant amount, cl_dirty_pages will
1044 * drop to 0 as inflight RPCs fail out; otherwise, it's avail_grant +
1047 * race is tolerable here: if we're evicted, but imp_state already
1048 * left EVICTED state, then cl_dirty_pages must be 0 already.
1050 client_obd_list_lock(&cli->cl_loi_list_lock);
1051 if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
1052 cli->cl_avail_grant = ocd->ocd_grant;
1054 cli->cl_avail_grant = ocd->ocd_grant -
1055 (cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
1057 if (cli->cl_avail_grant < 0) {
1058 CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
1059 cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
1060 ocd->ocd_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
1061 /* workaround for servers which do not have the patch from
1063 cli->cl_avail_grant = ocd->ocd_grant;
1066 /* determine the appropriate chunk size used by osc_extent. */
1067 cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
1068 client_obd_list_unlock(&cli->cl_loi_list_lock);
1070 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
1071 "chunk bits: %d.\n", cli->cl_import->imp_obd->obd_name,
1072 cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
1074 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
1075 list_empty(&cli->cl_grant_shrink_list))
1076 osc_add_shrink_grant(cli);
1079 /* We assume that the reason this OSC got a short read is because it read
1080 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
1081 * via the LOV, and it _knows_ it's reading inside the file, it's just that
1082 * this stripe never got written at or beyond this stripe offset yet. */
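/* For example (hypothetical numbers): a two-page read where only the first
 * 1000 bytes exist on the OST returns nob_read == 1000; the tail of the first
 * page and all of the second page are zero-filled by handle_short_read(). */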
1083 static void handle_short_read(int nob_read, obd_count page_count,
1084 struct brw_page **pga)
1089 /* skip bytes read OK */
1090 while (nob_read > 0) {
1091 LASSERT (page_count > 0);
1093 if (pga[i]->count > nob_read) {
1094 /* EOF inside this page */
1095 ptr = kmap(pga[i]->pg) +
1096 (pga[i]->off & ~CFS_PAGE_MASK);
1097 memset(ptr + nob_read, 0, pga[i]->count - nob_read);
1104 nob_read -= pga[i]->count;
1109 /* zero remaining pages */
1110 while (page_count-- > 0) {
1111 ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
1112 memset(ptr, 0, pga[i]->count);
1118 static int check_write_rcs(struct ptlrpc_request *req,
1119 int requested_nob, int niocount,
1120 obd_count page_count, struct brw_page **pga)
1125 remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
1126 sizeof(*remote_rcs) *
1128 if (remote_rcs == NULL) {
1129 CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
1133 /* return error if any niobuf was in error */
1134 for (i = 0; i < niocount; i++) {
1135 if ((int)remote_rcs[i] < 0)
1136 return(remote_rcs[i]);
1138 if (remote_rcs[i] != 0) {
1139 CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
1140 i, remote_rcs[i], req);
1145 if (req->rq_bulk->bd_nob_transferred != requested_nob) {
1146 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
1147 req->rq_bulk->bd_nob_transferred, requested_nob);
1154 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
1156 if (p1->flag != p2->flag) {
1157 unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
1158 OBD_BRW_SYNC | OBD_BRW_ASYNC |
1159 OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
1161 /* warn if we try to combine flags that we don't know to be
1162 * safe to combine */
1163 if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
1164 CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
1165 "report this at http://bugs.whamcloud.com/\n",
1166 p1->flag, p2->flag);
1171 return (p1->off + p1->count == p2->off);
1174 static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
1175 struct brw_page **pga, int opc,
1176 cksum_type_t cksum_type)
1180 struct cfs_crypto_hash_desc *hdesc;
1181 unsigned int bufsize;
1183 unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
1185 LASSERT(pg_count > 0);
1187 hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
1188 if (IS_ERR(hdesc)) {
1189 CERROR("Unable to initialize checksum hash %s\n",
1190 cfs_crypto_hash_name(cfs_alg));
1191 return PTR_ERR(hdesc);
1194 while (nob > 0 && pg_count > 0) {
1195 int count = pga[i]->count > nob ? nob : pga[i]->count;
1197 /* corrupt the data before we compute the checksum, to
1198 * simulate an OST->client data error */
1199 if (i == 0 && opc == OST_READ &&
1200 OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
1201 unsigned char *ptr = kmap(pga[i]->pg);
1202 int off = pga[i]->off & ~CFS_PAGE_MASK;
1204 memcpy(ptr + off, "bad1", min(4, nob));
1207 cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
1208 pga[i]->off & ~CFS_PAGE_MASK,
1210 LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
1211 (int)(pga[i]->off & ~CFS_PAGE_MASK));
1213 nob -= pga[i]->count;
1218 bufsize = sizeof(cksum);
1219 err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
1221 /* For sending we only compute the wrong checksum instead
1222 * of corrupting the data so it is still correct on a redo */
1223 if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
1229 static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
1230 struct lov_stripe_md *lsm, obd_count page_count,
1231 struct brw_page **pga,
1232 struct ptlrpc_request **reqp,
1233 struct obd_capa *ocapa, int reserve,
1236 struct ptlrpc_request *req;
1237 struct ptlrpc_bulk_desc *desc;
1238 struct ost_body *body;
1239 struct obd_ioobj *ioobj;
1240 struct niobuf_remote *niobuf;
1241 int niocount, i, requested_nob, opc, rc;
1242 struct osc_brw_async_args *aa;
1243 struct req_capsule *pill;
1244 struct brw_page *pg_prev;
1247 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1248 RETURN(-ENOMEM); /* Recoverable */
1249 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1250 RETURN(-EINVAL); /* Fatal */
1252 if ((cmd & OBD_BRW_WRITE) != 0) {
1254 req = ptlrpc_request_alloc_pool(cli->cl_import,
1255 cli->cl_import->imp_rq_pool,
1256 &RQF_OST_BRW_WRITE);
1259 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1264 for (niocount = i = 1; i < page_count; i++) {
1265 if (!can_merge_pages(pga[i - 1], pga[i]))
1269 pill = &req->rq_pill;
1270 req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1272 req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1273 niocount * sizeof(*niobuf));
1274 osc_set_capa_size(req, &RMF_CAPA1, ocapa);
1276 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1278 ptlrpc_request_free(req);
1281 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1282 ptlrpc_at_set_req_timeout(req);
1283 /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1285 req->rq_no_retry_einprogress = 1;
1287 desc = ptlrpc_prep_bulk_imp(req, page_count,
1288 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1289 opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
1293 GOTO(out, rc = -ENOMEM);
1294 /* NB request now owns desc and will free it when it gets freed */
1296 body = req_capsule_client_get(pill, &RMF_OST_BODY);
1297 ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1298 niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1299 LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1301 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1303 obdo_to_ioobj(oa, ioobj);
1304 ioobj->ioo_bufcnt = niocount;
1305 /* The high bits of ioo_max_brw tell the server the _maximum_ number of bulks
1306 * that might be sent for this request. The actual number is decided
1307 * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
1308 * "max - 1" for compatibility with old clients that send "0", and also so
1309 * the actual maximum is a power-of-two number, not one less. LU-1431 */
1310 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1311 osc_pack_capa(req, body, ocapa);
1312 LASSERT(page_count > 0);
1314 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1315 struct brw_page *pg = pga[i];
1316 int poff = pg->off & ~CFS_PAGE_MASK;
1318 LASSERT(pg->count > 0);
1319 /* make sure there is no gap in the middle of the page array */
1320 LASSERTF(page_count == 1 ||
1321 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
1322 ergo(i > 0 && i < page_count - 1,
1323 poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
1324 ergo(i == page_count - 1, poff == 0)),
1325 "i: %d/%d pg: %p off: "LPU64", count: %u\n",
1326 i, page_count, pg, pg->off, pg->count);
1328 LASSERTF(i == 0 || pg->off > pg_prev->off,
1329 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
1330 " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
1332 pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1333 pg_prev->pg, page_private(pg_prev->pg),
1334 pg_prev->pg->index, pg_prev->off);
1336 LASSERTF(i == 0 || pg->off > pg_prev->off,
1337 "i %d p_c %u\n", i, page_count);
1339 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1340 (pg->flag & OBD_BRW_SRVLOCK));
1342 ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
1343 requested_nob += pg->count;
1345 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1347 niobuf->len += pg->count;
1349 niobuf->offset = pg->off;
1350 niobuf->len = pg->count;
1351 niobuf->flags = pg->flag;
1356 LASSERTF((void *)(niobuf - niocount) ==
1357 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1358 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1359 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1361 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1363 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1364 body->oa.o_valid |= OBD_MD_FLFLAGS;
1365 body->oa.o_flags = 0;
1367 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1370 if (osc_should_shrink_grant(cli))
1371 osc_shrink_grant_local(cli, &body->oa);
1373 /* size[REQ_REC_OFF] still sizeof (*body) */
1374 if (opc == OST_WRITE) {
1375 if (cli->cl_checksum &&
1376 !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1377 /* store cl_cksum_type in a local variable since
1378 * it can be changed via lprocfs */
1379 cksum_type_t cksum_type = cli->cl_cksum_type;
1381 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1382 oa->o_flags &= OBD_FL_LOCAL_MASK;
1383 body->oa.o_flags = 0;
1385 body->oa.o_flags |= cksum_type_pack(cksum_type);
1386 body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1387 body->oa.o_cksum = osc_checksum_bulk(requested_nob,
1391 CDEBUG(D_PAGE, "checksum at write origin: %x\n",
1393 /* save this in 'oa', too, for later checking */
1394 oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1395 oa->o_flags |= cksum_type_pack(cksum_type);
1397 /* clear out the checksum flag, in case this is a
1398 * resend but cl_checksum is no longer set. b=11238 */
1399 oa->o_valid &= ~OBD_MD_FLCKSUM;
1401 oa->o_cksum = body->oa.o_cksum;
1402 /* 1 RC per niobuf */
1403 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1404 sizeof(__u32) * niocount);
1406 if (cli->cl_checksum &&
1407 !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1408 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1409 body->oa.o_flags = 0;
1410 body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
1411 body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1414 ptlrpc_request_set_replen(req);
1416 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1417 aa = ptlrpc_req_async_args(req);
1419 aa->aa_requested_nob = requested_nob;
1420 aa->aa_nio_count = niocount;
1421 aa->aa_page_count = page_count;
1425 INIT_LIST_HEAD(&aa->aa_oaps);
1426 if (ocapa && reserve)
1427 aa->aa_ocapa = capa_get(ocapa);
1433 ptlrpc_req_finished(req);
1437 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
1438 __u32 client_cksum, __u32 server_cksum, int nob,
1439 obd_count page_count, struct brw_page **pga,
1440 cksum_type_t client_cksum_type)
1444 cksum_type_t cksum_type;
1446 if (server_cksum == client_cksum) {
1447 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1451 cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1453 new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
1456 if (cksum_type != client_cksum_type)
1457 msg = "the server did not use the checksum type specified in "
1458 "the original request - likely a protocol problem";
1459 else if (new_cksum == server_cksum)
1460 msg = "changed on the client after we checksummed it - "
1461 "likely false positive due to mmap IO (bug 11742)";
1462 else if (new_cksum == client_cksum)
1463 msg = "changed in transit before arrival at OST";
1465 msg = "changed in transit AND doesn't match the original - "
1466 "likely false positive due to mmap IO (bug 11742)";
1468 LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
1469 " object "DOSTID" extent ["LPU64"-"LPU64"]\n",
1470 msg, libcfs_nid2str(peer->nid),
1471 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1472 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1473 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1474 POSTID(&oa->o_oi), pga[0]->off,
1475 pga[page_count-1]->off + pga[page_count-1]->count - 1);
1476 CERROR("original client csum %x (type %x), server csum %x (type %x), "
1477 "client csum now %x\n", client_cksum, client_cksum_type,
1478 server_cksum, cksum_type, new_cksum);
1482 /* Note rc enters this function as number of bytes transferred */
1483 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1485 struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1486 const lnet_process_id_t *peer =
1487 &req->rq_import->imp_connection->c_peer;
1488 struct client_obd *cli = aa->aa_cli;
1489 struct ost_body *body;
1490 __u32 client_cksum = 0;
1493 if (rc < 0 && rc != -EDQUOT) {
1494 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
1498 LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1499 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1501 DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
1505 /* set/clear over quota flag for a uid/gid */
1506 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1507 body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
1508 unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
1510 CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
1511 body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
1513 osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
1516 osc_update_grant(cli, body);
1521 if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1522 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1524 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1526 CERROR("Unexpected +ve rc %d\n", rc);
1529 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
1531 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1534 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1535 check_write_checksum(&body->oa, peer, client_cksum,
1536 body->oa.o_cksum, aa->aa_requested_nob,
1537 aa->aa_page_count, aa->aa_ppga,
1538 cksum_type_unpack(aa->aa_oa->o_flags)))
1541 rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
1542 aa->aa_page_count, aa->aa_ppga);
1546 /* The rest of this function executes only for OST_READs */
1548 /* if unwrap_bulk failed, return -EAGAIN to retry */
1549 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1551 GOTO(out, rc = -EAGAIN);
1553 if (rc > aa->aa_requested_nob) {
1554 CERROR("Unexpected rc %d (%d requested)\n", rc,
1555 aa->aa_requested_nob);
1559 if (rc != req->rq_bulk->bd_nob_transferred) {
1560 CERROR ("Unexpected rc %d (%d transferred)\n",
1561 rc, req->rq_bulk->bd_nob_transferred);
1565 if (rc < aa->aa_requested_nob)
1566 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1568 if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1569 static int cksum_counter;
1570 __u32 server_cksum = body->oa.o_cksum;
1573 cksum_type_t cksum_type;
1575 cksum_type = cksum_type_unpack(body->oa.o_valid &OBD_MD_FLFLAGS?
1576 body->oa.o_flags : 0);
1577 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
1578 aa->aa_ppga, OST_READ,
1581 if (peer->nid == req->rq_bulk->bd_sender) {
1585 router = libcfs_nid2str(req->rq_bulk->bd_sender);
1588 if (server_cksum != client_cksum) {
1589 LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
1590 "%s%s%s inode "DFID" object "DOSTID
1591 " extent ["LPU64"-"LPU64"]\n",
1592 req->rq_import->imp_obd->obd_name,
1593 libcfs_nid2str(peer->nid),
1595 body->oa.o_valid & OBD_MD_FLFID ?
1596 body->oa.o_parent_seq : (__u64)0,
1597 body->oa.o_valid & OBD_MD_FLFID ?
1598 body->oa.o_parent_oid : 0,
1599 body->oa.o_valid & OBD_MD_FLFID ?
1600 body->oa.o_parent_ver : 0,
1601 POSTID(&body->oa.o_oi),
1602 aa->aa_ppga[0]->off,
1603 aa->aa_ppga[aa->aa_page_count-1]->off +
1604 aa->aa_ppga[aa->aa_page_count-1]->count -
1606 CERROR("client %x, server %x, cksum_type %x\n",
1607 client_cksum, server_cksum, cksum_type);
1609 aa->aa_oa->o_cksum = client_cksum;
1613 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1616 } else if (unlikely(client_cksum)) {
1617 static int cksum_missed;
1620 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1621 CERROR("Checksum %u requested from %s but not sent\n",
1622 cksum_missed, libcfs_nid2str(peer->nid));
1628 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
1629 aa->aa_oa, &body->oa);
1634 static int osc_brw_redo_request(struct ptlrpc_request *request,
1635 struct osc_brw_async_args *aa, int rc)
1637 struct ptlrpc_request *new_req;
1638 struct osc_brw_async_args *new_aa;
1639 struct osc_async_page *oap;
1642 DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
1643 "redo for recoverable error %d", rc);
1645 rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1646 OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
1647 aa->aa_cli, aa->aa_oa,
1648 NULL /* lsm unused by osc currently */,
1649 aa->aa_page_count, aa->aa_ppga,
1650 &new_req, aa->aa_ocapa, 0, 1);
1654 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1655 if (oap->oap_request != NULL) {
1656 LASSERTF(request == oap->oap_request,
1657 "request %p != oap_request %p\n",
1658 request, oap->oap_request);
1659 if (oap->oap_interrupted) {
1660 ptlrpc_req_finished(new_req);
1665 /* New request takes over pga and oaps from old request.
1666 * Note that copying a list_head doesn't work, need to move it... */
1668 new_req->rq_interpret_reply = request->rq_interpret_reply;
1669 new_req->rq_async_args = request->rq_async_args;
1670 new_req->rq_commit_cb = request->rq_commit_cb;
1671 /* cap resend delay to the current request timeout, this is similar to
1672 * what ptlrpc does (see after_reply()) */
1673 if (aa->aa_resends > new_req->rq_timeout)
1674 new_req->rq_sent = cfs_time_current_sec() + new_req->rq_timeout;
1676 new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
1677 new_req->rq_generation_set = 1;
1678 new_req->rq_import_generation = request->rq_import_generation;
1680 new_aa = ptlrpc_req_async_args(new_req);
1682 INIT_LIST_HEAD(&new_aa->aa_oaps);
1683 list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
1684 INIT_LIST_HEAD(&new_aa->aa_exts);
1685 list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
1686 new_aa->aa_resends = aa->aa_resends;
1688 list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1689 if (oap->oap_request) {
1690 ptlrpc_req_finished(oap->oap_request);
1691 oap->oap_request = ptlrpc_request_addref(new_req);
1695 new_aa->aa_ocapa = aa->aa_ocapa;
1696 aa->aa_ocapa = NULL;
1698 /* XXX: This code will run into problems if we ever want to support
1699 * adding a series of BRW RPCs into a self-defined ptlrpc_request_set
1700 * and waiting for all of them to finish. We should inherit the request
1701 * set from the old request. */
1702 ptlrpcd_add_req(new_req, PDL_POLICY_SAME, -1);
1704 DEBUG_REQ(D_INFO, new_req, "new request");
1709 * ugh, we want disk allocation on the target to happen in offset order. We'll
1710 * follow Sedgewick's advice and stick to the dead-simple shellsort -- it'll do
1711 * fine for our small page arrays and doesn't require allocation. It's an
1712 * insertion sort that swaps elements that are strides apart, shrinking the
1713 * stride down until it's '1' and the array is sorted.
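 * Example (assuming the usual stride /= 3 step below): for num = 40 pages the
 * strides grow 1, 4, 13, 40 and the sorting passes then run with strides
 * 13, 4 and 1.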
1715 static void sort_brw_pages(struct brw_page **array, int num)
1718 struct brw_page *tmp;
1722 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1727 for (i = stride ; i < num ; i++) {
1730 while (j >= stride && array[j - stride]->off > tmp->off) {
1731 array[j] = array[j - stride];
1736 } while (stride > 1);
1739 static void osc_release_ppga(struct brw_page **ppga, obd_count count)
1741 LASSERT(ppga != NULL);
1742 OBD_FREE(ppga, sizeof(*ppga) * count);
1745 static int brw_interpret(const struct lu_env *env,
1746 struct ptlrpc_request *req, void *data, int rc)
1748 struct osc_brw_async_args *aa = data;
1749 struct osc_extent *ext;
1750 struct osc_extent *tmp;
1751 struct client_obd *cli = aa->aa_cli;
1754 rc = osc_brw_fini_request(req, rc);
1755 CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
1756 /* When the server returns -EINPROGRESS, the client should always retry,
1757 * regardless of how many times the bulk has already been resent. */
1758 if (osc_recoverable_error(rc)) {
1759 if (req->rq_import_generation !=
1760 req->rq_import->imp_generation) {
1761 CDEBUG(D_HA, "%s: resend cross eviction for object: "
1762 ""DOSTID", rc = %d.\n",
1763 req->rq_import->imp_obd->obd_name,
1764 POSTID(&aa->aa_oa->o_oi), rc);
1765 } else if (rc == -EINPROGRESS ||
1766 client_should_resend(aa->aa_resends, aa->aa_cli)) {
1767 rc = osc_brw_redo_request(req, aa, rc);
1769 CERROR("%s: too many resent retries for object: "
1770 ""LPU64":"LPU64", rc = %d.\n",
1771 req->rq_import->imp_obd->obd_name,
1772 POSTID(&aa->aa_oa->o_oi), rc);
1777 else if (rc == -EAGAIN || rc == -EINPROGRESS)
1782 capa_put(aa->aa_ocapa);
1783 aa->aa_ocapa = NULL;
1787 struct obdo *oa = aa->aa_oa;
1788 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
1789 unsigned long valid = 0;
1790 struct cl_object *obj;
1791 struct osc_async_page *last;
1793 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
1794 obj = osc2cl(last->oap_obj);
1796 cl_object_attr_lock(obj);
1797 if (oa->o_valid & OBD_MD_FLBLOCKS) {
1798 attr->cat_blocks = oa->o_blocks;
1799 valid |= CAT_BLOCKS;
1801 if (oa->o_valid & OBD_MD_FLMTIME) {
1802 attr->cat_mtime = oa->o_mtime;
1805 if (oa->o_valid & OBD_MD_FLATIME) {
1806 attr->cat_atime = oa->o_atime;
1809 if (oa->o_valid & OBD_MD_FLCTIME) {
1810 attr->cat_ctime = oa->o_ctime;
1814 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1815 struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
1816 loff_t last_off = last->oap_count + last->oap_obj_off;
1818 /* Change the file size if this is an out-of-quota or
1819 * direct I/O write and it extends the file size */
1820 if (loi->loi_lvb.lvb_size < last_off) {
1821 attr->cat_size = last_off;
1824 /* Extend KMS if it's not a lockless write */
1825 if (loi->loi_kms < last_off &&
1826 oap2osc_page(last)->ops_srvlock == 0) {
1827 attr->cat_kms = last_off;
1833 cl_object_attr_set(env, obj, attr, valid);
1834 cl_object_attr_unlock(obj);
1836 OBDO_FREE(aa->aa_oa);
1838 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
1839 osc_inc_unstable_pages(req);
1841 list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
1842 list_del_init(&ext->oe_link);
1843 osc_extent_finish(env, ext, 1, rc);
1845 LASSERT(list_empty(&aa->aa_exts));
1846 LASSERT(list_empty(&aa->aa_oaps));
1848 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
1849 req->rq_bulk->bd_nob_transferred);
1850 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
1851 ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
1853 client_obd_list_lock(&cli->cl_loi_list_lock);
1854 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
1855 * is called so we know whether to go to sync BRWs or wait for more
1856 * RPCs to complete */
1857 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
1858 cli->cl_w_in_flight--;
1860 cli->cl_r_in_flight--;
1861 osc_wake_cache_waiters(cli);
1862 client_obd_list_unlock(&cli->cl_loi_list_lock);
1864 osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
1868 static void brw_commit(struct ptlrpc_request *req)
1870 /* If osc_inc_unstable_pages (via osc_extent_finish) races with
1871 * this function being called via rq_commit_cb, we need to ensure
1872 * that osc_dec_unstable_pages is still called. Otherwise unstable
1873 * pages may be leaked. */
1874 spin_lock(&req->rq_lock);
1875 if (likely(req->rq_unstable)) {
1876 req->rq_unstable = 0;
1877 spin_unlock(&req->rq_lock);
1879 osc_dec_unstable_pages(req);
1881 req->rq_committed = 1;
1882 spin_unlock(&req->rq_lock);
1887 * Build an RPC from the list of extents in @ext_list. The caller must ensure
1888 * that the total number of pages in this list does NOT exceed the maximum
1889 * pages per RPC. Extents in the list must be in OES_RPC state.
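 * On success the extents are spliced onto aa_exts and finished from
 * brw_interpret(); on failure they are finished here with the error code
 * (see the error path at the end of this function).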
1891 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1892 struct list_head *ext_list, int cmd, pdl_policy_t pol)
1894 struct ptlrpc_request *req = NULL;
1895 struct osc_extent *ext;
1896 struct brw_page **pga = NULL;
1897 struct osc_brw_async_args *aa = NULL;
1898 struct obdo *oa = NULL;
1899 struct osc_async_page *oap;
1900 struct osc_async_page *tmp;
1901 struct cl_req *clerq = NULL;
1902 enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE :
1904 struct cl_req_attr *crattr = NULL;
1905 obd_off starting_offset = OBD_OBJECT_EOF;
1906 obd_off ending_offset = 0;
1910 bool soft_sync = false;
1913 struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
1916 LASSERT(!list_empty(ext_list));
1918 /* add pages into rpc_list to build BRW rpc */
1919 list_for_each_entry(ext, ext_list, oe_link) {
1920 LASSERT(ext->oe_state == OES_RPC);
1921 mem_tight |= ext->oe_memalloc;
1922 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1924 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1925 if (starting_offset > oap->oap_obj_off)
1926 starting_offset = oap->oap_obj_off;
1928 LASSERT(oap->oap_page_off == 0);
1929 if (ending_offset < oap->oap_obj_off + oap->oap_count)
1930 ending_offset = oap->oap_obj_off +
1933 LASSERT(oap->oap_page_off + oap->oap_count ==
1938 soft_sync = osc_over_unstable_soft_limit(cli);
1940 mpflag = cfs_memory_pressure_get_and_set();
1942 OBD_ALLOC(crattr, sizeof(*crattr));
1944 GOTO(out, rc = -ENOMEM);
1946 OBD_ALLOC(pga, sizeof(*pga) * page_count);
1948 GOTO(out, rc = -ENOMEM);
1952 GOTO(out, rc = -ENOMEM);
1955 list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
1956 struct cl_page *page = oap2cl_page(oap);
1957 if (clerq == NULL) {
1958 clerq = cl_req_alloc(env, page, crt,
1959 1 /* only 1-object rpcs for now */);
1961 GOTO(out, rc = PTR_ERR(clerq));
1964 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
1966 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
1967 pga[i] = &oap->oap_brw_page;
1968 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
1969 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1970 pga[i]->pg, page_index(oap->oap_page), oap,
1973 cl_req_page_add(env, clerq, page);
1976 /* always get the data for the obdo for the rpc */
1977 LASSERT(clerq != NULL);
1978 crattr->cra_oa = oa;
1979 cl_req_attr_set(env, clerq, crattr, ~0ULL);
1981 rc = cl_req_prep(env, clerq);
1983 CERROR("cl_req_prep failed: %d\n", rc);
1987 sort_brw_pages(pga, page_count);
1988 rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
1989 pga, &req, crattr->cra_capa, 1, 0);
1991 CERROR("prep_req failed: %d\n", rc);
1995 req->rq_commit_cb = brw_commit;
1996 req->rq_interpret_reply = brw_interpret;
1999 req->rq_memalloc = 1;
2001 /* Need to update the timestamps after the request is built in case
2002 * we race with setattr (locally or in queue at the OST). If the OST gets
2003 * the later setattr before the earlier BRW (as determined by the request xid),
2004 * the OST will not use the BRW timestamps. Sadly, there is no obvious
2005 * way to do this in a single call. bug 10150 */
2006 cl_req_attr_set(env, clerq, crattr,
2007 OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
2009 lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2011 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2012 aa = ptlrpc_req_async_args(req);
2013 INIT_LIST_HEAD(&aa->aa_oaps);
2014 list_splice_init(&rpc_list, &aa->aa_oaps);
2015 INIT_LIST_HEAD(&aa->aa_exts);
2016 list_splice_init(ext_list, &aa->aa_exts);
2017 aa->aa_clerq = clerq;
2019 /* queued sync pages can be torn down while the pages
2020 * were between the pending list and the rpc */
2022 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2023 /* only one oap gets a request reference */
2026 if (oap->oap_interrupted && !req->rq_intr) {
2027 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2029 ptlrpc_mark_interrupted(req);
2033 tmp->oap_request = ptlrpc_request_addref(req);
2035 client_obd_list_lock(&cli->cl_loi_list_lock);
2036 starting_offset >>= PAGE_CACHE_SHIFT;
2037 if (cmd == OBD_BRW_READ) {
2038 cli->cl_r_in_flight++;
2039 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2040 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2041 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2042 starting_offset + 1);
2044 cli->cl_w_in_flight++;
2045 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2046 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2047 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2048 starting_offset + 1);
2050 client_obd_list_unlock(&cli->cl_loi_list_lock);
2052 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2053 page_count, aa, cli->cl_r_in_flight,
2054 cli->cl_w_in_flight);
2056 /* XXX: Maybe the caller can check the RPC bulk descriptor to
2057 * see which CPU/NUMA node the majority of pages were allocated
2058 * on, and try to assign the async RPC to the CPU core
2059 * (PDL_POLICY_PREFERRED) to reduce cross-CPU memory traffic.
2061 * But on the other hand, we expect that multiple ptlrpcd
2062 * threads and the initial write sponsor can run in parallel,
2063 * especially when data checksums are enabled, which is a CPU-bound
2064 * operation that a single ptlrpcd thread cannot keep up with.
2065 * So more ptlrpcd threads sharing the BRW load
2066 * (with PDL_POLICY_ROUND) seems better.
2068 ptlrpcd_add_req(req, pol, -1);
2074 cfs_memory_pressure_restore(mpflag);
2076 if (crattr != NULL) {
2077 capa_put(crattr->cra_capa);
2078 OBD_FREE(crattr, sizeof(*crattr));
2082 LASSERT(req == NULL);
2087 OBD_FREE(pga, sizeof(*pga) * page_count);
2088 /* this should happen rarely and is pretty bad, it makes the
2089 * pending list not follow the dirty order */
2090 while (!list_empty(ext_list)) {
2091 ext = list_entry(ext_list->next, struct osc_extent,
2093 list_del_init(&ext->oe_link);
2094 osc_extent_finish(env, ext, 0, rc);
2096 if (clerq && !IS_ERR(clerq))
2097 cl_req_completion(env, clerq, rc);
2102 static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
2103 struct ldlm_enqueue_info *einfo)
2105 void *data = einfo->ei_cbdata;
2108 LASSERT(lock != NULL);
2109 LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
2110 LASSERT(lock->l_resource->lr_type == einfo->ei_type);
2111 LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
2112 LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
2114 lock_res_and_lock(lock);
2115 spin_lock(&osc_ast_guard);
2117 if (lock->l_ast_data == NULL)
2118 lock->l_ast_data = data;
2119 if (lock->l_ast_data == data)
2122 spin_unlock(&osc_ast_guard);
2123 unlock_res_and_lock(lock);
2128 static int osc_set_data_with_check(struct lustre_handle *lockh,
2129 struct ldlm_enqueue_info *einfo)
2131 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2135 set = osc_set_lock_data_with_check(lock, einfo);
2136 LDLM_LOCK_PUT(lock);
2138 CERROR("lockh %p, data %p - client evicted?\n",
2139 lockh, einfo->ei_cbdata);
2143 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2144 ldlm_iterator_t replace, void *data)
2146 struct ldlm_res_id res_id;
2147 struct obd_device *obd = class_exp2obd(exp);
2149 ostid_build_res_name(&lsm->lsm_oi, &res_id);
2150 ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
2154 /* find any ldlm lock of the inode in osc
2158 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2159 ldlm_iterator_t replace, void *data)
2161 struct ldlm_res_id res_id;
2162 struct obd_device *obd = class_exp2obd(exp);
2165 ostid_build_res_name(&lsm->lsm_oi, &res_id);
2166 rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
2167 if (rc == LDLM_ITER_STOP)
2169 if (rc == LDLM_ITER_CONTINUE)
2174 static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
2175 obd_enqueue_update_f upcall, void *cookie,
2176 __u64 *flags, int agl, int rc)
2178 int intent = *flags & LDLM_FL_HAS_INTENT;
2182 /* The request was created before ldlm_cli_enqueue call. */
2183 if (rc == ELDLM_LOCK_ABORTED) {
2184 struct ldlm_reply *rep;
2185 rep = req_capsule_server_get(&req->rq_pill,
2188 LASSERT(rep != NULL);
2189 rep->lock_policy_res1 =
2190 ptlrpc_status_ntoh(rep->lock_policy_res1);
2191 if (rep->lock_policy_res1)
2192 rc = rep->lock_policy_res1;
2196 if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
2198 *flags |= LDLM_FL_LVB_READY;
2199 CDEBUG(D_INODE, "got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
2200 lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
2203 /* Call the update callback. */
2204 rc = (*upcall)(cookie, rc);
2208 static int osc_enqueue_interpret(const struct lu_env *env,
2209 struct ptlrpc_request *req,
2210 struct osc_enqueue_args *aa, int rc)
2212 struct ldlm_lock *lock;
2213 struct lustre_handle handle;
2215 struct ost_lvb *lvb;
2217 __u64 *flags = aa->oa_flags;
2219 /* Make a local copy of a lock handle and a mode, because aa->oa_*
2220 * might be freed anytime after lock upcall has been called. */
2221 lustre_handle_copy(&handle, aa->oa_lockh);
2222 mode = aa->oa_ei->ei_mode;
2224 /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2226 lock = ldlm_handle2lock(&handle);
2228 /* Take an additional reference so that a blocking AST that
2229 * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
2230 * to arrive after the upcall has been executed by
2231 * osc_enqueue_fini(). */
2232 ldlm_lock_addref(&handle, mode);
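/* The matching decref for this extra reference, and the LDLM_LOCK_PUT()
 * balancing ldlm_handle2lock() above, are done near the end of this
 * function, after osc_enqueue_fini() has run. */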
2234 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2235 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2237 /* Let the CP AST grant the lock first. */
2238 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2240 if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
2245 lvb_len = sizeof(*aa->oa_lvb);
2248 /* Complete obtaining the lock procedure. */
2249 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
2250 mode, flags, lvb, lvb_len, &handle, rc);
2251 /* Complete osc stuff. */
2252 rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
2253 flags, aa->oa_agl, rc);
2255 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2257 /* Release the lock for async request. */
2258 if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
2260 * Releases a reference taken by ldlm_cli_enqueue(), if it is
2261 * not already released by
2262 * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
2264 ldlm_lock_decref(&handle, mode);
2266 LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
2267 aa->oa_lockh, req, aa);
2268 ldlm_lock_decref(&handle, mode);
2269 LDLM_LOCK_PUT(lock);
2273 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
2275 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a lock
2276 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
2277 * other synchronous requests; however, holding some locks while trying to obtain
2278 * others may take a considerable amount of time in the case of OST failure, and
2279 * when other sync requests cannot get a lock released from such a client, the
2280 * client is evicted from the cluster -- such scenarios make life difficult, so
2281 * release locks just after they are obtained. */
2282 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2283 __u64 *flags, ldlm_policy_data_t *policy,
2284 struct ost_lvb *lvb, int kms_valid,
2285 obd_enqueue_update_f upcall, void *cookie,
2286 struct ldlm_enqueue_info *einfo,
2287 struct lustre_handle *lockh,
2288 struct ptlrpc_request_set *rqset, int async, int agl)
2290 struct obd_device *obd = exp->exp_obd;
2291 struct ptlrpc_request *req = NULL;
2292 int intent = *flags & LDLM_FL_HAS_INTENT;
2293 __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
2298 /* Filesystem lock extents are extended to page boundaries so that
2299 * dealing with the page cache is a little smoother. */
2300 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
2301 policy->l_extent.end |= ~CFS_PAGE_MASK;
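/* For illustration, assuming 4KB pages (~CFS_PAGE_MASK == 0xfff): a byte
 * range starting at 5000 and ending at 6999 becomes start = 5000 -
 * (5000 & 0xfff) = 4096 and end = 6999 | 0xfff = 8191, i.e. the lock
 * extent covers the whole pages containing the requested range. */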
2304 * kms is not valid when either object is completely fresh (so that no
2305 * locks are cached), or object was evicted. In the latter case cached
2306 * lock cannot be used, because it would prime inode state with
2307 * potentially stale LVB.
2312 /* Next, search for already existing extent locks that will cover us */
2313 /* If we're trying to read, we also search for an existing PW lock. The
2314 * VFS and page cache already protect us locally, so lots of readers/
2315 * writers can share a single PW lock.
2317 * There are problems with conversion deadlocks, so instead of
2318 * converting a read lock to a write lock, we'll just enqueue a new
2321 * At some point we should cancel the read lock instead of making them
2322 * send us a blocking callback, but there are problems with canceling
2323 * locks out from other users right now, too. */
2324 mode = einfo->ei_mode;
2325 if (einfo->ei_mode == LCK_PR)
2327 mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
2328 einfo->ei_type, policy, mode, lockh, 0);
2330 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
2332 if ((agl != 0) && !ldlm_is_lvb_ready(matched)) {
2333 /* For AGL, if the enqueue RPC was sent but the lock was not
2334 * granted, then skip processing this stripe.
2335 * Return -ECANCELED to tell the caller. */
2336 ldlm_lock_decref(lockh, mode);
2337 LDLM_LOCK_PUT(matched);
2339 } else if (osc_set_lock_data_with_check(matched, einfo)) {
2340 *flags |= LDLM_FL_LVB_READY;
2341 /* addref the lock only for non-async requests when a PW
2342 * lock was matched although we asked for PR. */
2343 if (!rqset && einfo->ei_mode != mode)
2344 ldlm_lock_addref(lockh, LCK_PR);
2346 /* I would like to be able to ASSERT here that
2347 * rss <= kms, but I can't, for reasons which
2348 * are explained in lov_enqueue() */
2351 /* We already have a lock, and it's referenced.
2353 * At this point, the cl_lock::cll_state is CLS_QUEUING,
2354 * AGL upcall may change it to CLS_HELD directly. */
2355 (*upcall)(cookie, ELDLM_OK);
2357 if (einfo->ei_mode != mode)
2358 ldlm_lock_decref(lockh, LCK_PW);
2360 /* For async requests, decref the lock. */
2361 ldlm_lock_decref(lockh, einfo->ei_mode);
2362 LDLM_LOCK_PUT(matched);
2365 ldlm_lock_decref(lockh, mode);
2366 LDLM_LOCK_PUT(matched);
2372 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2373 &RQF_LDLM_ENQUEUE_LVB);
2377 rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
2379 ptlrpc_request_free(req);
2383 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
2385 ptlrpc_request_set_replen(req);
2388 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2389 *flags &= ~LDLM_FL_BLOCK_GRANTED;
2391 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2392 sizeof(*lvb), LVB_T_OST, lockh, async);
2395 struct osc_enqueue_args *aa;
2396 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
2397 aa = ptlrpc_req_async_args(req);
2400 aa->oa_flags = flags;
2401 aa->oa_upcall = upcall;
2402 aa->oa_cookie = cookie;
2404 aa->oa_lockh = lockh;
2407 req->rq_interpret_reply =
2408 (ptlrpc_interpterer_t)osc_enqueue_interpret;
2409 if (rqset == PTLRPCD_SET)
2410 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
2412 ptlrpc_set_add_req(rqset, req);
2413 } else if (intent) {
2414 ptlrpc_req_finished(req);
2419 rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
2421 ptlrpc_req_finished(req);
2426 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2427 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2428 __u64 *flags, void *data, struct lustre_handle *lockh,
2431 struct obd_device *obd = exp->exp_obd;
2432 __u64 lflags = *flags;
2436 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2439 /* Filesystem lock extents are extended to page boundaries so that
2440 * dealing with the page cache is a little smoother */
2441 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
2442 policy->l_extent.end |= ~CFS_PAGE_MASK;
2444 /* Next, search for already existing extent locks that will cover us */
2445 /* If we're trying to read, we also search for an existing PW lock. The
2446 * VFS and page cache already protect us locally, so lots of readers/
2447 * writers can share a single PW lock. */
2451 rc = ldlm_lock_match(obd->obd_namespace, lflags,
2452 res_id, type, policy, rc, lockh, unref);
2455 if (!osc_set_data_with_check(lockh, data)) {
2456 if (!(lflags & LDLM_FL_TEST_LOCK))
2457 ldlm_lock_decref(lockh, rc);
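/* If we asked for PR but ldlm_lock_match() matched a cached PW lock, the
 * branch below switches our reference from PW to PR so that the reference
 * mode matches what the caller asked for and will eventually release. */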
2461 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
2462 ldlm_lock_addref(lockh, LCK_PR);
2463 ldlm_lock_decref(lockh, LCK_PW);
2470 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
2474 if (unlikely(mode == LCK_GROUP))
2475 ldlm_lock_decref_and_cancel(lockh, mode);
2477 ldlm_lock_decref(lockh, mode);
2482 static int osc_statfs_interpret(const struct lu_env *env,
2483 struct ptlrpc_request *req,
2484 struct osc_async_args *aa, int rc)
2486 struct obd_statfs *msfs;
2490 /* The request has in fact never been sent
2491 * due to issues at a higher level (LOV).
2492 * Exit immediately since the caller is
2493 * aware of the problem and takes care
2494 * of the cleanup. */
2497 if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2498 (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
2504 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2506 GOTO(out, rc = -EPROTO);
2509 *aa->aa_oi->oi_osfs = *msfs;
2511 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2515 static int osc_statfs_async(struct obd_export *exp,
2516 struct obd_info *oinfo, __u64 max_age,
2517 struct ptlrpc_request_set *rqset)
2519 struct obd_device *obd = class_exp2obd(exp);
2520 struct ptlrpc_request *req;
2521 struct osc_async_args *aa;
2525 /* We could possibly pass max_age in the request (as an absolute
2526 * timestamp or a "seconds.usec ago") so the target can avoid doing
2527 * extra calls into the filesystem if that isn't necessary (e.g.
2528 * during mount that would help a bit). Having relative timestamps
2529 * is not so great if request processing is slow, while absolute
2530 * timestamps are not ideal because they need time synchronization. */
2531 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
2535 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2537 ptlrpc_request_free(req);
2540 ptlrpc_request_set_replen(req);
2541 req->rq_request_portal = OST_CREATE_PORTAL;
2542 ptlrpc_at_set_req_timeout(req);
2544 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
2545 /* procfs requests must not wait for a reply here, to avoid deadlock */
2546 req->rq_no_resend = 1;
2547 req->rq_no_delay = 1;
2550 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
2551 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
2552 aa = ptlrpc_req_async_args(req);
2555 ptlrpc_set_add_req(rqset, req);
2559 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
2560 struct obd_statfs *osfs, __u64 max_age, __u32 flags)
2562 struct obd_device *obd = class_exp2obd(exp);
2563 struct obd_statfs *msfs;
2564 struct ptlrpc_request *req;
2565 struct obd_import *imp = NULL;
2569 /* Since the request might also come from lprocfs, we need to
2570 * sync this with client_disconnect_export() (bug 15684). */
2571 down_read(&obd->u.cli.cl_sem);
2572 if (obd->u.cli.cl_import)
2573 imp = class_import_get(obd->u.cli.cl_import);
2574 up_read(&obd->u.cli.cl_sem);
2578 /* We could possibly pass max_age in the request (as an absolute
2579 * timestamp or a "seconds.usec ago") so the target can avoid doing
2580 * extra calls into the filesystem if that isn't necessary (e.g.
2581 * during mount that would help a bit). Having relative timestamps
2582 * is not so great if request processing is slow, while absolute
2583 * timestamps are not ideal because they need time synchronization. */
2584 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
2586 class_import_put(imp);
2591 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2593 ptlrpc_request_free(req);
2596 ptlrpc_request_set_replen(req);
2597 req->rq_request_portal = OST_CREATE_PORTAL;
2598 ptlrpc_at_set_req_timeout(req);
2600 if (flags & OBD_STATFS_NODELAY) {
2601 /* procfs requests must not wait for a reply here, to avoid deadlock */
2602 req->rq_no_resend = 1;
2603 req->rq_no_delay = 1;
2606 rc = ptlrpc_queue_wait(req);
2610 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2612 GOTO(out, rc = -EPROTO);
2619 ptlrpc_req_finished(req);
2623 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2624 void *karg, void *uarg)
2626 struct obd_device *obd = exp->exp_obd;
2627 struct obd_ioctl_data *data = karg;
2631 if (!try_module_get(THIS_MODULE)) {
2632 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
2633 module_name(THIS_MODULE));
2637 case OBD_IOC_CLIENT_RECOVER:
2638 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2639 data->ioc_inlbuf1, 0);
2643 case IOC_OSC_SET_ACTIVE:
2644 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2647 case OBD_IOC_POLL_QUOTACHECK:
2648 err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
2650 case OBD_IOC_PING_TARGET:
2651 err = ptlrpc_obd_ping(obd);
2654 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
2655 cmd, current_comm());
2656 GOTO(out, err = -ENOTTY);
2659 module_put(THIS_MODULE);
2663 static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
2664 obd_count keylen, void *key, __u32 *vallen, void *val,
2665 struct lov_stripe_md *lsm)
2668 if (!vallen || !val)
2671 if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
2672 __u32 *stripe = val;
2673 *vallen = sizeof(*stripe);
2676 } else if (KEY_IS(KEY_LAST_ID)) {
2677 struct ptlrpc_request *req;
2682 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2683 &RQF_OST_GET_INFO_LAST_ID);
2687 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2688 RCL_CLIENT, keylen);
2689 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2691 ptlrpc_request_free(req);
2695 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2696 memcpy(tmp, key, keylen);
2698 req->rq_no_delay = req->rq_no_resend = 1;
2699 ptlrpc_request_set_replen(req);
2700 rc = ptlrpc_queue_wait(req);
2704 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
2706 GOTO(out, rc = -EPROTO);
2708 *((obd_id *)val) = *reply;
2710 ptlrpc_req_finished(req);
2712 } else if (KEY_IS(KEY_FIEMAP)) {
2713 struct ll_fiemap_info_key *fm_key =
2714 (struct ll_fiemap_info_key *)key;
2715 struct ldlm_res_id res_id;
2716 ldlm_policy_data_t policy;
2717 struct lustre_handle lockh;
2718 ldlm_mode_t mode = 0;
2719 struct ptlrpc_request *req;
2720 struct ll_user_fiemap *reply;
2724 if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
2727 policy.l_extent.start = fm_key->fiemap.fm_start &
2730 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2731 fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
2732 policy.l_extent.end = OBD_OBJECT_EOF;
2734 policy.l_extent.end = (fm_key->fiemap.fm_start +
2735 fm_key->fiemap.fm_length +
2736 PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
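/* The resulting extent is page-aligned: fm_start is rounded down to a
 * page boundary above, and fm_start + fm_length is rounded up. For
 * illustration, with 4KB pages, fm_start = 5000 and fm_length = 3000 give
 * an extent end of (5000 + 3000 + 4095) & CFS_PAGE_MASK = 8192. The
 * OBD_OBJECT_EOF branch guards against overflow near the end of the
 * object's offset space. */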
2738 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2739 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
2740 LDLM_FL_BLOCK_GRANTED |
2742 &res_id, LDLM_EXTENT, &policy,
2743 LCK_PR | LCK_PW, &lockh, 0);
2744 if (mode) { /* lock is cached on client */
2745 if (mode != LCK_PR) {
2746 ldlm_lock_addref(&lockh, LCK_PR);
2747 ldlm_lock_decref(&lockh, LCK_PW);
2749 } else { /* no cached lock, need to acquire the lock on the server side */
2750 fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
2751 fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
2755 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2756 &RQF_OST_GET_INFO_FIEMAP);
2758 GOTO(drop_lock, rc = -ENOMEM);
2760 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
2761 RCL_CLIENT, keylen);
2762 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2763 RCL_CLIENT, *vallen);
2764 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2765 RCL_SERVER, *vallen);
2767 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2769 ptlrpc_request_free(req);
2770 GOTO(drop_lock, rc);
2773 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
2774 memcpy(tmp, key, keylen);
2775 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2776 memcpy(tmp, val, *vallen);
2778 ptlrpc_request_set_replen(req);
2779 rc = ptlrpc_queue_wait(req);
2783 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2785 GOTO(fini_req, rc = -EPROTO);
2787 memcpy(val, reply, *vallen);
2789 ptlrpc_req_finished(req);
2792 ldlm_lock_decref(&lockh, LCK_PR);
2799 static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
2800 obd_count keylen, void *key, obd_count vallen,
2801 void *val, struct ptlrpc_request_set *set)
2803 struct ptlrpc_request *req;
2804 struct obd_device *obd = exp->exp_obd;
2805 struct obd_import *imp = class_exp2cliimp(exp);
2810 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
2812 if (KEY_IS(KEY_CHECKSUM)) {
2813 if (vallen != sizeof(int))
2815 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
2819 if (KEY_IS(KEY_SPTLRPC_CONF)) {
2820 sptlrpc_conf_client_adapt(obd);
2824 if (KEY_IS(KEY_FLUSH_CTX)) {
2825 sptlrpc_import_flush_my_ctx(imp);
2829 if (KEY_IS(KEY_CACHE_SET)) {
2830 struct client_obd *cli = &obd->u.cli;
2832 LASSERT(cli->cl_cache == NULL); /* only once */
2833 cli->cl_cache = (struct cl_client_cache *)val;
2834 atomic_inc(&cli->cl_cache->ccc_users);
2835 cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
2837 /* add this osc into entity list */
2838 LASSERT(list_empty(&cli->cl_lru_osc));
2839 spin_lock(&cli->cl_cache->ccc_lru_lock);
2840 list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
2841 spin_unlock(&cli->cl_cache->ccc_lru_lock);
2846 if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
2847 struct client_obd *cli = &obd->u.cli;
2848 int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
2849 int target = *(int *)val;
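/* Shrink at most half of the pages currently on this OSC's LRU, capped
 * at the caller-requested target. */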
2851 nr = osc_lru_shrink(env, cli, min(nr, target), true);
2856 if (!set && !KEY_IS(KEY_GRANT_SHRINK))
2859 /* We pass all other commands directly to OST. Since nobody calls osc
2860 * methods directly and everybody is supposed to go through LOV, we
2861 * assume lov checked invalid values for us.
2862 * The only recognised values so far are evict_by_nid and mds_conn.
2863 * Even if something bad goes through, we'd get a -EINVAL from OST
2866 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
2867 &RQF_OST_SET_GRANT_INFO :
2872 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2873 RCL_CLIENT, keylen);
2874 if (!KEY_IS(KEY_GRANT_SHRINK))
2875 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
2876 RCL_CLIENT, vallen);
2877 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
2879 ptlrpc_request_free(req);
2883 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2884 memcpy(tmp, key, keylen);
2885 tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
2888 memcpy(tmp, val, vallen);
2890 if (KEY_IS(KEY_GRANT_SHRINK)) {
2891 struct osc_grant_args *aa;
2894 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2895 aa = ptlrpc_req_async_args(req);
2898 ptlrpc_req_finished(req);
2901 *oa = ((struct ost_body *)val)->oa;
2903 req->rq_interpret_reply = osc_shrink_grant_interpret;
2906 ptlrpc_request_set_replen(req);
2907 if (!KEY_IS(KEY_GRANT_SHRINK)) {
2908 LASSERT(set != NULL);
2909 ptlrpc_set_add_req(set, req);
2910 ptlrpc_check_set(NULL, set);
2912 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
2917 static int osc_reconnect(const struct lu_env *env,
2918 struct obd_export *exp, struct obd_device *obd,
2919 struct obd_uuid *cluuid,
2920 struct obd_connect_data *data,
2923 struct client_obd *cli = &obd->u.cli;
2925 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
2928 client_obd_list_lock(&cli->cl_loi_list_lock);
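/* Report the grant this client believes it still holds (unused grant
 * plus grant backing dirty pages); the GNU "?:" below falls back to
 * requesting roughly two full BRW RPCs worth of grant when that sum is
 * zero, e.g. right after setup. */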
2929 data->ocd_grant = (cli->cl_avail_grant +
2930 (cli->cl_dirty_pages << PAGE_CACHE_SHIFT)) ?:
2931 2 * cli_brw_size(obd);
2932 lost_grant = cli->cl_lost_grant;
2933 cli->cl_lost_grant = 0;
2934 client_obd_list_unlock(&cli->cl_loi_list_lock);
2936 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
2937 " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
2938 data->ocd_version, data->ocd_grant, lost_grant);
2944 static int osc_disconnect(struct obd_export *exp)
2946 struct obd_device *obd = class_exp2obd(exp);
2947 struct llog_ctxt *ctxt;
2950 ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
2952 if (obd->u.cli.cl_conn_count == 1) {
2953 /* Flush any remaining cancel messages out to the
2955 llog_sync(ctxt, exp, 0);
2957 llog_ctxt_put(ctxt);
2959 CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
2963 rc = client_disconnect_export(exp);
2965 * Initially we put del_shrink_grant before disconnect_export, but that
2966 * caused the following problem when setup (connect) and cleanup
2967 * (disconnect) were tangled together:
2968 * connect p1 disconnect p2
2969 * ptlrpc_connect_import
2970 * ............... class_manual_cleanup
2973 * ptlrpc_connect_interrupt
2975 * add this client to shrink list
2977 * Bang! The pinger triggers the shrink.
2978 * So the osc should be removed from the shrink list only after we
2979 * are sure the import has been destroyed. BUG18662
2981 if (obd->u.cli.cl_import == NULL)
2982 osc_del_shrink_grant(&obd->u.cli);
2986 static int osc_import_event(struct obd_device *obd,
2987 struct obd_import *imp,
2988 enum obd_import_event event)
2990 struct client_obd *cli;
2994 LASSERT(imp->imp_obd == obd);
2997 case IMP_EVENT_DISCON: {
2999 client_obd_list_lock(&cli->cl_loi_list_lock);
3000 cli->cl_avail_grant = 0;
3001 cli->cl_lost_grant = 0;
3002 client_obd_list_unlock(&cli->cl_loi_list_lock);
3005 case IMP_EVENT_INACTIVE: {
3006 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
3009 case IMP_EVENT_INVALIDATE: {
3010 struct ldlm_namespace *ns = obd->obd_namespace;
3014 env = cl_env_get(&refcheck);
3018 /* all pages go to failing rpcs due to the invalid
3020 osc_io_unplug(env, cli, NULL, PDL_POLICY_ROUND);
3022 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3023 cl_env_put(env, &refcheck);
3028 case IMP_EVENT_ACTIVE: {
3029 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
3032 case IMP_EVENT_OCD: {
3033 struct obd_connect_data *ocd = &imp->imp_connect_data;
3035 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3036 osc_init_grant(&obd->u.cli, ocd);
3039 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3040 imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3042 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
3045 case IMP_EVENT_DEACTIVATE: {
3046 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
3049 case IMP_EVENT_ACTIVATE: {
3050 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
3054 CERROR("Unknown import event %d\n", event);
3061 * Determine whether the lock can be canceled before replaying it
3062 * during recovery; see bug 16774 for detailed information.
3064 * \retval zero the lock can't be canceled
3065 * \retval other ok to cancel
3067 static int osc_cancel_weight(struct ldlm_lock *lock)
3070 * Cancel all unused, granted extent locks.
3072 if (lock->l_resource->lr_type == LDLM_EXTENT &&
3073 lock->l_granted_mode == lock->l_req_mode &&
3074 osc_ldlm_weigh_ast(lock) == 0)
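/* (A granted extent lock whose weight is zero, i.e. roughly no cached
 * pages under it, is cheap to re-acquire, so it is cancelled instead of
 * being replayed; anything else is kept for replay.) */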
3080 static int brw_queue_work(const struct lu_env *env, void *data)
3082 struct client_obd *cli = data;
3084 CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3086 osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
3090 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3092 struct client_obd *cli = &obd->u.cli;
3093 struct obd_type *type;
3098 rc = ptlrpcd_addref();
3102 rc = client_obd_setup(obd, lcfg);
3104 GOTO(out_ptlrpcd, rc);
3106 handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3107 if (IS_ERR(handler))
3108 GOTO(out_client_setup, rc = PTR_ERR(handler));
3109 cli->cl_writeback_work = handler;
3111 handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3112 if (IS_ERR(handler))
3113 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3114 cli->cl_lru_work = handler;
3116 rc = osc_quota_setup(obd);
3118 GOTO(out_ptlrpcd_work, rc);
3120 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3123 obd->obd_vars = lprocfs_osc_obd_vars;
3125 /* If this is true then both the client (osc) and the server (osp) are
3126 * on the same node. The osp layer, if loaded first, will register the
3127 * osc proc directory. In that case this obd_device will attach its proc
3128 * tree to type->typ_procsym instead of obd->obd_type->typ_procroot. */
3129 type = class_search_type(LUSTRE_OSP_NAME);
3130 if (type && type->typ_procsym) {
3131 obd->obd_proc_entry = lprocfs_seq_register(obd->obd_name,
3133 obd->obd_vars, obd);
3134 if (IS_ERR(obd->obd_proc_entry)) {
3135 rc = PTR_ERR(obd->obd_proc_entry);
3136 CERROR("error %d setting up lprocfs for %s\n", rc,
3138 obd->obd_proc_entry = NULL;
3141 rc = lprocfs_seq_obd_setup(obd);
3144 /* If the basic OSC proc tree construction succeeded then
3145 * let's do the rest. */
3147 lproc_osc_attach_seqstat(obd);
3148 sptlrpc_lprocfs_cliobd_attach(obd);
3149 ptlrpc_lprocfs_register_obd(obd);
3152 /* We need to allocate a few more requests, because
3153 * brw_interpret tries to create new requests before freeing
3154 * previous ones. Ideally we would want 2x max_rpcs_in_flight
3155 * reserved, but that might actually waste too much RAM,
3156 * so 2 is just a guess that should still work. */
3157 cli->cl_import->imp_rq_pool =
3158 ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
3160 ptlrpc_add_rqs_to_pool);
3162 INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
3163 ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3167 if (cli->cl_writeback_work != NULL) {
3168 ptlrpcd_destroy_work(cli->cl_writeback_work);
3169 cli->cl_writeback_work = NULL;
3171 if (cli->cl_lru_work != NULL) {
3172 ptlrpcd_destroy_work(cli->cl_lru_work);
3173 cli->cl_lru_work = NULL;
3176 client_obd_cleanup(obd);
3182 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
3188 case OBD_CLEANUP_EARLY: {
3189 struct obd_import *imp;
3190 imp = obd->u.cli.cl_import;
3191 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
3192 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
3193 ptlrpc_deactivate_import(imp);
3194 spin_lock(&imp->imp_lock);
3195 imp->imp_pingable = 0;
3196 spin_unlock(&imp->imp_lock);
3199 case OBD_CLEANUP_EXPORTS: {
3200 struct client_obd *cli = &obd->u.cli;
3202 * for the echo client, the export may be on the zombie list; wait for
3203 * the zombie thread to cull it, because cli.cl_import will be
3204 * cleared in client_disconnect_export():
3205 * class_export_destroy() -> obd_cleanup() ->
3206 * echo_device_free() -> echo_client_cleanup() ->
3207 * obd_disconnect() -> osc_disconnect() ->
3208 * client_disconnect_export()
3210 obd_zombie_barrier();
3211 if (cli->cl_writeback_work) {
3212 ptlrpcd_destroy_work(cli->cl_writeback_work);
3213 cli->cl_writeback_work = NULL;
3215 if (cli->cl_lru_work) {
3216 ptlrpcd_destroy_work(cli->cl_lru_work);
3217 cli->cl_lru_work = NULL;
3219 obd_cleanup_client_import(obd);
3220 ptlrpc_lprocfs_unregister_obd(obd);
3221 lprocfs_obd_cleanup(obd);
3222 rc = obd_llog_finish(obd, 0);
3224 CERROR("failed to cleanup llogging subsystems\n");
3231 int osc_cleanup(struct obd_device *obd)
3233 struct client_obd *cli = &obd->u.cli;
3239 if (cli->cl_cache != NULL) {
3240 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3241 spin_lock(&cli->cl_cache->ccc_lru_lock);
3242 list_del_init(&cli->cl_lru_osc);
3243 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3244 cli->cl_lru_left = NULL;
3245 atomic_dec(&cli->cl_cache->ccc_users);
3246 cli->cl_cache = NULL;
3249 /* free memory of osc quota cache */
3250 osc_quota_cleanup(obd);
3252 rc = client_obd_cleanup(obd);
3258 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
3260 int rc = class_process_proc_seq_param(PARAM_OSC, obd->obd_vars,
3262 return rc > 0 ? 0: rc;
3265 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
3267 return osc_process_config_base(obd, buf);
3270 struct obd_ops osc_obd_ops = {
3271 .o_owner = THIS_MODULE,
3272 .o_setup = osc_setup,
3273 .o_precleanup = osc_precleanup,
3274 .o_cleanup = osc_cleanup,
3275 .o_add_conn = client_import_add_conn,
3276 .o_del_conn = client_import_del_conn,
3277 .o_connect = client_connect_import,
3278 .o_reconnect = osc_reconnect,
3279 .o_disconnect = osc_disconnect,
3280 .o_statfs = osc_statfs,
3281 .o_statfs_async = osc_statfs_async,
3282 .o_unpackmd = osc_unpackmd,
3283 .o_create = osc_create,
3284 .o_destroy = osc_destroy,
3285 .o_getattr = osc_getattr,
3286 .o_getattr_async = osc_getattr_async,
3287 .o_setattr = osc_setattr,
3288 .o_setattr_async = osc_setattr_async,
3289 .o_change_cbdata = osc_change_cbdata,
3290 .o_find_cbdata = osc_find_cbdata,
3291 .o_iocontrol = osc_iocontrol,
3292 .o_get_info = osc_get_info,
3293 .o_set_info_async = osc_set_info_async,
3294 .o_import_event = osc_import_event,
3295 .o_process_config = osc_process_config,
3296 .o_quotactl = osc_quotactl,
3297 .o_quotacheck = osc_quotacheck,
3300 extern struct lu_kmem_descr osc_caches[];
3301 extern spinlock_t osc_ast_guard;
3302 extern struct lock_class_key osc_ast_guard_class;
3304 int __init osc_init(void)
3306 bool enable_proc = true;
3307 struct obd_type *type;
3311 /* print an address of _any_ initialized kernel symbol from this
3312 * module, to allow debugging with gdb versions that don't support
3313 * data symbols from modules. */
3314 CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3316 rc = lu_kmem_init(osc_caches);
3320 type = class_search_type(LUSTRE_OSP_NAME);
3321 if (type != NULL && type->typ_procsym != NULL)
3322 enable_proc = false;
3324 rc = class_register_type(&osc_obd_ops, NULL, enable_proc, NULL,
3325 #ifndef HAVE_ONLY_PROCFS_SEQ
3328 LUSTRE_OSC_NAME, &osc_device_type);
3330 lu_kmem_fini(osc_caches);
3334 spin_lock_init(&osc_ast_guard);
3335 lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
3341 static void /*__exit*/ osc_exit(void)
3343 class_unregister_type(LUSTRE_OSC_NAME);
3344 lu_kmem_fini(osc_caches);
3347 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
3348 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3349 MODULE_LICENSE("GPL");
3351 cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);