/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <libcfs/libcfs.h>

#include <lustre_dlm.h>
#include <lustre_net.h>
#include <lustre/lustre_user.h>
#include <obd_cksum.h>
#include <lustre_ha.h>
#include <lprocfs_status.h>
#include <lustre_ioctl.h>
#include <lustre_debug.h>
#include <lustre_param.h>
#include <lustre_fid.h>
#include <obd_class.h>
#include "osc_internal.h"
#include "osc_cl_internal.h"
struct osc_brw_async_args {
        struct obdo              *aa_oa;
        int                       aa_requested_nob;
        int                       aa_nio_count;
        obd_count                 aa_page_count;
        int                       aa_resends;
        struct brw_page         **aa_ppga;
        struct client_obd        *aa_cli;
        struct list_head          aa_oaps;
        struct list_head          aa_exts;
        struct obd_capa          *aa_ocapa;
        struct cl_req            *aa_clerq;
};

#define osc_grant_args osc_brw_async_args
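/* Note: grant-shrink RPCs reuse this async-args layout under the
 * osc_grant_args alias; osc_shrink_grant_interpret() below only relies on
 * the aa_oa field, so the alias stays safe as long as the field layout of
 * osc_brw_async_args is unchanged. */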
struct osc_async_args {
        struct obd_info         *aa_oi;
};

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct obd_info         *fa_oi;
        obd_enqueue_update_f     fa_upcall;
        void                    *fa_cookie;
};

struct osc_enqueue_args {
        struct obd_export       *oa_exp;
        __u64                   *oa_flags;
        obd_enqueue_update_f     oa_upcall;
        void                    *oa_cookie;
        struct ost_lvb          *oa_lvb;
        struct lustre_handle    *oa_lockh;
        struct ldlm_enqueue_info *oa_ei;
        unsigned int             oa_agl:1;
};
static void osc_release_ppga(struct brw_page **ppga, obd_count count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);
/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
{
        struct obd_import *imp = class_exp2cliimp(exp);
        int lsm_size;

        if (lmm != NULL) {
                if (lmm_bytes < sizeof(*lmm)) {
                        CERROR("%s: lov_mds_md too small: %d, need %d\n",
                               exp->exp_obd->obd_name, lmm_bytes,
                               (int)sizeof(*lmm));
                        RETURN(-EINVAL);
                }
                /* XXX LOV_MAGIC etc check? */

                if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
                        CERROR("%s: zero lmm_object_id: rc = %d\n",
                               exp->exp_obd->obd_name, -EINVAL);
                        RETURN(-EINVAL);
                }
        }

        lsm_size = lov_stripe_md_size(1);
        if (lsmp == NULL)
                RETURN(lsm_size);

        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                OBD_FREE(*lsmp, lsm_size);
                *lsmp = NULL;
                RETURN(0);
        }

        if (*lsmp == NULL) {
                OBD_ALLOC(*lsmp, lsm_size);
                if (unlikely(*lsmp == NULL))
                        RETURN(-ENOMEM);

                OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) {
                        OBD_FREE(*lsmp, lsm_size);
                        RETURN(-ENOMEM);
                }
                loi_init((*lsmp)->lsm_oinfo[0]);
        } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
                RETURN(-EBADF);
        }

        if (lmm != NULL) {
                /* XXX zero *lsmp? */
                ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
        }

        if (imp != NULL &&
            (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
                (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
        else
                (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;

        RETURN(lsm_size);
}
static inline void osc_pack_capa(struct ptlrpc_request *req,
                                 struct ost_body *body, void *capa)
{
        struct obd_capa *oc = (struct obd_capa *)capa;
        struct lustre_capa *c;

        if (oc == NULL)
                return;

        c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
        capa_cpy(c, oc);
        body->oa.o_valid |= OBD_MD_FLOSSCAPA;
        DEBUG_CAPA(D_SEC, c, "pack");
}
static inline void osc_pack_req_body(struct ptlrpc_request *req,
                                     struct obd_info *oinfo)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);
}
static inline void osc_set_capa_size(struct ptlrpc_request *req,
                                     const struct req_msg_field *field,
                                     struct obd_capa *oc)
{
        if (oc == NULL)
                req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
        else
                /* it is already calculated as sizeof(struct obd_capa) */
                ;
}
static int osc_getattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_async_args *aa, int rc)
{
        struct ost_body *body;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body) {
                CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
                lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
                                     aa->aa_oi->oi_oa, &body->oa);

                /* This should really be sent by the OST */
                aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
                aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
        } else {
                CDEBUG(D_INFO, "can't unpack ost_body\n");
                rc = -EPROTO;
                aa->aa_oi->oi_oa->o_valid = 0;
        }
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}
static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct osc_async_args *aa;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obd_info *oinfo)
{
        struct ptlrpc_request *req;
        struct ost_body *body;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
                             &body->oa);

        oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
        oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;

 out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}
static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obd_info *oinfo, struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body *body;
        int rc;

        LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
                             &body->oa);

 out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}
static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_setattr_args *sa, int rc)
{
        struct ost_body *body;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}
int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
                           struct obd_trans_info *oti,
                           obd_enqueue_update_f upcall, void *cookie,
                           struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
                oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        /* do MDS-to-OST setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        } else {
                req->rq_interpret_reply =
                        (ptlrpc_interpterer_t)osc_setattr_interpret;

                CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
                sa = ptlrpc_req_async_args(req);
                sa->sa_oa = oinfo->oi_oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                if (rqset == PTLRPCD_SET)
                        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
                else
                        ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}
static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct obd_trans_info *oti,
                             struct ptlrpc_request_set *rqset)
{
        return osc_setattr_async_base(exp, oinfo, oti,
                                      oinfo->oi_cb_up, oinfo, rqset);
}
int osc_real_create(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct lov_stripe_md *lsm;
        int rc;

        rc = obd_alloc_memmd(exp, &lsm);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            oa->o_flags == OBD_FL_DELORPHAN) {
                DEBUG_REQ(D_HA, req,
                          "delorphan from OST integration");
                /* Don't resend the delorphan req */
                req->rq_no_resend = req->rq_no_delay = 1;
        }

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way. */
        lsm->lsm_oi = oa->o_oi;

        if (oa->o_valid & OBD_MD_FLCOOKIE) {
                if (oti->oti_logcookies == NULL)
                        oti->oti_logcookies = &oti->oti_onecookie;

                *oti->oti_logcookies = oa->o_lcookie;
        }

        CDEBUG(D_HA, "transno: "LPD64"\n",
               lustre_msg_get_transno(req->rq_repmsg));
 out_req:
        ptlrpc_req_finished(req);
 out:
        if (rc)
                obd_free_memmd(exp, &lsm);
        RETURN(rc);
}
int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
                   obd_enqueue_update_f upcall, void *cookie,
                   struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
        CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(req);
        sa->sa_oa = oinfo->oi_oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;
        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        else
                ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}
static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req,
                              void *arg, int rc)
{
        struct osc_fsync_args *fa = arg;
        struct ost_body *body;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oi->oi_oa = body->oa;
out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}
int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct osc_fsync_args *fa;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
        fa = ptlrpc_req_async_args(req);
        fa->fa_oi = oinfo;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        else
                ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}
/* Find and cancel locally the locks matched by @mode in the resource found
 * by @objid. Found locks are added to the @cancels list. Returns the number
 * of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   ldlm_mode_t mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This distinguishes it from the case when ELC is not supported
         * originally, in which case we still want to cancel locks in advance
         * and just cancel them locally, without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (res == NULL)
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}
static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *data,
                                 int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);
        RETURN(0);
}
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                RETURN(1);
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        RETURN(0);
}
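/* Illustration (hypothetical numbers): with cl_max_rpcs_in_flight = 8 and
 * 8 destroys already in flight, the atomic_inc_return() above yields 9, so
 * the caller must wait.  The matching atomic_dec_return() may observe that
 * another destroy completed in the meantime (9 -> 7 after the decrement),
 * in which case a waiter is woken so the freed slot is not lost. */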
int osc_create(const struct lu_env *env, struct obd_export *exp,
               struct obdo *oa, struct lov_stripe_md **ea,
               struct obd_trans_info *oti)
{
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            oa->o_flags == OBD_FL_RECREATE_OBJS) {
                RETURN(osc_real_create(exp, oa, ea, oti));
        }

        if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
                RETURN(osc_real_create(exp, oa, ea, oti));

        /* we should not get here anymore */
        LBUG();
}
/* Destroy requests can always be async on the client, and we don't even
 * really care about the return code, since the client cannot do anything
 * at all about a failed destroy.
 *
 * When the MDS is unlinking a filename, it saves the file objects into a
 * recovery llog, and these object records are cancelled when the OST reports
 * they were destroyed and sync'd to disk (i.e. transaction committed).
 * If the client dies, or the OST is down when the object should be destroyed,
 * the records are not cancelled, and when the OST reconnects to the MDS next,
 * it will retrieve the llog unlink logs and then send the log cancellation
 * cookies to the MDS after committing the destroy transactions. */
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa, struct lov_stripe_md *ea,
                       struct obd_trans_info *oti, struct obd_export *md_export,
                       void *capa)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct list_head cancels = LIST_HEAD_INIT(cancels);
        int rc, count;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
                oa->o_lcookie = *oti->oti_logcookies;
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        osc_pack_capa(req, body, (struct obd_capa *)capa);
        ptlrpc_request_set_replen(req);

        /* If osc_destroy is for destroying an unlink orphan sent from MDT to
         * OST, it should not be blocked here, because the process might be
         * triggered by ptlrpcd, and it is not good to block a ptlrpcd
         * thread (b=16006) */
        if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
                req->rq_interpret_reply = osc_destroy_interpret;
                if (!osc_can_send_destroy(cli)) {
                        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
                                                          NULL);

                        /*
                         * Wait until the number of on-going destroy RPCs
                         * drops below cl_max_rpcs_in_flight.
                         */
                        l_wait_event_exclusive(cli->cl_destroy_waitq,
                                               osc_can_send_destroy(cli),
                                               &lwi);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        RETURN(0);
}
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        obd_flag bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty_pages << PAGE_CACHE_SHIFT;
        if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
                     cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages, cli->cl_dirty_transit,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
                            atomic_long_read(&obd_dirty_transit_pages) >
                            (obd_max_dirty_pages + 1))) {
                /* The atomic_read() and the atomic_inc() are not covered by
                 * a lock, thus they may safely race and trip this CERROR()
                 * unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld - %ld > system dirty_max %lu\n",
                       cli->cl_import->imp_obd->obd_name,
                       atomic_long_read(&obd_dirty_pages),
                       atomic_long_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long max_in_flight = (cli->cl_max_pages_per_rpc <<
                                               PAGE_CACHE_SHIFT) *
                                              (cli->cl_max_rpcs_in_flight + 1);
                oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_CACHE_SHIFT,
                                    max_in_flight);
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}
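/* Worked example (illustrative values): with 4KiB pages and
 * cl_dirty_max_pages = 8192, the else-branch above reports
 * o_undirty = max(32MiB, max_pages_per_rpc * (max_rpcs_in_flight + 1) pages),
 * i.e. the headroom this client could still dirty, so the server can size
 * the grant it hands back to what the client can actually consume. */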
void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant =
                cfs_time_shift(cli->cl_grant_shrink_interval);
        CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
               cli->cl_next_shrink_grant);
}
static void __osc_update_grant(struct client_obd *cli, obd_size grant)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
}
static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}
static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
                              obd_count keylen, void *key, obd_count vallen,
                              void *val, struct ptlrpc_request_set *set);

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *aa, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBDO_FREE(oa);
        return rc;
}
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}
/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC. This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}
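/* Worked example (illustrative values): with cl_max_pages_per_rpc = 256,
 * 4KiB pages and cl_max_rpcs_in_flight = 8, the first-stage target is
 * (8 + 1) * 1MiB = 9MiB.  If avail_grant is already at or below 9MiB, the
 * target drops to a single RPC's worth (1MiB), matching the two-stage
 * policy described in the comment above. */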
int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int rc = 0;
        struct ost_body *body;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                client_obd_list_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);

        OBD_FREE_PTR(body);
        RETURN(rc);
}
static int osc_should_shrink_grant(struct client_obd *client)
{
        cfs_time_t time = cfs_time_current();
        cfs_time_t next_shrink = client->cl_next_shrink_grant;

        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_GRANT_SHRINK) == 0)
                return 0;

        if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep the comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}
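/* In other words: a shrink is attempted only when (a) the server advertised
 * OBD_CONNECT_GRANT_SHRINK, (b) we are within ~5 ticks of the scheduled
 * shrink time, (c) the import is FULL, and (d) we hold more grant than a
 * single RPC needs; otherwise the shrink timer is simply pushed out again. */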
static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
{
        struct client_obd *client;

        list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
                if (osc_should_shrink_grant(client))
                        osc_shrink_grant(client);
        }
        return 0;
}
static int osc_add_shrink_grant(struct client_obd *client)
{
        int rc;

        rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
                                       TIMEOUT_GRANT,
                                       osc_grant_shrink_grant_cb, NULL,
                                       &client->cl_grant_shrink_list);
        if (rc) {
                CERROR("add grant client %s error %d\n",
                       client->cl_import->imp_obd->obd_name, rc);
                return rc;
        }
        CDEBUG(D_CACHE, "add grant client %s\n",
               client->cl_import->imp_obd->obd_name);
        osc_update_next_shrink(client);
        return 0;
}
static int osc_del_shrink_grant(struct client_obd *client)
{
        return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
                                         TIMEOUT_GRANT);
}
static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we have
         * been evicted, it's the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as in-flight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we're evicted, but imp_state has
         * already left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
                cli->cl_avail_grant = ocd->ocd_grant;
        else
                cli->cl_avail_grant = ocd->ocd_grant -
                                      (cli->cl_dirty_pages << PAGE_CACHE_SHIFT);

        if (cli->cl_avail_grant < 0) {
                CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
                      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
                      ocd->ocd_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
                /* workaround for servers which do not have the fix */
                cli->cl_avail_grant = ocd->ocd_grant;
        }

        /* determine the appropriate chunk size used by osc_extent. */
        cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld "
               "chunk bits: %d\n", cli->cl_import->imp_obd->obd_name,
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);

        if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
            list_empty(&cli->cl_grant_shrink_list))
                osc_add_shrink_grant(cli);
}
/* We assume that the reason this OSC got a short read is that it read beyond
 * the end of a stripe file; i.e. Lustre is reading a sparse file via the LOV,
 * and it _knows_ it's reading inside the file, it's just that this stripe
 * never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, obd_count page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                              (pga[i]->off & ~CFS_PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}
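/* Example (illustrative): a 3-page read (4KiB pages) that transfers only
 * 6KiB leaves nob_read = 6144.  Page 0 is consumed whole (nob_read drops to
 * 2048), the tail of page 1 beyond 2KiB is memset to zero, and page 2 is
 * zeroed entirely by the second loop, so the reader sees the hole as zeros. */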
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           obd_count page_count, struct brw_page **pga)
{
        int i;
        __u32 *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0)
                        return remote_rcs[i];

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }

        if (req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC | OBD_BRW_ASYNC |
                                  OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.hpdd.intel.com/\n",
                              p1->flag, p2->flag);
                        return 0;
                }
        }

        return (p1->off + p1->count == p2->off);
}
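/* Example (illustrative): two 4KiB pages at file offsets 0 and 4096 merge
 * into one contiguous niobuf (0 + 4096 == 4096), while pages at 0 and 8192
 * do not.  Flags listed in the mask above (grant, cache and sync hints) may
 * differ between merged pages; any other flag mismatch blocks the merge. */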
static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
                                   struct brw_page **pga, int opc,
                                   cksum_type_t cksum_type)
{
        __u32 cksum;
        int i = 0;
        int err;
        struct cfs_crypto_hash_desc *hdesc;
        unsigned int bufsize;
        unsigned char cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(hdesc)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(hdesc);
        }

        while (nob > 0 && pg_count > 0) {
                int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~CFS_PAGE_MASK;

                        memcpy(ptr + off, "bad1", min(4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
                                            pga[i]->off & ~CFS_PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~CFS_PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(cksum);
        err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead of
         * corrupting the data, so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        return cksum;
}
static int osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                                struct lov_stripe_md *lsm, obd_count page_count,
                                struct brw_page **pga,
                                struct ptlrpc_request **reqp,
                                struct obd_capa *ocapa, int reserve,
                                int resend)
{
        struct ptlrpc_request *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body *body;
        struct obd_ioobj *ioobj;
        struct niobuf_remote *niobuf;
        int niocount, i, requested_nob, opc, rc;
        struct osc_brw_async_args *aa;
        struct req_capsule *pill;
        struct brw_page *pg_prev;

        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */
        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                cli->cl_import->imp_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (req == NULL)
                RETURN(-ENOMEM);

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))
                        niocount++;
        }

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
                             sizeof(*ioobj));
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));
        osc_set_capa_size(req, &RMF_CAPA1, ocapa);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);
        /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
         * retry logic */
        req->rq_no_retry_einprogress = 1;
        desc = ptlrpc_prep_bulk_imp(req, page_count,
                cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
                opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
                OST_BULK_PORTAL);

        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        /* The high bits of ioo_max_brw tell the server the _maximum_ number
         * of bulks that might be sent for this request.  The actual number
         * is decided when the RPC is finally sent in ptlrpc_register_bulk().
         * It sends "max - 1" for compatibility with old clients that send
         * "0", and also so that the actual maximum is a power-of-two number,
         * not one less. LU-1431 */
        ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
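        /* Illustration (a hedged reading of the encoding): a client prepared
         * to use up to 4 bulk MDs stores 3 in the high bits, so a server that
         * also has to support legacy clients writing 0 can decode "value + 1"
         * uniformly and still end up with a power-of-two limit
         * (0 -> 1, 3 -> 4). */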
        osc_pack_capa(req, body, ocapa);
        LASSERT(page_count > 0);
        pg_prev = pga[0];
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = pga[i];
                int poff = pg->off & ~CFS_PAGE_MASK;

                LASSERT(pg->count > 0);
                /* make sure there is no gap in the middle of the page array */
                LASSERTF(page_count == 1 ||
                         (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
                               poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
                          ergo(i == page_count - 1, poff == 0)),
                         "i: %d/%d pg: %p off: "LPU64", count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
                         i, page_count,
                         pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                         pg_prev->pg, page_private(pg_prev->pg),
                         pg_prev->pg->index, pg_prev->off);
                LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                        (pg->flag & OBD_BRW_SRVLOCK));

                ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf--;
                        niobuf->rnb_len += pg->count;
                } else {
                        niobuf->rnb_offset = pg->off;
                        niobuf->rnb_len = pg->count;
                        niobuf->rnb_flags = pg->flag;
                }
                pg_prev = pg;
        }

        LASSERTF((void *)(niobuf - niocount) ==
                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);

        if (resend) {
                if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                        body->oa.o_valid |= OBD_MD_FLFLAGS;
                        body->oa.o_flags = 0;
                }
                body->oa.o_flags |= OBD_FL_RECOV_RESEND;
        }

        if (osc_should_shrink_grant(cli))
                osc_shrink_grant_local(cli, &body->oa);

        /* size[REQ_REC_OFF] still sizeof (*body) */
        if (opc == OST_WRITE) {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        /* store cl_cksum_type in a local variable since
                         * it can be changed via lprocfs */
                        cksum_type_t cksum_type = cli->cl_cksum_type;

                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                                oa->o_flags &= OBD_FL_LOCAL_MASK;
                                body->oa.o_flags = 0;
                        }
                        body->oa.o_flags |= cksum_type_pack(cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        body->oa.o_cksum = osc_checksum_bulk(requested_nob,
                                                             page_count, pga,
                                                             OST_WRITE,
                                                             cksum_type);
                        CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                               body->oa.o_cksum);
                        /* save this in 'oa', too, for later checking */
                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        oa->o_flags |= cksum_type_pack(cksum_type);
                } else {
                        /* clear out the checksum flag, in case this is a
                         * resend but cl_checksum is no longer set. b=11238 */
                        oa->o_valid &= ~OBD_MD_FLCKSUM;
                }
                oa->o_cksum = body->oa.o_cksum;
                /* 1 RC per niobuf */
                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
                                     sizeof(__u32) * niocount);
        } else {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;
                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                }
        }
        ptlrpc_request_set_replen(req);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oa = oa;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = niocount;
        aa->aa_page_count = page_count;
        aa->aa_resends = 0;
        aa->aa_ppga = pga;
        aa->aa_cli = cli;
        INIT_LIST_HEAD(&aa->aa_oaps);
        if (ocapa && reserve)
                aa->aa_ocapa = capa_get(ocapa);

        *reqp = req;
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
               req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
               niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
        RETURN(0);

 out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}
static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
                                __u32 client_cksum, __u32 server_cksum, int nob,
                                obd_count page_count, struct brw_page **pga,
                                cksum_type_t client_cksum_type)
{
        __u32 new_cksum;
        char *msg;
        cksum_type_t cksum_type;

        if (server_cksum == client_cksum) {
                CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
                return 0;
        }

        cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
                                       oa->o_flags : 0);
        new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
                                      cksum_type);

        if (cksum_type != client_cksum_type)
                msg = "the server did not use the checksum type specified in "
                      "the original request - likely a protocol problem";
        else if (new_cksum == server_cksum)
                msg = "changed on the client after we checksummed it - "
                      "likely false positive due to mmap IO (bug 11742)";
        else if (new_cksum == client_cksum)
                msg = "changed in transit before arrival at OST";
        else
                msg = "changed in transit AND doesn't match the original - "
                      "likely false positive due to mmap IO (bug 11742)";

        LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
                           " object "DOSTID" extent ["LPU64"-"LPU64"]\n",
                           msg, libcfs_nid2str(peer->nid),
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
                           POSTID(&oa->o_oi), pga[0]->off,
                           pga[page_count-1]->off + pga[page_count-1]->count - 1);
        CERROR("original client csum %x (type %x), server csum %x (type %x), "
               "client csum now %x\n", client_cksum, client_cksum_type,
               server_cksum, cksum_type, new_cksum);
        return 1;
}
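/* Summary of the diagnosis above: recomputing the checksum over the pages we
 * still hold distinguishes the cases.  If the recomputed value matches what
 * the server saw, the pages changed locally after the first checksum
 * (typically mmap IO); if it matches the original client value, the data was
 * corrupted in transit; if it matches neither, both things happened. */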
/* Note rc enters this function as number of bytes transferred */
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
{
        struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
        const lnet_process_id_t *peer =
                        &req->rq_import->imp_connection->c_peer;
        struct client_obd *cli = aa->aa_cli;
        struct ost_body *body;
        __u32 client_cksum = 0;

        if (rc < 0 && rc != -EDQUOT) {
                DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
                RETURN(rc);
        }

        LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
                RETURN(-EPROTO);
        }

        /* set/clear over quota flag for a uid/gid */
        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
            body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
                unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };

                CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
                       body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
                       body->oa.o_flags);
                osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
        }

        osc_update_grant(cli, body);

        if (rc < 0)
                RETURN(rc);

        if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
                client_cksum = aa->aa_oa->o_cksum; /* save for later */

        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
                if (rc > 0) {
                        CERROR("Unexpected +ve rc %d\n", rc);
                        RETURN(-EPROTO);
                }
                LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);

                if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
                        RETURN(-EAGAIN);

                if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
                    check_write_checksum(&body->oa, peer, client_cksum,
                                         body->oa.o_cksum, aa->aa_requested_nob,
                                         aa->aa_page_count, aa->aa_ppga,
                                         cksum_type_unpack(aa->aa_oa->o_flags)))
                        RETURN(-EAGAIN);

                rc = check_write_rcs(req, aa->aa_requested_nob, aa->aa_nio_count,
                                     aa->aa_page_count, aa->aa_ppga);
                GOTO(out, rc);
        }

        /* The rest of this function executes only for OST_READs */

        /* if unwrap_bulk failed, return -EAGAIN to retry */
        rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
        if (rc < 0)
                GOTO(out, rc = -EAGAIN);

        if (rc > aa->aa_requested_nob) {
                CERROR("Unexpected rc %d (%d requested)\n", rc,
                       aa->aa_requested_nob);
                RETURN(-EPROTO);
        }

        if (rc != req->rq_bulk->bd_nob_transferred) {
                CERROR("Unexpected rc %d (%d transferred)\n",
                       rc, req->rq_bulk->bd_nob_transferred);
                RETURN(-EPROTO);
        }

        if (rc < aa->aa_requested_nob)
                handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);

        if (body->oa.o_valid & OBD_MD_FLCKSUM) {
                static int cksum_counter;
                __u32 server_cksum = body->oa.o_cksum;
                char *via;
                char *router;
                cksum_type_t cksum_type;

                cksum_type = cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
                                               body->oa.o_flags : 0);
                client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
                                                 aa->aa_ppga, OST_READ,
                                                 cksum_type);

                if (peer->nid == req->rq_bulk->bd_sender) {
                        via = router = "";
                } else {
                        via = " via ";
                        router = libcfs_nid2str(req->rq_bulk->bd_sender);
                }

                if (server_cksum != client_cksum) {
                        LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
                                           "%s%s%s inode "DFID" object "DOSTID
                                           " extent ["LPU64"-"LPU64"]\n",
                                           req->rq_import->imp_obd->obd_name,
                                           libcfs_nid2str(peer->nid),
                                           via, router,
                                           body->oa.o_valid & OBD_MD_FLFID ?
                                                body->oa.o_parent_seq : (__u64)0,
                                           body->oa.o_valid & OBD_MD_FLFID ?
                                                body->oa.o_parent_oid : 0,
                                           body->oa.o_valid & OBD_MD_FLFID ?
                                                body->oa.o_parent_ver : 0,
                                           POSTID(&body->oa.o_oi),
                                           aa->aa_ppga[0]->off,
                                           aa->aa_ppga[aa->aa_page_count-1]->off +
                                           aa->aa_ppga[aa->aa_page_count-1]->count -
                                           1);
                        CERROR("client %x, server %x, cksum_type %x\n",
                               client_cksum, server_cksum, cksum_type);
                        cksum_counter = 0;
                        aa->aa_oa->o_cksum = client_cksum;
                        rc = -EAGAIN;
                } else {
                        cksum_counter++;
                        CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
                        rc = 0;
                }
        } else if (unlikely(client_cksum)) {
                static int cksum_missed;

                cksum_missed++;
                if ((cksum_missed & (-cksum_missed)) == cksum_missed)
                        CERROR("Checksum %u requested from %s but not sent\n",
                               cksum_missed, libcfs_nid2str(peer->nid));
        } else {
                rc = 0;
        }
out:
        if (rc >= 0)
                lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
                                     aa->aa_oa, &body->oa);

        RETURN(rc);
}
static int osc_brw_redo_request(struct ptlrpc_request *request,
                                struct osc_brw_async_args *aa, int rc)
{
        struct ptlrpc_request *new_req;
        struct osc_brw_async_args *new_aa;
        struct osc_async_page *oap;

        DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
                  "redo for recoverable error %d", rc);

        rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
                                          OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
                                  aa->aa_cli, aa->aa_oa,
                                  NULL /* lsm unused by osc currently */,
                                  aa->aa_page_count, aa->aa_ppga,
                                  &new_req, aa->aa_ocapa, 0, 1);
        if (rc)
                RETURN(rc);

        list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
                if (oap->oap_request != NULL) {
                        LASSERTF(request == oap->oap_request,
                                 "request %p != oap_request %p\n",
                                 request, oap->oap_request);
                        if (oap->oap_interrupted) {
                                ptlrpc_req_finished(new_req);
                                RETURN(-EINTR);
                        }
                }
        }
        /* New request takes over pga and oaps from old request.
         * Note that copying a list_head doesn't work, need to move it... */
        aa->aa_resends++;
        new_req->rq_interpret_reply = request->rq_interpret_reply;
        new_req->rq_async_args = request->rq_async_args;
        new_req->rq_commit_cb = request->rq_commit_cb;
        /* cap resend delay to the current request timeout, this is similar to
         * what ptlrpc does (see after_reply()) */
        if (aa->aa_resends > new_req->rq_timeout)
                new_req->rq_sent = cfs_time_current_sec() + new_req->rq_timeout;
        else
                new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
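        /* Example (illustrative): on the 3rd resend of a request with a 30s
         * timeout the new request is delayed 3 seconds; once aa_resends
         * exceeds 30, every further resend is delayed by the full 30s and
         * never more. */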
        new_req->rq_generation_set = 1;
        new_req->rq_import_generation = request->rq_import_generation;

        new_aa = ptlrpc_req_async_args(new_req);

        INIT_LIST_HEAD(&new_aa->aa_oaps);
        list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
        INIT_LIST_HEAD(&new_aa->aa_exts);
        list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
        new_aa->aa_resends = aa->aa_resends;

        list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
                if (oap->oap_request) {
                        ptlrpc_req_finished(oap->oap_request);
                        oap->oap_request = ptlrpc_request_addref(new_req);
                }
        }

        new_aa->aa_ocapa = aa->aa_ocapa;
        aa->aa_ocapa = NULL;

        /* XXX: This code will run into problems if we ever want to support
         * adding a series of BRW RPCs into a self-defined ptlrpc_request_set
         * and waiting for all of them to finish. We should inherit the
         * request set from the old request. */
        ptlrpcd_add_req(new_req, PDL_POLICY_SAME, -1);

        DEBUG_REQ(D_INFO, new_req, "new request");
        RETURN(0);
}
/*
 * Ugh, we want disk allocation on the target to happen in offset order. We'll
 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll
 * do fine for our small page arrays and doesn't require allocation. It's an
 * insertion sort that swaps elements that are strides apart, shrinking the
 * stride down until it's 1 and the array is sorted.
 */
static void sort_brw_pages(struct brw_page **array, int num)
{
        int stride, i, j;
        struct brw_page *tmp;

        if (num == 1)
                return;
        for (stride = 1; stride < num; stride = (stride * 3) + 1)
                ;

        do {
                stride /= 3;
                for (i = stride; i < num; i++) {
                        tmp = array[i];
                        j = i;
                        while (j >= stride && array[j - stride]->off > tmp->off) {
                                array[j] = array[j - stride];
                                j -= stride;
                        }
                        array[j] = tmp;
                }
        } while (stride > 1);
}
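/* Stride walkthrough (illustrative): for num = 10 the growth loop stops at
 * stride = 13 (1 -> 4 -> 13), and the do-loop then sorts with strides
 * 13/3 = 4 and 4/3 = 1; the final stride-1 pass is a plain insertion sort
 * over an almost-sorted array, which is where shellsort gets its speed. */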
static void osc_release_ppga(struct brw_page **ppga, obd_count count)
{
        LASSERT(ppga != NULL);
        OBD_FREE(ppga, sizeof(*ppga) * count);
}
static int brw_interpret(const struct lu_env *env,
                         struct ptlrpc_request *req, void *data, int rc)
{
        struct osc_brw_async_args *aa = data;
        struct osc_extent *ext;
        struct osc_extent *tmp;
        struct client_obd *cli = aa->aa_cli;

        rc = osc_brw_fini_request(req, rc);
        CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
        /* When the server returns -EINPROGRESS, the client should always
         * retry, regardless of how many times the bulk was already resent. */
        if (osc_recoverable_error(rc)) {
                if (req->rq_import_generation !=
                    req->rq_import->imp_generation) {
                        CDEBUG(D_HA, "%s: resend cross eviction for object: "
                               ""DOSTID", rc = %d.\n",
                               req->rq_import->imp_obd->obd_name,
                               POSTID(&aa->aa_oa->o_oi), rc);
                } else if (rc == -EINPROGRESS ||
                           client_should_resend(aa->aa_resends, aa->aa_cli)) {
                        rc = osc_brw_redo_request(req, aa, rc);
                } else {
                        CERROR("%s: too many resent retries for object: "
                               ""LPU64":"LPU64", rc = %d.\n",
                               req->rq_import->imp_obd->obd_name,
                               POSTID(&aa->aa_oa->o_oi), rc);
                }

                if (rc == 0)
                        RETURN(0);
                else if (rc == -EAGAIN || rc == -EINPROGRESS)
                        rc = -EIO;
        }

        if (aa->aa_ocapa) {
                capa_put(aa->aa_ocapa);
                aa->aa_ocapa = NULL;
        }

        if (rc == 0) {
                struct obdo *oa = aa->aa_oa;
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;
                unsigned long valid = 0;
                struct cl_object *obj;
                struct osc_async_page *last;

                last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
                obj = osc2cl(last->oap_obj);

                cl_object_attr_lock(obj);
                if (oa->o_valid & OBD_MD_FLBLOCKS) {
                        attr->cat_blocks = oa->o_blocks;
                        valid |= CAT_BLOCKS;
                }
                if (oa->o_valid & OBD_MD_FLMTIME) {
                        attr->cat_mtime = oa->o_mtime;
                        valid |= CAT_MTIME;
                }
                if (oa->o_valid & OBD_MD_FLATIME) {
                        attr->cat_atime = oa->o_atime;
                        valid |= CAT_ATIME;
                }
                if (oa->o_valid & OBD_MD_FLCTIME) {
                        attr->cat_ctime = oa->o_ctime;
                        valid |= CAT_CTIME;
                }

                if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
                        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
                        loff_t last_off = last->oap_count + last->oap_obj_off +
                                          last->oap_page_off;

                        /* Change the file size if this is an out-of-quota or
                         * direct IO write and it extends the file size */
                        if (loi->loi_lvb.lvb_size < last_off) {
                                attr->cat_size = last_off;
                                valid |= CAT_SIZE;
                        }
                        /* Extend KMS if it's not a lockless write */
                        if (loi->loi_kms < last_off &&
                            oap2osc_page(last)->ops_srvlock == 0) {
                                attr->cat_kms = last_off;
                                valid |= CAT_KMS;
                        }
                }

                if (valid != 0)
                        cl_object_attr_set(env, obj, attr, valid);
                cl_object_attr_unlock(obj);
        }
        OBDO_FREE(aa->aa_oa);

        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
                osc_inc_unstable_pages(req);

        list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
                list_del_init(&ext->oe_link);
                osc_extent_finish(env, ext, 1, rc);
        }
        LASSERT(list_empty(&aa->aa_exts));
        LASSERT(list_empty(&aa->aa_oaps));

        cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
                          req->rq_bulk->bd_nob_transferred);
        osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
        ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
         * is called so we know whether to go to sync BRWs or wait for more
         * RPCs to complete */
        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
                cli->cl_w_in_flight--;
        else
                cli->cl_r_in_flight--;
        osc_wake_cache_waiters(cli);
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
        RETURN(rc);
}
static void brw_commit(struct ptlrpc_request *req)
{
        /* If osc_inc_unstable_pages (via osc_extent_finish) races with
         * this being called via rq_commit_cb, we need to ensure that
         * osc_dec_unstable_pages is still called. Otherwise unstable
         * pages may be leaked. */
        spin_lock(&req->rq_lock);
        if (likely(req->rq_unstable)) {
                req->rq_unstable = 0;
                spin_unlock(&req->rq_lock);

                osc_dec_unstable_pages(req);
        } else {
                req->rq_committed = 1;
                spin_unlock(&req->rq_lock);
        }
}
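/* The rq_unstable flag under rq_lock acts as a hand-off token: whichever of
 * brw_commit() and the extent-finish path observes it set clears it and
 * performs the osc_dec_unstable_pages() call, so the unstable-page count is
 * dropped exactly once no matter which side wins the race. */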
/**
 * Build an RPC from the list of extents @ext_list. The caller must ensure
 * that the total number of pages in this list does not exceed the maximum
 * pages per RPC.
 *
 * Extents in the list must be in OES_RPC state.
 */
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                  struct list_head *ext_list, int cmd, pdl_policy_t pol)
{
        struct ptlrpc_request *req = NULL;
        struct osc_extent *ext;
        struct brw_page **pga = NULL;
        struct osc_brw_async_args *aa = NULL;
        struct obdo *oa = NULL;
        struct osc_async_page *oap;
        struct osc_async_page *tmp;
        struct cl_req *clerq = NULL;
        enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE :
                                                       CRT_READ;
        struct cl_req_attr *crattr = NULL;
        obd_off starting_offset = OBD_OBJECT_EOF;
        obd_off ending_offset = 0;
        int mpflag = 0;
        int mem_tight = 0;
        int page_count = 0;
        bool soft_sync = false;
        int i;
        int rc;
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);

        LASSERT(!list_empty(ext_list));
        /* add pages into rpc_list to build the BRW rpc */
        list_for_each_entry(ext, ext_list, oe_link) {
                LASSERT(ext->oe_state == OES_RPC);
                mem_tight |= ext->oe_memalloc;
                list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
                        ++page_count;
                        list_add_tail(&oap->oap_rpc_item, &rpc_list);
                        if (starting_offset > oap->oap_obj_off)
                                starting_offset = oap->oap_obj_off;
                        else
                                LASSERT(oap->oap_page_off == 0);
                        if (ending_offset < oap->oap_obj_off + oap->oap_count)
                                ending_offset = oap->oap_obj_off +
                                                oap->oap_count;
                        else
                                LASSERT(oap->oap_page_off + oap->oap_count ==
                                        PAGE_CACHE_SIZE);
                }
        }

        soft_sync = osc_over_unstable_soft_limit(cli);
        if (mem_tight)
                mpflag = cfs_memory_pressure_get_and_set();
        OBD_ALLOC(crattr, sizeof(*crattr));
        if (crattr == NULL)
                GOTO(out, rc = -ENOMEM);

        OBD_ALLOC(pga, sizeof(*pga) * page_count);
        if (pga == NULL)
                GOTO(out, rc = -ENOMEM);

        OBDO_ALLOC(oa);
        if (oa == NULL)
                GOTO(out, rc = -ENOMEM);

        i = 0;
        list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
                struct cl_page *page = oap2cl_page(oap);
                if (clerq == NULL) {
                        clerq = cl_req_alloc(env, page, crt,
                                             1 /* only 1-object rpcs for now */);
                        if (IS_ERR(clerq))
                                GOTO(out, rc = PTR_ERR(clerq));
                }
                if (mem_tight)
                        oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
                if (soft_sync)
                        oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
                pga[i] = &oap->oap_brw_page;
                pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
                CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
                       pga[i]->pg, page_index(oap->oap_page), oap,
                       pga[i]->flag);
                i++;
                cl_req_page_add(env, clerq, page);
        }
        /* always get the data for the obdo for the rpc */
        LASSERT(clerq != NULL);
        crattr->cra_oa = oa;
        cl_req_attr_set(env, clerq, crattr, ~0ULL);

        rc = cl_req_prep(env, clerq);
        if (rc != 0) {
                CERROR("cl_req_prep failed: %d\n", rc);
                GOTO(out, rc);
        }

        sort_brw_pages(pga, page_count);
        rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
                                  pga, &req, crattr->cra_capa, 1, 0);
        if (rc != 0) {
                CERROR("prep_req failed: %d\n", rc);
                GOTO(out, rc);
        }
        req->rq_commit_cb = brw_commit;
        req->rq_interpret_reply = brw_interpret;

        if (mem_tight != 0)
                req->rq_memalloc = 1;

        /* Need to update the timestamps after the request is built in case
         * we race with setattr (locally or in queue at the OST). If the OST
         * gets the later setattr before the earlier BRW (as determined by
         * the request xid), the OST will not use the BRW timestamps. Sadly,
         * there is no obvious way to do this in a single call. bug 10150 */
        cl_req_attr_set(env, clerq, crattr,
                        OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);

        lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        INIT_LIST_HEAD(&aa->aa_oaps);
        list_splice_init(&rpc_list, &aa->aa_oaps);
        INIT_LIST_HEAD(&aa->aa_exts);
        list_splice_init(ext_list, &aa->aa_exts);
        aa->aa_clerq = clerq;
        /* queued sync pages can be torn down while the pages
         * were between the pending list and the rpc */
        tmp = NULL;
        list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
                /* only one oap gets a request reference */
                if (tmp == NULL)
                        tmp = oap;
                if (oap->oap_interrupted && !req->rq_intr) {
                        CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
                               oap, req);
                        ptlrpc_mark_interrupted(req);
                }
        }
        if (tmp != NULL)
                tmp->oap_request = ptlrpc_request_addref(req);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        starting_offset >>= PAGE_CACHE_SHIFT;
        if (cmd == OBD_BRW_READ) {
                cli->cl_r_in_flight++;
                lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
                lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
                lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
                                      starting_offset + 1);
        } else {
                cli->cl_w_in_flight++;
                lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
                lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
                lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
                                      starting_offset + 1);
        }
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%uw in flight",
                  page_count, aa, cli->cl_r_in_flight,
                  cli->cl_w_in_flight);
        /* XXX: Maybe the caller can check the RPC bulk descriptor to
         * see which CPU/NUMA node the majority of pages were allocated
         * on, and try to assign the async RPC to that CPU core
         * (PDL_POLICY_PREFERRED) to reduce cross-CPU memory traffic.
         *
         * But on the other hand, we expect that multiple ptlrpcd
         * threads and the initial write sponsor can run in parallel,
         * especially when data checksumming is enabled, which is a
         * CPU-bound operation that a single ptlrpcd thread cannot
         * process in time. So more ptlrpcd threads sharing the BRW load
         * (with PDL_POLICY_ROUND) seems better.
         */
        ptlrpcd_add_req(req, pol, -1);
        rc = 0;
out:
        if (mem_tight != 0)
                cfs_memory_pressure_restore(mpflag);

        if (crattr != NULL) {
                capa_put(crattr->cra_capa);
                OBD_FREE(crattr, sizeof(*crattr));
        }

        if (rc != 0) {
                LASSERT(req == NULL);

                if (oa)
                        OBDO_FREE(oa);
                if (pga)
                        OBD_FREE(pga, sizeof(*pga) * page_count);
                /* this should happen rarely and is pretty bad, it makes the
                 * pending list not follow the dirty order */
                while (!list_empty(ext_list)) {
                        ext = list_entry(ext_list->next, struct osc_extent,
                                         oe_link);
                        list_del_init(&ext->oe_link);
                        osc_extent_finish(env, ext, 0, rc);
                }
                if (clerq && !IS_ERR(clerq))
                        cl_req_completion(env, clerq, rc);
        }
        RETURN(rc);
}
2093 static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
2094 struct ldlm_enqueue_info *einfo)
2096 void *data = einfo->ei_cbdata;
2099 LASSERT(lock != NULL);
2100 LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
2101 LASSERT(lock->l_resource->lr_type == einfo->ei_type);
2102 LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
2103 LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
2105 lock_res_and_lock(lock);
2106 spin_lock(&osc_ast_guard);
2108 if (lock->l_ast_data == NULL)
2109 lock->l_ast_data = data;
2110 if (lock->l_ast_data == data)
2113 spin_unlock(&osc_ast_guard);
2114 unlock_res_and_lock(lock);
2116 return set;
2117 }
2119 static int osc_set_data_with_check(struct lustre_handle *lockh,
2120 struct ldlm_enqueue_info *einfo)
2121 {
2122 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2123 int set = 0;
2125 if (lock != NULL) {
2126 set = osc_set_lock_data_with_check(lock, einfo);
2127 LDLM_LOCK_PUT(lock);
2128 } else
2129 CERROR("lockh %p, data %p - client evicted?\n",
2130 lockh, einfo->ei_cbdata);
2131 return set;
2132 }
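/*
 * Editor's note: osc_set_data_with_check() is the handle-based wrapper a
 * caller uses after a successful ldlm_lock_match(); on a data mismatch the
 * matched lock must be released again, exactly as osc_match_base() below
 * does:
 *
 *	if (!osc_set_data_with_check(lockh, data)) {
 *		if (!(lflags & LDLM_FL_TEST_LOCK))
 *			ldlm_lock_decref(lockh, rc);
 *		RETURN(0);
 *	}
 */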
2134 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2135 ldlm_iterator_t replace, void *data)
2136 {
2137 struct ldlm_res_id res_id;
2138 struct obd_device *obd = class_exp2obd(exp);
2140 ostid_build_res_name(&lsm->lsm_oi, &res_id);
2141 ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
2142 return 0;
2143 }
2145 /* find any ldlm lock of the inode in osc
2146 * return 0 if no lock is found
2147 *        1 if a lock is found
2148 *      < 0 on error */
2149 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2150 ldlm_iterator_t replace, void *data)
2151 {
2152 struct ldlm_res_id res_id;
2153 struct obd_device *obd = class_exp2obd(exp);
2154 int rc = 0;
2156 ostid_build_res_name(&lsm->lsm_oi, &res_id);
2157 rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
2158 if (rc == LDLM_ITER_STOP)
2159 return 1;
2160 if (rc == LDLM_ITER_CONTINUE)
2161 return 0;
2162 return rc;
2163 }
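/*
 * Editor's sketch of the iterator contract above: a caller-supplied
 * ldlm_iterator_t returns LDLM_ITER_STOP to report "found" (mapped to 1)
 * or LDLM_ITER_CONTINUE to keep scanning (mapped to 0). Hypothetical
 * callback, shown only for illustration:
 *
 *	static int lock_exists_cb(struct ldlm_lock *lock, void *data)
 *	{
 *		return LDLM_ITER_STOP;	// first lock found, stop iterating
 *	}
 *
 *	found = osc_find_cbdata(exp, lsm, lock_exists_cb, NULL);
 */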
2165 static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
2166 obd_enqueue_update_f upcall, void *cookie,
2167 __u64 *flags, int agl, int rc)
2168 {
2169 int intent = *flags & LDLM_FL_HAS_INTENT;
2170 ENTRY;
2173 /* The request was created before the ldlm_cli_enqueue call. */
2174 if (intent && rc == ELDLM_LOCK_ABORTED) {
2175 struct ldlm_reply *rep;
2176 rep = req_capsule_server_get(&req->rq_pill,
2177 &RMF_DLM_REP);
2179 LASSERT(rep != NULL);
2180 rep->lock_policy_res1 =
2181 ptlrpc_status_ntoh(rep->lock_policy_res1);
2182 if (rep->lock_policy_res1)
2183 rc = rep->lock_policy_res1;
2184 }
2187 if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
2188 (rc == 0)) {
2189 *flags |= LDLM_FL_LVB_READY;
2190 CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
2191 lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
2192 }
2194 /* Call the update callback. */
2195 rc = (*upcall)(cookie, rc);
2196 RETURN(rc);
2197 }
2199 static int osc_enqueue_interpret(const struct lu_env *env,
2200 struct ptlrpc_request *req,
2201 struct osc_enqueue_args *aa, int rc)
2202 {
2203 struct ldlm_lock *lock;
2204 struct lustre_handle handle;
2205 __u32 mode;
2206 struct ost_lvb *lvb;
2207 __u32 lvb_len;
2208 __u64 *flags = aa->oa_flags;
2210 /* Make a local copy of a lock handle and a mode, because aa->oa_*
2211 * might be freed anytime after lock upcall has been called. */
2212 lustre_handle_copy(&handle, aa->oa_lockh);
2213 mode = aa->oa_ei->ei_mode;
2215 /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2216 * be valid. */
2217 lock = ldlm_handle2lock(&handle);
2219 /* Take an additional reference so that a blocking AST that
2220 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
2221 * to arrive after an upcall has been executed by
2222 * osc_enqueue_fini(). */
2223 ldlm_lock_addref(&handle, mode);
2225 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2226 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2228 /* Let CP AST to grant the lock first. */
2229 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2231 if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
2232 lvb = NULL;
2233 lvb_len = 0;
2234 } else {
2235 lvb = aa->oa_lvb;
2236 lvb_len = sizeof(*aa->oa_lvb);
2237 }
2239 /* Complete obtaining the lock procedure. */
2240 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
2241 mode, flags, lvb, lvb_len, &handle, rc);
2242 /* Complete osc stuff. */
2243 rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
2244 flags, aa->oa_agl, rc);
2246 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2248 /* Release the lock for async request. */
2249 if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
2250 /*
2251 * Releases a reference taken by ldlm_cli_enqueue(), if it is
2252 * not already released by
2253 * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
2254 */
2255 ldlm_lock_decref(&handle, mode);
2257 LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
2258 aa->oa_lockh, req, aa);
2259 ldlm_lock_decref(&handle, mode);
2260 LDLM_LOCK_PUT(lock);
2261 RETURN(rc);
2262 }
2264 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
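/*
 * Editor's note: PTLRPCD_SET is a sentinel pointer, not a real request
 * set. Passing it to osc_enqueue_base() routes the request to the shared
 * ptlrpcd daemons instead of a caller-owned set (sketch, arguments
 * abbreviated):
 *
 *	rc = osc_enqueue_base(exp, res_id, &flags, &policy, &lvb, kms_valid,
 *			      upcall, cookie, einfo, &lockh,
 *			      PTLRPCD_SET, 1, 0);
 */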
2266 /* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
2267 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
2268 * other synchronous requests; however, keeping some locks while trying to
2269 * obtain others may take a considerable amount of time if an OST fails, and
2270 * when a client does not release a lock that other sync requests are waiting
2271 * on, that client is evicted from the cluster -- such scenarios make life
2272 * difficult, so release locks just after they are obtained. */
2273 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2274 __u64 *flags, ldlm_policy_data_t *policy,
2275 struct ost_lvb *lvb, int kms_valid,
2276 obd_enqueue_update_f upcall, void *cookie,
2277 struct ldlm_enqueue_info *einfo,
2278 struct lustre_handle *lockh,
2279 struct ptlrpc_request_set *rqset, int async, int agl)
2280 {
2281 struct obd_device *obd = exp->exp_obd;
2282 struct ptlrpc_request *req = NULL;
2283 int intent = *flags & LDLM_FL_HAS_INTENT;
2284 __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
2285 ldlm_mode_t mode;
2286 int rc;
2287 ENTRY;
2289 /* Filesystem lock extents are extended to page boundaries so that
2290 * dealing with the page cache is a little smoother. */
2291 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
2292 policy->l_extent.end |= ~CFS_PAGE_MASK;
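/*
 * Editor's worked example of the rounding above, assuming 4KiB pages
 * (CFS_PAGE_MASK == ~0xfffULL): a byte range [5000, 6000] becomes the
 * page-aligned extent [4096, 8191] -- the start is rounded down and the
 * end rounded up so the lock covers whole pages.
 */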
2294 /*
2295 * kms is not valid when either object is completely fresh (so that no
2296 * locks are cached), or object was evicted. In the latter case cached
2297 * lock cannot be used, because it would prime inode state with
2298 * potentially stale LVB.
2299 */
2300 if (!kms_valid)
2301 goto no_match;
2303 /* Next, search for already existing extent locks that will cover us */
2304 /* If we're trying to read, we also search for an existing PW lock. The
2305 * VFS and page cache already protect us locally, so lots of readers/
2306 * writers can share a single PW lock.
2308 * There are problems with conversion deadlocks, so instead of
2309 * converting a read lock to a write lock, we'll just enqueue a new
2310 * one.
2311 *
2312 * At some point we should cancel the read lock instead of making them
2313 * send us a blocking callback, but there are problems with canceling
2314 * locks out from other users right now, too. */
2315 mode = einfo->ei_mode;
2316 if (einfo->ei_mode == LCK_PR)
2317 mode |= LCK_PW;
2318 mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
2319 einfo->ei_type, policy, mode, lockh, 0);
2320 if (mode) {
2321 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
2323 if ((agl != 0) && !ldlm_is_lvb_ready(matched)) {
2324 /* For AGL, if enqueue RPC is sent but the lock is not
2325 * granted, then skip processing this stripe.
2326 * Return -ECANCELED to tell the caller. */
2327 ldlm_lock_decref(lockh, mode);
2328 LDLM_LOCK_PUT(matched);
2329 RETURN(-ECANCELED);
2330 } else if (osc_set_lock_data_with_check(matched, einfo)) {
2331 *flags |= LDLM_FL_LVB_READY;
2332 /* addref the lock only if not async requests and PW
2333 * lock is matched whereas we asked for PR. */
2334 if (!rqset && einfo->ei_mode != mode)
2335 ldlm_lock_addref(lockh, LCK_PR);
2337 /* I would like to be able to ASSERT here that
2338 * rss <= kms, but I can't, for reasons which
2339 * are explained in lov_enqueue() */
2342 /* We already have a lock, and it's referenced.
2343 *
2344 * At this point, the cl_lock::cll_state is CLS_QUEUING,
2345 * AGL upcall may change it to CLS_HELD directly. */
2346 (*upcall)(cookie, ELDLM_OK);
2348 if (einfo->ei_mode != mode)
2349 ldlm_lock_decref(lockh, LCK_PW);
2350 else if (rqset)
2351 /* For async requests, decref the lock. */
2352 ldlm_lock_decref(lockh, einfo->ei_mode);
2353 LDLM_LOCK_PUT(matched);
2354 RETURN(ELDLM_OK);
2355 } else {
2356 ldlm_lock_decref(lockh, mode);
2357 LDLM_LOCK_PUT(matched);
2358 }
2359 }
2361 no_match:
2362 if (intent) {
2363 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2364 &RQF_LDLM_ENQUEUE_LVB);
2365 if (req == NULL)
2366 RETURN(-ENOMEM);
2368 rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
2369 if (rc < 0) {
2370 ptlrpc_request_free(req);
2371 RETURN(rc);
2372 }
2374 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
2375 sizeof(*lvb));
2376 ptlrpc_request_set_replen(req);
2377 }
2379 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2380 *flags &= ~LDLM_FL_BLOCK_GRANTED;
2382 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2383 sizeof(*lvb), LVB_T_OST, lockh, async);
2384 if (rqset) {
2385 if (!rc) {
2386 struct osc_enqueue_args *aa;
2387 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
2388 aa = ptlrpc_req_async_args(req);
2389 aa->oa_ei = einfo;
2390 aa->oa_exp = exp;
2391 aa->oa_flags = flags;
2392 aa->oa_upcall = upcall;
2393 aa->oa_cookie = cookie;
2394 aa->oa_lvb = lvb;
2395 aa->oa_lockh = lockh;
2396 aa->oa_agl = !!agl;
2398 req->rq_interpret_reply =
2399 (ptlrpc_interpterer_t)osc_enqueue_interpret;
2400 if (rqset == PTLRPCD_SET)
2401 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
2402 else
2403 ptlrpc_set_add_req(rqset, req);
2404 } else if (intent) {
2405 ptlrpc_req_finished(req);
2406 }
2407 RETURN(rc);
2408 }
2410 rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
2411 if (intent)
2412 ptlrpc_req_finished(req);
2413 RETURN(rc);
2414 }
2417 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2418 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2419 __u64 *flags, void *data, struct lustre_handle *lockh,
2420 int unref)
2421 {
2422 struct obd_device *obd = exp->exp_obd;
2423 __u64 lflags = *flags;
2424 ldlm_mode_t rc;
2425 ENTRY;
2427 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2428 RETURN(-EIO);
2430 /* Filesystem lock extents are extended to page boundaries so that
2431 * dealing with the page cache is a little smoother */
2432 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
2433 policy->l_extent.end |= ~CFS_PAGE_MASK;
2435 /* Next, search for already existing extent locks that will cover us */
2436 /* If we're trying to read, we also search for an existing PW lock. The
2437 * VFS and page cache already protect us locally, so lots of readers/
2438 * writers can share a single PW lock. */
2439 rc = mode;
2440 if (mode == LCK_PR)
2441 rc |= LCK_PW;
2442 rc = ldlm_lock_match(obd->obd_namespace, lflags,
2443 res_id, type, policy, rc, lockh, unref);
2444 if (rc) {
2445 if (data != NULL) {
2446 if (!osc_set_data_with_check(lockh, data)) {
2447 if (!(lflags & LDLM_FL_TEST_LOCK))
2448 ldlm_lock_decref(lockh, rc);
2449 RETURN(0);
2450 }
2451 }
2452 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
2453 ldlm_lock_addref(lockh, LCK_PR);
2454 ldlm_lock_decref(lockh, LCK_PW);
2455 }
2456 RETURN(rc);
2457 }
2458 RETURN(rc);
2459 }
2461 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
2462 {
2463 ENTRY;
2465 if (unlikely(mode == LCK_GROUP))
2466 ldlm_lock_decref_and_cancel(lockh, mode);
2467 else
2468 ldlm_lock_decref(lockh, mode);
2470 RETURN(0);
2471 }
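/*
 * Editor's sketch pairing the two helpers above (hypothetical caller):
 * a successful osc_match_base() leaves a reference on the matched lock,
 * which the caller eventually drops through osc_cancel_base() using the
 * returned mode:
 *
 *	mode = osc_match_base(exp, &res_id, LDLM_EXTENT, &policy,
 *			      LCK_PR, &flags, data, &lockh, 0);
 *	if (mode)
 *		osc_cancel_base(&lockh, mode);
 */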
2473 static int osc_statfs_interpret(const struct lu_env *env,
2474 struct ptlrpc_request *req,
2475 struct osc_async_args *aa, int rc)
2476 {
2477 struct obd_statfs *msfs;
2478 ENTRY;
2480 if (rc == -EBADR)
2481 /* The request has in fact never been sent
2482 * due to issues at a higher level (LOV).
2483 * Exit immediately since the caller is
2484 * aware of the problem and takes care
2485 * of the clean up */
2486 RETURN(rc);
2488 if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2489 (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
2490 GOTO(out, rc = 0);
2492 if (rc != 0)
2493 GOTO(out, rc);
2495 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2496 if (msfs == NULL)
2497 GOTO(out, rc = -EPROTO);
2500 *aa->aa_oi->oi_osfs = *msfs;
2501 out:
2502 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2503 RETURN(rc);
2504 }
2506 static int osc_statfs_async(struct obd_export *exp,
2507 struct obd_info *oinfo, __u64 max_age,
2508 struct ptlrpc_request_set *rqset)
2509 {
2510 struct obd_device *obd = class_exp2obd(exp);
2511 struct ptlrpc_request *req;
2512 struct osc_async_args *aa;
2513 int rc;
2514 ENTRY;
2516 /* We could possibly pass max_age in the request (as an absolute
2517 * timestamp or a "seconds.usec ago") so the target can avoid doing
2518 * extra calls into the filesystem if that isn't necessary (e.g.
2519 * during mount that would help a bit). Having relative timestamps
2520 * is not so great if request processing is slow, while absolute
2521 * timestamps are not ideal because they need time synchronization. */
2522 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
2523 if (req == NULL)
2524 RETURN(-ENOMEM);
2526 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2527 if (rc) {
2528 ptlrpc_request_free(req);
2529 RETURN(rc);
2530 }
2531 ptlrpc_request_set_replen(req);
2532 req->rq_request_portal = OST_CREATE_PORTAL;
2533 ptlrpc_at_set_req_timeout(req);
2535 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
2536 /* procfs requests should not wait for recovery, to avoid a deadlock */
2537 req->rq_no_resend = 1;
2538 req->rq_no_delay = 1;
2539 }
2541 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
2542 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
2543 aa = ptlrpc_req_async_args(req);
2544 aa->aa_oi = oinfo;
2546 ptlrpc_set_add_req(rqset, req);
2547 RETURN(0);
2548 }
2550 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
2551 struct obd_statfs *osfs, __u64 max_age, __u32 flags)
2552 {
2553 struct obd_device *obd = class_exp2obd(exp);
2554 struct obd_statfs *msfs;
2555 struct ptlrpc_request *req;
2556 struct obd_import *imp = NULL;
2557 int rc;
2558 ENTRY;
2560 /* Since the request might also come from lprocfs, we need to
2561 * sync this with client_disconnect_export (bug 15684). */
2562 down_read(&obd->u.cli.cl_sem);
2563 if (obd->u.cli.cl_import)
2564 imp = class_import_get(obd->u.cli.cl_import);
2565 up_read(&obd->u.cli.cl_sem);
2566 if (!imp)
2567 RETURN(-ENODEV);
2569 /* We could possibly pass max_age in the request (as an absolute
2570 * timestamp or a "seconds.usec ago") so the target can avoid doing
2571 * extra calls into the filesystem if that isn't necessary (e.g.
2572 * during mount that would help a bit). Having relative timestamps
2573 * is not so great if request processing is slow, while absolute
2574 * timestamps are not ideal because they need time synchronization. */
2575 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
2577 class_import_put(imp);
2579 if (req == NULL)
2580 RETURN(-ENOMEM);
2582 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2583 if (rc) {
2584 ptlrpc_request_free(req);
2585 RETURN(rc);
2586 }
2587 ptlrpc_request_set_replen(req);
2588 req->rq_request_portal = OST_CREATE_PORTAL;
2589 ptlrpc_at_set_req_timeout(req);
2591 if (flags & OBD_STATFS_NODELAY) {
2592 /* procfs requests should not wait for recovery, to avoid a deadlock */
2593 req->rq_no_resend = 1;
2594 req->rq_no_delay = 1;
2595 }
2597 rc = ptlrpc_queue_wait(req);
2598 if (rc)
2599 GOTO(out, rc);
2601 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2602 if (msfs == NULL)
2603 GOTO(out, rc = -EPROTO);
2605 *osfs = *msfs;
2607 EXIT;
2608 out:
2610 ptlrpc_req_finished(req);
2611 return rc;
2612 }
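/*
 * Editor's sketch of the async statfs contract (hypothetical caller; the
 * obd_info fields shown are the ones consumed by osc_statfs_interpret()
 * above):
 *
 *	oinfo.oi_osfs  = &osfs;			// filled from the OST reply
 *	oinfo.oi_cb_up = my_statfs_upcall;	// invoked with the final rc
 *	oinfo.oi_flags = OBD_STATFS_NODELAY;	// fail fast when disconnected
 *	rc = osc_statfs_async(exp, &oinfo, max_age, rqset);
 */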
2614 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2615 void *karg, void *uarg)
2616 {
2617 struct obd_device *obd = exp->exp_obd;
2618 struct obd_ioctl_data *data = karg;
2619 int err = 0;
2620 ENTRY;
2622 if (!try_module_get(THIS_MODULE)) {
2623 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
2624 module_name(THIS_MODULE));
2625 RETURN(-EINVAL);
2626 }
2627 switch (cmd) {
2628 case OBD_IOC_CLIENT_RECOVER:
2629 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2630 data->ioc_inlbuf1, 0);
2631 if (err > 0)
2632 err = 0;
2633 GOTO(out, err);
2634 case IOC_OSC_SET_ACTIVE:
2635 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2636 data->ioc_offset);
2637 GOTO(out, err);
2638 case OBD_IOC_POLL_QUOTACHECK:
2639 err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
2640 GOTO(out, err);
2641 case OBD_IOC_PING_TARGET:
2642 err = ptlrpc_obd_ping(obd);
2643 GOTO(out, err);
2644 default:
2645 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
2646 cmd, current_comm());
2647 GOTO(out, err = -ENOTTY);
2648 }
2649 out:
2650 module_put(THIS_MODULE);
2651 return err;
2652 }
2654 static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
2655 obd_count keylen, void *key, __u32 *vallen, void *val,
2656 struct lov_stripe_md *lsm)
2657 {
2658 ENTRY;
2659 if (!vallen || !val)
2660 RETURN(-EFAULT);
2662 if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
2663 __u32 *stripe = val;
2664 *vallen = sizeof(*stripe);
2665 *stripe = 0;
2666 RETURN(0);
2667 } else if (KEY_IS(KEY_LAST_ID)) {
2668 struct ptlrpc_request *req;
2669 obd_id *reply;
2670 char *tmp;
2671 int rc;
2673 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2674 &RQF_OST_GET_INFO_LAST_ID);
2675 if (req == NULL)
2676 RETURN(-ENOMEM);
2678 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2679 RCL_CLIENT, keylen);
2680 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2681 if (rc) {
2682 ptlrpc_request_free(req);
2683 RETURN(rc);
2684 }
2686 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2687 memcpy(tmp, key, keylen);
2689 req->rq_no_delay = req->rq_no_resend = 1;
2690 ptlrpc_request_set_replen(req);
2691 rc = ptlrpc_queue_wait(req);
2692 if (rc)
2693 GOTO(out, rc);
2695 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
2696 if (reply == NULL)
2697 GOTO(out, rc = -EPROTO);
2699 *((obd_id *)val) = *reply;
2700 out:
2701 ptlrpc_req_finished(req);
2702 RETURN(rc);
2703 } else if (KEY_IS(KEY_FIEMAP)) {
2704 struct ll_fiemap_info_key *fm_key =
2705 (struct ll_fiemap_info_key *)key;
2706 struct ldlm_res_id res_id;
2707 ldlm_policy_data_t policy;
2708 struct lustre_handle lockh;
2709 ldlm_mode_t mode = 0;
2710 struct ptlrpc_request *req;
2711 struct ll_user_fiemap *reply;
2712 char *tmp;
2713 int rc;
2715 if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
2716 goto skip_locking;
2718 policy.l_extent.start = fm_key->fiemap.fm_start &
2719 CFS_PAGE_MASK;
2721 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2722 fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
2723 policy.l_extent.end = OBD_OBJECT_EOF;
2724 else
2725 policy.l_extent.end = (fm_key->fiemap.fm_start +
2726 fm_key->fiemap.fm_length +
2727 PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
2729 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2730 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
2731 LDLM_FL_BLOCK_GRANTED |
2732 LDLM_FL_LVB_READY,
2733 &res_id, LDLM_EXTENT, &policy,
2734 LCK_PR | LCK_PW, &lockh, 0);
2735 if (mode) { /* lock is cached on client */
2736 if (mode != LCK_PR) {
2737 ldlm_lock_addref(&lockh, LCK_PR);
2738 ldlm_lock_decref(&lockh, LCK_PW);
2739 }
2740 } else { /* no cached lock, need to acquire lock on the server side */
2741 fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
2742 fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
2743 }
2745 skip_locking:
2746 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2747 &RQF_OST_GET_INFO_FIEMAP);
2748 if (req == NULL)
2749 GOTO(drop_lock, rc = -ENOMEM);
2751 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
2752 RCL_CLIENT, keylen);
2753 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2754 RCL_CLIENT, *vallen);
2755 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2756 RCL_SERVER, *vallen);
2758 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2759 if (rc) {
2760 ptlrpc_request_free(req);
2761 GOTO(drop_lock, rc);
2762 }
2764 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
2765 memcpy(tmp, key, keylen);
2766 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2767 memcpy(tmp, val, *vallen);
2769 ptlrpc_request_set_replen(req);
2770 rc = ptlrpc_queue_wait(req);
2771 if (rc)
2772 GOTO(fini_req, rc);
2774 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2775 if (reply == NULL)
2776 GOTO(fini_req, rc = -EPROTO);
2778 memcpy(val, reply, *vallen);
2779 fini_req:
2780 ptlrpc_req_finished(req);
2781 drop_lock:
2782 if (mode)
2783 ldlm_lock_decref(&lockh, LCK_PR);
2784 RETURN(rc);
2785 }
2787 RETURN(-EINVAL);
2788 }
2790 static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
2791 obd_count keylen, void *key, obd_count vallen,
2792 void *val, struct ptlrpc_request_set *set)
2793 {
2794 struct ptlrpc_request *req;
2795 struct obd_device *obd = exp->exp_obd;
2796 struct obd_import *imp = class_exp2cliimp(exp);
2797 char *tmp;
2798 int rc;
2799 ENTRY;
2801 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
2803 if (KEY_IS(KEY_CHECKSUM)) {
2804 if (vallen != sizeof(int))
2805 RETURN(-EINVAL);
2806 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
2807 RETURN(0);
2808 }
2810 if (KEY_IS(KEY_SPTLRPC_CONF)) {
2811 sptlrpc_conf_client_adapt(obd);
2812 RETURN(0);
2813 }
2815 if (KEY_IS(KEY_FLUSH_CTX)) {
2816 sptlrpc_import_flush_my_ctx(imp);
2817 RETURN(0);
2818 }
2820 if (KEY_IS(KEY_CACHE_SET)) {
2821 struct client_obd *cli = &obd->u.cli;
2823 LASSERT(cli->cl_cache == NULL); /* only once */
2824 cli->cl_cache = (struct cl_client_cache *)val;
2825 atomic_inc(&cli->cl_cache->ccc_users);
2826 cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
2828 /* add this osc into entity list */
2829 LASSERT(list_empty(&cli->cl_lru_osc));
2830 spin_lock(&cli->cl_cache->ccc_lru_lock);
2831 list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
2832 spin_unlock(&cli->cl_cache->ccc_lru_lock);
2833 RETURN(0);
2834 }
2837 if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
2838 struct client_obd *cli = &obd->u.cli;
2839 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
2840 long target = *(long *)val;
2842 nr = osc_lru_shrink(env, cli, min(nr, target), true);
2843 *(long *)val -= nr;
2844 RETURN(0);
2845 }
2847 if (!set && !KEY_IS(KEY_GRANT_SHRINK))
2848 RETURN(-EINVAL);
2850 /* We pass all other commands directly to OST. Since nobody calls osc
2851 methods directly and everybody is supposed to go through LOV, we
2852 assume lov checked invalid values for us.
2853 The only recognised values so far are evict_by_nid and mds_conn.
2854 Even if something bad goes through, we'd get a -EINVAL from OST
2855 anyway. */
2857 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
2858 &RQF_OST_SET_GRANT_INFO :
2859 &RQF_OBD_SET_INFO);
2860 if (req == NULL)
2861 RETURN(-ENOMEM);
2863 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2864 RCL_CLIENT, keylen);
2865 if (!KEY_IS(KEY_GRANT_SHRINK))
2866 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
2867 RCL_CLIENT, vallen);
2868 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
2869 if (rc) {
2870 ptlrpc_request_free(req);
2871 RETURN(rc);
2872 }
2874 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2875 memcpy(tmp, key, keylen);
2876 tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
2877 &RMF_OST_BODY :
2878 &RMF_SETINFO_VAL);
2879 memcpy(tmp, val, vallen);
2881 if (KEY_IS(KEY_GRANT_SHRINK)) {
2882 struct osc_grant_args *aa;
2883 struct obdo *oa;
2885 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2886 aa = ptlrpc_req_async_args(req);
2887 OBDO_ALLOC(oa);
2888 if (!oa) {
2889 ptlrpc_req_finished(req);
2890 RETURN(-ENOMEM);
2891 }
2892 *oa = ((struct ost_body *)val)->oa;
2893 aa->aa_oa = oa;
2894 req->rq_interpret_reply = osc_shrink_grant_interpret;
2895 }
2897 ptlrpc_request_set_replen(req);
2898 if (!KEY_IS(KEY_GRANT_SHRINK)) {
2899 LASSERT(set != NULL);
2900 ptlrpc_set_add_req(set, req);
2901 ptlrpc_check_set(NULL, set);
2902 } else
2903 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
2905 RETURN(0);
2906 }
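/*
 * Editor's sketch of the KEY_CHECKSUM branch above (hypothetical caller):
 * the value must be an int, per the vallen check, and no request set is
 * needed because that key is handled locally without an RPC:
 *
 *	int on = 1;
 *	rc = osc_set_info_async(env, exp, sizeof(KEY_CHECKSUM),
 *				KEY_CHECKSUM, sizeof(on), &on, NULL);
 */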
2908 static int osc_reconnect(const struct lu_env *env,
2909 struct obd_export *exp, struct obd_device *obd,
2910 struct obd_uuid *cluuid,
2911 struct obd_connect_data *data,
2912 void *localdata)
2913 {
2914 struct client_obd *cli = &obd->u.cli;
2916 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
2917 long lost_grant;
2919 client_obd_list_lock(&cli->cl_loi_list_lock);
2920 data->ocd_grant = (cli->cl_avail_grant +
2921 (cli->cl_dirty_pages << PAGE_CACHE_SHIFT)) ?:
2922 2 * cli_brw_size(obd);
2923 lost_grant = cli->cl_lost_grant;
2924 cli->cl_lost_grant = 0;
2925 client_obd_list_unlock(&cli->cl_loi_list_lock);
2927 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
2928 " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
2929 data->ocd_version, data->ocd_grant, lost_grant);
2930 }
2932 RETURN(0);
2933 }
2935 static int osc_disconnect(struct obd_export *exp)
2936 {
2937 struct obd_device *obd = class_exp2obd(exp);
2938 int rc;
2940 rc = client_disconnect_export(exp);
2941 /*
2942 * Initially we put del_shrink_grant before disconnect_export, but it
2943 * causes the following problem if setup (connect) and cleanup
2944 * (disconnect) are tangled together.
2945 *      connect p1                     disconnect p2
2946 *   ptlrpc_connect_import
2947 *     ...............               class_manual_cleanup
2948 *                                     osc_disconnect
2949 *                                     del_shrink_grant
2950 *   ptlrpc_connect_interrupt
2951 *     init_grant_shrink
2952 *   add this client to shrink list
2953 *                                      cleanup_osc
2954 * Bang! the pinger triggers the shrink.
2955 * So the osc should be removed from the shrink list only after we
2956 * are sure the import has been destroyed. BUG18662
2957 */
2958 if (obd->u.cli.cl_import == NULL)
2959 osc_del_shrink_grant(&obd->u.cli);
2960 return rc;
2961 }
2963 static int osc_import_event(struct obd_device *obd,
2964 struct obd_import *imp,
2965 enum obd_import_event event)
2966 {
2967 struct client_obd *cli;
2968 int rc = 0;
2969 ENTRY;
2971 LASSERT(imp->imp_obd == obd);
2973 switch (event) {
2974 case IMP_EVENT_DISCON: {
2975 cli = &obd->u.cli;
2976 client_obd_list_lock(&cli->cl_loi_list_lock);
2977 cli->cl_avail_grant = 0;
2978 cli->cl_lost_grant = 0;
2979 client_obd_list_unlock(&cli->cl_loi_list_lock);
2980 break;
2981 }
2982 case IMP_EVENT_INACTIVE: {
2983 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
2984 break;
2985 }
2986 case IMP_EVENT_INVALIDATE: {
2987 struct ldlm_namespace *ns = obd->obd_namespace;
2988 struct lu_env *env;
2989 int refcheck;
2991 env = cl_env_get(&refcheck);
2992 if (!IS_ERR(env)) {
2993 cli = &obd->u.cli;
2995 /* all pages go to failing rpcs due to the invalid
2996 * import */
2997 osc_io_unplug(env, cli, NULL, PDL_POLICY_ROUND);
2999 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3000 cl_env_put(env, &refcheck);
3001 } else
3002 rc = PTR_ERR(env);
3003 break;
3004 }
3005 case IMP_EVENT_ACTIVE: {
3006 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
3007 break;
3008 }
3009 case IMP_EVENT_OCD: {
3010 struct obd_connect_data *ocd = &imp->imp_connect_data;
3012 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3013 osc_init_grant(&obd->u.cli, ocd);
3016 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3017 imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3019 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
3020 break;
3021 }
3022 case IMP_EVENT_DEACTIVATE: {
3023 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
3024 break;
3025 }
3026 case IMP_EVENT_ACTIVATE: {
3027 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
3028 break;
3029 }
3030 default:
3031 CERROR("Unknown import event %d\n", event);
3032 LBUG();
3033 }
3034 RETURN(rc);
3035 }
3037 /**
3038 * Determine whether the lock can be canceled before replaying the lock
3039 * during recovery, see bug16774 for detailed information.
3040 *
3041 * \retval zero the lock can't be canceled
3042 * \retval other ok to cancel
3043 */
3044 static int osc_cancel_weight(struct ldlm_lock *lock)
3045 {
3046 /*
3047 * Cancel all unused and granted extent locks.
3048 */
3049 if (lock->l_resource->lr_type == LDLM_EXTENT &&
3050 lock->l_granted_mode == lock->l_req_mode &&
3051 osc_ldlm_weigh_ast(lock) == 0)
3052 RETURN(1);
3054 RETURN(0);
3055 }
3057 static int brw_queue_work(const struct lu_env *env, void *data)
3058 {
3059 struct client_obd *cli = data;
3061 CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3063 osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
3064 RETURN(0);
3065 }
3067 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3068 {
3069 struct client_obd *cli = &obd->u.cli;
3070 struct obd_type *type;
3071 void *handler;
3072 int rc;
3073 ENTRY;
3075 rc = ptlrpcd_addref();
3076 if (rc)
3077 RETURN(rc);
3079 rc = client_obd_setup(obd, lcfg);
3080 if (rc)
3081 GOTO(out_ptlrpcd, rc);
3083 handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3084 if (IS_ERR(handler))
3085 GOTO(out_client_setup, rc = PTR_ERR(handler));
3086 cli->cl_writeback_work = handler;
3088 handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3089 if (IS_ERR(handler))
3090 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3091 cli->cl_lru_work = handler;
3093 rc = osc_quota_setup(obd);
3094 if (rc)
3095 GOTO(out_ptlrpcd_work, rc);
3097 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3099 #ifdef LPROCFS
3100 obd->obd_vars = lprocfs_osc_obd_vars;
3101 #endif
3102 /* If this is true then both client (osc) and server (osp) are on the
3103 * same node. The osp layer if loaded first will register the osc proc
3104 * directory. In that case this obd_device will be attached its proc
3105 * tree to type->typ_procsym instead of obd->obd_type->typ_procroot. */
3106 type = class_search_type(LUSTRE_OSP_NAME);
3107 if (type && type->typ_procsym) {
3108 obd->obd_proc_entry = lprocfs_seq_register(obd->obd_name,
3109 type->typ_procsym,
3110 obd->obd_vars, obd);
3111 if (IS_ERR(obd->obd_proc_entry)) {
3112 rc = PTR_ERR(obd->obd_proc_entry);
3113 CERROR("error %d setting up lprocfs for %s\n", rc,
3114 obd->obd_name);
3115 obd->obd_proc_entry = NULL;
3116 }
3117 } else {
3118 rc = lprocfs_obd_setup(obd);
3119 }
3121 /* If the basic OSC proc tree construction succeeded then
3122 * let's do the rest. */
3123 if (rc == 0) {
3124 lproc_osc_attach_seqstat(obd);
3125 sptlrpc_lprocfs_cliobd_attach(obd);
3126 ptlrpc_lprocfs_register_obd(obd);
3127 }
3129 /* We need to allocate a few requests more, because
3130 * brw_interpret tries to create new requests before freeing
3131 * previous ones. Ideally we want to have 2x max_rpcs_in_flight
3132 * reserved, but I'm afraid that might be too much wasted RAM
3133 * in fact, so 2 is just my guess and still should work. */
3134 cli->cl_import->imp_rq_pool =
3135 ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
3136 OST_MAXREQSIZE,
3137 ptlrpc_add_rqs_to_pool);
3139 INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
3140 ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3141 RETURN(0);
3143 out_ptlrpcd_work:
3144 if (cli->cl_writeback_work != NULL) {
3145 ptlrpcd_destroy_work(cli->cl_writeback_work);
3146 cli->cl_writeback_work = NULL;
3147 }
3148 if (cli->cl_lru_work != NULL) {
3149 ptlrpcd_destroy_work(cli->cl_lru_work);
3150 cli->cl_lru_work = NULL;
3151 }
3152 out_client_setup:
3153 client_obd_cleanup(obd);
3154 out_ptlrpcd:
3155 ptlrpcd_decref();
3156 RETURN(rc);
3157 }
3159 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
3160 {
3161 int rc = 0;
3162 ENTRY;
3164 switch (stage) {
3165 case OBD_CLEANUP_EARLY: {
3166 struct obd_import *imp;
3167 imp = obd->u.cli.cl_import;
3168 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
3169 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
3170 ptlrpc_deactivate_import(imp);
3171 spin_lock(&imp->imp_lock);
3172 imp->imp_pingable = 0;
3173 spin_unlock(&imp->imp_lock);
3174 break;
3175 }
3176 case OBD_CLEANUP_EXPORTS: {
3177 struct client_obd *cli = &obd->u.cli;
3178 /* LU-464
3179 * for echo client, export may be on zombie list, wait for
3180 * zombie thread to cull it, because cli.cl_import will be
3181 * cleared in client_disconnect_export():
3182 * class_export_destroy() -> obd_cleanup() ->
3183 * echo_device_free() -> echo_client_cleanup() ->
3184 * obd_disconnect() -> osc_disconnect() ->
3185 * client_disconnect_export()
3187 obd_zombie_barrier();
3188 if (cli->cl_writeback_work) {
3189 ptlrpcd_destroy_work(cli->cl_writeback_work);
3190 cli->cl_writeback_work = NULL;
3192 if (cli->cl_lru_work) {
3193 ptlrpcd_destroy_work(cli->cl_lru_work);
3194 cli->cl_lru_work = NULL;
3196 obd_cleanup_client_import(obd);
3197 ptlrpc_lprocfs_unregister_obd(obd);
3198 lprocfs_obd_cleanup(obd);
3199 break;
3200 }
3201 }
3202 RETURN(rc);
3203 }
3205 int osc_cleanup(struct obd_device *obd)
3206 {
3207 struct client_obd *cli = &obd->u.cli;
3208 int rc;
3209 ENTRY;
3211 /* lru cleanup */
3213 if (cli->cl_cache != NULL) {
3214 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3215 spin_lock(&cli->cl_cache->ccc_lru_lock);
3216 list_del_init(&cli->cl_lru_osc);
3217 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3218 cli->cl_lru_left = NULL;
3219 atomic_dec(&cli->cl_cache->ccc_users);
3220 cli->cl_cache = NULL;
3223 /* free memory of osc quota cache */
3224 osc_quota_cleanup(obd);
3226 rc = client_obd_cleanup(obd);
3228 ptlrpcd_decref();
3229 RETURN(rc);
3230 }
3232 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
3233 {
3234 int rc = class_process_proc_param(PARAM_OSC, obd->obd_vars, lcfg, obd);
3235 return rc > 0 ? 0 : rc;
3236 }
3238 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
3239 {
3240 return osc_process_config_base(obd, buf);
3241 }
3243 struct obd_ops osc_obd_ops = {
3244 .o_owner = THIS_MODULE,
3245 .o_setup = osc_setup,
3246 .o_precleanup = osc_precleanup,
3247 .o_cleanup = osc_cleanup,
3248 .o_add_conn = client_import_add_conn,
3249 .o_del_conn = client_import_del_conn,
3250 .o_connect = client_connect_import,
3251 .o_reconnect = osc_reconnect,
3252 .o_disconnect = osc_disconnect,
3253 .o_statfs = osc_statfs,
3254 .o_statfs_async = osc_statfs_async,
3255 .o_unpackmd = osc_unpackmd,
3256 .o_create = osc_create,
3257 .o_destroy = osc_destroy,
3258 .o_getattr = osc_getattr,
3259 .o_getattr_async = osc_getattr_async,
3260 .o_setattr = osc_setattr,
3261 .o_setattr_async = osc_setattr_async,
3262 .o_change_cbdata = osc_change_cbdata,
3263 .o_find_cbdata = osc_find_cbdata,
3264 .o_iocontrol = osc_iocontrol,
3265 .o_get_info = osc_get_info,
3266 .o_set_info_async = osc_set_info_async,
3267 .o_import_event = osc_import_event,
3268 .o_process_config = osc_process_config,
3269 .o_quotactl = osc_quotactl,
3270 .o_quotacheck = osc_quotacheck,
3271 };
3273 extern struct lu_kmem_descr osc_caches[];
3274 extern spinlock_t osc_ast_guard;
3275 extern struct lock_class_key osc_ast_guard_class;
3277 int __init osc_init(void)
3278 {
3279 bool enable_proc = true;
3280 struct obd_type *type;
3281 int rc;
3282 ENTRY;
3284 /* print an address of _any_ initialized kernel symbol from this
3285 * module, to allow debugging with gdb that doesn't support data
3286 * symbols from modules.*/
3287 CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3289 rc = lu_kmem_init(osc_caches);
3290 if (rc)
3291 RETURN(rc);
3293 type = class_search_type(LUSTRE_OSP_NAME);
3294 if (type != NULL && type->typ_procsym != NULL)
3295 enable_proc = false;
3297 rc = class_register_type(&osc_obd_ops, NULL, enable_proc, NULL,
3298 LUSTRE_OSC_NAME, &osc_device_type);
3299 if (rc) {
3300 lu_kmem_fini(osc_caches);
3301 RETURN(rc);
3302 }
3304 spin_lock_init(&osc_ast_guard);
3305 lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
3307 RETURN(rc);
3308 }
3310 static void /*__exit*/ osc_exit(void)
3311 {
3312 class_unregister_type(LUSTRE_OSC_NAME);
3313 lu_kmem_fini(osc_caches);
3314 }
3316 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
3317 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3318 MODULE_LICENSE("GPL");
3320 cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);