1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * Copyright (c) 2011 Whamcloud, Inc.
36 * This file is part of Lustre, http://www.lustre.org/
37 * Lustre is a trademark of Sun Microsystems, Inc.
41 # define EXPORT_SYMTAB
43 #define DEBUG_SUBSYSTEM S_OSC
45 #include <libcfs/libcfs.h>
48 # include <liblustre.h>
51 #include <lustre_dlm.h>
52 #include <lustre_net.h>
53 #include <lustre/lustre_user.h>
54 #include <obd_cksum.h>
62 #include <lustre_ha.h>
63 #include <lprocfs_status.h>
64 #include <lustre_log.h>
65 #include <lustre_debug.h>
66 #include <lustre_param.h>
67 #include "osc_internal.h"
69 static quota_interface_t *quota_interface = NULL;
70 extern quota_interface_t osc_quota_interface;
72 static void osc_release_ppga(struct brw_page **ppga, obd_count count);
73 static int brw_interpret(const struct lu_env *env,
74 struct ptlrpc_request *req, void *data, int rc);
75 int osc_cleanup(struct obd_device *obd);
77 /* Pack OSC object metadata for disk storage (LE byte order). */
78 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
79 struct lov_stripe_md *lsm)
84 lmm_size = sizeof(**lmmp);
89 OBD_FREE(*lmmp, lmm_size);
95 OBD_ALLOC(*lmmp, lmm_size);
101 LASSERT(lsm->lsm_object_id);
102 LASSERT_SEQ_IS_MDT(lsm->lsm_object_seq);
103 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
104 (*lmmp)->lmm_object_seq = cpu_to_le64(lsm->lsm_object_seq);
110 /* Unpack OSC object metadata from disk storage (LE byte order). */
111 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
112 struct lov_mds_md *lmm, int lmm_bytes)
115 struct obd_import *imp = class_exp2cliimp(exp);
119 if (lmm_bytes < sizeof (*lmm)) {
120 CERROR("lov_mds_md too small: %d, need %d\n",
121 lmm_bytes, (int)sizeof(*lmm));
124 /* XXX LOV_MAGIC etc check? */
126 if (lmm->lmm_object_id == 0) {
127 CERROR("lov_mds_md: zero lmm_object_id\n");
132 lsm_size = lov_stripe_md_size(1);
136 if (*lsmp != NULL && lmm == NULL) {
137 OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
138 OBD_FREE(*lsmp, lsm_size);
144 OBD_ALLOC(*lsmp, lsm_size);
147 OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
148 if ((*lsmp)->lsm_oinfo[0] == NULL) {
149 OBD_FREE(*lsmp, lsm_size);
152 loi_init((*lsmp)->lsm_oinfo[0]);
156 /* XXX zero *lsmp? */
157 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
158 (*lsmp)->lsm_object_seq = le64_to_cpu (lmm->lmm_object_seq);
159 LASSERT((*lsmp)->lsm_object_id);
160 LASSERT_SEQ_IS_MDT((*lsmp)->lsm_object_seq);
164 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
165 (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
167 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
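169 /* Copy the capability @capa (if present) into the request's RMF_CAPA1
170 * buffer and flag its presence in the ost_body. */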
172 static inline void osc_pack_capa(struct ptlrpc_request *req,
173 struct ost_body *body, void *capa)
175 struct obd_capa *oc = (struct obd_capa *)capa;
176 struct lustre_capa *c;
181 c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
184 body->oa.o_valid |= OBD_MD_FLOSSCAPA;
185 DEBUG_CAPA(D_SEC, c, "pack");
188 static inline void osc_pack_req_body(struct ptlrpc_request *req,
189 struct obd_info *oinfo)
191 struct ost_body *body;
193 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
196 lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
197 osc_pack_capa(req, body, oinfo->oi_capa);
200 static inline void osc_set_capa_size(struct ptlrpc_request *req,
201 const struct req_msg_field *field,
205 req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
207 /* it is already calculated as sizeof struct obd_capa */
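209 /* Interpret an OST_GETATTR reply: unpack the obdo from the reply and
210 * hand the result to the caller's oi_cb_up completion callback. */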
211 static int osc_getattr_interpret(const struct lu_env *env,
212 struct ptlrpc_request *req,
213 struct osc_async_args *aa, int rc)
215 struct ost_body *body;
221 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
223 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
224 lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);
226 /* This should really be sent by the OST */
227 aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
228 aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
230 CDEBUG(D_INFO, "can't unpack ost_body\n");
232 aa->aa_oi->oi_oa->o_valid = 0;
235 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
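237 /* Issue an OST_GETATTR asynchronously via the given request @set; the
238 * reply is handled by osc_getattr_interpret(). */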
239 static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
240 struct ptlrpc_request_set *set)
242 struct ptlrpc_request *req;
243 struct osc_async_args *aa;
247 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
251 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
252 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
254 ptlrpc_request_free(req);
258 osc_pack_req_body(req, oinfo);
260 ptlrpc_request_set_replen(req);
261 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
263 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
264 aa = ptlrpc_req_async_args(req);
267 ptlrpc_set_add_req(set, req);
271 static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
273 struct ptlrpc_request *req;
274 struct ost_body *body;
278 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
282 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
283 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
285 ptlrpc_request_free(req);
289 osc_pack_req_body(req, oinfo);
291 ptlrpc_request_set_replen(req);
293 rc = ptlrpc_queue_wait(req);
297 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
299 GOTO(out, rc = -EPROTO);
301 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
302 lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);
304 /* This should really be sent by the OST */
305 oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
306 oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
310 ptlrpc_req_finished(req);
314 static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
315 struct obd_trans_info *oti)
317 struct ptlrpc_request *req;
318 struct ost_body *body;
322 LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
324 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
328 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
329 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
331 ptlrpc_request_free(req);
335 osc_pack_req_body(req, oinfo);
337 ptlrpc_request_set_replen(req);
339 rc = ptlrpc_queue_wait(req);
343 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
345 GOTO(out, rc = -EPROTO);
347 lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);
351 ptlrpc_req_finished(req);
355 static int osc_setattr_interpret(const struct lu_env *env,
356 struct ptlrpc_request *req,
357 struct osc_setattr_args *sa, int rc)
359 struct ost_body *body;
365 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
367 GOTO(out, rc = -EPROTO);
369 lustre_get_wire_obdo(sa->sa_oa, &body->oa);
371 rc = sa->sa_upcall(sa->sa_cookie, rc);
375 int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
376 struct obd_trans_info *oti,
377 obd_enqueue_update_f upcall, void *cookie,
378 struct ptlrpc_request_set *rqset)
380 struct ptlrpc_request *req;
381 struct osc_setattr_args *sa;
385 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
389 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
390 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
392 ptlrpc_request_free(req);
396 if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
397 oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
399 osc_pack_req_body(req, oinfo);
401 ptlrpc_request_set_replen(req);
403 /* do mds to ost setattr asynchronously */
405 /* Do not wait for response. */
406 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
408 req->rq_interpret_reply =
409 (ptlrpc_interpterer_t)osc_setattr_interpret;
411 CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
412 sa = ptlrpc_req_async_args(req);
413 sa->sa_oa = oinfo->oi_oa;
414 sa->sa_upcall = upcall;
415 sa->sa_cookie = cookie;
417 if (rqset == PTLRPCD_SET)
418 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
420 ptlrpc_set_add_req(rqset, req);
426 static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
427 struct obd_trans_info *oti,
428 struct ptlrpc_request_set *rqset)
430 return osc_setattr_async_base(exp, oinfo, oti,
431 oinfo->oi_cb_up, oinfo, rqset);
434 int osc_real_create(struct obd_export *exp, struct obdo *oa,
435 struct lov_stripe_md **ea, struct obd_trans_info *oti)
437 struct ptlrpc_request *req;
438 struct ost_body *body;
439 struct lov_stripe_md *lsm;
448 rc = obd_alloc_memmd(exp, &lsm);
453 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
455 GOTO(out, rc = -ENOMEM);
457 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
459 ptlrpc_request_free(req);
463 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
465 lustre_set_wire_obdo(&body->oa, oa);
467 ptlrpc_request_set_replen(req);
469 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
470 oa->o_flags == OBD_FL_DELORPHAN) {
472 "delorphan from OST integration");
473 /* Don't resend the delorphan req */
474 req->rq_no_resend = req->rq_no_delay = 1;
477 rc = ptlrpc_queue_wait(req);
481 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
483 GOTO(out_req, rc = -EPROTO);
485 lustre_get_wire_obdo(oa, &body->oa);
487 /* This should really be sent by the OST */
488 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
489 oa->o_valid |= OBD_MD_FLBLKSZ;
491 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
492 * have valid lsm_oinfo data structs, so don't go touching that.
493 * This needs to be fixed in a big way.
495 lsm->lsm_object_id = oa->o_id;
496 lsm->lsm_object_seq = oa->o_seq;
500 oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
502 if (oa->o_valid & OBD_MD_FLCOOKIE) {
503 if (!oti->oti_logcookies)
504 oti_alloc_cookies(oti, 1);
505 *oti->oti_logcookies = oa->o_lcookie;
509 CDEBUG(D_HA, "transno: "LPD64"\n",
510 lustre_msg_get_transno(req->rq_repmsg));
512 ptlrpc_req_finished(req);
515 obd_free_memmd(exp, &lsm);
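516 /* Send an OST_PUNCH (truncate) request asynchronously; callers encode
517 * the punched extent in the oa size/blocks fields (see osc_punch()).
518 * Completion is handled by osc_setattr_interpret(). */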
519 int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
520 obd_enqueue_update_f upcall, void *cookie,
521 struct ptlrpc_request_set *rqset)
523 struct ptlrpc_request *req;
524 struct osc_setattr_args *sa;
525 struct ost_body *body;
529 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
533 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
534 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
536 ptlrpc_request_free(req);
539 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
540 ptlrpc_at_set_req_timeout(req);
542 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
544 lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
545 osc_pack_capa(req, body, oinfo->oi_capa);
547 ptlrpc_request_set_replen(req);
550 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
551 CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
552 sa = ptlrpc_req_async_args(req);
553 sa->sa_oa = oinfo->oi_oa;
554 sa->sa_upcall = upcall;
555 sa->sa_cookie = cookie;
556 if (rqset == PTLRPCD_SET)
557 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
559 ptlrpc_set_add_req(rqset, req);
564 static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
565 struct obd_trans_info *oti,
566 struct ptlrpc_request_set *rqset)
568 oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
569 oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
570 oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
571 return osc_punch_base(exp, oinfo,
572 oinfo->oi_cb_up, oinfo, rqset);
575 static int osc_sync_interpret(const struct lu_env *env,
576 struct ptlrpc_request *req,
579 struct osc_async_args *aa = arg;
580 struct ost_body *body;
586 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
588 CERROR ("can't unpack ost_body\n");
589 GOTO(out, rc = -EPROTO);
592 *aa->aa_oi->oi_oa = body->oa;
594 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
598 static int osc_sync(struct obd_export *exp, struct obd_info *oinfo,
599 obd_size start, obd_size end,
600 struct ptlrpc_request_set *set)
602 struct ptlrpc_request *req;
603 struct ost_body *body;
604 struct osc_async_args *aa;
609 CDEBUG(D_INFO, "oa NULL\n");
613 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
617 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
618 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
620 ptlrpc_request_free(req);
624 /* overload the size and blocks fields in the oa with start/end */
625 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
627 lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
628 body->oa.o_size = start;
629 body->oa.o_blocks = end;
630 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
631 osc_pack_capa(req, body, oinfo->oi_capa);
633 ptlrpc_request_set_replen(req);
634 req->rq_interpret_reply = osc_sync_interpret;
636 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
637 aa = ptlrpc_req_async_args(req);
640 ptlrpc_set_add_req(set, req);
644 /* Find and cancel all local locks matching @mode on the resource identified
645 * by the object in @oa. Matched locks are added to the @cancels list; returns
646 * the number of locks added to the @cancels list. */
647 static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
649 ldlm_mode_t mode, int lock_flags)
651 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
652 struct ldlm_res_id res_id;
653 struct ldlm_resource *res;
657 osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
658 res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
662 LDLM_RESOURCE_ADDREF(res);
663 count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
664 lock_flags, 0, NULL);
665 LDLM_RESOURCE_DELREF(res);
666 ldlm_resource_putref(res);
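668 /* A destroy RPC completed: drop the in-flight count and wake anyone
669 * throttled in osc_can_send_destroy(). */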
670 static int osc_destroy_interpret(const struct lu_env *env,
671 struct ptlrpc_request *req, void *data,
674 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
676 cfs_atomic_dec(&cli->cl_destroy_in_flight);
677 cfs_waitq_signal(&cli->cl_destroy_waitq);
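678 /* Returns 1 if a destroy RPC can be sent without exceeding
679 * cl_max_rpcs_in_flight; otherwise rolls the counter back and, if a
680 * slot opened up between the two atomic ops, wakes a waiter. */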
681 static int osc_can_send_destroy(struct client_obd *cli)
683 if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
684 cli->cl_max_rpcs_in_flight) {
685 /* The destroy request can be sent */
688 if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
689 cli->cl_max_rpcs_in_flight) {
691 * The counter has been modified between the two atomic
694 cfs_waitq_signal(&cli->cl_destroy_waitq);
699 /* Destroy requests can always be async on the client, and we don't even really
700 * care about the return code since the client cannot do anything at all
701 * about a failure anyway.
702 * When the MDS is unlinking a filename, it saves the file objects into a
703 * recovery llog, and these object records are cancelled when the OST reports
704 * they were destroyed and sync'd to disk (i.e. transaction committed).
705 * If the client dies, or the OST is down when the object should be destroyed,
706 * the records are not cancelled, and when the OST next reconnects to the MDS,
707 * it will retrieve the llog unlink logs and then send the log cancellation
708 * cookies to the MDS after committing destroy transactions. */
709 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
710 struct lov_stripe_md *ea, struct obd_trans_info *oti,
711 struct obd_export *md_export, void *capa)
713 struct client_obd *cli = &exp->exp_obd->u.cli;
714 struct ptlrpc_request *req;
715 struct ost_body *body;
716 CFS_LIST_HEAD(cancels);
721 CDEBUG(D_INFO, "oa NULL\n");
725 count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
726 LDLM_FL_DISCARD_DATA);
728 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
730 ldlm_lock_list_put(&cancels, l_bl_ast, count);
734 osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
735 rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
738 ptlrpc_request_free(req);
742 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
743 ptlrpc_at_set_req_timeout(req);
745 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
746 oa->o_lcookie = *oti->oti_logcookies;
747 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
749 lustre_set_wire_obdo(&body->oa, oa);
751 osc_pack_capa(req, body, (struct obd_capa *)capa);
752 ptlrpc_request_set_replen(req);
754 /* don't throttle destroy RPCs for the MDT */
755 if (!(cli->cl_import->imp_connect_flags_orig & OBD_CONNECT_MDS)) {
756 req->rq_interpret_reply = osc_destroy_interpret;
757 if (!osc_can_send_destroy(cli)) {
758 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
762 * Wait until the number of ongoing destroy RPCs drops below
763 * cl_max_rpcs_in_flight
765 l_wait_event_exclusive(cli->cl_destroy_waitq,
766 osc_can_send_destroy(cli), &lwi);
770 /* Do not wait for response */
771 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
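772 /* Fill @oa with the client's dirty/grant accounting (o_dirty, o_undirty,
773 * o_grant, o_dropped) so the OST can keep its view of this client's
774 * cache in sync. Sanity-checks the dirty counters along the way. */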
775 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
778 obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
780 LASSERT(!(oa->o_valid & bits));
783 client_obd_list_lock(&cli->cl_loi_list_lock);
784 oa->o_dirty = cli->cl_dirty;
785 if (cli->cl_dirty - cli->cl_dirty_transit > cli->cl_dirty_max) {
786 CERROR("dirty %lu - %lu > dirty_max %lu\n",
787 cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
789 } else if (cfs_atomic_read(&obd_dirty_pages) -
790 cfs_atomic_read(&obd_dirty_transit_pages) >
791 obd_max_dirty_pages + 1){
792 /* The cfs_atomic_read() and the cfs_atomic_inc() are not covered
793 * by a lock, so they may safely race and trip this CERROR() unless
794 * we add in a small fudge factor (+1). */
795 CERROR("dirty %d - %d > system dirty_max %d\n",
796 cfs_atomic_read(&obd_dirty_pages),
797 cfs_atomic_read(&obd_dirty_transit_pages),
798 obd_max_dirty_pages);
800 } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
801 CERROR("dirty %lu - dirty_max %lu too big???\n",
802 cli->cl_dirty, cli->cl_dirty_max);
805 long max_in_flight = (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT)*
806 (cli->cl_max_rpcs_in_flight + 1);
807 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
809 oa->o_grant = cli->cl_avail_grant;
810 oa->o_dropped = cli->cl_lost_grant;
811 cli->cl_lost_grant = 0;
812 client_obd_list_unlock(&cli->cl_loi_list_lock);
813 CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
814 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
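816 /* Push the next grant-shrink deadline out by cl_grant_shrink_interval
817 * seconds from now. */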
818 static void osc_update_next_shrink(struct client_obd *cli)
820 cli->cl_next_shrink_grant =
821 cfs_time_shift(cli->cl_grant_shrink_interval);
822 CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
823 cli->cl_next_shrink_grant);
826 /* caller must hold loi_list_lock */
827 static void osc_consume_write_grant(struct client_obd *cli,
828 struct brw_page *pga)
830 LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
831 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
832 cfs_atomic_inc(&obd_dirty_pages);
833 cli->cl_dirty += CFS_PAGE_SIZE;
834 cli->cl_avail_grant -= CFS_PAGE_SIZE;
835 pga->flag |= OBD_BRW_FROM_GRANT;
836 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
837 CFS_PAGE_SIZE, pga, pga->pg);
838 LASSERT(cli->cl_avail_grant >= 0);
839 osc_update_next_shrink(cli);
842 /* the companion to osc_consume_write_grant, called when a brw has completed.
843 * must be called with the loi lock held. */
844 static void osc_release_write_grant(struct client_obd *cli,
845 struct brw_page *pga, int sent)
847 int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
850 LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
851 if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
856 pga->flag &= ~OBD_BRW_FROM_GRANT;
857 cfs_atomic_dec(&obd_dirty_pages);
858 cli->cl_dirty -= CFS_PAGE_SIZE;
859 if (pga->flag & OBD_BRW_NOCACHE) {
860 pga->flag &= ~OBD_BRW_NOCACHE;
861 cfs_atomic_dec(&obd_dirty_transit_pages);
862 cli->cl_dirty_transit -= CFS_PAGE_SIZE;
865 cli->cl_lost_grant += CFS_PAGE_SIZE;
866 CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
867 cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
868 } else if (CFS_PAGE_SIZE != blocksize && pga->count != CFS_PAGE_SIZE) {
869 /* For short writes we shouldn't count parts of pages that
870 * span a whole block on the OST side, or our accounting goes
871 * wrong. Should match the code in filter_grant_check. */
872 int offset = pga->off & ~CFS_PAGE_MASK;
873 int count = pga->count + (offset & (blocksize - 1));
874 int end = (offset + pga->count) & (blocksize - 1);
876 count += blocksize - end;
878 cli->cl_lost_grant += CFS_PAGE_SIZE - count;
879 CDEBUG(D_CACHE, "lost %lu grant: %lu avail: %lu dirty: %lu\n",
880 CFS_PAGE_SIZE - count, cli->cl_lost_grant,
881 cli->cl_avail_grant, cli->cl_dirty);
887 static unsigned long rpcs_in_flight(struct client_obd *cli)
889 return cli->cl_r_in_flight + cli->cl_w_in_flight;
892 /* caller must hold loi_list_lock */
893 void osc_wake_cache_waiters(struct client_obd *cli)
896 struct osc_cache_waiter *ocw;
899 cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
900 /* if we can't dirty more, we must wait until some is written */
901 if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
902 (cfs_atomic_read(&obd_dirty_pages) + 1 >
903 obd_max_dirty_pages)) {
904 CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
905 "osc max %ld, sys max %d\n", cli->cl_dirty,
906 cli->cl_dirty_max, obd_max_dirty_pages);
910 /* if the cache is still dirty but there is no grant, wait for pending
911 * RPCs that may yet return us some grant before doing sync writes */
912 if (cli->cl_w_in_flight && cli->cl_avail_grant < CFS_PAGE_SIZE) {
913 CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
914 cli->cl_w_in_flight);
918 ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry);
919 cfs_list_del_init(&ocw->ocw_entry);
920 if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
921 /* no more RPCs in flight to return grant, do sync IO */
922 ocw->ocw_rc = -EDQUOT;
923 CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
925 osc_consume_write_grant(cli,
926 &ocw->ocw_oap->oap_brw_page);
929 cfs_waitq_signal(&ocw->ocw_waitq);
935 static void __osc_update_grant(struct client_obd *cli, obd_size grant)
937 client_obd_list_lock(&cli->cl_loi_list_lock);
938 cli->cl_avail_grant += grant;
939 client_obd_list_unlock(&cli->cl_loi_list_lock);
942 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
944 if (body->oa.o_valid & OBD_MD_FLGRANT) {
945 CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
946 __osc_update_grant(cli, body->oa.o_grant);
950 static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
951 void *key, obd_count vallen, void *val,
952 struct ptlrpc_request_set *set);
954 static int osc_shrink_grant_interpret(const struct lu_env *env,
955 struct ptlrpc_request *req,
958 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
959 struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
960 struct ost_body *body;
963 __osc_update_grant(cli, oa->o_grant);
967 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
969 osc_update_grant(cli, body);
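971 /* Give back a quarter of the available grant: stash it in @oa->o_grant
972 * and set OBD_FL_SHRINK_GRANT so the OST knows to reclaim it. */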
975 static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
977 client_obd_list_lock(&cli->cl_loi_list_lock);
978 oa->o_grant = cli->cl_avail_grant / 4;
979 cli->cl_avail_grant -= oa->o_grant;
980 client_obd_list_unlock(&cli->cl_loi_list_lock);
981 if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
982 oa->o_valid |= OBD_MD_FLFLAGS;
985 oa->o_flags |= OBD_FL_SHRINK_GRANT;
986 osc_update_next_shrink(cli);
989 /* Shrink the current grant, either from some large amount to enough for a
990 * full set of in-flight RPCs, or if we have already shrunk to that limit
991 * then to enough for a single RPC. This avoids keeping more grant than
992 * needed, and avoids shrinking the grant piecemeal. */
993 static int osc_shrink_grant(struct client_obd *cli)
995 long target = (cli->cl_max_rpcs_in_flight + 1) *
996 cli->cl_max_pages_per_rpc;
998 client_obd_list_lock(&cli->cl_loi_list_lock);
999 if (cli->cl_avail_grant <= target)
1000 target = cli->cl_max_pages_per_rpc;
1001 client_obd_list_unlock(&cli->cl_loi_list_lock);
1003 return osc_shrink_grant_to_target(cli, target);
1006 int osc_shrink_grant_to_target(struct client_obd *cli, long target)
1009 struct ost_body *body;
1012 client_obd_list_lock(&cli->cl_loi_list_lock);
1013 /* Don't shrink if we are already above or below the desired limit.
1014 * We don't want to shrink below a single RPC, as that will negatively
1015 * impact block allocation and long-term performance. */
1016 if (target < cli->cl_max_pages_per_rpc)
1017 target = cli->cl_max_pages_per_rpc;
1019 if (target >= cli->cl_avail_grant) {
1020 client_obd_list_unlock(&cli->cl_loi_list_lock);
1023 client_obd_list_unlock(&cli->cl_loi_list_lock);
1025 OBD_ALLOC_PTR(body);
1029 osc_announce_cached(cli, &body->oa, 0);
1031 client_obd_list_lock(&cli->cl_loi_list_lock);
1032 body->oa.o_grant = cli->cl_avail_grant - target;
1033 cli->cl_avail_grant = target;
1034 client_obd_list_unlock(&cli->cl_loi_list_lock);
1035 if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
1036 body->oa.o_valid |= OBD_MD_FLFLAGS;
1037 body->oa.o_flags = 0;
1039 body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
1040 osc_update_next_shrink(cli);
1042 rc = osc_set_info_async(cli->cl_import->imp_obd->obd_self_export,
1043 sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
1044 sizeof(*body), body, NULL);
1046 __osc_update_grant(cli, body->oa.o_grant);
1051 #define GRANT_SHRINK_LIMIT PTLRPC_MAX_BRW_SIZE
1052 static int osc_should_shrink_grant(struct client_obd *client)
1054 cfs_time_t time = cfs_time_current();
1055 cfs_time_t next_shrink = client->cl_next_shrink_grant;
1057 if ((client->cl_import->imp_connect_data.ocd_connect_flags &
1058 OBD_CONNECT_GRANT_SHRINK) == 0)
1061 if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
1062 if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
1063 client->cl_avail_grant > GRANT_SHRINK_LIMIT)
1066 osc_update_next_shrink(client);
1071 static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
1073 struct client_obd *client;
1075 cfs_list_for_each_entry(client, &item->ti_obd_list,
1076 cl_grant_shrink_list) {
1077 if (osc_should_shrink_grant(client))
1078 osc_shrink_grant(client);
1083 static int osc_add_shrink_grant(struct client_obd *client)
1087 rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
1089 osc_grant_shrink_grant_cb, NULL,
1090 &client->cl_grant_shrink_list);
1092 CERROR("add grant client %s error %d\n",
1093 client->cl_import->imp_obd->obd_name, rc);
1096 CDEBUG(D_CACHE, "add grant client %s \n",
1097 client->cl_import->imp_obd->obd_name);
1098 osc_update_next_shrink(client);
1102 static int osc_del_shrink_grant(struct client_obd *client)
1104 return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
1108 static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1111 * ocd_grant is the total grant amount we expect to hold: if we've
1112 * been evicted, it's the new avail_grant amount, and cl_dirty will drop
1113 * to 0 as in-flight RPCs fail out; otherwise, it's avail_grant + dirty.
1115 * A race is tolerable here: if we're evicted, but imp_state has already
1116 * left the EVICTED state, then cl_dirty must be 0 already.
1118 client_obd_list_lock(&cli->cl_loi_list_lock);
1119 if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
1120 cli->cl_avail_grant = ocd->ocd_grant;
1122 cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
1124 if (cli->cl_avail_grant < 0) {
1125 CWARN("%s: available grant < 0, the OSS is probably not running"
1126 " with patch from bug20278 (%ld) \n",
1127 cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
1128 /* workaround for 1.6 servers which do not have
1129 * the patch from bug20278 */
1130 cli->cl_avail_grant = ocd->ocd_grant;
1133 client_obd_list_unlock(&cli->cl_loi_list_lock);
1135 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
1136 cli->cl_import->imp_obd->obd_name,
1137 cli->cl_avail_grant, cli->cl_lost_grant);
1139 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
1140 cfs_list_empty(&cli->cl_grant_shrink_list))
1141 osc_add_shrink_grant(cli);
1144 /* We assume that the reason this OSC got a short read is that it read
1145 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
1146 * via the LOV, and it _knows_ it's reading inside the file, it's just that
1147 * this stripe never got written at or beyond this stripe offset yet. */
1148 static void handle_short_read(int nob_read, obd_count page_count,
1149 struct brw_page **pga)
1154 /* skip bytes read OK */
1155 while (nob_read > 0) {
1156 LASSERT (page_count > 0);
1158 if (pga[i]->count > nob_read) {
1159 /* EOF inside this page */
1160 ptr = cfs_kmap(pga[i]->pg) +
1161 (pga[i]->off & ~CFS_PAGE_MASK);
1162 memset(ptr + nob_read, 0, pga[i]->count - nob_read);
1163 cfs_kunmap(pga[i]->pg);
1169 nob_read -= pga[i]->count;
1174 /* zero remaining pages */
1175 while (page_count-- > 0) {
1176 ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
1177 memset(ptr, 0, pga[i]->count);
1178 cfs_kunmap(pga[i]->pg);
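1179 /* Sanity-check a BRW_WRITE reply: the per-niobuf return codes must all
1180 * be zero and the bulk must have transferred exactly @requested_nob bytes. */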
1183 static int check_write_rcs(struct ptlrpc_request *req,
1184 int requested_nob, int niocount,
1185 obd_count page_count, struct brw_page **pga)
1190 remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
1191 sizeof(*remote_rcs) *
1193 if (remote_rcs == NULL) {
1194 CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
1198 /* return error if any niobuf was in error */
1199 for (i = 0; i < niocount; i++) {
1200 if ((int)remote_rcs[i] < 0)
1201 return(remote_rcs[i]);
1203 if (remote_rcs[i] != 0) {
1204 CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
1205 i, remote_rcs[i], req);
1210 if (req->rq_bulk->bd_nob_transferred != requested_nob) {
1211 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
1212 req->rq_bulk->bd_nob_transferred, requested_nob);
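1214 /* Two brw_pages can share one niobuf only if they are file-contiguous
1215 * and their flags (modulo a few benign bits) match. */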
1219 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
1221 if (p1->flag != p2->flag) {
1222 unsigned mask = ~(OBD_BRW_FROM_GRANT| OBD_BRW_NOCACHE|
1223 OBD_BRW_SYNC|OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);
1225 /* warn if we try to combine flags that we don't know to be
1226 * safe to combine */
1227 if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
1228 CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
1229 "report this at http://bugs.whamcloud.com/\n",
1230 p1->flag, p2->flag);
1235 return (p1->off + p1->count == p2->off);
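1236 /* Checksum the first @nob bytes of the page array using @cksum_type. */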
1238 static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
1239 struct brw_page **pga, int opc,
1240 cksum_type_t cksum_type)
1245 LASSERT (pg_count > 0);
1246 cksum = init_checksum(cksum_type);
1247 while (nob > 0 && pg_count > 0) {
1248 unsigned char *ptr = cfs_kmap(pga[i]->pg);
1249 int off = pga[i]->off & ~CFS_PAGE_MASK;
1250 int count = pga[i]->count > nob ? nob : pga[i]->count;
1252 /* corrupt the data before we compute the checksum, to
1253 * simulate an OST->client data error */
1254 if (i == 0 && opc == OST_READ &&
1255 OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
1256 memcpy(ptr + off, "bad1", min(4, nob));
1257 cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
1258 cfs_kunmap(pga[i]->pg);
1259 LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
1262 nob -= pga[i]->count;
1266 /* For sends we only compute a wrong checksum instead of corrupting
1267 * the data, so it is still correct on a redo */
1268 if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
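1269 /* Build a bulk read/write RPC: allocate the request (from the
1270 * pre-allocated pool for writes), coalesce contiguous pages into niobufs,
1271 * attach the bulk descriptor and, for writes, an optional bulk checksum. */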
1274 static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
1275 struct lov_stripe_md *lsm, obd_count page_count,
1276 struct brw_page **pga,
1277 struct ptlrpc_request **reqp,
1278 struct obd_capa *ocapa, int reserve,
1281 struct ptlrpc_request *req;
1282 struct ptlrpc_bulk_desc *desc;
1283 struct ost_body *body;
1284 struct obd_ioobj *ioobj;
1285 struct niobuf_remote *niobuf;
1286 int niocount, i, requested_nob, opc, rc;
1287 struct osc_brw_async_args *aa;
1288 struct req_capsule *pill;
1289 struct brw_page *pg_prev;
1292 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1293 RETURN(-ENOMEM); /* Recoverable */
1294 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1295 RETURN(-EINVAL); /* Fatal */
1297 if ((cmd & OBD_BRW_WRITE) != 0) {
1299 req = ptlrpc_request_alloc_pool(cli->cl_import,
1300 cli->cl_import->imp_rq_pool,
1301 &RQF_OST_BRW_WRITE);
1304 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1309 for (niocount = i = 1; i < page_count; i++) {
1310 if (!can_merge_pages(pga[i - 1], pga[i]))
1314 pill = &req->rq_pill;
1315 req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1317 req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1318 niocount * sizeof(*niobuf));
1319 osc_set_capa_size(req, &RMF_CAPA1, ocapa);
1321 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1323 ptlrpc_request_free(req);
1326 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1327 ptlrpc_at_set_req_timeout(req);
1329 if (opc == OST_WRITE)
1330 desc = ptlrpc_prep_bulk_imp(req, page_count,
1331 BULK_GET_SOURCE, OST_BULK_PORTAL);
1333 desc = ptlrpc_prep_bulk_imp(req, page_count,
1334 BULK_PUT_SINK, OST_BULK_PORTAL);
1337 GOTO(out, rc = -ENOMEM);
1338 /* NB request now owns desc and will free it when it gets freed */
1340 body = req_capsule_client_get(pill, &RMF_OST_BODY);
1341 ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1342 niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1343 LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1345 lustre_set_wire_obdo(&body->oa, oa);
1347 obdo_to_ioobj(oa, ioobj);
1348 ioobj->ioo_bufcnt = niocount;
1349 osc_pack_capa(req, body, ocapa);
1350 LASSERT (page_count > 0);
1352 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1353 struct brw_page *pg = pga[i];
1354 int poff = pg->off & ~CFS_PAGE_MASK;
1356 LASSERT(pg->count > 0);
1357 /* make sure there is no gap in the middle of the page array */
1358 LASSERTF(page_count == 1 ||
1359 (ergo(i == 0, poff + pg->count == CFS_PAGE_SIZE) &&
1360 ergo(i > 0 && i < page_count - 1,
1361 poff == 0 && pg->count == CFS_PAGE_SIZE) &&
1362 ergo(i == page_count - 1, poff == 0)),
1363 "i: %d/%d pg: %p off: "LPU64", count: %u\n",
1364 i, page_count, pg, pg->off, pg->count);
1366 LASSERTF(i == 0 || pg->off > pg_prev->off,
1367 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
1368 " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
1370 pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1371 pg_prev->pg, page_private(pg_prev->pg),
1372 pg_prev->pg->index, pg_prev->off);
1374 LASSERTF(i == 0 || pg->off > pg_prev->off,
1375 "i %d p_c %u\n", i, page_count);
1377 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1378 (pg->flag & OBD_BRW_SRVLOCK));
1380 ptlrpc_prep_bulk_page(desc, pg->pg, poff, pg->count);
1381 requested_nob += pg->count;
1383 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1385 niobuf->len += pg->count;
1387 niobuf->offset = pg->off;
1388 niobuf->len = pg->count;
1389 niobuf->flags = pg->flag;
1394 LASSERTF((void *)(niobuf - niocount) ==
1395 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1396 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1397 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1399 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1401 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1402 body->oa.o_valid |= OBD_MD_FLFLAGS;
1403 body->oa.o_flags = 0;
1405 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1408 if (osc_should_shrink_grant(cli))
1409 osc_shrink_grant_local(cli, &body->oa);
1411 /* size[REQ_REC_OFF] still sizeof (*body) */
1412 if (opc == OST_WRITE) {
1413 if (unlikely(cli->cl_checksum) &&
1414 !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1415 /* store cl_cksum_type in a local variable since
1416 * it can be changed via lprocfs */
1417 cksum_type_t cksum_type = cli->cl_cksum_type;
1419 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1420 oa->o_flags &= OBD_FL_LOCAL_MASK;
1421 body->oa.o_flags = 0;
1423 body->oa.o_flags |= cksum_type_pack(cksum_type);
1424 body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1425 body->oa.o_cksum = osc_checksum_bulk(requested_nob,
1429 CDEBUG(D_PAGE, "checksum at write origin: %x\n",
1431 /* save this in 'oa', too, for later checking */
1432 oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1433 oa->o_flags |= cksum_type_pack(cksum_type);
1435 /* clear out the checksum flag, in case this is a
1436 * resend but cl_checksum is no longer set. b=11238 */
1437 oa->o_valid &= ~OBD_MD_FLCKSUM;
1439 oa->o_cksum = body->oa.o_cksum;
1440 /* 1 RC per niobuf */
1441 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1442 sizeof(__u32) * niocount);
1444 if (unlikely(cli->cl_checksum) &&
1445 !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1446 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1447 body->oa.o_flags = 0;
1448 body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
1449 body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1452 ptlrpc_request_set_replen(req);
1454 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1455 aa = ptlrpc_req_async_args(req);
1457 aa->aa_requested_nob = requested_nob;
1458 aa->aa_nio_count = niocount;
1459 aa->aa_page_count = page_count;
1463 CFS_INIT_LIST_HEAD(&aa->aa_oaps);
1464 if (ocapa && reserve)
1465 aa->aa_ocapa = capa_get(ocapa);
1471 ptlrpc_req_finished(req);
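1472 /* Verify a BRW_WRITE checksum against the server's value: on mismatch,
1473 * recompute the checksum locally to guess where the corruption happened
1474 * and log a detailed console error. Returns 0 if the checksums agree. */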
1475 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
1476 __u32 client_cksum, __u32 server_cksum, int nob,
1477 obd_count page_count, struct brw_page **pga,
1478 cksum_type_t client_cksum_type)
1482 cksum_type_t cksum_type;
1484 if (server_cksum == client_cksum) {
1485 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1489 cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1491 new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
1494 if (cksum_type != client_cksum_type)
1495 msg = "the server did not use the checksum type specified in "
1496 "the original request - likely a protocol problem";
1497 else if (new_cksum == server_cksum)
1498 msg = "changed on the client after we checksummed it - "
1499 "likely false positive due to mmap IO (bug 11742)";
1500 else if (new_cksum == client_cksum)
1501 msg = "changed in transit before arrival at OST";
1503 msg = "changed in transit AND doesn't match the original - "
1504 "likely false positive due to mmap IO (bug 11742)";
1506 LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
1507 " object "LPU64"/"LPU64" extent ["LPU64"-"LPU64"]\n",
1508 msg, libcfs_nid2str(peer->nid),
1509 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1510 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1511 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1513 oa->o_valid & OBD_MD_FLGROUP ? oa->o_seq : (__u64)0,
1515 pga[page_count-1]->off + pga[page_count-1]->count - 1);
1516 CERROR("original client csum %x (type %x), server csum %x (type %x), "
1517 "client csum now %x\n", client_cksum, client_cksum_type,
1518 server_cksum, cksum_type, new_cksum);
1522 /* Note: rc enters this function as the number of bytes transferred */
1523 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1525 struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1526 const lnet_process_id_t *peer =
1527 &req->rq_import->imp_connection->c_peer;
1528 struct client_obd *cli = aa->aa_cli;
1529 struct ost_body *body;
1530 __u32 client_cksum = 0;
1533 if (rc < 0 && rc != -EDQUOT) {
1534 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
1538 LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1539 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1541 DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
1545 #ifdef HAVE_QUOTA_SUPPORT
1546 /* set/clear the over-quota flag for a uid/gid */
1547 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1548 body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
1549 unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
1551 CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
1552 body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
1554 lquota_setdq(quota_interface, cli, qid, body->oa.o_valid,
1559 osc_update_grant(cli, body);
1564 if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1565 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1567 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1569 CERROR("Unexpected +ve rc %d\n", rc);
1572 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
1574 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1577 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1578 check_write_checksum(&body->oa, peer, client_cksum,
1579 body->oa.o_cksum, aa->aa_requested_nob,
1580 aa->aa_page_count, aa->aa_ppga,
1581 cksum_type_unpack(aa->aa_oa->o_flags)))
1584 rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
1585 aa->aa_page_count, aa->aa_ppga);
1589 /* The rest of this function executes only for OST_READs */
1591 /* if unwrap_bulk failed, return -EAGAIN to retry */
1592 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1594 GOTO(out, rc = -EAGAIN);
1596 if (rc > aa->aa_requested_nob) {
1597 CERROR("Unexpected rc %d (%d requested)\n", rc,
1598 aa->aa_requested_nob);
1602 if (rc != req->rq_bulk->bd_nob_transferred) {
1603 CERROR ("Unexpected rc %d (%d transferred)\n",
1604 rc, req->rq_bulk->bd_nob_transferred);
1608 if (rc < aa->aa_requested_nob)
1609 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1611 if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1612 static int cksum_counter;
1613 __u32 server_cksum = body->oa.o_cksum;
1616 cksum_type_t cksum_type;
1618 cksum_type = cksum_type_unpack(body->oa.o_valid &OBD_MD_FLFLAGS?
1619 body->oa.o_flags : 0);
1620 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
1621 aa->aa_ppga, OST_READ,
1624 if (peer->nid == req->rq_bulk->bd_sender) {
1628 router = libcfs_nid2str(req->rq_bulk->bd_sender);
1631 if (server_cksum == ~0 && rc > 0) {
1632 CERROR("Protocol error: server %s set the 'checksum' "
1633 "bit, but didn't send a checksum. Not fatal, "
1634 "but please notify on http://bugs.whamcloud.com/\n",
1635 libcfs_nid2str(peer->nid));
1636 } else if (server_cksum != client_cksum) {
1637 LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
1638 "%s%s%s inode "DFID" object "
1639 LPU64"/"LPU64" extent "
1640 "["LPU64"-"LPU64"]\n",
1641 req->rq_import->imp_obd->obd_name,
1642 libcfs_nid2str(peer->nid),
1644 body->oa.o_valid & OBD_MD_FLFID ?
1645 body->oa.o_parent_seq : (__u64)0,
1646 body->oa.o_valid & OBD_MD_FLFID ?
1647 body->oa.o_parent_oid : 0,
1648 body->oa.o_valid & OBD_MD_FLFID ?
1649 body->oa.o_parent_ver : 0,
1651 body->oa.o_valid & OBD_MD_FLGROUP ?
1652 body->oa.o_seq : (__u64)0,
1653 aa->aa_ppga[0]->off,
1654 aa->aa_ppga[aa->aa_page_count-1]->off +
1655 aa->aa_ppga[aa->aa_page_count-1]->count -
1657 CERROR("client %x, server %x, cksum_type %x\n",
1658 client_cksum, server_cksum, cksum_type);
1660 aa->aa_oa->o_cksum = client_cksum;
1664 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1667 } else if (unlikely(client_cksum)) {
1668 static int cksum_missed;
1671 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1672 CERROR("Checksum %u requested from %s but not sent\n",
1673 cksum_missed, libcfs_nid2str(peer->nid));
1679 lustre_get_wire_obdo(aa->aa_oa, &body->oa);
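1680 /* Synchronous bulk I/O: build the BRW request, queue it and wait,
1681 * retrying with backoff on timeouts and other recoverable errors. */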
1684 static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1685 struct lov_stripe_md *lsm,
1686 obd_count page_count, struct brw_page **pga,
1687 struct obd_capa *ocapa)
1689 struct ptlrpc_request *req;
1693 struct l_wait_info lwi;
1697 cfs_waitq_init(&waitq);
1700 rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
1701 page_count, pga, &req, ocapa, 0, resends);
1705 rc = ptlrpc_queue_wait(req);
1707 if (rc == -ETIMEDOUT && req->rq_resend) {
1708 DEBUG_REQ(D_HA, req, "BULK TIMEOUT");
1709 ptlrpc_req_finished(req);
1713 rc = osc_brw_fini_request(req, rc);
1715 ptlrpc_req_finished(req);
1716 if (osc_recoverable_error(rc)) {
1718 if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
1719 CERROR("too many resend retries, returning error\n");
1723 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
1724 l_wait_event(waitq, 0, &lwi);
1732 int osc_brw_redo_request(struct ptlrpc_request *request,
1733 struct osc_brw_async_args *aa)
1735 struct ptlrpc_request *new_req;
1736 struct ptlrpc_request_set *set = request->rq_set;
1737 struct osc_brw_async_args *new_aa;
1738 struct osc_async_page *oap;
1742 if (!client_should_resend(aa->aa_resends, aa->aa_cli)) {
1743 CERROR("too many resend retries, returning error\n");
1747 DEBUG_REQ(D_ERROR, request, "redo for recoverable error");
1749 rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1750 OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
1751 aa->aa_cli, aa->aa_oa,
1752 NULL /* lsm unused by osc currently */,
1753 aa->aa_page_count, aa->aa_ppga,
1754 &new_req, aa->aa_ocapa, 0, 1);
1758 client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
1760 cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1761 if (oap->oap_request != NULL) {
1762 LASSERTF(request == oap->oap_request,
1763 "request %p != oap_request %p\n",
1764 request, oap->oap_request);
1765 if (oap->oap_interrupted) {
1766 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1767 ptlrpc_req_finished(new_req);
1772 /* The new request takes over pga and oaps from the old request.
1773 * Note that copying a list_head doesn't work; it has to be moved. */
1775 new_req->rq_interpret_reply = request->rq_interpret_reply;
1776 new_req->rq_async_args = request->rq_async_args;
1777 new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
1779 new_aa = ptlrpc_req_async_args(new_req);
1781 CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
1782 cfs_list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
1783 CFS_INIT_LIST_HEAD(&aa->aa_oaps);
1785 cfs_list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1786 if (oap->oap_request) {
1787 ptlrpc_req_finished(oap->oap_request);
1788 oap->oap_request = ptlrpc_request_addref(new_req);
1792 new_aa->aa_ocapa = aa->aa_ocapa;
1793 aa->aa_ocapa = NULL;
1795 /* Using ptlrpc_set_add_req() is safe because interpret functions run in
1796 * check_set context. The only way another thread can access this request
1797 * is if it got -EINTR, and that path is protected by
1798 * cl_loi_list_lock */
1799 ptlrpc_set_add_req(set, new_req);
1801 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1803 DEBUG_REQ(D_INFO, new_req, "new request");
1808 * ugh, we want disk allocation on the target to happen in offset order. we'll
1809 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
1810 * fine for our small page arrays and doesn't require allocation. it's an
1811 * insertion sort that swaps elements that are strides apart, shrinking the
1812 * stride down until it's '1' and the array is sorted.
1814 static void sort_brw_pages(struct brw_page **array, int num)
1817 struct brw_page *tmp;
1821 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1826 for (i = stride ; i < num ; i++) {
1829 while (j >= stride && array[j - stride]->off > tmp->off) {
1830 array[j] = array[j - stride];
1835 } while (stride > 1);
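1836 /* Return how many leading pages of @pg form an unfragmented run, i.e.
1837 * stop once a page ends or a successor starts off a page boundary. */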
1838 static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages)
1844 LASSERT (pages > 0);
1845 offset = pg[i]->off & ~CFS_PAGE_MASK;
1849 if (pages == 0) /* that's all */
1852 if (offset + pg[i]->count < CFS_PAGE_SIZE)
1853 return count; /* doesn't end on page boundary */
1856 offset = pg[i]->off & ~CFS_PAGE_MASK;
1857 if (offset != 0) /* doesn't start on page boundary */
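1859 /* Build an array of pointers into the flat @pga page array so the pages
1860 * can be sorted and split into per-RPC chunks without moving the
1861 * brw_page structs themselves. */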
1864 static struct brw_page **osc_build_ppga(struct brw_page *pga, obd_count count)
1866 struct brw_page **ppga;
1869 OBD_ALLOC(ppga, sizeof(*ppga) * count);
1873 for (i = 0; i < count; i++)
1878 static void osc_release_ppga(struct brw_page **ppga, obd_count count)
1880 LASSERT(ppga != NULL);
1881 OBD_FREE(ppga, sizeof(*ppga) * count);
1884 static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
1885 obd_count page_count, struct brw_page *pga,
1886 struct obd_trans_info *oti)
1888 struct obdo *saved_oa = NULL;
1889 struct brw_page **ppga, **orig;
1890 struct obd_import *imp = class_exp2cliimp(exp);
1891 struct client_obd *cli;
1892 int rc, page_count_orig;
1895 LASSERT((imp != NULL) && (imp->imp_obd != NULL));
1896 cli = &imp->imp_obd->u.cli;
1898 if (cmd & OBD_BRW_CHECK) {
1899 /* The caller just wants to know if there's a chance that this
1900 * I/O can succeed */
1902 if (imp->imp_invalid)
1907 /* test_brw with a failed create can trip this, maybe others. */
1908 LASSERT(cli->cl_max_pages_per_rpc);
1912 orig = ppga = osc_build_ppga(pga, page_count);
1915 page_count_orig = page_count;
1917 sort_brw_pages(ppga, page_count);
1918 while (page_count) {
1919 obd_count pages_per_brw;
1921 if (page_count > cli->cl_max_pages_per_rpc)
1922 pages_per_brw = cli->cl_max_pages_per_rpc;
1924 pages_per_brw = page_count;
1926 pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
1928 if (saved_oa != NULL) {
1929 /* restore previously saved oa */
1930 *oinfo->oi_oa = *saved_oa;
1931 } else if (page_count > pages_per_brw) {
1932 /* save a copy of oa (brw will clobber it) */
1933 OBDO_ALLOC(saved_oa);
1934 if (saved_oa == NULL)
1935 GOTO(out, rc = -ENOMEM);
1936 *saved_oa = *oinfo->oi_oa;
1939 rc = osc_brw_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
1940 pages_per_brw, ppga, oinfo->oi_capa);
1945 page_count -= pages_per_brw;
1946 ppga += pages_per_brw;
1950 osc_release_ppga(orig, page_count_orig);
1952 if (saved_oa != NULL)
1953 OBDO_FREE(saved_oa);
1958 /* The companion to osc_enter_cache(), called when @oap is no longer part of
1959 * the dirty accounting: either writeback completed or a truncate happened
1960 * before writing started. Must be called with the loi lock held. */
1961 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1964 osc_release_write_grant(cli, &oap->oap_brw_page, sent);
1968 /* This maintains the lists of pending pages to read/write for a given object
1969 * (lop). This is used by osc_check_rpcs->osc_next_loi() and loi_list_maint()
1970 * to quickly find objects that are ready to send an RPC. */
1971 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1977 if (lop->lop_num_pending == 0)
1980 /* if we have an invalid import we want to drain the queued pages
1981 * by forcing them through rpcs that immediately fail and complete
1982 * the pages. recovery relies on this to empty the queued pages
1983 * before canceling the locks and evicting down the llite pages */
1984 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1987 /* stream rpcs in queue order as long as there is an urgent page
1988 * queued. this is our cheap solution for good batching in the case
1989 * where writepage marks some random page in the middle of the file
1990 * as urgent because of, say, memory pressure */
1991 if (!cfs_list_empty(&lop->lop_urgent)) {
1992 CDEBUG(D_CACHE, "urgent request forcing RPC\n");
1995 /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
1996 optimal = cli->cl_max_pages_per_rpc;
1997 if (cmd & OBD_BRW_WRITE) {
1998 /* trigger a write rpc stream as long as there are dirtiers
1999 * waiting for space. as they're waiting, they're not going to
2000 * create more pages to coalesce with what's waiting. */
2001 if (!cfs_list_empty(&cli->cl_cache_waiters)) {
2002 CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
2005 /* +16 to avoid triggering rpcs that would want to include pages
2006 * that are being queued but which can't be made ready until
2007 * the queuer finishes with the page. this is a wart for
2008 * llite::commit_write() */
2011 if (lop->lop_num_pending >= optimal)
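2013 /* Returns 1 if the first urgent page on @lop is marked high-priority,
2014 * forcing an immediate RPC. */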
2017 static int lop_makes_hprpc(struct loi_oap_pages *lop)
2019 struct osc_async_page *oap;
2022 if (cfs_list_empty(&lop->lop_urgent))
2025 oap = cfs_list_entry(lop->lop_urgent.next,
2026 struct osc_async_page, oap_urgent_item);
2028 if (oap->oap_async_flags & ASYNC_HP) {
2029 CDEBUG(D_CACHE, "hp request forcing RPC\n");
2036 static void on_list(cfs_list_t *item, cfs_list_t *list,
2039 if (cfs_list_empty(item) && should_be_on)
2040 cfs_list_add_tail(item, list);
2041 else if (!cfs_list_empty(item) && !should_be_on)
2042 cfs_list_del_init(item);
2045 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
2046 * can find pages to build into rpcs quickly */
2047 void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
2049 if (lop_makes_hprpc(&loi->loi_write_lop) ||
2050 lop_makes_hprpc(&loi->loi_read_lop)) {
2052 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
2053 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
2055 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
2056 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
2057 lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
2058 lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
2061 on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
2062 loi->loi_write_lop.lop_num_pending);
2064 on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
2065 loi->loi_read_lop.lop_num_pending);
2068 static void lop_update_pending(struct client_obd *cli,
2069 struct loi_oap_pages *lop, int cmd, int delta)
2071 lop->lop_num_pending += delta;
2072 if (cmd & OBD_BRW_WRITE)
2073 cli->cl_pending_w_pages += delta;
2075 cli->cl_pending_r_pages += delta;
2079 * this is called when a sync waiter receives an interruption. Its job is to
2080 * get the caller woken as soon as possible. If its page hasn't been put in an
2081 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
2082 * desiring interruption, which will forcefully complete the rpc.
2085 int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap)
2087 struct loi_oap_pages *lop;
2088 struct lov_oinfo *loi;
2092 LASSERT(!oap->oap_interrupted);
2093 oap->oap_interrupted = 1;
2095 /* ok, it's been put in an rpc. only one oap gets a request reference */
2096 if (oap->oap_request != NULL) {
2097 ptlrpc_mark_interrupted(oap->oap_request);
2098 ptlrpcd_wake(oap->oap_request);
2099 ptlrpc_req_finished(oap->oap_request);
2100 oap->oap_request = NULL;
2104 * page completion may be called only if the ->cpo_prep() method was
2105 * executed by osc_io_submit(), which also adds the page to the pending list
2107 if (!cfs_list_empty(&oap->oap_pending_item)) {
2108 cfs_list_del_init(&oap->oap_pending_item);
2109 cfs_list_del_init(&oap->oap_urgent_item);
2112 lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
2113 &loi->loi_write_lop : &loi->loi_read_lop;
2114 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
2115 loi_list_maint(oap->oap_cli, oap->oap_loi);
2116 rc = oap->oap_caller_ops->ap_completion(env,
2117 oap->oap_caller_data,
2118 oap->oap_cmd, NULL, -EINTR);
2124 /* this is trying to propagate async writeback errors back up to the
2125 * application. When an async write fails we record the error code for later if
2126 * the app does an fsync. As long as errors persist we force future rpcs to be
2127 * sync so that the app can get a sync error and break the cycle of queueing
2128 * pages for which writeback will fail. */
2129 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
2136 ar->ar_force_sync = 1;
2137 ar->ar_min_xid = ptlrpc_sample_next_xid();
2142 if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
2143 ar->ar_force_sync = 0;
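2144 /* Queue @oap on its object's pending list (and the urgent list if it
2145 * carries ASYNC_HP or ASYNC_URGENT) and bump the pending counts. */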
2146 void osc_oap_to_pending(struct osc_async_page *oap)
2148 struct loi_oap_pages *lop;
2150 if (oap->oap_cmd & OBD_BRW_WRITE)
2151 lop = &oap->oap_loi->loi_write_lop;
2153 lop = &oap->oap_loi->loi_read_lop;
2155 if (oap->oap_async_flags & ASYNC_HP)
2156 cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2157 else if (oap->oap_async_flags & ASYNC_URGENT)
2158 cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
2159 cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2160 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
2163 /* this must be called holding the loi list lock to give coverage to exit_cache,
2164 * async_flag maintenance, and oap_request */
2165 static void osc_ap_completion(const struct lu_env *env,
2166 struct client_obd *cli, struct obdo *oa,
2167 struct osc_async_page *oap, int sent, int rc)
2172 if (oap->oap_request != NULL) {
2173 xid = ptlrpc_req_xid(oap->oap_request);
2174 ptlrpc_req_finished(oap->oap_request);
2175 oap->oap_request = NULL;
2178 cfs_spin_lock(&oap->oap_lock);
2179 oap->oap_async_flags = 0;
2180 cfs_spin_unlock(&oap->oap_lock);
2181 oap->oap_interrupted = 0;
2183 if (oap->oap_cmd & OBD_BRW_WRITE) {
2184 osc_process_ar(&cli->cl_ar, xid, rc);
2185 osc_process_ar(&oap->oap_loi->loi_ar, xid, rc);
2188 if (rc == 0 && oa != NULL) {
2189 if (oa->o_valid & OBD_MD_FLBLOCKS)
2190 oap->oap_loi->loi_lvb.lvb_blocks = oa->o_blocks;
2191 if (oa->o_valid & OBD_MD_FLMTIME)
2192 oap->oap_loi->loi_lvb.lvb_mtime = oa->o_mtime;
2193 if (oa->o_valid & OBD_MD_FLATIME)
2194 oap->oap_loi->loi_lvb.lvb_atime = oa->o_atime;
2195 if (oa->o_valid & OBD_MD_FLCTIME)
2196 oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
2199 rc = oap->oap_caller_ops->ap_completion(env, oap->oap_caller_data,
2200 oap->oap_cmd, oa, rc);
2202 /* cl_page_completion() drops PG_locked.  So a new I/O on the page could
2203 * start, but OSC calls it under lock and thus we can add the oap back to pending safely */
2206 /* upper layer wants to leave the page on pending queue */
2207 osc_oap_to_pending(oap);
2209 osc_exit_cache(cli, oap, sent);
2213 static int brw_interpret(const struct lu_env *env,
2214 struct ptlrpc_request *req, void *data, int rc)
2216 struct osc_brw_async_args *aa = data;
2217 struct client_obd *cli;
2221 rc = osc_brw_fini_request(req, rc);
2222 CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2223 if (osc_recoverable_error(rc)) {
2224 rc = osc_brw_redo_request(req, aa);
2230 capa_put(aa->aa_ocapa);
2231 aa->aa_ocapa = NULL;
2236 client_obd_list_lock(&cli->cl_loi_list_lock);
2238 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2239 * is called so we know whether to go to sync BRWs or wait for more
2240 * RPCs to complete */
2241 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2242 cli->cl_w_in_flight--;
2244 cli->cl_r_in_flight--;
2246 async = cfs_list_empty(&aa->aa_oaps);
2247 if (!async) { /* from osc_send_oap_rpc() */
2248 struct osc_async_page *oap, *tmp;
2249 /* the caller may re-use the oap after the completion call so
2250 * we need to clean it up a little */
2251 cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps,
2253 cfs_list_del_init(&oap->oap_rpc_item);
2254 osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
2256 OBDO_FREE(aa->aa_oa);
2257 } else { /* from async_internal() */
2259 for (i = 0; i < aa->aa_page_count; i++)
2260 osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
2262 osc_wake_cache_waiters(cli);
2263 osc_check_rpcs(env, cli);
2264 client_obd_list_unlock(&cli->cl_loi_list_lock);
2266 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
2267 req->rq_bulk->bd_nob_transferred);
2268 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2269 ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
2274 static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
2275 struct client_obd *cli,
2276 cfs_list_t *rpc_list,
2277 int page_count, int cmd)
2279 struct ptlrpc_request *req;
2280 struct brw_page **pga = NULL;
2281 struct osc_brw_async_args *aa;
2282 struct obdo *oa = NULL;
2283 const struct obd_async_page_ops *ops = NULL;
2284 struct osc_async_page *oap;
2285 struct osc_async_page *tmp;
2286 struct cl_req *clerq = NULL;
2287 enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2288 struct ldlm_lock *lock = NULL;
2289 struct cl_req_attr crattr;
2290 int i, rc, mpflag = 0;
2293 LASSERT(!cfs_list_empty(rpc_list));
2295 if (cmd & OBD_BRW_MEMALLOC)
2296 mpflag = cfs_memory_pressure_get_and_set();
2298 memset(&crattr, 0, sizeof(crattr));
2299 OBD_ALLOC(pga, sizeof(*pga) * page_count);
2301 GOTO(out, req = ERR_PTR(-ENOMEM));
2305 GOTO(out, req = ERR_PTR(-ENOMEM));
2308 cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) {
2309 struct cl_page *page = osc_oap2cl_page(oap);
2311 ops = oap->oap_caller_ops;
2313 clerq = cl_req_alloc(env, page, crt,
2314 1 /* only 1-object rpcs for now */);
2317 GOTO(out, req = (void *)clerq);
2318 lock = oap->oap_ldlm_lock;
2320 pga[i] = &oap->oap_brw_page;
2321 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2322 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
2323 pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag);
2325 cl_req_page_add(env, clerq, page);
2328 /* always get the data for the obdo for the rpc */
2329 LASSERT(ops != NULL);
2331 crattr.cra_capa = NULL;
2332 cl_req_attr_set(env, clerq, &crattr, ~0ULL);
2334 oa->o_handle = lock->l_remote_handle;
2335 oa->o_valid |= OBD_MD_FLHANDLE;
2338 rc = cl_req_prep(env, clerq);
2340 CERROR("cl_req_prep failed: %d\n", rc);
2341 GOTO(out, req = ERR_PTR(rc));
2344 sort_brw_pages(pga, page_count);
2345 rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
2346 pga, &req, crattr.cra_capa, 1, 0);
2348 CERROR("prep_req failed: %d\n", rc);
2349 GOTO(out, req = ERR_PTR(rc));
2352 if (cmd & OBD_BRW_MEMALLOC)
2353 req->rq_memalloc = 1;
2355 /* Need to update the timestamps after the request is built in case
2356 * we race with setattr (locally or in queue at OST). If OST gets
2357 * later setattr before earlier BRW (as determined by the request xid),
2358 * the OST will not use BRW timestamps. Sadly, there is no obvious
2359 * way to do this in a single call. bug 10150 */
2360 cl_req_attr_set(env, clerq, &crattr,
2361 OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
2363 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2364 aa = ptlrpc_req_async_args(req);
2365 CFS_INIT_LIST_HEAD(&aa->aa_oaps);
2366 cfs_list_splice(rpc_list, &aa->aa_oaps);
2367 CFS_INIT_LIST_HEAD(rpc_list);
2368 aa->aa_clerq = clerq;
2370 if (cmd & OBD_BRW_MEMALLOC)
2371 cfs_memory_pressure_restore(mpflag);
2373 capa_put(crattr.cra_capa);
2378 OBD_FREE(pga, sizeof(*pga) * page_count);
2379 /* this should happen rarely and is pretty bad; it makes the
2380 * pending list stop following the dirtying order */
2381 client_obd_list_lock(&cli->cl_loi_list_lock);
2382 cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
2383 cfs_list_del_init(&oap->oap_rpc_item);
2385 /* queued sync pages can be torn down while the pages
2386 * were between the pending list and the rpc */
2387 if (oap->oap_interrupted) {
2388 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
2389 osc_ap_completion(env, cli, NULL, oap, 0,
2393 osc_ap_completion(env, cli, NULL, oap, 0, PTR_ERR(req));
2395 if (clerq && !IS_ERR(clerq))
2396 cl_req_completion(env, clerq, PTR_ERR(req));
2402 * Prepare pages for ASYNC io and put them in the send queue.
2404 * \param cmd OBD_BRW_* macros
2405 * \param lop pending pages
2407 * \return zero if no page was added to the send queue.
2408 * \return 1 if pages were successfully added to the send queue.
2409 * \return negative on errors.
2412 osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
2413 struct lov_oinfo *loi,
2414 int cmd, struct loi_oap_pages *lop)
2416 struct ptlrpc_request *req;
2417 obd_count page_count = 0;
2418 struct osc_async_page *oap = NULL, *tmp;
2419 struct osc_brw_async_args *aa;
2420 const struct obd_async_page_ops *ops;
2421 CFS_LIST_HEAD(rpc_list);
2422 int srvlock = 0, mem_tight = 0;
2423 struct cl_object *clob = NULL;
2424 obd_off starting_offset = OBD_OBJECT_EOF;
2425 unsigned int ending_offset;
2426 int starting_page_off = 0;
2429 /* ASYNC_HP pages first.  At present, when the lock covering the
2430 * pages is to be canceled, the pages under that lock are queued
2431 * with ASYNC_HP.  We have to send them out as soon as possible. */
2432 cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
2433 if (oap->oap_async_flags & ASYNC_HP)
2434 cfs_list_move(&oap->oap_pending_item, &lop->lop_pending);
2435 if (++page_count >= cli->cl_max_pages_per_rpc)
2440 /* first we find the pages we're allowed to work with */
2441 cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
2443 ops = oap->oap_caller_ops;
2445 LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
2446 "magic 0x%x\n", oap, oap->oap_magic);
2449 /* pin object in memory, so that completion call-backs
2450 * can be safely called under client_obd_list lock. */
2451 clob = osc_oap2cl_page(oap)->cp_obj;
2452 cl_object_get(clob);
2455 if (page_count != 0 &&
2456 srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
2457 CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
2458 " oap %p, page %p, srvlock %u\n",
2459 oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
2463 /* If there is a gap at the start of this page, it can't merge
2464 * with any previous page, so we'll hand the network a
2465 * "fragmented" page array that it can't transfer in 1 RDMA */
2466 if (oap->oap_obj_off < starting_offset) {
2467 if (starting_page_off != 0)
2470 starting_page_off = oap->oap_page_off;
2471 starting_offset = oap->oap_obj_off + starting_page_off;
2472 } else if (oap->oap_page_off != 0)
2475 /* in llite being 'ready' equates to the page being locked
2476 * until completion unlocks it. commit_write submits a page
2477 * as not ready because its unlock will happen unconditionally
2478 * as the call returns. if we race with commit_write giving
2479 * us that page we don't want to create a hole in the page
2480 * stream, so we stop and leave the rpc to be fired by
2481 * another dirtier or kupdated interval (the not ready page
2482 * will still be on the dirty list). we could call in
2483 * at the end of ll_file_write to process the queue again. */
2484 if (!(oap->oap_async_flags & ASYNC_READY)) {
2485 int rc = ops->ap_make_ready(env, oap->oap_caller_data,
2488 CDEBUG(D_INODE, "oap %p page %p returned %d "
2489 "instead of ready\n", oap,
2493 /* llite is telling us that the page is still
2494 * in commit_write and that we should try
2495 * and put it in an rpc again later. we
2496 * break out of the loop so we don't create
2497 * a hole in the sequence of pages in the rpc
2502 /* the io isn't needed; tell the checks
2503 * below to complete the rpc with EINTR */
2504 cfs_spin_lock(&oap->oap_lock);
2505 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
2506 cfs_spin_unlock(&oap->oap_lock);
2507 oap->oap_count = -EINTR;
2510 cfs_spin_lock(&oap->oap_lock);
2511 oap->oap_async_flags |= ASYNC_READY;
2512 cfs_spin_unlock(&oap->oap_lock);
2515 LASSERTF(0, "oap %p page %p returned %d "
2516 "from make_ready\n", oap,
2524 /* take the page out of our book-keeping */
2525 cfs_list_del_init(&oap->oap_pending_item);
2526 lop_update_pending(cli, lop, cmd, -1);
2527 cfs_list_del_init(&oap->oap_urgent_item);
2529 /* ask the caller for the size of the io as the rpc leaves. */
2530 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
2532 ops->ap_refresh_count(env, oap->oap_caller_data,
2534 LASSERT(oap->oap_page_off + oap->oap_count <= CFS_PAGE_SIZE);
2536 if (oap->oap_count <= 0) {
2537 CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
2539 osc_ap_completion(env, cli, NULL,
2540 oap, 0, oap->oap_count);
2544 /* now put the page back in our accounting */
2545 cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
2546 if (page_count++ == 0)
2547 srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
2549 if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
2552 /* End on a PTLRPC_MAX_BRW_SIZE boundary. We want full-sized
2553 * RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
2554 * have the same alignment as the initial writes that allocated
2555 * extents on the server. */
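/* Worked example (illustrative, assuming PTLRPC_MAX_BRW_SIZE of 1 MiB):
 * a page ending at object offset 0x100000 makes the test below,
 * (ending_offset & (PTLRPC_MAX_BRW_SIZE - 1)) == 0, true, so we stop the
 * RPC here and the next one starts on a 1 MiB boundary, matching the
 * alignment of the writes that first allocated the extents on the server. */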
2556 ending_offset = oap->oap_obj_off + oap->oap_page_off +
2558 if (!(ending_offset & (PTLRPC_MAX_BRW_SIZE - 1)))
2561 if (page_count >= cli->cl_max_pages_per_rpc)
2564 /* If there is a gap at the end of this page, it can't merge
2565 * with any subsequent pages, so we'll hand the network a
2566 * "fragmented" page array that it can't transfer in 1 RDMA */
2567 if (oap->oap_page_off + oap->oap_count < CFS_PAGE_SIZE)
2571 osc_wake_cache_waiters(cli);
2573 loi_list_maint(cli, loi);
2575 client_obd_list_unlock(&cli->cl_loi_list_lock);
2578 cl_object_put(env, clob);
2580 if (page_count == 0) {
2581 client_obd_list_lock(&cli->cl_loi_list_lock);
2585 req = osc_build_req(env, cli, &rpc_list, page_count,
2586 mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
2588 LASSERT(cfs_list_empty(&rpc_list));
2589 loi_list_maint(cli, loi);
2590 RETURN(PTR_ERR(req));
2593 aa = ptlrpc_req_async_args(req);
2595 starting_offset &= PTLRPC_MAX_BRW_SIZE - 1;
2596 if (cmd == OBD_BRW_READ) {
2597 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2598 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2599 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2600 (starting_offset >> CFS_PAGE_SHIFT) + 1);
2602 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2603 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
2604 cli->cl_w_in_flight);
2605 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2606 (starting_offset >> CFS_PAGE_SHIFT) + 1);
2609 client_obd_list_lock(&cli->cl_loi_list_lock);
2611 if (cmd == OBD_BRW_READ)
2612 cli->cl_r_in_flight++;
2614 cli->cl_w_in_flight++;
2616 /* queued sync pages can be torn down while the pages
2617 * were between the pending list and the rpc */
2619 cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2620 /* only one oap gets a request reference */
2623 if (oap->oap_interrupted && !req->rq_intr) {
2624 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2626 ptlrpc_mark_interrupted(req);
2630 tmp->oap_request = ptlrpc_request_addref(req);
2632 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2633 page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2635 req->rq_interpret_reply = brw_interpret;
2637 /* XXX: Maybe the caller can check the RPC bulk descriptor to see which
2638 * CPU/NUMA node the majority of pages were allocated on, and try
2639 * to assign the async RPC to the CPU core (PDL_POLICY_PREFERRED)
2640 * to reduce cross-CPU memory traffic.
2642 * But on the other hand, we expect that multiple ptlrpcd threads
2643 * and the initial write sponsor can run in parallel, especially
2644 * when data checksumming is enabled; that is a CPU-bound operation
2645 * a single ptlrpcd thread cannot process in time.  So more ptlrpcd
2646 * threads sharing the BRW load (with PDL_POLICY_ROUND) seems better.
2648 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
2652 #define LOI_DEBUG(LOI, STR, args...) \
2653 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
2654 !cfs_list_empty(&(LOI)->loi_ready_item) || \
2655 !cfs_list_empty(&(LOI)->loi_hp_ready_item), \
2656 (LOI)->loi_write_lop.lop_num_pending, \
2657 !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent), \
2658 (LOI)->loi_read_lop.lop_num_pending, \
2659 !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent), \
2662 /* This is called by osc_check_rpcs() to find which objects have pages that
2663 * we could be sending. These lists are maintained by lop_makes_rpc(). */
2664 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
2668 /* First return objects that have blocked locks so that they
2669 * will be flushed quickly and other clients can get the lock,
2670 * then objects which have pages ready to be stuffed into RPCs */
2671 if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
2672 RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next,
2673 struct lov_oinfo, loi_hp_ready_item));
2674 if (!cfs_list_empty(&cli->cl_loi_ready_list))
2675 RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
2676 struct lov_oinfo, loi_ready_item));
2678 /* then if we have cache waiters, return all objects with queued
2679 * writes.  This is especially important when many small files
2680 * have filled up the cache but have not been fired into rpcs
2681 * because they don't pass the nr_pending/object threshold */
2682 if (!cfs_list_empty(&cli->cl_cache_waiters) &&
2683 !cfs_list_empty(&cli->cl_loi_write_list))
2684 RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2685 struct lov_oinfo, loi_write_item));
2687 /* then return all queued objects when we have an invalid import
2688 * so that they get flushed */
2689 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2690 if (!cfs_list_empty(&cli->cl_loi_write_list))
2691 RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2694 if (!cfs_list_empty(&cli->cl_loi_read_list))
2695 RETURN(cfs_list_entry(cli->cl_loi_read_list.next,
2696 struct lov_oinfo, loi_read_item));
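/* Selection order, summarized (illustrative note): (1) objects with
 * high-priority (blocked-lock) pages, (2) objects with RPC-ready pages,
 * (3) any object with queued writes when cache waiters need grant back,
 * (4) all queued objects when the import is invalid and everything must
 * be flushed; NULL when no object qualifies. */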
2701 static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi)
2703 struct osc_async_page *oap;
2706 if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) {
2707 oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next,
2708 struct osc_async_page, oap_urgent_item);
2709 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2712 if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) {
2713 oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next,
2714 struct osc_async_page, oap_urgent_item);
2715 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2718 return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
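/* Example (illustrative): with cl_max_rpcs_in_flight == 8 and an ASYNC_HP
 * oap at the head of an urgent list, hprpc == 1 and the check above becomes
 * rpcs_in_flight(cli) >= 9, i.e. one extra RPC slot is allowed so that a
 * high-priority (lock cancellation) request is not starved by bulk I/O
 * already running at the normal limit. */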
2721 /* called with the loi list lock held */
2722 void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2724 struct lov_oinfo *loi;
2725 int rc = 0, race_counter = 0;
2728 while ((loi = osc_next_loi(cli)) != NULL) {
2729 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
2731 if (osc_max_rpc_in_flight(cli, loi))
2734 /* attempt some read/write balancing by alternating between
2735 * reads and writes in an object. The makes_rpc checks here
2736 * would be redundant if we were getting read/write work items
2737 * instead of objects.  We don't want send_oap_rpc to drain a
2738 * partial read pending queue when we were given this object in
2739 * order to do io on its writes while there are cache waiters */
2740 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
2741 rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE,
2742 &loi->loi_write_lop);
2744 CERROR("Write request failed with %d\n", rc);
2746 /* osc_send_oap_rpc failed, mostly due to memory pressure.
2749 * It can't break here, because if:
2750 *  - a page was submitted by osc_io_submit, so that page is locked;
2752 *  - no request is in flight; and
2753 *  - no subsequent request is queued,
2754 * then the system will be in a live-lock state,
2755 * because there is no chance to call
2756 * osc_io_unplug() and osc_check_rpcs() any
2757 * more.  pdflush can't help in this case,
2758 * because it might be blocked grabbing
2759 * the page lock, as we mentioned.
2761 * Anyway, continue to drain pages. */
2770 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
2771 rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ,
2772 &loi->loi_read_lop);
2774 CERROR("Read request failed with %d\n", rc);
2782 /* attempt some inter-object balancing by issuing rpcs
2783 * for each object in turn */
2784 if (!cfs_list_empty(&loi->loi_hp_ready_item))
2785 cfs_list_del_init(&loi->loi_hp_ready_item);
2786 if (!cfs_list_empty(&loi->loi_ready_item))
2787 cfs_list_del_init(&loi->loi_ready_item);
2788 if (!cfs_list_empty(&loi->loi_write_item))
2789 cfs_list_del_init(&loi->loi_write_item);
2790 if (!cfs_list_empty(&loi->loi_read_item))
2791 cfs_list_del_init(&loi->loi_read_item);
2793 loi_list_maint(cli, loi);
2795 /* send_oap_rpc fails with 0 when make_ready tells it to
2796 * back off. llite's make_ready does this when it tries
2797 * to lock a page queued for write that is already locked.
2798 * we want to try sending rpcs from many objects, but we
2799 * don't want to spin failing with 0. */
2800 if (race_counter == 10)
2806 /* we're trying to queue a page in the osc so we're subject to the
2807 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
2808 * If the osc's queued pages are already at that limit, then we want to sleep
2809 * until there is space in the osc's queue for us. We also may be waiting for
2810 * write credits from the OST if there are RPCs in flight that may return some
2811 * before we fall back to sync writes.
2813 * We need this to know whether our allocation was granted in the presence of signals */
2814 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
2818 client_obd_list_lock(&cli->cl_loi_list_lock);
2819 rc = cfs_list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
2820 client_obd_list_unlock(&cli->cl_loi_list_lock);
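/* Note (illustrative): the condition above is true either because the waiter
 * was removed from cl_cache_waiters (space or grant was handed to it) or
 * because no RPCs remain in flight -- in the latter case no further grant
 * can be returned, so the caller must stop sleeping and fall back to
 * synchronous I/O. */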
2825 * Non-blocking version of osc_enter_cache() that consumes grant only when it fits into the dirty cache.
2828 int osc_enter_cache_try(const struct lu_env *env,
2829 struct client_obd *cli, struct lov_oinfo *loi,
2830 struct osc_async_page *oap, int transient)
2834 has_grant = cli->cl_avail_grant >= CFS_PAGE_SIZE;
2836 osc_consume_write_grant(cli, &oap->oap_brw_page);
2838 cli->cl_dirty_transit += CFS_PAGE_SIZE;
2839 cfs_atomic_inc(&obd_dirty_transit_pages);
2840 oap->oap_brw_flags |= OBD_BRW_NOCACHE;
2846 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
2847 * grant or cache space. */
2848 static int osc_enter_cache(const struct lu_env *env,
2849 struct client_obd *cli, struct lov_oinfo *loi,
2850 struct osc_async_page *oap)
2852 struct osc_cache_waiter ocw;
2853 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
2857 CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
2858 "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages),
2859 cli->cl_dirty_max, obd_max_dirty_pages,
2860 cli->cl_lost_grant, cli->cl_avail_grant);
2862 /* force the caller to try sync io. this can jump the list
2863 * of queued writes and create a discontiguous rpc stream */
2864 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
2865 cli->cl_dirty_max < CFS_PAGE_SIZE ||
2866 cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
2869 /* Hopefully normal case - cache space and write credits available */
2870 if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
2871 cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
2872 osc_enter_cache_try(env, cli, loi, oap, 0))
2875 /* It is safe to block as a cache waiter as long as there is grant
2876 * space available, or the hope of additional grant being returned
2877 * when an in-flight write completes.  Using the writeback cache
2878 * if possible is preferable to sending the data synchronously,
2879 * because write pages can then be merged into large requests.
2880 * The addition of this cache waiter causes pending write
2881 * pages to be sent immediately. */
2882 if (cli->cl_w_in_flight || cli->cl_avail_grant >= CFS_PAGE_SIZE) {
2883 cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
2884 cfs_waitq_init(&ocw.ocw_waitq);
2888 loi_list_maint(cli, loi);
2889 osc_check_rpcs(env, cli);
2890 client_obd_list_unlock(&cli->cl_loi_list_lock);
2892 CDEBUG(D_CACHE, "sleeping for cache space\n");
2893 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
2895 client_obd_list_lock(&cli->cl_loi_list_lock);
2896 if (!cfs_list_empty(&ocw.ocw_entry)) {
2897 cfs_list_del(&ocw.ocw_entry);
2907 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
2908 struct lov_oinfo *loi, cfs_page_t *page,
2909 obd_off offset, const struct obd_async_page_ops *ops,
2910 void *data, void **res, int nocache,
2911 struct lustre_handle *lockh)
2913 struct osc_async_page *oap;
2918 return cfs_size_round(sizeof(*oap));
2921 oap->oap_magic = OAP_MAGIC;
2922 oap->oap_cli = &exp->exp_obd->u.cli;
2925 oap->oap_caller_ops = ops;
2926 oap->oap_caller_data = data;
2928 oap->oap_page = page;
2929 oap->oap_obj_off = offset;
2930 if (!client_is_remote(exp) &&
2931 cfs_capable(CFS_CAP_SYS_RESOURCE))
2932 oap->oap_brw_flags = OBD_BRW_NOQUOTA;
2934 LASSERT(!(offset & ~CFS_PAGE_MASK));
2936 CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
2937 CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
2938 CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
2939 CFS_INIT_LIST_HEAD(&oap->oap_page_list);
2941 cfs_spin_lock_init(&oap->oap_lock);
2942 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
2946 int osc_queue_async_io(const struct lu_env *env, struct obd_export *exp,
2947 struct lov_stripe_md *lsm, struct lov_oinfo *loi,
2948 struct osc_async_page *oap, int cmd, int off,
2949 int count, obd_flag brw_flags, enum async_flags async_flags)
2951 struct client_obd *cli = &exp->exp_obd->u.cli;
2955 if (oap->oap_magic != OAP_MAGIC)
2958 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2961 if (!cfs_list_empty(&oap->oap_pending_item) ||
2962 !cfs_list_empty(&oap->oap_urgent_item) ||
2963 !cfs_list_empty(&oap->oap_rpc_item))
2966 /* check if the file's owner/group is over quota */
2967 if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) {
2968 struct cl_object *obj;
2969 struct cl_attr attr; /* XXX put attr into thread info */
2970 unsigned int qid[MAXQUOTAS];
2972 obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj);
2974 cl_object_attr_lock(obj);
2975 rc = cl_object_attr_get(env, obj, &attr);
2976 cl_object_attr_unlock(obj);
2978 qid[USRQUOTA] = attr.cat_uid;
2979 qid[GRPQUOTA] = attr.cat_gid;
2981 lquota_chkdq(quota_interface, cli, qid) == NO_QUOTA)
2988 loi = lsm->lsm_oinfo[0];
2990 client_obd_list_lock(&cli->cl_loi_list_lock);
2992 LASSERT(off + count <= CFS_PAGE_SIZE);
2994 oap->oap_page_off = off;
2995 oap->oap_count = count;
2996 oap->oap_brw_flags = brw_flags;
2997 /* Give a hint to OST that requests are coming from kswapd - bug19529 */
2998 if (cfs_memory_pressure_get())
2999 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
3000 cfs_spin_lock(&oap->oap_lock);
3001 oap->oap_async_flags = async_flags;
3002 cfs_spin_unlock(&oap->oap_lock);
3004 if (cmd & OBD_BRW_WRITE) {
3005 rc = osc_enter_cache(env, cli, loi, oap);
3007 client_obd_list_unlock(&cli->cl_loi_list_lock);
3012 osc_oap_to_pending(oap);
3013 loi_list_maint(cli, loi);
3015 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
3018 osc_check_rpcs(env, cli);
3019 client_obd_list_unlock(&cli->cl_loi_list_lock);
3024 /* aka (~was & now & flag), but this is more clear :) */
3025 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
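/* Example (illustrative): with was = ASYNC_READY and
 * now = ASYNC_READY | ASYNC_URGENT,
 *   SETTING(was, now, ASYNC_URGENT) == 1   (URGENT is newly set)
 *   SETTING(was, now, ASYNC_READY)  == 0   (READY was already set) */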
3027 int osc_set_async_flags_base(struct client_obd *cli,
3028 struct lov_oinfo *loi, struct osc_async_page *oap,
3029 obd_flag async_flags)
3031 struct loi_oap_pages *lop;
3035 LASSERT(!cfs_list_empty(&oap->oap_pending_item));
3037 if (oap->oap_cmd & OBD_BRW_WRITE) {
3038 lop = &loi->loi_write_lop;
3040 lop = &loi->loi_read_lop;
3043 if ((oap->oap_async_flags & async_flags) == async_flags)
3046 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
3047 flags |= ASYNC_READY;
3049 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
3050 cfs_list_empty(&oap->oap_rpc_item)) {
3051 if (oap->oap_async_flags & ASYNC_HP)
3052 cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
3054 cfs_list_add_tail(&oap->oap_urgent_item,
3056 flags |= ASYNC_URGENT;
3057 loi_list_maint(cli, loi);
3059 cfs_spin_lock(&oap->oap_lock);
3060 oap->oap_async_flags |= flags;
3061 cfs_spin_unlock(&oap->oap_lock);
3063 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
3064 oap->oap_async_flags);
3068 int osc_teardown_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
3069 struct lov_oinfo *loi, struct osc_async_page *oap)
3071 struct client_obd *cli = &exp->exp_obd->u.cli;
3072 struct loi_oap_pages *lop;
3076 if (oap->oap_magic != OAP_MAGIC)
3080 loi = lsm->lsm_oinfo[0];
3082 if (oap->oap_cmd & OBD_BRW_WRITE) {
3083 lop = &loi->loi_write_lop;
3085 lop = &loi->loi_read_lop;
3088 client_obd_list_lock(&cli->cl_loi_list_lock);
3090 if (!cfs_list_empty(&oap->oap_rpc_item))
3091 GOTO(out, rc = -EBUSY);
3093 osc_exit_cache(cli, oap, 0);
3094 osc_wake_cache_waiters(cli);
3096 if (!cfs_list_empty(&oap->oap_urgent_item)) {
3097 cfs_list_del_init(&oap->oap_urgent_item);
3098 cfs_spin_lock(&oap->oap_lock);
3099 oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
3100 cfs_spin_unlock(&oap->oap_lock);
3102 if (!cfs_list_empty(&oap->oap_pending_item)) {
3103 cfs_list_del_init(&oap->oap_pending_item);
3104 lop_update_pending(cli, lop, oap->oap_cmd, -1);
3106 loi_list_maint(cli, loi);
3107 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
3109 client_obd_list_unlock(&cli->cl_loi_list_lock);
3113 static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
3114 struct ldlm_enqueue_info *einfo)
3116 void *data = einfo->ei_cbdata;
3119 LASSERT(lock != NULL);
3120 LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
3121 LASSERT(lock->l_resource->lr_type == einfo->ei_type);
3122 LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
3123 LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
3125 lock_res_and_lock(lock);
3126 cfs_spin_lock(&osc_ast_guard);
3128 if (lock->l_ast_data == NULL)
3129 lock->l_ast_data = data;
3130 if (lock->l_ast_data == data)
3133 cfs_spin_unlock(&osc_ast_guard);
3134 unlock_res_and_lock(lock);
3139 static int osc_set_data_with_check(struct lustre_handle *lockh,
3140 struct ldlm_enqueue_info *einfo)
3142 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
3146 set = osc_set_lock_data_with_check(lock, einfo);
3147 LDLM_LOCK_PUT(lock);
3149 CERROR("lockh %p, data %p - client evicted?\n",
3150 lockh, einfo->ei_cbdata);
3154 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3155 ldlm_iterator_t replace, void *data)
3157 struct ldlm_res_id res_id;
3158 struct obd_device *obd = class_exp2obd(exp);
3160 osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3161 ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3165 /* find any ldlm lock of the inode in osc; return 0 if not found, 1 if found, and a negative value on error */
3169 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3170 ldlm_iterator_t replace, void *data)
3172 struct ldlm_res_id res_id;
3173 struct obd_device *obd = class_exp2obd(exp);
3176 osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3177 rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3178 if (rc == LDLM_ITER_STOP)
3180 if (rc == LDLM_ITER_CONTINUE)
3185 static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
3186 obd_enqueue_update_f upcall, void *cookie,
3189 int intent = *flags & LDLM_FL_HAS_INTENT;
3193 /* The request was created before ldlm_cli_enqueue call. */
3194 if (rc == ELDLM_LOCK_ABORTED) {
3195 struct ldlm_reply *rep;
3196 rep = req_capsule_server_get(&req->rq_pill,
3199 LASSERT(rep != NULL);
3200 if (rep->lock_policy_res1)
3201 rc = rep->lock_policy_res1;
3205 if ((intent && rc == ELDLM_LOCK_ABORTED) || !rc) {
3206 *flags |= LDLM_FL_LVB_READY;
3207 CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
3208 lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
3211 /* Call the update callback. */
3212 rc = (*upcall)(cookie, rc);
3216 static int osc_enqueue_interpret(const struct lu_env *env,
3217 struct ptlrpc_request *req,
3218 struct osc_enqueue_args *aa, int rc)
3220 struct ldlm_lock *lock;
3221 struct lustre_handle handle;
3224 /* Make a local copy of a lock handle and a mode, because aa->oa_*
3225 * might be freed anytime after lock upcall has been called. */
3226 lustre_handle_copy(&handle, aa->oa_lockh);
3227 mode = aa->oa_ei->ei_mode;
3229 /* ldlm_cli_enqueue is holding a reference on the lock, so it must still be valid. */
3231 lock = ldlm_handle2lock(&handle);
3233 /* Take an additional reference so that a blocking AST that
3234 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
3235 * to arrive after an upcall has been executed by
3236 * osc_enqueue_fini(). */
3237 ldlm_lock_addref(&handle, mode);
3239 /* Let the CP AST grant the lock first. */
3240 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
3242 /* Complete obtaining the lock procedure. */
3243 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
3244 mode, aa->oa_flags, aa->oa_lvb,
3245 sizeof(*aa->oa_lvb), &handle, rc);
3246 /* Complete osc stuff. */
3247 rc = osc_enqueue_fini(req, aa->oa_lvb,
3248 aa->oa_upcall, aa->oa_cookie, aa->oa_flags, rc);
3250 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
3252 /* Release the lock for async request. */
3253 if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
3255 * Releases a reference taken by ldlm_cli_enqueue(), if it is
3256 * not already released by
3257 * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
3259 ldlm_lock_decref(&handle, mode);
3261 LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
3262 aa->oa_lockh, req, aa);
3263 ldlm_lock_decref(&handle, mode);
3264 LDLM_LOCK_PUT(lock);
3268 void osc_update_enqueue(struct lustre_handle *lov_lockhp,
3269 struct lov_oinfo *loi, int flags,
3270 struct ost_lvb *lvb, __u32 mode, int rc)
3272 if (rc == ELDLM_OK) {
3273 struct ldlm_lock *lock = ldlm_handle2lock(lov_lockhp);
3276 LASSERT(lock != NULL);
3277 loi->loi_lvb = *lvb;
3278 tmp = loi->loi_lvb.lvb_size;
3279 /* Extend KMS up to the end of this lock and no further.
3280 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
3281 if (tmp > lock->l_policy_data.l_extent.end)
3282 tmp = lock->l_policy_data.l_extent.end + 1;
3283 if (tmp >= loi->loi_kms) {
3284 LDLM_DEBUG(lock, "lock acquired, setting rss="LPU64
3285 ", kms="LPU64, loi->loi_lvb.lvb_size, tmp);
3286 loi_kms_set(loi, tmp);
3288 LDLM_DEBUG(lock, "lock acquired, setting rss="
3289 LPU64"; leaving kms="LPU64", end="LPU64,
3290 loi->loi_lvb.lvb_size, loi->loi_kms,
3291 lock->l_policy_data.l_extent.end);
3293 ldlm_lock_allow_match(lock);
3294 LDLM_LOCK_PUT(lock);
3295 } else if (rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT)) {
3296 loi->loi_lvb = *lvb;
3297 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
3298 " kms="LPU64"\n", loi->loi_lvb.lvb_size, loi->loi_kms);
3302 EXPORT_SYMBOL(osc_update_enqueue);
3304 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
3306 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a
3307 * lock from the 2nd OSC before a lock from the 1st one.  This does not
3308 * deadlock with other synchronous requests, but holding some locks while
3309 * trying to obtain others may take a considerable amount of time if an OST
3310 * fails; and when other sync requests cannot get a lock released by such a
3311 * client, the client is excluded from the cluster -- such scenarios make
3312 * life difficult, so release locks just after they are obtained. */
3313 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3314 int *flags, ldlm_policy_data_t *policy,
3315 struct ost_lvb *lvb, int kms_valid,
3316 obd_enqueue_update_f upcall, void *cookie,
3317 struct ldlm_enqueue_info *einfo,
3318 struct lustre_handle *lockh,
3319 struct ptlrpc_request_set *rqset, int async)
3321 struct obd_device *obd = exp->exp_obd;
3322 struct ptlrpc_request *req = NULL;
3323 int intent = *flags & LDLM_FL_HAS_INTENT;
3328 /* Filesystem lock extents are extended to page boundaries so that
3329 * dealing with the page cache is a little smoother. */
3330 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3331 policy->l_extent.end |= ~CFS_PAGE_MASK;
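/* Example (illustrative, assuming 4 KiB pages, so ~CFS_PAGE_MASK == 0xfff):
 * a byte range [5000, 9000] is widened to [4096, 12287] -- the start is
 * rounded down and the end rounded up to page boundaries, so the DLM
 * extent always covers whole cache pages. */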
3334 * kms is not valid when either object is completely fresh (so that no
3335 * locks are cached), or object was evicted. In the latter case cached
3336 * lock cannot be used, because it would prime inode state with
3337 * potentially stale LVB.
3342 /* Next, search for already existing extent locks that will cover us */
3343 /* If we're trying to read, we also search for an existing PW lock. The
3344 * VFS and page cache already protect us locally, so lots of readers/
3345 * writers can share a single PW lock.
3347 * There are problems with conversion deadlocks, so instead of
3348 * converting a read lock to a write lock, we'll just enqueue a new
3351 * At some point we should cancel the read lock instead of making them
3352 * send us a blocking callback, but there are problems with canceling
3353 * locks out from other users right now, too. */
3354 mode = einfo->ei_mode;
3355 if (einfo->ei_mode == LCK_PR)
3357 mode = ldlm_lock_match(obd->obd_namespace,
3358 *flags | LDLM_FL_LVB_READY, res_id,
3359 einfo->ei_type, policy, mode, lockh, 0);
3361 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
3363 if (osc_set_lock_data_with_check(matched, einfo)) {
3364 /* addref the lock only for non-async requests, when a PW
3365 * lock was matched although we asked for PR. */
3366 if (!rqset && einfo->ei_mode != mode)
3367 ldlm_lock_addref(lockh, LCK_PR);
3369 /* I would like to be able to ASSERT here that
3370 * rss <= kms, but I can't, for reasons which
3371 * are explained in lov_enqueue() */
3374 /* We already have a lock, and it's referenced */
3375 (*upcall)(cookie, ELDLM_OK);
3377 /* For async requests, decref the lock. */
3378 if (einfo->ei_mode != mode)
3379 ldlm_lock_decref(lockh, LCK_PW);
3381 ldlm_lock_decref(lockh, einfo->ei_mode);
3382 LDLM_LOCK_PUT(matched);
3385 ldlm_lock_decref(lockh, mode);
3386 LDLM_LOCK_PUT(matched);
3391 CFS_LIST_HEAD(cancels);
3392 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3393 &RQF_LDLM_ENQUEUE_LVB);
3397 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
3399 ptlrpc_request_free(req);
3403 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
3405 ptlrpc_request_set_replen(req);
3408 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
3409 *flags &= ~LDLM_FL_BLOCK_GRANTED;
3411 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
3412 sizeof(*lvb), lockh, async);
3415 struct osc_enqueue_args *aa;
3416 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
3417 aa = ptlrpc_req_async_args(req);
3420 aa->oa_flags = flags;
3421 aa->oa_upcall = upcall;
3422 aa->oa_cookie = cookie;
3424 aa->oa_lockh = lockh;
3426 req->rq_interpret_reply =
3427 (ptlrpc_interpterer_t)osc_enqueue_interpret;
3428 if (rqset == PTLRPCD_SET)
3429 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
3431 ptlrpc_set_add_req(rqset, req);
3432 } else if (intent) {
3433 ptlrpc_req_finished(req);
3438 rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, rc);
3440 ptlrpc_req_finished(req);
3445 static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
3446 struct ldlm_enqueue_info *einfo,
3447 struct ptlrpc_request_set *rqset)
3449 struct ldlm_res_id res_id;
3453 osc_build_res_name(oinfo->oi_md->lsm_object_id,
3454 oinfo->oi_md->lsm_object_seq, &res_id);
3456 rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
3457 &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
3458 oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid,
3459 oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh,
3460 rqset, rqset != NULL);
3464 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3465 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
3466 int *flags, void *data, struct lustre_handle *lockh,
3469 struct obd_device *obd = exp->exp_obd;
3470 int lflags = *flags;
3474 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
3477 /* Filesystem lock extents are extended to page boundaries so that
3478 * dealing with the page cache is a little smoother */
3479 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3480 policy->l_extent.end |= ~CFS_PAGE_MASK;
3482 /* Next, search for already existing extent locks that will cover us */
3483 /* If we're trying to read, we also search for an existing PW lock. The
3484 * VFS and page cache already protect us locally, so lots of readers/
3485 * writers can share a single PW lock. */
3489 rc = ldlm_lock_match(obd->obd_namespace, lflags,
3490 res_id, type, policy, rc, lockh, unref);
3493 if (!osc_set_data_with_check(lockh, data)) {
3494 if (!(lflags & LDLM_FL_TEST_LOCK))
3495 ldlm_lock_decref(lockh, rc);
3499 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
3500 ldlm_lock_addref(lockh, LCK_PR);
3501 ldlm_lock_decref(lockh, LCK_PW);
3508 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
3512 if (unlikely(mode == LCK_GROUP))
3513 ldlm_lock_decref_and_cancel(lockh, mode);
3515 ldlm_lock_decref(lockh, mode);
3520 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
3521 __u32 mode, struct lustre_handle *lockh)
3524 RETURN(osc_cancel_base(lockh, mode));
3527 static int osc_cancel_unused(struct obd_export *exp,
3528 struct lov_stripe_md *lsm,
3529 ldlm_cancel_flags_t flags,
3532 struct obd_device *obd = class_exp2obd(exp);
3533 struct ldlm_res_id res_id, *resp = NULL;
3536 resp = osc_build_res_name(lsm->lsm_object_id,
3537 lsm->lsm_object_seq, &res_id);
3540 return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
3543 static int osc_statfs_interpret(const struct lu_env *env,
3544 struct ptlrpc_request *req,
3545 struct osc_async_args *aa, int rc)
3547 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
3548 struct obd_statfs *msfs;
3553 /* The request has in fact never been sent
3554 * due to issues at a higher level (LOV).
3555 * Exit immediately since the caller is
3556 * aware of the problem and takes care
3557 * of the cleanup */
3560 if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3561 (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3567 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3569 GOTO(out, rc = -EPROTO);
3572 /* Reinitialize the RDONLY and DEGRADED flags at the client
3573 * on each statfs, so they don't stay set permanently. */
3574 cfs_spin_lock(&cli->cl_oscc.oscc_lock);
3576 if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
3577 cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
3578 else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED))
3579 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED;
3581 if (unlikely(msfs->os_state & OS_STATE_READONLY))
3582 cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY;
3583 else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY))
3584 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY;
3586 /* Add a bit of hysteresis so this flag isn't continually flapping,
3587 * and ensure that new files don't get extremely fragmented due to
3588 * only a small amount of available space in the filesystem.
3589 * We want to set the NOSPC flag when there is less than ~0.1% free
3590 * and clear it when there is at least ~0.2% free space, so:
3591 *        avail < ~0.1% max            (max = avail + used)
3592 * 1025 * avail < avail + used         (used = blocks - free)
3593 * 1024 * avail < used
3594 * 1024 * avail < blocks - free
3595 *        avail < ((blocks - free) >> 10)
3597 * On a very large disk, say 16TB, 0.1% will be 16 GB.  We don't want to
3598 * lose that much space, so in those cases we report no space left
3599 * if there is less than 1 GB left. */
3600 used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30);
3601 if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) &&
3602 ((msfs->os_ffree < 32) || (msfs->os_bavail < used))))
3603 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC;
3604 else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3605 (msfs->os_ffree > 64) &&
3606 (msfs->os_bavail > (used << 1)))) {
3607 cli->cl_oscc.oscc_flags &= ~(OSCC_FLAG_NOSPC |
3608 OSCC_FLAG_NOSPC_BLK);
3611 if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3612 (msfs->os_bavail < used)))
3613 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC_BLK;
3615 cfs_spin_unlock(&cli->cl_oscc.oscc_lock);
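/* Worked example (illustrative numbers): with os_blocks = 1048576 and
 * os_bfree = 2048, used = min((1048576 - 2048) >> 10, 1 << 30) = 1022, so
 * NOSPC is set once os_bavail < 1022 (or fewer than 32 objects can be
 * created), and it is cleared only after os_bavail > 2044 and
 * os_ffree > 64, giving the ~2x hysteresis derived above. */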
3617 *aa->aa_oi->oi_osfs = *msfs;
3619 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3623 static int osc_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
3624 __u64 max_age, struct ptlrpc_request_set *rqset)
3626 struct ptlrpc_request *req;
3627 struct osc_async_args *aa;
3631 /* We could possibly pass max_age in the request (as an absolute
3632 * timestamp or a "seconds.usec ago") so the target can avoid doing
3633 * extra calls into the filesystem if that isn't necessary (e.g.
3634 * during mount that would help a bit). Having relative timestamps
3635 * is not so great if request processing is slow, while absolute
3636 * timestamps are not ideal because they need time synchronization. */
3637 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3641 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3643 ptlrpc_request_free(req);
3646 ptlrpc_request_set_replen(req);
3647 req->rq_request_portal = OST_CREATE_PORTAL;
3648 ptlrpc_at_set_req_timeout(req);
3650 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3651 /* procfs requests should not wait for the statfs reply, to avoid a deadlock */
3652 req->rq_no_resend = 1;
3653 req->rq_no_delay = 1;
3656 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
3657 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
3658 aa = ptlrpc_req_async_args(req);
3661 ptlrpc_set_add_req(rqset, req);
3665 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
3666 __u64 max_age, __u32 flags)
3668 struct obd_statfs *msfs;
3669 struct ptlrpc_request *req;
3670 struct obd_import *imp = NULL;
3674 /* Since the request might also come from lprocfs, we need to
3675 * sync this with client_disconnect_export (bug 15684) */
3676 cfs_down_read(&obd->u.cli.cl_sem);
3677 if (obd->u.cli.cl_import)
3678 imp = class_import_get(obd->u.cli.cl_import);
3679 cfs_up_read(&obd->u.cli.cl_sem);
3683 /* We could possibly pass max_age in the request (as an absolute
3684 * timestamp or a "seconds.usec ago") so the target can avoid doing
3685 * extra calls into the filesystem if that isn't necessary (e.g.
3686 * during mount that would help a bit). Having relative timestamps
3687 * is not so great if request processing is slow, while absolute
3688 * timestamps are not ideal because they need time synchronization. */
3689 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3691 class_import_put(imp);
3696 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3698 ptlrpc_request_free(req);
3701 ptlrpc_request_set_replen(req);
3702 req->rq_request_portal = OST_CREATE_PORTAL;
3703 ptlrpc_at_set_req_timeout(req);
3705 if (flags & OBD_STATFS_NODELAY) {
3706 /* procfs requests should not wait for the statfs reply, to avoid a deadlock */
3707 req->rq_no_resend = 1;
3708 req->rq_no_delay = 1;
3711 rc = ptlrpc_queue_wait(req);
3715 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3717 GOTO(out, rc = -EPROTO);
3724 ptlrpc_req_finished(req);
3728 /* Retrieve object striping information.
3730 * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
3731 * the maximum number of OST indices which will fit in the user buffer.
3732 * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
3734 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
3736 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
3737 struct lov_user_md_v3 lum, *lumk;
3738 struct lov_user_ost_data_v1 *lmm_objects;
3739 int rc = 0, lum_size;
3745 /* we only need the header part from user space to get lmm_magic and
3746 * lmm_stripe_count, (the header part is common to v1 and v3) */
3747 lum_size = sizeof(struct lov_user_md_v1);
3748 if (cfs_copy_from_user(&lum, lump, lum_size))
3751 if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
3752 (lum.lmm_magic != LOV_USER_MAGIC_V3))
3755 /* lov_user_md_vX and lov_mds_md_vX must have the same size */
3756 LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
3757 LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
3758 LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
3760 /* we can use lov_mds_md_size() to compute lum_size
3761 * because lov_user_md_vX and lov_mds_md_vX have the same size */
3762 if (lum.lmm_stripe_count > 0) {
3763 lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
3764 OBD_ALLOC(lumk, lum_size);
3768 if (lum.lmm_magic == LOV_USER_MAGIC_V1)
3769 lmm_objects = &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
3771 lmm_objects = &(lumk->lmm_objects[0]);
3772 lmm_objects->l_object_id = lsm->lsm_object_id;
3774 lum_size = lov_mds_md_size(0, lum.lmm_magic);
3778 lumk->lmm_object_id = lsm->lsm_object_id;
3779 lumk->lmm_object_seq = lsm->lsm_object_seq;
3780 lumk->lmm_stripe_count = 1;
3782 if (cfs_copy_to_user(lump, lumk, lum_size))
3786 OBD_FREE(lumk, lum_size);
3792 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3793 void *karg, void *uarg)
3795 struct obd_device *obd = exp->exp_obd;
3796 struct obd_ioctl_data *data = karg;
3800 if (!cfs_try_module_get(THIS_MODULE)) {
3801 CERROR("Can't get module. Is it alive?");
3805 case OBD_IOC_LOV_GET_CONFIG: {
3807 struct lov_desc *desc;
3808 struct obd_uuid uuid;
3812 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
3813 GOTO(out, err = -EINVAL);
3815 data = (struct obd_ioctl_data *)buf;
3817 if (sizeof(*desc) > data->ioc_inllen1) {
3818 obd_ioctl_freedata(buf, len);
3819 GOTO(out, err = -EINVAL);
3822 if (data->ioc_inllen2 < sizeof(uuid)) {
3823 obd_ioctl_freedata(buf, len);
3824 GOTO(out, err = -EINVAL);
3827 desc = (struct lov_desc *)data->ioc_inlbuf1;
3828 desc->ld_tgt_count = 1;
3829 desc->ld_active_tgt_count = 1;
3830 desc->ld_default_stripe_count = 1;
3831 desc->ld_default_stripe_size = 0;
3832 desc->ld_default_stripe_offset = 0;
3833 desc->ld_pattern = 0;
3834 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
3836 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
3838 err = cfs_copy_to_user((void *)uarg, buf, len);
3841 obd_ioctl_freedata(buf, len);
3844 case LL_IOC_LOV_SETSTRIPE:
3845 err = obd_alloc_memmd(exp, karg);
3849 case LL_IOC_LOV_GETSTRIPE:
3850 err = osc_getstripe(karg, uarg);
3852 case OBD_IOC_CLIENT_RECOVER:
3853 err = ptlrpc_recover_import(obd->u.cli.cl_import,
3854 data->ioc_inlbuf1, 0);
3858 case IOC_OSC_SET_ACTIVE:
3859 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
3862 case OBD_IOC_POLL_QUOTACHECK:
3863 err = lquota_poll_check(quota_interface, exp,
3864 (struct if_quotacheck *)karg);
3866 case OBD_IOC_PING_TARGET:
3867 err = ptlrpc_obd_ping(obd);
3870 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
3871 cmd, cfs_curproc_comm());
3872 GOTO(out, err = -ENOTTY);
3875 cfs_module_put(THIS_MODULE);
3879 static int osc_get_info(struct obd_export *exp, obd_count keylen,
3880 void *key, __u32 *vallen, void *val,
3881 struct lov_stripe_md *lsm)
3884 if (!vallen || !val)
3887 if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
3888 __u32 *stripe = val;
3889 *vallen = sizeof(*stripe);
3892 } else if (KEY_IS(KEY_LAST_ID)) {
3893 struct ptlrpc_request *req;
3898 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3899 &RQF_OST_GET_INFO_LAST_ID);
3903 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3904 RCL_CLIENT, keylen);
3905 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3907 ptlrpc_request_free(req);
3911 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3912 memcpy(tmp, key, keylen);
3914 req->rq_no_delay = req->rq_no_resend = 1;
3915 ptlrpc_request_set_replen(req);
3916 rc = ptlrpc_queue_wait(req);
3920 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
3922 GOTO(out, rc = -EPROTO);
3924 *((obd_id *)val) = *reply;
3926 ptlrpc_req_finished(req);
3928 } else if (KEY_IS(KEY_FIEMAP)) {
3929 struct ptlrpc_request *req;
3930 struct ll_user_fiemap *reply;
3934 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3935 &RQF_OST_GET_INFO_FIEMAP);
3939 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
3940 RCL_CLIENT, keylen);
3941 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
3942 RCL_CLIENT, *vallen);
3943 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
3944 RCL_SERVER, *vallen);
3946 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3948 ptlrpc_request_free(req);
3952 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
3953 memcpy(tmp, key, keylen);
3954 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
3955 memcpy(tmp, val, *vallen);
3957 ptlrpc_request_set_replen(req);
3958 rc = ptlrpc_queue_wait(req);
3962 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
3964 GOTO(out1, rc = -EPROTO);
3966 memcpy(val, reply, *vallen);
3968 ptlrpc_req_finished(req);
3976 static int osc_setinfo_mds_connect_import(struct obd_import *imp)
3978 struct llog_ctxt *ctxt;
3982 ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
3984 rc = llog_initiator_connect(ctxt);
3985 llog_ctxt_put(ctxt);
3987 /* XXX return an error? skip setting below flags? */
3990 cfs_spin_lock(&imp->imp_lock);
3991 imp->imp_server_timeout = 1;
3992 imp->imp_pingable = 1;
3993 cfs_spin_unlock(&imp->imp_lock);
3994 CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
3999 static int osc_setinfo_mds_conn_interpret(const struct lu_env *env,
4000 struct ptlrpc_request *req,
4007 RETURN(osc_setinfo_mds_connect_import(req->rq_import));
4010 static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
4011 void *key, obd_count vallen, void *val,
4012 struct ptlrpc_request_set *set)
4014 struct ptlrpc_request *req;
4015 struct obd_device *obd = exp->exp_obd;
4016 struct obd_import *imp = class_exp2cliimp(exp);
4021 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
4023 if (KEY_IS(KEY_NEXT_ID)) {
4025 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4027 if (vallen != sizeof(obd_id))
4032 if (vallen != sizeof(obd_id))
4035 /* avoid a race between allocating a new object and setting the
4036 * next id from the ll_sync thread */
4037 cfs_spin_lock(&oscc->oscc_lock);
4038 new_val = *((obd_id*)val) + 1;
4039 if (new_val > oscc->oscc_next_id)
4040 oscc->oscc_next_id = new_val;
4041 cfs_spin_unlock(&oscc->oscc_lock);
4042 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
4043 exp->exp_obd->obd_name,
4044 obd->u.cli.cl_oscc.oscc_next_id);
4049 if (KEY_IS(KEY_CHECKSUM)) {
4050 if (vallen != sizeof(int))
4052 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
4056 if (KEY_IS(KEY_SPTLRPC_CONF)) {
4057 sptlrpc_conf_client_adapt(obd);
4061 if (KEY_IS(KEY_FLUSH_CTX)) {
4062 sptlrpc_import_flush_my_ctx(imp);
4066 if (!set && !KEY_IS(KEY_GRANT_SHRINK))
4069 /* We pass all other commands directly to OST. Since nobody calls osc
4070 methods directly and everybody is supposed to go through LOV, we
4071 assume lov checked invalid values for us.
4072 The only recognised values so far are evict_by_nid and mds_conn.
4073 Even if something bad goes through, we'd get a -EINVAL from OST anyway. */
4076 if (KEY_IS(KEY_GRANT_SHRINK))
4077 req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
4079 req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
4084 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
4085 RCL_CLIENT, keylen);
4086 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
4087 RCL_CLIENT, vallen);
4088 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
4090 ptlrpc_request_free(req);
4094 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
4095 memcpy(tmp, key, keylen);
4096 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
4097 memcpy(tmp, val, vallen);
4099 if (KEY_IS(KEY_MDS_CONN)) {
4100 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4102 oscc->oscc_oa.o_seq = (*(__u32 *)val);
4103 oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
4104 LASSERT_SEQ_IS_MDT(oscc->oscc_oa.o_seq);
4105 req->rq_no_delay = req->rq_no_resend = 1;
4106 req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
4107 } else if (KEY_IS(KEY_GRANT_SHRINK)) {
4108 struct osc_grant_args *aa;
4111 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
4112 aa = ptlrpc_req_async_args(req);
4115 ptlrpc_req_finished(req);
4118 *oa = ((struct ost_body *)val)->oa;
4120 req->rq_interpret_reply = osc_shrink_grant_interpret;
4123 ptlrpc_request_set_replen(req);
4124 if (!KEY_IS(KEY_GRANT_SHRINK)) {
4125 LASSERT(set != NULL);
4126 ptlrpc_set_add_req(set, req);
4127 ptlrpc_check_set(NULL, set);
4129 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
4135 static struct llog_operations osc_size_repl_logops = {
4136 .lop_cancel = llog_obd_repl_cancel,
4139 static struct llog_operations osc_mds_ost_orig_logops;
4141 static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4142 struct obd_device *tgt, struct llog_catid *catid)
4147 rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1,
4148 &catid->lci_logid, &osc_mds_ost_orig_logops);
4150 CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n");
4154 rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1,
4155 NULL, &osc_size_repl_logops);
4157 struct llog_ctxt *ctxt =
4158 llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4161 CERROR("failed LLOG_SIZE_REPL_CTXT\n");
4166 CERROR("osc '%s' tgt '%s' catid %p rc=%d\n",
4167 obd->obd_name, tgt->obd_name, catid, rc);
4168 CERROR("logid "LPX64":0x%x\n",
4169 catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
4174 static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4175 struct obd_device *disk_obd, int *index)
4177 struct llog_catid catid;
4178 static char name[32] = CATLIST;
4182 LASSERT(olg == &obd->obd_olg);
4184 cfs_mutex_down(&olg->olg_cat_processing);
4185 rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
4187 CERROR("rc: %d\n", rc);
4191 CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
4192 obd->obd_name, *index, catid.lci_logid.lgl_oid,
4193 catid.lci_logid.lgl_oseq, catid.lci_logid.lgl_ogen);
4195 rc = __osc_llog_init(obd, olg, disk_obd, &catid);
4197 CERROR("rc: %d\n", rc);
4201 rc = llog_put_cat_list(disk_obd, name, *index, 1, &catid);
4203 CERROR("rc: %d\n", rc);
4208 cfs_mutex_up(&olg->olg_cat_processing);
static int osc_llog_finish(struct obd_device *obd, int count)
{
        struct llog_ctxt *ctxt;
        int rc = 0, rc2 = 0;
        ENTRY;

        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
        if (ctxt)
                rc = llog_cleanup(ctxt);

        ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
        if (ctxt)
                rc2 = llog_cleanup(ctxt);
        if (!rc)
                rc = rc2;

        RETURN(rc);
}
static int osc_reconnect(const struct lu_env *env,
                         struct obd_export *exp, struct obd_device *obd,
                         struct obd_uuid *cluuid,
                         struct obd_connect_data *data,
                         void *localdata)
{
        struct client_obd *cli = &obd->u.cli;

        if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
                long lost_grant;

                client_obd_list_lock(&cli->cl_loi_list_lock);
                data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
                                2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
                lost_grant = cli->cl_lost_grant;
                cli->cl_lost_grant = 0;
                client_obd_list_unlock(&cli->cl_loi_list_lock);

                CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
                       "cl_dirty: %ld cl_lost_grant: %ld\n", data->ocd_grant,
                       cli->cl_avail_grant, cli->cl_dirty, lost_grant);
                CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
                       " ocd_grant: %d\n", data->ocd_connect_flags,
                       data->ocd_version, data->ocd_grant);
        }

        RETURN(0);
}
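
/*
 * Note on the grant calculation above: "a ?: b" is the GNU C conditional
 * with omitted middle operand, equivalent to "a ? a : b" but evaluating
 * "a" only once.  A reconnecting client therefore asks for its currently
 * outstanding grant (avail + dirty) and falls back to two full RPCs worth
 * of grant only when that sum is zero, i.e. the expression behaves like:
 *
 *      sum = cli->cl_avail_grant + cli->cl_dirty;
 *      data->ocd_grant = sum != 0 ? sum :
 *                        2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
 */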
static int osc_disconnect(struct obd_export *exp)
{
        struct obd_device *obd = class_exp2obd(exp);
        struct llog_ctxt  *ctxt;
        int rc;

        ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
        if (ctxt) {
                if (obd->u.cli.cl_conn_count == 1) {
                        /* Flush any remaining cancel messages out to the
                         * target */
                        llog_sync(ctxt, exp);
                }
                llog_ctxt_put(ctxt);
        } else {
                CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
                       obd);
        }

        rc = client_disconnect_export(exp);
        /*
         * Initially we put del_shrink_grant before disconnect_export, but it
         * causes the following problem if setup (connect) and cleanup
         * (disconnect) are tangled together.
         *      connect p1                     disconnect p2
         *   ptlrpc_connect_import
         *     ...............                class_manual_cleanup
         *                                      osc_disconnect
         *                                      del_shrink_grant
         *   ptlrpc_connect_interrupt
         *     init_grant_shrink
         *   add this client to shrink list
         *                                      cleanup_osc
         * Bang! the pinger triggers the shrink.
         * So the osc should be removed from the shrink list only after we
         * are sure the import has been destroyed. BUG18662
         */
        if (obd->u.cli.cl_import == NULL)
                osc_del_shrink_grant(&obd->u.cli);
        return rc;
}
static int osc_import_event(struct obd_device *obd,
                            struct obd_import *imp,
                            enum obd_import_event event)
{
        struct client_obd *cli;
        int rc = 0;

        ENTRY;
        LASSERT(imp->imp_obd == obd);

        switch (event) {
        case IMP_EVENT_DISCON: {
                /* Only do this on the MDS OSCs */
                if (imp->imp_server_timeout) {
                        struct osc_creator *oscc = &obd->u.cli.cl_oscc;

                        cfs_spin_lock(&oscc->oscc_lock);
                        oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
                        cfs_spin_unlock(&oscc->oscc_lock);
                }
                cli = &obd->u.cli;
                client_obd_list_lock(&cli->cl_loi_list_lock);
                cli->cl_avail_grant = 0;
                cli->cl_lost_grant = 0;
                client_obd_list_unlock(&cli->cl_loi_list_lock);
                break;
        }
        case IMP_EVENT_INACTIVE: {
                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
                break;
        }
        case IMP_EVENT_INVALIDATE: {
                struct ldlm_namespace *ns = obd->obd_namespace;
                struct lu_env         *env;
                int                    refcheck;

                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        /* Reset grants */
                        cli = &obd->u.cli;
                        client_obd_list_lock(&cli->cl_loi_list_lock);
                        /* all pages go to failing rpcs due to the invalid
                         * import */
                        osc_check_rpcs(env, cli);
                        client_obd_list_unlock(&cli->cl_loi_list_lock);

                        ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
                        cl_env_put(env, &refcheck);
                } else
                        rc = PTR_ERR(env);
                break;
        }
        case IMP_EVENT_ACTIVE: {
                /* Only do this on the MDS OSCs */
                if (imp->imp_server_timeout) {
                        struct osc_creator *oscc = &obd->u.cli.cl_oscc;

                        cfs_spin_lock(&oscc->oscc_lock);
                        oscc->oscc_flags &= ~(OSCC_FLAG_NOSPC |
                                              OSCC_FLAG_NOSPC_BLK);
                        cfs_spin_unlock(&oscc->oscc_lock);
                }
                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
                break;
        }
        case IMP_EVENT_OCD: {
                struct obd_connect_data *ocd = &imp->imp_connect_data;

                if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
                        osc_init_grant(&obd->u.cli, ocd);

                /* See bug 7198 */
                if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
                        imp->imp_client->cli_request_portal =
                                OST_REQUEST_PORTAL;

                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
                break;
        }
        case IMP_EVENT_DEACTIVATE: {
                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
                break;
        }
        case IMP_EVENT_ACTIVATE: {
                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
                break;
        }
        default:
                CERROR("Unknown import event %d\n", event);
                LBUG();
        }
        RETURN(rc);
}
/**
 * Determine whether the lock can be canceled before replaying the lock
 * during recovery, see bug16774 for detailed information.
 *
 * \retval zero the lock can't be canceled
 * \retval other ok to cancel
 */
static int osc_cancel_for_recovery(struct ldlm_lock *lock)
{
        check_res_locked(lock->l_resource);

        /*
         * Cancel all unused extent locks with granted mode LCK_PR or LCK_CR.
         *
         * XXX as a future improvement, we could also cancel unused write
         * locks that cover no dirty data and no active mmaps.
         */
        if (lock->l_resource->lr_type == LDLM_EXTENT &&
            (lock->l_granted_mode == LCK_PR ||
             lock->l_granted_mode == LCK_CR) &&
            (osc_dlm_lock_pageref(lock) == 0))
                RETURN(1);

        RETURN(0);
}
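
/*
 * osc_cancel_for_recovery() is wired into the client namespace through
 * ns_register_cancel() in osc_setup() below; the DLM consults it during
 * recovery to decide which granted locks may simply be canceled instead
 * of being replayed to the OST.
 */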
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
        int rc;
        ENTRY;

        rc = ptlrpcd_addref();
        if (rc)
                RETURN(rc);

        rc = client_obd_setup(obd, lcfg);
        if (rc) {
                ptlrpcd_decref();
        } else {
                struct lprocfs_static_vars lvars = { 0 };
                struct client_obd *cli = &obd->u.cli;

                cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
                lprocfs_osc_init_vars(&lvars);
                if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
                        lproc_osc_attach_seqstat(obd);
                        sptlrpc_lprocfs_cliobd_attach(obd);
                        ptlrpc_lprocfs_register_obd(obd);
                }

                oscc_init(obd);
                /* We need to allocate a few requests more, because
                   brw_interpret tries to create new requests before freeing
                   previous ones. Ideally we want to have 2x max_rpcs_in_flight
                   reserved, but that might be too much wasted RAM in practice,
                   so +2 is just a guess that should still work. */
                cli->cl_import->imp_rq_pool =
                        ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
                                            OST_MAXREQSIZE,
                                            ptlrpc_add_rqs_to_pool);

                CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
                cfs_sema_init(&cli->cl_grant_sem, 1);

                ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
        }

        RETURN(rc);
}
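
/*
 * Sizing note for the rq_pool above (assuming the usual default of
 * cl_max_rpcs_in_flight = 8): the pool pre-allocates 8 + 2 = 10 emergency
 * requests of OST_MAXREQSIZE bytes, so brw_interpret() can always start a
 * replacement RPC even before the completed one it is interpreting has
 * been freed.
 */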
static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
        int rc = 0;
        ENTRY;

        switch (stage) {
        case OBD_CLEANUP_EARLY: {
                struct obd_import *imp;
                imp = obd->u.cli.cl_import;
                CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
                /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
                ptlrpc_deactivate_import(imp);
                cfs_spin_lock(&imp->imp_lock);
                imp->imp_pingable = 0;
                cfs_spin_unlock(&imp->imp_lock);
                break;
        }
        case OBD_CLEANUP_EXPORTS: {
                /* For the echo client, the export may be on the zombie list;
                 * wait for the zombie thread to cull it, because cli.cl_import
                 * will be cleared in client_disconnect_export():
                 *   class_export_destroy() -> obd_cleanup() ->
                 *   echo_device_free() -> echo_client_cleanup() ->
                 *   obd_disconnect() -> osc_disconnect() ->
                 *   client_disconnect_export()
                 */
                obd_zombie_barrier();
                obd_cleanup_client_import(obd);
                ptlrpc_lprocfs_unregister_obd(obd);
                lprocfs_obd_cleanup(obd);
                rc = obd_llog_finish(obd, 0);
                if (rc != 0)
                        CERROR("failed to cleanup llogging subsystems\n");
                break;
        }
        }
        RETURN(rc);
}
int osc_cleanup(struct obd_device *obd)
{
        int rc;
        ENTRY;

        /* free memory of osc quota cache */
        lquota_cleanup(quota_interface, obd);

        rc = client_obd_cleanup(obd);

        ptlrpcd_decref();
        RETURN(rc);
}
int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
{
        struct lprocfs_static_vars lvars = { 0 };
        int rc = 0;

        lprocfs_osc_init_vars(&lvars);
        switch (lcfg->lcfg_command) {
        default:
                rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
                                              lcfg, obd);
                if (rc > 0)
                        rc = 0;
                break;
        }
        return rc;
}

static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
{
        return osc_process_config_base(obd, buf);
}
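
/*
 * Illustrative usage (an assumption, not shown in this file): a tunable set
 * with, e.g.,
 *
 *      lctl conf_param testfs-OST0000.osc.active=0
 *
 * typically arrives here as an LCFG_PARAM record and is matched against the
 * osc proc entries by class_process_proc_param() above; a positive return
 * (parameters consumed) is folded to success.
 */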
struct obd_ops osc_obd_ops = {
        .o_owner                = THIS_MODULE,
        .o_setup                = osc_setup,
        .o_precleanup           = osc_precleanup,
        .o_cleanup              = osc_cleanup,
        .o_add_conn             = client_import_add_conn,
        .o_del_conn             = client_import_del_conn,
        .o_connect              = client_connect_import,
        .o_reconnect            = osc_reconnect,
        .o_disconnect           = osc_disconnect,
        .o_statfs               = osc_statfs,
        .o_statfs_async         = osc_statfs_async,
        .o_packmd               = osc_packmd,
        .o_unpackmd             = osc_unpackmd,
        .o_precreate            = osc_precreate,
        .o_create               = osc_create,
        .o_create_async         = osc_create_async,
        .o_destroy              = osc_destroy,
        .o_getattr              = osc_getattr,
        .o_getattr_async        = osc_getattr_async,
        .o_setattr              = osc_setattr,
        .o_setattr_async        = osc_setattr_async,
        .o_brw                  = osc_brw,
        .o_punch                = osc_punch,
        .o_sync                 = osc_sync,
        .o_enqueue              = osc_enqueue,
        .o_change_cbdata        = osc_change_cbdata,
        .o_find_cbdata          = osc_find_cbdata,
        .o_cancel               = osc_cancel,
        .o_cancel_unused        = osc_cancel_unused,
        .o_iocontrol            = osc_iocontrol,
        .o_get_info             = osc_get_info,
        .o_set_info_async       = osc_set_info_async,
        .o_import_event         = osc_import_event,
        .o_llog_init            = osc_llog_init,
        .o_llog_finish          = osc_llog_finish,
        .o_process_config       = osc_process_config,
};
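
/*
 * A minimal sketch (assumed shape, not code from this file) of how the
 * generic OBD layer dispatches through the method table above; the real
 * dispatch macros in obd_class.h additionally check that the method
 * exists and update statistics.
 */
#if 0
static inline int example_obd_setup(struct obd_device *obd,
                                    struct lustre_cfg *lcfg)
{
        /* for an OSC device this resolves to osc_setup() */
        return obd->obd_type->typ_dt_ops->o_setup(obd, lcfg);
}
#endif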
extern struct lu_kmem_descr  osc_caches[];
extern cfs_spinlock_t        osc_ast_guard;
extern cfs_lock_class_key_t  osc_ast_guard_class;

int __init osc_init(void)
{
        struct lprocfs_static_vars lvars = { 0 };
        int rc;
        ENTRY;

        /* print the address of _any_ initialized kernel symbol from this
         * module, to allow debugging with a gdb that doesn't support data
         * symbols from modules. */
        CDEBUG(D_CONSOLE, "Lustre OSC module (%p).\n", &osc_caches);

        rc = lu_kmem_init(osc_caches);

        lprocfs_osc_init_vars(&lvars);

        cfs_request_module("lquota");
        quota_interface = PORTAL_SYMBOL_GET(osc_quota_interface);
        lquota_init(quota_interface);
        init_obd_quota_ops(quota_interface, &osc_obd_ops);

        rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
                                 LUSTRE_OSC_NAME, &osc_device_type);
        if (rc) {
                if (quota_interface)
                        PORTAL_SYMBOL_PUT(osc_quota_interface);
                lu_kmem_fini(osc_caches);
                RETURN(rc);
        }

        cfs_spin_lock_init(&osc_ast_guard);
        cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);

        osc_mds_ost_orig_logops = llog_lvfs_ops;
        osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
        osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
        osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
        osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;

        RETURN(rc);
}
static void /*__exit*/ osc_exit(void)
{
        lu_device_type_fini(&osc_device_type);

        lquota_exit(quota_interface);
        if (quota_interface)
                PORTAL_SYMBOL_PUT(osc_quota_interface);

        class_unregister_type(LUSTRE_OSC_NAME);
        lu_kmem_fini(osc_caches);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
MODULE_LICENSE("GPL");

cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);