/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
 * Author Peter Braam <braam@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 *
 * For testing and management it is treated as an obd_device,
 * although it does not export a full OBD method table (the
 * requests are coming in over the wire, so object target modules
 * do not have a full method table.)
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSC

#include <libcfs/libcfs.h>
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <lustre_net.h>
#include <lustre/lustre_user.h>
#include <obd_cksum.h>
#include <lustre_ha.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
#include <lustre_debug.h>
#include <lustre_param.h>
#include <lustre_cache.h>
#include "osc_internal.h"

static quota_interface_t *quota_interface = NULL;
extern quota_interface_t osc_quota_interface;

static void osc_release_ppga(struct brw_page **ppga, obd_count count);
static int brw_interpret(struct ptlrpc_request *request, void *data, int rc);
int osc_cleanup(struct obd_device *obd);
/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
                      struct lov_stripe_md *lsm)
        lmm_size = sizeof(**lmmp);
        OBD_FREE(*lmmp, lmm_size);
        OBD_ALLOC(*lmmp, lmm_size);
        LASSERT(lsm->lsm_object_id);
        LASSERT(lsm->lsm_object_gr);
        (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
        (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
        if (lmm_bytes < sizeof(*lmm)) {
                CERROR("lov_mds_md too small: %d, need %d\n",
                       lmm_bytes, (int)sizeof(*lmm));
        /* XXX LOV_MAGIC etc check? */
        if (lmm->lmm_object_id == 0) {
                CERROR("lov_mds_md: zero lmm_object_id\n");
        lsm_size = lov_stripe_md_size(1);
        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                OBD_FREE(*lsmp, lsm_size);
        OBD_ALLOC(*lsmp, lsm_size);
        OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
        if ((*lsmp)->lsm_oinfo[0] == NULL) {
                OBD_FREE(*lsmp, lsm_size);
        loi_init((*lsmp)->lsm_oinfo[0]);
        /* XXX zero *lsmp? */
        (*lsmp)->lsm_object_id = le64_to_cpu(lmm->lmm_object_id);
        (*lsmp)->lsm_object_gr = le64_to_cpu(lmm->lmm_object_gr);
        LASSERT((*lsmp)->lsm_object_id);
        LASSERT((*lsmp)->lsm_object_gr);
        (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
static inline void osc_pack_capa(struct ptlrpc_request *req,
                                 struct ost_body *body, void *capa)
        struct obd_capa *oc = (struct obd_capa *)capa;
        struct lustre_capa *c;

        c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
        body->oa.o_valid |= OBD_MD_FLOSSCAPA;
        DEBUG_CAPA(D_SEC, c, "pack");

static inline void osc_pack_req_body(struct ptlrpc_request *req,
                                     struct obd_info *oinfo)
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        body->oa = *oinfo->oi_oa;
        osc_pack_capa(req, body, oinfo->oi_capa);

static inline void osc_set_capa_size(struct ptlrpc_request *req,
                                     const struct req_msg_field *field,
        req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
        /* it is already calculated as sizeof struct obd_capa */
static int osc_getattr_interpret(struct ptlrpc_request *req,
                                 struct osc_async_args *aa, int rc)
        struct ost_body *body;

        body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
                                  lustre_swab_ost_body);
        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        memcpy(aa->aa_oi->oi_oa, &body->oa, sizeof(*aa->aa_oi->oi_oa));

        /* This should really be sent by the OST */
        aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
        CDEBUG(D_INFO, "can't unpack ost_body\n");
        aa->aa_oi->oi_oa->o_valid = 0;
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);

static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct ptlrpc_request_set *set)
        struct ptlrpc_request *req;
        struct osc_async_args *aa;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        ptlrpc_request_free(req);
        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_getattr_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct osc_async_args *)&req->rq_async_args;

        ptlrpc_set_add_req(set, req);
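
/*
 * The async pattern above recurs throughout this file: allocate the
 * request, pack the body, set the reply length, point rq_interpret_reply
 * at a completion callback, stash the callback's context in the
 * fixed-size rq_async_args scratch area, and queue the request on the
 * caller's set.  The CLASSERT is the compile-time guard that the
 * per-request context actually fits in that scratch area.
 */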
static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
        struct ptlrpc_request *req;
        struct ost_body *body;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        ptlrpc_request_free(req);
        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        GOTO(out, rc = -EPROTO);
        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        *oinfo->oi_oa = body->oa;

        /* This should really be sent by the OST */
        oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
        ptlrpc_req_finished(req);
static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
                       struct obd_trans_info *oti)
        struct ptlrpc_request *req;
        struct ost_body *body;

        LASSERT(!(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP) ||
                oinfo->oi_oa->o_gr > 0);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        ptlrpc_request_free(req);
        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        GOTO(out, rc = -EPROTO);
        *oinfo->oi_oa = body->oa;
        ptlrpc_req_finished(req);
static int osc_setattr_interpret(struct ptlrpc_request *req,
                                 struct osc_async_args *aa, int rc)
        struct ost_body *body;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        GOTO(out, rc = -EPROTO);
        *aa->aa_oi->oi_oa = body->oa;
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);

static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct obd_trans_info *oti,
                             struct ptlrpc_request_set *rqset)
        struct ptlrpc_request *req;
        struct osc_async_args *aa;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        ptlrpc_request_free(req);
        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
                *obdo_logcookie(oinfo->oi_oa) = *oti->oti_logcookies;
        /* do MDS-to-OST setattr asynchronously */
        /* Do not wait for response. */
        ptlrpcd_add_req(req);
        req->rq_interpret_reply = osc_setattr_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct osc_async_args *)&req->rq_async_args;

        ptlrpc_set_add_req(rqset, req);
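
/*
 * Note the two completion modes above: with no caller-supplied set (the
 * MDS-to-OST setattr case) the request is handed to ptlrpcd and no reply
 * is waited for, while callers that do pass an rqset get the reply
 * decoded by osc_setattr_interpret() and forwarded through oi_cb_up().
 */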
int osc_real_create(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md **ea, struct obd_trans_info *oti)
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct lov_stripe_md *lsm;

        rc = obd_alloc_memmd(exp, &lsm);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        GOTO(out, rc = -ENOMEM);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        ptlrpc_request_free(req);
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        ptlrpc_request_set_replen(req);

        if (oa->o_valid & OBD_MD_FLINLINE) {
                LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
                        oa->o_flags == OBD_FL_DELORPHAN);
                       "delorphan from OST integration");
                /* Don't resend the delorphan req */
                req->rq_no_resend = req->rq_no_delay = 1;

        rc = ptlrpc_queue_wait(req);
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        GOTO(out_req, rc = -EPROTO);

        /* This should really be sent by the OST */
        oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way. */
        lsm->lsm_object_id = oa->o_id;
        lsm->lsm_object_gr = oa->o_gr;

        oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
        if (oa->o_valid & OBD_MD_FLCOOKIE) {
                if (!oti->oti_logcookies)
                        oti_alloc_cookies(oti, 1);
                *oti->oti_logcookies = *obdo_logcookie(oa);

        CDEBUG(D_HA, "transno: "LPD64"\n",
               lustre_msg_get_transno(req->rq_repmsg));
        ptlrpc_req_finished(req);
        obd_free_memmd(exp, &lsm);
static int osc_punch_interpret(struct ptlrpc_request *req,
                               struct osc_async_args *aa, int rc)
        struct ost_body *body;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        GOTO(out, rc = -EPROTO);
        *aa->aa_oi->oi_oa = body->oa;
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);

static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
                     struct obd_trans_info *oti,
                     struct ptlrpc_request_set *rqset)
        struct ptlrpc_request *req;
        struct osc_async_args *aa;
        struct ost_body *body;

        CDEBUG(D_INFO, "oa NULL\n");
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        ptlrpc_request_free(req);
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);
        osc_pack_req_body(req, oinfo);

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        body->oa.o_size = oinfo->oi_policy.l_extent.start;
        body->oa.o_blocks = oinfo->oi_policy.l_extent.end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_punch_interpret;
        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct osc_async_args *)&req->rq_async_args;
        ptlrpc_set_add_req(rqset, req);
static int osc_sync(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md *md, obd_size start, obd_size end,
        struct ptlrpc_request *req;
        struct ost_body *body;

        CDEBUG(D_INFO, "oa NULL\n");
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        osc_set_capa_size(req, &RMF_CAPA1, capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        ptlrpc_request_free(req);

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
        osc_pack_capa(req, body, capa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        GOTO(out, rc = -EPROTO);
        ptlrpc_req_finished(req);
/* Find and cancel locally the locks matched by @mode in the resource
 * found by @objid. Found locks are added to the @cancels list. Returns
 * the number of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels, ldlm_mode_t mode,
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id = { .name = { oa->o_id, 0, oa->o_gr, 0 } };
        struct ldlm_resource *res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);

        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        ldlm_resource_putref(res);
static int osc_destroy_interpret(struct ptlrpc_request *req, void *data,
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        cfs_waitq_signal(&cli->cl_destroy_waitq);

static int osc_can_send_destroy(struct client_obd *cli)
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                cfs_waitq_signal(&cli->cl_destroy_waitq);
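
/*
 * The throttle above optimistically bumps cl_destroy_in_flight and only
 * backs the increment out if the limit was exceeded.  If the decrement
 * then observes that a slot has meanwhile opened up, it signals
 * cl_destroy_waitq so a waiter re-checks rather than sleeping on a
 * stale count.
 */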
/* Destroy requests can always be async on the client, and we don't even
 * really care about the return code since the client cannot do anything
 * at all about it.
 * When the MDS is unlinking a filename, it saves the file objects into a
 * recovery llog, and these object records are cancelled when the OST
 * reports they were destroyed and sync'd to disk (i.e. transaction
 * committed). If the client dies, or the OST is down when the object
 * should be destroyed, the records are not cancelled, and when the OST
 * reconnects to the MDS next, it will retrieve the llog unlink logs and
 * then send the log cancellation cookies to the MDS after committing
 * destroy transactions. */
static int osc_destroy(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *ea, struct obd_trans_info *oti,
                       struct obd_export *md_export)
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body *body;
        CFS_LIST_HEAD(cancels);

        CDEBUG(D_INFO, "oa NULL\n");
        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        ldlm_lock_list_put(&cancels, l_bl_ast, count);
        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
        ptlrpc_request_free(req);

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        req->rq_interpret_reply = osc_destroy_interpret;
        ptlrpc_at_set_req_timeout(req);

        if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
                memcpy(obdo_logcookie(oa), oti->oti_logcookies,
                       sizeof(*oti->oti_logcookies));
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        ptlrpc_request_set_replen(req);

        if (!osc_can_send_destroy(cli)) {
                struct l_wait_info lwi = { 0 };

                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below max_rpcs_in_flight.
                 */
                l_wait_event_exclusive(cli->cl_destroy_waitq,
                                       osc_can_send_destroy(cli), &lwi);

        /* Do not wait for response */
        ptlrpcd_add_req(req);
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
        obd_flag bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty;
        if (cli->cl_dirty > cli->cl_dirty_max) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty, cli->cl_dirty_max);
        } else if (atomic_read(&obd_dirty_pages) > obd_max_dirty_pages) {
                CERROR("dirty %d > system dirty_max %d\n",
                       atomic_read(&obd_dirty_pages), obd_max_dirty_pages);
        } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty, cli->cl_dirty_max);
                long max_in_flight = (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT) *
                                     (cli->cl_max_rpcs_in_flight + 1);
                oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
        oa->o_grant = cli->cl_avail_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
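
/*
 * Each bulk request announces this accounting to the OST: how much the
 * client has dirty, how much more it could dirty (o_undirty), the grant
 * it still holds, and any grant it lost track of (o_dropped).  That
 * lets the server keep its grant bookkeeping in sync without dedicated
 * RPCs.
 */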
/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
                                    struct brw_page *pga)
        atomic_inc(&obd_dirty_pages);
        cli->cl_dirty += CFS_PAGE_SIZE;
        cli->cl_avail_grant -= CFS_PAGE_SIZE;
        pga->flag |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
               CFS_PAGE_SIZE, pga, pga->pg);
        LASSERT(cli->cl_avail_grant >= 0);

/* the companion to osc_consume_write_grant, called when a brw has
 * completed. must be called with the loi lock held. */
static void osc_release_write_grant(struct client_obd *cli,
                                    struct brw_page *pga, int sent)
        int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;

        if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
        pga->flag &= ~OBD_BRW_FROM_GRANT;
        atomic_dec(&obd_dirty_pages);
        cli->cl_dirty -= CFS_PAGE_SIZE;
        if (!sent) {
                cli->cl_lost_grant += CFS_PAGE_SIZE;
                CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
                       cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
        } else if (CFS_PAGE_SIZE != blocksize && pga->count != CFS_PAGE_SIZE) {
                /* For short writes we shouldn't count parts of pages that
                 * span a whole block on the OST side, or our accounting goes
                 * wrong. Should match the code in filter_grant_check. */
                int offset = pga->off & ~CFS_PAGE_MASK;
                int count = pga->count + (offset & (blocksize - 1));
                int end = (offset + pga->count) & (blocksize - 1);
                        count += blocksize - end;
                cli->cl_lost_grant += CFS_PAGE_SIZE - count;
                CDEBUG(D_CACHE, "lost %lu grant: %lu avail: %lu dirty: %lu\n",
                       CFS_PAGE_SIZE - count, cli->cl_lost_grant,
                       cli->cl_avail_grant, cli->cl_dirty);
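
/*
 * Illustrative example (assumed sizes, not taken from this file): with
 * a 64KB CFS_PAGE_SIZE and a 4KB OST block size, a page carrying only a
 * 1000-byte write is rounded up to one 4KB block on the OST, so
 * 64KB - 4KB = 60KB of the grant consumed for that page is recorded in
 * cl_lost_grant instead of leaking from the accounting.
 */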
static unsigned long rpcs_in_flight(struct client_obd *cli)
        return cli->cl_r_in_flight + cli->cl_w_in_flight;

/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
        struct list_head *l, *tmp;
        struct osc_cache_waiter *ocw;

        list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                /* if we can't dirty more, we must wait until some is written */
                if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
                    (atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
                               "osc max %ld, sys max %d\n", cli->cl_dirty,
                               cli->cl_dirty_max, obd_max_dirty_pages);

                /* if there is still dirty cache but no grant, wait for pending
                 * RPCs that may yet return us some grant before doing sync
                 * writes */
                if (cli->cl_w_in_flight && cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
                               cli->cl_w_in_flight);

                ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
                list_del_init(&ocw->ocw_entry);
                if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        /* no more RPCs in flight to return grant, do sync IO */
                        ocw->ocw_rc = -EDQUOT;
                        CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
                        osc_consume_write_grant(cli,
                                                &ocw->ocw_oap->oap_brw_page);

                cfs_waitq_signal(&ocw->ocw_waitq);
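
/*
 * A woken waiter thus sees one of two outcomes: either grant was
 * consumed on its behalf and it can continue with cached IO, or ocw_rc
 * is -EDQUOT and it must fall back to synchronous IO because no
 * in-flight RPC is left that could return more grant.
 */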
static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
        client_obd_list_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
               cli->cl_avail_grant, cli->cl_lost_grant);
        LASSERT(cli->cl_avail_grant >= 0);

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
        client_obd_list_lock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
        if (body->oa.o_valid & OBD_MD_FLGRANT)
                cli->cl_avail_grant += body->oa.o_grant;
        /* waiters are woken in brw_interpret */
        client_obd_list_unlock(&cli->cl_loi_list_lock);
/* We assume that the reason this OSC got a short read is that it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just
 * that this stripe never got written at or beyond this stripe offset
 * yet. */
static void handle_short_read(int nob_read, obd_count page_count,
                              struct brw_page **pga)
        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = cfs_kmap(pga[i]->pg) +
                              (pga[i]->off & ~CFS_PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        cfs_kunmap(pga[i]->pg);

                nob_read -= pga[i]->count;

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                cfs_kunmap(pga[i]->pg);
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           obd_count page_count, struct brw_page **pga)
        /* return error if any niobuf was in error */
        remote_rcs = lustre_swab_repbuf(req, REQ_REC_OFF + 1,
                                        sizeof(*remote_rcs) * niocount, NULL);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");

        if (lustre_msg_swabbed(req->rq_repmsg))
                for (i = 0; i < niocount; i++)
                        __swab32s(&remote_rcs[i]);

        for (i = 0; i < niocount; i++) {
                if (remote_rcs[i] < 0)
                        return (remote_rcs[i]);

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], req);

        if (req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       requested_nob, req->rq_bulk->bd_nob_transferred);
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
        if (p1->flag != p2->flag) {
                unsigned mask = ~OBD_BRW_FROM_GRANT;

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if ((p1->flag & mask) != (p2->flag & mask))
                        CERROR("is it ok to have flags 0x%x and 0x%x in the "
                               "same brw?\n", p1->flag, p2->flag);

        return (p1->off + p1->count == p2->off);
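
/*
 * Pages merge into a single remote niobuf only when their flags match
 * and they are byte-contiguous in the object; the OBD_BRW_FROM_GRANT
 * bit is masked out of the mismatch warning above since it is purely
 * client-side grant-accounting state and never needs to reach the wire.
 */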
static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
                                   struct brw_page **pga, int opc,
                                   cksum_type_t cksum_type)
        LASSERT(pg_count > 0);
        cksum = init_checksum(cksum_type);
        while (nob > 0 && pg_count > 0) {
                unsigned char *ptr = cfs_kmap(pga[i]->pg);
                int off = pga[i]->off & ~CFS_PAGE_MASK;
                int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        memcpy(ptr + off, "bad1", min(4, nob));
                cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
                cfs_kunmap(pga[i]->pg);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
                nob -= pga[i]->count;

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
static int osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                                struct lov_stripe_md *lsm, obd_count page_count,
                                struct brw_page **pga,
                                struct ptlrpc_request **reqp,
                                struct obd_capa *ocapa)
        struct ptlrpc_request *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body *body;
        struct obd_ioobj *ioobj;
        struct niobuf_remote *niobuf;
        int niocount, i, requested_nob, opc, rc;
        struct osc_brw_async_args *aa;
        struct req_capsule *pill;

        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                cli->cl_import->imp_rq_pool,
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW);

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));
        osc_set_capa_size(req, &RMF_CAPA1, ocapa);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
        ptlrpc_request_free(req);
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (opc == OST_WRITE)
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_GET_SOURCE, OST_BULK_PORTAL);
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_PUT_SINK, OST_BULK_PORTAL);
        GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body && ioobj && niobuf);

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        osc_pack_capa(req, body, ocapa);
        LASSERT(page_count > 0);
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = pga[i];
                struct brw_page *pg_prev = pga[i - 1];

                LASSERT(pg->count > 0);
                LASSERTF((pg->off & ~CFS_PAGE_MASK) + pg->count <= CFS_PAGE_SIZE,
                         "i: %d pg: %p off: "LPU64", count: %u\n", i, pg,
                         pg->off, pg->count);
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
                         pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                         pg_prev->pg, page_private(pg_prev->pg),
                         pg_prev->pg->index, pg_prev->off);
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u\n", i, page_count);
                LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                        (pg->flag & OBD_BRW_SRVLOCK));

                ptlrpc_prep_bulk_page(desc, pg->pg, pg->off & ~CFS_PAGE_MASK,
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf->len += pg->count;
                        niobuf->offset = pg->off;
                        niobuf->len = pg->count;
                        niobuf->flags = pg->flag;

        LASSERT((void *)(niobuf - niocount) ==
                lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 2,
                               niocount * sizeof(*niobuf)));
        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);

        /* size[REQ_REC_OFF] still sizeof (*body) */
        if (opc == OST_WRITE) {
                if (unlikely(cli->cl_checksum) &&
                    req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) {
                        /* store cl_cksum_type in a local variable since
                         * it can be changed via lprocfs */
                        cksum_type_t cksum_type = cli->cl_cksum_type;

                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                oa->o_flags = body->oa.o_flags = 0;
                        body->oa.o_flags |= cksum_type_pack(cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        body->oa.o_cksum = osc_checksum_bulk(requested_nob,
                        CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                        /* save this in 'oa', too, for later checking */
                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        oa->o_flags |= cksum_type_pack(cksum_type);
                        /* clear out the checksum flag, in case this is a
                         * resend but cl_checksum is no longer set. b=11238 */
                        oa->o_valid &= ~OBD_MD_FLCKSUM;
                oa->o_cksum = body->oa.o_cksum;
                /* 1 RC per niobuf */
                req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER,
                                     sizeof(__u32) * niocount);
                if (unlikely(cli->cl_checksum) &&
                    req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) {
                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;
                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER, 0);
                /* 1 RC for the whole I/O */
        ptlrpc_request_set_replen(req);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct osc_brw_async_args *)&req->rq_async_args;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = niocount;
        aa->aa_page_count = page_count;
        CFS_INIT_LIST_HEAD(&aa->aa_oaps);
        ptlrpc_req_finished(req);
static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
                                __u32 client_cksum, __u32 server_cksum, int nob,
                                obd_count page_count, struct brw_page **pga,
                                cksum_type_t client_cksum_type)
        cksum_type_t cksum_type;

        if (server_cksum == client_cksum) {
                CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);

        if (oa->o_valid & OBD_MD_FLFLAGS)
                cksum_type = cksum_type_unpack(oa->o_flags);
                cksum_type = OBD_CKSUM_CRC32;

        new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,

        if (cksum_type != client_cksum_type)
                msg = "the server did not use the checksum type specified in "
                      "the original request - likely a protocol problem";
        else if (new_cksum == server_cksum)
                msg = "changed on the client after we checksummed it - "
                      "likely false positive due to mmap IO (bug 11742)";
        else if (new_cksum == client_cksum)
                msg = "changed in transit before arrival at OST";
                msg = "changed in transit AND doesn't match the original - "
                      "likely false positive due to mmap IO (bug 11742)";

        LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inum "
                           LPU64"/"LPU64" object "LPU64"/"LPU64" extent "
                           "["LPU64"-"LPU64"]\n",
                           msg, libcfs_nid2str(peer->nid),
                           oa->o_valid & OBD_MD_FLFID ? oa->o_fid : (__u64)0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
                           oa->o_valid & OBD_MD_FLGROUP ? oa->o_gr : (__u64)0,
                           pga[page_count-1]->off + pga[page_count-1]->count - 1);
        CERROR("original client csum %x (type %x), server csum %x (type %x), "
               "client csum now %x\n", client_cksum, client_cksum_type,
               server_cksum, cksum_type, new_cksum);
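
/*
 * Re-checksumming the pages locally with the algorithm the server
 * actually used is what lets the message above pinpoint where the
 * corruption happened: matching the server's value means the pages
 * changed on the client after the original checksum (typically mmap
 * IO), matching the original client value means the data changed in
 * transit, and matching neither is reported as both.
 */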
/* Note rc enters this function as number of bytes transferred */
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
        struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
        const lnet_process_id_t *peer =
                &req->rq_import->imp_connection->c_peer;
        struct client_obd *cli = aa->aa_cli;
        struct ost_body *body;
        __u32 client_cksum = 0;

        if (rc < 0 && rc != -EDQUOT)

        LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
        body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
                                  lustre_swab_ost_body);
        CDEBUG(D_INFO, "Can't unpack body\n");

        /* set/clear over quota flag for a uid/gid */
        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
            body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA))
                lquota_setdq(quota_interface, cli, body->oa.o_uid,
                             body->oa.o_gid, body->oa.o_valid,

        if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
                client_cksum = aa->aa_oa->o_cksum; /* save for later */

        osc_update_grant(cli, body);

        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
                CERROR("Unexpected +ve rc %d\n", rc);
                LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);

                if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
                    check_write_checksum(&body->oa, peer, client_cksum,
                                         body->oa.o_cksum, aa->aa_requested_nob,
                                         aa->aa_page_count, aa->aa_ppga,
                                         cksum_type_unpack(aa->aa_oa->o_flags)))

                if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))

                rc = check_write_rcs(req, aa->aa_requested_nob, aa->aa_nio_count,
                                     aa->aa_page_count, aa->aa_ppga);

        /* The rest of this function executes only for OST_READs */
        if (rc > aa->aa_requested_nob) {
                CERROR("Unexpected rc %d (%d requested)\n", rc,
                       aa->aa_requested_nob);

        if (rc != req->rq_bulk->bd_nob_transferred) {
                CERROR("Unexpected rc %d (%d transferred)\n",
                       rc, req->rq_bulk->bd_nob_transferred);

        if (rc < aa->aa_requested_nob)
                handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);

        if (sptlrpc_cli_unwrap_bulk_read(req, rc, aa->aa_page_count,
                GOTO(out, rc = -EAGAIN);

        if (body->oa.o_valid & OBD_MD_FLCKSUM) {
                static int cksum_counter;
                __u32 server_cksum = body->oa.o_cksum;
                cksum_type_t cksum_type;

                if (body->oa.o_valid & OBD_MD_FLFLAGS)
                        cksum_type = cksum_type_unpack(body->oa.o_flags);
                        cksum_type = OBD_CKSUM_CRC32;
                client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
                                                 aa->aa_ppga, OST_READ,

                if (peer->nid == req->rq_bulk->bd_sender) {
                        router = libcfs_nid2str(req->rq_bulk->bd_sender);

                if (server_cksum == ~0 && rc > 0) {
                        CERROR("Protocol error: server %s set the 'checksum' "
                               "bit, but didn't send a checksum. Not fatal, "
                               "but please tell CFS.\n",
                               libcfs_nid2str(peer->nid));
                } else if (server_cksum != client_cksum) {
                        LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
                                           "%s%s%s inum "LPU64"/"LPU64" object "
                                           LPU64"/"LPU64" extent "
                                           "["LPU64"-"LPU64"]\n",
                                           req->rq_import->imp_obd->obd_name,
                                           libcfs_nid2str(peer->nid),
                                           body->oa.o_valid & OBD_MD_FLFID ?
                                                body->oa.o_fid : (__u64)0,
                                           body->oa.o_valid & OBD_MD_FLFID ?
                                                body->oa.o_generation : (__u64)0,
                                           body->oa.o_valid & OBD_MD_FLGROUP ?
                                                body->oa.o_gr : (__u64)0,
                                           aa->aa_ppga[0]->off,
                                           aa->aa_ppga[aa->aa_page_count-1]->off +
                                           aa->aa_ppga[aa->aa_page_count-1]->count -
                        CERROR("client %x, server %x, cksum_type %x\n",
                               client_cksum, server_cksum, cksum_type);
                        aa->aa_oa->o_cksum = client_cksum;
                        CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
        } else if (unlikely(client_cksum)) {
                static int cksum_missed;

                if ((cksum_missed & (-cksum_missed)) == cksum_missed)
                        CERROR("Checksum %u requested from %s but not sent\n",
                               cksum_missed, libcfs_nid2str(peer->nid));

        *aa->aa_oa = body->oa;
static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
                            struct lov_stripe_md *lsm,
                            obd_count page_count, struct brw_page **pga,
                            struct obd_capa *ocapa)
        struct ptlrpc_request *req;
        struct l_wait_info lwi;

        cfs_waitq_init(&waitq);

        rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
                                  page_count, pga, &req, ocapa);

        rc = ptlrpc_queue_wait(req);

        if (rc == -ETIMEDOUT && req->rq_resend) {
                DEBUG_REQ(D_HA, req, "BULK TIMEOUT");
                ptlrpc_req_finished(req);

        rc = osc_brw_fini_request(req, rc);

        ptlrpc_req_finished(req);
        if (osc_recoverable_error(rc)) {
                if (!osc_should_resend(resends, &exp->exp_obd->u.cli)) {
                        CERROR("too many resend retries, returning error\n");

                lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
                l_wait_event(waitq, 0, &lwi);
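
/*
 * The wait above can never be satisfied (the condition is a constant
 * 0), so LWI_TIMEOUT_INTR just turns it into a delay of 'resends'
 * seconds before the request is rebuilt and retried: a simple backoff
 * that grows linearly with the number of resends.
 */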
int osc_brw_redo_request(struct ptlrpc_request *request,
                         struct osc_brw_async_args *aa)
        struct ptlrpc_request *new_req;
        struct ptlrpc_request_set *set = request->rq_set;
        struct osc_brw_async_args *new_aa;
        struct osc_async_page *oap;

        if (!osc_should_resend(aa->aa_resends, aa->aa_cli)) {
                CERROR("too many resend retries, returning error\n");

        DEBUG_REQ(D_ERROR, request, "redo for recoverable error");

        body = lustre_msg_buf(request->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
        if (body->oa.o_valid & OBD_MD_FLOSSCAPA)
                ocapa = lustre_unpack_capa(request->rq_reqmsg,

        rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
                                          OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
                                  aa->aa_cli, aa->aa_oa,
                                  NULL /* lsm unused by osc currently */,
                                  aa->aa_page_count, aa->aa_ppga,
                                  &new_req, NULL /* ocapa */);

        client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);

        list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
                if (oap->oap_request != NULL) {
                        LASSERTF(request == oap->oap_request,
                                 "request %p != oap_request %p\n",
                                 request, oap->oap_request);
                        if (oap->oap_interrupted) {
                                client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
                                ptlrpc_req_finished(new_req);

        /* New request takes over pga and oaps from old request.
         * Note that copying a list_head doesn't work, need to move it... */
        new_req->rq_interpret_reply = request->rq_interpret_reply;
        new_req->rq_async_args = request->rq_async_args;
        new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;

        new_aa = (struct osc_brw_async_args *)&new_req->rq_async_args;

        CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
        list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
        CFS_INIT_LIST_HEAD(&aa->aa_oaps);

        list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
                if (oap->oap_request) {
                        ptlrpc_req_finished(oap->oap_request);
                        oap->oap_request = ptlrpc_request_addref(new_req);

        /* Using ptlrpc_set_add_req() here is safe because the interpret
         * functions run in check_set context. The only other path that can
         * reach the request from a different thread is the -EINTR case, and
         * that is protected by cl_loi_list_lock. */
        ptlrpc_set_add_req(set, new_req);

        client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);

        DEBUG_REQ(D_INFO, new_req, "new request");
static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
                          struct lov_stripe_md *lsm, obd_count page_count,
                          struct brw_page **pga, struct ptlrpc_request_set *set,
                          struct obd_capa *ocapa)
        struct ptlrpc_request *req;
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct osc_brw_async_args *aa;

        /* Consume write credits even if doing a sync write -
         * otherwise we may run out of space on OST due to grant. */
        if (cmd == OBD_BRW_WRITE) {
                spin_lock(&cli->cl_loi_list_lock);
                for (i = 0; i < page_count; i++) {
                        if (cli->cl_avail_grant >= CFS_PAGE_SIZE)
                                osc_consume_write_grant(cli, pga[i]);
                spin_unlock(&cli->cl_loi_list_lock);

        rc = osc_brw_prep_request(cmd, cli, oa, lsm, page_count, pga,
        aa = (struct osc_brw_async_args *)&req->rq_async_args;
        if (cmd == OBD_BRW_READ) {
                lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
                lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
                ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
                lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
                lprocfs_oh_tally(&cli->cl_write_rpc_hist,
                                 cli->cl_w_in_flight);
                ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);

        LASSERT(list_empty(&aa->aa_oaps));

        req->rq_interpret_reply = brw_interpret;
        ptlrpc_set_add_req(set, req);
        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cmd == OBD_BRW_READ)
                cli->cl_r_in_flight++;
                cli->cl_w_in_flight++;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DIO_PAUSE, 3);
        } else if (cmd == OBD_BRW_WRITE) {
                client_obd_list_lock(&cli->cl_loi_list_lock);
                for (i = 0; i < page_count; i++)
                        osc_release_write_grant(cli, pga[i], 0);
                osc_wake_cache_waiters(cli);
                client_obd_list_unlock(&cli->cl_loi_list_lock);
/*
 * ugh, we want disk allocation on the target to happen in offset order. we'll
 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
 * fine for our small page arrays and doesn't require allocation. it's an
 * insertion sort that swaps elements that are strides apart, shrinking the
 * stride down until it's '1' and the array is sorted.
 */
static void sort_brw_pages(struct brw_page **array, int num)
        struct brw_page *tmp;

        for (stride = 1; stride < num; stride = (stride * 3) + 1)
                for (i = stride; i < num; i++) {
                        while (j >= stride && array[j - stride]->off > tmp->off) {
                                array[j] = array[j - stride];
        } while (stride > 1);
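
/*
 * The stride loop generates the classic 3h+1 gap sequence (1, 4, 13,
 * 40, ...); each pass is an insertion sort over elements that are
 * 'stride' apart, ordering the pages by object offset before they are
 * packed into an RPC.
 */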
static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages)
        LASSERT(pages > 0);
        offset = pg[i]->off & ~CFS_PAGE_MASK;
        if (pages == 0) /* that's all */
        if (offset + pg[i]->count < CFS_PAGE_SIZE)
                return count; /* doesn't end on page boundary */
        offset = pg[i]->off & ~CFS_PAGE_MASK;
        if (offset != 0) /* doesn't start on page boundary */
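
/*
 * In other words, within one bulk transfer only the first fragment may
 * start part-way into a page and only the last may end part-way, so the
 * sorted array is cut at the first interior partial page and the
 * remainder is left for the next BRW.
 */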
static struct brw_page **osc_build_ppga(struct brw_page *pga, obd_count count)
        struct brw_page **ppga;

        OBD_ALLOC(ppga, sizeof(*ppga) * count);
        for (i = 0; i < count; i++)

static void osc_release_ppga(struct brw_page **ppga, obd_count count)
        LASSERT(ppga != NULL);

        OBD_FREE(ppga, sizeof(*ppga) * count);
static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
                   obd_count page_count, struct brw_page *pga,
                   struct obd_trans_info *oti)
        struct obdo *saved_oa = NULL;
        struct brw_page **ppga, **orig;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct client_obd *cli = &imp->imp_obd->u.cli;
        int rc, page_count_orig;

        if (cmd & OBD_BRW_CHECK) {
                /* The caller just wants to know if there's a chance that this
                 * I/O can succeed */
                if (imp == NULL || imp->imp_invalid)

        /* test_brw with a failed create can trip this, maybe others. */
        LASSERT(cli->cl_max_pages_per_rpc);

        orig = ppga = osc_build_ppga(pga, page_count);
        page_count_orig = page_count;

        sort_brw_pages(ppga, page_count);
        while (page_count) {
                obd_count pages_per_brw;

                if (page_count > cli->cl_max_pages_per_rpc)
                        pages_per_brw = cli->cl_max_pages_per_rpc;
                        pages_per_brw = page_count;

                pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);

                if (saved_oa != NULL) {
                        /* restore previously saved oa */
                        *oinfo->oi_oa = *saved_oa;
                } else if (page_count > pages_per_brw) {
                        /* save a copy of oa (brw will clobber it) */
                        OBDO_ALLOC(saved_oa);
                        if (saved_oa == NULL)
                                GOTO(out, rc = -ENOMEM);
                        *saved_oa = *oinfo->oi_oa;

                rc = osc_brw_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
                                      pages_per_brw, ppga, oinfo->oi_capa);

                page_count -= pages_per_brw;
                ppga += pages_per_brw;

        osc_release_ppga(orig, page_count_orig);

        if (saved_oa != NULL)
                OBDO_FREE(saved_oa);
static int osc_brw_async(int cmd, struct obd_export *exp,
                         struct obd_info *oinfo, obd_count page_count,
                         struct brw_page *pga, struct obd_trans_info *oti,
                         struct ptlrpc_request_set *set)
        struct brw_page **ppga, **orig;
        struct client_obd *cli = &exp->exp_obd->u.cli;
        int page_count_orig;

        if (cmd & OBD_BRW_CHECK) {
                struct obd_import *imp = class_exp2cliimp(exp);
                /* The caller just wants to know if there's a chance that this
                 * I/O can succeed */
                if (imp == NULL || imp->imp_invalid)

        orig = ppga = osc_build_ppga(pga, page_count);
        page_count_orig = page_count;

        sort_brw_pages(ppga, page_count);
        while (page_count) {
                struct brw_page **copy;
                obd_count pages_per_brw;

                pages_per_brw = min_t(obd_count, page_count,
                                      cli->cl_max_pages_per_rpc);

                pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);

                /* use ppga only if single RPC is going to fly */
                if (pages_per_brw != page_count_orig || ppga != orig) {
                        OBD_ALLOC(copy, sizeof(*copy) * pages_per_brw);
                                GOTO(out, rc = -ENOMEM);
                        memcpy(copy, ppga, sizeof(*copy) * pages_per_brw);

                rc = async_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
                                    pages_per_brw, copy, set, oinfo->oi_capa);

                        OBD_FREE(copy, sizeof(*copy) * pages_per_brw);
                        /* we passed it to async_internal() which is
                         * now responsible for releasing memory */

                page_count -= pages_per_brw;
                ppga += pages_per_brw;

        osc_release_ppga(orig, page_count_orig);
static void osc_check_rpcs(struct client_obd *cli);

/* The companion to osc_enter_cache(), called when @oap is no longer part of
 * the dirty accounting. Writeback completes or truncate happens before
 * writing starts. Must be called with the loi lock held. */
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
        osc_release_write_grant(cli, &oap->oap_brw_page, sent);
/* This maintains the lists of pending pages to read/write for a given object
 * (lop). This is used by osc_check_rpcs->osc_next_loi() and loi_list_maint()
 * to quickly find objects that are ready to send an RPC. */
static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
        if (lop->lop_num_pending == 0)

        /* if we have an invalid import we want to drain the queued pages
         * by forcing them through rpcs that immediately fail and complete
         * the pages. recovery relies on this to empty the queued pages
         * before canceling the locks and evicting down the llite pages */
        if (cli->cl_import == NULL || cli->cl_import->imp_invalid)

        /* stream rpcs in queue order as long as there is an urgent page
         * queued. this is our cheap solution for good batching in the case
         * where writepage marks some random page in the middle of the file
         * as urgent because of, say, memory pressure */
        if (!list_empty(&lop->lop_urgent)) {
                CDEBUG(D_CACHE, "urgent request forcing RPC\n");

        /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
        optimal = cli->cl_max_pages_per_rpc;
        if (cmd & OBD_BRW_WRITE) {
                /* trigger a write rpc stream as long as there are dirtiers
                 * waiting for space. as they're waiting, they're not going to
                 * create more pages to coalesce with what's waiting.. */
                if (!list_empty(&cli->cl_cache_waiters)) {
                        CDEBUG(D_CACHE, "cache waiters forcing RPC\n");

                /* +16 to avoid triggering rpcs that would want to include pages
                 * that are being queued but which can't be made ready until
                 * the queuer finishes with the page. this is a wart for
                 * llite::commit_write() */

        if (lop->lop_num_pending >= optimal)
static void on_list(struct list_head *item, struct list_head *list,
        if (list_empty(item) && should_be_on)
                list_add_tail(item, list);
        else if (!list_empty(item) && !should_be_on)
                list_del_init(item);

/* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
 * can find pages to build into rpcs quickly */
static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
        on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
                lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
                lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));

        on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
                loi->loi_write_lop.lop_num_pending);

        on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
                loi->loi_read_lop.lop_num_pending);

static void lop_update_pending(struct client_obd *cli,
                               struct loi_oap_pages *lop, int cmd, int delta)
        lop->lop_num_pending += delta;
        if (cmd & OBD_BRW_WRITE)
                cli->cl_pending_w_pages += delta;
                cli->cl_pending_r_pages += delta;
/* this is called when a sync waiter receives an interruption. Its job is to
 * get the caller woken as soon as possible. If its page hasn't been put in an
 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
 * desiring interruption, which will forcefully complete the rpc once the rpc
 * has completed. */
static void osc_occ_interrupted(struct oig_callback_context *occ)
        struct osc_async_page *oap;
        struct loi_oap_pages *lop;
        struct lov_oinfo *loi;

        /* XXX member_of() */
        oap = list_entry(occ, struct osc_async_page, oap_occ);

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);

        oap->oap_interrupted = 1;

        /* ok, it's been put in an rpc. only one oap gets a request reference */
        if (oap->oap_request != NULL) {
                ptlrpc_mark_interrupted(oap->oap_request);
                ptlrpcd_wake(oap->oap_request);

        /* we don't get interruption callbacks until osc_trigger_group_io()
         * has been called and put the sync oaps in the pending/urgent lists. */
        if (!list_empty(&oap->oap_pending_item)) {
                list_del_init(&oap->oap_pending_item);
                list_del_init(&oap->oap_urgent_item);

                lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
                        &loi->loi_write_lop : &loi->loi_read_lop;
                lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
                loi_list_maint(oap->oap_cli, oap->oap_loi);

                oig_complete_one(oap->oap_oig, &oap->oap_occ, -EINTR);
                oap->oap_oig = NULL;

        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
/* this is trying to propagate async writeback errors back up to the
 * application. As an async write fails we record the error code for later if
 * the app does an fsync. As long as errors persist we force future rpcs to be
 * sync so that the app can get a sync error and break the cycle of queueing
 * pages for which writeback will fail. */
static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
        ar->ar_force_sync = 1;
        ar->ar_min_xid = ptlrpc_sample_next_xid();

        if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
                ar->ar_force_sync = 0;
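
/*
 * The xid acts as a fence here: after a failure, ar_min_xid is set to
 * the next xid to be issued, so force_sync is only cleared once a
 * request started after the failure makes it through, rather than by a
 * straggler that was already in flight.
 */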
static void osc_oap_to_pending(struct osc_async_page *oap)
        struct loi_oap_pages *lop;

        if (oap->oap_cmd & OBD_BRW_WRITE)
                lop = &oap->oap_loi->loi_write_lop;
                lop = &oap->oap_loi->loi_read_lop;

        if (oap->oap_async_flags & ASYNC_URGENT)
                list_add(&oap->oap_urgent_item, &lop->lop_urgent);
        list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
        lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
/* this must be called holding the loi list lock to give coverage to exit_cache,
 * async_flag maintenance, and oap_request */
static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
                              struct osc_async_page *oap, int sent, int rc)
        if (oap->oap_request != NULL) {
                xid = ptlrpc_req_xid(oap->oap_request);
                ptlrpc_req_finished(oap->oap_request);
                oap->oap_request = NULL;

        oap->oap_async_flags = 0;
        oap->oap_interrupted = 0;

        if (oap->oap_cmd & OBD_BRW_WRITE) {
                osc_process_ar(&cli->cl_ar, xid, rc);
                osc_process_ar(&oap->oap_loi->loi_ar, xid, rc);

        if (rc == 0 && oa != NULL) {
                if (oa->o_valid & OBD_MD_FLBLOCKS)
                        oap->oap_loi->loi_lvb.lvb_blocks = oa->o_blocks;
                if (oa->o_valid & OBD_MD_FLMTIME)
                        oap->oap_loi->loi_lvb.lvb_mtime = oa->o_mtime;
                if (oa->o_valid & OBD_MD_FLATIME)
                        oap->oap_loi->loi_lvb.lvb_atime = oa->o_atime;
                if (oa->o_valid & OBD_MD_FLCTIME)
                        oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;

        osc_exit_cache(cli, oap, sent);
        oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
        oap->oap_oig = NULL;

        rc = oap->oap_caller_ops->ap_completion(oap->oap_caller_data,
                                                oap->oap_cmd, oa, rc);

        /* ll_ap_completion (from llite) drops PG_locked. so, a new
         * I/O on the page could start, but OSC calls it under lock
         * and thus we can add oap back to pending safely */
        /* upper layer wants to leave the page on pending queue */
        osc_oap_to_pending(oap);
        osc_exit_cache(cli, oap, sent);
static int brw_interpret(struct ptlrpc_request *req, void *data, int rc)
        struct osc_brw_async_args *aa = data;
        struct client_obd *cli;

        rc = osc_brw_fini_request(req, rc);
        CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
        if (osc_recoverable_error(rc)) {
                rc = osc_brw_redo_request(req, aa);

        client_obd_list_lock(&cli->cl_loi_list_lock);

        /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
         * is called so we know whether to go to sync BRWs or wait for more
         * RPCs to complete */
        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
                cli->cl_w_in_flight--;
                cli->cl_r_in_flight--;

        if (!list_empty(&aa->aa_oaps)) { /* from osc_send_oap_rpc() */
                struct osc_async_page *oap, *tmp;

                /* the caller may re-use the oap after the completion call so
                 * we need to clean it up a little */
                list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
                        list_del_init(&oap->oap_rpc_item);
                        osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
                OBDO_FREE(aa->aa_oa);
        } else { /* from async_internal() */
                for (i = 0; i < aa->aa_page_count; i++)
                        osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);

        osc_wake_cache_waiters(cli);
        osc_check_rpcs(cli);
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
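
/*
 * Completion funnels through here for both cached and direct BRWs:
 * under cl_loi_list_lock the in-flight counter is dropped, each page is
 * either completed (the osc_send_oap_rpc case) or has its grant
 * released (the async_internal case), cache waiters are woken, and
 * osc_check_rpcs() is kicked to keep the RPC pipeline full.
 */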
static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
                                            struct list_head *rpc_list,
                                            int page_count, int cmd)
        struct ptlrpc_request *req;
        struct brw_page **pga = NULL;
        struct osc_brw_async_args *aa;
        struct obdo *oa = NULL;
        struct obd_async_page_ops *ops = NULL;
        void *caller_data = NULL;
        struct obd_capa *ocapa;
        struct osc_async_page *oap;

        LASSERT(!list_empty(rpc_list));

        OBD_ALLOC(pga, sizeof(*pga) * page_count);
                RETURN(ERR_PTR(-ENOMEM));
                GOTO(out, req = ERR_PTR(-ENOMEM));

        list_for_each_entry(oap, rpc_list, oap_rpc_item) {
                ops = oap->oap_caller_ops;
                caller_data = oap->oap_caller_data;
                pga[i] = &oap->oap_brw_page;
                pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
                CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
                       pga[i]->pg, cfs_page_index(oap->oap_page), oap,
                       pga[i]->flag);

        /* always get the data for the obdo for the rpc */
        LASSERT(ops != NULL);
        ops->ap_fill_obdo(caller_data, cmd, oa);
        ocapa = ops->ap_lookup_capa(caller_data, cmd);

        sort_brw_pages(pga, page_count);
        rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
                CERROR("prep_req failed: %d\n", rc);
                GOTO(out, req = ERR_PTR(rc));

        /* Need to update the timestamps after the request is built in case
         * we race with setattr (locally or in queue at OST). If OST gets
         * later setattr before earlier BRW (as determined by the request xid),
         * the OST will not use BRW timestamps. Sadly, there is no obvious
         * way to do this in a single call. bug 10150 */
        ops->ap_update_obdo(caller_data, cmd, oa,
                            OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct osc_brw_async_args *)&req->rq_async_args;
        CFS_INIT_LIST_HEAD(&aa->aa_oaps);
        list_splice(rpc_list, &aa->aa_oaps);
        CFS_INIT_LIST_HEAD(rpc_list);

        OBD_FREE(pga, sizeof(*pga) * page_count);
/* the loi lock is held across this function but it's allowed to release
 * and reacquire it during its work */
/**
 * prepare pages for ASYNC io and put pages in send queue.
 *
 * \param cmd - OBD_BRW_* macros
 * \param lop - pending pages
 *
 * \return zero if pages were successfully added to the send queue.
 * \return nonzero if an error occurred.
 */
2167 static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
2168 int cmd, struct loi_oap_pages *lop)
2170 struct ptlrpc_request *req;
2171 obd_count page_count = 0;
2172 struct osc_async_page *oap = NULL, *tmp;
2173 struct osc_brw_async_args *aa;
2174 struct obd_async_page_ops *ops;
2175 CFS_LIST_HEAD(rpc_list);
2176 unsigned int ending_offset;
2177 unsigned starting_offset = 0;
2181 /* first we find the pages we're allowed to work with */
2182 list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
2184 ops = oap->oap_caller_ops;
2186 LASSERT(oap->oap_magic == OAP_MAGIC);
2188 if (page_count != 0 &&
2189 srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
2190 CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
2191 " oap %p, page %p, srvlock %u\n",
2192 oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
2195 /* in llite being 'ready' equates to the page being locked
2196 * until completion unlocks it. commit_write submits a page
2197 * as not ready because its unlock will happen unconditionally
2198 * as the call returns. if we race with commit_write giving
2199 * us that page we dont' want to create a hole in the page
2200 * stream, so we stop and leave the rpc to be fired by
2201 * another dirtier or kupdated interval (the not ready page
2202 * will still be on the dirty list). we could call in
2203 * at the end of ll_file_write to process the queue again. */
2204 if (!(oap->oap_async_flags & ASYNC_READY)) {
2205 int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
2207 CDEBUG(D_INODE, "oap %p page %p returned %d "
2208 "instead of ready\n", oap,
2212 /* llite is telling us that the page is still
2213 * in commit_write and that we should try
2214 * and put it in an rpc again later. we
2215 * break out of the loop so we don't create
2216 * a hole in the sequence of pages in the rpc
2221 /* the io isn't needed; tell the checks
2222 * below to complete the rpc with EINTR */
2223 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
2224 oap->oap_count = -EINTR;
2227 oap->oap_async_flags |= ASYNC_READY;
2230 LASSERTF(0, "oap %p page %p returned %d "
2231 "from make_ready\n", oap,
2239 * Page submitted for IO has to be locked. Either by
2240 * ->ap_make_ready() or by higher layers.
2242 #if defined(__KERNEL__) && defined(__linux__)
2243 if (!(PageLocked(oap->oap_page) &&
2244 (CheckWriteback(oap->oap_page, cmd) || oap->oap_oig != NULL))) {
2245 CDEBUG(D_PAGE, "page %p lost wb %lx/%x\n",
2246 oap->oap_page, (long)oap->oap_page->flags, oap->oap_async_flags);
2250 /* If there is a gap at the start of this page, it can't merge
2251 * with any previous page, so we'll hand the network a
2252 * "fragmented" page array that it can't transfer in 1 RDMA */
2253 if (page_count != 0 && oap->oap_page_off != 0)
2256 /* take the page out of our book-keeping */
2257 list_del_init(&oap->oap_pending_item);
2258 lop_update_pending(cli, lop, cmd, -1);
2259 list_del_init(&oap->oap_urgent_item);
2261 if (page_count == 0)
2262 starting_offset = (oap->oap_obj_off+oap->oap_page_off) &
2263 (PTLRPC_MAX_BRW_SIZE - 1);
2265 /* ask the caller for the size of the io as the rpc leaves. */
2266 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
2267 oap->oap_count =
2268 ops->ap_refresh_count(oap->oap_caller_data, cmd);
2269 if (oap->oap_count <= 0) {
2270 CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
2272 osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
2276 /* now put the page back in our accounting */
2277 list_add_tail(&oap->oap_rpc_item, &rpc_list);
2278 if (page_count == 0)
2279 srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
2280 if (++page_count >= cli->cl_max_pages_per_rpc)
2283 /* End on a PTLRPC_MAX_BRW_SIZE boundary. We want full-sized
2284 * RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
2285 * have the same alignment as the initial writes that allocated
2286 * extents on the server. */
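/* A worked example, assuming an illustrative PTLRPC_MAX_BRW_SIZE of
 * 1 MiB: an io ending at object offset 0x500000 gives
 * ending_offset == 0x500000 & 0xfffff == 0, so the rpc is closed
 * exactly on that boundary. */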
2287 ending_offset = (oap->oap_obj_off + oap->oap_page_off +
2288 oap->oap_count) & (PTLRPC_MAX_BRW_SIZE - 1);
2289 if (ending_offset == 0)
2292 /* If there is a gap at the end of this page, it can't merge
2293 * with any subsequent pages, so we'll hand the network a
2294 * "fragmented" page array that it can't transfer in 1 RDMA */
2295 if (oap->oap_page_off + oap->oap_count < CFS_PAGE_SIZE)
2299 osc_wake_cache_waiters(cli);
2301 if (page_count == 0)
2304 loi_list_maint(cli, loi);
2306 client_obd_list_unlock(&cli->cl_loi_list_lock);
2308 req = osc_build_req(cli, &rpc_list, page_count, cmd);
2310 /* this should happen rarely and is pretty bad; it makes the
2311 * pending list not follow the dirty order */
2312 client_obd_list_lock(&cli->cl_loi_list_lock);
2313 list_for_each_entry_safe(oap, tmp, &rpc_list, oap_rpc_item) {
2314 list_del_init(&oap->oap_rpc_item);
2316 /* queued sync pages can be torn down while the pages
2317 * were between the pending list and the rpc */
2318 if (oap->oap_interrupted) {
2319 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
2320 osc_ap_completion(cli, NULL, oap, 0,
2324 osc_ap_completion(cli, NULL, oap, 0, PTR_ERR(req));
2326 loi_list_maint(cli, loi);
2327 RETURN(PTR_ERR(req));
2330 aa = (struct osc_brw_async_args *)&req->rq_async_args;
2332 if (cmd == OBD_BRW_READ) {
2333 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2334 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2335 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2336 (starting_offset >> CFS_PAGE_SHIFT) + 1);
2337 ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
2339 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2340 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
2341 cli->cl_w_in_flight);
2342 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2343 (starting_offset >> CFS_PAGE_SHIFT) + 1);
2344 ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);
2347 client_obd_list_lock(&cli->cl_loi_list_lock);
2349 if (cmd == OBD_BRW_READ)
2350 cli->cl_r_in_flight++;
2352 cli->cl_w_in_flight++;
2354 /* queued sync pages can be torn down while the pages
2355 * were between the pending list and the rpc */
2357 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2358 /* only one oap gets a request reference */
2361 if (oap->oap_interrupted && !req->rq_intr) {
2362 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2364 ptlrpc_mark_interrupted(req);
2368 tmp->oap_request = ptlrpc_request_addref(req);
2370 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2371 page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2373 req->rq_interpret_reply = brw_interpret;
2374 ptlrpcd_add_req(req);
2378 #define LOI_DEBUG(LOI, STR, args...) \
2379 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
2380 !list_empty(&(LOI)->loi_cli_item), \
2381 (LOI)->loi_write_lop.lop_num_pending, \
2382 !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
2383 (LOI)->loi_read_lop.lop_num_pending, \
2384 !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
2387 /* This is called by osc_check_rpcs() to find which objects have pages that
2388 * we could be sending. These lists are maintained by lop_makes_rpc(). */
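/* Selection order, summarizing the checks below: ready objects first,
 * then objects with queued writes whenever there are cache waiters,
 * then anything still queued once the import has become invalid. */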
2389 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
2392 /* first return all objects which we already know to have
2393 * pages ready to be stuffed into rpcs */
2394 if (!list_empty(&cli->cl_loi_ready_list))
2395 RETURN(list_entry(cli->cl_loi_ready_list.next,
2396 struct lov_oinfo, loi_cli_item));
2398 /* then if we have cache waiters, return all objects with queued
2399 * writes. This is especially important when many small files
2400 * have filled up the cache and not been fired into rpcs because
2401 * they don't pass the nr_pending/object threshold */
2402 if (!list_empty(&cli->cl_cache_waiters) &&
2403 !list_empty(&cli->cl_loi_write_list))
2404 RETURN(list_entry(cli->cl_loi_write_list.next,
2405 struct lov_oinfo, loi_write_item));
2407 /* then return all queued objects when we have an invalid import
2408 * so that they get flushed */
2409 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2410 if (!list_empty(&cli->cl_loi_write_list))
2411 RETURN(list_entry(cli->cl_loi_write_list.next,
2412 struct lov_oinfo, loi_write_item));
2413 if (!list_empty(&cli->cl_loi_read_list))
2414 RETURN(list_entry(cli->cl_loi_read_list.next,
2415 struct lov_oinfo, loi_read_item));
2420 /* called with the loi list lock held */
2421 static void osc_check_rpcs(struct client_obd *cli)
2423 struct lov_oinfo *loi;
2424 int rc = 0, race_counter = 0;
2427 while ((loi = osc_next_loi(cli)) != NULL) {
2428 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
2430 if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
2433 /* attempt some read/write balancing by alternating between
2434 * reads and writes in an object. The makes_rpc checks here
2435 * would be redundant if we were getting read/write work items
2436 * instead of objects. We don't want send_oap_rpc to drain a
2437 * partial read pending queue when this object was handed to us
2438 * for write io while there are cache waiters */
2439 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
2440 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
2441 &loi->loi_write_lop);
2449 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
2450 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
2451 &loi->loi_read_lop);
2460 /* attempt some inter-object balancing by issuing rpcs
2461 * for each object in turn */
2462 if (!list_empty(&loi->loi_cli_item))
2463 list_del_init(&loi->loi_cli_item);
2464 if (!list_empty(&loi->loi_write_item))
2465 list_del_init(&loi->loi_write_item);
2466 if (!list_empty(&loi->loi_read_item))
2467 list_del_init(&loi->loi_read_item);
2469 loi_list_maint(cli, loi);
2471 /* send_oap_rpc fails with 0 when make_ready tells it to
2472 * back off. llite's make_ready does this when it tries
2473 * to lock a page queued for write that is already locked.
2474 * we want to try sending rpcs from many objects, but we
2475 * don't want to spin failing with 0. */
2476 if (race_counter == 10)
2482 /* we're trying to queue a page in the osc so we're subject to the
2483 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
2484 * If the osc's queued pages are already at that limit, then we want to sleep
2485 * until there is space in the osc's queue for us. We also may be waiting for
2486 * write credits from the OST if there are RPCs in flight that may return some
2487 * before we fall back to sync writes.
2489 * We need this to know whether our allocation was granted in the presence of signals */
2490 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
2494 client_obd_list_lock(&cli->cl_loi_list_lock);
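/* our entry is taken off cl_cache_waiters when space is granted (see
 * osc_wake_cache_waiters()), so an empty entry means we were granted;
 * with no rpcs in flight nothing can wake us, so stop waiting. */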
2495 rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
2496 client_obd_list_unlock(&cli->cl_loi_list_lock);
2500 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
2501 * grant or cache space. */
2502 static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
2503 struct osc_async_page *oap)
2505 struct osc_cache_waiter ocw;
2506 struct l_wait_info lwi = { 0 };
2510 CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
2511 "grant: %lu\n", cli->cl_dirty, atomic_read(&obd_dirty_pages),
2512 cli->cl_dirty_max, obd_max_dirty_pages,
2513 cli->cl_lost_grant, cli->cl_avail_grant);
2515 /* force the caller to try sync io. this can jump the list
2516 * of queued writes and create a discontiguous rpc stream */
2517 if (cli->cl_dirty_max < CFS_PAGE_SIZE || cli->cl_ar.ar_force_sync ||
2518 loi->loi_ar.ar_force_sync)
2521 /* Hopefully normal case - cache space and write credits available */
2522 if ((cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max) &&
2523 (atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) &&
2524 (cli->cl_avail_grant >= CFS_PAGE_SIZE)) {
2525 /* account for ourselves */
2526 osc_consume_write_grant(cli, &oap->oap_brw_page);
2530 /* Make sure that there are write rpcs in flight to wait for. This
2531 * is a little silly as this object may not have any pending but
2532 * other objects sure might. */
2533 if (cli->cl_w_in_flight) {
2534 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
2535 cfs_waitq_init(&ocw.ocw_waitq);
2539 loi_list_maint(cli, loi);
2540 osc_check_rpcs(cli);
2541 client_obd_list_unlock(&cli->cl_loi_list_lock);
2543 CDEBUG(D_CACHE, "sleeping for cache space\n");
2544 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
2546 client_obd_list_lock(&cli->cl_loi_list_lock);
2547 if (!list_empty(&ocw.ocw_entry)) {
2548 list_del(&ocw.ocw_entry);
2558 * Checks if requested extent lock is compatible with a lock under the page.
2560 * Checks if the lock under \a page is compatible with a read or write lock
2561 * (specified by \a rw) for an extent [\a start , \a end].
2563 * \param exp osc export
2564 * \param lsm striping information for the file
2565 * \param res osc_async_page placeholder
2566 * \param rw OBD_BRW_READ if requested for reading,
2567 * OBD_BRW_WRITE if requested for writing
2568 * \param start start of the requested extent
2569 * \param end end of the requested extent
2570 * \param cookie transparent parameter for passing locking context
2572 * \post result == 1, *cookie == context, appropriate lock is referenced or
2575 * \retval 1 owned lock is reused for the request
2576 * \retval 0 no lock reused for the request
2578 * \see osc_release_short_lock
2580 static int osc_reget_short_lock(struct obd_export *exp,
2581 struct lov_stripe_md *lsm,
2583 obd_off start, obd_off end,
2586 struct osc_async_page *oap = *res;
2591 spin_lock(&oap->oap_lock);
2592 rc = ldlm_lock_fast_match(oap->oap_ldlm_lock, rw,
2593 start, end, cookie);
2594 spin_unlock(&oap->oap_lock);
2600 * Releases a reference to a lock taken in a "fast" way.
2602 * Releases a read or a write (specified by \a rw) lock
2603 * referenced by \a cookie.
2605 * \param exp osc export
2606 * \param lsm striping information for the file
2607 * \param end end of the locked extent
2608 * \param rw OBD_BRW_READ if requested for reading,
2609 * OBD_BRW_WRITE if requested for writing
2610 * \param cookie transparent parameter for passing locking context
2612 * \post appropriate lock is dereferenced
2614 * \see osc_reget_short_lock
2616 static int osc_release_short_lock(struct obd_export *exp,
2617 struct lov_stripe_md *lsm, obd_off end,
2618 void *cookie, int rw)
2621 ldlm_lock_fast_release(cookie, rw);
2622 /* no error could have happened at this layer */
2626 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
2627 struct lov_oinfo *loi, cfs_page_t *page,
2628 obd_off offset, struct obd_async_page_ops *ops,
2629 void *data, void **res, int nocache,
2630 struct lustre_handle *lockh)
2632 struct osc_async_page *oap;
2633 struct ldlm_res_id oid = {{0}};
2638 return size_round(sizeof(*oap));
2641 oap->oap_magic = OAP_MAGIC;
2642 oap->oap_cli = &exp->exp_obd->u.cli;
2645 oap->oap_caller_ops = ops;
2646 oap->oap_caller_data = data;
2648 oap->oap_page = page;
2649 oap->oap_obj_off = offset;
2651 CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
2652 CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
2653 CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
2654 CFS_INIT_LIST_HEAD(&oap->oap_page_list);
2656 oap->oap_occ.occ_interrupted = osc_occ_interrupted;
2658 spin_lock_init(&oap->oap_lock);
2660 /* If the page was marked as not cacheable, don't add it to any locks */
2662 oid.name[0] = loi->loi_id;
2663 oid.name[2] = loi->loi_gr;
2664 /* This is the only place where we can call cache_add_extent
2665 without oap_lock, because this page is locked now, and
2666 the lock we are adding it to is referenced, so it cannot lose
2667 any pages either. */
2668 rc = cache_add_extent(oap->oap_cli->cl_cache, &oid, oap, lockh);
2673 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
2677 struct osc_async_page *oap_from_cookie(void *cookie)
2679 struct osc_async_page *oap = cookie;
2680 if (oap->oap_magic != OAP_MAGIC)
2681 return ERR_PTR(-EINVAL);
2685 static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
2686 struct lov_oinfo *loi, void *cookie,
2687 int cmd, obd_off off, int count,
2688 obd_flag brw_flags, enum async_flags async_flags)
2690 struct client_obd *cli = &exp->exp_obd->u.cli;
2691 struct osc_async_page *oap;
2695 oap = oap_from_cookie(cookie);
2697 RETURN(PTR_ERR(oap));
2699 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2702 if (!list_empty(&oap->oap_pending_item) ||
2703 !list_empty(&oap->oap_urgent_item) ||
2704 !list_empty(&oap->oap_rpc_item))
2707 /* check if the file's owner/group is over quota */
2708 #ifdef HAVE_QUOTA_SUPPORT
2709 if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)){
2710 struct obd_async_page_ops *ops;
2717 ops = oap->oap_caller_ops;
2718 ops->ap_fill_obdo(oap->oap_caller_data, cmd, oa);
2719 if (lquota_chkdq(quota_interface, cli, oa->o_uid, oa->o_gid) ==
2730 loi = lsm->lsm_oinfo[0];
2732 client_obd_list_lock(&cli->cl_loi_list_lock);
2735 oap->oap_page_off = off;
2736 oap->oap_count = count;
2737 oap->oap_brw_flags = brw_flags;
2738 oap->oap_async_flags = async_flags;
2740 if (cmd & OBD_BRW_WRITE) {
2741 rc = osc_enter_cache(cli, loi, oap);
2743 client_obd_list_unlock(&cli->cl_loi_list_lock);
2748 osc_oap_to_pending(oap);
2749 loi_list_maint(cli, loi);
2751 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
2754 osc_check_rpcs(cli);
2755 client_obd_list_unlock(&cli->cl_loi_list_lock);
2760 /* aka (~was & now & flag), but this is more clear :) */
2761 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
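/* e.g. (illustrative values) SETTING(0x1, 0x3, 0x2) is true, since
 * flag 0x2 is clear in 'was' (0x1) but set in 'now' (0x3), while
 * SETTING(0x3, 0x3, 0x2) is false because the flag was already set. */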
2763 static int osc_set_async_flags(struct obd_export *exp,
2764 struct lov_stripe_md *lsm,
2765 struct lov_oinfo *loi, void *cookie,
2766 obd_flag async_flags)
2768 struct client_obd *cli = &exp->exp_obd->u.cli;
2769 struct loi_oap_pages *lop;
2770 struct osc_async_page *oap;
2774 oap = oap_from_cookie(cookie);
2776 RETURN(PTR_ERR(oap));
2779 * bug 7311: OST-side locking is only supported for liblustre for now
2780 * (and liblustre never calls obd_set_async_flags(). I hope.); a generic
2781 * implementation has to handle the case where an OST-locked page was
2782 * picked up by, e.g., ->writepage().
2784 LASSERT(!(oap->oap_brw_flags & OBD_BRW_SRVLOCK));
2785 LASSERT(!LIBLUSTRE_CLIENT); /* check that liblustre angels do fear to
2786 * tread here. */
2788 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2792 loi = lsm->lsm_oinfo[0];
2794 if (oap->oap_cmd & OBD_BRW_WRITE) {
2795 lop = &loi->loi_write_lop;
2797 lop = &loi->loi_read_lop;
2800 client_obd_list_lock(&cli->cl_loi_list_lock);
2802 if (list_empty(&oap->oap_pending_item))
2803 GOTO(out, rc = -EINVAL);
2805 if ((oap->oap_async_flags & async_flags) == async_flags)
2808 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
2809 oap->oap_async_flags |= ASYNC_READY;
2811 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
2812 if (list_empty(&oap->oap_rpc_item)) {
2813 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2814 loi_list_maint(cli, loi);
2818 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
2819 oap->oap_async_flags);
2821 osc_check_rpcs(cli);
2822 client_obd_list_unlock(&cli->cl_loi_list_lock);
2826 static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
2827 struct lov_oinfo *loi,
2828 struct obd_io_group *oig, void *cookie,
2829 int cmd, obd_off off, int count,
2831 obd_flag async_flags)
2833 struct client_obd *cli = &exp->exp_obd->u.cli;
2834 struct osc_async_page *oap;
2835 struct loi_oap_pages *lop;
2839 oap = oap_from_cookie(cookie);
2841 RETURN(PTR_ERR(oap));
2843 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2846 if (!list_empty(&oap->oap_pending_item) ||
2847 !list_empty(&oap->oap_urgent_item) ||
2848 !list_empty(&oap->oap_rpc_item))
2852 loi = lsm->lsm_oinfo[0];
2854 client_obd_list_lock(&cli->cl_loi_list_lock);
2857 oap->oap_page_off = off;
2858 oap->oap_count = count;
2859 oap->oap_brw_flags = brw_flags;
2860 oap->oap_async_flags = async_flags;
2862 if (cmd & OBD_BRW_WRITE)
2863 lop = &loi->loi_write_lop;
2865 lop = &loi->loi_read_lop;
2867 list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
2868 if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
2870 rc = oig_add_one(oig, &oap->oap_occ);
2873 LOI_DEBUG(loi, "oap %p page %p on group pending: rc %d\n",
2874 oap, oap->oap_page, rc);
2876 client_obd_list_unlock(&cli->cl_loi_list_lock);
2881 static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
2882 struct loi_oap_pages *lop, int cmd)
2884 struct list_head *pos, *tmp;
2885 struct osc_async_page *oap;
2887 list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
2888 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
2889 list_del(&oap->oap_pending_item);
2890 osc_oap_to_pending(oap);
2892 loi_list_maint(cli, loi);
2895 static int osc_trigger_group_io(struct obd_export *exp,
2896 struct lov_stripe_md *lsm,
2897 struct lov_oinfo *loi,
2898 struct obd_io_group *oig)
2900 struct client_obd *cli = &exp->exp_obd->u.cli;
2904 loi = lsm->lsm_oinfo[0];
2906 client_obd_list_lock(&cli->cl_loi_list_lock);
2908 osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
2909 osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
2911 osc_check_rpcs(cli);
2912 client_obd_list_unlock(&cli->cl_loi_list_lock);
2917 static int osc_teardown_async_page(struct obd_export *exp,
2918 struct lov_stripe_md *lsm,
2919 struct lov_oinfo *loi, void *cookie)
2921 struct client_obd *cli = &exp->exp_obd->u.cli;
2922 struct loi_oap_pages *lop;
2923 struct osc_async_page *oap;
2927 oap = oap_from_cookie(cookie);
2929 RETURN(PTR_ERR(oap));
2932 loi = lsm->lsm_oinfo[0];
2934 if (oap->oap_cmd & OBD_BRW_WRITE) {
2935 lop = &loi->loi_write_lop;
2937 lop = &loi->loi_read_lop;
2940 client_obd_list_lock(&cli->cl_loi_list_lock);
2942 if (!list_empty(&oap->oap_rpc_item))
2943 GOTO(out, rc = -EBUSY);
2945 osc_exit_cache(cli, oap, 0);
2946 osc_wake_cache_waiters(cli);
2948 if (!list_empty(&oap->oap_urgent_item)) {
2949 list_del_init(&oap->oap_urgent_item);
2950 oap->oap_async_flags &= ~ASYNC_URGENT;
2952 if (!list_empty(&oap->oap_pending_item)) {
2953 list_del_init(&oap->oap_pending_item);
2954 lop_update_pending(cli, lop, oap->oap_cmd, -1);
2956 loi_list_maint(cli, loi);
2957 cache_remove_extent(cli->cl_cache, oap);
2959 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
2961 client_obd_list_unlock(&cli->cl_loi_list_lock);
2965 int osc_extent_blocking_cb(struct ldlm_lock *lock,
2966 struct ldlm_lock_desc *new, void *data,
2969 struct lustre_handle lockh = { 0 };
2973 if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
2974 LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
2979 case LDLM_CB_BLOCKING:
2980 ldlm_lock2handle(lock, &lockh);
2981 rc = ldlm_cli_cancel(&lockh);
2983 CERROR("ldlm_cli_cancel failed: %d\n", rc);
2985 case LDLM_CB_CANCELING: {
2987 ldlm_lock2handle(lock, &lockh);
2988 /* This lock wasn't granted, don't try to do anything */
2989 if (lock->l_req_mode != lock->l_granted_mode)
2992 cache_remove_lock(lock->l_conn_export->exp_obd->u.cli.cl_cache,
2995 if (lock->l_conn_export->exp_obd->u.cli.cl_ext_lock_cancel_cb)
2996 lock->l_conn_export->exp_obd->u.cli.cl_ext_lock_cancel_cb(
2997 lock, new, data,flag);
3006 EXPORT_SYMBOL(osc_extent_blocking_cb);
3008 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data,
3011 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
3014 CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
3017 lock_res_and_lock(lock);
3018 #if defined (__KERNEL__) && defined (__linux__)
3019 /* Liang XXX: Darwin and Winnt checking should be added */
3020 if (lock->l_ast_data && lock->l_ast_data != data) {
3021 struct inode *new_inode = data;
3022 struct inode *old_inode = lock->l_ast_data;
3023 if (!(old_inode->i_state & I_FREEING))
3024 LDLM_ERROR(lock, "inconsistent l_ast_data found");
3025 LASSERTF(old_inode->i_state & I_FREEING,
3026 "Found existing inode %p/%lu/%u state %lu in lock: "
3027 "setting data to %p/%lu/%u\n", old_inode,
3028 old_inode->i_ino, old_inode->i_generation,
3030 new_inode, new_inode->i_ino, new_inode->i_generation);
3033 lock->l_ast_data = data;
3034 lock->l_flags |= (flags & LDLM_FL_NO_LRU);
3035 unlock_res_and_lock(lock);
3036 LDLM_LOCK_PUT(lock);
3039 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3040 ldlm_iterator_t replace, void *data)
3042 struct ldlm_res_id res_id = { .name = {0} };
3043 struct obd_device *obd = class_exp2obd(exp);
3045 res_id.name[0] = lsm->lsm_object_id;
3046 res_id.name[2] = lsm->lsm_object_gr;
3048 ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3052 static int osc_enqueue_fini(struct obd_device *obd, struct ptlrpc_request *req,
3053 struct obd_info *oinfo, int intent, int rc)
3058 /* The request was created before the ldlm_cli_enqueue call. */
3059 if (rc == ELDLM_LOCK_ABORTED) {
3060 struct ldlm_reply *rep;
3061 rep = req_capsule_server_get(&req->rq_pill,
3064 LASSERT(rep != NULL);
3065 if (rep->lock_policy_res1)
3066 rc = rep->lock_policy_res1;
3070 if ((intent && rc == ELDLM_LOCK_ABORTED) || !rc) {
3071 CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
3072 oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_size,
3073 oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_blocks,
3074 oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_mtime);
3078 cache_add_lock(obd->u.cli.cl_cache, oinfo->oi_lockh);
3080 /* Call the update callback. */
3081 rc = oinfo->oi_cb_up(oinfo, rc);
3085 static int osc_enqueue_interpret(struct ptlrpc_request *req,
3086 struct osc_enqueue_args *aa, int rc)
3088 int intent = aa->oa_oi->oi_flags & LDLM_FL_HAS_INTENT;
3089 struct lov_stripe_md *lsm = aa->oa_oi->oi_md;
3090 struct ldlm_lock *lock;
3092 /* ldlm_cli_enqueue is holding a reference on the lock, so it must
3093 * be valid. */
3094 lock = ldlm_handle2lock(aa->oa_oi->oi_lockh);
3096 /* Complete obtaining the lock procedure. */
3097 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
3099 &aa->oa_oi->oi_flags,
3100 &lsm->lsm_oinfo[0]->loi_lvb,
3101 sizeof(lsm->lsm_oinfo[0]->loi_lvb),
3102 lustre_swab_ost_lvb,
3103 aa->oa_oi->oi_lockh, rc);
3105 /* Complete osc stuff. */
3106 rc = osc_enqueue_fini(aa->oa_exp->exp_obd, req, aa->oa_oi, intent, rc);
3108 /* Release the lock for async request. */
3109 if (lustre_handle_is_used(aa->oa_oi->oi_lockh) && rc == ELDLM_OK)
3110 ldlm_lock_decref(aa->oa_oi->oi_lockh, aa->oa_ei->ei_mode);
3112 LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
3113 aa->oa_oi->oi_lockh, req, aa);
3114 LDLM_LOCK_PUT(lock);
3118 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a
3119 * lock from the 2nd OSC before a lock from the 1st one. This does not deadlock
3120 * with other synchronous requests, but holding some locks while trying to
3121 * obtain others may take a considerable amount of time if an ost fails; and
3122 * when a client does not release a lock that other sync requests want, the
3123 * client is excluded from the cluster -- such scenarios make life difficult,
3124 * so release locks just after they are obtained. */
3125 static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
3126 struct ldlm_enqueue_info *einfo,
3127 struct ptlrpc_request_set *rqset)
3129 struct ldlm_res_id res_id = { .name = {0} };
3130 struct obd_device *obd = exp->exp_obd;
3131 struct ptlrpc_request *req = NULL;
3132 int intent = oinfo->oi_flags & LDLM_FL_HAS_INTENT;
3137 res_id.name[0] = oinfo->oi_md->lsm_object_id;
3138 res_id.name[2] = oinfo->oi_md->lsm_object_gr;
3140 /* Filesystem lock extents are extended to page boundaries so that
3141 * dealing with the page cache is a little smoother. */
3142 oinfo->oi_policy.l_extent.start -=
3143 oinfo->oi_policy.l_extent.start & ~CFS_PAGE_MASK;
3144 oinfo->oi_policy.l_extent.end |= ~CFS_PAGE_MASK;
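/* A worked example, assuming 4096-byte pages (so ~CFS_PAGE_MASK is
 * 4095): a request for the byte extent [5000, 9000] is widened to
 * [4096, 12287], i.e. start is rounded down to the beginning of its
 * page and end up to the last byte of its page, so the lock always
 * covers whole pages. */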
3146 if (oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid == 0)
3149 /* Next, search for already existing extent locks that will cover us */
3150 /* If we're trying to read, we also search for an existing PW lock. The
3151 * VFS and page cache already protect us locally, so lots of readers/
3152 * writers can share a single PW lock.
3154 * There are problems with conversion deadlocks, so instead of
3155 * converting a read lock to a write lock, we'll just enqueue a new
3158 * At some point we should cancel the read lock instead of making them
3159 * send us a blocking callback, but there are problems with canceling
3160 * locks out from other users right now, too. */
3161 mode = einfo->ei_mode;
3162 if (einfo->ei_mode == LCK_PR)
3164 mode = ldlm_lock_match(obd->obd_namespace,
3165 oinfo->oi_flags | LDLM_FL_LVB_READY, &res_id,
3166 einfo->ei_type, &oinfo->oi_policy, mode,
3169 /* addref the lock only if this is not an async request and a PW
3170 * lock was matched whereas we asked for PR. */
3171 if (!rqset && einfo->ei_mode != mode)
3172 ldlm_lock_addref(oinfo->oi_lockh, LCK_PR);
3173 osc_set_data_with_check(oinfo->oi_lockh, einfo->ei_cbdata,
3176 /* I would like to be able to ASSERT here that rss <=
3177 * kms, but I can't, for reasons which are explained in
3181 /* We already have a lock, and it's referenced */
3182 oinfo->oi_cb_up(oinfo, ELDLM_OK);
3184 /* For async requests, decref the lock. */
3185 if (einfo->ei_mode != mode)
3186 ldlm_lock_decref(oinfo->oi_lockh, LCK_PW);
3188 ldlm_lock_decref(oinfo->oi_lockh, einfo->ei_mode);
3195 CFS_LIST_HEAD(cancels);
3196 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3197 &RQF_LDLM_ENQUEUE_LVB);
3201 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
3205 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
3206 sizeof(oinfo->oi_md->lsm_oinfo[0]->loi_lvb));
3207 ptlrpc_request_set_replen(req);
3210 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
3211 oinfo->oi_flags &= ~LDLM_FL_BLOCK_GRANTED;
3213 rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id,
3214 &oinfo->oi_policy, &oinfo->oi_flags,
3215 &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
3216 sizeof(oinfo->oi_md->lsm_oinfo[0]->loi_lvb),
3217 lustre_swab_ost_lvb, oinfo->oi_lockh,
3221 struct osc_enqueue_args *aa;
3222 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3223 aa = (struct osc_enqueue_args *)&req->rq_async_args;
3228 req->rq_interpret_reply = osc_enqueue_interpret;
3229 ptlrpc_set_add_req(rqset, req);
3230 } else if (intent) {
3231 ptlrpc_req_finished(req);
3236 rc = osc_enqueue_fini(obd, req, oinfo, intent, rc);
3238 ptlrpc_req_finished(req);
3243 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
3244 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
3245 int *flags, void *data, struct lustre_handle *lockh)
3247 struct ldlm_res_id res_id = { .name = {0} };
3248 struct obd_device *obd = exp->exp_obd;
3249 int lflags = *flags;
3253 res_id.name[0] = lsm->lsm_object_id;
3254 res_id.name[2] = lsm->lsm_object_gr;
3256 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
3259 /* Filesystem lock extents are extended to page boundaries so that
3260 * dealing with the page cache is a little smoother */
3261 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3262 policy->l_extent.end |= ~CFS_PAGE_MASK;
3264 /* Next, search for already existing extent locks that will cover us */
3265 /* If we're trying to read, we also search for an existing PW lock. The
3266 * VFS and page cache already protect us locally, so lots of readers/
3267 * writers can share a single PW lock. */
3271 rc = ldlm_lock_match(obd->obd_namespace, lflags | LDLM_FL_LVB_READY,
3272 &res_id, type, policy, rc, lockh);
3274 osc_set_data_with_check(lockh, data, lflags);
3275 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
3276 ldlm_lock_addref(lockh, LCK_PR);
3277 ldlm_lock_decref(lockh, LCK_PW);
3284 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
3285 __u32 mode, struct lustre_handle *lockh)
3289 if (unlikely(mode == LCK_GROUP))
3290 ldlm_lock_decref_and_cancel(lockh, mode);
3292 ldlm_lock_decref(lockh, mode);
3297 static int osc_cancel_unused(struct obd_export *exp,
3298 struct lov_stripe_md *lsm, int flags,
3301 struct obd_device *obd = class_exp2obd(exp);
3302 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
3305 res_id.name[0] = lsm->lsm_object_id;
3306 res_id.name[2] = lsm->lsm_object_gr;
3310 return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
3313 static int osc_join_lru(struct obd_export *exp,
3314 struct lov_stripe_md *lsm, int join)
3316 struct obd_device *obd = class_exp2obd(exp);
3317 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
3320 res_id.name[0] = lsm->lsm_object_id;
3321 res_id.name[2] = lsm->lsm_object_gr;
3325 return ldlm_cli_join_lru(obd->obd_namespace, resp, join);
3328 static int osc_statfs_interpret(struct ptlrpc_request *req,
3329 struct osc_async_args *aa, int rc)
3331 struct obd_statfs *msfs;
3337 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3339 GOTO(out, rc = -EPROTO);
3342 *aa->aa_oi->oi_osfs = *msfs;
3344 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3348 static int osc_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
3349 __u64 max_age, struct ptlrpc_request_set *rqset)
3351 struct ptlrpc_request *req;
3352 struct osc_async_args *aa;
3356 /* We could possibly pass max_age in the request (as an absolute
3357 * timestamp or a "seconds.usec ago") so the target can avoid doing
3358 * extra calls into the filesystem if that isn't necessary (e.g.
3359 * during mount that would help a bit). Having relative timestamps
3360 * is not so great if request processing is slow, while absolute
3361 * timestamps are not ideal because they need time synchronization. */
3362 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3366 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3368 ptlrpc_request_free(req);
3371 ptlrpc_request_set_replen(req);
3372 req->rq_request_portal = OST_CREATE_PORTAL;
3373 ptlrpc_at_set_req_timeout(req);
3375 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3376 /* procfs requests must not block waiting for a reply, to avoid deadlock */
3377 req->rq_no_resend = 1;
3378 req->rq_no_delay = 1;
3381 req->rq_interpret_reply = osc_statfs_interpret;
3382 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3383 aa = (struct osc_async_args *)&req->rq_async_args;
3386 ptlrpc_set_add_req(rqset, req);
3390 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
3391 __u64 max_age, __u32 flags)
3393 struct obd_statfs *msfs;
3394 struct ptlrpc_request *req;
3395 struct obd_import *imp = NULL;
3399 /* Since the request might also come from lprocfs, we need to
3400 * sync this with client_disconnect_export (bug 15684) */
3401 down_read(&obd->u.cli.cl_sem);
3402 if (obd->u.cli.cl_import)
3403 imp = class_import_get(obd->u.cli.cl_import);
3404 up_read(&obd->u.cli.cl_sem);
3408 /* We could possibly pass max_age in the request (as an absolute
3409 * timestamp or a "seconds.usec ago") so the target can avoid doing
3410 * extra calls into the filesystem if that isn't necessary (e.g.
3411 * during mount that would help a bit). Having relative timestamps
3412 * is not so great if request processing is slow, while absolute
3413 * timestamps are not ideal because they need time synchronization. */
3414 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3416 class_import_put(imp);
3421 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3423 ptlrpc_request_free(req);
3426 ptlrpc_request_set_replen(req);
3427 req->rq_request_portal = OST_CREATE_PORTAL;
3428 ptlrpc_at_set_req_timeout(req);
3430 if (flags & OBD_STATFS_NODELAY) {
3431 /* procfs requests must not block waiting for a reply, to avoid deadlock */
3432 req->rq_no_resend = 1;
3433 req->rq_no_delay = 1;
3436 rc = ptlrpc_queue_wait(req);
3440 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3442 GOTO(out, rc = -EPROTO);
3449 ptlrpc_req_finished(req);
3453 /* Retrieve object striping information.
3455 * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
3456 * the maximum number of OST indices which will fit in the user buffer.
3457 * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
3459 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
3461 struct lov_user_md lum, *lumk;
3462 int rc = 0, lum_size;
3468 if (copy_from_user(&lum, lump, sizeof(lum)))
3471 if (lum.lmm_magic != LOV_USER_MAGIC)
3474 if (lum.lmm_stripe_count > 0) {
3475 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
3476 OBD_ALLOC(lumk, lum_size);
3480 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
3481 lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
3483 lum_size = sizeof(lum);
3487 lumk->lmm_object_id = lsm->lsm_object_id;
3488 lumk->lmm_object_gr = lsm->lsm_object_gr;
3489 lumk->lmm_stripe_count = 1;
3491 if (copy_to_user(lump, lumk, lum_size))
3495 OBD_FREE(lumk, lum_size);
3501 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3502 void *karg, void *uarg)
3504 struct obd_device *obd = exp->exp_obd;
3505 struct obd_ioctl_data *data = karg;
3509 if (!try_module_get(THIS_MODULE)) {
3510 CERROR("Can't get module. Is it alive?");
3514 case OBD_IOC_LOV_GET_CONFIG: {
3516 struct lov_desc *desc;
3517 struct obd_uuid uuid;
3521 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
3522 GOTO(out, err = -EINVAL);
3524 data = (struct obd_ioctl_data *)buf;
3526 if (sizeof(*desc) > data->ioc_inllen1) {
3527 obd_ioctl_freedata(buf, len);
3528 GOTO(out, err = -EINVAL);
3531 if (data->ioc_inllen2 < sizeof(uuid)) {
3532 obd_ioctl_freedata(buf, len);
3533 GOTO(out, err = -EINVAL);
3536 desc = (struct lov_desc *)data->ioc_inlbuf1;
3537 desc->ld_tgt_count = 1;
3538 desc->ld_active_tgt_count = 1;
3539 desc->ld_default_stripe_count = 1;
3540 desc->ld_default_stripe_size = 0;
3541 desc->ld_default_stripe_offset = 0;
3542 desc->ld_pattern = 0;
3543 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
3545 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
3547 err = copy_to_user((void *)uarg, buf, len);
3550 obd_ioctl_freedata(buf, len);
3553 case LL_IOC_LOV_SETSTRIPE:
3554 err = obd_alloc_memmd(exp, karg);
3558 case LL_IOC_LOV_GETSTRIPE:
3559 err = osc_getstripe(karg, uarg);
3561 case OBD_IOC_CLIENT_RECOVER:
3562 err = ptlrpc_recover_import(obd->u.cli.cl_import,
3567 case IOC_OSC_SET_ACTIVE:
3568 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
3571 case OBD_IOC_POLL_QUOTACHECK:
3572 err = lquota_poll_check(quota_interface, exp,
3573 (struct if_quotacheck *)karg);
3576 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
3577 cmd, cfs_curproc_comm());
3578 GOTO(out, err = -ENOTTY);
3581 module_put(THIS_MODULE);
3585 static int osc_get_info(struct obd_export *exp, obd_count keylen,
3586 void *key, __u32 *vallen, void *val)
3589 if (!vallen || !val)
3592 if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
3593 __u32 *stripe = val;
3594 *vallen = sizeof(*stripe);
3597 } else if (KEY_IS(KEY_LAST_ID)) {
3598 struct ptlrpc_request *req;
3603 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3604 &RQF_OST_GET_INFO_LAST_ID);
3608 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3609 RCL_CLIENT, keylen);
3610 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3612 ptlrpc_request_free(req);
3616 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3617 memcpy(tmp, key, keylen);
3619 ptlrpc_request_set_replen(req);
3620 rc = ptlrpc_queue_wait(req);
3624 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
3626 GOTO(out, rc = -EPROTO);
3628 *((obd_id *)val) = *reply;
3630 ptlrpc_req_finished(req);
3636 static int osc_setinfo_mds_conn_interpret(struct ptlrpc_request *req,
3639 struct llog_ctxt *ctxt;
3640 struct obd_import *imp = req->rq_import;
3646 ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
3649 rc = llog_initiator_connect(ctxt);
3651 CERROR("cannot establish connection for "
3652 "ctxt %p: %d\n", ctxt, rc);
3655 llog_ctxt_put(ctxt);
3656 spin_lock(&imp->imp_lock);
3657 imp->imp_server_timeout = 1;
3658 imp->imp_pingable = 1;
3659 spin_unlock(&imp->imp_lock);
3660 CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
3665 static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
3666 void *key, obd_count vallen, void *val,
3667 struct ptlrpc_request_set *set)
3669 struct ptlrpc_request *req;
3670 struct obd_device *obd = exp->exp_obd;
3671 struct obd_import *imp = class_exp2cliimp(exp);
3676 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3678 if (KEY_IS(KEY_NEXT_ID)) {
3679 if (vallen != sizeof(obd_id))
3683 obd->u.cli.cl_oscc.oscc_next_id = *((obd_id*)val) + 1;
3684 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
3685 exp->exp_obd->obd_name,
3686 obd->u.cli.cl_oscc.oscc_next_id);
3691 if (KEY_IS(KEY_UNLINKED)) {
3692 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3693 spin_lock(&oscc->oscc_lock);
3694 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3695 spin_unlock(&oscc->oscc_lock);
3699 if (KEY_IS(KEY_INIT_RECOV)) {
3700 if (vallen != sizeof(int))
3702 spin_lock(&imp->imp_lock);
3703 imp->imp_initial_recov = *(int *)val;
3704 spin_unlock(&imp->imp_lock);
3705 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
3706 exp->exp_obd->obd_name,
3707 imp->imp_initial_recov);
3711 if (KEY_IS(KEY_CHECKSUM)) {
3712 if (vallen != sizeof(int))
3714 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3718 if (KEY_IS(KEY_FLUSH_CTX)) {
3719 sptlrpc_import_flush_my_ctx(imp);
3726 /* We pass all other commands directly to OST. Since nobody calls osc
3727 methods directly and everybody is supposed to go through LOV, we
3728 assume lov checked invalid values for us.
3729 The only recognised values so far are evict_by_nid and mds_conn.
3730 Even if something bad goes through, we'd get a -EINVAL from OST
3731 anyway. */
3734 req = ptlrpc_request_alloc(imp, &RQF_OST_SET_INFO);
3738 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3739 RCL_CLIENT, keylen);
3740 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3741 RCL_CLIENT, vallen);
3742 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3744 ptlrpc_request_free(req);
3748 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3749 memcpy(tmp, key, keylen);
3750 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
3751 memcpy(tmp, val, vallen);
3753 if (KEY_IS(KEY_MDS_CONN)) {
3754 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3756 oscc->oscc_oa.o_gr = (*(__u32 *)val);
3757 oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
3758 LASSERT(oscc->oscc_oa.o_gr > 0);
3759 req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
3762 ptlrpc_request_set_replen(req);
3763 ptlrpc_set_add_req(set, req);
3764 ptlrpc_check_set(set);
3770 static struct llog_operations osc_size_repl_logops = {
3771 lop_cancel: llog_obd_repl_cancel
3774 static struct llog_operations osc_mds_ost_orig_logops;
3775 static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
3776 struct obd_device *tgt, int count,
3777 struct llog_catid *catid, struct obd_uuid *uuid)
3782 LASSERT(olg == &obd->obd_olg);
3783 spin_lock(&obd->obd_dev_lock);
3784 if (osc_mds_ost_orig_logops.lop_setup != llog_obd_origin_setup) {
3785 osc_mds_ost_orig_logops = llog_lvfs_ops;
3786 osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
3787 osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
3788 osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
3789 osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
3791 spin_unlock(&obd->obd_dev_lock);
3793 rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, count,
3794 &catid->lci_logid, &osc_mds_ost_orig_logops);
3796 CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n");
3800 rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, count,
3801 NULL, &osc_size_repl_logops);
3803 CERROR("failed LLOG_SIZE_REPL_CTXT\n");
3806 CERROR("osc '%s' tgt '%s' cnt %d catid %p rc=%d\n",
3807 obd->obd_name, tgt->obd_name, count, catid, rc);
3808 CERROR("logid "LPX64":0x%x\n",
3809 catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
3814 static int osc_llog_finish(struct obd_device *obd, int count)
3816 struct llog_ctxt *ctxt;
3817 int rc = 0, rc2 = 0;
3820 ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
3822 rc = llog_cleanup(ctxt);
3824 ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
3826 rc2 = llog_cleanup(ctxt);
3833 static int osc_reconnect(const struct lu_env *env,
3834 struct obd_export *exp, struct obd_device *obd,
3835 struct obd_uuid *cluuid,
3836 struct obd_connect_data *data)
3838 struct client_obd *cli = &obd->u.cli;
3840 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3843 client_obd_list_lock(&cli->cl_loi_list_lock);
3844 data->ocd_grant = cli->cl_avail_grant ?:
3845 2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
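/* e.g. with 4 KiB pages and an illustrative cl_max_pages_per_rpc of
 * 256, a reconnecting client with no grant left asks for
 * 2 * 256 * 4096 = 2 MiB, enough for two full-sized rpcs. */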
3846 lost_grant = cli->cl_lost_grant;
3847 cli->cl_lost_grant = 0;
3848 client_obd_list_unlock(&cli->cl_loi_list_lock);
3850 CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
3851 "cl_lost_grant: %ld\n", data->ocd_grant,
3852 cli->cl_avail_grant, lost_grant);
3853 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
3854 " ocd_grant: %d\n", data->ocd_connect_flags,
3855 data->ocd_version, data->ocd_grant);
3861 static int osc_disconnect(struct obd_export *exp)
3863 struct obd_device *obd = class_exp2obd(exp);
3864 struct llog_ctxt *ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
3867 if (obd->u.cli.cl_conn_count == 1)
3868 /* flush any remaining cancel messages out to the target */
3869 llog_sync(ctxt, exp);
3871 llog_ctxt_put(ctxt);
3873 rc = client_disconnect_export(exp);
3877 static int osc_import_event(struct obd_device *obd,
3878 struct obd_import *imp,
3879 enum obd_import_event event)
3881 struct client_obd *cli;
3885 LASSERT(imp->imp_obd == obd);
3888 case IMP_EVENT_DISCON: {
3889 /* Only do this on the MDS OSCs */
3890 if (imp->imp_server_timeout) {
3891 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3893 spin_lock(&oscc->oscc_lock);
3894 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
3895 spin_unlock(&oscc->oscc_lock);
3898 client_obd_list_lock(&cli->cl_loi_list_lock);
3899 cli->cl_avail_grant = 0;
3900 cli->cl_lost_grant = 0;
3901 client_obd_list_unlock(&cli->cl_loi_list_lock);
3904 case IMP_EVENT_INACTIVE: {
3905 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
3908 case IMP_EVENT_INVALIDATE: {
3909 struct ldlm_namespace *ns = obd->obd_namespace;
3913 client_obd_list_lock(&cli->cl_loi_list_lock);
3914 /* all pages go to failing rpcs due to the invalid import */
3915 osc_check_rpcs(cli);
3916 client_obd_list_unlock(&cli->cl_loi_list_lock);
3918 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3922 case IMP_EVENT_ACTIVE: {
3923 /* Only do this on the MDS OSCs */
3924 if (imp->imp_server_timeout) {
3925 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3927 spin_lock(&oscc->oscc_lock);
3928 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3929 spin_unlock(&oscc->oscc_lock);
3931 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
3934 case IMP_EVENT_OCD: {
3935 struct obd_connect_data *ocd = &imp->imp_connect_data;
3937 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3938 osc_init_grant(&obd->u.cli, ocd);
3941 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3942 imp->imp_client->cli_request_portal =OST_REQUEST_PORTAL;
3944 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
3948 CERROR("Unknown import event %d\n", event);
3954 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3960 rc = ptlrpcd_addref();
3964 rc = client_obd_setup(obd, lcfg);
3968 struct lprocfs_static_vars lvars = { 0 };
3969 struct client_obd *cli = &obd->u.cli;
3971 lprocfs_osc_init_vars(&lvars);
3972 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
3973 lproc_osc_attach_seqstat(obd);
3974 sptlrpc_lprocfs_cliobd_attach(obd);
3975 ptlrpc_lprocfs_register_obd(obd);
3979 /* We need to allocate a few more requests, because
3980 brw_interpret tries to create new requests before freeing
3981 previous ones. Ideally we want 2x max_rpcs_in_flight
3982 reserved, but that might be too much wasted RAM in
3983 practice, so 2 extra is just a guess that should still work. */
3984 cli->cl_import->imp_rq_pool =
3985 ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
3987 ptlrpc_add_rqs_to_pool);
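/* e.g. with an illustrative cl_max_rpcs_in_flight of 8, the pool
 * preallocates 8 + 2 = 10 requests. */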
3988 cli->cl_cache = cache_create(obd);
3989 if (!cli->cl_cache) {
3998 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
4004 case OBD_CLEANUP_EARLY: {
4005 struct obd_import *imp;
4006 imp = obd->u.cli.cl_import;
4007 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
4008 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
4009 ptlrpc_deactivate_import(imp);
4010 spin_lock(&imp->imp_lock);
4011 imp->imp_pingable = 0;
4012 spin_unlock(&imp->imp_lock);
4015 case OBD_CLEANUP_EXPORTS: {
4016 /* If we set up but never connected, the
4017 client import will not have been cleaned. */
4018 if (obd->u.cli.cl_import) {
4019 struct obd_import *imp;
4020 imp = obd->u.cli.cl_import;
4021 CDEBUG(D_CONFIG, "%s: client import never connected\n",
4023 ptlrpc_invalidate_import(imp);
4024 ptlrpc_free_rq_pool(imp->imp_rq_pool);
4025 class_destroy_import(imp);
4026 obd->u.cli.cl_import = NULL;
4028 rc = obd_llog_finish(obd, 0);
4030 CERROR("failed to cleanup llogging subsystems\n");
4037 int osc_cleanup(struct obd_device *obd)
4039 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4043 ptlrpc_lprocfs_unregister_obd(obd);
4044 lprocfs_obd_cleanup(obd);
4046 spin_lock(&oscc->oscc_lock);
4047 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
4048 oscc->oscc_flags |= OSCC_FLAG_EXITING;
4049 spin_unlock(&oscc->oscc_lock);
4051 /* free memory of osc quota cache */
4052 lquota_cleanup(quota_interface, obd);
4054 cache_destroy(obd->u.cli.cl_cache);
4055 rc = client_obd_cleanup(obd);
4061 static int osc_register_page_removal_cb(struct obd_export *exp,
4062 obd_page_removal_cb_t func,
4063 obd_pin_extent_cb pin_cb)
4065 return cache_add_extent_removal_cb(exp->exp_obd->u.cli.cl_cache, func,
4069 static int osc_unregister_page_removal_cb(struct obd_export *exp,
4070 obd_page_removal_cb_t func)
4072 return cache_del_extent_removal_cb(exp->exp_obd->u.cli.cl_cache, func);
4075 static int osc_register_lock_cancel_cb(struct obd_export *exp,
4076 obd_lock_cancel_cb cb)
4078 LASSERT(exp->exp_obd->u.cli.cl_ext_lock_cancel_cb == NULL);
4080 exp->exp_obd->u.cli.cl_ext_lock_cancel_cb = cb;
4084 static int osc_unregister_lock_cancel_cb(struct obd_export *exp,
4085 obd_lock_cancel_cb cb)
4087 if (exp->exp_obd->u.cli.cl_ext_lock_cancel_cb != cb) {
4088 CERROR("Unregistering cancel cb %p, while only %p was "
4090 exp->exp_obd->u.cli.cl_ext_lock_cancel_cb);
4094 exp->exp_obd->u.cli.cl_ext_lock_cancel_cb = NULL;
4098 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
4100 struct lustre_cfg *lcfg = buf;
4101 struct lprocfs_static_vars lvars = { 0 };
4104 lprocfs_osc_init_vars(&lvars);
4106 switch (lcfg->lcfg_command) {
4107 case LCFG_SPTLRPC_CONF:
4108 rc = sptlrpc_cliobd_process_config(obd, lcfg);
4111 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
4119 struct obd_ops osc_obd_ops = {
4120 .o_owner = THIS_MODULE,
4121 .o_setup = osc_setup,
4122 .o_precleanup = osc_precleanup,
4123 .o_cleanup = osc_cleanup,
4124 .o_add_conn = client_import_add_conn,
4125 .o_del_conn = client_import_del_conn,
4126 .o_connect = client_connect_import,
4127 .o_reconnect = osc_reconnect,
4128 .o_disconnect = osc_disconnect,
4129 .o_statfs = osc_statfs,
4130 .o_statfs_async = osc_statfs_async,
4131 .o_packmd = osc_packmd,
4132 .o_unpackmd = osc_unpackmd,
4133 .o_precreate = osc_precreate,
4134 .o_create = osc_create,
4135 .o_destroy = osc_destroy,
4136 .o_getattr = osc_getattr,
4137 .o_getattr_async = osc_getattr_async,
4138 .o_setattr = osc_setattr,
4139 .o_setattr_async = osc_setattr_async,
4141 .o_brw_async = osc_brw_async,
4142 .o_prep_async_page = osc_prep_async_page,
4143 .o_reget_short_lock = osc_reget_short_lock,
4144 .o_release_short_lock = osc_release_short_lock,
4145 .o_queue_async_io = osc_queue_async_io,
4146 .o_set_async_flags = osc_set_async_flags,
4147 .o_queue_group_io = osc_queue_group_io,
4148 .o_trigger_group_io = osc_trigger_group_io,
4149 .o_teardown_async_page = osc_teardown_async_page,
4150 .o_punch = osc_punch,
4152 .o_enqueue = osc_enqueue,
4153 .o_match = osc_match,
4154 .o_change_cbdata = osc_change_cbdata,
4155 .o_cancel = osc_cancel,
4156 .o_cancel_unused = osc_cancel_unused,
4157 .o_join_lru = osc_join_lru,
4158 .o_iocontrol = osc_iocontrol,
4159 .o_get_info = osc_get_info,
4160 .o_set_info_async = osc_set_info_async,
4161 .o_import_event = osc_import_event,
4162 .o_llog_init = osc_llog_init,
4163 .o_llog_finish = osc_llog_finish,
4164 .o_process_config = osc_process_config,
4165 .o_register_page_removal_cb = osc_register_page_removal_cb,
4166 .o_unregister_page_removal_cb = osc_unregister_page_removal_cb,
4167 .o_register_lock_cancel_cb = osc_register_lock_cancel_cb,
4168 .o_unregister_lock_cancel_cb = osc_unregister_lock_cancel_cb,
4170 int __init osc_init(void)
4172 struct lprocfs_static_vars lvars = { 0 };
4176 lprocfs_osc_init_vars(&lvars);
4178 request_module("lquota");
4179 quota_interface = PORTAL_SYMBOL_GET(osc_quota_interface);
4180 lquota_init(quota_interface);
4181 init_obd_quota_ops(quota_interface, &osc_obd_ops);
4183 rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
4184 LUSTRE_OSC_NAME, NULL);
4186 if (quota_interface)
4187 PORTAL_SYMBOL_PUT(osc_quota_interface);
4195 static void /*__exit*/ osc_exit(void)
4197 lquota_exit(quota_interface);
4198 if (quota_interface)
4199 PORTAL_SYMBOL_PUT(osc_quota_interface);
4201 class_unregister_type(LUSTRE_OSC_NAME);
4204 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
4205 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
4206 MODULE_LICENSE("GPL");
4208 cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);