1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
5 * Author Peter Braam <braam@clusterfs.com>
7 * This file is part of the Lustre file system, http://www.lustre.org
8 * Lustre is a trademark of Cluster File Systems, Inc.
10 * You may have signed or agreed to another license before downloading
11 * this software. If so, you are bound by the terms and conditions
12 * of that agreement, and the following does not apply to you. See the
13 * LICENSE file included with this distribution for more information.
15 * If you did not agree to a different license, then this copy of Lustre
16 * is open source software; you can redistribute it and/or modify it
17 * under the terms of version 2 of the GNU General Public License as
18 * published by the Free Software Foundation.
20 * In either case, Lustre is distributed in the hope that it will be
21 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
22 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * license text for more details.
25 * For testing and management it is treated as an obd_device,
26 * although it does not export a full OBD method table (the
27 * requests are coming in over the wire, so object target modules
28 * do not have a full method table.)
33 # define EXPORT_SYMTAB
35 #define DEBUG_SUBSYSTEM S_OSC
37 #include <libcfs/libcfs.h>
40 # include <liblustre.h>
43 #include <lustre_dlm.h>
44 #include <lustre_net.h>
45 #include <lustre/lustre_user.h>
46 #include <obd_cksum.h>
54 #include <lustre_ha.h>
55 #include <lprocfs_status.h>
56 #include <lustre_log.h>
57 #include <lustre_debug.h>
58 #include <lustre_param.h>
59 #include <lustre_cache.h>
60 #include "osc_internal.h"
62 static quota_interface_t *quota_interface = NULL;
63 extern quota_interface_t osc_quota_interface;
65 static void osc_release_ppga(struct brw_page **ppga, obd_count count);
66 static int brw_interpret(struct ptlrpc_request *request, void *data, int rc);
67 int osc_cleanup(struct obd_device *obd);
69 /* Pack OSC object metadata for disk storage (LE byte order). */
70 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
71 struct lov_stripe_md *lsm)
76 lmm_size = sizeof(**lmmp);
81 OBD_FREE(*lmmp, lmm_size);
87 OBD_ALLOC(*lmmp, lmm_size);
93 LASSERT(lsm->lsm_object_id);
94 LASSERT(lsm->lsm_object_gr);
95 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
96 (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
102 /* Unpack OSC object metadata from disk storage (LE byte order). */
103 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
104 struct lov_mds_md *lmm, int lmm_bytes)
110 if (lmm_bytes < sizeof (*lmm)) {
111 CERROR("lov_mds_md too small: %d, need %d\n",
112 lmm_bytes, (int)sizeof(*lmm));
115 /* XXX LOV_MAGIC etc check? */
117 if (lmm->lmm_object_id == 0) {
118 CERROR("lov_mds_md: zero lmm_object_id\n");
123 lsm_size = lov_stripe_md_size(1);
127 if (*lsmp != NULL && lmm == NULL) {
128 OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
129 OBD_FREE(*lsmp, lsm_size);
135 OBD_ALLOC(*lsmp, lsm_size);
138 OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
139 if ((*lsmp)->lsm_oinfo[0] == NULL) {
140 OBD_FREE(*lsmp, lsm_size);
143 loi_init((*lsmp)->lsm_oinfo[0]);
147 /* XXX zero *lsmp? */
148 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
149 (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
150 LASSERT((*lsmp)->lsm_object_id);
151 LASSERT((*lsmp)->lsm_object_gr);
154 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
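/*
 * Illustrative sketch: osc_packmd() and osc_unpackmd() above must
 * round-trip the object identifiers through the little-endian
 * disk/wire format, so (assuming the usual byte-order helpers) the
 * invariant they maintain is simply:
 */
#if 0
        struct lov_mds_md lmm;
        __u64 id = 0x1234ULL;

        lmm.lmm_object_id = cpu_to_le64(id);           /* osc_packmd() */
        LASSERT(le64_to_cpu(lmm.lmm_object_id) == id); /* osc_unpackmd() */
#endif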
159 static inline void osc_pack_capa(struct ptlrpc_request *req,
160 struct ost_body *body, void *capa)
162 struct obd_capa *oc = (struct obd_capa *)capa;
163 struct lustre_capa *c;
168 c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
171 body->oa.o_valid |= OBD_MD_FLOSSCAPA;
172 DEBUG_CAPA(D_SEC, c, "pack");
175 static inline void osc_pack_req_body(struct ptlrpc_request *req,
176 struct obd_info *oinfo)
178 struct ost_body *body;
180 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
183 body->oa = *oinfo->oi_oa;
184 osc_pack_capa(req, body, oinfo->oi_capa);
187 static inline void osc_set_capa_size(struct ptlrpc_request *req,
188 const struct req_msg_field *field,
192 req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
194 /* it is already calculated as sizeof(struct obd_capa) */
198 static int osc_getattr_interpret(struct ptlrpc_request *req,
199 struct osc_async_args *aa, int rc)
201 struct ost_body *body;
207 body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
208 lustre_swab_ost_body);
210 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
211 memcpy(aa->aa_oi->oi_oa, &body->oa, sizeof(*aa->aa_oi->oi_oa));
213 /* This should really be sent by the OST */
214 aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
215 aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
217 CDEBUG(D_INFO, "can't unpack ost_body\n");
219 aa->aa_oi->oi_oa->o_valid = 0;
222 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
226 static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
227 struct ptlrpc_request_set *set)
229 struct ptlrpc_request *req;
230 struct osc_async_args *aa;
234 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
238 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
239 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
241 ptlrpc_request_free(req);
245 osc_pack_req_body(req, oinfo);
247 ptlrpc_request_set_replen(req);
248 req->rq_interpret_reply = osc_getattr_interpret;
250 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
251 aa = (struct osc_async_args *)&req->rq_async_args;
254 ptlrpc_set_add_req(set, req);
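/*
 * Caller-side sketch (illustrative, assuming the standard ptlrpc set
 * helpers): queue the getattr on a request set and wait on the set;
 * osc_getattr_interpret() then fills in oinfo and calls oi_cb_up().
 */
#if 0
        struct ptlrpc_request_set *set = ptlrpc_prep_set();
        int rc;

        if (set == NULL)
                RETURN(-ENOMEM);
        rc = osc_getattr_async(exp, oinfo, set);
        if (rc == 0)
                rc = ptlrpc_set_wait(set);  /* waits for all queued reqs */
        ptlrpc_set_destroy(set);
#endif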
258 static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
260 struct ptlrpc_request *req;
261 struct ost_body *body;
265 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
269 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
270 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
272 ptlrpc_request_free(req);
276 osc_pack_req_body(req, oinfo);
278 ptlrpc_request_set_replen(req);
280 rc = ptlrpc_queue_wait(req);
284 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
286 GOTO(out, rc = -EPROTO);
288 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
289 *oinfo->oi_oa = body->oa;
291 /* This should really be sent by the OST */
292 oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
293 oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
297 ptlrpc_req_finished(req);
301 static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
302 struct obd_trans_info *oti)
304 struct ptlrpc_request *req;
305 struct ost_body *body;
309 LASSERT(!(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP) ||
310 oinfo->oi_oa->o_gr > 0);
312 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
316 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
317 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
319 ptlrpc_request_free(req);
323 osc_pack_req_body(req, oinfo);
325 ptlrpc_request_set_replen(req);
328 rc = ptlrpc_queue_wait(req);
332 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
334 GOTO(out, rc = -EPROTO);
336 *oinfo->oi_oa = body->oa;
340 ptlrpc_req_finished(req);
344 static int osc_setattr_interpret(struct ptlrpc_request *req,
345 struct osc_async_args *aa, int rc)
347 struct ost_body *body;
353 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
355 GOTO(out, rc = -EPROTO);
357 *aa->aa_oi->oi_oa = body->oa;
359 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
363 static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
364 struct obd_trans_info *oti,
365 struct ptlrpc_request_set *rqset)
367 struct ptlrpc_request *req;
368 struct osc_async_args *aa;
372 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
376 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
377 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
379 ptlrpc_request_free(req);
383 osc_pack_req_body(req, oinfo);
385 ptlrpc_request_set_replen(req);
387 if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
389 *obdo_logcookie(oinfo->oi_oa) = *oti->oti_logcookies;
392 /* do MDS-to-OST setattr asynchronously */
394 /* Do not wait for response. */
395 ptlrpcd_add_req(req);
397 req->rq_interpret_reply = osc_setattr_interpret;
399 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
400 aa = (struct osc_async_args *)&req->rq_async_args;
403 ptlrpc_set_add_req(rqset, req);
409 int osc_real_create(struct obd_export *exp, struct obdo *oa,
410 struct lov_stripe_md **ea, struct obd_trans_info *oti)
412 struct ptlrpc_request *req;
413 struct ost_body *body;
414 struct lov_stripe_md *lsm;
423 rc = obd_alloc_memmd(exp, &lsm);
428 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
430 GOTO(out, rc = -ENOMEM);
432 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
434 ptlrpc_request_free(req);
438 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
442 ptlrpc_request_set_replen(req);
444 if (oa->o_valid & OBD_MD_FLINLINE) {
445 LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
446 oa->o_flags == OBD_FL_DELORPHAN);
448 "delorphan from OST integration");
449 /* Don't resend the delorphan req */
450 req->rq_no_resend = req->rq_no_delay = 1;
453 rc = ptlrpc_queue_wait(req);
457 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
459 GOTO(out_req, rc = -EPROTO);
463 /* This should really be sent by the OST */
464 oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
465 oa->o_valid |= OBD_MD_FLBLKSZ;
467 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
468 * have valid lsm_oinfo data structs, so don't go touching that.
469 * This needs to be fixed in a big way.
471 lsm->lsm_object_id = oa->o_id;
472 lsm->lsm_object_gr = oa->o_gr;
476 oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
478 if (oa->o_valid & OBD_MD_FLCOOKIE) {
479 if (!oti->oti_logcookies)
480 oti_alloc_cookies(oti, 1);
481 *oti->oti_logcookies = *obdo_logcookie(oa);
485 CDEBUG(D_HA, "transno: "LPD64"\n",
486 lustre_msg_get_transno(req->rq_repmsg));
488 ptlrpc_req_finished(req);
491 obd_free_memmd(exp, &lsm);
495 static int osc_punch_interpret(struct ptlrpc_request *req,
496 struct osc_async_args *aa, int rc)
498 struct ost_body *body;
504 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
506 GOTO(out, rc = -EPROTO);
508 *aa->aa_oi->oi_oa = body->oa;
510 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
514 static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
515 struct obd_trans_info *oti,
516 struct ptlrpc_request_set *rqset)
518 struct ptlrpc_request *req;
519 struct osc_async_args *aa;
520 struct ost_body *body;
525 CDEBUG(D_INFO, "oa NULL\n");
529 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
533 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
534 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
536 ptlrpc_request_free(req);
539 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
540 ptlrpc_at_set_req_timeout(req);
541 osc_pack_req_body(req, oinfo);
543 /* overload the size and blocks fields in the oa with start/end */
544 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
546 body->oa.o_size = oinfo->oi_policy.l_extent.start;
547 body->oa.o_blocks = oinfo->oi_policy.l_extent.end;
548 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
549 ptlrpc_request_set_replen(req);
552 req->rq_interpret_reply = osc_punch_interpret;
553 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
554 aa = (struct osc_async_args *)&req->rq_async_args;
556 ptlrpc_set_add_req(rqset, req);
561 static int osc_sync(struct obd_export *exp, struct obdo *oa,
562 struct lov_stripe_md *md, obd_size start, obd_size end,
565 struct ptlrpc_request *req;
566 struct ost_body *body;
571 CDEBUG(D_INFO, "oa NULL\n");
575 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
579 osc_set_capa_size(req, &RMF_CAPA1, capa);
580 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
582 ptlrpc_request_free(req);
586 /* overload the size and blocks fields in the oa with start/end */
587 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
590 body->oa.o_size = start;
591 body->oa.o_blocks = end;
592 body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
593 osc_pack_capa(req, body, capa);
595 ptlrpc_request_set_replen(req);
597 rc = ptlrpc_queue_wait(req);
601 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
603 GOTO(out, rc = -EPROTO);
609 ptlrpc_req_finished(req);
613 /* Find and locally cancel the locks matching @mode on the resource derived
614 * from @oa.  Matched locks are added to the @cancels list.  Returns the
615 * number of locks added to @cancels. */
616 static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
617 struct list_head *cancels, ldlm_mode_t mode,
620 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
621 struct ldlm_res_id res_id = { .name = { oa->o_id, 0, oa->o_gr, 0 } };
622 struct ldlm_resource *res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
629 count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
630 lock_flags, 0, NULL);
631 ldlm_resource_putref(res);
635 static int osc_destroy_interpret(struct ptlrpc_request *req, void *data,
638 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
640 atomic_dec(&cli->cl_destroy_in_flight);
641 cfs_waitq_signal(&cli->cl_destroy_waitq);
645 static int osc_can_send_destroy(struct client_obd *cli)
647 if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
648 cli->cl_max_rpcs_in_flight) {
649 /* The destroy request can be sent */
652 if (atomic_dec_return(&cli->cl_destroy_in_flight) <
653 cli->cl_max_rpcs_in_flight) {
655 * The counter has been modified between the two atomic
656 * operations; another destroy just completed, so wake up
657 * a possible waiter. */
658 cfs_waitq_signal(&cli->cl_destroy_waitq);
663 /* Destroy requests can always be async on the client, and we don't even
664 * really care about the return code since the client cannot do anything at
665 * all about a destroy failure.
666 * When the MDS is unlinking a filename, it saves the file objects into a
667 * recovery llog, and these object records are cancelled when the OST reports
668 * they were destroyed and synced to disk (i.e. transaction committed).
669 * If the client dies, or the OST is down when the object should be destroyed,
670 * the records are not cancelled, and when the OST next reconnects to the MDS,
671 * it will retrieve the llog unlink logs and then send the log cancellation
672 * cookies to the MDS after committing the destroy transactions. */
673 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
674 struct lov_stripe_md *ea, struct obd_trans_info *oti,
675 struct obd_export *md_export)
677 struct client_obd *cli = &exp->exp_obd->u.cli;
678 struct ptlrpc_request *req;
679 struct ost_body *body;
680 CFS_LIST_HEAD(cancels);
685 CDEBUG(D_INFO, "oa NULL\n");
689 count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
690 LDLM_FL_DISCARD_DATA);
692 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
694 ldlm_lock_list_put(&cancels, l_bl_ast, count);
698 rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
701 ptlrpc_request_free(req);
705 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
706 req->rq_interpret_reply = osc_destroy_interpret;
707 ptlrpc_at_set_req_timeout(req);
709 if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
710 memcpy(obdo_logcookie(oa), oti->oti_logcookies,
711 sizeof(*oti->oti_logcookies));
712 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
716 ptlrpc_request_set_replen(req);
718 if (!osc_can_send_destroy(cli)) {
719 struct l_wait_info lwi = { 0 };
722 * Wait until the number of in-flight destroy RPCs drops
723 * below max_rpcs_in_flight.
725 l_wait_event_exclusive(cli->cl_destroy_waitq,
726 osc_can_send_destroy(cli), &lwi);
729 /* Do not wait for response */
730 ptlrpcd_add_req(req);
734 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
737 obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
739 LASSERT(!(oa->o_valid & bits));
742 client_obd_list_lock(&cli->cl_loi_list_lock);
743 oa->o_dirty = cli->cl_dirty;
744 if (cli->cl_dirty > cli->cl_dirty_max) {
745 CERROR("dirty %lu > dirty_max %lu\n",
746 cli->cl_dirty, cli->cl_dirty_max);
748 } else if (atomic_read(&obd_dirty_pages) > obd_max_dirty_pages) {
749 CERROR("dirty %d > system dirty_max %d\n",
750 atomic_read(&obd_dirty_pages), obd_max_dirty_pages);
752 } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
753 CERROR("dirty %lu - dirty_max %lu too big???\n",
754 cli->cl_dirty, cli->cl_dirty_max);
757 long max_in_flight = (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT)*
758 (cli->cl_max_rpcs_in_flight + 1);
759 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
761 oa->o_grant = cli->cl_avail_grant;
762 oa->o_dropped = cli->cl_lost_grant;
763 cli->cl_lost_grant = 0;
764 client_obd_list_unlock(&cli->cl_loi_list_lock);
765 CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
766 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
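/* Worked example (illustrative): with 256 pages per RPC, 4 KiB pages and
 * 8 RPCs in flight, max_in_flight = 256 * 4096 * (8 + 1) = 9 MiB, so
 * o_undirty asks the OST to keep at least that much grant available
 * (more, if cl_dirty_max is larger). */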
769 /* caller must hold loi_list_lock */
770 static void osc_consume_write_grant(struct client_obd *cli,
771 struct brw_page *pga)
773 atomic_inc(&obd_dirty_pages);
774 cli->cl_dirty += CFS_PAGE_SIZE;
775 cli->cl_avail_grant -= CFS_PAGE_SIZE;
776 pga->flag |= OBD_BRW_FROM_GRANT;
777 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
778 CFS_PAGE_SIZE, pga, pga->pg);
779 LASSERT(cli->cl_avail_grant >= 0);
782 /* the companion to osc_consume_write_grant, called when a brw has completed.
783 * must be called with the loi lock held. */
784 static void osc_release_write_grant(struct client_obd *cli,
785 struct brw_page *pga, int sent)
787 int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
790 if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
795 pga->flag &= ~OBD_BRW_FROM_GRANT;
796 atomic_dec(&obd_dirty_pages);
797 cli->cl_dirty -= CFS_PAGE_SIZE;
799 cli->cl_lost_grant += CFS_PAGE_SIZE;
800 CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
801 cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
802 } else if (CFS_PAGE_SIZE != blocksize && pga->count != CFS_PAGE_SIZE) {
803 /* For short writes we shouldn't count parts of pages that
804 * span a whole block on the OST side, or our accounting goes
805 * wrong. Should match the code in filter_grant_check. */
806 int offset = pga->off & ~CFS_PAGE_MASK;
807 int count = pga->count + (offset & (blocksize - 1));
808 int end = (offset + pga->count) & (blocksize - 1);
810 count += blocksize - end;
812 cli->cl_lost_grant += CFS_PAGE_SIZE - count;
813 CDEBUG(D_CACHE, "lost %lu grant: %lu avail: %lu dirty: %lu\n",
814 CFS_PAGE_SIZE - count, cli->cl_lost_grant,
815 cli->cl_avail_grant, cli->cl_dirty);
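/* Worked example (illustrative): with a 4096-byte page, a 1024-byte OST
 * blocksize and a 100-byte write at page offset 300: count = 100 + 300 =
 * 400 and end = 400, so count is rounded up to the covering block, 1024.
 * The remaining 4096 - 1024 = 3072 bytes of page grant go to
 * cl_lost_grant and are later reported back to the OST as o_dropped. */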
821 static unsigned long rpcs_in_flight(struct client_obd *cli)
823 return cli->cl_r_in_flight + cli->cl_w_in_flight;
826 /* caller must hold loi_list_lock */
827 void osc_wake_cache_waiters(struct client_obd *cli)
829 struct list_head *l, *tmp;
830 struct osc_cache_waiter *ocw;
833 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
834 /* if we can't dirty more, we must wait until some is written */
835 if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
836 (atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) {
837 CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
838 "osc max %ld, sys max %d\n", cli->cl_dirty,
839 cli->cl_dirty_max, obd_max_dirty_pages);
843 /* if the cache is still dirty but grant is exhausted, wait for pending
844 * RPCs that may yet return us some grant before doing sync writes */
845 if (cli->cl_w_in_flight && cli->cl_avail_grant < CFS_PAGE_SIZE) {
846 CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
847 cli->cl_w_in_flight);
851 ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
852 list_del_init(&ocw->ocw_entry);
853 if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
854 /* no more RPCs in flight to return grant, do sync IO */
855 ocw->ocw_rc = -EDQUOT;
856 CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
858 osc_consume_write_grant(cli,
859 &ocw->ocw_oap->oap_brw_page);
862 cfs_waitq_signal(&ocw->ocw_waitq);
868 static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
870 client_obd_list_lock(&cli->cl_loi_list_lock);
871 cli->cl_avail_grant = ocd->ocd_grant;
872 client_obd_list_unlock(&cli->cl_loi_list_lock);
874 CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
875 cli->cl_avail_grant, cli->cl_lost_grant);
876 LASSERT(cli->cl_avail_grant >= 0);
879 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
881 client_obd_list_lock(&cli->cl_loi_list_lock);
882 CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
883 if (body->oa.o_valid & OBD_MD_FLGRANT)
884 cli->cl_avail_grant += body->oa.o_grant;
885 /* waiters are woken in brw_interpret */
886 client_obd_list_unlock(&cli->cl_loi_list_lock);
889 /* We assume that the reason this OSC got a short read is that it read
890 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
891 * via the LOV, and it _knows_ it's reading inside the file - it's just
892 * that this stripe never got written at or beyond this stripe offset yet. */
893 static void handle_short_read(int nob_read, obd_count page_count,
894 struct brw_page **pga)
899 /* skip bytes read OK */
900 while (nob_read > 0) {
901 LASSERT (page_count > 0);
903 if (pga[i]->count > nob_read) {
904 /* EOF inside this page */
905 ptr = cfs_kmap(pga[i]->pg) +
906 (pga[i]->off & ~CFS_PAGE_MASK);
907 memset(ptr + nob_read, 0, pga[i]->count - nob_read);
908 cfs_kunmap(pga[i]->pg);
914 nob_read -= pga[i]->count;
919 /* zero remaining pages */
920 while (page_count-- > 0) {
921 ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
922 memset(ptr, 0, pga[i]->count);
923 cfs_kunmap(pga[i]->pg);
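/* Worked example (illustrative): a two-page read (2 x 4096 bytes) that
 * returns nob_read = 5000 consumes page 0 whole, leaving 904 bytes; EOF
 * then falls inside page 1, so bytes 904..4095 of that page are zeroed,
 * and any following pages would be zeroed in full. */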
928 static int check_write_rcs(struct ptlrpc_request *req,
929 int requested_nob, int niocount,
930 obd_count page_count, struct brw_page **pga)
934 /* return error if any niobuf was in error */
935 remote_rcs = lustre_swab_repbuf(req, REQ_REC_OFF + 1,
936 sizeof(*remote_rcs) * niocount, NULL);
937 if (remote_rcs == NULL) {
938 CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
941 if (lustre_msg_swabbed(req->rq_repmsg))
942 for (i = 0; i < niocount; i++)
943 __swab32s(&remote_rcs[i]);
945 for (i = 0; i < niocount; i++) {
946 if (remote_rcs[i] < 0)
947 return(remote_rcs[i]);
949 if (remote_rcs[i] != 0) {
950 CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
951 i, remote_rcs[i], req);
956 if (req->rq_bulk->bd_nob_transferred != requested_nob) {
957 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
958 requested_nob, req->rq_bulk->bd_nob_transferred);
965 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
967 if (p1->flag != p2->flag) {
968 unsigned mask = ~OBD_BRW_FROM_GRANT;
970 /* warn if we try to combine flags that we don't know to be
971 * safe to combine */
972 if ((p1->flag & mask) != (p2->flag & mask))
973 CERROR("is it ok to have flags 0x%x and 0x%x in the "
974 "same brw?\n", p1->flag, p2->flag);
978 return (p1->off + p1->count == p2->off);
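/* Example (illustrative): pages covering [0, 4096) and [4096, 8192) with
 * identical BRW flags merge into one remote niobuf; a gap between them or
 * a flag mismatch starts a new niobuf instead. */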
981 static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
982 struct brw_page **pga, int opc,
983 cksum_type_t cksum_type)
988 LASSERT (pg_count > 0);
989 cksum = init_checksum(cksum_type);
990 while (nob > 0 && pg_count > 0) {
991 unsigned char *ptr = cfs_kmap(pga[i]->pg);
992 int off = pga[i]->off & ~CFS_PAGE_MASK;
993 int count = pga[i]->count > nob ? nob : pga[i]->count;
995 /* corrupt the data before we compute the checksum, to
996 * simulate an OST->client data error */
997 if (i == 0 && opc == OST_READ &&
998 OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
999 memcpy(ptr + off, "bad1", min(4, nob));
1000 cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
1001 cfs_kunmap(pga[i]->pg);
1002 LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
1005 nob -= pga[i]->count;
1009 /* When sending we only compute a wrong checksum instead of
1010 * corrupting the data, so it is still correct on a redo */
1011 if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
1017 static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
1018 struct lov_stripe_md *lsm, obd_count page_count,
1019 struct brw_page **pga,
1020 struct ptlrpc_request **reqp,
1021 struct obd_capa *ocapa)
1023 struct ptlrpc_request *req;
1024 struct ptlrpc_bulk_desc *desc;
1025 struct ost_body *body;
1026 struct obd_ioobj *ioobj;
1027 struct niobuf_remote *niobuf;
1028 int niocount, i, requested_nob, opc, rc;
1029 struct osc_brw_async_args *aa;
1030 struct req_capsule *pill;
1033 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1034 RETURN(-ENOMEM); /* Recoverable */
1035 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1036 RETURN(-EINVAL); /* Fatal */
1038 if ((cmd & OBD_BRW_WRITE) != 0) {
1040 req = ptlrpc_request_alloc_pool(cli->cl_import,
1041 cli->cl_import->imp_rq_pool,
1045 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW);
1051 for (niocount = i = 1; i < page_count; i++) {
1052 if (!can_merge_pages(pga[i - 1], pga[i]))
1056 pill = &req->rq_pill;
1057 req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1058 niocount * sizeof(*niobuf));
1059 osc_set_capa_size(req, &RMF_CAPA1, ocapa);
1061 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1063 ptlrpc_request_free(req);
1066 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1067 ptlrpc_at_set_req_timeout(req);
1069 if (opc == OST_WRITE)
1070 desc = ptlrpc_prep_bulk_imp(req, page_count,
1071 BULK_GET_SOURCE, OST_BULK_PORTAL);
1073 desc = ptlrpc_prep_bulk_imp(req, page_count,
1074 BULK_PUT_SINK, OST_BULK_PORTAL);
1077 GOTO(out, rc = -ENOMEM);
1078 /* NB the request now owns desc and frees it when the request is freed */
1080 body = req_capsule_client_get(pill, &RMF_OST_BODY);
1081 ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1082 niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1083 LASSERT(body && ioobj && niobuf);
1087 obdo_to_ioobj(oa, ioobj);
1088 ioobj->ioo_bufcnt = niocount;
1089 osc_pack_capa(req, body, ocapa);
1090 LASSERT (page_count > 0);
1091 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1092 struct brw_page *pg = pga[i];
1093 struct brw_page *pg_prev = pga[i - 1];
1095 LASSERT(pg->count > 0);
1096 LASSERTF((pg->off & ~CFS_PAGE_MASK) + pg->count <= CFS_PAGE_SIZE,
1097 "i: %d pg: %p off: "LPU64", count: %u\n", i, pg,
1098 pg->off, pg->count);
1100 LASSERTF(i == 0 || pg->off > pg_prev->off,
1101 "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
1102 " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
1104 pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1105 pg_prev->pg, page_private(pg_prev->pg),
1106 pg_prev->pg->index, pg_prev->off);
1108 LASSERTF(i == 0 || pg->off > pg_prev->off,
1109 "i %d p_c %u\n", i, page_count);
1111 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1112 (pg->flag & OBD_BRW_SRVLOCK));
1114 ptlrpc_prep_bulk_page(desc, pg->pg, pg->off & ~CFS_PAGE_MASK,
1116 requested_nob += pg->count;
1118 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1120 niobuf->len += pg->count;
1122 niobuf->offset = pg->off;
1123 niobuf->len = pg->count;
1124 niobuf->flags = pg->flag;
1128 LASSERT((void *)(niobuf - niocount) ==
1129 lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 2,
1130 niocount * sizeof(*niobuf)));
1131 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1133 /* size[REQ_REC_OFF] is still sizeof(*body) */
1134 if (opc == OST_WRITE) {
1135 if (unlikely(cli->cl_checksum) &&
1136 req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) {
1137 /* store cl_cksum_type in a local variable since
1138 * it can be changed via lprocfs */
1139 cksum_type_t cksum_type = cli->cl_cksum_type;
1141 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1142 oa->o_flags = body->oa.o_flags = 0;
1143 body->oa.o_flags |= cksum_type_pack(cksum_type);
1144 body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1145 body->oa.o_cksum = osc_checksum_bulk(requested_nob,
1149 CDEBUG(D_PAGE, "checksum at write origin: %x\n",
1151 /* save this in 'oa', too, for later checking */
1152 oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1153 oa->o_flags |= cksum_type_pack(cksum_type);
1155 /* clear out the checksum flag, in case this is a
1156 * resend but cl_checksum is no longer set. b=11238 */
1157 oa->o_valid &= ~OBD_MD_FLCKSUM;
1159 oa->o_cksum = body->oa.o_cksum;
1160 /* 1 RC per niobuf */
1161 req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER,
1162 sizeof(__u32) * niocount);
1164 if (unlikely(cli->cl_checksum) &&
1165 req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) {
1166 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1167 body->oa.o_flags = 0;
1168 body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
1169 body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1171 req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER, 0);
1172 /* 1 RC for the whole I/O */
1174 ptlrpc_request_set_replen(req);
1176 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1177 aa = (struct osc_brw_async_args *)&req->rq_async_args;
1179 aa->aa_requested_nob = requested_nob;
1180 aa->aa_nio_count = niocount;
1181 aa->aa_page_count = page_count;
1185 CFS_INIT_LIST_HEAD(&aa->aa_oaps);
1191 ptlrpc_req_finished(req);
1195 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
1196 __u32 client_cksum, __u32 server_cksum, int nob,
1197 obd_count page_count, struct brw_page **pga,
1198 cksum_type_t client_cksum_type)
1202 cksum_type_t cksum_type;
1204 if (server_cksum == client_cksum) {
1205 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1209 if (oa->o_valid & OBD_MD_FLFLAGS)
1210 cksum_type = cksum_type_unpack(oa->o_flags);
1212 cksum_type = OBD_CKSUM_CRC32;
1214 new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
1217 if (cksum_type != client_cksum_type)
1218 msg = "the server did not use the checksum type specified in "
1219 "the original request - likely a protocol problem";
1220 else if (new_cksum == server_cksum)
1221 msg = "changed on the client after we checksummed it - "
1222 "likely false positive due to mmap IO (bug 11742)";
1223 else if (new_cksum == client_cksum)
1224 msg = "changed in transit before arrival at OST";
1226 msg = "changed in transit AND doesn't match the original - "
1227 "likely false positive due to mmap IO (bug 11742)";
1229 LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inum "
1230 LPU64"/"LPU64" object "LPU64"/"LPU64" extent "
1231 "["LPU64"-"LPU64"]\n",
1232 msg, libcfs_nid2str(peer->nid),
1233 oa->o_valid & OBD_MD_FLFID ? oa->o_fid : (__u64)0,
1234 oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
1237 oa->o_valid & OBD_MD_FLGROUP ? oa->o_gr : (__u64)0,
1239 pga[page_count-1]->off + pga[page_count-1]->count - 1);
1240 CERROR("original client csum %x (type %x), server csum %x (type %x), "
1241 "client csum now %x\n", client_cksum, client_cksum_type,
1242 server_cksum, cksum_type, new_cksum);
1247 /* Note: rc enters this function as the number of bytes transferred */
1247 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1249 struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1250 const lnet_process_id_t *peer =
1251 &req->rq_import->imp_connection->c_peer;
1252 struct client_obd *cli = aa->aa_cli;
1253 struct ost_body *body;
1254 __u32 client_cksum = 0;
1257 if (rc < 0 && rc != -EDQUOT)
1260 LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1261 body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
1262 lustre_swab_ost_body);
1264 CDEBUG(D_INFO, "Can't unpack body\n");
1268 /* set/clear over quota flag for a uid/gid */
1269 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1270 body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA))
1271 lquota_setdq(quota_interface, cli, body->oa.o_uid,
1272 body->oa.o_gid, body->oa.o_valid,
1278 if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1279 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1281 osc_update_grant(cli, body);
1283 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1285 CERROR("Unexpected +ve rc %d\n", rc);
1288 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
1290 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1291 check_write_checksum(&body->oa, peer, client_cksum,
1292 body->oa.o_cksum, aa->aa_requested_nob,
1293 aa->aa_page_count, aa->aa_ppga,
1294 cksum_type_unpack(aa->aa_oa->o_flags)))
1297 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1300 rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
1301 aa->aa_page_count, aa->aa_ppga);
1305 /* The rest of this function executes only for OST_READs */
1306 if (rc > aa->aa_requested_nob) {
1307 CERROR("Unexpected rc %d (%d requested)\n", rc,
1308 aa->aa_requested_nob);
1312 if (rc != req->rq_bulk->bd_nob_transferred) {
1313 CERROR ("Unexpected rc %d (%d transferred)\n",
1314 rc, req->rq_bulk->bd_nob_transferred);
1318 if (rc < aa->aa_requested_nob)
1319 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1321 if (sptlrpc_cli_unwrap_bulk_read(req, rc, aa->aa_page_count,
1323 GOTO(out, rc = -EAGAIN);
1325 if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1326 static int cksum_counter;
1327 __u32 server_cksum = body->oa.o_cksum;
1330 cksum_type_t cksum_type;
1332 if (body->oa.o_valid & OBD_MD_FLFLAGS)
1333 cksum_type = cksum_type_unpack(body->oa.o_flags);
1335 cksum_type = OBD_CKSUM_CRC32;
1336 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
1337 aa->aa_ppga, OST_READ,
1340 if (peer->nid == req->rq_bulk->bd_sender) {
1344 router = libcfs_nid2str(req->rq_bulk->bd_sender);
1347 if (server_cksum == ~0 && rc > 0) {
1348 CERROR("Protocol error: server %s set the 'checksum' "
1349 "bit, but didn't send a checksum. Not fatal, "
1350 "but please tell CFS.\n",
1351 libcfs_nid2str(peer->nid));
1352 } else if (server_cksum != client_cksum) {
1353 LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
1354 "%s%s%s inum "LPU64"/"LPU64" object "
1355 LPU64"/"LPU64" extent "
1356 "["LPU64"-"LPU64"]\n",
1357 req->rq_import->imp_obd->obd_name,
1358 libcfs_nid2str(peer->nid),
1360 body->oa.o_valid & OBD_MD_FLFID ?
1361 body->oa.o_fid : (__u64)0,
1362 body->oa.o_valid & OBD_MD_FLFID ?
1363 body->oa.o_generation :(__u64)0,
1365 body->oa.o_valid & OBD_MD_FLGROUP ?
1366 body->oa.o_gr : (__u64)0,
1367 aa->aa_ppga[0]->off,
1368 aa->aa_ppga[aa->aa_page_count-1]->off +
1369 aa->aa_ppga[aa->aa_page_count-1]->count -
1371 CERROR("client %x, server %x, cksum_type %x\n",
1372 client_cksum, server_cksum, cksum_type);
1374 aa->aa_oa->o_cksum = client_cksum;
1378 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1381 } else if (unlikely(client_cksum)) {
1382 static int cksum_missed;
1385 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1386 CERROR("Checksum %u requested from %s but not sent\n",
1387 cksum_missed, libcfs_nid2str(peer->nid));
1393 *aa->aa_oa = body->oa;
1398 static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1399 struct lov_stripe_md *lsm,
1400 obd_count page_count, struct brw_page **pga,
1401 struct obd_capa *ocapa)
1403 struct ptlrpc_request *req;
1407 struct l_wait_info lwi;
1411 cfs_waitq_init(&waitq);
1414 rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
1415 page_count, pga, &req, ocapa);
1419 rc = ptlrpc_queue_wait(req);
1421 if (rc == -ETIMEDOUT && req->rq_resend) {
1422 DEBUG_REQ(D_HA, req, "BULK TIMEOUT");
1423 ptlrpc_req_finished(req);
1427 rc = osc_brw_fini_request(req, rc);
1429 ptlrpc_req_finished(req);
1430 if (osc_recoverable_error(rc)) {
1432 if (!osc_should_resend(resends, &exp->exp_obd->u.cli)) {
1433 CERROR("too many resend retries, returning error\n");
1437 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
1438 l_wait_event(waitq, 0, &lwi);
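/* Note (illustrative): the timeout above grows with the resend count, so
 * the retry loop backs off roughly linearly, about 1s before the first
 * retry, 2s before the second, etc., until osc_should_resend() gives up. */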
1446 int osc_brw_redo_request(struct ptlrpc_request *request,
1447 struct osc_brw_async_args *aa)
1449 struct ptlrpc_request *new_req;
1450 struct ptlrpc_request_set *set = request->rq_set;
1451 struct osc_brw_async_args *new_aa;
1452 struct osc_async_page *oap;
1456 if (!osc_should_resend(aa->aa_resends, aa->aa_cli)) {
1457 CERROR("too many resend retries, returning error\n");
1461 DEBUG_REQ(D_ERROR, request, "redo for recoverable error");
1463 body = lustre_msg_buf(request->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
1464 if (body->oa.o_valid & OBD_MD_FLOSSCAPA)
1465 ocapa = lustre_unpack_capa(request->rq_reqmsg,
1468 rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1469 OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
1470 aa->aa_cli, aa->aa_oa,
1471 NULL /* lsm unused by osc currently */,
1472 aa->aa_page_count, aa->aa_ppga,
1473 &new_req, NULL /* ocapa */);
1477 client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
1479 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1480 if (oap->oap_request != NULL) {
1481 LASSERTF(request == oap->oap_request,
1482 "request %p != oap_request %p\n",
1483 request, oap->oap_request);
1484 if (oap->oap_interrupted) {
1485 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1486 ptlrpc_req_finished(new_req);
1491 /* New request takes over pga and oaps from old request.
1492 * Note that copying a list_head doesn't work, need to move it... */
1494 new_req->rq_interpret_reply = request->rq_interpret_reply;
1495 new_req->rq_async_args = request->rq_async_args;
1496 new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
1498 new_aa = (struct osc_brw_async_args *)&new_req->rq_async_args;
1500 CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
1501 list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
1502 CFS_INIT_LIST_HEAD(&aa->aa_oaps);
1504 list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1505 if (oap->oap_request) {
1506 ptlrpc_req_finished(oap->oap_request);
1507 oap->oap_request = ptlrpc_request_addref(new_req);
1511 /* using ptlrpc_set_add_req here is safe because interpret functions
1512 * run in check_set context.  the only path by which another thread can
1513 * reach this request and return -EINTR is protected by
1514 * cl_loi_list_lock */
1515 ptlrpc_set_add_req(set, new_req);
1517 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1519 DEBUG_REQ(D_INFO, new_req, "new request");
1523 static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1524 struct lov_stripe_md *lsm, obd_count page_count,
1525 struct brw_page **pga, struct ptlrpc_request_set *set,
1526 struct obd_capa *ocapa)
1528 struct ptlrpc_request *req;
1529 struct client_obd *cli = &exp->exp_obd->u.cli;
1531 struct osc_brw_async_args *aa;
1534 /* Consume write credits even if doing a sync write; otherwise the
1535 * grant accounting gets out of sync and we may run out of space on the OST. */
1536 if (cmd == OBD_BRW_WRITE) {
1537 spin_lock(&cli->cl_loi_list_lock);
1538 for (i = 0; i < page_count; i++) {
1539 if (cli->cl_avail_grant >= CFS_PAGE_SIZE)
1540 osc_consume_write_grant(cli, pga[i]);
1542 spin_unlock(&cli->cl_loi_list_lock);
1545 rc = osc_brw_prep_request(cmd, cli, oa, lsm, page_count, pga,
1548 aa = (struct osc_brw_async_args *)&req->rq_async_args;
1549 if (cmd == OBD_BRW_READ) {
1550 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
1551 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
1552 ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
1554 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
1555 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
1556 cli->cl_w_in_flight);
1557 ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);
1560 LASSERT(list_empty(&aa->aa_oaps));
1562 req->rq_interpret_reply = brw_interpret;
1563 ptlrpc_set_add_req(set, req);
1564 client_obd_list_lock(&cli->cl_loi_list_lock);
1565 if (cmd == OBD_BRW_READ)
1566 cli->cl_r_in_flight++;
1568 cli->cl_w_in_flight++;
1569 client_obd_list_unlock(&cli->cl_loi_list_lock);
1570 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DIO_PAUSE, 3);
1571 } else if (cmd == OBD_BRW_WRITE) {
1572 client_obd_list_lock(&cli->cl_loi_list_lock);
1573 for (i = 0; i < page_count; i++)
1574 osc_release_write_grant(cli, pga[i], 0);
1575 osc_wake_cache_waiters(cli);
1576 client_obd_list_unlock(&cli->cl_loi_list_lock);
1582 * Ugh, we want disk allocation on the target to happen in offset order.  We'll
1583 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
1584 * fine for our small page arrays and doesn't require allocation.  It's an
1585 * insertion sort that swaps elements that are strides apart, shrinking the
1586 * stride down until it's 1 and the array is sorted.
1588 static void sort_brw_pages(struct brw_page **array, int num)
1591 struct brw_page *tmp;
1595 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1600 for (i = stride ; i < num ; i++) {
1603 while (j >= stride && array[j - stride]->off > tmp->off) {
1604 array[j] = array[j - stride];
1609 } while (stride > 1);
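/* Example (illustrative): for num = 100 the loop above grows the stride
 * through 1, 4, 13, 40, 121; the sort then runs the in-range strides from
 * largest to smallest, finishing with stride 1 as a plain insertion sort
 * over an already almost-sorted array. */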
1612 static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages)
1618 LASSERT (pages > 0);
1619 offset = pg[i]->off & ~CFS_PAGE_MASK;
1623 if (pages == 0) /* that's all */
1626 if (offset + pg[i]->count < CFS_PAGE_SIZE)
1627 return count; /* doesn't end on page boundary */
1630 offset = pg[i]->off & ~CFS_PAGE_MASK;
1631 if (offset != 0) /* doesn't start on page boundary */
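/* Example (illustrative): pages covering [0, 4096), [4096, 6096) and
 * [8192, 12288) yield a count of 2: the second page does not end on a
 * page boundary, so the third page cannot join the same RDMA transfer. */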
1638 static struct brw_page **osc_build_ppga(struct brw_page *pga, obd_count count)
1640 struct brw_page **ppga;
1643 OBD_ALLOC(ppga, sizeof(*ppga) * count);
1647 for (i = 0; i < count; i++)
1652 static void osc_release_ppga(struct brw_page **ppga, obd_count count)
1654 LASSERT(ppga != NULL);
1655 OBD_FREE(ppga, sizeof(*ppga) * count);
1658 static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
1659 obd_count page_count, struct brw_page *pga,
1660 struct obd_trans_info *oti)
1662 struct obdo *saved_oa = NULL;
1663 struct brw_page **ppga, **orig;
1664 struct obd_import *imp = class_exp2cliimp(exp);
1665 struct client_obd *cli = &imp->imp_obd->u.cli;
1666 int rc, page_count_orig;
1669 if (cmd & OBD_BRW_CHECK) {
1670 /* The caller just wants to know if there's a chance that this
1671 * I/O can succeed */
1673 if (imp == NULL || imp->imp_invalid)
1678 /* test_brw with a failed create can trip this, maybe others. */
1679 LASSERT(cli->cl_max_pages_per_rpc);
1683 orig = ppga = osc_build_ppga(pga, page_count);
1686 page_count_orig = page_count;
1688 sort_brw_pages(ppga, page_count);
1689 while (page_count) {
1690 obd_count pages_per_brw;
1692 if (page_count > cli->cl_max_pages_per_rpc)
1693 pages_per_brw = cli->cl_max_pages_per_rpc;
1695 pages_per_brw = page_count;
1697 pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
1699 if (saved_oa != NULL) {
1700 /* restore previously saved oa */
1701 *oinfo->oi_oa = *saved_oa;
1702 } else if (page_count > pages_per_brw) {
1703 /* save a copy of oa (brw will clobber it) */
1704 OBDO_ALLOC(saved_oa);
1705 if (saved_oa == NULL)
1706 GOTO(out, rc = -ENOMEM);
1707 *saved_oa = *oinfo->oi_oa;
1710 rc = osc_brw_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
1711 pages_per_brw, ppga, oinfo->oi_capa);
1716 page_count -= pages_per_brw;
1717 ppga += pages_per_brw;
1721 osc_release_ppga(orig, page_count_orig);
1723 if (saved_oa != NULL)
1724 OBDO_FREE(saved_oa);
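/* Example (illustrative): a 1000-page request with cl_max_pages_per_rpc =
 * 256 goes out as synchronous BRWs of at most 256 pages each (fewer where
 * max_unfragmented_pages() finds a gap), restoring the saved obdo between
 * chunks because each BRW clobbers it. */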
1729 static int osc_brw_async(int cmd, struct obd_export *exp,
1730 struct obd_info *oinfo, obd_count page_count,
1731 struct brw_page *pga, struct obd_trans_info *oti,
1732 struct ptlrpc_request_set *set)
1734 struct brw_page **ppga, **orig;
1735 struct client_obd *cli = &exp->exp_obd->u.cli;
1736 int page_count_orig;
1740 if (cmd & OBD_BRW_CHECK) {
1741 struct obd_import *imp = class_exp2cliimp(exp);
1742 /* The caller just wants to know if there's a chance that this
1743 * I/O can succeed */
1745 if (imp == NULL || imp->imp_invalid)
1750 orig = ppga = osc_build_ppga(pga, page_count);
1753 page_count_orig = page_count;
1755 sort_brw_pages(ppga, page_count);
1756 while (page_count) {
1757 struct brw_page **copy;
1758 obd_count pages_per_brw;
1760 pages_per_brw = min_t(obd_count, page_count,
1761 cli->cl_max_pages_per_rpc);
1763 pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
1765 /* use ppga only if a single RPC is going to fly */
1766 if (pages_per_brw != page_count_orig || ppga != orig) {
1767 OBD_ALLOC(copy, sizeof(*copy) * pages_per_brw);
1769 GOTO(out, rc = -ENOMEM);
1770 memcpy(copy, ppga, sizeof(*copy) * pages_per_brw);
1774 rc = async_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
1775 pages_per_brw, copy, set, oinfo->oi_capa);
1779 OBD_FREE(copy, sizeof(*copy) * pages_per_brw);
1783 /* we passed it to async_internal() which is
1784 * now responsible for releasing memory */
1788 page_count -= pages_per_brw;
1789 ppga += pages_per_brw;
1793 osc_release_ppga(orig, page_count_orig);
1797 static void osc_check_rpcs(struct client_obd *cli);
1799 /* The companion to osc_enter_cache(), called when @oap is no longer part of
1800 * the dirty accounting: either writeback completed or truncate happened
1801 * before writing started.  Must be called with the loi lock held. */
1802 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1805 osc_release_write_grant(cli, &oap->oap_brw_page, sent);
1809 /* This maintains the lists of pending pages to read/write for a given object
1810 * (lop). This is used by osc_check_rpcs->osc_next_loi() and loi_list_maint()
1811 * to quickly find objects that are ready to send an RPC. */
1812 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1818 if (lop->lop_num_pending == 0)
1821 /* if we have an invalid import we want to drain the queued pages
1822 * by forcing them through rpcs that immediately fail and complete
1823 * the pages.  recovery relies on this to empty the queued pages
1824 * before cancelling the locks and evicting the llite pages */
1825 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1828 /* stream rpcs in queue order as long as there is an urgent page
1829 * queued.  this is our cheap solution for good batching in the case
1830 * where writepage marks some random page in the middle of the file
1831 * as urgent because of, say, memory pressure */
1832 if (!list_empty(&lop->lop_urgent)) {
1833 CDEBUG(D_CACHE, "urgent request forcing RPC\n");
1836 /* fire off rpcs when we have 'optimal' rpcs as tuned for the wire. */
1837 optimal = cli->cl_max_pages_per_rpc;
1838 if (cmd & OBD_BRW_WRITE) {
1839 /* trigger a write rpc stream as long as there are dirtiers
1840 * waiting for space.  as they're waiting, they're not going to
1841 * create more pages to coalesce with what's waiting. */
1842 if (!list_empty(&cli->cl_cache_waiters)) {
1843 CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
1846 /* +16 to avoid triggering rpcs that would want to include pages
1847 * that are being queued but which can't be made ready until
1848 * the queuer finishes with the page. this is a wart for
1849 * llite::commit_write() */
1852 if (lop->lop_num_pending >= optimal)
1858 static void on_list(struct list_head *item, struct list_head *list,
1861 if (list_empty(item) && should_be_on)
1862 list_add_tail(item, list);
1863 else if (!list_empty(item) && !should_be_on)
1864 list_del_init(item);
1867 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
1868 * can find pages to build into rpcs quickly */
1869 static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
1871 on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
1872 lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
1873 lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
1875 on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
1876 loi->loi_write_lop.lop_num_pending);
1878 on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
1879 loi->loi_read_lop.lop_num_pending);
1882 static void lop_update_pending(struct client_obd *cli,
1883 struct loi_oap_pages *lop, int cmd, int delta)
1885 lop->lop_num_pending += delta;
1886 if (cmd & OBD_BRW_WRITE)
1887 cli->cl_pending_w_pages += delta;
1889 cli->cl_pending_r_pages += delta;
1892 /* this is called when a sync waiter receives an interruption. Its job is to
1893 * get the caller woken as soon as possible. If its page hasn't been put in an
1894 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
1895 * desiring interruption, which will forcefully complete the rpc once
1896 * the rpc is interpreted. */
1897 static void osc_occ_interrupted(struct oig_callback_context *occ)
1899 struct osc_async_page *oap;
1900 struct loi_oap_pages *lop;
1901 struct lov_oinfo *loi;
1904 /* XXX member_of() */
1905 oap = list_entry(occ, struct osc_async_page, oap_occ);
1907 client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
1909 oap->oap_interrupted = 1;
1911 /* ok, it's been put in an rpc. only one oap gets a request reference */
1912 if (oap->oap_request != NULL) {
1913 ptlrpc_mark_interrupted(oap->oap_request);
1914 ptlrpcd_wake(oap->oap_request);
1918 /* we don't get interruption callbacks until osc_trigger_group_io()
1919 * has been called and put the sync oaps in the pending/urgent lists. */
1920 if (!list_empty(&oap->oap_pending_item)) {
1921 list_del_init(&oap->oap_pending_item);
1922 list_del_init(&oap->oap_urgent_item);
1925 lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
1926 &loi->loi_write_lop : &loi->loi_read_lop;
1927 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
1928 loi_list_maint(oap->oap_cli, oap->oap_loi);
1930 oig_complete_one(oap->oap_oig, &oap->oap_occ, -EINTR);
1931 oap->oap_oig = NULL;
1935 client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
1938 /* this is trying to propagate async writeback errors back up to the
1939 * application.  As an async write fails we record the error code for later if
1940 * the app does an fsync.  As long as errors persist we force future rpcs to be
1941 * sync so that the app can get a sync error and break the cycle of queueing
1942 * pages for which writeback will fail. */
1943 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
1950 ar->ar_force_sync = 1;
1951 ar->ar_min_xid = ptlrpc_sample_next_xid();
1956 if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
1957 ar->ar_force_sync = 0;
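/* Example (illustrative): an async write fails with -ENOSPC, so the rc is
 * recorded, ar_force_sync is set and ar_min_xid is sampled; later writes
 * go out synchronously until one with xid >= ar_min_xid completes cleanly,
 * which clears ar_force_sync and ends the forced-sync regime. */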
1960 static void osc_oap_to_pending(struct osc_async_page *oap)
1962 struct loi_oap_pages *lop;
1964 if (oap->oap_cmd & OBD_BRW_WRITE)
1965 lop = &oap->oap_loi->loi_write_lop;
1967 lop = &oap->oap_loi->loi_read_lop;
1969 if (oap->oap_async_flags & ASYNC_URGENT)
1970 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
1971 list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
1972 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
1975 /* this must be called holding the loi list lock to give coverage to exit_cache,
1976 * async_flag maintenance, and oap_request */
1977 static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
1978 struct osc_async_page *oap, int sent, int rc)
1983 if (oap->oap_request != NULL) {
1984 xid = ptlrpc_req_xid(oap->oap_request);
1985 ptlrpc_req_finished(oap->oap_request);
1986 oap->oap_request = NULL;
1989 oap->oap_async_flags = 0;
1990 oap->oap_interrupted = 0;
1992 if (oap->oap_cmd & OBD_BRW_WRITE) {
1993 osc_process_ar(&cli->cl_ar, xid, rc);
1994 osc_process_ar(&oap->oap_loi->loi_ar, xid, rc);
1997 if (rc == 0 && oa != NULL) {
1998 if (oa->o_valid & OBD_MD_FLBLOCKS)
1999 oap->oap_loi->loi_lvb.lvb_blocks = oa->o_blocks;
2000 if (oa->o_valid & OBD_MD_FLMTIME)
2001 oap->oap_loi->loi_lvb.lvb_mtime = oa->o_mtime;
2002 if (oa->o_valid & OBD_MD_FLATIME)
2003 oap->oap_loi->loi_lvb.lvb_atime = oa->o_atime;
2004 if (oa->o_valid & OBD_MD_FLCTIME)
2005 oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
2009 osc_exit_cache(cli, oap, sent);
2010 oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
2011 oap->oap_oig = NULL;
2016 rc = oap->oap_caller_ops->ap_completion(oap->oap_caller_data,
2017 oap->oap_cmd, oa, rc);
2019 /* ll_ap_completion (from llite) drops PG_locked. so, a new
2020 * I/O on the page could start, but OSC calls it under lock
2021 * and thus we can add oap back to pending safely */
2023 /* upper layer wants to leave the page on pending queue */
2024 osc_oap_to_pending(oap);
2026 osc_exit_cache(cli, oap, sent);
2030 static int brw_interpret(struct ptlrpc_request *req, void *data, int rc)
2032 struct osc_brw_async_args *aa = data;
2033 struct client_obd *cli;
2036 rc = osc_brw_fini_request(req, rc);
2037 CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2038 if (osc_recoverable_error(rc)) {
2039 rc = osc_brw_redo_request(req, aa);
2046 client_obd_list_lock(&cli->cl_loi_list_lock);
2048 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2049 * is called so we know whether to go to sync BRWs or wait for more
2050 * RPCs to complete */
2051 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2052 cli->cl_w_in_flight--;
2054 cli->cl_r_in_flight--;
2056 if (!list_empty(&aa->aa_oaps)) { /* from osc_send_oap_rpc() */
2057 struct osc_async_page *oap, *tmp;
2058 /* the caller may re-use the oap after the completion call so
2059 * we need to clean it up a little */
2060 list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
2061 list_del_init(&oap->oap_rpc_item);
2062 osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
2064 OBDO_FREE(aa->aa_oa);
2065 } else { /* from async_internal() */
2067 for (i = 0; i < aa->aa_page_count; i++)
2068 osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
2070 osc_wake_cache_waiters(cli);
2071 osc_check_rpcs(cli);
2072 client_obd_list_unlock(&cli->cl_loi_list_lock);
2074 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2078 static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
2079 struct list_head *rpc_list,
2080 int page_count, int cmd)
2082 struct ptlrpc_request *req;
2083 struct brw_page **pga = NULL;
2084 struct osc_brw_async_args *aa;
2085 struct obdo *oa = NULL;
2086 struct obd_async_page_ops *ops = NULL;
2087 void *caller_data = NULL;
2088 struct obd_capa *ocapa;
2089 struct osc_async_page *oap;
2093 LASSERT(!list_empty(rpc_list));
2095 OBD_ALLOC(pga, sizeof(*pga) * page_count);
2097 RETURN(ERR_PTR(-ENOMEM));
2101 GOTO(out, req = ERR_PTR(-ENOMEM));
2104 list_for_each_entry(oap, rpc_list, oap_rpc_item) {
2106 ops = oap->oap_caller_ops;
2107 caller_data = oap->oap_caller_data;
2109 pga[i] = &oap->oap_brw_page;
2110 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2111 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
2112 pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag);
2116 /* always get the data for the obdo for the rpc */
2117 LASSERT(ops != NULL);
2118 ops->ap_fill_obdo(caller_data, cmd, oa);
2119 ocapa = ops->ap_lookup_capa(caller_data, cmd);
2121 sort_brw_pages(pga, page_count);
2122 rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
2126 CERROR("prep_req failed: %d\n", rc);
2127 GOTO(out, req = ERR_PTR(rc));
2130 /* Need to update the timestamps after the request is built in case
2131 * we race with setattr (locally or in queue at OST). If OST gets
2132 * later setattr before earlier BRW (as determined by the request xid),
2133 * the OST will not use BRW timestamps. Sadly, there is no obvious
2134 * way to do this in a single call. bug 10150 */
2135 ops->ap_update_obdo(caller_data, cmd, oa,
2136 OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
2138 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2139 aa = (struct osc_brw_async_args *)&req->rq_async_args;
2140 CFS_INIT_LIST_HEAD(&aa->aa_oaps);
2141 list_splice(rpc_list, &aa->aa_oaps);
2142 CFS_INIT_LIST_HEAD(rpc_list);
2149 OBD_FREE(pga, sizeof(*pga) * page_count);
2154 /* the loi lock is held across this function but it's allowed to release
2155 * and reacquire it during its work */
2157 * Prepare pages for async IO and put them on the send queue.
2161 * \param cmd - OBD_BRW_* flags
2162 * \param lop - pending pages
2164 * \return zero if pages were successfully added to the send queue.
2165 * \return non-zero if an error occurred.
2167 static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
2168 int cmd, struct loi_oap_pages *lop)
2170 struct ptlrpc_request *req;
2171 obd_count page_count = 0;
2172 struct osc_async_page *oap = NULL, *tmp;
2173 struct osc_brw_async_args *aa;
2174 struct obd_async_page_ops *ops;
2175 CFS_LIST_HEAD(rpc_list);
2176 unsigned int ending_offset;
2177 unsigned starting_offset = 0;
2181 /* first we find the pages we're allowed to work with */
2182 list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
2184 ops = oap->oap_caller_ops;
2186 LASSERT(oap->oap_magic == OAP_MAGIC);
2188 if (page_count != 0 &&
2189 srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
2190 CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
2191 " oap %p, page %p, srvlock %u\n",
2192 oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
2195 /* in llite being 'ready' equates to the page being locked
2196 * until completion unlocks it. commit_write submits a page
2197 * as not ready because its unlock will happen unconditionally
2198 * as the call returns. if we race with commit_write giving
2199 * us that page we don't want to create a hole in the page
2200 * stream, so we stop and leave the rpc to be fired by
2201 * another dirtier or kupdated interval (the not ready page
2202 * will still be on the dirty list). we could call in
2203 * at the end of ll_file_write to process the queue again. */
2204 if (!(oap->oap_async_flags & ASYNC_READY)) {
2205 int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
2207 CDEBUG(D_INODE, "oap %p page %p returned %d "
2208 "instead of ready\n", oap,
2212 /* llite is telling us that the page is still
2213 * in commit_write and that we should try
2214 * and put it in an rpc again later. we
2215 * break out of the loop so we don't create
2216 * a hole in the sequence of pages in the rpc
2221 /* the io isn't needed; tell the checks
2222 * below to complete the rpc with EINTR */
2223 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
2224 oap->oap_count = -EINTR;
2227 oap->oap_async_flags |= ASYNC_READY;
2230 LASSERTF(0, "oap %p page %p returned %d "
2231 "from make_ready\n", oap,
2239 * Page submitted for IO has to be locked. Either by
2240 * ->ap_make_ready() or by higher layers.
2242 #if defined(__KERNEL__) && defined(__linux__)
2243 if (!(PageLocked(oap->oap_page) &&
2244 (CheckWriteback(oap->oap_page, cmd) || oap->oap_oig != NULL))) {
2245 CDEBUG(D_PAGE, "page %p lost wb %lx/%x\n",
2246 oap->oap_page, (long)oap->oap_page->flags, oap->oap_async_flags);
2250 /* If there is a gap at the start of this page, it can't merge
2251 * with any previous page, so we'll hand the network a
2252 * "fragmented" page array that it can't transfer in 1 RDMA */
2253 if (page_count != 0 && oap->oap_page_off != 0)
2256 /* take the page out of our book-keeping */
2257 list_del_init(&oap->oap_pending_item);
2258 lop_update_pending(cli, lop, cmd, -1);
2259 list_del_init(&oap->oap_urgent_item);
2261 if (page_count == 0)
2262 starting_offset = (oap->oap_obj_off+oap->oap_page_off) &
2263 (PTLRPC_MAX_BRW_SIZE - 1);
2265 /* ask the caller for the size of the io as the rpc leaves. */
2266 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
2268 ops->ap_refresh_count(oap->oap_caller_data,cmd);
2269 if (oap->oap_count <= 0) {
2270 CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
2272 osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
2276 /* now put the page back in our accounting */
2277 list_add_tail(&oap->oap_rpc_item, &rpc_list);
2278 if (page_count == 0)
2279 srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
2280 if (++page_count >= cli->cl_max_pages_per_rpc)
2283 /* End on a PTLRPC_MAX_BRW_SIZE boundary. We want full-sized
2284 * RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
2285 * have the same alignment as the initial writes that allocated
2286 * extents on the server. */
2287 ending_offset = (oap->oap_obj_off + oap->oap_page_off +
2288 oap->oap_count) & (PTLRPC_MAX_BRW_SIZE - 1);
2289 if (ending_offset == 0)
2292 /* If there is a gap at the end of this page, it can't merge
2293 * with any subsequent pages, so we'll hand the network a
2294 * "fragmented" page array that it can't transfer in 1 RDMA */
2295 if (oap->oap_page_off + oap->oap_count < CFS_PAGE_SIZE)
2299 osc_wake_cache_waiters(cli);
2301 if (page_count == 0)
2304 loi_list_maint(cli, loi);
2306 client_obd_list_unlock(&cli->cl_loi_list_lock);
2308 req = osc_build_req(cli, &rpc_list, page_count, cmd);
2310 /* this should happen rarely and is pretty bad; it makes the
2311 * pending list no longer follow the dirty order */
2312 client_obd_list_lock(&cli->cl_loi_list_lock);
2313 list_for_each_entry_safe(oap, tmp, &rpc_list, oap_rpc_item) {
2314 list_del_init(&oap->oap_rpc_item);
2316 /* queued sync pages can be torn down while the pages
2317 * were between the pending list and the rpc */
2318 if (oap->oap_interrupted) {
2319 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
2320 osc_ap_completion(cli, NULL, oap, 0,
2324 osc_ap_completion(cli, NULL, oap, 0, PTR_ERR(req));
2326 loi_list_maint(cli, loi);
2327 RETURN(PTR_ERR(req));
2330 aa = (struct osc_brw_async_args *)&req->rq_async_args;
2332 if (cmd == OBD_BRW_READ) {
2333 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2334 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2335 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2336 (starting_offset >> CFS_PAGE_SHIFT) + 1);
2337 ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
2339 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2340 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
2341 cli->cl_w_in_flight);
2342 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2343 (starting_offset >> CFS_PAGE_SHIFT) + 1);
2344 ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);
2347 client_obd_list_lock(&cli->cl_loi_list_lock);
2349 if (cmd == OBD_BRW_READ)
2350 cli->cl_r_in_flight++;
2352 cli->cl_w_in_flight++;
2354 /* queued sync pages can be torn down while the pages
2355 * were between the pending list and the rpc */
2357 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2358 /* only one oap gets a request reference */
2361 if (oap->oap_interrupted && !req->rq_intr) {
2362 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2364 ptlrpc_mark_interrupted(req);
2368 tmp->oap_request = ptlrpc_request_addref(req);
2370 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2371 page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2373 req->rq_interpret_reply = brw_interpret;
2374 ptlrpcd_add_req(req);
2378 #define LOI_DEBUG(LOI, STR, args...) \
2379 CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
2380 !list_empty(&(LOI)->loi_cli_item), \
2381 (LOI)->loi_write_lop.lop_num_pending, \
2382 !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
2383 (LOI)->loi_read_lop.lop_num_pending, \
2384 !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
2385 args)
2387 /* This is called by osc_check_rpcs() to find which objects have pages that
2388 * we could be sending. These lists are maintained by lop_makes_rpc(). */
2389 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
2392 /* first return all objects which we already know to have
2393 * pages ready to be stuffed into rpcs */
2394 if (!list_empty(&cli->cl_loi_ready_list))
2395 RETURN(list_entry(cli->cl_loi_ready_list.next,
2396 struct lov_oinfo, loi_cli_item));
2398 /* then if we have cache waiters, return all objects with queued
2399 * writes. This is especially important when many small files
2400 * have filled up the cache and not been fired into rpcs because
2401 * they don't pass the nr_pending/object threshold
2402 if (!list_empty(&cli->cl_cache_waiters) &&
2403 !list_empty(&cli->cl_loi_write_list))
2404 RETURN(list_entry(cli->cl_loi_write_list.next,
2405 struct lov_oinfo, loi_write_item));
2407 /* then return all queued objects when we have an invalid import
2408 * so that they get flushed */
2409 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2410 if (!list_empty(&cli->cl_loi_write_list))
2411 RETURN(list_entry(cli->cl_loi_write_list.next,
2412 struct lov_oinfo, loi_write_item));
2413 if (!list_empty(&cli->cl_loi_read_list))
2414 RETURN(list_entry(cli->cl_loi_read_list.next,
2415 struct lov_oinfo, loi_read_item));
2420 /* called with the loi list lock held */
2421 static void osc_check_rpcs(struct client_obd *cli)
2423 struct lov_oinfo *loi;
2424 int rc = 0, race_counter = 0;
2427 while ((loi = osc_next_loi(cli)) != NULL) {
2428 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
2430 if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
2433 /* attempt some read/write balancing by alternating between
2434 * reads and writes in an object. The makes_rpc checks here
2435 * would be redundant if we were getting read/write work items
2436 * instead of objects. we don't want send_oap_rpc to drain a
2437 * partial read pending queue when we're given this object to
2438 * do io on writes while there are cache waiters */
2439 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
2440 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
2441 &loi->loi_write_lop);
2449 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
2450 rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
2451 &loi->loi_read_lop);
2460 /* attempt some inter-object balancing by issuing rpcs
2461 * for each object in turn */
2462 if (!list_empty(&loi->loi_cli_item))
2463 list_del_init(&loi->loi_cli_item);
2464 if (!list_empty(&loi->loi_write_item))
2465 list_del_init(&loi->loi_write_item);
2466 if (!list_empty(&loi->loi_read_item))
2467 list_del_init(&loi->loi_read_item);
2469 loi_list_maint(cli, loi);
2471 /* send_oap_rpc fails with 0 when make_ready tells it to
2472 * back off. llite's make_ready does this when it tries
2473 * to lock a page queued for write that is already locked.
2474 * we want to try sending rpcs from many objects, but we
2475 * don't want to spin failing with 0. */
2476 if (race_counter == 10)
2482 /* we're trying to queue a page in the osc so we're subject to the
2483 * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
2484 * If the osc's queued pages are already at that limit, then we want to sleep
2485 * until there is space in the osc's queue for us. We also may be waiting for
2486 * write credits from the OST if there are RPCs in flight that may return some
2487 * before we fall back to sync writes.
2489 * We need this to know our allocation was granted in the presence of signals */
2490 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
2494 client_obd_list_lock(&cli->cl_loi_list_lock);
2495 rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
2496 client_obd_list_unlock(&cli->cl_loi_list_lock);
2500 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
2501 * grant or cache space. */
2502 static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
2503 struct osc_async_page *oap)
2505 struct osc_cache_waiter ocw;
2506 struct l_wait_info lwi = { 0 };
2510 CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
2511 "grant: %lu\n", cli->cl_dirty, atomic_read(&obd_dirty_pages),
2512 cli->cl_dirty_max, obd_max_dirty_pages,
2513 cli->cl_lost_grant, cli->cl_avail_grant);
2515 /* force the caller to try sync io. this can jump the list
2516 * of queued writes and create a discontiguous rpc stream */
2517 if (cli->cl_dirty_max < CFS_PAGE_SIZE || cli->cl_ar.ar_force_sync ||
2518 loi->loi_ar.ar_force_sync)
2521 /* Hopefully normal case - cache space and write credits available */
2522 if ((cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max) &&
2523 (atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) &&
2524 (cli->cl_avail_grant >= CFS_PAGE_SIZE)) {
2525 /* account for ourselves */
2526 osc_consume_write_grant(cli, &oap->oap_brw_page);
2530 /* Make sure that there are write rpcs in flight to wait for. This
2531 * is a little silly as this object may not have any pending writes,
2532 * but other objects certainly might. */
2533 if (cli->cl_w_in_flight) {
2534 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
2535 cfs_waitq_init(&ocw.ocw_waitq);
2539 loi_list_maint(cli, loi);
2540 osc_check_rpcs(cli);
2541 client_obd_list_unlock(&cli->cl_loi_list_lock);
2543 CDEBUG(D_CACHE, "sleeping for cache space\n");
2544 l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
2546 client_obd_list_lock(&cli->cl_loi_list_lock);
2547 if (!list_empty(&ocw.ocw_entry)) {
2548 list_del(&ocw.ocw_entry);
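2555 /* Fast-path lock match: reuse the DLM lock already attached to this page
2556 * for the [start, end] range without doing a new enqueue. */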
2557 static int osc_reget_short_lock(struct obd_export *exp,
2558 struct lov_stripe_md *lsm,
2560 obd_off start, obd_off end,
2563 struct osc_async_page *oap = *res;
2568 spin_lock(&oap->oap_lock);
2569 rc = ldlm_lock_fast_match(oap->oap_ldlm_lock, rw,
2570 start, end, cookie);
2571 spin_unlock(&oap->oap_lock);
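2575 /* Release the reference taken by osc_reget_short_lock(). */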
2576 static int osc_release_short_lock(struct obd_export *exp,
2577 struct lov_stripe_md *lsm, obd_off end,
2578 void *cookie, int rw)
2581 ldlm_lock_fast_release(cookie, rw);
2582 /* no error could have happened at this layer */
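2584 /* Initialise the per-page async I/O state (the oap "cookie"); callers
2585 * can also use this to learn how much space to reserve for it. */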
2586 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
2587 struct lov_oinfo *loi, cfs_page_t *page,
2588 obd_off offset, struct obd_async_page_ops *ops,
2589 void *data, void **res, int nocache,
2590 struct lustre_handle *lockh)
2592 struct osc_async_page *oap;
2593 struct ldlm_res_id oid = {{0}};
2598 return size_round(sizeof(*oap));
2601 oap->oap_magic = OAP_MAGIC;
2602 oap->oap_cli = &exp->exp_obd->u.cli;
2605 oap->oap_caller_ops = ops;
2606 oap->oap_caller_data = data;
2608 oap->oap_page = page;
2609 oap->oap_obj_off = offset;
2611 CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
2612 CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
2613 CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
2614 CFS_INIT_LIST_HEAD(&oap->oap_page_list);
2616 oap->oap_occ.occ_interrupted = osc_occ_interrupted;
2618 spin_lock_init(&oap->oap_lock);
2620 /* If the page was marked as not cacheable - don't add it to any locks */
2622 oid.name[0] = loi->loi_id;
2623 oid.name[2] = loi->loi_gr;
2624 /* This is the only place where we can call cache_add_extent
2625 without oap_lock, because this page is locked now, and
2626 the lock we are adding it to is referenced, so it cannot lose
2627 any pages either. */
2628 rc = cache_add_extent(oap->oap_cli->cl_cache, &oid, oap, lockh);
2633 CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
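2636 /* Convert an opaque cookie back to its osc_async_page, checking the magic. */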
2637 struct osc_async_page *oap_from_cookie(void *cookie)
2639 struct osc_async_page *oap = cookie;
2640 if (oap->oap_magic != OAP_MAGIC)
2641 return ERR_PTR(-EINVAL);
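2643 /* Queue a page for async I/O: check quota for writes, reserve cache space
2644 * and grant, then add the oap to the pending list and poke osc_check_rpcs(). */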
2645 static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
2646 struct lov_oinfo *loi, void *cookie,
2647 int cmd, obd_off off, int count,
2648 obd_flag brw_flags, enum async_flags async_flags)
2650 struct client_obd *cli = &exp->exp_obd->u.cli;
2651 struct osc_async_page *oap;
2655 oap = oap_from_cookie(cookie);
2657 RETURN(PTR_ERR(oap));
2659 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2662 if (!list_empty(&oap->oap_pending_item) ||
2663 !list_empty(&oap->oap_urgent_item) ||
2664 !list_empty(&oap->oap_rpc_item))
2667 /* check if the file's owner/group is over quota */
2668 #ifdef HAVE_QUOTA_SUPPORT
2669 if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)){
2670 struct obd_async_page_ops *ops;
2677 ops = oap->oap_caller_ops;
2678 ops->ap_fill_obdo(oap->oap_caller_data, cmd, oa);
2679 if (lquota_chkdq(quota_interface, cli, oa->o_uid, oa->o_gid) ==
2690 loi = lsm->lsm_oinfo[0];
2692 client_obd_list_lock(&cli->cl_loi_list_lock);
2695 oap->oap_page_off = off;
2696 oap->oap_count = count;
2697 oap->oap_brw_flags = brw_flags;
2698 oap->oap_async_flags = async_flags;
2700 if (cmd & OBD_BRW_WRITE) {
2701 rc = osc_enter_cache(cli, loi, oap);
2703 client_obd_list_unlock(&cli->cl_loi_list_lock);
2708 osc_oap_to_pending(oap);
2709 loi_list_maint(cli, loi);
2711 LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
2714 osc_check_rpcs(cli);
2715 client_obd_list_unlock(&cli->cl_loi_list_lock);
2720 /* aka (~was & now & flag), but this is more clear :) */
2721 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
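2722 /* e.g. SETTING(0, ASYNC_READY, ASYNC_READY) == 1; SETTING(ASYNC_READY, ASYNC_READY, ASYNC_READY) == 0 */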
2723 static int osc_set_async_flags(struct obd_export *exp,
2724 struct lov_stripe_md *lsm,
2725 struct lov_oinfo *loi, void *cookie,
2726 obd_flag async_flags)
2728 struct client_obd *cli = &exp->exp_obd->u.cli;
2729 struct loi_oap_pages *lop;
2730 struct osc_async_page *oap;
2734 oap = oap_from_cookie(cookie);
2736 RETURN(PTR_ERR(oap));
2739 * bug 7311: OST-side locking is only supported for liblustre for now
2740 * (and liblustre never calls obd_set_async_flags(). I hope.); a generic
2741 * implementation would have to handle the case where an OST-locked page
2742 * was picked up by, e.g., ->writepage().
2744 LASSERT(!(oap->oap_brw_flags & OBD_BRW_SRVLOCK));
2745 LASSERT(!LIBLUSTRE_CLIENT); /* check that liblustre angels do fear to
2746 * tread here. */
2748 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2752 loi = lsm->lsm_oinfo[0];
2754 if (oap->oap_cmd & OBD_BRW_WRITE) {
2755 lop = &loi->loi_write_lop;
2757 lop = &loi->loi_read_lop;
2760 client_obd_list_lock(&cli->cl_loi_list_lock);
2762 if (list_empty(&oap->oap_pending_item))
2763 GOTO(out, rc = -EINVAL);
2765 if ((oap->oap_async_flags & async_flags) == async_flags)
2768 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
2769 oap->oap_async_flags |= ASYNC_READY;
2771 if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
2772 if (list_empty(&oap->oap_rpc_item)) {
2773 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2774 loi_list_maint(cli, loi);
2778 LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
2779 oap->oap_async_flags);
2781 osc_check_rpcs(cli);
2782 client_obd_list_unlock(&cli->cl_loi_list_lock);
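2784 /* Queue a page for group I/O: it sits on lop_pending_group until
2785 * osc_trigger_group_io() moves it onto the regular pending list. */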
2786 static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
2787 struct lov_oinfo *loi,
2788 struct obd_io_group *oig, void *cookie,
2789 int cmd, obd_off off, int count,
2791 obd_flag async_flags)
2793 struct client_obd *cli = &exp->exp_obd->u.cli;
2794 struct osc_async_page *oap;
2795 struct loi_oap_pages *lop;
2799 oap = oap_from_cookie(cookie);
2801 RETURN(PTR_ERR(oap));
2803 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2806 if (!list_empty(&oap->oap_pending_item) ||
2807 !list_empty(&oap->oap_urgent_item) ||
2808 !list_empty(&oap->oap_rpc_item))
2812 loi = lsm->lsm_oinfo[0];
2814 client_obd_list_lock(&cli->cl_loi_list_lock);
2817 oap->oap_page_off = off;
2818 oap->oap_count = count;
2819 oap->oap_brw_flags = brw_flags;
2820 oap->oap_async_flags = async_flags;
2822 if (cmd & OBD_BRW_WRITE)
2823 lop = &loi->loi_write_lop;
2825 lop = &loi->loi_read_lop;
2827 list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
2828 if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
2830 rc = oig_add_one(oig, &oap->oap_occ);
2833 LOI_DEBUG(loi, "oap %p page %p on group pending: rc %d\n",
2834 oap, oap->oap_page, rc);
2836 client_obd_list_unlock(&cli->cl_loi_list_lock);
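2840 /* Move all pages from the group-pending list onto the regular pending list. */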
2841 static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
2842 struct loi_oap_pages *lop, int cmd)
2844 struct list_head *pos, *tmp;
2845 struct osc_async_page *oap;
2847 list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
2848 oap = list_entry(pos, struct osc_async_page, oap_pending_item);
2849 list_del(&oap->oap_pending_item);
2850 osc_oap_to_pending(oap);
2852 loi_list_maint(cli, loi);
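2854 /* Fire previously queued group I/O: make both lists pending and send rpcs. */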
2855 static int osc_trigger_group_io(struct obd_export *exp,
2856 struct lov_stripe_md *lsm,
2857 struct lov_oinfo *loi,
2858 struct obd_io_group *oig)
2860 struct client_obd *cli = &exp->exp_obd->u.cli;
2864 loi = lsm->lsm_oinfo[0];
2866 client_obd_list_lock(&cli->cl_loi_list_lock);
2868 osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
2869 osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
2871 osc_check_rpcs(cli);
2872 client_obd_list_unlock(&cli->cl_loi_list_lock);
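2875 /* Undo osc_queue_async_io(): drop cache accounting and take the page off
2876 * the pending lists; fails with -EBUSY once the page is part of an rpc. */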
2877 static int osc_teardown_async_page(struct obd_export *exp,
2878 struct lov_stripe_md *lsm,
2879 struct lov_oinfo *loi, void *cookie)
2881 struct client_obd *cli = &exp->exp_obd->u.cli;
2882 struct loi_oap_pages *lop;
2883 struct osc_async_page *oap;
2887 oap = oap_from_cookie(cookie);
2889 RETURN(PTR_ERR(oap));
2892 loi = lsm->lsm_oinfo[0];
2894 if (oap->oap_cmd & OBD_BRW_WRITE) {
2895 lop = &loi->loi_write_lop;
2897 lop = &loi->loi_read_lop;
2900 client_obd_list_lock(&cli->cl_loi_list_lock);
2902 if (!list_empty(&oap->oap_rpc_item))
2903 GOTO(out, rc = -EBUSY);
2905 osc_exit_cache(cli, oap, 0);
2906 osc_wake_cache_waiters(cli);
2908 if (!list_empty(&oap->oap_urgent_item)) {
2909 list_del_init(&oap->oap_urgent_item);
2910 oap->oap_async_flags &= ~ASYNC_URGENT;
2912 if (!list_empty(&oap->oap_pending_item)) {
2913 list_del_init(&oap->oap_pending_item);
2914 lop_update_pending(cli, lop, oap->oap_cmd, -1);
2916 loi_list_maint(cli, loi);
2917 cache_remove_extent(cli->cl_cache, oap);
2919 LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
2921 client_obd_list_unlock(&cli->cl_loi_list_lock);
2925 int osc_extent_blocking_cb(struct ldlm_lock *lock,
2926 struct ldlm_lock_desc *new, void *data,
2929 struct lustre_handle lockh = { 0 };
2933 if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
2934 LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
2939 case LDLM_CB_BLOCKING:
2940 ldlm_lock2handle(lock, &lockh);
2941 rc = ldlm_cli_cancel(&lockh);
2943 CERROR("ldlm_cli_cancel failed: %d\n", rc);
2945 case LDLM_CB_CANCELING: {
2947 ldlm_lock2handle(lock, &lockh);
2948 /* This lock wasn't granted, don't try to do anything */
2949 if (lock->l_req_mode != lock->l_granted_mode)
2952 cache_remove_lock(lock->l_conn_export->exp_obd->u.cli.cl_cache,
2955 if (lock->l_conn_export->exp_obd->u.cli.cl_ext_lock_cancel_cb)
2956 lock->l_conn_export->exp_obd->u.cli.cl_ext_lock_cancel_cb(
2957 lock, new, data, flag);
2966 EXPORT_SYMBOL(osc_extent_blocking_cb);
2968 static void osc_set_data_with_check(struct lustre_handle *lockh, void *data,
2971 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2974 CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
2977 lock_res_and_lock(lock);
2978 #if defined (__KERNEL__) && defined (__linux__)
2979 /* Liang XXX: Darwin and Winnt checking should be added */
2980 if (lock->l_ast_data && lock->l_ast_data != data) {
2981 struct inode *new_inode = data;
2982 struct inode *old_inode = lock->l_ast_data;
2983 if (!(old_inode->i_state & I_FREEING))
2984 LDLM_ERROR(lock, "inconsistent l_ast_data found");
2985 LASSERTF(old_inode->i_state & I_FREEING,
2986 "Found existing inode %p/%lu/%u state %lu in lock: "
2987 "setting data to %p/%lu/%u\n", old_inode,
2988 old_inode->i_ino, old_inode->i_generation,
2990 new_inode, new_inode->i_ino, new_inode->i_generation);
2993 lock->l_ast_data = data;
2994 lock->l_flags |= (flags & LDLM_FL_NO_LRU);
2995 unlock_res_and_lock(lock);
2996 LDLM_LOCK_PUT(lock);
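2998 /* Run @replace over every lock on this object to swap its callback data. */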
2999 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3000 ldlm_iterator_t replace, void *data)
3002 struct ldlm_res_id res_id = { .name = {0} };
3003 struct obd_device *obd = class_exp2obd(exp);
3005 res_id.name[0] = lsm->lsm_object_id;
3006 res_id.name[2] = lsm->lsm_object_gr;
3008 ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3012 static int osc_enqueue_fini(struct obd_device *obd, struct ptlrpc_request *req,
3013 struct obd_info *oinfo, int intent, int rc)
3018 /* The request was created before ldlm_cli_enqueue call. */
3019 if (rc == ELDLM_LOCK_ABORTED) {
3020 struct ldlm_reply *rep;
3021 rep = req_capsule_server_get(&req->rq_pill,
3024 LASSERT(rep != NULL);
3025 if (rep->lock_policy_res1)
3026 rc = rep->lock_policy_res1;
3030 if ((intent && rc == ELDLM_LOCK_ABORTED) || !rc) {
3031 CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
3032 oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_size,
3033 oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_blocks,
3034 oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_mtime);
3038 cache_add_lock(obd->u.cli.cl_cache, oinfo->oi_lockh);
3040 /* Call the update callback. */
3041 rc = oinfo->oi_cb_up(oinfo, rc);
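3043 /* Interpret callback for an async enqueue: finish the ldlm enqueue, then
3044 * drop the lock reference again (see the comment above osc_enqueue()). */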
3045 static int osc_enqueue_interpret(struct ptlrpc_request *req,
3046 struct osc_enqueue_args *aa, int rc)
3048 int intent = aa->oa_oi->oi_flags & LDLM_FL_HAS_INTENT;
3049 struct lov_stripe_md *lsm = aa->oa_oi->oi_md;
3050 struct ldlm_lock *lock;
3052 /* ldlm_cli_enqueue is holding a reference on the lock, so it must
3053 * be valid. */
3054 lock = ldlm_handle2lock(aa->oa_oi->oi_lockh);
3056 /* Complete obtaining the lock procedure. */
3057 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
3059 &aa->oa_oi->oi_flags,
3060 &lsm->lsm_oinfo[0]->loi_lvb,
3061 sizeof(lsm->lsm_oinfo[0]->loi_lvb),
3062 lustre_swab_ost_lvb,
3063 aa->oa_oi->oi_lockh, rc);
3065 /* Complete osc stuff. */
3066 rc = osc_enqueue_fini(aa->oa_exp->exp_obd, req, aa->oa_oi, intent, rc);
3068 /* Release the lock for async request. */
3069 if (lustre_handle_is_used(aa->oa_oi->oi_lockh) && rc == ELDLM_OK)
3070 ldlm_lock_decref(aa->oa_oi->oi_lockh, aa->oa_ei->ei_mode);
3072 LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
3073 aa->oa_oi->oi_lockh, req, aa);
3074 LDLM_LOCK_PUT(lock);
3078 /* When enqueuing asynchronously, locks are not ordered, we can obtain a lock
3079 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
3080 * other synchronous requests, however keeping some locks and trying to obtain
3081 * others may take a considerable amount of time in case of OST failure; and
3082 * when a client does not release locks that other sync requests are waiting
3083 * on, that client is evicted from the cluster -- such scenarios make life
3084 * difficult, so release locks just after they are obtained. */
3085 static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
3086 struct ldlm_enqueue_info *einfo,
3087 struct ptlrpc_request_set *rqset)
3089 struct ldlm_res_id res_id = { .name = {0} };
3090 struct obd_device *obd = exp->exp_obd;
3091 struct ptlrpc_request *req = NULL;
3092 int intent = oinfo->oi_flags & LDLM_FL_HAS_INTENT;
3097 res_id.name[0] = oinfo->oi_md->lsm_object_id;
3098 res_id.name[2] = oinfo->oi_md->lsm_object_gr;
3100 /* Filesystem lock extents are extended to page boundaries so that
3101 * dealing with the page cache is a little smoother. */
3102 oinfo->oi_policy.l_extent.start -=
3103 oinfo->oi_policy.l_extent.start & ~CFS_PAGE_MASK;
3104 oinfo->oi_policy.l_extent.end |= ~CFS_PAGE_MASK;
3106 if (oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid == 0)
3109 /* Next, search for already existing extent locks that will cover us */
3110 /* If we're trying to read, we also search for an existing PW lock. The
3111 * VFS and page cache already protect us locally, so lots of readers/
3112 * writers can share a single PW lock.
3114 * There are problems with conversion deadlocks, so instead of
3115 * converting a read lock to a write lock, we'll just enqueue a new
3116 * one.
3118 * At some point we should cancel the read lock instead of making them
3119 * send us a blocking callback, but there are problems with canceling
3120 * locks out from other users right now, too. */
3121 mode = einfo->ei_mode;
3122 if (einfo->ei_mode == LCK_PR)
3124 mode = ldlm_lock_match(obd->obd_namespace,
3125 oinfo->oi_flags | LDLM_FL_LVB_READY, &res_id,
3126 einfo->ei_type, &oinfo->oi_policy, mode,
3129 /* addref the lock only for non-async requests, and only when a PW
3130 * lock was matched whereas we asked for PR. */
3131 if (!rqset && einfo->ei_mode != mode)
3132 ldlm_lock_addref(oinfo->oi_lockh, LCK_PR);
3133 osc_set_data_with_check(oinfo->oi_lockh, einfo->ei_cbdata,
3136 /* I would like to be able to ASSERT here that rss <=
3137 * kms, but I can't, for reasons which are explained in
3138 * lov_enqueue() */
3141 /* We already have a lock, and it's referenced */
3142 oinfo->oi_cb_up(oinfo, ELDLM_OK);
3144 /* For async requests, decref the lock. */
3145 if (einfo->ei_mode != mode)
3146 ldlm_lock_decref(oinfo->oi_lockh, LCK_PW);
3148 ldlm_lock_decref(oinfo->oi_lockh, einfo->ei_mode);
3155 CFS_LIST_HEAD(cancels);
3156 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3157 &RQF_LDLM_ENQUEUE_LVB);
3161 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
3165 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
3166 sizeof(oinfo->oi_md->lsm_oinfo[0]->loi_lvb));
3167 ptlrpc_request_set_replen(req);
3170 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
3171 oinfo->oi_flags &= ~LDLM_FL_BLOCK_GRANTED;
3173 rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id,
3174 &oinfo->oi_policy, &oinfo->oi_flags,
3175 &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
3176 sizeof(oinfo->oi_md->lsm_oinfo[0]->loi_lvb),
3177 lustre_swab_ost_lvb, oinfo->oi_lockh,
3181 struct osc_enqueue_args *aa;
3182 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3183 aa = (struct osc_enqueue_args *)&req->rq_async_args;
3188 req->rq_interpret_reply = osc_enqueue_interpret;
3189 ptlrpc_set_add_req(rqset, req);
3190 } else if (intent) {
3191 ptlrpc_req_finished(req);
3196 rc = osc_enqueue_fini(obd, req, oinfo, intent, rc);
3198 ptlrpc_req_finished(req);
3203 static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
3204 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
3205 int *flags, void *data, struct lustre_handle *lockh)
3207 struct ldlm_res_id res_id = { .name = {0} };
3208 struct obd_device *obd = exp->exp_obd;
3209 int lflags = *flags;
3213 res_id.name[0] = lsm->lsm_object_id;
3214 res_id.name[2] = lsm->lsm_object_gr;
3216 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
3219 /* Filesystem lock extents are extended to page boundaries so that
3220 * dealing with the page cache is a little smoother */
3221 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3222 policy->l_extent.end |= ~CFS_PAGE_MASK;
3224 /* Next, search for already existing extent locks that will cover us */
3225 /* If we're trying to read, we also search for an existing PW lock. The
3226 * VFS and page cache already protect us locally, so lots of readers/
3227 * writers can share a single PW lock. */
3231 rc = ldlm_lock_match(obd->obd_namespace, lflags | LDLM_FL_LVB_READY,
3232 &res_id, type, policy, rc, lockh);
3234 osc_set_data_with_check(lockh, data, lflags);
3235 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
3236 ldlm_lock_addref(lockh, LCK_PR);
3237 ldlm_lock_decref(lockh, LCK_PW);
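3243 /* Drop a lock reference, cancelling the lock outright in the LCK_GROUP case. */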
3244 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
3245 __u32 mode, struct lustre_handle *lockh)
3249 if (unlikely(mode == LCK_GROUP))
3250 ldlm_lock_decref_and_cancel(lockh, mode);
3252 ldlm_lock_decref(lockh, mode);
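3255 /* Cancel unused locks on this object; with no resource set (resp == NULL)
3256 * ldlm_cli_cancel_unused() acts on the whole namespace. */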
3257 static int osc_cancel_unused(struct obd_export *exp,
3258 struct lov_stripe_md *lsm, int flags,
3261 struct obd_device *obd = class_exp2obd(exp);
3262 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
3265 res_id.name[0] = lsm->lsm_object_id;
3266 res_id.name[2] = lsm->lsm_object_gr;
3270 return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
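3272 /* Ask ldlm to move this object's unused locks into (or out of) the lru. */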
3273 static int osc_join_lru(struct obd_export *exp,
3274 struct lov_stripe_md *lsm, int join)
3276 struct obd_device *obd = class_exp2obd(exp);
3277 struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
3280 res_id.name[0] = lsm->lsm_object_id;
3281 res_id.name[2] = lsm->lsm_object_gr;
3285 return ldlm_cli_join_lru(obd->obd_namespace, resp, join);
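3286 /* Completion handler for osc_statfs_async(): copy the reply into the
3287 * caller's obd_statfs and run the up-call. */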
3288 static int osc_statfs_interpret(struct ptlrpc_request *req,
3289 struct osc_async_args *aa, int rc)
3291 struct obd_statfs *msfs;
3297 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3299 GOTO(out, rc = -EPROTO);
3302 *aa->aa_oi->oi_osfs = *msfs;
3304 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3308 static int osc_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
3309 __u64 max_age, struct ptlrpc_request_set *rqset)
3311 struct ptlrpc_request *req;
3312 struct osc_async_args *aa;
3316 /* We could possibly pass max_age in the request (as an absolute
3317 * timestamp or a "seconds.usec ago") so the target can avoid doing
3318 * extra calls into the filesystem if that isn't necessary (e.g.
3319 * during mount that would help a bit). Having relative timestamps
3320 * is not so great if request processing is slow, while absolute
3321 * timestamps are not ideal because they need time synchronization. */
3322 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3326 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3328 ptlrpc_request_free(req);
3331 ptlrpc_request_set_replen(req);
3332 req->rq_request_portal = OST_CREATE_PORTAL;
3333 ptlrpc_at_set_req_timeout(req);
3335 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3336 /* procfs requests should not block waiting for recovery, to avoid deadlock */
3337 req->rq_no_resend = 1;
3338 req->rq_no_delay = 1;
3341 req->rq_interpret_reply = osc_statfs_interpret;
3342 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3343 aa = (struct osc_async_args *)&req->rq_async_args;
3346 ptlrpc_set_add_req(rqset, req);
3350 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
3351 __u64 max_age, __u32 flags)
3353 struct obd_statfs *msfs;
3354 struct ptlrpc_request *req;
3355 struct obd_import *imp = NULL;
3359 /* Since the request might also come from lprocfs, we need to
3360 * sync this with client_disconnect_export (bug 15684) */
3361 down_read(&obd->u.cli.cl_sem);
3362 if (obd->u.cli.cl_import)
3363 imp = class_import_get(obd->u.cli.cl_import);
3364 up_read(&obd->u.cli.cl_sem);
3368 /* We could possibly pass max_age in the request (as an absolute
3369 * timestamp or a "seconds.usec ago") so the target can avoid doing
3370 * extra calls into the filesystem if that isn't necessary (e.g.
3371 * during mount that would help a bit). Having relative timestamps
3372 * is not so great if request processing is slow, while absolute
3373 * timestamps are not ideal because they need time synchronization. */
3374 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3376 class_import_put(imp);
3381 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3383 ptlrpc_request_free(req);
3386 ptlrpc_request_set_replen(req);
3387 req->rq_request_portal = OST_CREATE_PORTAL;
3388 ptlrpc_at_set_req_timeout(req);
3390 if (flags & OBD_STATFS_NODELAY) {
3391 /* procfs requests should not block waiting for recovery, to avoid deadlock */
3392 req->rq_no_resend = 1;
3393 req->rq_no_delay = 1;
3396 rc = ptlrpc_queue_wait(req);
3400 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3402 GOTO(out, rc = -EPROTO);
3409 ptlrpc_req_finished(req);
3413 /* Retrieve object striping information.
3415 * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
3416 * the maximum number of OST indices which will fit in the user buffer.
3417 * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
3418 */
3419 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
3421 struct lov_user_md lum, *lumk;
3422 int rc = 0, lum_size;
3428 if (copy_from_user(&lum, lump, sizeof(lum)))
3431 if (lum.lmm_magic != LOV_USER_MAGIC)
3434 if (lum.lmm_stripe_count > 0) {
3435 lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
3436 OBD_ALLOC(lumk, lum_size);
3440 lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
3441 lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
3443 lum_size = sizeof(lum);
3447 lumk->lmm_object_id = lsm->lsm_object_id;
3448 lumk->lmm_object_gr = lsm->lsm_object_gr;
3449 lumk->lmm_stripe_count = 1;
3451 if (copy_to_user(lump, lumk, lum_size))
3455 OBD_FREE(lumk, lum_size);
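3460 /* Handle OSC-specific ioctls; unrecognised commands return -ENOTTY. */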
3461 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3462 void *karg, void *uarg)
3464 struct obd_device *obd = exp->exp_obd;
3465 struct obd_ioctl_data *data = karg;
3469 if (!try_module_get(THIS_MODULE)) {
3470 CERROR("Can't get module. Is it alive?");
3474 case OBD_IOC_LOV_GET_CONFIG: {
3476 struct lov_desc *desc;
3477 struct obd_uuid uuid;
3481 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
3482 GOTO(out, err = -EINVAL);
3484 data = (struct obd_ioctl_data *)buf;
3486 if (sizeof(*desc) > data->ioc_inllen1) {
3487 obd_ioctl_freedata(buf, len);
3488 GOTO(out, err = -EINVAL);
3491 if (data->ioc_inllen2 < sizeof(uuid)) {
3492 obd_ioctl_freedata(buf, len);
3493 GOTO(out, err = -EINVAL);
3496 desc = (struct lov_desc *)data->ioc_inlbuf1;
3497 desc->ld_tgt_count = 1;
3498 desc->ld_active_tgt_count = 1;
3499 desc->ld_default_stripe_count = 1;
3500 desc->ld_default_stripe_size = 0;
3501 desc->ld_default_stripe_offset = 0;
3502 desc->ld_pattern = 0;
3503 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
3505 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
3507 err = copy_to_user((void *)uarg, buf, len);
3510 obd_ioctl_freedata(buf, len);
3513 case LL_IOC_LOV_SETSTRIPE:
3514 err = obd_alloc_memmd(exp, karg);
3518 case LL_IOC_LOV_GETSTRIPE:
3519 err = osc_getstripe(karg, uarg);
3521 case OBD_IOC_CLIENT_RECOVER:
3522 err = ptlrpc_recover_import(obd->u.cli.cl_import,
3527 case IOC_OSC_SET_ACTIVE:
3528 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
3531 case OBD_IOC_POLL_QUOTACHECK:
3532 err = lquota_poll_check(quota_interface, exp,
3533 (struct if_quotacheck *)karg);
3536 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
3537 cmd, cfs_curproc_comm());
3538 GOTO(out, err = -ENOTTY);
3541 module_put(THIS_MODULE);
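3543 /* obd_get_info handler: KEY_LOCK_TO_STRIPE maps trivially on a single-stripe
3544 * OSC, while KEY_LAST_ID is fetched from the OST with a synchronous request. */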
3545 static int osc_get_info(struct obd_export *exp, obd_count keylen,
3546 void *key, __u32 *vallen, void *val)
3549 if (!vallen || !val)
3552 if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
3553 __u32 *stripe = val;
3554 *vallen = sizeof(*stripe);
3557 } else if (KEY_IS(KEY_LAST_ID)) {
3558 struct ptlrpc_request *req;
3563 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3564 &RQF_OST_GET_INFO_LAST_ID);
3568 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3569 RCL_CLIENT, keylen);
3570 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3572 ptlrpc_request_free(req);
3576 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3577 memcpy(tmp, key, keylen);
3579 ptlrpc_request_set_replen(req);
3580 rc = ptlrpc_queue_wait(req);
3584 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
3586 GOTO(out, rc = -EPROTO);
3588 *((obd_id *)val) = *reply;
3590 ptlrpc_req_finished(req);
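3594 /* After announcing an MDS connection, connect the llog initiator and mark
3595 * the import pingable. */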
3596 static int osc_setinfo_mds_conn_interpret(struct ptlrpc_request *req,
3599 struct llog_ctxt *ctxt;
3600 struct obd_import *imp = req->rq_import;
3606 ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
3609 rc = llog_initiator_connect(ctxt);
3611 CERROR("cannot establish connection for "
3612 "ctxt %p: %d\n", ctxt, rc);
3615 llog_ctxt_put(ctxt);
3616 spin_lock(&imp->imp_lock);
3617 imp->imp_server_timeout = 1;
3618 imp->imp_pingable = 1;
3619 spin_unlock(&imp->imp_lock);
3620 CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
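3623 /* obd_set_info handler: a few keys are handled locally; everything else is
3624 * packed into an OST_SET_INFO request and queued on the caller's set. */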
3625 static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
3626 void *key, obd_count vallen, void *val,
3627 struct ptlrpc_request_set *set)
3629 struct ptlrpc_request *req;
3630 struct obd_device *obd = exp->exp_obd;
3631 struct obd_import *imp = class_exp2cliimp(exp);
3636 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3638 if (KEY_IS(KEY_NEXT_ID)) {
3639 if (vallen != sizeof(obd_id))
3643 obd->u.cli.cl_oscc.oscc_next_id = *((obd_id*)val) + 1;
3644 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
3645 exp->exp_obd->obd_name,
3646 obd->u.cli.cl_oscc.oscc_next_id);
3651 if (KEY_IS(KEY_UNLINKED)) {
3652 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3653 spin_lock(&oscc->oscc_lock);
3654 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3655 spin_unlock(&oscc->oscc_lock);
3659 if (KEY_IS(KEY_INIT_RECOV)) {
3660 if (vallen != sizeof(int))
3662 spin_lock(&imp->imp_lock);
3663 imp->imp_initial_recov = *(int *)val;
3664 spin_unlock(&imp->imp_lock);
3665 CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
3666 exp->exp_obd->obd_name,
3667 imp->imp_initial_recov);
3671 if (KEY_IS(KEY_CHECKSUM)) {
3672 if (vallen != sizeof(int))
3674 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3678 if (KEY_IS(KEY_FLUSH_CTX)) {
3679 sptlrpc_import_flush_my_ctx(imp);
3686 /* We pass all other commands directly to OST. Since nobody calls osc
3687 methods directly and everybody is supposed to go through LOV, we
3688 assume lov checked invalid values for us.
3689 The only recognised values so far are evict_by_nid and mds_conn.
3690 Even if something bad goes through, we'd get a -EINVAL from OST
3691 anyway. */
3694 req = ptlrpc_request_alloc(imp, &RQF_OST_SET_INFO);
3698 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3699 RCL_CLIENT, keylen);
3700 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3701 RCL_CLIENT, vallen);
3702 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3704 ptlrpc_request_free(req);
3708 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3709 memcpy(tmp, key, keylen);
3710 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
3711 memcpy(tmp, val, vallen);
3713 if (KEY_IS(KEY_MDS_CONN)) {
3714 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3716 oscc->oscc_oa.o_gr = (*(__u32 *)val);
3717 oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
3718 LASSERT(oscc->oscc_oa.o_gr > 0);
3719 req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
3722 ptlrpc_request_set_replen(req);
3723 ptlrpc_set_add_req(set, req);
3724 ptlrpc_check_set(set);
3730 static struct llog_operations osc_size_repl_logops = {
3731 lop_cancel: llog_obd_repl_cancel
3734 static struct llog_operations osc_mds_ost_orig_logops;
3735 static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
3736 struct obd_device *tgt, int count,
3737 struct llog_catid *catid, struct obd_uuid *uuid)
3742 LASSERT(olg == &obd->obd_olg);
3743 spin_lock(&obd->obd_dev_lock);
3744 if (osc_mds_ost_orig_logops.lop_setup != llog_obd_origin_setup) {
3745 osc_mds_ost_orig_logops = llog_lvfs_ops;
3746 osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
3747 osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
3748 osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
3749 osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
3751 spin_unlock(&obd->obd_dev_lock);
3753 rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, count,
3754 &catid->lci_logid, &osc_mds_ost_orig_logops);
3756 CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n");
3760 rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, count,
3761 NULL, &osc_size_repl_logops);
3763 CERROR("failed LLOG_SIZE_REPL_CTXT\n");
3766 CERROR("osc '%s' tgt '%s' cnt %d catid %p rc=%d\n",
3767 obd->obd_name, tgt->obd_name, count, catid, rc);
3768 CERROR("logid "LPX64":0x%x\n",
3769 catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
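3773 /* Clean up the two llog contexts created in osc_llog_init(). */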
3774 static int osc_llog_finish(struct obd_device *obd, int count)
3776 struct llog_ctxt *ctxt;
3777 int rc = 0, rc2 = 0;
3780 ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
3782 rc = llog_cleanup(ctxt);
3784 ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
3786 rc2 = llog_cleanup(ctxt);
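3791 /* On reconnect, report how much grant we want back to the server: what we
3792 * still hold, or by default enough for two full-sized rpcs. */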
3793 static int osc_reconnect(const struct lu_env *env,
3794 struct obd_export *exp, struct obd_device *obd,
3795 struct obd_uuid *cluuid,
3796 struct obd_connect_data *data)
3798 struct client_obd *cli = &obd->u.cli;
3800 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3803 client_obd_list_lock(&cli->cl_loi_list_lock);
3804 data->ocd_grant = cli->cl_avail_grant ?:
3805 2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
3806 lost_grant = cli->cl_lost_grant;
3807 cli->cl_lost_grant = 0;
3808 client_obd_list_unlock(&cli->cl_loi_list_lock);
3810 CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
3811 "cl_lost_grant: %ld\n", data->ocd_grant,
3812 cli->cl_avail_grant, lost_grant);
3813 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
3814 " ocd_grant: %d\n", data->ocd_connect_flags,
3815 data->ocd_version, data->ocd_grant);
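3819 /* On the last disconnect, flush remaining llog cancels out to the target
3820 * before tearing down the client export. */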
3821 static int osc_disconnect(struct obd_export *exp)
3823 struct obd_device *obd = class_exp2obd(exp);
3824 struct llog_ctxt *ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
3827 if (obd->u.cli.cl_conn_count == 1)
3828 /* flush any remaining cancel messages out to the target */
3829 llog_sync(ctxt, exp);
3831 llog_ctxt_put(ctxt);
3833 rc = client_disconnect_export(exp);
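3835 /* React to import state changes: forget grant on disconnect, fail queued
3836 * pages and flush locks on invalidation, renegotiate grant on connect. */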
3837 static int osc_import_event(struct obd_device *obd,
3838 struct obd_import *imp,
3839 enum obd_import_event event)
3841 struct client_obd *cli;
3845 LASSERT(imp->imp_obd == obd);
3848 case IMP_EVENT_DISCON: {
3849 /* Only do this on the MDS OSCs */
3850 if (imp->imp_server_timeout) {
3851 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3853 spin_lock(&oscc->oscc_lock);
3854 oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
3855 spin_unlock(&oscc->oscc_lock);
3858 client_obd_list_lock(&cli->cl_loi_list_lock);
3859 cli->cl_avail_grant = 0;
3860 cli->cl_lost_grant = 0;
3861 client_obd_list_unlock(&cli->cl_loi_list_lock);
3864 case IMP_EVENT_INACTIVE: {
3865 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
3868 case IMP_EVENT_INVALIDATE: {
3869 struct ldlm_namespace *ns = obd->obd_namespace;
3873 client_obd_list_lock(&cli->cl_loi_list_lock);
3874 /* all pages go to failing rpcs due to the invalid import */
3875 osc_check_rpcs(cli);
3876 client_obd_list_unlock(&cli->cl_loi_list_lock);
3878 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3882 case IMP_EVENT_ACTIVE: {
3883 /* Only do this on the MDS OSCs */
3884 if (imp->imp_server_timeout) {
3885 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
3887 spin_lock(&oscc->oscc_lock);
3888 oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
3889 spin_unlock(&oscc->oscc_lock);
3891 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
3894 case IMP_EVENT_OCD: {
3895 struct obd_connect_data *ocd = &imp->imp_connect_data;
3897 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3898 osc_init_grant(&obd->u.cli, ocd);
3901 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3902 imp->imp_client->cli_request_portal =OST_REQUEST_PORTAL;
3904 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
3908 CERROR("Unknown import event %d\n", event);
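3912 /* Device setup: generic client setup, procfs registration, a request pool
3913 * sized for brw_interpret's reallocation pattern, and the extent cache. */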
3914 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3920 rc = ptlrpcd_addref();
3924 rc = client_obd_setup(obd, lcfg);
3928 struct lprocfs_static_vars lvars = { 0 };
3929 struct client_obd *cli = &obd->u.cli;
3931 lprocfs_osc_init_vars(&lvars);
3932 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
3933 lproc_osc_attach_seqstat(obd);
3934 sptlrpc_lprocfs_cliobd_attach(obd);
3935 ptlrpc_lprocfs_register_obd(obd);
3939 /* We need to allocate a few extra requests, because
3940 brw_interpret tries to create new requests before freeing
3941 previous ones. Ideally we want to have 2x max_rpcs_in_flight
3942 reserved, but I'm afraid that might be too much wasted RAM
3943 in fact, so 2 is just my guess and should still work. */
3944 cli->cl_import->imp_rq_pool =
3945 ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
3947 ptlrpc_add_rqs_to_pool);
3948 cli->cl_cache = cache_create(obd);
3949 if (!cli->cl_cache) {
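3956 /* Two-stage teardown: deactivate the import early so recovery stops, then
3957 * clean up llog and any import that was set up but never connected. */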
3958 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
3964 case OBD_CLEANUP_EARLY: {
3965 struct obd_import *imp;
3966 imp = obd->u.cli.cl_import;
3967 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
3968 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
3969 ptlrpc_deactivate_import(imp);
3970 spin_lock(&imp->imp_lock);
3971 imp->imp_pingable = 0;
3972 spin_unlock(&imp->imp_lock);
3975 case OBD_CLEANUP_EXPORTS: {
3976 /* If we set up but never connected, the
3977 client import will not have been cleaned. */
3978 if (obd->u.cli.cl_import) {
3979 struct obd_import *imp;
3980 imp = obd->u.cli.cl_import;
3981 CDEBUG(D_CONFIG, "%s: client import never connected\n",
3983 ptlrpc_invalidate_import(imp);
3984 ptlrpc_free_rq_pool(imp->imp_rq_pool);
3985 class_destroy_import(imp);
3986 obd->u.cli.cl_import = NULL;
3988 rc = obd_llog_finish(obd, 0);
3990 CERROR("failed to cleanup llogging subsystems\n");
3993 case OBD_CLEANUP_SELF_EXP:
3995 case OBD_CLEANUP_OBD:
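3999 /* Final cleanup: stop object creation, release quota and cache state, then
4000 * do the generic client obd cleanup. */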
4001 int osc_cleanup(struct obd_device *obd)
4003 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4007 ptlrpc_lprocfs_unregister_obd(obd);
4008 lprocfs_obd_cleanup(obd);
4010 spin_lock(&oscc->oscc_lock);
4011 oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
4012 oscc->oscc_flags |= OSCC_FLAG_EXITING;
4013 spin_unlock(&oscc->oscc_lock);
4015 /* free memory of osc quota cache */
4016 lquota_cleanup(quota_interface, obd);
4018 cache_destroy(obd->u.cli.cl_cache);
4019 rc = client_obd_cleanup(obd);
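4023 /* Hooks upper layers register to hear about page removals and extent lock
4024 * cancellations originating in this OSC's cache. */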
4025 static int osc_register_page_removal_cb(struct obd_export *exp,
4026 obd_page_removal_cb_t func,
4027 obd_pin_extent_cb pin_cb)
4029 return cache_add_extent_removal_cb(exp->exp_obd->u.cli.cl_cache, func,
4033 static int osc_unregister_page_removal_cb(struct obd_export *exp,
4034 obd_page_removal_cb_t func)
4036 return cache_del_extent_removal_cb(exp->exp_obd->u.cli.cl_cache, func);
4039 static int osc_register_lock_cancel_cb(struct obd_export *exp,
4040 obd_lock_cancel_cb cb)
4042 LASSERT(exp->exp_obd->u.cli.cl_ext_lock_cancel_cb == NULL);
4044 exp->exp_obd->u.cli.cl_ext_lock_cancel_cb = cb;
4048 static int osc_unregister_lock_cancel_cb(struct obd_export *exp,
4049 obd_lock_cancel_cb cb)
4051 if (exp->exp_obd->u.cli.cl_ext_lock_cancel_cb != cb) {
4052 CERROR("Unregistering cancel cb %p, while only %p was "
4054 exp->exp_obd->u.cli.cl_ext_lock_cancel_cb);
4058 exp->exp_obd->u.cli.cl_ext_lock_cancel_cb = NULL;
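4061 /* Process config records: sptlrpc configuration or /proc parameters. */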
4062 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
4064 struct lustre_cfg *lcfg = buf;
4065 struct lprocfs_static_vars lvars = { 0 };
4068 lprocfs_osc_init_vars(&lvars);
4070 switch (lcfg->lcfg_command) {
4071 case LCFG_SPTLRPC_CONF:
4072 rc = sptlrpc_cliobd_process_config(obd, lcfg);
4075 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
4083 struct obd_ops osc_obd_ops = {
4084 .o_owner = THIS_MODULE,
4085 .o_setup = osc_setup,
4086 .o_precleanup = osc_precleanup,
4087 .o_cleanup = osc_cleanup,
4088 .o_add_conn = client_import_add_conn,
4089 .o_del_conn = client_import_del_conn,
4090 .o_connect = client_connect_import,
4091 .o_reconnect = osc_reconnect,
4092 .o_disconnect = osc_disconnect,
4093 .o_statfs = osc_statfs,
4094 .o_statfs_async = osc_statfs_async,
4095 .o_packmd = osc_packmd,
4096 .o_unpackmd = osc_unpackmd,
4097 .o_precreate = osc_precreate,
4098 .o_create = osc_create,
4099 .o_destroy = osc_destroy,
4100 .o_getattr = osc_getattr,
4101 .o_getattr_async = osc_getattr_async,
4102 .o_setattr = osc_setattr,
4103 .o_setattr_async = osc_setattr_async,
4105 .o_brw_async = osc_brw_async,
4106 .o_prep_async_page = osc_prep_async_page,
4107 .o_reget_short_lock = osc_reget_short_lock,
4108 .o_release_short_lock = osc_release_short_lock,
4109 .o_queue_async_io = osc_queue_async_io,
4110 .o_set_async_flags = osc_set_async_flags,
4111 .o_queue_group_io = osc_queue_group_io,
4112 .o_trigger_group_io = osc_trigger_group_io,
4113 .o_teardown_async_page = osc_teardown_async_page,
4114 .o_punch = osc_punch,
4116 .o_enqueue = osc_enqueue,
4117 .o_match = osc_match,
4118 .o_change_cbdata = osc_change_cbdata,
4119 .o_cancel = osc_cancel,
4120 .o_cancel_unused = osc_cancel_unused,
4121 .o_join_lru = osc_join_lru,
4122 .o_iocontrol = osc_iocontrol,
4123 .o_get_info = osc_get_info,
4124 .o_set_info_async = osc_set_info_async,
4125 .o_import_event = osc_import_event,
4126 .o_llog_init = osc_llog_init,
4127 .o_llog_finish = osc_llog_finish,
4128 .o_process_config = osc_process_config,
4129 .o_register_page_removal_cb = osc_register_page_removal_cb,
4130 .o_unregister_page_removal_cb = osc_unregister_page_removal_cb,
4131 .o_register_lock_cancel_cb = osc_register_lock_cancel_cb,
4132 .o_unregister_lock_cancel_cb = osc_unregister_lock_cancel_cb,
4134 int __init osc_init(void)
4136 struct lprocfs_static_vars lvars = { 0 };
4140 lprocfs_osc_init_vars(&lvars);
4142 request_module("lquota");
4143 quota_interface = PORTAL_SYMBOL_GET(osc_quota_interface);
4144 lquota_init(quota_interface);
4145 init_obd_quota_ops(quota_interface, &osc_obd_ops);
4147 rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
4148 LUSTRE_OSC_NAME, NULL);
4150 if (quota_interface)
4151 PORTAL_SYMBOL_PUT(osc_quota_interface);
4159 static void /*__exit*/ osc_exit(void)
4161 lquota_exit(quota_interface);
4162 if (quota_interface)
4163 PORTAL_SYMBOL_PUT(osc_quota_interface);
4165 class_unregister_type(LUSTRE_OSC_NAME);
4168 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
4169 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
4170 MODULE_LICENSE("GPL");
4172 cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);