/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <linux/falloc.h>
#include <libcfs/libcfs.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"
#include <lnet/lnet_rdma.h>
atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args
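/*
 * Note: grant-shrink RPCs reuse the BRW async-args slot in the request, so
 * osc_shrink_grant_interpret() below can free aa->aa_oa exactly the way a
 * BRW completion does.
 */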
struct osc_setattr_args {
	struct obdo		*sa_oa;
	obd_enqueue_update_f	 sa_upcall;
	void			*sa_cookie;
};

struct osc_fsync_args {
	struct osc_object	*fa_obj;
	struct obdo		*fa_oa;
	obd_enqueue_update_f	 fa_upcall;
	void			*fa_cookie;
};

struct osc_ladvise_args {
	struct obdo		*la_oa;
	obd_enqueue_update_f	 la_upcall;
	void			*la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
			 void *args, int rc);

void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
	struct ost_body *body;

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}
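/*
 * Fetch the attributes of an OST object with a synchronous OST_GETATTR RPC.
 * The o_blksize returned to the caller is the client's preferred BRW size,
 * not a value stored on the OST.
 */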
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
	oa->o_blksize = cli_brw_size(exp->exp_obd);
	oa->o_valid |= OBD_MD_FLBLKSZ;
out:
	ptlrpc_req_finished(req);
	return rc;
}
static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	LASSERT(oa->o_valid & OBD_MD_FLGROUP);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
out:
	ptlrpc_req_finished(req);
	return rc;
}
static int osc_setattr_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req, void *args, int rc)
{
	struct osc_setattr_args *sa = args;
	struct ost_body *body;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
			     &body->oa);
out:
	rc = sa->sa_upcall(sa->sa_cookie, rc);
	return rc;
}
int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
		      obd_enqueue_update_f upcall, void *cookie,
		      struct ptlrpc_request_set *rqset)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	int rc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);
	ptlrpc_request_set_replen(req);

	/* do mds to ost setattr asynchronously */
	if (!rqset) {
		/* Do not wait for response. */
		ptlrpcd_add_req(req);
	} else {
		req->rq_interpret_reply = osc_setattr_interpret;

		sa = ptlrpc_req_async_args(sa, req);
		sa->sa_oa = oa;
		sa->sa_upcall = upcall;
		sa->sa_cookie = cookie;

		ptlrpc_set_add_req(rqset, req);
	}

	RETURN(0);
}
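/*
 * The async helpers in this file follow one pattern: pack the obdo into the
 * request, stash {oa, upcall, cookie} in the request's async args, and let
 * the matching *_interpret() callback unpack the reply and invoke the
 * upcall. The ladvise path below repeats it for OST_LADVISE.
 */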
static int osc_ladvise_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req,
				 void *arg, int rc)
{
	struct osc_ladvise_args *la = arg;
	struct ost_body *body;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	*la->la_oa = body->oa;
out:
	rc = la->la_upcall(la->la_cookie, rc);
	return rc;
}
/*
 * If rqset is NULL, do not wait for the response. Upcall and cookie may also
 * be NULL in this case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
		     struct ladvise_hdr *ladvise_hdr,
		     obd_enqueue_update_f upcall, void *cookie,
		     struct ptlrpc_request_set *rqset)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_ladvise_args *la;
	struct lu_ladvise *req_ladvise;
	struct lu_ladvise *ladvise = ladvise_hdr->lah_advise;
	int num_advise = ladvise_hdr->lah_count;
	struct ladvise_hdr *req_ladvise_hdr;
	int rc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
			     num_advise * sizeof(*ladvise));
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
	if (rc != 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = OST_IO_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
			     oa);

	req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
						 &RMF_OST_LADVISE_HDR);
	memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

	req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
	memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
	ptlrpc_request_set_replen(req);

	if (rqset == NULL) {
		/* Do not wait for response. */
		ptlrpcd_add_req(req);
		RETURN(0);
	}

	req->rq_interpret_reply = osc_ladvise_interpret;
	la = ptlrpc_req_async_args(la, req);
	la->la_oa = oa;
	la->la_upcall = upcall;
	la->la_cookie = cookie;

	ptlrpc_set_add_req(rqset, req);

	RETURN(0);
}
static int osc_create(const struct lu_env *env, struct obd_export *exp,
		      struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	LASSERT(oa->o_valid & OBD_MD_FLGROUP);
	LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out_req, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out_req, rc = -EPROTO);

	CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

	oa->o_blksize = cli_brw_size(exp->exp_obd);
	oa->o_valid |= OBD_MD_FLBLKSZ;

	CDEBUG(D_HA, "transno: %lld\n",
	       lustre_msg_get_transno(req->rq_repmsg));
out_req:
	ptlrpc_req_finished(req);
out:
	return rc;
}
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
		   obd_enqueue_update_f upcall, void *cookie)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	struct obd_import *imp = class_exp2cliimp(exp);
	struct ost_body *body;
	int rc;

	req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_set_io_portal(req);
	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_setattr_interpret;
	sa = ptlrpc_req_async_args(sa, req);
	sa->sa_oa = oa;
	sa->sa_upcall = upcall;
	sa->sa_cookie = cookie;

	ptlrpcd_add_req(req);

	return 0;
}
EXPORT_SYMBOL(osc_punch_send);
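/*
 * osc_punch_send() issues the OST_PUNCH RPC used on the truncate path;
 * completion is reported through the same osc_setattr_interpret() upcall
 * mechanism as setattr, since both carry only an obdo.
 */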
/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:	Export structure
 * @oa:		Attributes passed to OSS from client (obdo structure)
 * @upcall:	Completion callback invoked when the RPC finishes
 * @cookie:	Opaque caller data passed to @upcall
 * @mode:	Operation done on given range.
 *
 * Only block allocation and the standard preallocate operation are supported
 * currently. Other mode flags are not supported yet. ftruncate(2) and
 * truncate(2) are supported via the SETATTR request instead.
 *
 * Return: Non-zero on failure and 0 on success.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
		       obd_enqueue_update_f upcall, void *cookie, int mode)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	struct ost_body *body;
	struct obd_import *imp = class_exp2cliimp(exp);
	int rc;

	oa->o_falloc_mode = mode;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_OST_FALLOCATE);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
	if (rc != 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_setattr_interpret;
	BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
	sa = ptlrpc_req_async_args(sa, req);
	sa->sa_oa = oa;
	sa->sa_upcall = upcall;
	sa->sa_cookie = cookie;

	ptlrpcd_add_req(req);

	RETURN(0);
}
EXPORT_SYMBOL(osc_fallocate_base);
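/*
 * OST_SYNC replies carry updated object attributes; osc_sync_interpret()
 * below folds the returned block count back into the cl_object so cached
 * attributes reflect the post-sync state without another getattr.
 */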
static int osc_sync_interpret(const struct lu_env *env,
			      struct ptlrpc_request *req, void *args, int rc)
{
	struct osc_fsync_args *fa = args;
	struct ost_body *body;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	unsigned long valid = 0;
	struct cl_object *obj;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL) {
		CERROR("can't unpack ost_body\n");
		GOTO(out, rc = -EPROTO);
	}

	*fa->fa_oa = body->oa;
	obj = osc2cl(fa->fa_obj);

	/* Update osc object's blocks attribute */
	cl_object_attr_lock(obj);
	if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
		attr->cat_blocks = body->oa.o_blocks;
		valid |= CAT_BLOCKS;
	}

	if (valid != 0)
		cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
out:
	rc = fa->fa_upcall(fa->fa_cookie, rc);
	return rc;
}
int osc_sync_base(struct osc_object *obj, struct obdo *oa,
		  obd_enqueue_update_f upcall, void *cookie,
		  struct ptlrpc_request_set *rqset)
{
	struct obd_export *exp = osc_export(obj);
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_fsync_args *fa;
	int rc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	/* overload the size and blocks fields in the oa with start/end */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_sync_interpret;

	fa = ptlrpc_req_async_args(fa, req);
	fa->fa_obj = obj;
	fa->fa_oa = oa;
	fa->fa_upcall = upcall;
	fa->fa_cookie = cookie;

	ptlrpc_set_add_req(rqset, req);

	RETURN(0);
}
/* Find and cancel locally locks matched by @mode in the resource found by
 * @objid. Found locks are added into @cancels list. Returns the amount of
 * locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
				   struct list_head *cancels,
				   enum ldlm_mode mode, __u64 lock_flags)
{
	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
	struct ldlm_res_id res_id;
	struct ldlm_resource *res;
	int count;

	/* Return, i.e. cancel nothing, only if ELC is supported (flag in
	 * export) but disabled through procfs (flag in NS).
	 *
	 * This distinguishes from a case when ELC is not supported originally,
	 * when we still want to cancel locks in advance and just cancel them
	 * locally, without sending any RPC. */
	if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
		RETURN(0);

	ostid_build_res_name(&oa->o_oi, &res_id);
	res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
	if (IS_ERR(res))
		RETURN(0);

	LDLM_RESOURCE_ADDREF(res);
	count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
					   lock_flags, 0, NULL);
	LDLM_RESOURCE_DELREF(res);
	ldlm_resource_putref(res);
	RETURN(count);
}
static int osc_destroy_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req, void *args, int rc)
{
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

	atomic_dec(&cli->cl_destroy_in_flight);
	wake_up(&cli->cl_destroy_waitq);

	return 0;
}

static int osc_can_send_destroy(struct client_obd *cli)
{
	if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
	    cli->cl_max_rpcs_in_flight) {
		/* The destroy request can be sent */
		return 1;
	}
	if (atomic_dec_return(&cli->cl_destroy_in_flight) <
	    cli->cl_max_rpcs_in_flight) {
		/*
		 * The counter has been modified between the two atomic
		 * operations.
		 */
		wake_up(&cli->cl_destroy_waitq);
	}
	return 0;
}
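/*
 * Note: OST_DESTROY RPCs are throttled to cl_max_rpcs_in_flight by the
 * in-flight counter above; osc_destroy() below sleeps on cl_destroy_waitq
 * until a slot frees up rather than queueing unbounded destroys.
 */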
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	struct ptlrpc_request *req;
	struct ost_body *body;
	LIST_HEAD(cancels);
	int rc, count;

	if (!oa) {
		CDEBUG(D_INFO, "oa NULL\n");
		RETURN(-EINVAL);
	}

	count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
					LDLM_FL_DISCARD_DATA);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
	if (req == NULL) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		RETURN(-ENOMEM);
	}

	rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
			       0, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_destroy_interpret;
	if (!osc_can_send_destroy(cli)) {
		/*
		 * Wait until the number of on-going destroy RPCs drops
		 * under max_rpc_in_flight
		 */
		rc = l_wait_event_abortable_exclusive(
			cli->cl_destroy_waitq,
			osc_can_send_destroy(cli));
		if (rc) {
			ptlrpc_req_finished(req);
			RETURN(-EINTR);
		}
	}

	/* Do not wait for response */
	ptlrpcd_add_req(req);
	RETURN(0);
}
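/*
 * Piggyback the client's dirty/grant accounting onto an outgoing RPC so the
 * server can adjust how much write grant this client holds: o_dirty is what
 * we have dirtied, o_undirty is how much more grant we would like, and
 * o_dropped reports grant we lost (capped to 32 bits by the wire format).
 */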
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
				long writing_bytes)
{
	u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

	LASSERT(!(oa->o_valid & bits));

	oa->o_valid |= bits;
	spin_lock(&cli->cl_loi_list_lock);
	if (cli->cl_ocd_grant_param)
		oa->o_dirty = cli->cl_dirty_grant;
	else
		oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
	if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
		CERROR("dirty %lu > dirty_max %lu\n",
		       cli->cl_dirty_pages,
		       cli->cl_dirty_max_pages);
		oa->o_undirty = 0;
	} else if (unlikely(atomic_long_read(&obd_dirty_pages) >
			    (long)(obd_max_dirty_pages + 1))) {
		/* The atomic_read() allowing the atomic_inc() are
		 * not covered by a lock thus they may safely race and trip
		 * this CERROR() unless we add in a small fudge factor (+1). */
		CERROR("%s: dirty %ld > system dirty_max %ld\n",
		       cli_name(cli), atomic_long_read(&obd_dirty_pages),
		       obd_max_dirty_pages);
		oa->o_undirty = 0;
	} else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
			    0x7fffffff)) {
		CERROR("dirty %lu - dirty_max %lu too big???\n",
		       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
		oa->o_undirty = 0;
	} else {
		unsigned long nrpages;
		unsigned long undirty;

		nrpages = cli->cl_max_pages_per_rpc;
		nrpages *= cli->cl_max_rpcs_in_flight + 1;
		nrpages = max(nrpages, cli->cl_dirty_max_pages);
		undirty = nrpages << PAGE_SHIFT;
		if (cli->cl_ocd_grant_param) {
			int nrextents;

			/* take extent tax into account when asking for more
			 * grant space */
			nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
				    cli->cl_max_extent_pages;
			undirty += nrextents * cli->cl_grant_extent_tax;
		}
		/* Do not ask for more than OBD_MAX_GRANT - a margin for server
		 * to add extent tax, etc.
		 */
		oa->o_undirty = min(undirty, OBD_MAX_GRANT &
				    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
	}
	oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
	/* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
	if (cli->cl_lost_grant > INT_MAX) {
		CDEBUG(D_CACHE,
		       "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
		       cli_name(cli), cli->cl_lost_grant);
		oa->o_dropped = INT_MAX;
	} else {
		oa->o_dropped = cli->cl_lost_grant;
	}
	cli->cl_lost_grant -= oa->o_dropped;
	spin_unlock(&cli->cl_loi_list_lock);
	CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
	       " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
	       oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
}
void osc_update_next_shrink(struct client_obd *cli)
{
	cli->cl_next_shrink_grant = ktime_get_seconds() +
				    cli->cl_grant_shrink_interval;

	CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
	       cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
	spin_lock(&cli->cl_loi_list_lock);
	cli->cl_avail_grant += grant;
	spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
	if (body->oa.o_valid & OBD_MD_FLGRANT) {
		CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
		__osc_update_grant(cli, body->oa.o_grant);
	}
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
	struct list_head	gtd_clients;
	struct mutex		gtd_mutex;
	unsigned long		gtd_stopped:1;
};
static struct grant_thread_data client_gtd;
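/*
 * All OSC devices share the single client_gtd instance and one delayed work
 * item (the static "work" below); the handler walks gtd_clients and shrinks
 * grant on any client that is due, so no per-device thread is needed.
 */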
static int osc_shrink_grant_interpret(const struct lu_env *env,
				      struct ptlrpc_request *req,
				      void *args, int rc)
{
	struct osc_grant_args *aa = args;
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
	struct ost_body *body;

	if (rc != 0) {
		__osc_update_grant(cli, aa->aa_oa->o_grant);
		GOTO(out, rc);
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	osc_update_grant(cli, body);
out:
	OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
	aa->aa_oa = NULL;

	return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
	spin_lock(&cli->cl_loi_list_lock);
	oa->o_grant = cli->cl_avail_grant / 4;
	cli->cl_avail_grant -= oa->o_grant;
	spin_unlock(&cli->cl_loi_list_lock);
	if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags = 0;
	}
	oa->o_flags |= OBD_FL_SHRINK_GRANT;
	osc_update_next_shrink(cli);
}
/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC. This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
			     (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

	spin_lock(&cli->cl_loi_list_lock);
	if (cli->cl_avail_grant <= target_bytes)
		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
	spin_unlock(&cli->cl_loi_list_lock);

	return osc_shrink_grant_to_target(cli, target_bytes);
}
int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
	int rc = 0;
	struct ost_body *body;

	spin_lock(&cli->cl_loi_list_lock);
	/* Don't shrink if we are already above or below the desired limit
	 * We don't want to shrink below a single RPC, as that will negatively
	 * impact block allocation and long-term performance. */
	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

	if (target_bytes >= cli->cl_avail_grant) {
		spin_unlock(&cli->cl_loi_list_lock);
		RETURN(0);
	}
	spin_unlock(&cli->cl_loi_list_lock);

	OBD_ALLOC_PTR(body);
	if (!body)
		RETURN(-ENOMEM);

	osc_announce_cached(cli, &body->oa, 0);

	spin_lock(&cli->cl_loi_list_lock);
	if (target_bytes >= cli->cl_avail_grant) {
		/* available grant has changed since target calculation */
		spin_unlock(&cli->cl_loi_list_lock);
		GOTO(out_free, rc = 0);
	}
	body->oa.o_grant = cli->cl_avail_grant - target_bytes;
	cli->cl_avail_grant = target_bytes;
	spin_unlock(&cli->cl_loi_list_lock);
	if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
		body->oa.o_valid |= OBD_MD_FLFLAGS;
		body->oa.o_flags = 0;
	}
	body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
	osc_update_next_shrink(cli);

	rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
				sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
				sizeof(*body), body, NULL);
	if (rc != 0)
		__osc_update_grant(cli, body->oa.o_grant);
out_free:
	OBD_FREE_PTR(body);
	RETURN(rc);
}
static int osc_should_shrink_grant(struct client_obd *client)
{
	time64_t next_shrink = client->cl_next_shrink_grant;

	if (client->cl_import == NULL)
		return 0;

	if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
	    client->cl_import->imp_grant_shrink_disabled) {
		osc_update_next_shrink(client);
		return 0;
	}

	if (ktime_get_seconds() >= next_shrink - 5) {
		/* Get the current RPC size directly, instead of going via:
		 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
		 * Keep comment here so that it can be found by searching. */
		int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

		if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
		    client->cl_avail_grant > brw_size)
			return 1;

		osc_update_next_shrink(client);
	}
	return 0;
}

#define GRANT_SHRINK_RPC_BATCH	100
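/*
 * At most GRANT_SHRINK_RPC_BATCH shrink RPCs are issued per pass of the
 * grant work handler below, so a node with many OSC devices does not flood
 * the servers when all of them become idle at once.
 */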
static struct delayed_work work;

static void osc_grant_work_handler(struct work_struct *data)
{
	struct client_obd *cli;
	int rpc_sent;
	bool init_next_shrink = true;
	time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

	rpc_sent = 0;
	mutex_lock(&client_gtd.gtd_mutex);
	list_for_each_entry(cli, &client_gtd.gtd_clients,
			    cl_grant_chain) {
		if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
		    osc_should_shrink_grant(cli)) {
			osc_shrink_grant(cli);
			rpc_sent++;
		}

		if (!init_next_shrink) {
			if (cli->cl_next_shrink_grant < next_shrink &&
			    cli->cl_next_shrink_grant > ktime_get_seconds())
				next_shrink = cli->cl_next_shrink_grant;
		} else {
			init_next_shrink = false;
			next_shrink = cli->cl_next_shrink_grant;
		}
	}
	mutex_unlock(&client_gtd.gtd_mutex);

	if (client_gtd.gtd_stopped == 1)
		return;

	if (next_shrink > ktime_get_seconds()) {
		time64_t delay = next_shrink - ktime_get_seconds();

		schedule_delayed_work(&work, cfs_time_seconds(delay));
	} else {
		schedule_work(&work.work);
	}
}
void osc_schedule_grant_work(void)
{
	cancel_delayed_work_sync(&work);
	schedule_work(&work.work);
}

/**
 * Start grant thread for returning grant to server for idle clients.
 */
static int osc_start_grant_work(void)
{
	client_gtd.gtd_stopped = 0;
	mutex_init(&client_gtd.gtd_mutex);
	INIT_LIST_HEAD(&client_gtd.gtd_clients);

	INIT_DELAYED_WORK(&work, osc_grant_work_handler);
	schedule_work(&work.work);

	return 0;
}

static void osc_stop_grant_work(void)
{
	client_gtd.gtd_stopped = 1;
	cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
	mutex_lock(&client_gtd.gtd_mutex);
	list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
	mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
	if (list_empty(&client->cl_grant_chain))
		return;

	mutex_lock(&client_gtd.gtd_mutex);
	list_del_init(&client->cl_grant_chain);
	mutex_unlock(&client_gtd.gtd_mutex);
}
void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
	/*
	 * ocd_grant is the total grant amount we expect to hold: if we've
	 * been evicted, it's the new avail_grant amount, and cl_dirty_pages
	 * will drop to 0 as inflight RPCs fail out; otherwise, it's
	 * avail_grant + dirty.
	 *
	 * race is tolerable here: if we're evicted, but imp_state already
	 * left EVICTED state, then cl_dirty_pages must be 0 already.
	 */
	spin_lock(&cli->cl_loi_list_lock);
	cli->cl_avail_grant = ocd->ocd_grant;
	if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
		unsigned long consumed = cli->cl_reserved_grant;

		if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
			consumed += cli->cl_dirty_grant;
		else
			consumed += cli->cl_dirty_pages << PAGE_SHIFT;
		if (cli->cl_avail_grant < consumed) {
			CERROR("%s: granted %ld but already consumed %ld\n",
			       cli_name(cli), cli->cl_avail_grant, consumed);
			cli->cl_avail_grant = 0;
		} else {
			cli->cl_avail_grant -= consumed;
		}
	}

	if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
		u64 size;
		int chunk_mask;

		/* overhead for each extent insertion */
		cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
		/* determine the appropriate chunk size used by osc_extent. */
		cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
					  ocd->ocd_grant_blkbits);
		/* max_pages_per_rpc must be chunk aligned */
		chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
		cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
					     ~chunk_mask) & chunk_mask;
		/* determine maximum extent size, in #pages */
		size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
		cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
		cli->cl_ocd_grant_param = 1;
	} else {
		cli->cl_ocd_grant_param = 0;
		cli->cl_grant_extent_tax = 0;
		cli->cl_chunkbits = PAGE_SHIFT;
		cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
	}
	spin_unlock(&cli->cl_loi_list_lock);

	CDEBUG(D_CACHE,
	       "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
	       cli_name(cli),
	       cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
	       cli->cl_max_extent_pages);

	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
		osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);
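/*
 * Example (values assumed for illustration): with PAGE_SHIFT = 12 and
 * ocd_grant_blkbits = 16, cl_chunkbits becomes 16, i.e. a 64KiB chunk of
 * 16 pages, and cl_max_pages_per_rpc is rounded up to a multiple of 16.
 */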
/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
			      struct brw_page **pga)
{
	char *ptr;
	int i = 0;

	/* skip bytes read OK */
	while (nob_read > 0) {
		LASSERT(page_count > 0);

		if (pga[i]->count > nob_read) {
			/* EOF inside this page */
			ptr = kmap(pga[i]->pg) +
			      (pga[i]->off & ~PAGE_MASK);
			memset(ptr + nob_read, 0, pga[i]->count - nob_read);
			kunmap(pga[i]->pg);
			page_count--;
			i++;
			break;
		}

		nob_read -= pga[i]->count;
		page_count--;
		i++;
	}

	/* zero remaining pages */
	while (page_count-- > 0) {
		ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
		memset(ptr, 0, pga[i]->count);
		kunmap(pga[i]->pg);
		i++;
	}
}
static int check_write_rcs(struct ptlrpc_request *req,
			   int requested_nob, int niocount,
			   size_t page_count, struct brw_page **pga)
{
	int i;
	__u32 *remote_rcs;

	remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
						  sizeof(*remote_rcs) *
						  niocount);
	if (remote_rcs == NULL) {
		CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
		return -EPROTO;
	}

	/* return error if any niobuf was in error */
	for (i = 0; i < niocount; i++) {
		if ((int)remote_rcs[i] < 0) {
			CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
			       i, remote_rcs[i], req);
			return remote_rcs[i];
		}

		if (remote_rcs[i] != 0) {
			CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
			       i, remote_rcs[i], req);
			return -EPROTO;
		}
	}
	if (req->rq_bulk != NULL &&
	    req->rq_bulk->bd_nob_transferred != requested_nob) {
		CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
		       req->rq_bulk->bd_nob_transferred, requested_nob);
		return -EPROTO;
	}

	return 0;
}
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
	if (p1->flag != p2->flag) {
		unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
				  OBD_BRW_SYNC | OBD_BRW_ASYNC |
				  OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);

		/* warn if we try to combine flags that we don't know to be
		 * safe to combine */
		if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
			CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
			      "report this at https://jira.whamcloud.com/\n",
			      p1->flag, p2->flag);
		}
		return 0;
	}

	return (p1->off + p1->count == p2->off);
}
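/*
 * Bulk checksumming comes in two flavours below: the T10-PI aware path
 * (osc_checksum_bulk_t10pi, compiled in only with CONFIG_CRC_T10DIF) that
 * hashes per-sector guard tags, and the plain whole-buffer hash in
 * osc_checksum_bulk; osc_checksum_bulk_rw() picks between them.
 */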
#if IS_ENABLED(CONFIG_CRC_T10DIF)
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
				   size_t pg_count, struct brw_page **pga,
				   int opc, obd_dif_csum_fn *fn,
				   int sector_size,
				   u32 *check_sum, bool resend)
{
	struct ahash_request *req;
	/* Use Adler as the default checksum type on top of DIF tags */
	unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
	struct page *__page;
	unsigned char *buffer;
	__u16 *guard_start;
	unsigned int bufsize;
	int guard_number;
	int used_number = 0;
	int used;
	u32 cksum;
	int rc = 0;
	int i = 0;

	LASSERT(pg_count > 0);

	__page = alloc_page(GFP_KERNEL);
	if (__page == NULL)
		return -ENOMEM;

	req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
	if (IS_ERR(req)) {
		rc = PTR_ERR(req);
		CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
		       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
		GOTO(out, rc);
	}

	buffer = kmap(__page);
	guard_start = (__u16 *)buffer;
	guard_number = PAGE_SIZE / sizeof(*guard_start);
	CDEBUG(D_PAGE | (resend ? D_HA : 0),
	       "GRD tags per page=%u, resend=%u, bytes=%u, pages=%zu\n",
	       guard_number, resend, nob, pg_count);

	while (nob > 0 && pg_count > 0) {
		unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

		/* corrupt the data before we compute the checksum, to
		 * simulate an OST->client data error */
		if (unlikely(i == 0 && opc == OST_READ &&
			     OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
			unsigned char *ptr = kmap(pga[i]->pg);
			int off = pga[i]->off & ~PAGE_MASK;

			memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
			kunmap(pga[i]->pg);
		}

		/*
		 * The left guard number should be able to hold checksums of a
		 * whole page.
		 */
		rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
						  pga[i]->off & ~PAGE_MASK,
						  count,
						  guard_start + used_number,
						  guard_number - used_number,
						  &used, sector_size, fn);
		if (unlikely(resend))
			CDEBUG(D_PAGE | D_HA,
			       "pga[%u]: used %u off %llu+%u gen checksum: %*phN\n",
			       i, used, pga[i]->off & ~PAGE_MASK, count,
			       (int)(used * sizeof(*guard_start)),
			       guard_start + used_number);
		if (rc)
			break;

		used_number += used;
		if (used_number == guard_number) {
			cfs_crypto_hash_update_page(req, __page, 0,
				used_number * sizeof(*guard_start));
			used_number = 0;
		}

		nob -= pga[i]->count;
		pg_count--;
		i++;
	}
	kunmap(__page);
	if (rc)
		GOTO(out, rc);

	if (used_number != 0)
		cfs_crypto_hash_update_page(req, __page, 0,
			used_number * sizeof(*guard_start));

	bufsize = sizeof(cksum);
	cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

	/* For sending we only compute the wrong checksum instead
	 * of corrupting the data so it is still correct on a redo */
	if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
		cksum++;

	*check_sum = cksum;
out:
	__free_page(__page);
	return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum, re) \
	-EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */
static int osc_checksum_bulk(int nob, size_t pg_count,
			     struct brw_page **pga, int opc,
			     enum cksum_types cksum_type,
			     u32 *cksum)
{
	int i = 0;
	struct ahash_request *req;
	unsigned int bufsize;
	unsigned char cfs_alg = cksum_obd2cfs(cksum_type);

	LASSERT(pg_count > 0);

	req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
	if (IS_ERR(req)) {
		CERROR("Unable to initialize checksum hash %s\n",
		       cfs_crypto_hash_name(cfs_alg));
		return PTR_ERR(req);
	}

	while (nob > 0 && pg_count > 0) {
		unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

		/* corrupt the data before we compute the checksum, to
		 * simulate an OST->client data error */
		if (i == 0 && opc == OST_READ &&
		    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
			unsigned char *ptr = kmap(pga[i]->pg);
			int off = pga[i]->off & ~PAGE_MASK;

			memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
			kunmap(pga[i]->pg);
		}
		cfs_crypto_hash_update_page(req, pga[i]->pg,
					    pga[i]->off & ~PAGE_MASK,
					    count);
		LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
			       (int)(pga[i]->off & ~PAGE_MASK));

		nob -= pga[i]->count;
		pg_count--;
		i++;
	}

	bufsize = sizeof(*cksum);
	cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

	/* For sending we only compute the wrong checksum instead
	 * of corrupting the data so it is still correct on a redo */
	if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
		(*cksum)++;

	return 0;
}
static int osc_checksum_bulk_rw(const char *obd_name,
				enum cksum_types cksum_type,
				int nob, size_t pg_count,
				struct brw_page **pga, int opc,
				u32 *check_sum, bool resend)
{
	obd_dif_csum_fn *fn = NULL;
	int sector_size = 0;
	int rc;

	obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

	if (fn)
		rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
					     opc, fn, sector_size, check_sum,
					     resend);
	else
		rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
				       check_sum);

	RETURN(rc);
}
static inline void osc_release_bounce_pages(struct brw_page **pga,
					    u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
	int i;

	for (i = 0; i < page_count; i++) {
		/* Bounce pages allocated by a call to
		 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
		 * are identified thanks to the PageChecked flag.
		 */
		if (PageChecked(pga[i]->pg))
			llcrypt_finalize_bounce_page(&pga[i]->pg);
		pga[i]->count -= pga[i]->bp_count_diff;
		pga[i]->off += pga[i]->bp_off_diff;
	}
#endif
}
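/*
 * Build a BRW (bulk read/write) request: encrypt pages if needed, merge
 * contiguous pages into niobufs, decide between a bulk descriptor and an
 * inline "short I/O" buffer, then attach grant and checksum information.
 */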
static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
		     u32 page_count, struct brw_page **pga,
		     struct ptlrpc_request **reqp, int resend)
{
	struct ptlrpc_request *req;
	struct ptlrpc_bulk_desc *desc;
	struct ost_body *body;
	struct obd_ioobj *ioobj;
	struct niobuf_remote *niobuf;
	int niocount, i, requested_nob, opc, rc, short_io_size = 0;
	struct osc_brw_async_args *aa;
	struct req_capsule *pill;
	struct brw_page *pg_prev;
	void *short_io_buf;
	const char *obd_name = cli->cl_import->imp_obd->obd_name;
	struct inode *inode = NULL;
	bool directio = false;
	bool enable_checksum = true;

	inode = page2inode(pga[0]->pg);
	if (inode == NULL) {
		/* Try to get reference to inode from cl_page if we are
		 * dealing with direct IO, as handled pages are not
		 * actual page cache pages.
		 */
		struct osc_async_page *oap = brw_page2oap(pga[0]);
		struct cl_page *clpage = oap2cl_page(oap);

		inode = clpage->cp_inode;
		if (inode)
			directio = true;
	}

	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
		RETURN(-ENOMEM); /* Recoverable */
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
		RETURN(-EINVAL); /* Fatal */
	if ((cmd & OBD_BRW_WRITE) != 0) {
		opc = OST_WRITE;
		req = ptlrpc_request_alloc_pool(cli->cl_import,
						osc_rq_pool,
						&RQF_OST_BRW_WRITE);
	} else {
		opc = OST_READ;
		req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
	}
	if (req == NULL)
		RETURN(-ENOMEM);

	if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
		for (i = 0; i < page_count; i++) {
			struct brw_page *pg = pga[i];
			struct page *data_page = NULL;
			bool retried = false;
			bool lockedbymyself;
			u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
			struct address_space *map_orig = NULL;
			pgoff_t index_orig;

retry_encrypt:
			if (nunits & ~LUSTRE_ENCRYPTION_MASK)
				nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
					 LUSTRE_ENCRYPTION_UNIT_SIZE;
			/* The page can already be locked when we arrive here.
			 * This is possible when cl_page_assume/vvp_page_assume
			 * is stuck on wait_on_page_writeback with page lock
			 * held. In this case there is no risk for the lock to
			 * be released while we are doing our encryption
			 * processing, because writeback against that page will
			 * end in vvp_page_completion_write/cl_page_completion,
			 * which means only once the page is fully processed.
			 */
			lockedbymyself = trylock_page(pg->pg);
			if (directio) {
				map_orig = pg->pg->mapping;
				pg->pg->mapping = inode->i_mapping;
				index_orig = pg->pg->index;
				pg->pg->index = pg->off >> PAGE_SHIFT;
			}
			data_page =
				llcrypt_encrypt_pagecache_blocks(pg->pg,
								 nunits, 0,
								 GFP_NOFS);
			if (directio) {
				pg->pg->mapping = map_orig;
				pg->pg->index = index_orig;
			}
			if (lockedbymyself)
				unlock_page(pg->pg);
			if (IS_ERR(data_page)) {
				rc = PTR_ERR(data_page);
				if (rc == -ENOMEM && !retried) {
					retried = true;
					rc = 0;
					goto retry_encrypt;
				}
				ptlrpc_request_free(req);
				RETURN(rc);
			}
			/* Set PageChecked flag on bounce page for
			 * disambiguation in osc_release_bounce_pages().
			 */
			SetPageChecked(data_page);
			pg->pg = data_page;
			/* there should be no gap in the middle of page array */
			if (i == page_count - 1) {
				struct osc_async_page *oap = brw_page2oap(pg);

				oa->o_size = oap->oap_count +
					oap->oap_obj_off + oap->oap_page_off;
			}
			/* len is forced to nunits, and relative offset to 0
			 * so store the old, clear text info
			 */
			pg->bp_count_diff = nunits - pg->count;
			pg->count = nunits;
			pg->bp_off_diff = pg->off & ~PAGE_MASK;
			pg->off = pg->off & PAGE_MASK;
		}
	} else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
		for (i = 0; i < page_count; i++) {
			struct brw_page *pg = pga[i];
			u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;

			if (nunits & ~LUSTRE_ENCRYPTION_MASK)
				nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
					 LUSTRE_ENCRYPTION_UNIT_SIZE;
			/* count/off are forced to cover the whole encryption
			 * unit size so that all encrypted data is stored on the
			 * OST, so adjust bp_{count,off}_diff for the size of
			 * the clear text.
			 */
			pg->bp_count_diff = nunits - pg->count;
			pg->count = nunits;
			pg->bp_off_diff = pg->off & ~PAGE_MASK;
			pg->off = pg->off & PAGE_MASK;
		}
	}
	for (niocount = i = 1; i < page_count; i++) {
		if (!can_merge_pages(pga[i - 1], pga[i]))
			niocount++;
	}

	pill = &req->rq_pill;
	req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
			     sizeof(*ioobj));
	req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
			     niocount * sizeof(*niobuf));

	for (i = 0; i < page_count; i++) {
		short_io_size += pga[i]->count;
		if (!inode || !IS_ENCRYPTED(inode)) {
			pga[i]->bp_count_diff = 0;
			pga[i]->bp_off_diff = 0;
		}
	}

	if (lnet_is_rdma_only_page(pga[0]->pg)) {
		enable_checksum = false;
		short_io_size = 0;
	}

	/* Check if read/write is small enough to be a short io. */
	if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
	    !imp_connect_shortio(cli->cl_import))
		short_io_size = 0;

	/* If this is an empty RPC to old server, just ignore it */
	if (!short_io_size && !pga[0]->pg) {
		ptlrpc_request_free(req);
		RETURN(-ENODATA);
	}

	req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
			     opc == OST_READ ? 0 : short_io_size);
	if (opc == OST_READ)
		req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
				     short_io_size);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	osc_set_io_portal(req);

	ptlrpc_at_set_req_timeout(req);
	/* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
	 * retry logic */
	req->rq_no_retry_einprogress = 1;

	if (short_io_size != 0) {
		desc = NULL;
		short_io_buf = NULL;
		goto no_bulk;
	}

	desc = ptlrpc_prep_bulk_imp(req, page_count,
		cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
		(opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
			PTLRPC_BULK_PUT_SINK),
		OST_BULK_PORTAL,
		&ptlrpc_bulk_kiov_pin_ops);

	if (desc == NULL)
		GOTO(out, rc = -ENOMEM);
	/* NB request now owns desc and will free it when it gets freed */
no_bulk:
	body = req_capsule_client_get(pill, &RMF_OST_BODY);
	ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
	niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
	LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	/* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
	 * and from_kgid(), because they are asynchronous. Fortunately, variable
	 * oa contains valid o_uid and o_gid in these two operations.
	 * Besides, filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
	 * OBD_MD_FLUID and OBD_MD_FLGID is not set in order to avoid breaking
	 * other process logic */
	body->oa.o_uid = oa->o_uid;
	body->oa.o_gid = oa->o_gid;

	obdo_to_ioobj(oa, ioobj);
	ioobj->ioo_bufcnt = niocount;
	/* The high bits of ioo_max_brw tells server _maximum_ number of bulks
	 * that might be sent for this request.  The actual number is decided
	 * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
	 * "max - 1" for old client compatibility sending "0", and also so
	 * the actual maximum is a power-of-two number, not one less. LU-1431 */
	if (desc != NULL)
		ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
	else /* short io */
		ioobj_max_brw_set(ioobj, 0);

	if (short_io_size != 0) {
		if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
			body->oa.o_valid |= OBD_MD_FLFLAGS;
			body->oa.o_flags = 0;
		}
		body->oa.o_flags |= OBD_FL_SHORT_IO;
		CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
		       short_io_size);
		if (opc == OST_WRITE) {
			short_io_buf = req_capsule_client_get(pill,
							      &RMF_SHORT_IO);
			LASSERT(short_io_buf != NULL);
		}
	}

	LASSERT(page_count > 0);
	pg_prev = pga[0];
	for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
		struct brw_page *pg = pga[i];
		int poff = pg->off & ~PAGE_MASK;

		LASSERT(pg->count > 0);
		/* make sure there is no gap in the middle of page array */
		LASSERTF(page_count == 1 ||
			 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
			  ergo(i > 0 && i < page_count - 1,
			       poff == 0 && pg->count == PAGE_SIZE) &&
			  ergo(i == page_count - 1, poff == 0)),
			 "i: %d/%d pg: %p off: %llu, count: %u\n",
			 i, page_count, pg, pg->off, pg->count);
		LASSERTF(i == 0 || pg->off > pg_prev->off,
			 "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
			 " prev_pg %p [pri %lu ind %lu] off %llu\n",
			 i, page_count,
			 pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
			 pg_prev->pg, page_private(pg_prev->pg),
			 pg_prev->pg->index, pg_prev->off);
		LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
			(pg->flag & OBD_BRW_SRVLOCK));
		if (short_io_size != 0 && opc == OST_WRITE) {
			unsigned char *ptr = kmap_atomic(pg->pg);

			LASSERT(short_io_size >= requested_nob + pg->count);
			memcpy(short_io_buf + requested_nob,
			       ptr + poff,
			       pg->count);
			kunmap_atomic(ptr);
		} else if (short_io_size == 0) {
			desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
							 pg->count);
		}
		requested_nob += pg->count;

		if (i > 0 && can_merge_pages(pg_prev, pg)) {
			niobuf--;
			niobuf->rnb_len += pg->count;
		} else {
			niobuf->rnb_offset = pg->off;
			niobuf->rnb_len    = pg->count;
			niobuf->rnb_flags  = pg->flag;
		}
		pg_prev = pg;
	}

	LASSERTF((void *)(niobuf - niocount) ==
		 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
		 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
		 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
	osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);
	if (resend) {
		if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
			body->oa.o_valid |= OBD_MD_FLFLAGS;
			body->oa.o_flags = 0;
		}
		body->oa.o_flags |= OBD_FL_RECOV_RESEND;
	}

	if (osc_should_shrink_grant(cli))
		osc_shrink_grant_local(cli, &body->oa);

	if (!cli->cl_checksum || sptlrpc_flavor_has_bulk(&req->rq_flvr))
		enable_checksum = false;

	/* size[REQ_REC_OFF] still sizeof (*body) */
	if (opc == OST_WRITE) {
		if (enable_checksum) {
			/* store cl_cksum_type in a local variable since
			 * it can be changed via lprocfs */
			enum cksum_types cksum_type = cli->cl_cksum_type;

			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
				body->oa.o_flags = 0;

			body->oa.o_flags |= obd_cksum_type_pack(obd_name,
								cksum_type);
			body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;

			rc = osc_checksum_bulk_rw(obd_name, cksum_type,
						  requested_nob, page_count,
						  pga, OST_WRITE,
						  &body->oa.o_cksum, resend);
			if (rc < 0) {
				CDEBUG(D_PAGE, "failed to checksum: rc = %d\n",
				       rc);
				GOTO(out, rc);
			}
			CDEBUG(D_PAGE | (resend ? D_HA : 0),
			       "checksum at write origin: %x (%x)\n",
			       body->oa.o_cksum, cksum_type);

			/* save this in 'oa', too, for later checking */
			oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
			oa->o_flags |= obd_cksum_type_pack(obd_name,
							   cksum_type);
		} else {
			/* clear out the checksum flag, in case this is a
			 * resend but cl_checksum is no longer set. b=11238 */
			oa->o_valid &= ~OBD_MD_FLCKSUM;
		}
		oa->o_cksum = body->oa.o_cksum;
		/* 1 RC per niobuf */
		req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
				     sizeof(__u32) * niocount);
	} else {
		if (enable_checksum) {
			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
				body->oa.o_flags = 0;
			body->oa.o_flags |= obd_cksum_type_pack(obd_name,
							cli->cl_cksum_type);
			body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
		}

		/* Client cksum has been already copied to wire obdo in previous
		 * lustre_set_wire_obdo(), and in the case a bulk-read is being
		 * resent due to cksum error, this will allow Server to
		 * check+dump pages on its side */
	}
	ptlrpc_request_set_replen(req);

	aa = ptlrpc_req_async_args(aa, req);
	aa->aa_oa = oa;
	aa->aa_requested_nob = requested_nob;
	aa->aa_nio_count = niocount;
	aa->aa_page_count = page_count;
	aa->aa_resends = 0;
	aa->aa_ppga = pga;
	aa->aa_cli = cli;
	INIT_LIST_HEAD(&aa->aa_oaps);

	*reqp = req;
	niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
	CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
	       req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
	       niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
	RETURN(0);

out:
	ptlrpc_req_finished(req);
	RETURN(rc);
}
char dbgcksum_file_name[PATH_MAX];

static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
				struct brw_page **pga, __u32 server_cksum,
				__u32 client_cksum)
{
	struct file *filp;
	int rc, i;
	unsigned int len;
	char *buf;

	/* will only keep dump of pages on first error for the same range in
	 * file/fid, not during the resends/retries. */
	snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
		 "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
		 (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
		  libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
		 pga[0]->off,
		 pga[page_count-1]->off + pga[page_count-1]->count - 1,
		 client_cksum, server_cksum);
	CWARN("dumping checksum data to %s\n", dbgcksum_file_name);
	filp = filp_open(dbgcksum_file_name,
			 O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		if (rc == -EEXIST)
			CDEBUG(D_INFO, "%s: can't open to dump pages with "
			       "checksum error: rc = %d\n", dbgcksum_file_name,
			       rc);
		else
			CERROR("%s: can't open to dump pages with checksum "
			       "error: rc = %d\n", dbgcksum_file_name, rc);
		return;
	}

	for (i = 0; i < page_count; i++) {
		len = pga[i]->count;
		buf = kmap(pga[i]->pg);
		while (len != 0) {
			rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
			if (rc < 0) {
				CERROR("%s: wanted to write %u but got %d "
				       "error\n", dbgcksum_file_name, len, rc);
				break;
			}
			len -= rc;
			buf += rc;
		}
		kunmap(pga[i]->pg);
	}

	rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
	if (rc)
		CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
	filp_close(filp, NULL);

	libcfs_debug_dumplog();
}
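/*
 * On a write checksum mismatch reported by the server, the client
 * recomputes the checksum over the pages it still holds. Comparing that
 * fresh value with the original client and server values distinguishes
 * "page changed under IO" (e.g. mmap write) from on-the-wire corruption.
 */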
static int
check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
		     __u32 client_cksum, __u32 server_cksum,
		     struct osc_brw_async_args *aa)
{
	const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
	enum cksum_types cksum_type;
	obd_dif_csum_fn *fn = NULL;
	int sector_size = 0;
	__u32 new_cksum;
	char *msg;
	int rc;

	if (server_cksum == client_cksum) {
		CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
		return 0;
	}

	if (aa->aa_cli->cl_checksum_dump)
		dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
				    server_cksum, client_cksum);

	cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
					   oa->o_flags : 0);

	switch (cksum_type) {
	case OBD_CKSUM_T10IP512:
		fn = obd_dif_ip_fn;
		sector_size = 512;
		break;
	case OBD_CKSUM_T10IP4K:
		fn = obd_dif_ip_fn;
		sector_size = 4096;
		break;
	case OBD_CKSUM_T10CRC512:
		fn = obd_dif_crc_fn;
		sector_size = 512;
		break;
	case OBD_CKSUM_T10CRC4K:
		fn = obd_dif_crc_fn;
		sector_size = 4096;
		break;
	default:
		break;
	}

	if (fn)
		rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
					     aa->aa_page_count, aa->aa_ppga,
					     OST_WRITE, fn, sector_size,
					     &new_cksum, true);
	else
		rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
				       aa->aa_ppga, OST_WRITE, cksum_type,
				       &new_cksum);

	if (rc < 0)
		msg = "failed to calculate the client write checksum";
	else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
		msg = "the server did not use the checksum type specified in "
		      "the original request - likely a protocol problem";
	else if (new_cksum == server_cksum)
		msg = "changed on the client after we checksummed it - "
		      "likely false positive due to mmap IO (bug 11742)";
	else if (new_cksum == client_cksum)
		msg = "changed in transit before arrival at OST";
	else
		msg = "changed in transit AND doesn't match the original - "
		      "likely false positive due to mmap IO (bug 11742)";

	LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
			   DFID " object "DOSTID" extent [%llu-%llu], original "
			   "client csum %x (type %x), server csum %x (type %x),"
			   " client csum now %x\n",
			   obd_name, msg, libcfs_nid2str(peer->nid),
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
			   POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
			   aa->aa_ppga[aa->aa_page_count - 1]->off +
			   aa->aa_ppga[aa->aa_page_count-1]->count - 1,
			   client_cksum,
			   obd_cksum_type_unpack(aa->aa_oa->o_flags),
			   server_cksum, cksum_type, new_cksum);
	return 1;
}
/* Note rc enters this function as number of bytes transferred */
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
{
	struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
	struct client_obd *cli = aa->aa_cli;
	const char *obd_name = cli->cl_import->imp_obd->obd_name;
	const struct lnet_process_id *peer =
		&req->rq_import->imp_connection->c_peer;
	struct ost_body *body;
	u32 client_cksum = 0;
	struct inode *inode;
	unsigned int blockbits = 0, blocksize = 0;

	if (rc < 0 && rc != -EDQUOT) {
		DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
		RETURN(rc);
	}

	LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL) {
		DEBUG_REQ(D_INFO, req, "cannot unpack body");
		RETURN(-EPROTO);
	}

	/* set/clear over quota flag for a uid/gid/projid */
	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
	    body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
		unsigned qid[LL_MAXQUOTAS] = {
					 body->oa.o_uid, body->oa.o_gid,
					 body->oa.o_projid };
		CDEBUG(D_QUOTA,
		       "setdq for [%u %u %u] with valid %#llx, flags %x\n",
		       body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
		       body->oa.o_valid, body->oa.o_flags);
		osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
				body->oa.o_flags);
	}

	osc_update_grant(cli, body);

	if (rc < 0)
		RETURN(rc);

	if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
		client_cksum = aa->aa_oa->o_cksum; /* save for later */

	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
		if (rc > 0) {
			CERROR("%s: unexpected positive size %d\n",
			       obd_name, rc);
			RETURN(-EPROTO);
		}

		if (req->rq_bulk != NULL &&
		    sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
			RETURN(-EAGAIN);

		if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
		    check_write_checksum(&body->oa, peer, client_cksum,
					 body->oa.o_cksum, aa))
			RETURN(-EAGAIN);

		rc = check_write_rcs(req, aa->aa_requested_nob,
				     aa->aa_nio_count, aa->aa_page_count,
				     aa->aa_ppga);
		GOTO(out, rc);
	}

	/* The rest of this function executes only for OST_READs */
	if (req->rq_bulk == NULL) {
		rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
					  RCL_SERVER);
		LASSERT(rc == req->rq_status);
	} else {
		/* if unwrap_bulk failed, return -EAGAIN to retry */
		rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
	}
	if (rc < 0)
		GOTO(out, rc = -EAGAIN);

	if (rc > aa->aa_requested_nob) {
		CERROR("%s: unexpected size %d, requested %d\n", obd_name,
		       rc, aa->aa_requested_nob);
		RETURN(-EPROTO);
	}

	if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
		CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
		       rc, req->rq_bulk->bd_nob_transferred);
		RETURN(-EPROTO);
	}

	if (req->rq_bulk == NULL) {
		/* short io */
		int nob, pg_count, i = 0;
		unsigned char *buf;

		CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
		pg_count = aa->aa_page_count;
		buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
						   rc);
		nob = rc;
		while (nob > 0 && pg_count > 0) {
			unsigned char *ptr;
			int count = aa->aa_ppga[i]->count > nob ?
				    nob : aa->aa_ppga[i]->count;

			CDEBUG(D_CACHE, "page %p count %d\n",
			       aa->aa_ppga[i]->pg, count);
			ptr = kmap_atomic(aa->aa_ppga[i]->pg);
			memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
			       count);
			kunmap_atomic((void *) ptr);

			buf += count;
			nob -= count;
			i++;
			pg_count--;
		}
	}

	if (rc < aa->aa_requested_nob)
		handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
	if (body->oa.o_valid & OBD_MD_FLCKSUM) {
		static int cksum_counter;
		u32 server_cksum = body->oa.o_cksum;
		int nob = rc;
		char *via = "";
		char *router = "";
		enum cksum_types cksum_type;
		u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
			      body->oa.o_flags : 0;

		cksum_type = obd_cksum_type_unpack(o_flags);
		rc = osc_checksum_bulk_rw(obd_name, cksum_type, nob,
					  aa->aa_page_count, aa->aa_ppga,
					  OST_READ, &client_cksum, false);
		if (rc < 0)
			GOTO(out, rc);

		if (req->rq_bulk != NULL &&
		    peer->nid != req->rq_bulk->bd_sender) {
			via = " via ";
			router = libcfs_nid2str(req->rq_bulk->bd_sender);
		}

		if (server_cksum != client_cksum) {
			struct ost_body *clbody;
			__u32 client_cksum2;
			u32 page_count = aa->aa_page_count;

			osc_checksum_bulk_rw(obd_name, cksum_type, nob,
					     page_count, aa->aa_ppga,
					     OST_READ, &client_cksum2, true);
			clbody = req_capsule_client_get(&req->rq_pill,
							&RMF_OST_BODY);
			if (cli->cl_checksum_dump)
				dump_all_bulk_pages(&clbody->oa, page_count,
						    aa->aa_ppga, server_cksum,
						    client_cksum);

			LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
					   "%s%s%s inode "DFID" object "DOSTID
					   " extent [%llu-%llu], client %x/%x, "
					   "server %x, cksum_type %x\n",
					   obd_name,
					   libcfs_nid2str(peer->nid),
					   via, router,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_seq : 0ULL,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_oid : 0,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_ver : 0,
					   POSTID(&body->oa.o_oi),
					   aa->aa_ppga[0]->off,
					   aa->aa_ppga[page_count-1]->off +
					   aa->aa_ppga[page_count-1]->count - 1,
					   client_cksum, client_cksum2,
					   server_cksum, cksum_type);
			cksum_counter = 0;
			aa->aa_oa->o_cksum = client_cksum;
			rc = -EAGAIN;
		} else {
			cksum_counter++;
			CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
			rc = 0;
		}
	} else if (unlikely(client_cksum)) {
		static int cksum_missed;

		cksum_missed++;
		if ((cksum_missed & (-cksum_missed)) == cksum_missed)
			CERROR("%s: checksum %u requested from %s but not sent\n",
			       obd_name, cksum_missed,
			       libcfs_nid2str(peer->nid));
	} else {
		rc = 0;
	}
	inode = page2inode(aa->aa_ppga[0]->pg);
	if (inode == NULL) {
		/* Try to get reference to inode from cl_page if we are
		 * dealing with direct IO, as handled pages are not
		 * actual page cache pages.
		 */
		struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);

		inode = oap2cl_page(oap)->cp_inode;
		if (inode) {
			blockbits = inode->i_blkbits;
			blocksize = 1 << blockbits;
		}
	}
	if (inode && IS_ENCRYPTED(inode)) {
		int idx;

		if (!llcrypt_has_encryption_key(inode)) {
			CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
			GOTO(out, rc);
		}
		for (idx = 0; idx < aa->aa_page_count; idx++) {
			struct brw_page *pg = aa->aa_ppga[idx];
			unsigned int offs = 0;

			while (offs < PAGE_SIZE) {
				/* do not decrypt if page is all 0s */
				if (memchr_inv(page_address(pg->pg) + offs, 0,
					LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
					/* if page is empty forward info to
					 * upper layers (ll_io_zero_page) by
					 * clearing PagePrivate2
					 */
					if (!offs)
						ClearPagePrivate2(pg->pg);
					break;
				}

				if (blockbits) {
					/* This is direct IO case. Directly call
					 * decrypt function that takes inode as
					 * input parameter. Page does not need
					 * to be locked.
					 */
					u64 lblk_num =
						((u64)(pg->off >> PAGE_SHIFT) <<
						 (PAGE_SHIFT - blockbits)) +
						(offs >> blockbits);
					unsigned int i;

					for (i = offs;
					     i < offs +
						 LUSTRE_ENCRYPTION_UNIT_SIZE;
					     i += blocksize, lblk_num++) {
						rc = llcrypt_decrypt_block_inplace(
							inode, pg->pg,
							blocksize, i,
							lblk_num);
						if (rc)
							break;
					}
				} else {
					rc = llcrypt_decrypt_pagecache_blocks(
						pg->pg,
						LUSTRE_ENCRYPTION_UNIT_SIZE,
						offs);
				}
				if (rc)
					GOTO(out, rc);

				offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
			}
		}
	}

out:
	if (rc >= 0)
		lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
				     aa->aa_oa, &body->oa);

	RETURN(rc);
}
2229 static int osc_brw_redo_request(struct ptlrpc_request *request,
2230 struct osc_brw_async_args *aa, int rc)
2232 struct ptlrpc_request *new_req;
2233 struct osc_brw_async_args *new_aa;
2234 struct osc_async_page *oap;
2237 /* The below message is checked in replay-ost-single.sh test_8ae*/
2238 DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2239 "redo for recoverable error %d", rc);
2241 rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2242 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2243 aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2244 aa->aa_ppga, &new_req, 1);
2248 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2249 if (oap->oap_request != NULL) {
2250 LASSERTF(request == oap->oap_request,
2251 "request %p != oap_request %p\n",
2252 request, oap->oap_request);
2256 * New request takes over pga and oaps from old request.
2257 * Note that copying a list_head doesn't work, need to move it...
2260 new_req->rq_interpret_reply = request->rq_interpret_reply;
2261 new_req->rq_async_args = request->rq_async_args;
2262 new_req->rq_commit_cb = request->rq_commit_cb;
2263 /* Cap the resend delay to the current request timeout; this is similar
2264 * to what ptlrpc does (see after_reply()). */
2265 if (aa->aa_resends > new_req->rq_timeout)
2266 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2268 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
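/* Worked example (illustrative values): with aa_resends = 3 and
 * rq_timeout = 30, the resend is delayed 3 seconds; with aa_resends = 50
 * the delay is capped at the 30-second request timeout. */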
2269 new_req->rq_generation_set = 1;
2270 new_req->rq_import_generation = request->rq_import_generation;
2272 new_aa = ptlrpc_req_async_args(new_aa, new_req);
2274 INIT_LIST_HEAD(&new_aa->aa_oaps);
2275 list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2276 INIT_LIST_HEAD(&new_aa->aa_exts);
2277 list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2278 new_aa->aa_resends = aa->aa_resends;
2280 list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2281 if (oap->oap_request) {
2282 ptlrpc_req_finished(oap->oap_request);
2283 oap->oap_request = ptlrpc_request_addref(new_req);
2287 /* XXX: This code will run into problems if we ever support adding
2288 * a series of BRW RPCs into a self-defined ptlrpc_request_set
2289 * and waiting for all of them to finish. We should inherit the
2290 * request set from the old request. */
2291 ptlrpcd_add_req(new_req);
2293 DEBUG_REQ(D_INFO, new_req, "new request");
2298 * Ugh, we want disk allocation on the target to happen in offset order. We'll
2299 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
2300 * fine for our small page arrays and doesn't require allocation. It's an
2301 * insertion sort that swaps elements that are strides apart, shrinking the
2302 * stride down until it's '1' and the array is sorted.
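/*
 * For illustration (not part of the driver): with num = 20 the stride
 * loop below generates the Knuth/Sedgewick sequence h = 3h + 1, i.e.
 * 1, 4, 13, 40, stopping at the first stride >= num; the do/while then
 * replays it in reverse:
 *
 *	stride = 13: insertion sort of elements 13 apart
 *	stride =  4: insertion sort of elements  4 apart
 *	stride =  1: plain insertion sort on a nearly sorted array
 */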
2304 static void sort_brw_pages(struct brw_page **array, int num)
2307 struct brw_page *tmp;
2311 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2316 for (i = stride ; i < num ; i++) {
2319 while (j >= stride && array[j - stride]->off > tmp->off) {
2320 array[j] = array[j - stride];
2325 } while (stride > 1);
2328 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2330 LASSERT(ppga != NULL);
2331 OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2334 static int brw_interpret(const struct lu_env *env,
2335 struct ptlrpc_request *req, void *args, int rc)
2337 struct osc_brw_async_args *aa = args;
2338 struct osc_extent *ext;
2339 struct osc_extent *tmp;
2340 struct client_obd *cli = aa->aa_cli;
2341 unsigned long transferred = 0;
2345 rc = osc_brw_fini_request(req, rc);
2346 CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2348 /* restore clear text pages */
2349 osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2352 * When the server returns -EINPROGRESS, the client should always retry
2353 * regardless of the number of times the bulk was resent already.
2355 if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2356 if (req->rq_import_generation !=
2357 req->rq_import->imp_generation) {
2358 CDEBUG(D_HA, "%s: resend cross eviction for object: "
2359 ""DOSTID", rc = %d.\n",
2360 req->rq_import->imp_obd->obd_name,
2361 POSTID(&aa->aa_oa->o_oi), rc);
2362 } else if (rc == -EINPROGRESS ||
2363 client_should_resend(aa->aa_resends, aa->aa_cli)) {
2364 rc = osc_brw_redo_request(req, aa, rc);
2366 CERROR("%s: too many resent retries for object: "
2367 "%llu:%llu, rc = %d.\n",
2368 req->rq_import->imp_obd->obd_name,
2369 POSTID(&aa->aa_oa->o_oi), rc);
2374 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2379 struct obdo *oa = aa->aa_oa;
2380 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2381 unsigned long valid = 0;
2382 struct cl_object *obj;
2383 struct osc_async_page *last;
2385 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2386 obj = osc2cl(last->oap_obj);
2388 cl_object_attr_lock(obj);
2389 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2390 attr->cat_blocks = oa->o_blocks;
2391 valid |= CAT_BLOCKS;
2393 if (oa->o_valid & OBD_MD_FLMTIME) {
2394 attr->cat_mtime = oa->o_mtime;
2397 if (oa->o_valid & OBD_MD_FLATIME) {
2398 attr->cat_atime = oa->o_atime;
2401 if (oa->o_valid & OBD_MD_FLCTIME) {
2402 attr->cat_ctime = oa->o_ctime;
2406 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2407 struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2408 loff_t last_off = last->oap_count + last->oap_obj_off +
2411 /* Change the file size if this is an out-of-quota or
2412 * direct IO write and it extends the file size. */
2413 if (loi->loi_lvb.lvb_size < last_off) {
2414 attr->cat_size = last_off;
2417 /* Extend KMS if it's not a lockless write */
2418 if (loi->loi_kms < last_off &&
2419 oap2osc_page(last)->ops_srvlock == 0) {
2420 attr->cat_kms = last_off;
2426 cl_object_attr_update(env, obj, attr, valid);
2427 cl_object_attr_unlock(obj);
2429 OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2432 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2433 osc_inc_unstable_pages(req);
2435 list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2436 list_del_init(&ext->oe_link);
2437 osc_extent_finish(env, ext, 1,
2438 rc && req->rq_no_delay ? -EAGAIN : rc);
2440 LASSERT(list_empty(&aa->aa_exts));
2441 LASSERT(list_empty(&aa->aa_oaps));
2443 transferred = (req->rq_bulk == NULL ? /* short io */
2444 aa->aa_requested_nob :
2445 req->rq_bulk->bd_nob_transferred);
2447 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2448 ptlrpc_lprocfs_brw(req, transferred);
2450 spin_lock(&cli->cl_loi_list_lock);
2451 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2452 * is called so we know whether to go to sync BRWs or wait for more
2453 * RPCs to complete */
2454 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2455 cli->cl_w_in_flight--;
2457 cli->cl_r_in_flight--;
2458 osc_wake_cache_waiters(cli);
2459 spin_unlock(&cli->cl_loi_list_lock);
2461 osc_io_unplug(env, cli, NULL);
2465 static void brw_commit(struct ptlrpc_request *req)
2467 /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2468 * this callback (invoked via rq_commit_cb), we need to ensure
2469 * osc_dec_unstable_pages is still called. Otherwise unstable
2470 * pages may be leaked. */
2471 spin_lock(&req->rq_lock);
2472 if (likely(req->rq_unstable)) {
2473 req->rq_unstable = 0;
2474 spin_unlock(&req->rq_lock);
2476 osc_dec_unstable_pages(req);
2478 req->rq_committed = 1;
2479 spin_unlock(&req->rq_lock);
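/*
 * A sketch of the interleavings handled above (illustrative, not
 * normative): if the BRW reply is processed first, rq_unstable is set
 * by the unstable-page accounting and brw_commit() performs the
 * matching osc_dec_unstable_pages(); if the commit callback wins the
 * race, it sets rq_committed so the accounting path can (presumably)
 * observe it and do the decrement itself.  Either way each increment
 * is paired with exactly one decrement.
 */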
2484 * Build an RPC from the list of extents @ext_list. The caller must ensure
2485 * that the total number of pages in this list does not exceed the maximum
2486 * pages per RPC. Extents in the list must be in the OES_RPC state.
2488 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2489 struct list_head *ext_list, int cmd)
2491 struct ptlrpc_request *req = NULL;
2492 struct osc_extent *ext;
2493 struct brw_page **pga = NULL;
2494 struct osc_brw_async_args *aa = NULL;
2495 struct obdo *oa = NULL;
2496 struct osc_async_page *oap;
2497 struct osc_object *obj = NULL;
2498 struct cl_req_attr *crattr = NULL;
2499 loff_t starting_offset = OBD_OBJECT_EOF;
2500 loff_t ending_offset = 0;
2501 /* '1' for consistency with code that checks !mpflag to restore */
2505 bool soft_sync = false;
2506 bool ndelay = false;
2510 __u32 layout_version = 0;
2511 LIST_HEAD(rpc_list);
2512 struct ost_body *body;
2514 LASSERT(!list_empty(ext_list));
2516 /* add pages into rpc_list to build BRW rpc */
2517 list_for_each_entry(ext, ext_list, oe_link) {
2518 LASSERT(ext->oe_state == OES_RPC);
2519 mem_tight |= ext->oe_memalloc;
2520 grant += ext->oe_grants;
2521 page_count += ext->oe_nr_pages;
2522 layout_version = max(layout_version, ext->oe_layout_version);
2527 soft_sync = osc_over_unstable_soft_limit(cli);
2529 mpflag = memalloc_noreclaim_save();
2531 OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2533 GOTO(out, rc = -ENOMEM);
2535 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2537 GOTO(out, rc = -ENOMEM);
2540 list_for_each_entry(ext, ext_list, oe_link) {
2541 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2543 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2545 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2546 pga[i] = &oap->oap_brw_page;
2547 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2550 list_add_tail(&oap->oap_rpc_item, &rpc_list);
2551 if (starting_offset == OBD_OBJECT_EOF ||
2552 starting_offset > oap->oap_obj_off)
2553 starting_offset = oap->oap_obj_off;
2555 LASSERT(oap->oap_page_off == 0);
2556 if (ending_offset < oap->oap_obj_off + oap->oap_count)
2557 ending_offset = oap->oap_obj_off +
2560 LASSERT(oap->oap_page_off + oap->oap_count ==
2567 /* first page in the list */
2568 oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);
2570 crattr = &osc_env_info(env)->oti_req_attr;
2571 memset(crattr, 0, sizeof(*crattr));
2572 crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2573 crattr->cra_flags = ~0ULL;
2574 crattr->cra_page = oap2cl_page(oap);
2575 crattr->cra_oa = oa;
2576 cl_req_attr_set(env, osc2cl(obj), crattr);
2578 if (cmd == OBD_BRW_WRITE) {
2579 oa->o_grant_used = grant;
2580 if (layout_version > 0) {
2581 CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2582 PFID(&oa->o_oi.oi_fid), layout_version);
2584 oa->o_layout_version = layout_version;
2585 oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2589 sort_brw_pages(pga, page_count);
2590 rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2592 CERROR("prep_req failed: %d\n", rc);
2596 req->rq_commit_cb = brw_commit;
2597 req->rq_interpret_reply = brw_interpret;
2598 req->rq_memalloc = mem_tight != 0;
2599 oap->oap_request = ptlrpc_request_addref(req);
2601 req->rq_no_resend = req->rq_no_delay = 1;
2602 /* We should probably set a shorter timeout value here
2603 * to handle ETIMEDOUT in brw_interpret() correctly. */
2604 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2607 /* We need to update the timestamps after the request is built, in case we
2608 * race with setattr (locally or in the queue at the OST). If the OST gets
2609 * the later setattr before the earlier BRW (as determined by the request
2610 * xid), the OST will not use the BRW timestamps. Sadly, there is no obvious
2611 * way to do this in a single call. Bug 10150. */
2612 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2613 crattr->cra_oa = &body->oa;
2614 crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2615 cl_req_attr_set(env, osc2cl(obj), crattr);
2616 lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2618 aa = ptlrpc_req_async_args(aa, req);
2619 INIT_LIST_HEAD(&aa->aa_oaps);
2620 list_splice_init(&rpc_list, &aa->aa_oaps);
2621 INIT_LIST_HEAD(&aa->aa_exts);
2622 list_splice_init(ext_list, &aa->aa_exts);
2624 spin_lock(&cli->cl_loi_list_lock);
2625 starting_offset >>= PAGE_SHIFT;
2626 if (cmd == OBD_BRW_READ) {
2627 cli->cl_r_in_flight++;
2628 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2629 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2630 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2631 starting_offset + 1);
2633 cli->cl_w_in_flight++;
2634 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2635 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2636 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2637 starting_offset + 1);
2639 spin_unlock(&cli->cl_loi_list_lock);
2641 DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2642 page_count, aa, cli->cl_r_in_flight,
2643 cli->cl_w_in_flight);
2644 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2646 ptlrpcd_add_req(req);
2652 memalloc_noreclaim_restore(mpflag);
2655 LASSERT(req == NULL);
2658 OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2660 osc_release_bounce_pages(pga, page_count);
2661 osc_release_ppga(pga, page_count);
2663 /* This should happen rarely and is pretty bad; it makes the
2664 * pending list not follow the dirty order.
2666 while ((ext = list_first_entry_or_null(ext_list,
2668 oe_link)) != NULL) {
2669 list_del_init(&ext->oe_link);
2670 osc_extent_finish(env, ext, 0, rc);
2676 /* This is to refresh our lock in the face of no RPCs. */
2677 void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
2679 struct ptlrpc_request *req;
2681 struct brw_page bpg = { .off = start, .count = 1};
2682 struct brw_page *pga = &bpg;
2685 memset(&oa, 0, sizeof(oa));
2686 oa.o_oi = osc->oo_oinfo->loi_oi;
2687 oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
2688 /* For updated servers - don't do a read */
2689 oa.o_flags = OBD_FL_NORPC;
2691 rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
2694 /* If we succeeded, we ship it off; if not, there's no point in doing
2695 * anything. Also no resends.
2696 * No interpret callback, no commit callback.
2699 req->rq_no_resend = 1;
2700 ptlrpcd_add_req(req);
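/*
 * osc_set_lock_data() below attaches @data (an osc_object) to the
 * lock's l_ast_data if nothing is attached yet, and returns nonzero
 * iff the lock now belongs to @data.  Illustrative caller pattern
 * (a sketch; compare osc_enqueue_base() and osc_match_base()):
 *
 *	if (osc_set_lock_data(lock, obj)) {
 *		... lock is bound to obj, safe to (re)use it ...
 *	} else {
 *		... lock already belongs to another object ...
 *	}
 */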
2704 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2708 LASSERT(lock != NULL);
2710 lock_res_and_lock(lock);
2712 if (lock->l_ast_data == NULL)
2713 lock->l_ast_data = data;
2714 if (lock->l_ast_data == data)
2717 unlock_res_and_lock(lock);
2722 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2723 void *cookie, struct lustre_handle *lockh,
2724 enum ldlm_mode mode, __u64 *flags, bool speculative,
2727 bool intent = *flags & LDLM_FL_HAS_INTENT;
2731 /* The request was created before the ldlm_cli_enqueue() call. */
2732 if (intent && errcode == ELDLM_LOCK_ABORTED) {
2733 struct ldlm_reply *rep;
2735 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2736 LASSERT(rep != NULL);
2738 rep->lock_policy_res1 =
2739 ptlrpc_status_ntoh(rep->lock_policy_res1);
2740 if (rep->lock_policy_res1)
2741 errcode = rep->lock_policy_res1;
2743 *flags |= LDLM_FL_LVB_READY;
2744 } else if (errcode == ELDLM_OK) {
2745 *flags |= LDLM_FL_LVB_READY;
2748 /* Call the update callback. */
2749 rc = (*upcall)(cookie, lockh, errcode);
2751 /* release the reference taken in ldlm_cli_enqueue() */
2752 if (errcode == ELDLM_LOCK_MATCHED)
2754 if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2755 ldlm_lock_decref(lockh, mode);
2760 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2763 struct osc_enqueue_args *aa = args;
2764 struct ldlm_lock *lock;
2765 struct lustre_handle *lockh = &aa->oa_lockh;
2766 enum ldlm_mode mode = aa->oa_mode;
2767 struct ost_lvb *lvb = aa->oa_lvb;
2768 __u32 lvb_len = sizeof(*lvb);
2770 struct ldlm_enqueue_info einfo = {
2771 .ei_type = aa->oa_type,
2777 /* ldlm_cli_enqueue is holding a reference on the lock, so it must be valid. */
2779 lock = ldlm_handle2lock(lockh);
2780 LASSERTF(lock != NULL,
2781 "lockh %#llx, req %p, aa %p - client evicted?\n",
2782 lockh->cookie, req, aa);
2784 /* Take an additional reference so that a blocking AST that
2785 * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
2786 * to arrive after an upcall has been executed by
2787 * osc_enqueue_fini(). */
2788 ldlm_lock_addref(lockh, mode);
2790 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2791 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2793 /* Let the CP AST grant the lock first. */
2794 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2796 if (aa->oa_speculative) {
2797 LASSERT(aa->oa_lvb == NULL);
2798 LASSERT(aa->oa_flags == NULL);
2799 aa->oa_flags = &flags;
2802 /* Complete the procedure of obtaining the lock. */
2803 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2804 lvb, lvb_len, lockh, rc);
2805 /* Complete osc stuff. */
2806 rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2807 aa->oa_flags, aa->oa_speculative, rc);
2809 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2811 ldlm_lock_decref(lockh, mode);
2812 LDLM_LOCK_PUT(lock);
2816 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a
2817 * lock from the 2nd OSC before a lock from the 1st one. This does not
2818 * deadlock with other synchronous requests; however, keeping some locks and
2819 * trying to obtain others may take a considerable amount of time in the case
2820 * of OST failure, and when a client does not release a lock that other sync
2821 * requests are waiting for, the client is evicted from the cluster -- such
2822 * scenarios make life difficult, so release locks just after they are obtained. */
2823 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2824 __u64 *flags, union ldlm_policy_data *policy,
2825 struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2826 void *cookie, struct ldlm_enqueue_info *einfo,
2827 struct ptlrpc_request_set *rqset, int async,
2830 struct obd_device *obd = exp->exp_obd;
2831 struct lustre_handle lockh = { 0 };
2832 struct ptlrpc_request *req = NULL;
2833 int intent = *flags & LDLM_FL_HAS_INTENT;
2834 __u64 match_flags = *flags;
2835 enum ldlm_mode mode;
2839 /* Filesystem lock extents are extended to page boundaries so that
2840 * dealing with the page cache is a little smoother. */
2841 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2842 policy->l_extent.end |= ~PAGE_MASK;
2844 /* Next, search for already existing extent locks that will cover us */
2845 /* If we're trying to read, we also search for an existing PW lock. The
2846 * VFS and page cache already protect us locally, so lots of readers/
2847 * writers can share a single PW lock.
2849 * There are problems with conversion deadlocks, so instead of
2850 * converting a read lock to a write lock, we'll just enqueue a new one.
2853 * At some point we should cancel the read lock instead of making them
2854 * send us a blocking callback, but there are problems with canceling
2855 * locks out from other users right now, too. */
2856 mode = einfo->ei_mode;
2857 if (einfo->ei_mode == LCK_PR)
2859 /* Normal lock requests must wait for the LVB to be ready before
2860 * matching a lock; speculative lock requests do not need to,
2861 * because they will not actually use the lock. */
2863 match_flags |= LDLM_FL_LVB_READY;
2865 match_flags |= LDLM_FL_BLOCK_GRANTED;
2866 mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2867 einfo->ei_type, policy, mode, &lockh);
2869 struct ldlm_lock *matched;
2871 if (*flags & LDLM_FL_TEST_LOCK)
2874 matched = ldlm_handle2lock(&lockh);
2876 /* This DLM lock request is speculative, and does not
2877 * have an associated IO request. Therefore if there
2878 * is already a DLM lock, it will just inform the
2879 * caller to cancel the request for this stripe. */
2880 lock_res_and_lock(matched);
2881 if (ldlm_extent_equal(&policy->l_extent,
2882 &matched->l_policy_data.l_extent))
2886 unlock_res_and_lock(matched);
2888 ldlm_lock_decref(&lockh, mode);
2889 LDLM_LOCK_PUT(matched);
2891 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2892 *flags |= LDLM_FL_LVB_READY;
2894 /* We already have a lock, and it's referenced. */
2895 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2897 ldlm_lock_decref(&lockh, mode);
2898 LDLM_LOCK_PUT(matched);
2901 ldlm_lock_decref(&lockh, mode);
2902 LDLM_LOCK_PUT(matched);
2906 if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2909 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2910 *flags &= ~LDLM_FL_BLOCK_GRANTED;
2912 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2913 sizeof(*lvb), LVB_T_OST, &lockh, async);
2916 struct osc_enqueue_args *aa;
2917 aa = ptlrpc_req_async_args(aa, req);
2919 aa->oa_mode = einfo->ei_mode;
2920 aa->oa_type = einfo->ei_type;
2921 lustre_handle_copy(&aa->oa_lockh, &lockh);
2922 aa->oa_upcall = upcall;
2923 aa->oa_cookie = cookie;
2924 aa->oa_speculative = speculative;
2926 aa->oa_flags = flags;
2929 /* Speculative locks essentially enqueue
2930 * a DLM lock in advance, so we don't care
2931 * about the result of the enqueue. */
2933 aa->oa_flags = NULL;
2936 req->rq_interpret_reply = osc_enqueue_interpret;
2937 ptlrpc_set_add_req(rqset, req);
2942 rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2943 flags, speculative, rc);
2948 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2949 struct ldlm_res_id *res_id, enum ldlm_type type,
2950 union ldlm_policy_data *policy, enum ldlm_mode mode,
2951 __u64 *flags, struct osc_object *obj,
2952 struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2954 struct obd_device *obd = exp->exp_obd;
2955 __u64 lflags = *flags;
2959 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2962 /* Filesystem lock extents are extended to page boundaries so that
2963 * dealing with the page cache is a little smoother. */
2964 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2965 policy->l_extent.end |= ~PAGE_MASK;
2967 /* Next, search for already existing extent locks that will cover us */
2968 rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
2969 res_id, type, policy, mode, lockh,
2971 if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2975 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2977 LASSERT(lock != NULL);
2978 if (osc_set_lock_data(lock, obj)) {
2979 lock_res_and_lock(lock);
2980 if (!ldlm_is_lvb_cached(lock)) {
2981 LASSERT(lock->l_ast_data == obj);
2982 osc_lock_lvb_update(env, obj, lock, NULL);
2983 ldlm_set_lvb_cached(lock);
2985 unlock_res_and_lock(lock);
2987 ldlm_lock_decref(lockh, rc);
2990 LDLM_LOCK_PUT(lock);
2995 static int osc_statfs_interpret(const struct lu_env *env,
2996 struct ptlrpc_request *req, void *args, int rc)
2998 struct osc_async_args *aa = args;
2999 struct obd_statfs *msfs;
3004 * The request has in fact never been sent due to issues at
3005 * a higher level (LOV). Exit immediately since the caller
3006 * is aware of the problem and takes care of the clean up.
3010 if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3011 (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3017 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3019 GOTO(out, rc = -EPROTO);
3021 *aa->aa_oi->oi_osfs = *msfs;
3023 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3028 static int osc_statfs_async(struct obd_export *exp,
3029 struct obd_info *oinfo, time64_t max_age,
3030 struct ptlrpc_request_set *rqset)
3032 struct obd_device *obd = class_exp2obd(exp);
3033 struct ptlrpc_request *req;
3034 struct osc_async_args *aa;
3038 if (obd->obd_osfs_age >= max_age) {
3040 "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
3041 obd->obd_name, &obd->obd_osfs,
3042 obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
3043 obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
3044 spin_lock(&obd->obd_osfs_lock);
3045 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
3046 spin_unlock(&obd->obd_osfs_lock);
3047 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
3048 if (oinfo->oi_cb_up)
3049 oinfo->oi_cb_up(oinfo, 0);
3054 /* We could possibly pass max_age in the request (as an absolute
3055 * timestamp or a "seconds.usec ago") so the target can avoid doing
3056 * extra calls into the filesystem if that isn't necessary (e.g.
3057 * during mount that would help a bit). Having relative timestamps
3058 * is not so great if request processing is slow, while absolute
3059 * timestamps are not ideal because they need time synchronization. */
3060 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3064 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3066 ptlrpc_request_free(req);
3069 ptlrpc_request_set_replen(req);
3070 req->rq_request_portal = OST_CREATE_PORTAL;
3071 ptlrpc_at_set_req_timeout(req);
3073 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3074 /* procfs requests should not wait for the statfs reply, to avoid deadlock */
3075 req->rq_no_resend = 1;
3076 req->rq_no_delay = 1;
3079 req->rq_interpret_reply = osc_statfs_interpret;
3080 aa = ptlrpc_req_async_args(aa, req);
3083 ptlrpc_set_add_req(rqset, req);
3087 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3088 struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3090 struct obd_device *obd = class_exp2obd(exp);
3091 struct obd_statfs *msfs;
3092 struct ptlrpc_request *req;
3093 struct obd_import *imp, *imp0;
3097 /* Since the request might also come from lprocfs, we need to
3098 * sync this with client_disconnect_export(); see Bug 15684.
3100 with_imp_locked(obd, imp0, rc)
3101 imp = class_import_get(imp0);
3105 /* We could possibly pass max_age in the request (as an absolute
3106 * timestamp or a "seconds.usec ago") so the target can avoid doing
3107 * extra calls into the filesystem if that isn't necessary (e.g.
3108 * during mount that would help a bit). Having relative timestamps
3109 * is not so great if request processing is slow, while absolute
3110 * timestamps are not ideal because they need time synchronization. */
3111 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3113 class_import_put(imp);
3118 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3120 ptlrpc_request_free(req);
3123 ptlrpc_request_set_replen(req);
3124 req->rq_request_portal = OST_CREATE_PORTAL;
3125 ptlrpc_at_set_req_timeout(req);
3127 if (flags & OBD_STATFS_NODELAY) {
3128 /* procfs requests should not wait for the statfs reply, to avoid deadlock */
3129 req->rq_no_resend = 1;
3130 req->rq_no_delay = 1;
3133 rc = ptlrpc_queue_wait(req);
3137 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3139 GOTO(out, rc = -EPROTO);
3145 ptlrpc_req_finished(req);
3149 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3150 void *karg, void __user *uarg)
3152 struct obd_device *obd = exp->exp_obd;
3153 struct obd_ioctl_data *data = karg;
3157 if (!try_module_get(THIS_MODULE)) {
3158 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3159 module_name(THIS_MODULE));
3163 case OBD_IOC_CLIENT_RECOVER:
3164 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3165 data->ioc_inlbuf1, 0);
3169 case IOC_OSC_SET_ACTIVE:
3170 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3175 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3176 obd->obd_name, cmd, current->comm, rc);
3180 module_put(THIS_MODULE);
3184 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3185 u32 keylen, void *key, u32 vallen, void *val,
3186 struct ptlrpc_request_set *set)
3188 struct ptlrpc_request *req;
3189 struct obd_device *obd = exp->exp_obd;
3190 struct obd_import *imp = class_exp2cliimp(exp);
3195 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3197 if (KEY_IS(KEY_CHECKSUM)) {
3198 if (vallen != sizeof(int))
3200 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3204 if (KEY_IS(KEY_SPTLRPC_CONF)) {
3205 sptlrpc_conf_client_adapt(obd);
3209 if (KEY_IS(KEY_FLUSH_CTX)) {
3210 sptlrpc_import_flush_my_ctx(imp);
3214 if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3215 struct client_obd *cli = &obd->u.cli;
3216 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3217 long target = *(long *)val;
3219 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3224 if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3227 /* We pass all other commands directly to the OST. Since nobody calls
3228 OSC methods directly and everybody is supposed to go through LOV, we
3229 assume LOV checked invalid values for us.
3230 The only recognised values so far are evict_by_nid and mds_conn.
3231 Even if something bad goes through, we'd get a -EINVAL from the OST
3234 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3235 &RQF_OST_SET_GRANT_INFO :
3240 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3241 RCL_CLIENT, keylen);
3242 if (!KEY_IS(KEY_GRANT_SHRINK))
3243 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3244 RCL_CLIENT, vallen);
3245 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3247 ptlrpc_request_free(req);
3251 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3252 memcpy(tmp, key, keylen);
3253 tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3256 memcpy(tmp, val, vallen);
3258 if (KEY_IS(KEY_GRANT_SHRINK)) {
3259 struct osc_grant_args *aa;
3262 aa = ptlrpc_req_async_args(aa, req);
3263 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3265 ptlrpc_req_finished(req);
3268 *oa = ((struct ost_body *)val)->oa;
3270 req->rq_interpret_reply = osc_shrink_grant_interpret;
3273 ptlrpc_request_set_replen(req);
3274 if (!KEY_IS(KEY_GRANT_SHRINK)) {
3275 LASSERT(set != NULL);
3276 ptlrpc_set_add_req(set, req);
3277 ptlrpc_check_set(NULL, set);
3279 ptlrpcd_add_req(req);
3284 EXPORT_SYMBOL(osc_set_info_async);
3286 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3287 struct obd_device *obd, struct obd_uuid *cluuid,
3288 struct obd_connect_data *data, void *localdata)
3290 struct client_obd *cli = &obd->u.cli;
3292 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3296 spin_lock(&cli->cl_loi_list_lock);
3297 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3298 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3299 /* restore ocd_grant_blkbits as client page bits */
3300 data->ocd_grant_blkbits = PAGE_SHIFT;
3301 grant += cli->cl_dirty_grant;
3303 grant += cli->cl_dirty_pages << PAGE_SHIFT;
3305 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
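/* The "?:" above means: request the grant we already account for or,
 * on a fresh connection with no grant accumulated yet, ask the server
 * for twice the BRW size as a starting amount. */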
3306 lost_grant = cli->cl_lost_grant;
3307 cli->cl_lost_grant = 0;
3308 spin_unlock(&cli->cl_loi_list_lock);
3310 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3311 " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3312 data->ocd_version, data->ocd_grant, lost_grant);
3317 EXPORT_SYMBOL(osc_reconnect);
3319 int osc_disconnect(struct obd_export *exp)
3321 struct obd_device *obd = class_exp2obd(exp);
3324 rc = client_disconnect_export(exp);
3326 * Initially we put del_shrink_grant before disconnect_export, but it
3327 * causes the following problem if setup (connect) and cleanup
3328 * (disconnect) are tangled together.
3329 * connect p1 disconnect p2
3330 * ptlrpc_connect_import
3331 * ............... class_manual_cleanup
3334 * ptlrpc_connect_interrupt
3336 * add this client to shrink list
3338 * Bang! The grant shrink thread triggers the shrink. BUG 18662.
3340 osc_del_grant_list(&obd->u.cli);
3343 EXPORT_SYMBOL(osc_disconnect);
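/*
 * osc_ldlm_resource_invalidate() below is a cfs_hash iterator callback
 * (used from osc_import_event() on IMP_EVENT_INVALIDATE): for each LDLM
 * resource it picks up the osc_object cached in a granted lock's
 * l_ast_data, clears LDLM_FL_CLEANED on the granted locks so a second
 * ldlm_namespace_cleanup() pass will cancel them, and then invalidates
 * the object.
 */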
3345 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3346 struct hlist_node *hnode, void *arg)
3348 struct lu_env *env = arg;
3349 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3350 struct ldlm_lock *lock;
3351 struct osc_object *osc = NULL;
3355 list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3356 if (lock->l_ast_data != NULL && osc == NULL) {
3357 osc = lock->l_ast_data;
3358 cl_object_get(osc2cl(osc));
3361 /* Clear the LDLM_FL_CLEANED flag to make sure the lock will be
3362 * canceled by the 2nd round of the ldlm_namespace_clean() call in
3363 * osc_import_event(). */
3364 ldlm_clear_cleaned(lock);
3369 osc_object_invalidate(env, osc);
3370 cl_object_put(env, osc2cl(osc));
3375 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3377 static int osc_import_event(struct obd_device *obd,
3378 struct obd_import *imp,
3379 enum obd_import_event event)
3381 struct client_obd *cli;
3385 LASSERT(imp->imp_obd == obd);
3388 case IMP_EVENT_DISCON: {
3390 spin_lock(&cli->cl_loi_list_lock);
3391 cli->cl_avail_grant = 0;
3392 cli->cl_lost_grant = 0;
3393 spin_unlock(&cli->cl_loi_list_lock);
3396 case IMP_EVENT_INACTIVE: {
3397 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3400 case IMP_EVENT_INVALIDATE: {
3401 struct ldlm_namespace *ns = obd->obd_namespace;
3405 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3407 env = cl_env_get(&refcheck);
3409 osc_io_unplug(env, &obd->u.cli, NULL);
3411 cfs_hash_for_each_nolock(ns->ns_rs_hash,
3412 osc_ldlm_resource_invalidate,
3414 cl_env_put(env, &refcheck);
3416 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3421 case IMP_EVENT_ACTIVE: {
3422 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3425 case IMP_EVENT_OCD: {
3426 struct obd_connect_data *ocd = &imp->imp_connect_data;
3428 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3429 osc_init_grant(&obd->u.cli, ocd);
3432 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3433 imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3435 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3438 case IMP_EVENT_DEACTIVATE: {
3439 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3442 case IMP_EVENT_ACTIVATE: {
3443 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3447 CERROR("Unknown import event %d\n", event);
3454 * Determine whether the lock can be canceled before replaying the lock
3455 * during recovery; see Bug 16774 for detailed information.
3457 * \retval zero the lock can't be canceled
3458 * \retval other ok to cancel
3460 static int osc_cancel_weight(struct ldlm_lock *lock)
3463 * Cancel all unused and granted extent locks.
3465 if (lock->l_resource->lr_type == LDLM_EXTENT &&
3466 ldlm_is_granted(lock) &&
3467 osc_ldlm_weigh_ast(lock) == 0)
3473 static int brw_queue_work(const struct lu_env *env, void *data)
3475 struct client_obd *cli = data;
3477 CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3479 osc_io_unplug(env, cli, NULL);
3483 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3485 struct client_obd *cli = &obd->u.cli;
3491 rc = ptlrpcd_addref();
3495 rc = client_obd_setup(obd, lcfg);
3497 GOTO(out_ptlrpcd, rc);
3500 handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3501 if (IS_ERR(handler))
3502 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3503 cli->cl_writeback_work = handler;
3505 handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3506 if (IS_ERR(handler))
3507 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3508 cli->cl_lru_work = handler;
3510 rc = osc_quota_setup(obd);
3512 GOTO(out_ptlrpcd_work, rc);
3514 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3515 osc_update_next_shrink(cli);
3520 if (cli->cl_writeback_work != NULL) {
3521 ptlrpcd_destroy_work(cli->cl_writeback_work);
3522 cli->cl_writeback_work = NULL;
3524 if (cli->cl_lru_work != NULL) {
3525 ptlrpcd_destroy_work(cli->cl_lru_work);
3526 cli->cl_lru_work = NULL;
3528 client_obd_cleanup(obd);
3533 EXPORT_SYMBOL(osc_setup_common);
3535 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3537 struct client_obd *cli = &obd->u.cli;
3545 rc = osc_setup_common(obd, lcfg);
3549 rc = osc_tunables_init(obd);
3554 * We try to control the total number of requests with an upper limit,
3555 * osc_reqpool_maxreqcount. There might be some races that cause
3556 * over-limit allocation, but that is fine.
3558 req_count = atomic_read(&osc_pool_req_count);
3559 if (req_count < osc_reqpool_maxreqcount) {
3560 adding = cli->cl_max_rpcs_in_flight + 2;
3561 if (req_count + adding > osc_reqpool_maxreqcount)
3562 adding = osc_reqpool_maxreqcount - req_count;
3564 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3565 atomic_add(added, &osc_pool_req_count);
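/* E.g. (illustrative numbers): with cl_max_rpcs_in_flight = 8 this
 * client asks for 8 + 2 = 10 more pool requests, trimmed above so the
 * global count never exceeds osc_reqpool_maxreqcount. */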
3568 ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3570 spin_lock(&osc_shrink_lock);
3571 list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3572 spin_unlock(&osc_shrink_lock);
3573 cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3574 cli->cl_import->imp_idle_debug = D_HA;
3579 int osc_precleanup_common(struct obd_device *obd)
3581 struct client_obd *cli = &obd->u.cli;
3585 * for echo client, export may be on zombie list, wait for
3586 * zombie thread to cull it, because cli.cl_import will be
3587 * cleared in client_disconnect_export():
3588 * class_export_destroy() -> obd_cleanup() ->
3589 * echo_device_free() -> echo_client_cleanup() ->
3590 * obd_disconnect() -> osc_disconnect() ->
3591 * client_disconnect_export()
3593 obd_zombie_barrier();
3594 if (cli->cl_writeback_work) {
3595 ptlrpcd_destroy_work(cli->cl_writeback_work);
3596 cli->cl_writeback_work = NULL;
3599 if (cli->cl_lru_work) {
3600 ptlrpcd_destroy_work(cli->cl_lru_work);
3601 cli->cl_lru_work = NULL;
3604 obd_cleanup_client_import(obd);
3607 EXPORT_SYMBOL(osc_precleanup_common);
3609 static int osc_precleanup(struct obd_device *obd)
3613 osc_precleanup_common(obd);
3615 ptlrpc_lprocfs_unregister_obd(obd);
3619 int osc_cleanup_common(struct obd_device *obd)
3621 struct client_obd *cli = &obd->u.cli;
3626 spin_lock(&osc_shrink_lock);
3627 list_del(&cli->cl_shrink_list);
3628 spin_unlock(&osc_shrink_lock);
3631 if (cli->cl_cache != NULL) {
3632 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3633 spin_lock(&cli->cl_cache->ccc_lru_lock);
3634 list_del_init(&cli->cl_lru_osc);
3635 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3636 cli->cl_lru_left = NULL;
3637 cl_cache_decref(cli->cl_cache);
3638 cli->cl_cache = NULL;
3641 /* free memory of osc quota cache */
3642 osc_quota_cleanup(obd);
3644 rc = client_obd_cleanup(obd);
3649 EXPORT_SYMBOL(osc_cleanup_common);
3651 static const struct obd_ops osc_obd_ops = {
3652 .o_owner = THIS_MODULE,
3653 .o_setup = osc_setup,
3654 .o_precleanup = osc_precleanup,
3655 .o_cleanup = osc_cleanup_common,
3656 .o_add_conn = client_import_add_conn,
3657 .o_del_conn = client_import_del_conn,
3658 .o_connect = client_connect_import,
3659 .o_reconnect = osc_reconnect,
3660 .o_disconnect = osc_disconnect,
3661 .o_statfs = osc_statfs,
3662 .o_statfs_async = osc_statfs_async,
3663 .o_create = osc_create,
3664 .o_destroy = osc_destroy,
3665 .o_getattr = osc_getattr,
3666 .o_setattr = osc_setattr,
3667 .o_iocontrol = osc_iocontrol,
3668 .o_set_info_async = osc_set_info_async,
3669 .o_import_event = osc_import_event,
3670 .o_quotactl = osc_quotactl,
3673 LIST_HEAD(osc_shrink_list);
3674 DEFINE_SPINLOCK(osc_shrink_lock);
3676 #ifdef HAVE_SHRINKER_COUNT
3677 static struct shrinker osc_cache_shrinker = {
3678 .count_objects = osc_cache_shrink_count,
3679 .scan_objects = osc_cache_shrink_scan,
3680 .seeks = DEFAULT_SEEKS,
3683 static int osc_cache_shrink(struct shrinker *shrinker,
3684 struct shrink_control *sc)
3686 (void)osc_cache_shrink_scan(shrinker, sc);
3688 return osc_cache_shrink_count(shrinker, sc);
3691 static struct shrinker osc_cache_shrinker = {
3692 .shrink = osc_cache_shrink,
3693 .seeks = DEFAULT_SEEKS,
3697 static int __init osc_init(void)
3699 unsigned int reqpool_size;
3700 unsigned int reqsize;
3704 /* Print an address of _any_ initialized kernel symbol from this
3705 * module, to allow debugging with a gdb that doesn't support data
3706 * symbols from modules. */
3707 CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3709 rc = lu_kmem_init(osc_caches);
3713 rc = class_register_type(&osc_obd_ops, NULL, true,
3714 LUSTRE_OSC_NAME, &osc_device_type);
3718 rc = register_shrinker(&osc_cache_shrinker);
3722 /* This is obviously too much memory; we only prevent overflow here. */
3723 if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3724 GOTO(out_shrinker, rc = -EINVAL);
3726 reqpool_size = osc_reqpool_mem_max << 20;
3729 while (reqsize < OST_IO_MAXREQSIZE)
3730 reqsize = reqsize << 1;
3733 * We don't enlarge the request count in the OSC pool according to
3734 * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3735 * after a normal allocation has failed, so a small OSC pool won't
3736 * cause much performance degradation in most cases.
3738 osc_reqpool_maxreqcount = reqpool_size / reqsize;
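/* Worked example (illustrative numbers): with the default
 * osc_reqpool_mem_max of 5 MB, reqpool_size is 5 << 20 bytes; if
 * OST_IO_MAXREQSIZE rounds up to a hypothetical 64 KB, the pool is
 * capped at (5 << 20) / (64 << 10) = 80 requests. */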
3740 atomic_set(&osc_pool_req_count, 0);
3741 osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3742 ptlrpc_add_rqs_to_pool);
3744 if (osc_rq_pool == NULL)
3745 GOTO(out_shrinker, rc = -ENOMEM);
3747 rc = osc_start_grant_work();
3749 GOTO(out_req_pool, rc);
3754 ptlrpc_free_rq_pool(osc_rq_pool);
3756 unregister_shrinker(&osc_cache_shrinker);
3758 class_unregister_type(LUSTRE_OSC_NAME);
3760 lu_kmem_fini(osc_caches);
3765 static void __exit osc_exit(void)
3767 osc_stop_grant_work();
3768 unregister_shrinker(&osc_cache_shrinker);
3769 class_unregister_type(LUSTRE_OSC_NAME);
3770 lu_kmem_fini(osc_caches);
3771 ptlrpc_free_rq_pool(osc_rq_pool);
3774 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3775 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3776 MODULE_VERSION(LUSTRE_VERSION_STRING);
3777 MODULE_LICENSE("GPL");
3779 module_init(osc_init);
3780 module_exit(osc_exit);